Timostrijbis committed on
Commit da64bfd
1 Parent(s): 4c3ca48

Training in progress, epoch 1

config.json CHANGED
@@ -1,29 +1,26 @@
  {
- "_name_or_path": "pdelobelle/robbert-v2-dutch-base",
+ "_name_or_path": "GroNLP/bert-base-dutch-cased",
  "architectures": [
- "RobertaForQuestionAnswering"
+ "BertForQuestionAnswering"
  ],
  "attention_probs_dropout_prob": 0.1,
- "bos_token_id": 0,
  "classifier_dropout": null,
- "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
- "layer_norm_eps": 1e-05,
- "max_position_embeddings": 514,
- "model_type": "roberta",
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
- "output_past": true,
- "pad_token_id": 1,
+ "pad_token_id": 3,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.28.0",
- "type_vocab_size": 1,
+ "type_vocab_size": 2,
  "use_cache": true,
- "vocab_size": 40000
+ "vocab_size": 30073
  }
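The net effect of this config change is a backbone swap from RobBERT (a Dutch RoBERTa) to GroNLP/bert-base-dutch-cased (BERTje), which also changes model_type, max_position_embeddings, vocab_size, type_vocab_size and pad_token_id. A minimal, hedged sketch of loading and inspecting such a configuration with transformers; the fine-tuned repo id is not shown in this diff, so the base checkpoint named in "_name_or_path" is used as a stand-in:

# Sketch: inspect the backbone the new config points at. "GroNLP/bert-base-dutch-cased"
# comes from "_name_or_path" above; the fine-tuned repo itself is not named in this diff.
from transformers import AutoConfig, AutoModelForQuestionAnswering

config = AutoConfig.from_pretrained("GroNLP/bert-base-dutch-cased")
print(config.model_type)               # "bert" (was "roberta")
print(config.max_position_embeddings)  # 512 (was 514)

# Loading a QA model straight from the base checkpoint initialises a fresh
# question-answering head (with a missing-weights warning); the trained head
# lives in this commit's pytorch_model.bin.
model = AutoModelForQuestionAnswering.from_pretrained("GroNLP/bert-base-dutch-cased")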
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7070e04b889cbefa14b5f95764f1ec1039b4b602de4937a2c7ae58e958301cb0
- size 464764973
+ oid sha256:4d150cea77ca12a1275fc37877f9a0bcbe50c8421697232006501ecc5c355191
+ size 434265581
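pytorch_model.bin is tracked with Git LFS, so the diff only shows the pointer file: the blob's SHA-256 (oid) and byte size. The new size (~434 MB) is roughly what a bert-base float32 checkpoint comes to. A hedged sketch for checking a locally fetched copy against the new pointer; the local path is an assumption:

# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

expected_oid = "4d150cea77ca12a1275fc37877f9a0bcbe50c8421697232006501ecc5c355191"
expected_size = 434265581
path = "pytorch_model.bin"  # assumed local path, e.g. after `git lfs pull`

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size
assert h.hexdigest() == expected_oid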
runs/Jul13_16-27-07_eb4ed647cebd/1689265756.2445168/events.out.tfevents.1689265756.eb4ed647cebd.5537.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e336989066a94dc4ef874bd201545b69e71f30345402d6260ec4c442d1b62351
+ size 5841
runs/Jul13_16-27-07_eb4ed647cebd/events.out.tfevents.1689265756.eb4ed647cebd.5537.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29336e71a298a17317ea3738fd1b6004d5bb6f2f81d47f6b5f760f623ba4d74d
+ size 7420
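The two files added under runs/ are TensorBoard event logs written during this training run (also stored via LFS, hence the pointer-only diffs). A hedged sketch for reading the scalar log offline, assuming the tensorboard package is installed and the run directory has been pulled locally:

# Sketch: inspect the TensorBoard event file added in this commit.
# Available tags depend on what was actually logged during training.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator(
    "runs/Jul13_16-27-07_eb4ed647cebd/events.out.tfevents.1689265756.eb4ed647cebd.5537.0"
)
ea.Reload()
print(ea.Tags()["scalars"])                 # e.g. a training-loss tag, if logged
for event in ea.Scalars(ea.Tags()["scalars"][0]):
    print(event.step, event.value)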
special_tokens_map.json CHANGED
@@ -1,15 +1,7 @@
  {
- "bos_token": "<s>",
- "cls_token": "<s>",
- "eos_token": "</s>",
- "mask_token": {
- "content": "<mask>",
- "lstrip": true,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "pad_token": "<pad>",
- "sep_token": "</s>",
- "unk_token": "<unk>"
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
  }
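The special-token map moves from RoBERTa-style tokens (<s>, </s>, <mask>, <pad>) to BERT WordPiece tokens ([CLS], [SEP], [MASK], [PAD]); there is no bos/eos pair any more, since BERT delimits sequences with [CLS] and [SEP] only. A hedged sketch, again using the base checkpoint as a stand-in for the unnamed fine-tuned repo:

# Sketch: confirm the BERT-style special tokens this commit switches to.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("GroNLP/bert-base-dutch-cased")
print(tok.special_tokens_map)
# Expected per the new special_tokens_map.json:
# {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
#  'cls_token': '[CLS]', 'mask_token': '[MASK]'}
print(tok.pad_token_id)  # 3, matching "pad_token_id" in the new config.json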
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,64 +1,15 @@
  {
- "add_prefix_space": false,
- "bos_token": {
- "__type": "AddedToken",
- "content": "<s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
  "clean_up_tokenization_spaces": true,
- "cls_token": {
- "__type": "AddedToken",
- "content": "<s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "eos_token": {
- "__type": "AddedToken",
- "content": "</s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "errors": "replace",
- "mask_token": {
- "__type": "AddedToken",
- "content": "<mask>",
- "lstrip": true,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": false,
+ "mask_token": "[MASK]",
  "model_max_length": 512,
- "pad_token": {
- "__type": "AddedToken",
- "content": "<pad>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "sep_token": {
- "__type": "AddedToken",
- "content": "</s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "tokenizer_class": "RobertaTokenizer",
- "trim_offsets": true,
- "unk_token": {
- "__type": "AddedToken",
- "content": "<unk>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- }
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
  }
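tokenizer_config.json now describes a cased BertTokenizer ("do_lower_case": false) instead of a byte-level RobertaTokenizer, with model_max_length kept at 512. For extractive QA the tokenizer encodes (question, context) pairs, and the token_type_ids it emits line up with the "type_vocab_size": 2 in the new config. A hedged sketch; the Dutch strings are illustrative only:

# Sketch: encoding a (question, context) pair with the new cased WordPiece tokenizer.
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("GroNLP/bert-base-dutch-cased")
enc = tok(
    "Waar ligt Groningen?",                          # question
    "Groningen ligt in het noorden van Nederland.",  # context
    truncation=True,
    max_length=512,   # matches model_max_length / max_position_embeddings
    return_tensors="pt",
)
print(enc["input_ids"].shape)
print(enc["token_type_ids"])  # 0 for the question segment, 1 for the context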
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a6d9d0b3ef6c94ba76b4f9b408cc6c828a2650b288e90f99406334abbd300507
+ oid sha256:ae59aec601882220deb069901988e4b30a5b13256ef617f72e6795c231b7b83f
  size 3579
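training_args.bin is the Trainer's serialized TrainingArguments, saved with torch.save; here its hash changes while the size stays at 3579 bytes, so some argument values changed between runs. A hedged sketch for inspecting it; only unpickle files you trust, since unpickling can execute code, and the printed fields are standard TrainingArguments attributes rather than values visible in this diff:

# Sketch: inspect the serialized TrainingArguments from this commit.
import torch

args = torch.load("training_args.bin")
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)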
vocab.txt ADDED
The diff for this file is too large to render. See raw diff