Fill-Mask
Transformers
Safetensors
modernbert
masked-lm
long-context
timpal0l committed
Commit 0f57df5 · verified · 1 Parent(s): 20aa685

Upload ModernBertForMaskedLM

Files changed (2)
  1. config.json +4 -4
  2. model.safetensors +3 -0
config.json CHANGED
@@ -19,10 +19,10 @@
   "global_rope_theta": 160000.0,
   "gradient_checkpointing": false,
   "hidden_activation": "gelu",
- "hidden_size": 1024,
+ "hidden_size": 768,
   "initializer_cutoff_factor": 2.0,
   "initializer_range": 0.02,
- "intermediate_size": 2624,
+ "intermediate_size": 1152,
   "layer_norm_eps": 1e-05,
   "local_attention": 128,
   "local_rope_theta": 10000.0,
@@ -32,8 +32,8 @@
   "model_type": "modernbert",
   "norm_bias": false,
   "norm_eps": 1e-05,
- "num_attention_heads": 16,
- "num_hidden_layers": 28,
+ "num_attention_heads": 12,
+ "num_hidden_layers": 22,
   "pad_token_id": 50283,
   "position_embedding_type": "absolute",
   "repad_logits_with_grad": false,
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f0c864e9da0be2645cfbcbfa51e4868a6f7973e670d5a8a712b51b9d466ede7
+ size 598635032
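The added model.safetensors is a Git LFS pointer; the ~599 MB weight file itself is stored via LFS and resolved on download. Below is a minimal usage sketch for the uploaded ModernBertForMaskedLM checkpoint with the fill-mask pipeline; the repo id is a placeholder, not the actual repository name.

from transformers import pipeline

# "timpal0l/<repo-name>" is a placeholder repo id -- substitute the real one.
fill_mask = pipeline("fill-mask", model="timpal0l/<repo-name>")

# Build a prompt with the tokenizer's own mask token and print top predictions.
masked = f"Paris is the {fill_mask.tokenizer.mask_token} of France."
print(fill_mask(masked))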