drvenabili commited on
Commit
b5eefd8
·
verified ·
1 Parent(s): 7849b62

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +114 -291
README.md CHANGED
@@ -1,295 +1,118 @@
1
  ---
2
  dataset_info:
3
- - config_name: '1640'
4
- features:
5
- - name: text
6
- dtype: string
7
- splits:
8
- - name: train
9
- num_bytes: 254777
10
- num_examples: 3509
11
- download_size: 114173
12
- dataset_size: 254777
13
- - config_name: '1650'
14
- features:
15
- - name: text
16
- dtype: string
17
- splits:
18
- - name: train
19
- num_bytes: 31314
20
- num_examples: 412
21
- download_size: 15122
22
- dataset_size: 31314
23
- - config_name: '1660'
24
- features:
25
- - name: text
26
- dtype: string
27
- splits:
28
- - name: train
29
- num_bytes: 56559
30
- num_examples: 726
31
- download_size: 25941
32
- dataset_size: 56559
33
- - config_name: '1670'
34
- features:
35
- - name: text
36
- dtype: string
37
- splits:
38
- - name: train
39
- num_bytes: 15093
40
- num_examples: 188
41
- download_size: 8153
42
- dataset_size: 15093
43
- - config_name: '1680'
44
- features:
45
- - name: text
46
- dtype: string
47
- splits:
48
- - name: train
49
- num_bytes: 1290089
50
- num_examples: 17458
51
- download_size: 609438
52
- dataset_size: 1290089
53
- - config_name: '1690'
54
- features:
55
- - name: text
56
- dtype: string
57
- splits:
58
- - name: train
59
- num_bytes: 2977705
60
- num_examples: 42333
61
- download_size: 1355778
62
- dataset_size: 2977705
63
- - config_name: '1700'
64
- features:
65
- - name: text
66
- dtype: string
67
- splits:
68
- - name: train
69
- num_bytes: 3800917
70
- num_examples: 53331
71
- download_size: 1702603
72
- dataset_size: 3800917
73
- - config_name: '1710'
74
- features:
75
- - name: text
76
- dtype: string
77
- splits:
78
- - name: train
79
- num_bytes: 1601983
80
- num_examples: 22763
81
- download_size: 733219
82
- dataset_size: 1601983
83
- - config_name: '1720'
84
- features:
85
- - name: text
86
- dtype: string
87
- splits:
88
- - name: train
89
- num_bytes: 2268261
90
- num_examples: 32813
91
- download_size: 1012144
92
- dataset_size: 2268261
93
- - config_name: '1730'
94
- features:
95
- - name: text
96
- dtype: string
97
- splits:
98
- - name: train
99
- num_bytes: 5498116
100
- num_examples: 79079
101
- download_size: 2515986
102
- dataset_size: 5498116
103
- - config_name: '1740'
104
- features:
105
- - name: text
106
- dtype: string
107
- splits:
108
- - name: train
109
- num_bytes: 10147602
110
- num_examples: 149317
111
- download_size: 4572359
112
- dataset_size: 10147602
113
- - config_name: '1750'
114
- features:
115
- - name: text
116
- dtype: string
117
- splits:
118
- - name: train
119
- num_bytes: 14183279
120
- num_examples: 212000
121
- download_size: 6235076
122
- dataset_size: 14183279
123
- - config_name: '1760'
124
- features:
125
- - name: text
126
- dtype: string
127
- splits:
128
- - name: train
129
- num_bytes: 34039377
130
- num_examples: 545759
131
- download_size: 15159865
132
- dataset_size: 34039377
133
- - config_name: '1770'
134
- features:
135
- - name: text
136
- dtype: string
137
- splits:
138
- - name: train
139
- num_bytes: 89191958
140
- num_examples: 1333609
141
- download_size: 39582304
142
- dataset_size: 89191958
143
- - config_name: '1780'
144
- features:
145
- - name: text
146
- dtype: string
147
- splits:
148
- - name: train
149
- num_bytes: 136703541
150
- num_examples: 2015223
151
- download_size: 60960878
152
- dataset_size: 136703541
153
- - config_name: '1790'
154
- features:
155
- - name: text
156
- dtype: string
157
- splits:
158
- - name: train
159
- num_bytes: 163823087
160
- num_examples: 2435714
161
- download_size: 72860792
162
- dataset_size: 163823087
163
- - config_name: '1800'
164
- features:
165
- - name: text
166
- dtype: string
167
- splits:
168
- - name: train
169
- num_bytes: 220361417
170
- num_examples: 3368887
171
- download_size: 98935407
172
- dataset_size: 220361417
173
- - config_name: '1810'
174
- features:
175
- - name: text
176
- dtype: string
177
- splits:
178
- - name: train
179
- num_bytes: 263830012
180
- num_examples: 4205776
181
- download_size: 122219730
182
- dataset_size: 263830012
183
- - config_name: '1820'
184
- features:
185
- - name: text
186
- dtype: string
187
- splits:
188
- - name: train
189
- num_bytes: 395727486
190
- num_examples: 6265710
191
- download_size: 175240370
192
- dataset_size: 395727486
193
- - config_name: '1830'
194
- features:
195
- - name: text
196
- dtype: string
197
- splits:
198
- - name: train
199
- num_bytes: 580725783
200
- num_examples: 9355635
201
- download_size: 254403662
202
- dataset_size: 580725783
203
- - config_name: '1840'
204
- features:
205
- - name: text
206
- dtype: string
207
- splits:
208
- - name: train
209
- num_bytes: 898420001
210
- num_examples: 14051720
211
- download_size: 381018147
212
- dataset_size: 898420001
213
- - config_name: '1850'
214
- features:
215
- - name: text
216
- dtype: string
217
- splits:
218
- - name: train
219
- num_bytes: 1354049159
220
- num_examples: 21187511
221
- download_size: 570228565
222
- dataset_size: 1354049159
223
- - config_name: '1860'
224
- features:
225
- - name: text
226
- dtype: string
227
- splits:
228
- - name: train
229
- num_bytes: 2512543535
230
- num_examples: 39321823
231
- download_size: 1046916115
232
- dataset_size: 2512543535
233
- - config_name: '1870'
234
- features:
235
- - name: text
236
- dtype: string
237
- splits:
238
- - name: train
239
- num_bytes: 3383836222
240
- num_examples: 53045312
241
- download_size: 1399880807
242
- dataset_size: 3383836222
243
- - config_name: '1880'
244
- features:
245
- - name: text
246
- dtype: string
247
- splits:
248
- - name: train
249
- num_bytes: 4501878144
250
- num_examples: 72015436
251
- download_size: 1827179641
252
- dataset_size: 4501878144
253
- - config_name: '1890'
254
- features:
255
- - name: text
256
- dtype: string
257
- splits:
258
- - name: train
259
- num_bytes: 3219902112
260
- num_examples: 52337279
261
- download_size: 1315107645
262
- dataset_size: 3219902112
263
- - config_name: '1900'
264
- features:
265
- - name: text
266
- dtype: string
267
- splits:
268
- - name: train
269
- num_bytes: 205822484
270
- num_examples: 3284826
271
- download_size: 84811326
272
- dataset_size: 205822484
273
- - config_name: all
274
- features:
275
- - name: text
276
- dtype: string
277
- splits:
278
- - name: train
279
- num_bytes: 7999426267
280
- num_examples: 285384149
281
- download_size: 7483375536
282
- dataset_size: 7999426267
283
- - config_name: default
284
- features:
285
- - name: text
286
- dtype: string
287
- splits:
288
- - name: train
289
- num_bytes: 36005960212
290
- num_examples: 570768298
291
- download_size: 14966780782
292
- dataset_size: 36005960212
293
  license: cc-by-sa-4.0
294
  task_categories:
295
  - text-generation
@@ -465,4 +288,4 @@ You should always cite the original kubhist2 release, provided below as bibtex.
465
 
466
  The compute dedicated to the creation of the dataset has been provided by [iguanodon.ai](https://iguanodon.ai).
467
 
468
- Many thanks got to Språkbanken Text for creating and curating this resource.
 
1
  ---
2
  dataset_info:
3
+ - config_name: '1640'
4
+ data_files:
5
+ - split: train
6
+ path: data/1640/train/*
7
+ - config_name: '1650'
8
+ data_files:
9
+ - split: train
10
+ path: data/1650/train/*
11
+ - config_name: '1660'
12
+ data_files:
13
+ - split: train
14
+ path: data/1660/train/*
15
+ - config_name: '1670'
16
+ data_files:
17
+ - split: train
18
+ path: data/1670/train/*
19
+ - config_name: '1680'
20
+ data_files:
21
+ - split: train
22
+ path: data/1680/train/*
23
+ - config_name: '1690'
24
+ data_files:
25
+ - split: train
26
+ path: data/1690/train/*
27
+ - config_name: '1700'
28
+ data_files:
29
+ - split: train
30
+ path: data/1700/train/*
31
+ - config_name: '1710'
32
+ data_files:
33
+ - split: train
34
+ path: data/1710/train/*
35
+ - config_name: '1720'
36
+ data_files:
37
+ - split: train
38
+ path: data/1720/train/*
39
+ - config_name: '1730'
40
+ data_files:
41
+ - split: train
42
+ path: data/1730/train/*
43
+ - config_name: '1740'
44
+ data_files:
45
+ - split: train
46
+ path: data/1740/train/*
47
+ - config_name: '1750'
48
+ data_files:
49
+ - split: train
50
+ path: data/1750/train/*
51
+ - config_name: '1760'
52
+ data_files:
53
+ - split: train
54
+ path: data/1760/train/*
55
+ - config_name: '1770'
56
+ data_files:
57
+ - split: train
58
+ path: data/1770/train/*
59
+ - config_name: '1780'
60
+ data_files:
61
+ - split: train
62
+ path: data/1780/train/*
63
+ - config_name: '1790'
64
+ data_files:
65
+ - split: train
66
+ path: data/1790/train/*
67
+ - config_name: '1800'
68
+ data_files:
69
+ - split: train
70
+ path: data/1800/train/*
71
+ - config_name: '1810'
72
+ data_files:
73
+ - split: train
74
+ path: data/1810/train/*
75
+ - config_name: '1820'
76
+ data_files:
77
+ - split: train
78
+ path: data/1820/train/*
79
+ - config_name: '1830'
80
+ data_files:
81
+ - split: train
82
+ path: data/1830/train/*
83
+ - config_name: '1840'
84
+ data_files:
85
+ - split: train
86
+ path: data/1840/train/*
87
+ - config_name: '1850'
88
+ data_files:
89
+ - split: train
90
+ path: data/1850/train/*
91
+ - config_name: '1860'
92
+ data_files:
93
+ - split: train
94
+ path: data/1860/train/*
95
+ - config_name: '1870'
96
+ data_files:
97
+ - split: train
98
+ path: data/1870/train/*
99
+ - config_name: '1880'
100
+ data_files:
101
+ - split: train
102
+ path: data/1880/train/*
103
+ - config_name: '1890'
104
+ data_files:
105
+ - split: train
106
+ path: data/1890/train/*
107
+ - config_name: '1900'
108
+ data_files:
109
+ - split: train
110
+ path: data/1900/train/*
111
+ - config_name: 'all'
112
+ data_files:
113
+ - split: train
114
+ path: data/all/train/*
115
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  license: cc-by-sa-4.0
117
  task_categories:
118
  - text-generation
 
288
 
289
  The compute dedicated to the creation of the dataset has been provided by [iguanodon.ai](https://iguanodon.ai).
290
 
291
+ Many thanks go to Språkbanken Text for creating and curating this resource.