Commit a6b9b65 by nlparabic (parent: dd92e82)

Training in progress, epoch 1

config.json CHANGED
@@ -35,5 +35,5 @@
   "torch_dtype": "float32",
   "transformers_version": "4.45.0.dev0",
   "use_cache": true,
-  "vocab_size": 64005
+  "vocab_size": 64006
 }
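The vocab_size bump from 64005 to 64006 matches the one special token this commit appends to the tokenizer (`<sep>` at id 64005; see the tokenizer diffs below). A minimal sketch of keeping a model in sync with a grown vocabulary, assuming a standard transformers workflow rather than the repo's actual (unshown) training script, with a placeholder checkpoint path:

from transformers import AutoModelForCausalLM, AutoTokenizer

# "res_nw_eg" is a hypothetical local path, not confirmed by this commit.
tokenizer = AutoTokenizer.from_pretrained("res_nw_eg")
model = AutoModelForCausalLM.from_pretrained("res_nw_eg")

# One appended special token grows the vocab 64005 -> 64006; the embedding
# matrix must be resized to match, or the new id would index out of bounds.
tokenizer.add_special_tokens({"additional_special_tokens": ["<sep>"]})
model.resize_token_embeddings(len(tokenizer))
assert model.config.vocab_size == len(tokenizer)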
egy_training_log.txt CHANGED
@@ -70,7 +70,7 @@ local_rank=0,
 log_level=passive,
 log_level_replica=warning,
 log_on_each_node=True,
-logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_eg/runs/Aug31_17-47-31_lmgpu-node-09,
+logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_eg/runs/Sep01_15-14-35_lmgpu-node-07,
 logging_first_step=False,
 logging_nan_inf_filter=True,
 logging_steps=500,
@@ -128,194 +128,21 @@ warmup_ratio=0.0,
 warmup_steps=500,
 weight_decay=0.0,
 )
-INFO:datasets.builder:Using custom data configuration default-22b8aff308bb6a9a
+INFO:datasets.builder:Using custom data configuration default-8c97581fc2299c6f
 INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
-INFO:datasets.builder:Generating dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
-INFO:datasets.builder:Downloading and preparing dataset text/default to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101...
+INFO:datasets.builder:Generating dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-8c97581fc2299c6f/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+INFO:datasets.builder:Downloading and preparing dataset text/default to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-8c97581fc2299c6f/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101...
 INFO:datasets.download.download_manager:Downloading took 0.0 min
 INFO:datasets.download.download_manager:Checksum Computation took 0.0 min
 INFO:datasets.builder:Generating train split
 INFO:datasets.builder:Generating validation split
 INFO:datasets.utils.info_utils:Unable to verify splits sizes.
-INFO:datasets.builder:Dataset text downloaded and prepared to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101. Subsequent calls will reuse this data.
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-635af492cb87c1d9.arrow
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-d0fed37c7d2b78b7.arrow
+INFO:datasets.builder:Dataset text downloaded and prepared to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-8c97581fc2299c6f/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101. Subsequent calls will reuse this data.
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-8c97581fc2299c6f/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-c736ae48f9f531d9.arrow
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-8c97581fc2299c6f/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-0ac9dd935c074d94.arrow
 WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-1ecefed58b42de7f.arrow
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-5b8dbf7c961d9c88.arrow
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-8c97581fc2299c6f/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-75a2a4e12a96c35b.arrow
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-8c97581fc2299c6f/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-fac740983703b34b.arrow
 WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
 INFO:root:Epoch 1.0: Train Loss = None, Eval Loss = None
 INFO:absl:Using default tokenizer.
-INFO:root:Epoch 2.0: Train Loss = 1.1436, Eval Loss = 0.8277140259742737
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 3.0: Train Loss = 0.7508, Eval Loss = 0.7543078064918518
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 4.0: Train Loss = 0.6471, Eval Loss = 0.7337948083877563
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 5.0: Train Loss = 0.5713, Eval Loss = 0.7315686941146851
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 6.0: Train Loss = 0.5097, Eval Loss = 0.7390380501747131
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 7.0: Train Loss = 0.4573, Eval Loss = 0.748293399810791
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 8.0: Train Loss = 0.4118, Eval Loss = 0.7635838389396667
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 9.0: Train Loss = 0.3725, Eval Loss = 0.7796261310577393
-INFO:absl:Using default tokenizer.
-INFO:__main__:*** Evaluate ***
-INFO:absl:Using default tokenizer.
-WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
-INFO:__main__:Training/evaluation parameters TrainingArguments(
-_n_gpu=1,
-accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
-adafactor=False,
-adam_beta1=0.9,
-adam_beta2=0.999,
-adam_epsilon=1e-08,
-auto_find_batch_size=False,
-batch_eval_metrics=False,
-bf16=False,
-bf16_full_eval=False,
-data_seed=None,
-dataloader_drop_last=False,
-dataloader_num_workers=0,
-dataloader_persistent_workers=False,
-dataloader_pin_memory=True,
-dataloader_prefetch_factor=None,
-ddp_backend=None,
-ddp_broadcast_buffers=None,
-ddp_bucket_cap_mb=None,
-ddp_find_unused_parameters=None,
-ddp_timeout=1800,
-debug=[],
-deepspeed=None,
-disable_tqdm=False,
-dispatch_batches=None,
-do_eval=True,
-do_predict=False,
-do_train=True,
-eval_accumulation_steps=None,
-eval_delay=0,
-eval_do_concat_batches=True,
-eval_on_start=False,
-eval_steps=None,
-eval_strategy=IntervalStrategy.EPOCH,
-eval_use_gather_object=False,
-evaluation_strategy=epoch,
-fp16=False,
-fp16_backend=auto,
-fp16_full_eval=False,
-fp16_opt_level=O1,
-fsdp=[],
-fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
-fsdp_min_num_params=0,
-fsdp_transformer_layer_cls_to_wrap=None,
-full_determinism=False,
-gradient_accumulation_steps=1,
-gradient_checkpointing=False,
-gradient_checkpointing_kwargs=None,
-greater_is_better=False,
-group_by_length=False,
-half_precision_backend=auto,
-hub_always_push=False,
-hub_model_id=None,
-hub_private_repo=False,
-hub_strategy=HubStrategy.EVERY_SAVE,
-hub_token=<HUB_TOKEN>,
-ignore_data_skip=False,
-include_inputs_for_metrics=False,
-include_num_input_tokens_seen=False,
-include_tokens_per_second=False,
-jit_mode_eval=False,
-label_names=None,
-label_smoothing_factor=0.0,
-learning_rate=5e-05,
-length_column_name=length,
-load_best_model_at_end=True,
-local_rank=0,
-log_level=passive,
-log_level_replica=warning,
-log_on_each_node=True,
-logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_eg/runs/Sep01_08-33-59_lmgpu-node-09,
-logging_first_step=False,
-logging_nan_inf_filter=True,
-logging_steps=500,
-logging_strategy=IntervalStrategy.EPOCH,
-lr_scheduler_kwargs={},
-lr_scheduler_type=SchedulerType.LINEAR,
-max_grad_norm=1.0,
-max_steps=-1,
-metric_for_best_model=loss,
-mp_parameters=,
-neftune_noise_alpha=None,
-no_cuda=False,
-num_train_epochs=20.0,
-optim=OptimizerNames.ADAMW_TORCH,
-optim_args=None,
-optim_target_modules=None,
-output_dir=/home/iais_marenpielka/Bouthaina/res_nw_eg,
-overwrite_output_dir=False,
-past_index=-1,
-per_device_eval_batch_size=8,
-per_device_train_batch_size=8,
-prediction_loss_only=False,
-push_to_hub=True,
-push_to_hub_model_id=None,
-push_to_hub_organization=None,
-push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
-ray_scope=last,
-remove_unused_columns=True,
-report_to=[],
-restore_callback_states_from_checkpoint=False,
-resume_from_checkpoint=None,
-run_name=/home/iais_marenpielka/Bouthaina/res_nw_eg,
-save_on_each_node=False,
-save_only_model=False,
-save_safetensors=True,
-save_steps=500,
-save_strategy=IntervalStrategy.EPOCH,
-save_total_limit=None,
-seed=42,
-skip_memory_metrics=True,
-split_batches=None,
-tf32=None,
-torch_compile=False,
-torch_compile_backend=None,
-torch_compile_mode=None,
-torch_empty_cache_steps=None,
-torchdynamo=None,
-tpu_metrics_debug=False,
-tpu_num_cores=None,
-use_cpu=False,
-use_ipex=False,
-use_legacy_prediction_loop=False,
-use_mps_device=False,
-warmup_ratio=0.0,
-warmup_steps=500,
-weight_decay=0.0,
-)
-INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_eg/checkpoint-63963. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
-INFO:datasets.builder:Using custom data configuration default-22b8aff308bb6a9a
-INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
-INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
-INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
-INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
-INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-635af492cb87c1d9.arrow
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-d0fed37c7d2b78b7.arrow
-WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-1ecefed58b42de7f.arrow
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-22b8aff308bb6a9a/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-5b8dbf7c961d9c88.arrow
-WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
-INFO:root:Epoch 10.0: Train Loss = 0.3375, Eval Loss = 0.7973926663398743
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 11.0: Train Loss = 0.3074, Eval Loss = 0.8155524134635925
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 12.0: Train Loss = 0.2813, Eval Loss = 0.8325821757316589
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 13.0: Train Loss = 0.2586, Eval Loss = 0.8498404026031494
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 14.0: Train Loss = 0.2392, Eval Loss = 0.867546021938324
-INFO:absl:Using default tokenizer.
-INFO:__main__:*** Evaluate ***
-INFO:absl:Using default tokenizer.
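The removed log lines document the previous run: eval loss bottoms out at epoch 5 (0.7316) and rises steadily through epoch 14 (0.8675) while train loss keeps falling, the usual overfitting signature; with load_best_model_at_end=True and metric_for_best_model=loss, the epoch-5 weights are what gets restored at the end. The "Checkpoint detected, resuming training at .../checkpoint-63963" line is the standard run_clm-style resume path. A sketch of that flow under those assumptions; the argument values mirror a subset of the logged TrainingArguments, but the surrounding script is not part of this commit:

from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="/home/iais_marenpielka/Bouthaina/res_nw_eg",
    num_train_epochs=20.0,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    learning_rate=5e-05,
    warmup_steps=500,
    eval_strategy="epoch",
    save_strategy="epoch",
    logging_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    seed=42,
)
trainer = Trainer(
    model=model,             # model prepared earlier (assumed, not shown here)
    args=args,
    train_dataset=train_ds,  # tokenized text grouped into block_size=768 chunks (assumed)
    eval_dataset=eval_ds,
)
# Because output_dir already contains checkpoint-63963, this continues from
# step 63963 instead of training from scratch:
trainer.train(resume_from_checkpoint=True)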
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c48820fe165554837ee56106ed03ab5600e64cfec5fb5c557af50d8262deb68f
-size 539221632
+oid sha256:eb7017b4985f68c76b4938c5ff19a3718c2120c96b1a12bb1499d68e24d45be4
+size 539224704
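The weight-file growth is exactly what one extra vocabulary row predicts. The factorization below assumes float32 weights (per config.json) together with tied embeddings and a hidden size of 768; the latter two are inferences, not stated anywhere in the diff:

# 539224704 - 539221632 = 3072 bytes = 768 float32 values, i.e. one new
# 768-dimensional embedding row for the single added token.
old_size, new_size = 539221632, 539224704
assert new_size - old_size == 1 * 768 * 4  # tokens_added * hidden_size * bytes_per_float32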
special_tokens_map.json CHANGED
@@ -1,29 +1,22 @@
 {
   "additional_special_tokens": [
     {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "[sep]",
+      "content": "<sep>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false
     }
   ],
+  "bos_token": {
+    "content": "<|bos|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "eos_token": {
-    "content": "<EOS>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -35,5 +28,12 @@
     "normalized": false,
     "rstrip": false,
     "single_word": false
+  },
+  "unk_token": {
+    "content": "<|unk|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
   }
 }
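Two different kinds of tokenizer change are mixed in this file (and in the tokenizer.json and tokenizer_config.json hunks below): ids 64002-64004 have their surface forms renamed in place (<s> becomes <|bos|>, </s> becomes <|endoftext|>, [sep] becomes <|unk|>), which requires regenerating the tokenizer files, while <sep> is genuinely appended at the next free id, 64005. Only the append half maps onto the stock API; a sketch with a placeholder path:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("res_nw_eg")  # hypothetical path

# add_special_tokens() can only append new entries; it cannot rename an
# existing id, which is why the 64002-64004 changes imply a rebuilt tokenizer.
num_added = tokenizer.add_special_tokens({"additional_special_tokens": ["<sep>"]})
assert num_added == 1
tokenizer.save_pretrained("res_nw_eg")  # rewrites special_tokens_map.json and friends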
tokenizer.json CHANGED
@@ -46,7 +46,7 @@
   },
   {
     "id": 64002,
-    "content": "<s>",
+    "content": "<|bos|>",
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
@@ -55,7 +55,7 @@
   },
   {
     "id": 64003,
-    "content": "</s>",
+    "content": "<|endoftext|>",
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
@@ -64,7 +64,16 @@
   },
   {
     "id": 64004,
-    "content": "[sep]",
+    "content": "<|unk|>",
+    "single_word": false,
+    "lstrip": false,
+    "rstrip": false,
+    "normalized": false,
+    "special": true
+  },
+  {
+    "id": 64005,
+    "content": "<sep>",
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
tokenizer_config.json CHANGED
@@ -25,7 +25,7 @@
     "special": true
   },
   "64002": {
-    "content": "<s>",
+    "content": "<|bos|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -33,7 +33,7 @@
     "special": true
   },
   "64003": {
-    "content": "</s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -41,7 +41,15 @@
     "special": true
   },
   "64004": {
-    "content": "[sep]",
+    "content": "<|unk|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false,
+    "special": true
+  },
+  "64005": {
+    "content": "<sep>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -50,13 +58,13 @@
   }
   },
   "additional_special_tokens": [
-    "<s>",
-    "</s>",
-    "[sep]"
+    "<sep>"
   ],
+  "bos_token": "<|bos|>",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<EOS>",
+  "eos_token": "<|endoftext|>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<EOS>",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": "<|unk|>"
 }
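Note that pad_token stays "<EOS>" even though eos_token moves to "<|endoftext|>", so padding and end-of-sequence are now distinct tokens; "<EOS>" presumably keeps an id below 64002, outside these hunks. A quick check of the attribute wiring (sketch, path assumed):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("res_nw_eg")
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)
# expected: <|bos|> <|endoftext|> <|unk|> <EOS>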
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cd36943d3eded9f117e778c5c5c541e64620c4c17aa921ceb7ca6c9375a0ca60
+oid sha256:feecf93d1a9684356770c8578e7f6273e50ec0e263be5b04e9149a0271c36b49
 size 5240