nlparabic committed
Commit e64ba8b
Parent(s): 7e84a34

Training in progress, epoch 1

config.json CHANGED
@@ -35,5 +35,5 @@
   "torch_dtype": "float32",
   "transformers_version": "4.45.0.dev0",
   "use_cache": true,
-  "vocab_size": 64005
+  "vocab_size": 64006
 }
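
Note: the vocab_size bump from 64005 to 64006 tracks the extra <sep> token registered at id 64005 in tokenizer.json below. A minimal sketch of keeping the config and embeddings in sync after adding a token (checkpoint path taken from the log's output_dir; the loading classes are an assumption, not shown in this commit):

from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "/home/iais_marenpielka/Bouthaina/res_nw_yem"  # output_dir from the training log
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

# Grow the embedding matrix to cover newly added tokens; this also
# updates model.config.vocab_size (64005 -> 64006 in this commit).
if model.config.vocab_size != len(tok):
    model.resize_token_embeddings(len(tok))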
egy_training_log.txt CHANGED
@@ -70,7 +70,7 @@ local_rank=0,
 log_level=passive,
 log_level_replica=warning,
 log_on_each_node=True,
-logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_yem/runs/Aug31_18-23-30_lmgpu-node-09,
+logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_yem/runs/Sep01_15-13-26_lmgpu-node-07,
 logging_first_step=False,
 logging_nan_inf_filter=True,
 logging_steps=500,
@@ -128,202 +128,23 @@ warmup_ratio=0.0,
 warmup_steps=500,
 weight_decay=0.0,
 )
-INFO:datasets.builder:Using custom data configuration default-553f61aee4dda760
+INFO:datasets.builder:Using custom data configuration default-7d1e1bd6ffb527f0
 INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
-INFO:datasets.builder:Generating dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
-INFO:datasets.builder:Downloading and preparing dataset text/default to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101...
+INFO:datasets.builder:Generating dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+INFO:datasets.builder:Downloading and preparing dataset text/default to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101...
 INFO:datasets.download.download_manager:Downloading took 0.0 min
 INFO:datasets.download.download_manager:Checksum Computation took 0.0 min
 INFO:datasets.builder:Generating train split
 INFO:datasets.builder:Generating validation split
 INFO:datasets.utils.info_utils:Unable to verify splits sizes.
-INFO:datasets.builder:Dataset text downloaded and prepared to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101. Subsequent calls will reuse this data.
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-0c0ca65a5faa1c69.arrow
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-ff33919083e7cfc3.arrow
+INFO:datasets.builder:Dataset text downloaded and prepared to /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101. Subsequent calls will reuse this data.
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-c86dc1fa59adefd8.arrow
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-996802700aec694a.arrow
 WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-b67d61859d143d55.arrow
-INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-b5882b385151b146.arrow
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-757314bea7c4cdbc.arrow
+INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-9b49241998bcfbce.arrow
 WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
 INFO:root:Epoch 1.0: Train Loss = None, Eval Loss = None
 INFO:absl:Using default tokenizer.
-INFO:root:Epoch 2.0: Train Loss = 7.8314, Eval Loss = 2.359743595123291
-INFO:absl:Using default tokenizer.
-WARNING:huggingface_hub.utils._http:'(MaxRetryError("HTTPSConnectionPool(host='hf-hub-lfs-us-east-1.s3-accelerate.amazonaws.com', port=443): Max retries exceeded with url: /repos/4e/c5/4ec5c12bd4b31fb218515ee480d86e26419d948a9a596a06a6ad09ce77d37e3a/deff7f99580238a45f3ba2bc7047e31a358560b58dc3266d8058c6e623cbfa0f?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIA2JU7TKAQLC2QXPN7%2F20240831%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240831T182623Z&X-Amz-Expires=86400&X-Amz-Signature=e2b8cc4c6a6039dd14e9fd45f97285bd8f4c36e3b9f82774a0533eb3ab485699&X-Amz-SignedHeaders=host&partNumber=30&uploadId=1pifW86uMHklMcYfwNTOkTEVjye.aOXHsYNw1UiFajy5ulD2SMmE7lXK11IQuIa2xOtCET.QPTYOMxy0LOMBKD1rWT_Aq4IFRTZ.IqMHIpcpxuwSEUr.74Jrxu7CFsaq&x-id=UploadPart (Caused by SSLError(SSLEOFError(8, 'EOF occurred in violation of protocol (_ssl.c:2406)')))"), '(Request ID: 45c28252-333d-4bd7-bdae-103a87df191a)')' thrown while requesting PUT https://hf-hub-lfs-us-east-1.s3-accelerate.amazonaws.com/repos/4e/c5/4ec5c12bd4b31fb218515ee480d86e26419d948a9a596a06a6ad09ce77d37e3a/deff7f99580238a45f3ba2bc7047e31a358560b58dc3266d8058c6e623cbfa0f?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIA2JU7TKAQLC2QXPN7%2F20240831%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240831T182623Z&X-Amz-Expires=86400&X-Amz-Signature=e2b8cc4c6a6039dd14e9fd45f97285bd8f4c36e3b9f82774a0533eb3ab485699&X-Amz-SignedHeaders=host&partNumber=30&uploadId=1pifW86uMHklMcYfwNTOkTEVjye.aOXHsYNw1UiFajy5ulD2SMmE7lXK11IQuIa2xOtCET.QPTYOMxy0LOMBKD1rWT_Aq4IFRTZ.IqMHIpcpxuwSEUr.74Jrxu7CFsaq&x-id=UploadPart
-WARNING:huggingface_hub.utils._http:Retrying in 1s [Retry 1/5].
-INFO:root:Epoch 3.0: Train Loss = 0.9995, Eval Loss = 0.5537932515144348
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 4.0: Train Loss = 0.4848, Eval Loss = 0.5034472942352295
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 5.0: Train Loss = 0.3823, Eval Loss = 0.4827471375465393
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 6.0: Train Loss = 0.293, Eval Loss = 0.4732062816619873
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 7.0: Train Loss = 0.2239, Eval Loss = 0.47202983498573303
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 8.0: Train Loss = 0.1766, Eval Loss = 0.47373390197753906
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 9.0: Train Loss = 0.1434, Eval Loss = 0.47583380341529846
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 10.0: Train Loss = 0.1202, Eval Loss = 0.4827924072742462
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 11.0: Train Loss = 0.1042, Eval Loss = 0.4852388799190521
-INFO:absl:Using default tokenizer.
-WARNING:huggingface_hub.utils._http:'(MaxRetryError("HTTPSConnectionPool(host='hf-hub-lfs-us-east-1.s3-accelerate.amazonaws.com', port=443): Max retries exceeded with url: /repos/4e/c5/4ec5c12bd4b31fb218515ee480d86e26419d948a9a596a06a6ad09ce77d37e3a/7e7cfbb2940a8097a9e6e8e5bc5cd9f274cf2ba28fd92151cbe4dd0e72ae5712?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIA2JU7TKAQLC2QXPN7%2F20240831%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240831T191048Z&X-Amz-Expires=86400&X-Amz-Signature=d59b69eb9175411687ff7810cd33c6c85c44bf981f49b4f8ae534408ec3562e2&X-Amz-SignedHeaders=host&partNumber=2&uploadId=xNqFvN721XV0SFXx42EP0sJDJW.W2QqIOgUvk5YQWI9SYvlnvF8kCxC_2mjzHt0k5t51hFB.oftAU6tGWfNhGNpUPE9HMn_UnOlRe.JGXG5J64HYNr3rofly6kQEo7ZO&x-id=UploadPart (Caused by SSLError(SSLEOFError(8, 'EOF occurred in violation of protocol (_ssl.c:2406)')))"), '(Request ID: 784209eb-439a-4fa9-b9f8-072965b6bd5d)')' thrown while requesting PUT https://hf-hub-lfs-us-east-1.s3-accelerate.amazonaws.com/repos/4e/c5/4ec5c12bd4b31fb218515ee480d86e26419d948a9a596a06a6ad09ce77d37e3a/7e7cfbb2940a8097a9e6e8e5bc5cd9f274cf2ba28fd92151cbe4dd0e72ae5712?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIA2JU7TKAQLC2QXPN7%2F20240831%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240831T191048Z&X-Amz-Expires=86400&X-Amz-Signature=d59b69eb9175411687ff7810cd33c6c85c44bf981f49b4f8ae534408ec3562e2&X-Amz-SignedHeaders=host&partNumber=2&uploadId=xNqFvN721XV0SFXx42EP0sJDJW.W2QqIOgUvk5YQWI9SYvlnvF8kCxC_2mjzHt0k5t51hFB.oftAU6tGWfNhGNpUPE9HMn_UnOlRe.JGXG5J64HYNr3rofly6kQEo7ZO&x-id=UploadPart
-WARNING:huggingface_hub.utils._http:Retrying in 1s [Retry 1/5].
-INFO:__main__:*** Evaluate ***
-INFO:absl:Using default tokenizer.
-WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
-INFO:__main__:Training/evaluation parameters TrainingArguments(
-_n_gpu=1,
-accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
-adafactor=False,
-adam_beta1=0.9,
-adam_beta2=0.999,
-adam_epsilon=1e-08,
-auto_find_batch_size=False,
-batch_eval_metrics=False,
-bf16=False,
-bf16_full_eval=False,
-data_seed=None,
-dataloader_drop_last=False,
-dataloader_num_workers=0,
-dataloader_persistent_workers=False,
-dataloader_pin_memory=True,
-dataloader_prefetch_factor=None,
-ddp_backend=None,
-ddp_broadcast_buffers=None,
-ddp_bucket_cap_mb=None,
-ddp_find_unused_parameters=None,
-ddp_timeout=1800,
-debug=[],
-deepspeed=None,
-disable_tqdm=False,
-dispatch_batches=None,
-do_eval=True,
-do_predict=False,
-do_train=True,
-eval_accumulation_steps=None,
-eval_delay=0,
-eval_do_concat_batches=True,
-eval_on_start=False,
-eval_steps=None,
-eval_strategy=IntervalStrategy.EPOCH,
-eval_use_gather_object=False,
-evaluation_strategy=epoch,
-fp16=False,
-fp16_backend=auto,
-fp16_full_eval=False,
-fp16_opt_level=O1,
-fsdp=[],
-fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
-fsdp_min_num_params=0,
-fsdp_transformer_layer_cls_to_wrap=None,
-full_determinism=False,
-gradient_accumulation_steps=1,
-gradient_checkpointing=False,
-gradient_checkpointing_kwargs=None,
-greater_is_better=False,
-group_by_length=False,
-half_precision_backend=auto,
-hub_always_push=False,
-hub_model_id=None,
-hub_private_repo=False,
-hub_strategy=HubStrategy.EVERY_SAVE,
-hub_token=<HUB_TOKEN>,
-ignore_data_skip=False,
-include_inputs_for_metrics=False,
-include_num_input_tokens_seen=False,
-include_tokens_per_second=False,
-jit_mode_eval=False,
-label_names=None,
-label_smoothing_factor=0.0,
-learning_rate=5e-05,
-length_column_name=length,
-load_best_model_at_end=True,
-local_rank=0,
-log_level=passive,
-log_level_replica=warning,
-log_on_each_node=True,
-logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_yem/runs/Sep01_08-33-14_lmgpu-node-09,
-logging_first_step=False,
-logging_nan_inf_filter=True,
-logging_steps=500,
-logging_strategy=IntervalStrategy.EPOCH,
-lr_scheduler_kwargs={},
-lr_scheduler_type=SchedulerType.LINEAR,
-max_grad_norm=1.0,
-max_steps=-1,
-metric_for_best_model=loss,
-mp_parameters=,
-neftune_noise_alpha=None,
-no_cuda=False,
-num_train_epochs=20.0,
-optim=OptimizerNames.ADAMW_TORCH,
-optim_args=None,
-optim_target_modules=None,
-output_dir=/home/iais_marenpielka/Bouthaina/res_nw_yem,
-overwrite_output_dir=False,
-past_index=-1,
-per_device_eval_batch_size=8,
-per_device_train_batch_size=8,
-prediction_loss_only=False,
-push_to_hub=True,
-push_to_hub_model_id=None,
-push_to_hub_organization=None,
-push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
-ray_scope=last,
-remove_unused_columns=True,
-report_to=[],
-restore_callback_states_from_checkpoint=False,
-resume_from_checkpoint=None,
-run_name=/home/iais_marenpielka/Bouthaina/res_nw_yem,
-save_on_each_node=False,
-save_only_model=False,
-save_safetensors=True,
-save_steps=500,
-save_strategy=IntervalStrategy.EPOCH,
-save_total_limit=None,
-seed=42,
-skip_memory_metrics=True,
-split_batches=None,
-tf32=None,
-torch_compile=False,
-torch_compile_backend=None,
-torch_compile_mode=None,
-torch_empty_cache_steps=None,
-torchdynamo=None,
-tpu_metrics_debug=False,
-tpu_num_cores=None,
-use_cpu=False,
-use_ipex=False,
-use_legacy_prediction_loop=False,
-use_mps_device=False,
-warmup_ratio=0.0,
-warmup_steps=500,
-weight_decay=0.0,
-)
-INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/res_nw_yem/checkpoint-1683. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
-INFO:datasets.builder:Using custom data configuration default-553f61aee4dda760
-INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
-INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
-INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
-INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
-INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-0c0ca65a5faa1c69.arrow
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-ff33919083e7cfc3.arrow
-WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-b67d61859d143d55.arrow
-INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-553f61aee4dda760/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-b5882b385151b146.arrow
-WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
-INFO:root:Epoch 12.0: Train Loss = 0.0955, Eval Loss = 0.4884573221206665
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 13.0: Train Loss = 0.0875, Eval Loss = 0.4913596212863922
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 14.0: Train Loss = 0.0825, Eval Loss = 0.4981193542480469
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 15.0: Train Loss = 0.0787, Eval Loss = 0.5004830360412598
-INFO:absl:Using default tokenizer.
-INFO:root:Epoch 16.0: Train Loss = 0.0757, Eval Loss = 0.5019999146461487
-INFO:absl:Using default tokenizer.
-INFO:__main__:*** Evaluate ***
+INFO:root:Epoch 2.0: Train Loss = 7.4125, Eval Loss = 2.3318939208984375
 INFO:absl:Using default tokenizer.
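
Note: the repeated model_max_length warning above is the tokenizer reporting an effectively unbounded maximum length, so the script falls back to block_size=768. A minimal sketch of that fallback logic (the function is illustrative, patterned on the warning text; 768 is this run's value):

DEFAULT_BLOCK_SIZE = 768  # the value this run fell back to

def resolve_block_size(cli_block_size, model_max_length):
    """Pick a training block size, treating a huge model_max_length as 'no limit'."""
    if cli_block_size is not None:
        return cli_block_size              # an explicit --block_size xxx wins
    if model_max_length > 1_000_000:       # sentinel for "unbounded"
        return DEFAULT_BLOCK_SIZE
    return model_max_length

print(resolve_block_size(None, 1000000000000000019884624838656))  # -> 768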
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf56a64dd62f3be727306ab67370db8491594201ccd885d57f7c785a06b25700
-size 539221632
+oid sha256:897ae320f0e59012271d818a8a6f9cf510c4ccb0411e1491732dae0875c13abf
+size 539224704
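
Note: the 3,072-byte growth in the weights blob is consistent with exactly one extra embedding row: with float32 weights (per config.json) and an assumed hidden size of 768 (not shown in this commit), 768 x 4 bytes = 3,072.

# Back-of-envelope check; hidden_size=768 is an assumption.
old_size, new_size = 539_221_632, 539_224_704
assert new_size - old_size == 768 * 4  # one new vocab row of float32 weights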
special_tokens_map.json CHANGED
@@ -1,29 +1,22 @@
 {
   "additional_special_tokens": [
     {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "[sep]",
+      "content": "<sep>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false
     }
   ],
+  "bos_token": {
+    "content": "<|bos|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "eos_token": {
-    "content": "<EOS>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -35,5 +28,12 @@
     "normalized": false,
     "rstrip": false,
     "single_word": false
+  },
+  "unk_token": {
+    "content": "<|unk|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
   }
 }
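
Note: a hedged sketch of producing this same special-token map on a compatible tokenizer via the standard transformers API (the checkpoint path is assumed from the log's output_dir):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("/home/iais_marenpielka/Bouthaina/res_nw_yem")  # assumed path
num_added = tok.add_special_tokens({
    "bos_token": "<|bos|>",
    "eos_token": "<|endoftext|>",
    "unk_token": "<|unk|>",
    "additional_special_tokens": ["<sep>"],
})
print(num_added)  # 0 once the tokens already exist, as in this checkpoint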
tokenizer.json CHANGED
@@ -46,7 +46,7 @@
     },
     {
       "id": 64002,
-      "content": "<s>",
+      "content": "<|bos|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -55,7 +55,7 @@
     },
     {
       "id": 64003,
-      "content": "</s>",
+      "content": "<|endoftext|>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -64,7 +64,16 @@
     },
     {
       "id": 64004,
-      "content": "[sep]",
+      "content": "<|unk|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 64005,
+      "content": "<sep>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
tokenizer_config.json CHANGED
@@ -25,7 +25,7 @@
     "special": true
   },
   "64002": {
-    "content": "<s>",
+    "content": "<|bos|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -33,7 +33,7 @@
     "special": true
   },
   "64003": {
-    "content": "</s>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -41,7 +41,15 @@
     "special": true
   },
   "64004": {
-    "content": "[sep]",
+    "content": "<|unk|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false,
+    "special": true
+  },
+  "64005": {
+    "content": "<sep>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -50,13 +58,13 @@
     }
   },
   "additional_special_tokens": [
-    "<s>",
-    "</s>",
-    "[sep]"
+    "<sep>"
   ],
+  "bos_token": "<|bos|>",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<EOS>",
+  "eos_token": "<|endoftext|>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<EOS>",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": "<|unk|>"
 }
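
Note: this diff updates eos_token to <|endoftext|> but leaves pad_token as "<EOS>"; whether that mismatch is intentional cannot be told from the commit. A one-line check, reusing tok from the sketch above:

print(tok.eos_token, tok.pad_token)  # "<|endoftext|>" vs. "<EOS>" in this commit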
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5054e6f18f3985644637f15765ef06b685855481d4edbdb73928ea68067378fb
+oid sha256:934b0ce67dcb82b1960e357dd4465f4f7d3861fcc1a99669c6ce09158c23a063
 size 5240