nlparabic committed
Commit
37af3e1
1 Parent(s): 70ccabd

Model save

Files changed (3)
  1. README.md +8 -8
  2. egy_training_log.txt +143 -0
  3. training_args.bin +1 -1
README.md CHANGED
@@ -18,11 +18,11 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 2.1411
- - Bleu: 0.2987
- - Rouge1: 0.5831
- - Rouge2: 0.3405
- - Rougel: 0.5413
+ - Bleu: 0.3119
+ - Loss: 2.0654
+ - Rouge1: 0.5862
+ - Rouge2: 0.3489
+ - Rougel: 0.5479
 
  ## Model description
 
@@ -48,7 +48,7 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
  - lr_scheduler_warmup_steps: 500
- - num_epochs: 20.0
+ - num_epochs: 3.0
 
  ### Training results
 
@@ -72,8 +72,8 @@ The following hyperparameters were used during training:
  | 1.4594 | 16.0 | 7376 | 0.3133 | 1.9800 | 0.5846 | 0.3469 | 0.5466 |
  | 1.4361 | 17.0 | 7837 | 0.3151 | 1.9799 | 0.5865 | 0.3493 | 0.5485 |
  | 1.4159 | 18.0 | 8298 | 0.3149 | 1.9809 | 0.5865 | 0.3495 | 0.5486 |
- | 1.4159 | 19.0 | 8398 | 2.0767 | 0.3099 | 0.5858 | 0.3476 | 0.5471 |
- | 1.6189 | 20.0 | 8840 | 2.0654 | 0.3119 | 0.5862 | 0.3489 | 0.5479 |
+ | 1.4159 | 19.0 | 8398 | 0.3099 | 2.0767 | 0.5858 | 0.3476 | 0.5471 |
+ | 1.6189 | 20.0 | 8840 | 0.3119 | 2.0654 | 0.5862 | 0.3489 | 0.5479 |
 
 
  ### Framework versions
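
The card documents a causal language model fine-tuned from riotu-lab/ArabianGPT-01B, so it loads through the standard transformers API. A minimal inference sketch; the repo id below is a placeholder assumption, since the actual model id under the nlparabic namespace is not shown in this diff:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id -- substitute the actual fine-tuned model's id.
model_id = "nlparabic/egy-arabiangpt-01b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Generate a short continuation for an Arabic prompt.
inputs = tokenizer("مرحبا بكم", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50, do_sample=True, top_p=0.95)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```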
egy_training_log.txt CHANGED
@@ -474,3 +474,146 @@ INFO:root:Epoch 20.0: Train Loss = 1.6189, Eval Loss = 2.0766849517822266
  INFO:absl:Using default tokenizer.
  INFO:__main__:*** Evaluate ***
  INFO:absl:Using default tokenizer.
+ WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ INFO:__main__:Training/evaluation parameters TrainingArguments(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.EPOCH,
+ eval_use_gather_object=False,
+ evaluation_strategy=epoch,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs=None,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-05,
+ length_column_name=length,
+ load_best_model_at_end=True,
+ local_rank=0,
+ log_level=passive,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/iais_marenpielka/Bouthaina/results_fixed/runs/Aug25_16-35-33_lmgpu-node-09,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=500,
+ logging_strategy=IntervalStrategy.EPOCH,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=SchedulerType.LINEAR,
+ max_grad_norm=1.0,
+ max_steps=-1,
+ metric_for_best_model=loss,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=3.0,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/iais_marenpielka/Bouthaina/results_fixed,
+ overwrite_output_dir=False,
+ past_index=-1,
+ per_device_eval_batch_size=8,
+ per_device_train_batch_size=8,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=[],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/iais_marenpielka/Bouthaina/results_fixed,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=IntervalStrategy.EPOCH,
+ save_total_limit=None,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ )
+ INFO:__main__:Checkpoint detected, resuming training at /home/iais_marenpielka/Bouthaina/results_fixed/checkpoint-8840. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.
+ INFO:datasets.builder:Using custom data configuration default-93ed01be52df6f6e
+ INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+ INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-4cda59a599643701.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-d82ef9a45800c64f.arrow
+ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=768 instead. You can change that default value by passing --block_size xxx.
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-038f8e8385bf6638.arrow
+ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-51f1e2b6546273ed.arrow
+ WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
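
The TrainingArguments dump above maps to a constructor call along these lines. This is a sketch of the key logged fields only, written with the string forms of the enum values the log prints; it is not the exact invocation that produced the run:

```python
from transformers import TrainingArguments

# Reconstruction of the non-default logged values; omitted fields keep their defaults.
args = TrainingArguments(
    output_dir="/home/iais_marenpielka/Bouthaina/results_fixed",
    do_train=True,
    do_eval=True,
    eval_strategy="epoch",       # logged as IntervalStrategy.EPOCH
    logging_strategy="epoch",
    save_strategy="epoch",
    learning_rate=5e-05,
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=3.0,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    greater_is_better=False,
    push_to_hub=True,
    seed=42,
    report_to=[],
)
```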
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:92e8fb7e384c0e60dc19bccc81d5ba66ecd494477a99a468184ef66231010c0a
+ oid sha256:6380797c9c557c863558a7b0048e8d57ea8cce6472efacd2b83e78695feb823c
  size 5240
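
training_args.bin is tracked with Git LFS, so the diff only swaps the pointer's sha256 oid. A small sketch to check that a locally downloaded copy matches the new pointer; it assumes the file sits in the current directory:

```python
import hashlib

# SHA-256 oid from the updated Git LFS pointer above.
EXPECTED = "6380797c9c557c863558a7b0048e8d57ea8cce6472efacd2b83e78695feb823c"

with open("training_args.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

print("OK" if digest == EXPECTED else f"hash mismatch: {digest}")
```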