Commit caca0bb by nlparabic (parent: 37af3e1)

End of training

README.md CHANGED
@@ -18,11 +18,11 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Bleu: 0.3119
- - Loss: 2.0654
- - Rouge1: 0.5862
- - Rouge2: 0.3489
- - Rougel: 0.5479
+ - Loss: 2.1411
+ - Bleu: 0.2987
+ - Rouge1: 0.5831
+ - Rouge2: 0.3405
+ - Rougel: 0.5413
 
  ## Model description
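For reference, below is a minimal, hedged sketch of loading a checkpoint like this with Hugging Face Transformers. The repository id of the fine-tuned model is not stated in this diff, so the base model named in the card, riotu-lab/ArabianGPT-01B, is used only as a placeholder; exact loading options may vary with the checkpoint's tokenizer configuration.

```python
# Hedged sketch: loading and sampling from an ArabianGPT-01B-style causal LM.
# "riotu-lab/ArabianGPT-01B" is the base model named in the card above and is
# only a placeholder here; the fine-tuned checkpoint id is not part of this diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "riotu-lab/ArabianGPT-01B"  # placeholder: replace with the fine-tuned repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("القدس مدينة تاريخية", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```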
 
all_results.json CHANGED
@@ -5,15 +5,15 @@
  "eval_rouge1": 0.5830931277059221,
  "eval_rouge2": 0.3405124611999597,
  "eval_rougeL": 0.5412563338799627,
- "eval_runtime": 29.7624,
+ "eval_runtime": 30.3764,
  "eval_samples": 884,
- "eval_samples_per_second": 29.702,
- "eval_steps_per_second": 3.73,
+ "eval_samples_per_second": 29.102,
+ "eval_steps_per_second": 3.654,
  "perplexity": 8.50903297033769,
  "total_flos": 2.7664555180032e+16,
- "train_loss": 0.09870077279897836,
- "train_runtime": 270.9621,
+ "train_loss": 0.0,
+ "train_runtime": 0.0941,
  "train_samples": 3531,
- "train_samples_per_second": 260.627,
- "train_steps_per_second": 32.624
+ "train_samples_per_second": 112613.962,
+ "train_steps_per_second": 14096.678
  }
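As a sanity check on the figures above: the Transformers language-modeling example scripts report perplexity as exp(eval loss), and the throughput fields are samples divided by runtime. Assuming this run follows that convention (which the numbers here are consistent with), a quick verification with values copied from this commit:

```python
import math

# Values copied from the README and all_results.json in this commit.
eval_loss = 2.1411
eval_runtime = 30.3764      # seconds
eval_samples = 884

print(math.exp(eval_loss))          # ~8.51, consistent with "perplexity": 8.50903... (loss is rounded)
print(eval_samples / eval_runtime)  # ~29.10, consistent with "eval_samples_per_second": 29.102
```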
egy_training_log.txt CHANGED
@@ -617,3 +617,5 @@ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_leng
  INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-038f8e8385bf6638.arrow
  INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-93ed01be52df6f6e/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-51f1e2b6546273ed.arrow
  WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
+ INFO:__main__:*** Evaluate ***
+ INFO:absl:Using default tokenizer.
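The new `INFO:absl:Using default tokenizer.` line is what the ROUGE scorer logs when it falls back to its default tokenizer. The evaluation code itself is not part of this commit; the sketch below only illustrates, as an assumption about the tooling, how BLEU/ROUGE numbers like those in the card are typically computed with the `evaluate` library.

```python
# Hedged illustration only: the actual evaluation script is not in this diff.
import evaluate

predictions = ["the model generated this summary"]
references = ["the model produced this summary"]

rouge = evaluate.load("rouge")  # rouge_score under the hood logs "Using default tokenizer."
bleu = evaluate.load("bleu")

print(rouge.compute(predictions=predictions, references=references))
print(bleu.compute(predictions=predictions, references=[[r] for r in references]))
```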
eval_results.json CHANGED
@@ -5,9 +5,9 @@
  "eval_rouge1": 0.5830931277059221,
  "eval_rouge2": 0.3405124611999597,
  "eval_rougeL": 0.5412563338799627,
- "eval_runtime": 29.7624,
+ "eval_runtime": 30.3764,
  "eval_samples": 884,
- "eval_samples_per_second": 29.702,
- "eval_steps_per_second": 3.73,
+ "eval_samples_per_second": 29.102,
+ "eval_steps_per_second": 3.654,
  "perplexity": 8.50903297033769
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 20.0,
  "total_flos": 2.7664555180032e+16,
- "train_loss": 0.09870077279897836,
- "train_runtime": 270.9621,
+ "train_loss": 0.0,
+ "train_runtime": 0.0941,
  "train_samples": 3531,
- "train_samples_per_second": 260.627,
- "train_steps_per_second": 32.624
+ "train_samples_per_second": 112613.962,
+ "train_steps_per_second": 14096.678
  }
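The `"epoch": 20.0` above, together with `"step": 8840` in the trainer_state.json diff below, works out to 442 optimizer steps per epoch, which matches the 3531 training samples at an effective batch size of 8 (the batch size is an assumption; no training arguments appear in this commit). The new schedule of `"max_steps": 1326` over 3 epochs, also below, is consistent with the same arithmetic:

```python
import math

train_samples = 3531
effective_batch_size = 8  # assumed; not stated anywhere in this commit

steps_per_epoch = math.ceil(train_samples / effective_batch_size)  # 442
print(steps_per_epoch * 20)  # 8840 -> old max_steps (num_train_epochs = 20)
print(steps_per_epoch * 3)   # 1326 -> new max_steps (num_train_epochs = 3)
```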
trainer_state.json CHANGED
@@ -371,16 +371,16 @@
  "epoch": 20.0,
  "step": 8840,
  "total_flos": 2.7664555180032e+16,
- "train_loss": 0.09870077279897836,
- "train_runtime": 270.9621,
- "train_samples_per_second": 260.627,
- "train_steps_per_second": 32.624
+ "train_loss": 0.0,
+ "train_runtime": 0.0941,
+ "train_samples_per_second": 112613.962,
+ "train_steps_per_second": 14096.678
  }
  ],
  "logging_steps": 500,
- "max_steps": 8840,
+ "max_steps": 1326,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 20,
+ "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
  "EarlyStoppingCallback": {