nlparabic committed on
Commit 6061ab0
1 Parent(s): d312faf

End of training

README.md CHANGED
@@ -18,11 +18,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.7974
- - Bleu: 0.2491
- - Rouge1: 0.6112
- - Rouge2: 0.3654
- - Rougel: 0.6074
+ - Loss: 0.7316
+ - Bleu: 0.2459
+ - Rouge1: 0.6017
+ - Rouge2: 0.3519
+ - Rougel: 0.5983
 
 ## Model description
 
all_results.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "epoch": 9.0,
+ "eval_bleu": 0.24587227576979195,
+ "eval_loss": 0.7315686941146851,
+ "eval_rouge1": 0.6017197427075045,
+ "eval_rouge2": 0.3518746485163118,
+ "eval_rougeL": 0.5982542515796094,
+ "eval_runtime": 47.1591,
+ "eval_samples": 14212,
+ "eval_samples_per_second": 301.363,
+ "eval_steps_per_second": 37.681,
+ "perplexity": 2.078338328926906,
+ "total_flos": 3.3423104950272e+16,
+ "train_loss": 0.5779510789562912,
+ "train_runtime": 8660.6869,
+ "train_samples": 56851,
+ "train_samples_per_second": 131.285,
+ "train_steps_per_second": 16.412
+ }
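
The "perplexity" value in all_results.json is just the exponential of the evaluation loss. A minimal sanity check, assuming nothing beyond plain Python (no repo-specific code):

```python
import math

eval_loss = 0.7315686941146851        # "eval_loss" from all_results.json
perplexity = math.exp(eval_loss)      # perplexity = exp(mean cross-entropy loss)
print(perplexity)                     # ~2.0783, matching the "perplexity" field above
```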
egy_training_log.txt CHANGED
@@ -162,3 +162,5 @@ INFO:root:Epoch 8.0: Train Loss = 0.4118, Eval Loss = 0.7635838389396667
 INFO:absl:Using default tokenizer.
 INFO:root:Epoch 9.0: Train Loss = 0.3725, Eval Loss = 0.7796261310577393
 INFO:absl:Using default tokenizer.
+ INFO:__main__:*** Evaluate ***
+ INFO:absl:Using default tokenizer.
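
The per-epoch lines in egy_training_log.txt ("Epoch N: Train Loss = ..., Eval Loss = ...") look like output from a small logging hook. Purely as an illustration, and not taken from this repository, a hypothetical transformers `TrainerCallback` could produce lines in that format like this:

```python
import logging
from transformers import TrainerCallback

# Hypothetical callback (sketch only): remembers the last logged training loss
# and writes an "Epoch N: Train Loss = ..., Eval Loss = ..." line after each eval.
class EpochLossLogger(TrainerCallback):
    def __init__(self):
        self.last_train_loss = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            self.last_train_loss = logs["loss"]

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if metrics and self.last_train_loss is not None:
            logging.info(
                "Epoch %s: Train Loss = %.4f, Eval Loss = %s",
                state.epoch, self.last_train_loss, metrics["eval_loss"],
            )
```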
eval_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 9.0,
+ "eval_bleu": 0.24587227576979195,
+ "eval_loss": 0.7315686941146851,
+ "eval_rouge1": 0.6017197427075045,
+ "eval_rouge2": 0.3518746485163118,
+ "eval_rougeL": 0.5982542515796094,
+ "eval_runtime": 47.1591,
+ "eval_samples": 14212,
+ "eval_samples_per_second": 301.363,
+ "eval_steps_per_second": 37.681,
+ "perplexity": 2.078338328926906
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 9.0,
+ "total_flos": 3.3423104950272e+16,
+ "train_loss": 0.5779510789562912,
+ "train_runtime": 8660.6869,
+ "train_samples": 56851,
+ "train_samples_per_second": 131.285,
+ "train_steps_per_second": 16.412
+ }
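
One detail worth noting about the throughput numbers in train_results.json: they appear to be computed against the full training schedule (max_steps 142140 in trainer_state.json) rather than the 63963 steps actually run before early stopping. A quick arithmetic check in plain Python, using only values copied from the files above:

```python
train_runtime = 8660.6869            # seconds, from train_results.json
train_steps_per_second = 16.412      # from train_results.json

# Product is ~142139, i.e. the scheduled step count (142140), not the
# 63963 steps actually completed ("global_step" in trainer_state.json).
print(train_steps_per_second * train_runtime)
```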
train_vs_val_loss.png ADDED
trainer_state.json ADDED
@@ -0,0 +1,222 @@
+ {
+ "best_metric": 0.7315686941146851,
+ "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_eg/checkpoint-28428",
+ "epoch": 9.0,
+ "eval_steps": 500,
+ "global_step": 63963,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "grad_norm": 2.742945671081543,
+ "learning_rate": 4.7667678621858235e-05,
+ "loss": 1.1436,
+ "step": 7107
+ },
+ {
+ "epoch": 1.0,
+ "eval_bleu": 0.1900415894207328,
+ "eval_loss": 0.8277140259742737,
+ "eval_rouge1": 0.5211536867388353,
+ "eval_rouge2": 0.2576275131704426,
+ "eval_rougeL": 0.5169189427573101,
+ "eval_runtime": 204.5959,
+ "eval_samples_per_second": 69.464,
+ "eval_steps_per_second": 8.685,
+ "step": 7107
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 3.031801223754883,
+ "learning_rate": 4.515885343123411e-05,
+ "loss": 0.7508,
+ "step": 14214
+ },
+ {
+ "epoch": 2.0,
+ "eval_bleu": 0.22138684401610842,
+ "eval_loss": 0.7543078064918518,
+ "eval_rouge1": 0.5674397247471176,
+ "eval_rouge2": 0.3108337383535441,
+ "eval_rougeL": 0.5636106781794015,
+ "eval_runtime": 171.1246,
+ "eval_samples_per_second": 83.051,
+ "eval_steps_per_second": 10.384,
+ "step": 14214
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 1.8590487241744995,
+ "learning_rate": 4.265002824061e-05,
+ "loss": 0.6471,
+ "step": 21321
+ },
+ {
+ "epoch": 3.0,
+ "eval_bleu": 0.2374960454489342,
+ "eval_loss": 0.7337948083877563,
+ "eval_rouge1": 0.5880985827608463,
+ "eval_rouge2": 0.33558513842625187,
+ "eval_rougeL": 0.5844518671510625,
+ "eval_runtime": 68.7002,
+ "eval_samples_per_second": 206.87,
+ "eval_steps_per_second": 25.866,
+ "step": 21321
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 1.3944027423858643,
+ "learning_rate": 4.0141203049985884e-05,
+ "loss": 0.5713,
+ "step": 28428
+ },
+ {
+ "epoch": 4.0,
+ "eval_bleu": 0.24587227576979195,
+ "eval_loss": 0.7315686941146851,
+ "eval_rouge1": 0.6017197427075045,
+ "eval_rouge2": 0.3518746485163118,
+ "eval_rougeL": 0.5982542515796094,
+ "eval_runtime": 62.0721,
+ "eval_samples_per_second": 228.959,
+ "eval_steps_per_second": 28.628,
+ "step": 28428
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 2.199220657348633,
+ "learning_rate": 3.763237785936176e-05,
+ "loss": 0.5097,
+ "step": 35535
+ },
+ {
+ "epoch": 5.0,
+ "eval_bleu": 0.24748155317226092,
+ "eval_loss": 0.7390380501747131,
+ "eval_rouge1": 0.6058102682046419,
+ "eval_rouge2": 0.357170685615976,
+ "eval_rougeL": 0.6021635755679425,
+ "eval_runtime": 67.8747,
+ "eval_samples_per_second": 209.386,
+ "eval_steps_per_second": 26.181,
+ "step": 35535
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 2.055725574493408,
+ "learning_rate": 3.512355266873765e-05,
+ "loss": 0.4573,
+ "step": 42642
+ },
+ {
+ "epoch": 6.0,
+ "eval_bleu": 0.25030630377831276,
+ "eval_loss": 0.748293399810791,
+ "eval_rouge1": 0.6103116816448397,
+ "eval_rouge2": 0.361846050958361,
+ "eval_rougeL": 0.6066395364597333,
+ "eval_runtime": 56.4418,
+ "eval_samples_per_second": 251.799,
+ "eval_steps_per_second": 31.484,
+ "step": 42642
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 1.6595733165740967,
+ "learning_rate": 3.2614727478113526e-05,
+ "loss": 0.4118,
+ "step": 49749
+ },
+ {
+ "epoch": 7.0,
+ "eval_bleu": 0.2494244558337241,
+ "eval_loss": 0.7635838389396667,
+ "eval_rouge1": 0.610621109140437,
+ "eval_rouge2": 0.3633959713058441,
+ "eval_rougeL": 0.6069537363647842,
+ "eval_runtime": 173.9311,
+ "eval_samples_per_second": 81.711,
+ "eval_steps_per_second": 10.217,
+ "step": 49749
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 3.863671064376831,
+ "learning_rate": 3.010590228748941e-05,
+ "loss": 0.3725,
+ "step": 56856
+ },
+ {
+ "epoch": 8.0,
+ "eval_bleu": 0.25065847486647275,
+ "eval_loss": 0.7796261310577393,
+ "eval_rouge1": 0.6126587801190159,
+ "eval_rouge2": 0.3659624175392553,
+ "eval_rougeL": 0.6088959046619336,
+ "eval_runtime": 170.86,
+ "eval_samples_per_second": 83.179,
+ "eval_steps_per_second": 10.4,
+ "step": 56856
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 3.546931266784668,
+ "learning_rate": 2.7597077096865293e-05,
+ "loss": 0.3375,
+ "step": 63963
+ },
+ {
+ "epoch": 9.0,
+ "eval_bleu": 0.24908190761452426,
+ "eval_loss": 0.7973926663398743,
+ "eval_rouge1": 0.6111967178899901,
+ "eval_rouge2": 0.36536691181853787,
+ "eval_rougeL": 0.6074289902749841,
+ "eval_runtime": 173.0755,
+ "eval_samples_per_second": 82.114,
+ "eval_steps_per_second": 10.267,
+ "step": 63963
+ },
+ {
+ "epoch": 9.0,
+ "step": 63963,
+ "total_flos": 3.3423104950272e+16,
+ "train_loss": 0.5779510789562912,
+ "train_runtime": 8660.6869,
+ "train_samples_per_second": 131.285,
+ "train_steps_per_second": 16.412
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 142140,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 20,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 5,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3.3423104950272e+16,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
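
trainer_state.json records that the best eval_loss (0.7316) was reached at epoch 4 (checkpoint-28428) and that the EarlyStoppingCallback, with a patience of 5 evaluations, stopped training after epoch 9 of the scheduled 20 (epochs 5 through 9 showed no improvement). A minimal sketch of how such a setup is typically wired in transformers, with values mirroring trainer_state.json; other argument names (output_dir, the eval/save strategies, metric_for_best_model) are assumptions, not taken from this repository:

```python
from transformers import TrainingArguments, EarlyStoppingCallback

# Sketch only: values mirror trainer_state.json; remaining arguments are assumed.
training_args = TrainingArguments(
    output_dir="res_nw_eg",            # assumed; checkpoints were saved under .../res_nw_eg
    num_train_epochs=20,               # max_steps 142140 = 20 epochs * 7107 steps/epoch
    per_device_train_batch_size=8,     # "train_batch_size": 8
    eval_strategy="epoch",             # eval entries appear once per epoch
    save_strategy="epoch",             # ("evaluation_strategy" on older transformers versions)
    load_best_model_at_end=True,       # required so early stopping tracks the best checkpoint
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)

early_stopping = EarlyStoppingCallback(
    early_stopping_patience=5,         # matches "early_stopping_patience": 5
    early_stopping_threshold=0.0,      # matches "early_stopping_threshold": 0.0
)
# The callback is then passed to Trainer(..., callbacks=[early_stopping]).
```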