End of training
- README.md +5 -5
- all_results.json +15 -15
- egy_training_log.txt +2 -0
- eval_results.json +10 -10
- train_results.json +6 -6
- train_vs_val_loss.png +0 -0
- trainer_state.json +147 -14
README.md
CHANGED
@@ -18,11 +18,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Bleu: 0.
-- Rouge1: 0.
-- Rouge2: 0.
-- Rougel: 0.
+- Loss: 0.6266
+- Bleu: 0.2679
+- Rouge1: 0.5977
+- Rouge2: 0.3443
+- Rougel: 0.5959
 
 ## Model description
 
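Since the README documents a causal-LM fine-tune of riotu-lab/ArabianGPT-01B, a minimal usage sketch in Python follows. It assumes the fine-tuned weights are published as an ordinary Hub repository and that the base model's tokenizer conventions carry over; the repo id below is a placeholder, not something this commit confirms.

```python
# Minimal sketch (assumptions noted): load the fine-tuned checkpoint as a
# GPT-2-style causal LM, matching the base riotu-lab/ArabianGPT-01B.
# "<user>/res_nw_dj" is a hypothetical repo id -- substitute the real one.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "<user>/res_nw_dj"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("مثال:", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```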
all_results.json
CHANGED
@@ -1,19 +1,19 @@
 {
-    "epoch":
-    "eval_bleu": 0.
-    "eval_loss": 0.
-    "eval_rouge1": 0.
-    "eval_rouge2": 0.
-    "eval_rougeL": 0.
-    "eval_runtime":
+    "epoch": 10.0,
+    "eval_bleu": 0.2678870499231116,
+    "eval_loss": 0.6265950798988342,
+    "eval_rouge1": 0.5977012354572853,
+    "eval_rouge2": 0.34430833134800065,
+    "eval_rougeL": 0.5958973349618409,
+    "eval_runtime": 58.6194,
     "eval_samples": 5405,
-    "eval_samples_per_second":
-    "eval_steps_per_second":
-    "perplexity": 1.
-    "total_flos":
-    "train_loss": 0.
-    "train_runtime":
+    "eval_samples_per_second": 92.205,
+    "eval_steps_per_second": 11.532,
+    "perplexity": 1.8712283369394682,
+    "total_flos": 1.412414078976e+16,
+    "train_loss": 0.26264210826945755,
+    "train_runtime": 7304.9287,
     "train_samples": 21622,
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "train_samples_per_second": 59.198,
+    "train_steps_per_second": 7.4
 }
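One derived value in all_results.json can be checked by hand: the reported perplexity is exp(eval_loss). The snippet below is purely illustrative and only recomputes a number already present in the file.

```python
# Illustrative check: the "perplexity" in all_results.json equals exp(eval_loss).
import math

eval_loss = 0.6265950798988342
print(math.exp(eval_loss))  # ~1.8712283 -> matches the "perplexity" field
```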
egy_training_log.txt
CHANGED
@@ -459,3 +459,5 @@ INFO:root:Epoch 9.0: Train Loss = 0.3108, Eval Loss = 0.646577000617981
 INFO:absl:Using default tokenizer.
 INFO:root:Epoch 10.0: Train Loss = 0.2719, Eval Loss = 0.6550981998443604
 INFO:absl:Using default tokenizer.
+INFO:__main__:*** Evaluate ***
+INFO:absl:Using default tokenizer.
eval_results.json
CHANGED
@@ -1,13 +1,13 @@
 {
-    "epoch":
-    "eval_bleu": 0.
-    "eval_loss": 0.
-    "eval_rouge1": 0.
-    "eval_rouge2": 0.
-    "eval_rougeL": 0.
-    "eval_runtime":
+    "epoch": 10.0,
+    "eval_bleu": 0.2678870499231116,
+    "eval_loss": 0.6265950798988342,
+    "eval_rouge1": 0.5977012354572853,
+    "eval_rouge2": 0.34430833134800065,
+    "eval_rougeL": 0.5958973349618409,
+    "eval_runtime": 58.6194,
     "eval_samples": 5405,
-    "eval_samples_per_second":
-    "eval_steps_per_second":
-    "perplexity": 1.
+    "eval_samples_per_second": 92.205,
+    "eval_steps_per_second": 11.532,
+    "perplexity": 1.8712283369394682
 }
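The evaluation throughput figures are internally consistent as well: 5405 samples over a 58.6194 s runtime gives about 92.2 samples/s, and assuming an evaluation batch size of 8 (an assumption; only train_batch_size is recorded in trainer_state.json) the ceil(5405 / 8) = 676 eval steps give about 11.53 steps/s. The sketch below just reproduces that arithmetic.

```python
# Sketch: recompute eval throughput from eval_results.json values.
# The batch size of 8 is an assumption (only train_batch_size is recorded).
import math

eval_samples, eval_runtime, eval_batch_size = 5405, 58.6194, 8
print(eval_samples / eval_runtime)                               # ~92.2  samples/s
print(math.ceil(eval_samples / eval_batch_size) / eval_runtime)  # ~11.53 steps/s
```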
train_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch":
-    "total_flos":
-    "train_loss": 0.
-    "train_runtime":
+    "epoch": 10.0,
+    "total_flos": 1.412414078976e+16,
+    "train_loss": 0.26264210826945755,
+    "train_runtime": 7304.9287,
     "train_samples": 21622,
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "train_samples_per_second": 59.198,
+    "train_steps_per_second": 7.4
 }
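The training throughput in train_results.json appears to be computed against the scheduled run length recorded in trainer_state.json (20 epochs, 54,060 steps) rather than the 10 epochs actually completed, which is consistent with how the transformers Trainer derives its speed metrics from max_steps when training ends early. The arithmetic sketch below only reproduces the reported values; the interpretation is an inference, not something stated in the commit.

```python
# Arithmetic sketch: the reported throughput matches the *scheduled* run length
# (20 epochs / 54,060 steps from trainer_state.json), not the 10 epochs actually run.
train_samples, scheduled_epochs = 21622, 20
max_steps, train_runtime = 54060, 7304.9287

print(train_samples * scheduled_epochs / train_runtime)  # ~59.2 samples/s
print(max_steps / train_runtime)                          # ~7.4  steps/s
```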
train_vs_val_loss.png
CHANGED
trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.
-  "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_dj/checkpoint-
-  "epoch":
+  "best_metric": 0.6265950798988342,
+  "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_dj/checkpoint-13515",
+  "epoch": 10.0,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 27030,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -66,19 +66,152 @@
       "step": 8109
     },
     {
-      "epoch":
-      "
-      "
-      "
-      "
-
-
+      "epoch": 4.0,
+      "grad_norm": 1.4183156490325928,
+      "learning_rate": 4.037341299477222e-05,
+      "loss": 0.556,
+      "step": 10812
+    },
+    {
+      "epoch": 4.0,
+      "eval_bleu": 0.25983253831381714,
+      "eval_loss": 0.63295978307724,
+      "eval_rouge1": 0.5829310998998971,
+      "eval_rouge2": 0.326193735265751,
+      "eval_rougeL": 0.5807349807527458,
+      "eval_runtime": 40.5509,
+      "eval_samples_per_second": 133.289,
+      "eval_steps_per_second": 16.67,
+      "step": 10812
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 1.186664342880249,
+      "learning_rate": 3.785007468259896e-05,
+      "loss": 0.4786,
+      "step": 13515
+    },
+    {
+      "epoch": 5.0,
+      "eval_bleu": 0.2678870499231116,
+      "eval_loss": 0.6265950798988342,
+      "eval_rouge1": 0.5977012354572853,
+      "eval_rouge2": 0.34430833134800065,
+      "eval_rougeL": 0.5958973349618409,
+      "eval_runtime": 36.0053,
+      "eval_samples_per_second": 150.117,
+      "eval_steps_per_second": 18.775,
+      "step": 13515
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 1.1597645282745361,
+      "learning_rate": 3.5326736370425696e-05,
+      "loss": 0.4123,
+      "step": 16218
+    },
+    {
+      "epoch": 6.0,
+      "eval_bleu": 0.2707063976336222,
+      "eval_loss": 0.630312979221344,
+      "eval_rouge1": 0.604436172840783,
+      "eval_rouge2": 0.35482389915380186,
+      "eval_rougeL": 0.6026792061487433,
+      "eval_runtime": 161.1219,
+      "eval_samples_per_second": 33.546,
+      "eval_steps_per_second": 4.196,
+      "step": 16218
+    },
+    {
+      "epoch": 7.0,
+      "grad_norm": 1.27555251121521,
+      "learning_rate": 3.280339805825243e-05,
+      "loss": 0.3573,
+      "step": 18921
+    },
+    {
+      "epoch": 7.0,
+      "eval_bleu": 0.27393786087649125,
+      "eval_loss": 0.6372247338294983,
+      "eval_rouge1": 0.6108088318784769,
+      "eval_rouge2": 0.36307215008060617,
+      "eval_rougeL": 0.6088126794948809,
+      "eval_runtime": 159.7397,
+      "eval_samples_per_second": 33.836,
+      "eval_steps_per_second": 4.232,
+      "step": 18921
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 1.6310491561889648,
+      "learning_rate": 3.0280059746079166e-05,
+      "loss": 0.3108,
+      "step": 21624
+    },
+    {
+      "epoch": 8.0,
+      "eval_bleu": 0.2759919170184585,
+      "eval_loss": 0.646577000617981,
+      "eval_rouge1": 0.6130373826214599,
+      "eval_rouge2": 0.36776670773707465,
+      "eval_rougeL": 0.6111699095881062,
+      "eval_runtime": 150.1086,
+      "eval_samples_per_second": 36.007,
+      "eval_steps_per_second": 4.503,
+      "step": 21624
+    },
+    {
+      "epoch": 9.0,
+      "grad_norm": 1.6629250049591064,
+      "learning_rate": 2.77567214339059e-05,
+      "loss": 0.2719,
+      "step": 24327
+    },
+    {
+      "epoch": 9.0,
+      "eval_bleu": 0.2801810856751608,
+      "eval_loss": 0.6550981998443604,
+      "eval_rouge1": 0.6177880665801061,
+      "eval_rouge2": 0.3727206564119486,
+      "eval_rougeL": 0.6159303776582042,
+      "eval_runtime": 159.1145,
+      "eval_samples_per_second": 33.969,
+      "eval_steps_per_second": 4.249,
+      "step": 24327
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 1.5184797048568726,
+      "learning_rate": 2.523338312173264e-05,
+      "loss": 0.2396,
+      "step": 27030
+    },
+    {
+      "epoch": 10.0,
+      "eval_bleu": 0.27697504925957206,
+      "eval_loss": 0.665830671787262,
+      "eval_rouge1": 0.6173182845826588,
+      "eval_rouge2": 0.37288754745117264,
+      "eval_rougeL": 0.6152835100872982,
+      "eval_runtime": 110.2964,
+      "eval_samples_per_second": 49.004,
+      "eval_steps_per_second": 6.129,
+      "step": 27030
+    },
+    {
+      "epoch": 10.0,
+      "step": 27030,
+      "total_flos": 1.412414078976e+16,
+      "train_loss": 0.26264210826945755,
+      "train_runtime": 7304.9287,
+      "train_samples_per_second": 59.198,
+      "train_steps_per_second": 7.4
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 54060,
   "num_input_tokens_seen": 0,
-  "num_train_epochs":
+  "num_train_epochs": 20,
   "save_steps": 500,
   "stateful_callbacks": {
     "EarlyStoppingCallback": {
@@ -101,7 +234,7 @@
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 1.412414078976e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
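The log history above also shows why training stopped at epoch 10 of the scheduled 20: eval_loss bottoms out at epoch 5 (step 13515, the best_model_checkpoint) and worsens at every later evaluation, which is what the EarlyStoppingCallback reacts to (its patience value is not visible in this diff). A small sketch for recovering the best evaluation entry from trainer_state.json:

```python
# Sketch: find the best evaluation entry in trainer_state.json, mirroring the
# "best_metric" / "best_model_checkpoint" fields (checkpoint-13515, epoch 5).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
best = min(evals, key=lambda e: e["eval_loss"])
print(best["step"], best["eval_loss"])  # 13515 0.6265950798988342
```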