{
  "best_metric": 0.8298296928405762,
  "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_eg/checkpoint-21315",
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 56840,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 1.2983179092407227,
      "learning_rate": 4.766772598870057e-05,
      "loss": 1.1362,
      "step": 7105
    },
    {
      "epoch": 1.0,
      "eval_bleu": 0.3024315492316285,
      "eval_loss": 0.9122783541679382,
      "eval_rouge1": 0.5275815926361394,
      "eval_rouge2": 0.250835305590573,
      "eval_rougeL": 0.5227642919859343,
      "eval_runtime": 82.2039,
      "eval_samples_per_second": 172.851,
      "eval_steps_per_second": 21.617,
      "step": 7105
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.6412582397460938,
      "learning_rate": 4.515889830508475e-05,
      "loss": 0.7501,
      "step": 14210
    },
    {
      "epoch": 2.0,
      "eval_bleu": 0.3299343390399215,
      "eval_loss": 0.8436459302902222,
      "eval_rouge1": 0.5753620498690527,
      "eval_rouge2": 0.30552900678559713,
      "eval_rougeL": 0.5712795722916149,
      "eval_runtime": 47.3241,
      "eval_samples_per_second": 300.248,
      "eval_steps_per_second": 37.55,
      "step": 14210
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.4929105043411255,
      "learning_rate": 4.265007062146893e-05,
      "loss": 0.6462,
      "step": 21315
    },
    {
      "epoch": 3.0,
      "eval_bleu": 0.3389516198368023,
      "eval_loss": 0.8298296928405762,
      "eval_rouge1": 0.5961824107324037,
      "eval_rouge2": 0.32984558195042607,
      "eval_rougeL": 0.5921451862516953,
      "eval_runtime": 46.7775,
      "eval_samples_per_second": 303.757,
      "eval_steps_per_second": 37.988,
      "step": 21315
    },
    {
      "epoch": 4.0,
      "grad_norm": 2.472024440765381,
      "learning_rate": 4.014124293785311e-05,
      "loss": 0.5705,
      "step": 28420
    },
    {
      "epoch": 4.0,
      "eval_bleu": 0.3443865327631572,
      "eval_loss": 0.8327584862709045,
      "eval_rouge1": 0.6048121716287991,
      "eval_rouge2": 0.3411757857050619,
      "eval_rougeL": 0.6010600637633374,
      "eval_runtime": 51.9902,
      "eval_samples_per_second": 273.301,
      "eval_steps_per_second": 34.18,
      "step": 28420
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.5785757303237915,
      "learning_rate": 3.763241525423729e-05,
      "loss": 0.5087,
      "step": 35525
    },
    {
      "epoch": 5.0,
      "eval_bleu": 0.3480087983574886,
      "eval_loss": 0.8406437635421753,
      "eval_rouge1": 0.6133106901142944,
      "eval_rouge2": 0.3512185978691529,
      "eval_rougeL": 0.6095993795603691,
      "eval_runtime": 46.8243,
      "eval_samples_per_second": 303.453,
      "eval_steps_per_second": 37.95,
      "step": 35525
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.7464922666549683,
      "learning_rate": 3.5123587570621466e-05,
      "loss": 0.4559,
      "step": 42630
    },
    {
      "epoch": 6.0,
      "eval_bleu": 0.34779219505501374,
      "eval_loss": 0.8647022843360901,
      "eval_rouge1": 0.6132791748034587,
      "eval_rouge2": 0.35198863487088,
      "eval_rougeL": 0.6093736659279629,
      "eval_runtime": 46.987,
      "eval_samples_per_second": 302.403,
      "eval_steps_per_second": 37.819,
      "step": 42630
    },
    {
      "epoch": 7.0,
      "grad_norm": 2.2738301753997803,
      "learning_rate": 3.261475988700565e-05,
      "loss": 0.4104,
      "step": 49735
    },
    {
      "epoch": 7.0,
      "eval_bleu": 0.3484641332073553,
      "eval_loss": 0.8769952058792114,
      "eval_rouge1": 0.6147660525828296,
      "eval_rouge2": 0.3532593327836824,
      "eval_rougeL": 0.6107018352641345,
      "eval_runtime": 47.1718,
      "eval_samples_per_second": 301.218,
      "eval_steps_per_second": 37.671,
      "step": 49735
    },
    {
      "epoch": 8.0,
      "grad_norm": 1.9538367986679077,
      "learning_rate": 3.010593220338983e-05,
      "loss": 0.3708,
      "step": 56840
    },
    {
      "epoch": 8.0,
      "eval_bleu": 0.3478035270510471,
      "eval_loss": 0.9003333449363708,
      "eval_rouge1": 0.6165900788913368,
      "eval_rouge2": 0.3548552782268418,
      "eval_rougeL": 0.6124632709173936,
      "eval_runtime": 47.008,
      "eval_samples_per_second": 302.268,
      "eval_steps_per_second": 37.802,
      "step": 56840
    },
    {
      "epoch": 8.0,
      "step": 56840,
      "total_flos": 2.9701587861504e+16,
      "train_loss": 0.6060978588128408,
      "train_runtime": 4864.2589,
      "train_samples_per_second": 233.688,
      "train_steps_per_second": 29.213
    }
  ],
  "logging_steps": 500,
  "max_steps": 142100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.9701587861504e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}