{
  "best_metric": 76.36038789574842,
  "best_model_checkpoint": "/home/jcanete/ft-data/all_results/tar/albeto_base_10/epochs_2_bs_16_lr_5e-5/checkpoint-10200",
  "epoch": 2.0,
  "global_step": 10970,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "eval_exact_match": 45.53453169347209,
      "eval_f1": 64.38889275299238,
      "step": 300
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.774840474020055e-05,
      "loss": 2.3938,
      "step": 500
    },
    {
      "epoch": 0.11,
      "eval_exact_match": 49.14853358561968,
      "eval_f1": 67.53518688183068,
      "step": 600
    },
    {
      "epoch": 0.16,
      "eval_exact_match": 51.26773888363292,
      "eval_f1": 69.15241192316618,
      "step": 900
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.546946216955333e-05,
      "loss": 1.8676,
      "step": 1000
    },
    {
      "epoch": 0.22,
      "eval_exact_match": 52.677388836329236,
      "eval_f1": 71.264646480928,
      "step": 1200
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.319051959890611e-05,
      "loss": 1.7557,
      "step": 1500
    },
    {
      "epoch": 0.27,
      "eval_exact_match": 52.8003784295175,
      "eval_f1": 70.69843839338733,
      "step": 1500
    },
    {
      "epoch": 0.33,
      "eval_exact_match": 53.841059602649004,
      "eval_f1": 71.55219520099376,
      "step": 1800
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.091157702825889e-05,
      "loss": 1.7093,
      "step": 2000
    },
    {
      "epoch": 0.38,
      "eval_exact_match": 54.08703878902554,
      "eval_f1": 72.75312871370792,
      "step": 2100
    },
    {
      "epoch": 0.44,
      "eval_exact_match": 54.15326395458846,
      "eval_f1": 72.25373492579445,
      "step": 2400
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.863263445761167e-05,
      "loss": 1.6266,
      "step": 2500
    },
    {
      "epoch": 0.49,
      "eval_exact_match": 55.81835383159886,
      "eval_f1": 73.13623090017391,
      "step": 2700
    },
    {
      "epoch": 0.55,
      "learning_rate": 3.635369188696445e-05,
      "loss": 1.6179,
      "step": 3000
    },
    {
      "epoch": 0.55,
      "eval_exact_match": 56.017029328287606,
      "eval_f1": 73.68563507466432,
      "step": 3000
    },
    {
      "epoch": 0.6,
      "eval_exact_match": 57.03878902554399,
      "eval_f1": 73.81599366386958,
      "step": 3300
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.4074749316317225e-05,
      "loss": 1.5863,
      "step": 3500
    },
    {
      "epoch": 0.66,
      "eval_exact_match": 56.9441816461684,
      "eval_f1": 74.03932269687274,
      "step": 3600
    },
    {
      "epoch": 0.71,
      "eval_exact_match": 57.11447492904446,
      "eval_f1": 74.25288243781013,
      "step": 3900
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.1795806745670006e-05,
      "loss": 1.5291,
      "step": 4000
    },
    {
      "epoch": 0.77,
      "eval_exact_match": 57.63481551561022,
      "eval_f1": 74.89927170312892,
      "step": 4200
    },
    {
      "epoch": 0.82,
      "learning_rate": 2.951686417502279e-05,
      "loss": 1.5386,
      "step": 4500
    },
    {
      "epoch": 0.82,
      "eval_exact_match": 57.95648060548723,
      "eval_f1": 74.61884384474561,
      "step": 4500
    },
    {
      "epoch": 0.88,
      "eval_exact_match": 58.02270577105014,
      "eval_f1": 75.32600315809357,
      "step": 4800
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.723792160437557e-05,
      "loss": 1.5132,
      "step": 5000
    },
    {
      "epoch": 0.93,
      "eval_exact_match": 58.306527909176914,
      "eval_f1": 74.89942757720935,
      "step": 5100
    },
    {
      "epoch": 0.98,
      "eval_exact_match": 58.64711447492905,
      "eval_f1": 75.59723967213186,
      "step": 5400
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.4958979033728353e-05,
      "loss": 1.4682,
      "step": 5500
    },
    {
      "epoch": 1.04,
      "eval_exact_match": 59.15799432355724,
      "eval_f1": 75.83737312760167,
      "step": 5700
    },
    {
      "epoch": 1.09,
      "learning_rate": 2.268003646308113e-05,
      "loss": 1.2319,
      "step": 6000
    },
    {
      "epoch": 1.09,
      "eval_exact_match": 58.609271523178805,
      "eval_f1": 75.81477740927686,
      "step": 6000
    },
    {
      "epoch": 1.15,
      "eval_exact_match": 58.59035004730369,
      "eval_f1": 75.43205542917899,
      "step": 6300
    },
    {
      "epoch": 1.19,
      "learning_rate": 2.040109389243391e-05,
      "loss": 1.2259,
      "step": 6500
    },
    {
      "epoch": 1.2,
      "eval_exact_match": 58.77010406811731,
      "eval_f1": 75.82376993425314,
      "step": 6600
    },
    {
      "epoch": 1.26,
      "eval_exact_match": 58.24976348155156,
      "eval_f1": 75.0320889415698,
      "step": 6900
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.8122151321786692e-05,
      "loss": 1.2123,
      "step": 7000
    },
    {
      "epoch": 1.31,
      "eval_exact_match": 58.98770104068117,
      "eval_f1": 76.02527498333018,
      "step": 7200
    },
    {
      "epoch": 1.37,
      "learning_rate": 1.5847766636280767e-05,
      "loss": 1.1885,
      "step": 7500
    },
    {
      "epoch": 1.37,
      "eval_exact_match": 58.9120151371807,
      "eval_f1": 76.0439212810425,
      "step": 7500
    },
    {
      "epoch": 1.42,
      "eval_exact_match": 58.826868495742666,
      "eval_f1": 75.94488358894192,
      "step": 7800
    },
    {
      "epoch": 1.46,
      "learning_rate": 1.3568824065633548e-05,
      "loss": 1.2468,
      "step": 8000
    },
    {
      "epoch": 1.48,
      "eval_exact_match": 59.3282876064333,
      "eval_f1": 76.00751273155576,
      "step": 8100
    },
    {
      "epoch": 1.53,
      "eval_exact_match": 58.99716177861873,
      "eval_f1": 75.85711081597596,
      "step": 8400
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.1289881494986327e-05,
      "loss": 1.2093,
      "step": 8500
    },
    {
      "epoch": 1.59,
      "eval_exact_match": 59.04446546830653,
      "eval_f1": 75.83015637646359,
      "step": 8700
    },
    {
      "epoch": 1.64,
      "learning_rate": 9.010938924339108e-06,
      "loss": 1.2199,
      "step": 9000
    },
    {
      "epoch": 1.64,
      "eval_exact_match": 59.10122989593188,
      "eval_f1": 76.0261318966032,
      "step": 9000
    },
    {
      "epoch": 1.7,
      "eval_exact_match": 59.0728476821192,
      "eval_f1": 76.13920824424939,
      "step": 9300
    },
    {
      "epoch": 1.73,
      "learning_rate": 6.731996353691887e-06,
      "loss": 1.2102,
      "step": 9500
    },
    {
      "epoch": 1.75,
      "eval_exact_match": 59.0728476821192,
      "eval_f1": 76.09156663606447,
      "step": 9600
    },
    {
      "epoch": 1.8,
      "eval_exact_match": 59.47965941343425,
      "eval_f1": 76.2619678916128,
      "step": 9900
    },
    {
      "epoch": 1.82,
      "learning_rate": 4.453053783044668e-06,
      "loss": 1.1986,
      "step": 10000
    },
    {
      "epoch": 1.86,
      "eval_exact_match": 59.6310312204352,
      "eval_f1": 76.36038789574842,
      "step": 10200
    },
    {
      "epoch": 1.91,
      "learning_rate": 2.174111212397448e-06,
      "loss": 1.2024,
      "step": 10500
    },
    {
      "epoch": 1.91,
      "eval_exact_match": 59.39451277199622,
      "eval_f1": 76.24512822926708,
      "step": 10500
    },
    {
      "epoch": 1.97,
      "eval_exact_match": 59.593188268684955,
      "eval_f1": 76.29176107833557,
      "step": 10800
    },
    {
      "epoch": 2.0,
      "step": 10970,
      "total_flos": 2544773915225664.0,
      "train_loss": 1.452151380763232,
      "train_runtime": 2009.9163,
      "train_samples_per_second": 87.314,
      "train_steps_per_second": 5.458
    }
  ],
  "max_steps": 10970,
  "num_train_epochs": 2,
  "total_flos": 2544773915225664.0,
  "trial_name": null,
  "trial_params": null
}