{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.0,
  "global_step": 12336,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "learning_rate": 1.9189364461738006e-05,
      "loss": 2.5752,
      "step": 500
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.8378728923476004e-05,
      "loss": 2.4758,
      "step": 1000
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.756809338521401e-05,
      "loss": 2.4695,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.0689399242401123,
      "eval_runtime": 81.3366,
      "eval_samples_per_second": 63.846,
      "eval_steps_per_second": 7.991,
      "step": 1542
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.675745784695201e-05,
      "loss": 2.4094,
      "step": 2000
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.5946822308690015e-05,
      "loss": 2.3802,
      "step": 2500
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.5136186770428017e-05,
      "loss": 2.3622,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.0509471893310547,
      "eval_runtime": 81.3625,
      "eval_samples_per_second": 63.825,
      "eval_steps_per_second": 7.989,
      "step": 3084
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.432555123216602e-05,
      "loss": 2.3547,
      "step": 3500
    },
    {
      "epoch": 2.59,
      "learning_rate": 1.3514915693904023e-05,
      "loss": 2.3319,
      "step": 4000
    },
    {
      "epoch": 2.92,
      "learning_rate": 1.2704280155642024e-05,
      "loss": 2.3303,
      "step": 4500
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.0795907974243164,
      "eval_runtime": 81.2667,
      "eval_samples_per_second": 63.901,
      "eval_steps_per_second": 7.998,
      "step": 4626
    },
    {
      "epoch": 3.24,
      "learning_rate": 1.1893644617380028e-05,
      "loss": 2.293,
      "step": 5000
    },
    {
      "epoch": 3.57,
      "learning_rate": 1.108300907911803e-05,
      "loss": 2.2819,
      "step": 5500
    },
    {
      "epoch": 3.89,
      "learning_rate": 1.027237354085603e-05,
      "loss": 2.2865,
      "step": 6000
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.9920992851257324,
      "eval_runtime": 81.5086,
      "eval_samples_per_second": 63.711,
      "eval_steps_per_second": 7.975,
      "step": 6168
    },
    {
      "epoch": 4.22,
      "learning_rate": 9.461738002594033e-06,
      "loss": 2.2856,
      "step": 6500
    },
    {
      "epoch": 4.54,
      "learning_rate": 8.651102464332037e-06,
      "loss": 2.2669,
      "step": 7000
    },
    {
      "epoch": 4.86,
      "learning_rate": 7.84046692607004e-06,
      "loss": 2.2598,
      "step": 7500
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.9843554496765137,
      "eval_runtime": 81.4242,
      "eval_samples_per_second": 63.777,
      "eval_steps_per_second": 7.983,
      "step": 7710
    },
    {
      "epoch": 5.19,
      "learning_rate": 7.029831387808041e-06,
      "loss": 2.2482,
      "step": 8000
    },
    {
      "epoch": 5.51,
      "learning_rate": 6.219195849546045e-06,
      "loss": 2.2285,
      "step": 8500
    },
    {
      "epoch": 5.84,
      "learning_rate": 5.4085603112840465e-06,
      "loss": 2.2544,
      "step": 9000
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.0076751708984375,
      "eval_runtime": 81.3076,
      "eval_samples_per_second": 63.869,
      "eval_steps_per_second": 7.994,
      "step": 9252
    },
    {
      "epoch": 6.16,
      "learning_rate": 4.5979247730220496e-06,
      "loss": 2.2357,
      "step": 9500
    },
    {
      "epoch": 6.49,
      "learning_rate": 3.7872892347600522e-06,
      "loss": 2.218,
      "step": 10000
    },
    {
      "epoch": 6.81,
      "learning_rate": 2.9766536964980545e-06,
      "loss": 2.2128,
      "step": 10500
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.9913570880889893,
      "eval_runtime": 81.2325,
      "eval_samples_per_second": 63.928,
      "eval_steps_per_second": 8.002,
      "step": 10794
    },
    {
      "epoch": 7.13,
      "learning_rate": 2.166018158236057e-06,
      "loss": 2.2075,
      "step": 11000
    },
    {
      "epoch": 7.46,
      "learning_rate": 1.3553826199740596e-06,
      "loss": 2.1936,
      "step": 11500
    },
    {
      "epoch": 7.78,
      "learning_rate": 5.447470817120623e-07,
      "loss": 2.1963,
      "step": 12000
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.9882222414016724,
      "eval_runtime": 81.2899,
      "eval_samples_per_second": 63.883,
      "eval_steps_per_second": 7.996,
      "step": 12336
    },
    {
      "epoch": 8.0,
      "step": 12336,
      "total_flos": 2.0776857912786125e+17,
      "train_loss": 2.304505779894719,
      "train_runtime": 46564.1697,
      "train_samples_per_second": 16.953,
      "train_steps_per_second": 0.265
    }
  ],
  "max_steps": 12336,
  "num_train_epochs": 8,
  "total_flos": 2.0776857912786125e+17,
  "trial_name": null,
  "trial_params": null
}