|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 12330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.41,
      "learning_rate": 4.797242497972425e-05,
      "loss": 0.6204,
      "step": 500
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.59448499594485e-05,
      "loss": 0.48,
      "step": 1000
    },
    {
      "epoch": 1.22,
      "learning_rate": 4.3917274939172754e-05,
      "loss": 0.4046,
      "step": 1500
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.1889699918897e-05,
      "loss": 0.3753,
      "step": 2000
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.986212489862125e-05,
      "loss": 0.3585,
      "step": 2500
    },
    {
      "epoch": 2.43,
      "learning_rate": 3.78345498783455e-05,
      "loss": 0.3135,
      "step": 3000
    },
    {
      "epoch": 2.84,
      "learning_rate": 3.580697485806975e-05,
      "loss": 0.3133,
      "step": 3500
    },
    {
      "epoch": 3.24,
      "learning_rate": 3.3779399837794e-05,
      "loss": 0.2859,
      "step": 4000
    },
    {
      "epoch": 3.65,
      "learning_rate": 3.175182481751825e-05,
      "loss": 0.2699,
      "step": 4500
    },
    {
      "epoch": 4.06,
      "learning_rate": 2.9724249797242497e-05,
      "loss": 0.2659,
      "step": 5000
    },
    {
      "epoch": 4.46,
      "learning_rate": 2.7696674776966745e-05,
      "loss": 0.2406,
      "step": 5500
    },
    {
      "epoch": 4.87,
      "learning_rate": 2.5669099756691e-05,
      "loss": 0.243,
      "step": 6000
    },
    {
      "epoch": 5.27,
      "learning_rate": 2.3641524736415248e-05,
      "loss": 0.2233,
      "step": 6500
    },
    {
      "epoch": 5.68,
      "learning_rate": 2.1613949716139496e-05,
      "loss": 0.2175,
      "step": 7000
    },
    {
      "epoch": 6.08,
      "learning_rate": 1.9586374695863748e-05,
      "loss": 0.2141,
      "step": 7500
    },
    {
      "epoch": 6.49,
      "learning_rate": 1.7558799675587996e-05,
      "loss": 0.1998,
      "step": 8000
    },
    {
      "epoch": 6.89,
      "learning_rate": 1.5531224655312247e-05,
      "loss": 0.1982,
      "step": 8500
    },
    {
      "epoch": 7.3,
      "learning_rate": 1.3503649635036497e-05,
      "loss": 0.188,
      "step": 9000
    },
    {
      "epoch": 7.7,
      "learning_rate": 1.1476074614760747e-05,
      "loss": 0.1835,
      "step": 9500
    },
    {
      "epoch": 8.11,
      "learning_rate": 9.448499594484997e-06,
      "loss": 0.1825,
      "step": 10000
    },
    {
      "epoch": 8.52,
      "learning_rate": 7.420924574209247e-06,
      "loss": 0.1745,
      "step": 10500
    },
    {
      "epoch": 8.92,
      "learning_rate": 5.393349553933496e-06,
      "loss": 0.1752,
      "step": 11000
    },
    {
      "epoch": 9.33,
      "learning_rate": 3.365774533657746e-06,
      "loss": 0.17,
      "step": 11500
    },
    {
      "epoch": 9.73,
      "learning_rate": 1.3381995133819951e-06,
      "loss": 0.167,
      "step": 12000
    },
    {
      "epoch": 10.0,
      "step": 12330,
      "total_flos": 1.2986290396643328e+16,
      "train_loss": 0.26658661773508385,
      "train_runtime": 2799.7511,
      "train_samples_per_second": 70.438,
      "train_steps_per_second": 4.404
    }
  ],
  "max_steps": 12330,
  "num_train_epochs": 10,
  "total_flos": 1.2986290396643328e+16,
  "trial_name": null,
  "trial_params": null
}
|
|