{
  "best_metric": 1.4257140159606934,
  "best_model_checkpoint": "./outputs/checkpoint-2300",
  "epoch": 1.6757741347905282,
  "eval_steps": 100,
  "global_step": 2300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.1907,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.0242879390716553,
      "eval_runtime": 144.9414,
      "eval_samples_per_second": 43.286,
      "eval_steps_per_second": 5.416,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 1.9738,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 1.9313020706176758,
      "eval_runtime": 143.9274,
      "eval_samples_per_second": 43.591,
      "eval_steps_per_second": 5.454,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 1.9003,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 1.8709847927093506,
      "eval_runtime": 144.0274,
      "eval_samples_per_second": 43.561,
      "eval_steps_per_second": 5.45,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 1.849,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 1.8261810541152954,
      "eval_runtime": 144.0251,
      "eval_samples_per_second": 43.562,
      "eval_steps_per_second": 5.45,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 1.7978,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 1.783812403678894,
      "eval_runtime": 143.9209,
      "eval_samples_per_second": 43.593,
      "eval_steps_per_second": 5.454,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 1.7696,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 1.7559951543807983,
      "eval_runtime": 143.9146,
      "eval_samples_per_second": 43.595,
      "eval_steps_per_second": 5.455,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 1.7308,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 1.7238949537277222,
      "eval_runtime": 143.9925,
      "eval_samples_per_second": 43.572,
      "eval_steps_per_second": 5.452,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 1.7094,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 1.6951390504837036,
      "eval_runtime": 143.9787,
      "eval_samples_per_second": 43.576,
      "eval_steps_per_second": 5.452,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 1.674,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 1.6701356172561646,
      "eval_runtime": 144.0222,
      "eval_samples_per_second": 43.563,
      "eval_steps_per_second": 5.451,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 1.6589,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 1.6467818021774292,
      "eval_runtime": 143.9955,
      "eval_samples_per_second": 43.571,
      "eval_steps_per_second": 5.452,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 1.6385,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 1.6242790222167969,
      "eval_runtime": 144.0793,
      "eval_samples_per_second": 43.545,
      "eval_steps_per_second": 5.448,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 1.6033,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 1.6063916683197021,
      "eval_runtime": 144.0465,
      "eval_samples_per_second": 43.555,
      "eval_steps_per_second": 5.45,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 1.6104,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 1.582713007926941,
      "eval_runtime": 144.0533,
      "eval_samples_per_second": 43.553,
      "eval_steps_per_second": 5.449,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 1.5638,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 1.5626935958862305,
      "eval_runtime": 143.9538,
      "eval_samples_per_second": 43.583,
      "eval_steps_per_second": 5.453,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 1.5362,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 1.5464439392089844,
      "eval_runtime": 143.8789,
      "eval_samples_per_second": 43.606,
      "eval_steps_per_second": 5.456,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 1.5169,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 1.529025673866272,
      "eval_runtime": 143.9243,
      "eval_samples_per_second": 43.592,
      "eval_steps_per_second": 5.454,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 1.5148,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 1.511838436126709,
      "eval_runtime": 143.9329,
      "eval_samples_per_second": 43.59,
      "eval_steps_per_second": 5.454,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 1.4882,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 1.4979889392852783,
      "eval_runtime": 143.9162,
      "eval_samples_per_second": 43.595,
      "eval_steps_per_second": 5.455,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 1.4777,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 1.4818388223648071,
      "eval_runtime": 143.9405,
      "eval_samples_per_second": 43.587,
      "eval_steps_per_second": 5.454,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 1.4608,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 1.4687143564224243,
      "eval_runtime": 143.8741,
      "eval_samples_per_second": 43.608,
      "eval_steps_per_second": 5.456,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 1.4461,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 1.452826738357544,
      "eval_runtime": 143.8579,
      "eval_samples_per_second": 43.612,
      "eval_steps_per_second": 5.457,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 1.421,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 1.4381742477416992,
      "eval_runtime": 143.8526,
      "eval_samples_per_second": 43.614,
      "eval_steps_per_second": 5.457,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 1.42,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 1.4257140159606934,
      "eval_runtime": 143.9412,
      "eval_samples_per_second": 43.587,
      "eval_steps_per_second": 5.454,
      "step": 2300
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.3710280298073293e+17,
  "trial_name": null,
  "trial_params": null
}