{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8705493166187864,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 0.00010077565027123787,
      "loss": 0.4732,
      "step": 500
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00010075885246660077,
      "loss": 0.4445,
      "step": 1000
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00010073086073069567,
      "loss": 0.4329,
      "step": 1500
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001006916819700645,
      "loss": 0.4256,
      "step": 2000
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001006414377109213,
      "loss": 0.421,
      "step": 2500
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00010057993897463803,
      "loss": 0.4167,
      "step": 3000
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00010050729045131372,
      "loss": 0.4132,
      "step": 3500
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00010042351006588448,
      "loss": 0.4107,
      "step": 4000
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00010032861848990674,
      "loss": 0.4091,
      "step": 4500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001002228621438404,
      "loss": 0.4067,
      "step": 5000
    },
    {
      "epoch": 0.44,
      "eval_loss": 0.3877949118614197,
      "eval_runtime": 108.0468,
      "eval_samples_per_second": 46.276,
      "eval_steps_per_second": 0.731,
      "step": 5000
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00010010584325708389,
      "loss": 0.4038,
      "step": 5500
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.997779155931062e-05,
      "loss": 0.402,
      "step": 6000
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.983873864536092e-05,
      "loss": 0.401,
      "step": 6500
    },
    {
      "epoch": 0.61,
      "learning_rate": 9.968871882446063e-05,
      "loss": 0.4001,
      "step": 7000
    },
    {
      "epoch": 0.65,
      "learning_rate": 9.952776911175577e-05,
      "loss": 0.3987,
      "step": 7500
    },
    {
      "epoch": 0.7,
      "learning_rate": 9.935592921917959e-05,
      "loss": 0.3974,
      "step": 8000
    },
    {
      "epoch": 0.74,
      "learning_rate": 9.917361771757108e-05,
      "loss": 0.3954,
      "step": 8500
    },
    {
      "epoch": 0.78,
      "learning_rate": 9.898014889719868e-05,
      "loss": 0.3945,
      "step": 9000
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.877592501404869e-05,
      "loss": 0.394,
      "step": 9500
    },
    {
      "epoch": 0.87,
      "learning_rate": 9.856099645730841e-05,
      "loss": 0.3926,
      "step": 10000
    },
    {
      "epoch": 0.87,
      "eval_loss": 0.37431180477142334,
      "eval_runtime": 106.4392,
      "eval_samples_per_second": 46.975,
      "eval_steps_per_second": 0.742,
      "step": 10000
    },
    {
      "epoch": 0.04,
      "learning_rate": 9.833541625738316e-05,
      "loss": 0.3929,
      "step": 10500
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.809924007281187e-05,
      "loss": 0.3906,
      "step": 11000
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.78525261765341e-05,
      "loss": 0.3899,
      "step": 11500
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.759533544151208e-05,
      "loss": 0.3892,
      "step": 12000
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.732773132571125e-05,
      "loss": 0.3889,
      "step": 12500
    },
    {
      "epoch": 0.26,
      "learning_rate": 9.705034604088048e-05,
      "loss": 0.3865,
      "step": 13000
    },
    {
      "epoch": 0.3,
      "learning_rate": 9.676213628592508e-05,
      "loss": 0.3865,
      "step": 13500
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.64637187296151e-05,
      "loss": 0.3867,
      "step": 14000
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.615516700201724e-05,
      "loss": 0.3858,
      "step": 14500
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.583720443927501e-05,
      "loss": 0.3853,
      "step": 15000
    },
    {
      "epoch": 0.44,
      "eval_loss": 0.36506548523902893,
      "eval_runtime": 96.1419,
      "eval_samples_per_second": 52.006,
      "eval_steps_per_second": 0.822,
      "step": 15000
    },
    {
      "epoch": 0.48,
      "learning_rate": 9.550863512110018e-05,
      "loss": 0.3844,
      "step": 15500
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.517016728422667e-05,
      "loss": 0.3834,
      "step": 16000
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.482188444052858e-05,
      "loss": 0.3826,
      "step": 16500
    },
    {
      "epoch": 0.61,
      "learning_rate": 9.446387252358614e-05,
      "loss": 0.3824,
      "step": 17000
    },
    {
      "epoch": 0.65,
      "learning_rate": 9.40962198674828e-05,
      "loss": 0.3825,
      "step": 17500
    },
    {
      "epoch": 0.7,
      "learning_rate": 9.371901718501017e-05,
      "loss": 0.3804,
      "step": 18000
    },
    {
      "epoch": 0.74,
      "learning_rate": 9.333314023958391e-05,
      "loss": 0.3803,
      "step": 18500
    },
    {
      "epoch": 0.78,
      "learning_rate": 9.293713767144707e-05,
      "loss": 0.381,
      "step": 19000
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.253187106312908e-05,
      "loss": 0.3793,
      "step": 19500
    },
    {
      "epoch": 0.87,
      "learning_rate": 9.211744040810141e-05,
      "loss": 0.3793,
      "step": 20000
    },
    {
      "epoch": 0.87,
      "eval_loss": 0.35968872904777527,
      "eval_runtime": 95.0235,
      "eval_samples_per_second": 52.619,
      "eval_steps_per_second": 0.831,
      "step": 20000
    }
  ],
  "max_steps": 100000,
  "num_train_epochs": 9,
  "total_flos": 9.419722694590464e+20,
  "trial_name": null,
  "trial_params": null
}