GAMA-Tiny-Code-Generator / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.0,
  "eval_steps": 50,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33,
      "learning_rate": 5.2354709418837674e-05,
      "loss": 1.6331,
      "step": 25
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.959919839679359e-05,
      "loss": 0.8124,
      "step": 50
    },
    {
      "epoch": 0.67,
      "eval_loss": 2.1140503883361816,
      "eval_runtime": 11.8386,
      "eval_samples_per_second": 0.76,
      "eval_steps_per_second": 0.169,
      "step": 50
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.68436873747495e-05,
      "loss": 0.766,
      "step": 75
    },
    {
      "epoch": 1.33,
      "learning_rate": 4.408817635270541e-05,
      "loss": 0.562,
      "step": 100
    },
    {
      "epoch": 1.33,
      "eval_loss": 1.9290642738342285,
      "eval_runtime": 11.9715,
      "eval_samples_per_second": 0.752,
      "eval_steps_per_second": 0.167,
      "step": 100
    },
    {
      "epoch": 1.67,
      "learning_rate": 4.133266533066132e-05,
      "loss": 0.3301,
      "step": 125
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.8577154308617235e-05,
      "loss": 0.3811,
      "step": 150
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.065908908843994,
      "eval_runtime": 11.9277,
      "eval_samples_per_second": 0.755,
      "eval_steps_per_second": 0.168,
      "step": 150
    },
    {
      "epoch": 2.33,
      "learning_rate": 3.582164328657315e-05,
      "loss": 0.3086,
      "step": 175
    },
    {
      "epoch": 2.67,
      "learning_rate": 3.306613226452906e-05,
      "loss": 0.2969,
      "step": 200
    },
    {
      "epoch": 2.67,
      "eval_loss": 2.2028963565826416,
      "eval_runtime": 11.9591,
      "eval_samples_per_second": 0.753,
      "eval_steps_per_second": 0.167,
      "step": 200
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.0310621242484972e-05,
      "loss": 0.2236,
      "step": 225
    },
    {
      "epoch": 3.33,
      "learning_rate": 2.755511022044088e-05,
      "loss": 0.2148,
      "step": 250
    },
    {
      "epoch": 3.33,
      "eval_loss": 2.3397459983825684,
      "eval_runtime": 11.8913,
      "eval_samples_per_second": 0.757,
      "eval_steps_per_second": 0.168,
      "step": 250
    },
    {
      "epoch": 3.67,
      "learning_rate": 2.4799599198396796e-05,
      "loss": 0.1813,
      "step": 275
    },
    {
      "epoch": 4.0,
      "learning_rate": 2.2044088176352705e-05,
      "loss": 0.2172,
      "step": 300
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.2869534492492676,
      "eval_runtime": 11.8718,
      "eval_samples_per_second": 0.758,
      "eval_steps_per_second": 0.168,
      "step": 300
    },
    {
      "epoch": 4.33,
      "learning_rate": 1.9288577154308618e-05,
      "loss": 0.1436,
      "step": 325
    },
    {
      "epoch": 4.67,
      "learning_rate": 1.653306613226453e-05,
      "loss": 0.1659,
      "step": 350
    },
    {
      "epoch": 4.67,
      "eval_loss": 2.3965632915496826,
      "eval_runtime": 11.9151,
      "eval_samples_per_second": 0.755,
      "eval_steps_per_second": 0.168,
      "step": 350
    },
    {
      "epoch": 5.0,
      "learning_rate": 1.377755511022044e-05,
      "loss": 0.1664,
      "step": 375
    },
    {
      "epoch": 5.33,
      "learning_rate": 1.1022044088176353e-05,
      "loss": 0.1165,
      "step": 400
    },
    {
      "epoch": 5.33,
      "eval_loss": 2.4968247413635254,
      "eval_runtime": 11.858,
      "eval_samples_per_second": 0.759,
      "eval_steps_per_second": 0.169,
      "step": 400
    },
    {
      "epoch": 5.67,
      "learning_rate": 8.266533066132265e-06,
      "loss": 0.1572,
      "step": 425
    },
    {
      "epoch": 6.0,
      "learning_rate": 5.511022044088176e-06,
      "loss": 0.1174,
      "step": 450
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.4439687728881836,
      "eval_runtime": 11.9003,
      "eval_samples_per_second": 0.756,
      "eval_steps_per_second": 0.168,
      "step": 450
    }
  ],
  "logging_steps": 25,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 50,
  "total_flos": 1.104886503936e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
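
For readers who want to inspect this log programmatically rather than by eye, below is a minimal Python sketch (not part of the repository) that parses the structure above. It assumes the file has been downloaded locally as `trainer_state.json` and uses only the standard library; the filename and the print formatting are illustrative choices, not anything prescribed by the repo.

```python
import json

# Load the state file written by the Hugging Face Trainer.
# Assumption: it sits next to this script as "trainer_state.json".
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# log_history mixes two kinds of records, both keyed by "step":
# training logs (with "loss"/"learning_rate") and evaluation logs (with "eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']}")
for e in train_logs:
    print(f"step {e['step']:>3}: train loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")
for e in eval_logs:
    print(f"step {e['step']:>3}: eval  loss {e['eval_loss']:.4f}")
```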