{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.997867803837953,
"eval_steps": 500,
"global_step": 234,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0042643923240938165,
"grad_norm": 13.164892633164182,
"learning_rate": 8.333333333333333e-07,
"loss": 1.2838,
"step": 1
},
{
"epoch": 0.021321961620469083,
"grad_norm": 13.029711373453647,
"learning_rate": 4.166666666666667e-06,
"loss": 1.2526,
"step": 5
},
{
"epoch": 0.042643923240938165,
"grad_norm": 3.5199617069158804,
"learning_rate": 8.333333333333334e-06,
"loss": 1.0605,
"step": 10
},
{
"epoch": 0.06396588486140725,
"grad_norm": 1.2776560944344613,
"learning_rate": 1.25e-05,
"loss": 0.9123,
"step": 15
},
{
"epoch": 0.08528784648187633,
"grad_norm": 0.8246714856851984,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.8532,
"step": 20
},
{
"epoch": 0.10660980810234541,
"grad_norm": 0.7374629021484648,
"learning_rate": 1.9998881018102735e-05,
"loss": 0.8593,
"step": 25
},
{
"epoch": 0.1279317697228145,
"grad_norm": 0.7107522845433699,
"learning_rate": 1.9959742939952393e-05,
"loss": 0.831,
"step": 30
},
{
"epoch": 0.14925373134328357,
"grad_norm": 0.6801911731580033,
"learning_rate": 1.9864905939235215e-05,
"loss": 0.8091,
"step": 35
},
{
"epoch": 0.17057569296375266,
"grad_norm": 0.6722594515036292,
"learning_rate": 1.9714900382928674e-05,
"loss": 0.8082,
"step": 40
},
{
"epoch": 0.19189765458422176,
"grad_norm": 0.7086508757812415,
"learning_rate": 1.9510565162951538e-05,
"loss": 0.7721,
"step": 45
},
{
"epoch": 0.21321961620469082,
"grad_norm": 0.6938808747188856,
"learning_rate": 1.9253043004739967e-05,
"loss": 0.7891,
"step": 50
},
{
"epoch": 0.2345415778251599,
"grad_norm": 0.6827952547206133,
"learning_rate": 1.8943774076663372e-05,
"loss": 0.7839,
"step": 55
},
{
"epoch": 0.255863539445629,
"grad_norm": 0.684615563560688,
"learning_rate": 1.8584487936018663e-05,
"loss": 0.7652,
"step": 60
},
{
"epoch": 0.2771855010660981,
"grad_norm": 0.7280058924197303,
"learning_rate": 1.8177193856644315e-05,
"loss": 0.7944,
"step": 65
},
{
"epoch": 0.29850746268656714,
"grad_norm": 0.6699981012677432,
"learning_rate": 1.7724169592245996e-05,
"loss": 0.7975,
"step": 70
},
{
"epoch": 0.31982942430703626,
"grad_norm": 0.6588823343752745,
"learning_rate": 1.7227948638273918e-05,
"loss": 0.7341,
"step": 75
},
{
"epoch": 0.3411513859275053,
"grad_norm": 0.6800591707914998,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.7687,
"step": 80
},
{
"epoch": 0.3624733475479744,
"grad_norm": 0.7300767152467426,
"learning_rate": 1.6117242991150064e-05,
"loss": 0.7607,
"step": 85
},
{
"epoch": 0.3837953091684435,
"grad_norm": 0.6705302157505296,
"learning_rate": 1.5508969814521026e-05,
"loss": 0.7897,
"step": 90
},
{
"epoch": 0.4051172707889126,
"grad_norm": 0.6951439660005515,
"learning_rate": 1.4869888244043674e-05,
"loss": 0.7535,
"step": 95
},
{
"epoch": 0.42643923240938164,
"grad_norm": 0.6846075174693704,
"learning_rate": 1.4203572283095657e-05,
"loss": 0.7678,
"step": 100
},
{
"epoch": 0.44776119402985076,
"grad_norm": 0.6585701842637474,
"learning_rate": 1.3513748240813429e-05,
"loss": 0.7525,
"step": 105
},
{
"epoch": 0.4690831556503198,
"grad_norm": 0.7211401440531015,
"learning_rate": 1.2804273893060028e-05,
"loss": 0.7424,
"step": 110
},
{
"epoch": 0.4904051172707889,
"grad_norm": 0.6870611840328074,
"learning_rate": 1.2079116908177592e-05,
"loss": 0.7711,
"step": 115
},
{
"epoch": 0.511727078891258,
"grad_norm": 0.6338815690410836,
"learning_rate": 1.1342332658176556e-05,
"loss": 0.7356,
"step": 120
},
{
"epoch": 0.5330490405117271,
"grad_norm": 0.628195657516242,
"learning_rate": 1.0598041539450344e-05,
"loss": 0.7168,
"step": 125
},
{
"epoch": 0.5543710021321961,
"grad_norm": 0.6711724525095714,
"learning_rate": 9.850405929847367e-06,
"loss": 0.7445,
"step": 130
},
{
"epoch": 0.5756929637526652,
"grad_norm": 0.6984524411519915,
"learning_rate": 9.103606910965666e-06,
"loss": 0.7783,
"step": 135
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.649642511023887,
"learning_rate": 8.361820885848623e-06,
"loss": 0.7354,
"step": 140
},
{
"epoch": 0.6183368869936035,
"grad_norm": 0.6521697494650107,
"learning_rate": 7.629196222845027e-06,
"loss": 0.7228,
"step": 145
},
{
"epoch": 0.6396588486140725,
"grad_norm": 0.656726721576637,
"learning_rate": 6.909830056250527e-06,
"loss": 0.747,
"step": 150
},
{
"epoch": 0.6609808102345416,
"grad_norm": 0.6578315858420589,
"learning_rate": 6.207745373470717e-06,
"loss": 0.7272,
"step": 155
},
{
"epoch": 0.6823027718550106,
"grad_norm": 0.656996754461004,
"learning_rate": 5.526868516843673e-06,
"loss": 0.7439,
"step": 160
},
{
"epoch": 0.7036247334754797,
"grad_norm": 0.6693850015532866,
"learning_rate": 4.87100722594094e-06,
"loss": 0.7347,
"step": 165
},
{
"epoch": 0.7249466950959488,
"grad_norm": 0.6765008677512011,
"learning_rate": 4.2438293431432665e-06,
"loss": 0.7324,
"step": 170
},
{
"epoch": 0.746268656716418,
"grad_norm": 0.6114867479530466,
"learning_rate": 3.6488423015782128e-06,
"loss": 0.7459,
"step": 175
},
{
"epoch": 0.767590618336887,
"grad_norm": 0.6195342974114637,
"learning_rate": 3.089373510131354e-06,
"loss": 0.7286,
"step": 180
},
{
"epoch": 0.7889125799573561,
"grad_norm": 0.6675783046459663,
"learning_rate": 2.5685517452260566e-06,
"loss": 0.7227,
"step": 185
},
{
"epoch": 0.8102345415778252,
"grad_norm": 0.6190369577895048,
"learning_rate": 2.08928965343659e-06,
"loss": 0.7218,
"step": 190
},
{
"epoch": 0.8315565031982942,
"grad_norm": 0.6525827306828497,
"learning_rate": 1.6542674627869738e-06,
"loss": 0.7234,
"step": 195
},
{
"epoch": 0.8528784648187633,
"grad_norm": 0.6419699978132826,
"learning_rate": 1.2659179938287035e-06,
"loss": 0.7289,
"step": 200
},
{
"epoch": 0.8742004264392325,
"grad_norm": 0.6649952150232479,
"learning_rate": 9.264130543213512e-07,
"loss": 0.7376,
"step": 205
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.6750907974087639,
"learning_rate": 6.37651293602628e-07,
"loss": 0.7386,
"step": 210
},
{
"epoch": 0.9168443496801706,
"grad_norm": 0.6851406465183166,
"learning_rate": 4.012475845711106e-07,
"loss": 0.7617,
"step": 215
},
{
"epoch": 0.9381663113006397,
"grad_norm": 0.6824828927280917,
"learning_rate": 2.1852399266194312e-07,
"loss": 0.7308,
"step": 220
},
{
"epoch": 0.9594882729211087,
"grad_norm": 0.6503758882585686,
"learning_rate": 9.0502382320653e-08,
"loss": 0.7134,
"step": 225
},
{
"epoch": 0.9808102345415778,
"grad_norm": 0.8072821198755136,
"learning_rate": 1.7898702322648453e-08,
"loss": 0.7371,
"step": 230
},
{
"epoch": 0.997867803837953,
"eval_loss": 0.8081588745117188,
"eval_runtime": 280.7935,
"eval_samples_per_second": 1.781,
"eval_steps_per_second": 0.057,
"step": 234
},
{
"epoch": 0.997867803837953,
"step": 234,
"total_flos": 33774549073920.0,
"train_loss": 0.10635672063909025,
"train_runtime": 571.525,
"train_samples_per_second": 52.491,
"train_steps_per_second": 0.409
}
],
"logging_steps": 5,
"max_steps": 234,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 33774549073920.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
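
A minimal sketch of how this trainer state could be consumed downstream, assuming the JSON above is saved locally as trainer_state.json (the filename the Hugging Face Trainer normally uses for this structure); the field names are taken directly from the log above:

import json

# Load the saved trainer state and print the logged training-loss curve.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    # Only the per-step training entries carry a "loss" key; the eval and
    # final-summary entries use "eval_loss" / "train_loss" instead.
    if "loss" in entry:
        print(f'step {entry["step"]:>3}  loss {entry["loss"]:.4f}  lr {entry["learning_rate"]:.2e}')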