{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.10621348911311737,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0014161798548415648,
"grad_norm": 23.20075035095215,
"learning_rate": 3.3333333333333335e-05,
"loss": 11.3912,
"step": 1
},
{
"epoch": 0.0014161798548415648,
"eval_loss": 10.59463119506836,
"eval_runtime": 151.6501,
"eval_samples_per_second": 3.924,
"eval_steps_per_second": 1.965,
"step": 1
},
{
"epoch": 0.0028323597096831295,
"grad_norm": 28.277725219726562,
"learning_rate": 6.666666666666667e-05,
"loss": 10.9081,
"step": 2
},
{
"epoch": 0.004248539564524694,
"grad_norm": 24.364511489868164,
"learning_rate": 0.0001,
"loss": 10.0504,
"step": 3
},
{
"epoch": 0.005664719419366259,
"grad_norm": 18.489341735839844,
"learning_rate": 9.99524110790929e-05,
"loss": 7.3342,
"step": 4
},
{
"epoch": 0.007080899274207825,
"grad_norm": 25.614126205444336,
"learning_rate": 9.980973490458728e-05,
"loss": 3.8967,
"step": 5
},
{
"epoch": 0.008497079129049389,
"grad_norm": 41.44906234741211,
"learning_rate": 9.957224306869053e-05,
"loss": 1.9891,
"step": 6
},
{
"epoch": 0.009913258983890954,
"grad_norm": 24.81721305847168,
"learning_rate": 9.924038765061042e-05,
"loss": 1.3137,
"step": 7
},
{
"epoch": 0.011329438838732518,
"grad_norm": 7.7688493728637695,
"learning_rate": 9.881480035599667e-05,
"loss": 0.4025,
"step": 8
},
{
"epoch": 0.012745618693574084,
"grad_norm": 4.148148059844971,
"learning_rate": 9.829629131445342e-05,
"loss": 0.1716,
"step": 9
},
{
"epoch": 0.01416179854841565,
"grad_norm": 0.756351113319397,
"learning_rate": 9.768584753741134e-05,
"loss": 0.0929,
"step": 10
},
{
"epoch": 0.015577978403257213,
"grad_norm": 4.284754276275635,
"learning_rate": 9.698463103929542e-05,
"loss": 0.1716,
"step": 11
},
{
"epoch": 0.016994158258098777,
"grad_norm": 5.088748455047607,
"learning_rate": 9.619397662556435e-05,
"loss": 0.1915,
"step": 12
},
{
"epoch": 0.018410338112940343,
"grad_norm": 3.0917534828186035,
"learning_rate": 9.53153893518325e-05,
"loss": 0.1343,
"step": 13
},
{
"epoch": 0.01982651796778191,
"grad_norm": 3.725177049636841,
"learning_rate": 9.435054165891109e-05,
"loss": 0.1964,
"step": 14
},
{
"epoch": 0.021242697822623474,
"grad_norm": 1.8048001527786255,
"learning_rate": 9.330127018922194e-05,
"loss": 0.1206,
"step": 15
},
{
"epoch": 0.022658877677465036,
"grad_norm": 0.94793301820755,
"learning_rate": 9.21695722906443e-05,
"loss": 0.1201,
"step": 16
},
{
"epoch": 0.024075057532306602,
"grad_norm": 2.34434175491333,
"learning_rate": 9.09576022144496e-05,
"loss": 0.1184,
"step": 17
},
{
"epoch": 0.025491237387148168,
"grad_norm": 2.191009759902954,
"learning_rate": 8.966766701456177e-05,
"loss": 0.1479,
"step": 18
},
{
"epoch": 0.026907417241989733,
"grad_norm": 1.4999676942825317,
"learning_rate": 8.83022221559489e-05,
"loss": 0.1267,
"step": 19
},
{
"epoch": 0.0283235970968313,
"grad_norm": 0.44820383191108704,
"learning_rate": 8.68638668405062e-05,
"loss": 0.1192,
"step": 20
},
{
"epoch": 0.02973977695167286,
"grad_norm": 2.531404733657837,
"learning_rate": 8.535533905932738e-05,
"loss": 0.1483,
"step": 21
},
{
"epoch": 0.031155956806514427,
"grad_norm": 0.7770451903343201,
"learning_rate": 8.377951038078302e-05,
"loss": 0.1114,
"step": 22
},
{
"epoch": 0.03257213666135599,
"grad_norm": 1.0737981796264648,
"learning_rate": 8.213938048432697e-05,
"loss": 0.118,
"step": 23
},
{
"epoch": 0.033988316516197555,
"grad_norm": 3.9276962280273438,
"learning_rate": 8.043807145043604e-05,
"loss": 0.2074,
"step": 24
},
{
"epoch": 0.035404496371039124,
"grad_norm": 4.217297554016113,
"learning_rate": 7.86788218175523e-05,
"loss": 0.2623,
"step": 25
},
{
"epoch": 0.035404496371039124,
"eval_loss": 0.1676926612854004,
"eval_runtime": 152.4912,
"eval_samples_per_second": 3.902,
"eval_steps_per_second": 1.954,
"step": 25
},
{
"epoch": 0.036820676225880686,
"grad_norm": 2.2406327724456787,
"learning_rate": 7.68649804173412e-05,
"loss": 0.1696,
"step": 26
},
{
"epoch": 0.038236856080722255,
"grad_norm": 0.21706989407539368,
"learning_rate": 7.500000000000001e-05,
"loss": 0.1207,
"step": 27
},
{
"epoch": 0.03965303593556382,
"grad_norm": 0.6819403767585754,
"learning_rate": 7.308743066175172e-05,
"loss": 0.1059,
"step": 28
},
{
"epoch": 0.04106921579040538,
"grad_norm": 1.85183584690094,
"learning_rate": 7.113091308703498e-05,
"loss": 0.1389,
"step": 29
},
{
"epoch": 0.04248539564524695,
"grad_norm": 0.6912545561790466,
"learning_rate": 6.91341716182545e-05,
"loss": 0.1074,
"step": 30
},
{
"epoch": 0.04390157550008851,
"grad_norm": 0.2985907196998596,
"learning_rate": 6.710100716628344e-05,
"loss": 0.1139,
"step": 31
},
{
"epoch": 0.04531775535493007,
"grad_norm": 0.9557470083236694,
"learning_rate": 6.503528997521366e-05,
"loss": 0.1193,
"step": 32
},
{
"epoch": 0.04673393520977164,
"grad_norm": 0.38003310561180115,
"learning_rate": 6.294095225512603e-05,
"loss": 0.112,
"step": 33
},
{
"epoch": 0.048150115064613204,
"grad_norm": 0.3061525523662567,
"learning_rate": 6.0821980696905146e-05,
"loss": 0.107,
"step": 34
},
{
"epoch": 0.04956629491945477,
"grad_norm": 0.29527008533477783,
"learning_rate": 5.868240888334653e-05,
"loss": 0.1145,
"step": 35
},
{
"epoch": 0.050982474774296335,
"grad_norm": 0.4223112463951111,
"learning_rate": 5.6526309611002594e-05,
"loss": 0.0935,
"step": 36
},
{
"epoch": 0.0523986546291379,
"grad_norm": 0.8453797101974487,
"learning_rate": 5.435778713738292e-05,
"loss": 0.0726,
"step": 37
},
{
"epoch": 0.053814834483979467,
"grad_norm": 0.26757749915122986,
"learning_rate": 5.218096936826681e-05,
"loss": 0.0648,
"step": 38
},
{
"epoch": 0.05523101433882103,
"grad_norm": 1.951507806777954,
"learning_rate": 5e-05,
"loss": 0.145,
"step": 39
},
{
"epoch": 0.0566471941936626,
"grad_norm": 2.7410080432891846,
"learning_rate": 4.781903063173321e-05,
"loss": 0.206,
"step": 40
},
{
"epoch": 0.05806337404850416,
"grad_norm": 0.6865355968475342,
"learning_rate": 4.564221286261709e-05,
"loss": 0.0908,
"step": 41
},
{
"epoch": 0.05947955390334572,
"grad_norm": 0.949466347694397,
"learning_rate": 4.347369038899744e-05,
"loss": 0.0796,
"step": 42
},
{
"epoch": 0.06089573375818729,
"grad_norm": 0.7530195713043213,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0922,
"step": 43
},
{
"epoch": 0.06231191361302885,
"grad_norm": 1.2768175601959229,
"learning_rate": 3.917801930309486e-05,
"loss": 0.0763,
"step": 44
},
{
"epoch": 0.06372809346787042,
"grad_norm": 0.44944190979003906,
"learning_rate": 3.705904774487396e-05,
"loss": 0.0563,
"step": 45
},
{
"epoch": 0.06514427332271198,
"grad_norm": 1.1507816314697266,
"learning_rate": 3.4964710024786354e-05,
"loss": 0.1047,
"step": 46
},
{
"epoch": 0.06656045317755355,
"grad_norm": 0.27491295337677,
"learning_rate": 3.289899283371657e-05,
"loss": 0.0581,
"step": 47
},
{
"epoch": 0.06797663303239511,
"grad_norm": 0.8990926742553711,
"learning_rate": 3.086582838174551e-05,
"loss": 0.1178,
"step": 48
},
{
"epoch": 0.06939281288723669,
"grad_norm": 0.36759093403816223,
"learning_rate": 2.886908691296504e-05,
"loss": 0.0581,
"step": 49
},
{
"epoch": 0.07080899274207825,
"grad_norm": 0.25813916325569153,
"learning_rate": 2.6912569338248315e-05,
"loss": 0.0469,
"step": 50
},
{
"epoch": 0.07080899274207825,
"eval_loss": 0.10698376595973969,
"eval_runtime": 153.0381,
"eval_samples_per_second": 3.888,
"eval_steps_per_second": 1.947,
"step": 50
},
{
"epoch": 0.07222517259691981,
"grad_norm": 6.394037246704102,
"learning_rate": 2.500000000000001e-05,
"loss": 0.752,
"step": 51
},
{
"epoch": 0.07364135245176137,
"grad_norm": 0.4659101068973541,
"learning_rate": 2.3135019582658802e-05,
"loss": 0.0882,
"step": 52
},
{
"epoch": 0.07505753230660293,
"grad_norm": 0.36920779943466187,
"learning_rate": 2.132117818244771e-05,
"loss": 0.0927,
"step": 53
},
{
"epoch": 0.07647371216144451,
"grad_norm": 0.7038587927818298,
"learning_rate": 1.9561928549563968e-05,
"loss": 0.1617,
"step": 54
},
{
"epoch": 0.07788989201628607,
"grad_norm": 0.3950757086277008,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.1044,
"step": 55
},
{
"epoch": 0.07930607187112763,
"grad_norm": 2.2838950157165527,
"learning_rate": 1.622048961921699e-05,
"loss": 0.1557,
"step": 56
},
{
"epoch": 0.0807222517259692,
"grad_norm": 0.9748347401618958,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.1041,
"step": 57
},
{
"epoch": 0.08213843158081076,
"grad_norm": 0.6119588613510132,
"learning_rate": 1.3136133159493802e-05,
"loss": 0.093,
"step": 58
},
{
"epoch": 0.08355461143565233,
"grad_norm": 0.511449933052063,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.1383,
"step": 59
},
{
"epoch": 0.0849707912904939,
"grad_norm": 1.2149078845977783,
"learning_rate": 1.0332332985438248e-05,
"loss": 0.1075,
"step": 60
},
{
"epoch": 0.08638697114533546,
"grad_norm": 1.0309619903564453,
"learning_rate": 9.042397785550405e-06,
"loss": 0.0967,
"step": 61
},
{
"epoch": 0.08780315100017702,
"grad_norm": 2.3510637283325195,
"learning_rate": 7.830427709355725e-06,
"loss": 0.1512,
"step": 62
},
{
"epoch": 0.08921933085501858,
"grad_norm": 1.107035517692566,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0723,
"step": 63
},
{
"epoch": 0.09063551070986015,
"grad_norm": 0.5546442866325378,
"learning_rate": 5.649458341088915e-06,
"loss": 0.1117,
"step": 64
},
{
"epoch": 0.09205169056470172,
"grad_norm": 1.5751676559448242,
"learning_rate": 4.684610648167503e-06,
"loss": 0.1086,
"step": 65
},
{
"epoch": 0.09346787041954328,
"grad_norm": 0.3522906005382538,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.0987,
"step": 66
},
{
"epoch": 0.09488405027438485,
"grad_norm": 0.4917367398738861,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.1069,
"step": 67
},
{
"epoch": 0.09630023012922641,
"grad_norm": 1.3632891178131104,
"learning_rate": 2.314152462588659e-06,
"loss": 0.1204,
"step": 68
},
{
"epoch": 0.09771640998406797,
"grad_norm": 0.27073511481285095,
"learning_rate": 1.70370868554659e-06,
"loss": 0.0979,
"step": 69
},
{
"epoch": 0.09913258983890955,
"grad_norm": 0.3300623595714569,
"learning_rate": 1.1851996440033319e-06,
"loss": 0.0899,
"step": 70
},
{
"epoch": 0.10054876969375111,
"grad_norm": 2.2044923305511475,
"learning_rate": 7.596123493895991e-07,
"loss": 0.1263,
"step": 71
},
{
"epoch": 0.10196494954859267,
"grad_norm": 1.6999863386154175,
"learning_rate": 4.277569313094809e-07,
"loss": 0.1534,
"step": 72
},
{
"epoch": 0.10338112940343423,
"grad_norm": 0.6548103094100952,
"learning_rate": 1.9026509541272275e-07,
"loss": 0.1092,
"step": 73
},
{
"epoch": 0.1047973092582758,
"grad_norm": 0.9054380059242249,
"learning_rate": 4.7588920907110094e-08,
"loss": 0.1227,
"step": 74
},
{
"epoch": 0.10621348911311737,
"grad_norm": 0.3931428790092468,
"learning_rate": 0.0,
"loss": 0.1289,
"step": 75
},
{
"epoch": 0.10621348911311737,
"eval_loss": 0.10729946196079254,
"eval_runtime": 153.0149,
"eval_samples_per_second": 3.889,
"eval_steps_per_second": 1.948,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1302058350293811e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}