Training in progress, step 75, checkpoint (commit cb61680)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.44776119402985076,
"eval_steps": 9,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005970149253731343,
"grad_norm": 1.2009718418121338,
"learning_rate": 1e-05,
"loss": 7.1874,
"step": 1
},
{
"epoch": 0.005970149253731343,
"eval_loss": 3.6131668090820312,
"eval_runtime": 43.8864,
"eval_samples_per_second": 3.213,
"eval_steps_per_second": 0.41,
"step": 1
},
{
"epoch": 0.011940298507462687,
"grad_norm": 1.276452660560608,
"learning_rate": 2e-05,
"loss": 7.3753,
"step": 2
},
{
"epoch": 0.01791044776119403,
"grad_norm": 1.169365406036377,
"learning_rate": 3e-05,
"loss": 7.1167,
"step": 3
},
{
"epoch": 0.023880597014925373,
"grad_norm": 1.4007748365402222,
"learning_rate": 4e-05,
"loss": 7.2851,
"step": 4
},
{
"epoch": 0.029850746268656716,
"grad_norm": 1.4416389465332031,
"learning_rate": 5e-05,
"loss": 6.9871,
"step": 5
},
{
"epoch": 0.03582089552238806,
"grad_norm": 1.5692150592803955,
"learning_rate": 6e-05,
"loss": 7.028,
"step": 6
},
{
"epoch": 0.041791044776119404,
"grad_norm": 1.3784325122833252,
"learning_rate": 7e-05,
"loss": 6.816,
"step": 7
},
{
"epoch": 0.04776119402985075,
"grad_norm": 1.6804227828979492,
"learning_rate": 8e-05,
"loss": 6.9157,
"step": 8
},
{
"epoch": 0.05373134328358209,
"grad_norm": 1.4350379705429077,
"learning_rate": 9e-05,
"loss": 6.7714,
"step": 9
},
{
"epoch": 0.05373134328358209,
"eval_loss": 3.358555316925049,
"eval_runtime": 44.0564,
"eval_samples_per_second": 3.2,
"eval_steps_per_second": 0.409,
"step": 9
},
{
"epoch": 0.05970149253731343,
"grad_norm": 1.7629762887954712,
"learning_rate": 0.0001,
"loss": 6.7025,
"step": 10
},
{
"epoch": 0.06567164179104477,
"grad_norm": 1.9589104652404785,
"learning_rate": 9.99695413509548e-05,
"loss": 6.7321,
"step": 11
},
{
"epoch": 0.07164179104477612,
"grad_norm": 1.3560996055603027,
"learning_rate": 9.987820251299122e-05,
"loss": 6.4935,
"step": 12
},
{
"epoch": 0.07761194029850746,
"grad_norm": 1.097153663635254,
"learning_rate": 9.972609476841367e-05,
"loss": 6.4837,
"step": 13
},
{
"epoch": 0.08358208955223881,
"grad_norm": 0.8275943994522095,
"learning_rate": 9.951340343707852e-05,
"loss": 6.2538,
"step": 14
},
{
"epoch": 0.08955223880597014,
"grad_norm": 0.6273999810218811,
"learning_rate": 9.924038765061042e-05,
"loss": 6.2581,
"step": 15
},
{
"epoch": 0.0955223880597015,
"grad_norm": 0.6474035978317261,
"learning_rate": 9.890738003669029e-05,
"loss": 6.2101,
"step": 16
},
{
"epoch": 0.10149253731343283,
"grad_norm": 0.7804368734359741,
"learning_rate": 9.851478631379982e-05,
"loss": 6.1777,
"step": 17
},
{
"epoch": 0.10746268656716418,
"grad_norm": 0.8049015402793884,
"learning_rate": 9.806308479691595e-05,
"loss": 6.2771,
"step": 18
},
{
"epoch": 0.10746268656716418,
"eval_loss": 3.0857627391815186,
"eval_runtime": 44.0268,
"eval_samples_per_second": 3.203,
"eval_steps_per_second": 0.409,
"step": 18
},
{
"epoch": 0.11343283582089553,
"grad_norm": 0.6202137470245361,
"learning_rate": 9.755282581475769e-05,
"loss": 6.1886,
"step": 19
},
{
"epoch": 0.11940298507462686,
"grad_norm": 0.6927691102027893,
"learning_rate": 9.698463103929542e-05,
"loss": 6.2423,
"step": 20
},
{
"epoch": 0.1253731343283582,
"grad_norm": 0.6712962985038757,
"learning_rate": 9.635919272833938e-05,
"loss": 6.1187,
"step": 21
},
{
"epoch": 0.13134328358208955,
"grad_norm": 0.39803004264831543,
"learning_rate": 9.567727288213005e-05,
"loss": 6.1228,
"step": 22
},
{
"epoch": 0.1373134328358209,
"grad_norm": 0.8054197430610657,
"learning_rate": 9.493970231495835e-05,
"loss": 6.0574,
"step": 23
},
{
"epoch": 0.14328358208955225,
"grad_norm": 0.35388606786727905,
"learning_rate": 9.414737964294636e-05,
"loss": 6.0201,
"step": 24
},
{
"epoch": 0.14925373134328357,
"grad_norm": 0.6076826453208923,
"learning_rate": 9.330127018922194e-05,
"loss": 6.1896,
"step": 25
},
{
"epoch": 0.15522388059701492,
"grad_norm": 0.7924426198005676,
"learning_rate": 9.24024048078213e-05,
"loss": 6.0612,
"step": 26
},
{
"epoch": 0.16119402985074627,
"grad_norm": 0.4275815784931183,
"learning_rate": 9.145187862775209e-05,
"loss": 6.0404,
"step": 27
},
{
"epoch": 0.16119402985074627,
"eval_loss": 3.0337231159210205,
"eval_runtime": 44.0377,
"eval_samples_per_second": 3.202,
"eval_steps_per_second": 0.409,
"step": 27
},
{
"epoch": 0.16716417910447762,
"grad_norm": 0.706186056137085,
"learning_rate": 9.045084971874738e-05,
"loss": 5.9746,
"step": 28
},
{
"epoch": 0.17313432835820897,
"grad_norm": 0.7469488978385925,
"learning_rate": 8.940053768033609e-05,
"loss": 6.0592,
"step": 29
},
{
"epoch": 0.1791044776119403,
"grad_norm": 0.3669968843460083,
"learning_rate": 8.83022221559489e-05,
"loss": 6.0306,
"step": 30
},
{
"epoch": 0.18507462686567164,
"grad_norm": 1.3864649534225464,
"learning_rate": 8.715724127386972e-05,
"loss": 6.0186,
"step": 31
},
{
"epoch": 0.191044776119403,
"grad_norm": 0.3378469944000244,
"learning_rate": 8.596699001693255e-05,
"loss": 6.0835,
"step": 32
},
{
"epoch": 0.19701492537313434,
"grad_norm": 1.3633979558944702,
"learning_rate": 8.473291852294987e-05,
"loss": 6.0245,
"step": 33
},
{
"epoch": 0.20298507462686566,
"grad_norm": 0.9031431078910828,
"learning_rate": 8.345653031794292e-05,
"loss": 5.9733,
"step": 34
},
{
"epoch": 0.208955223880597,
"grad_norm": 0.328767865896225,
"learning_rate": 8.213938048432697e-05,
"loss": 6.0025,
"step": 35
},
{
"epoch": 0.21492537313432836,
"grad_norm": 1.1566050052642822,
"learning_rate": 8.07830737662829e-05,
"loss": 6.0371,
"step": 36
},
{
"epoch": 0.21492537313432836,
"eval_loss": 3.032942056655884,
"eval_runtime": 44.0184,
"eval_samples_per_second": 3.203,
"eval_steps_per_second": 0.409,
"step": 36
},
{
"epoch": 0.2208955223880597,
"grad_norm": 1.31416654586792,
"learning_rate": 7.938926261462366e-05,
"loss": 6.1139,
"step": 37
},
{
"epoch": 0.22686567164179106,
"grad_norm": 0.2981594502925873,
"learning_rate": 7.795964517353735e-05,
"loss": 6.0316,
"step": 38
},
{
"epoch": 0.23283582089552238,
"grad_norm": 0.7097679972648621,
"learning_rate": 7.649596321166024e-05,
"loss": 5.9398,
"step": 39
},
{
"epoch": 0.23880597014925373,
"grad_norm": 1.0947890281677246,
"learning_rate": 7.500000000000001e-05,
"loss": 5.9944,
"step": 40
},
{
"epoch": 0.24477611940298508,
"grad_norm": 0.27537035942077637,
"learning_rate": 7.347357813929454e-05,
"loss": 6.0236,
"step": 41
},
{
"epoch": 0.2507462686567164,
"grad_norm": 0.45621415972709656,
"learning_rate": 7.191855733945387e-05,
"loss": 5.949,
"step": 42
},
{
"epoch": 0.25671641791044775,
"grad_norm": 0.20884762704372406,
"learning_rate": 7.033683215379002e-05,
"loss": 6.0089,
"step": 43
},
{
"epoch": 0.2626865671641791,
"grad_norm": 0.21904632449150085,
"learning_rate": 6.873032967079561e-05,
"loss": 5.973,
"step": 44
},
{
"epoch": 0.26865671641791045,
"grad_norm": 0.36318573355674744,
"learning_rate": 6.710100716628344e-05,
"loss": 5.9505,
"step": 45
},
{
"epoch": 0.26865671641791045,
"eval_loss": 3.018928289413452,
"eval_runtime": 44.0252,
"eval_samples_per_second": 3.203,
"eval_steps_per_second": 0.409,
"step": 45
},
{
"epoch": 0.2746268656716418,
"grad_norm": 0.4027023911476135,
"learning_rate": 6.545084971874738e-05,
"loss": 6.1368,
"step": 46
},
{
"epoch": 0.28059701492537314,
"grad_norm": 0.7140593528747559,
"learning_rate": 6.378186779084995e-05,
"loss": 6.0893,
"step": 47
},
{
"epoch": 0.2865671641791045,
"grad_norm": 0.27870509028434753,
"learning_rate": 6.209609477998338e-05,
"loss": 6.0323,
"step": 48
},
{
"epoch": 0.29253731343283584,
"grad_norm": 0.8685962557792664,
"learning_rate": 6.0395584540887963e-05,
"loss": 6.1308,
"step": 49
},
{
"epoch": 0.29850746268656714,
"grad_norm": 0.5305348634719849,
"learning_rate": 5.868240888334653e-05,
"loss": 6.0474,
"step": 50
},
{
"epoch": 0.3044776119402985,
"grad_norm": 0.8471565842628479,
"learning_rate": 5.695865504800327e-05,
"loss": 6.0034,
"step": 51
},
{
"epoch": 0.31044776119402984,
"grad_norm": 1.006734013557434,
"learning_rate": 5.522642316338268e-05,
"loss": 6.0647,
"step": 52
},
{
"epoch": 0.3164179104477612,
"grad_norm": 1.746330976486206,
"learning_rate": 5.348782368720626e-05,
"loss": 6.1285,
"step": 53
},
{
"epoch": 0.32238805970149254,
"grad_norm": 0.7526426911354065,
"learning_rate": 5.174497483512506e-05,
"loss": 6.0263,
"step": 54
},
{
"epoch": 0.32238805970149254,
"eval_loss": 3.018310546875,
"eval_runtime": 43.9264,
"eval_samples_per_second": 3.21,
"eval_steps_per_second": 0.41,
"step": 54
},
{
"epoch": 0.3283582089552239,
"grad_norm": 0.24178102612495422,
"learning_rate": 5e-05,
"loss": 5.9838,
"step": 55
},
{
"epoch": 0.33432835820895523,
"grad_norm": 1.259934425354004,
"learning_rate": 4.825502516487497e-05,
"loss": 6.0442,
"step": 56
},
{
"epoch": 0.3402985074626866,
"grad_norm": 0.9597307443618774,
"learning_rate": 4.6512176312793736e-05,
"loss": 6.0847,
"step": 57
},
{
"epoch": 0.34626865671641793,
"grad_norm": 0.7438874244689941,
"learning_rate": 4.477357683661734e-05,
"loss": 6.0213,
"step": 58
},
{
"epoch": 0.3522388059701492,
"grad_norm": 0.2544308304786682,
"learning_rate": 4.3041344951996746e-05,
"loss": 6.0455,
"step": 59
},
{
"epoch": 0.3582089552238806,
"grad_norm": 0.6676498055458069,
"learning_rate": 4.131759111665349e-05,
"loss": 6.0191,
"step": 60
},
{
"epoch": 0.3641791044776119,
"grad_norm": 1.6943082809448242,
"learning_rate": 3.960441545911204e-05,
"loss": 5.9287,
"step": 61
},
{
"epoch": 0.3701492537313433,
"grad_norm": 1.1571624279022217,
"learning_rate": 3.790390522001662e-05,
"loss": 6.0959,
"step": 62
},
{
"epoch": 0.3761194029850746,
"grad_norm": 0.43850600719451904,
"learning_rate": 3.6218132209150045e-05,
"loss": 5.9537,
"step": 63
},
{
"epoch": 0.3761194029850746,
"eval_loss": 3.017953395843506,
"eval_runtime": 43.8204,
"eval_samples_per_second": 3.218,
"eval_steps_per_second": 0.411,
"step": 63
},
{
"epoch": 0.382089552238806,
"grad_norm": 0.7406497597694397,
"learning_rate": 3.4549150281252636e-05,
"loss": 6.1041,
"step": 64
},
{
"epoch": 0.3880597014925373,
"grad_norm": 1.1883533000946045,
"learning_rate": 3.289899283371657e-05,
"loss": 6.0868,
"step": 65
},
{
"epoch": 0.3940298507462687,
"grad_norm": 1.2352616786956787,
"learning_rate": 3.12696703292044e-05,
"loss": 6.0639,
"step": 66
},
{
"epoch": 0.4,
"grad_norm": 0.7118608355522156,
"learning_rate": 2.9663167846209998e-05,
"loss": 5.969,
"step": 67
},
{
"epoch": 0.4059701492537313,
"grad_norm": 0.40361595153808594,
"learning_rate": 2.8081442660546125e-05,
"loss": 5.9868,
"step": 68
},
{
"epoch": 0.41194029850746267,
"grad_norm": 0.34668585658073425,
"learning_rate": 2.6526421860705473e-05,
"loss": 6.0418,
"step": 69
},
{
"epoch": 0.417910447761194,
"grad_norm": 0.48091813921928406,
"learning_rate": 2.500000000000001e-05,
"loss": 6.0231,
"step": 70
},
{
"epoch": 0.42388059701492536,
"grad_norm": 0.5058419704437256,
"learning_rate": 2.350403678833976e-05,
"loss": 6.0534,
"step": 71
},
{
"epoch": 0.4298507462686567,
"grad_norm": 0.36839550733566284,
"learning_rate": 2.2040354826462668e-05,
"loss": 6.0709,
"step": 72
},
{
"epoch": 0.4298507462686567,
"eval_loss": 3.015293836593628,
"eval_runtime": 43.8313,
"eval_samples_per_second": 3.217,
"eval_steps_per_second": 0.411,
"step": 72
},
{
"epoch": 0.43582089552238806,
"grad_norm": 0.4969329833984375,
"learning_rate": 2.061073738537635e-05,
"loss": 6.0208,
"step": 73
},
{
"epoch": 0.4417910447761194,
"grad_norm": 0.3160986304283142,
"learning_rate": 1.9216926233717085e-05,
"loss": 6.0143,
"step": 74
},
{
"epoch": 0.44776119402985076,
"grad_norm": 0.43142208456993103,
"learning_rate": 1.7860619515673033e-05,
"loss": 6.0745,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.897989119410176e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
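
The file above is a standard Hugging Face Trainer state (trainer_state.json) saved alongside the step-75 checkpoint: log_history interleaves a training entry every step (logging_steps = 1) with an evaluation entry every 9 steps (eval_steps = 9), and the learning-rate column is consistent with a warmup to 1e-4 over the first 10 steps followed by a cosine decay toward max_steps = 100. Below is a minimal sketch for inspecting the file, assuming it is saved locally as trainer_state.json; the filename and the printed summary are illustrative choices, not part of the checkpoint itself.

# Load the trainer state and pull out the train/eval loss curves.
import json

with open("trainer_state.json") as f:   # assumed local path to the file above
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"progress: step {state['global_step']} / {state['max_steps']}"
      f" (epoch {state['epoch']:.3f})")
print("last train loss:", train[-1][1])
print("eval loss by step:", evals)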