{
"best_metric": 1.1555566787719727,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.33641715727502103,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00336417157275021,
"grad_norm": 3.9489922523498535,
"learning_rate": 5.000000000000001e-07,
"loss": 6.0569,
"step": 1
},
{
"epoch": 0.00336417157275021,
"eval_loss": 2.1594057083129883,
"eval_runtime": 56.5321,
"eval_samples_per_second": 8.862,
"eval_steps_per_second": 2.229,
"step": 1
},
{
"epoch": 0.00672834314550042,
"grad_norm": 4.482152938842773,
"learning_rate": 1.0000000000000002e-06,
"loss": 7.2046,
"step": 2
},
{
"epoch": 0.010092514718250631,
"grad_norm": 5.5263352394104,
"learning_rate": 1.5e-06,
"loss": 8.4916,
"step": 3
},
{
"epoch": 0.01345668629100084,
"grad_norm": 5.707150459289551,
"learning_rate": 2.0000000000000003e-06,
"loss": 8.8524,
"step": 4
},
{
"epoch": 0.01682085786375105,
"grad_norm": 6.016858100891113,
"learning_rate": 2.5e-06,
"loss": 8.9214,
"step": 5
},
{
"epoch": 0.020185029436501262,
"grad_norm": 6.257847309112549,
"learning_rate": 3e-06,
"loss": 9.1536,
"step": 6
},
{
"epoch": 0.023549201009251473,
"grad_norm": 6.696770191192627,
"learning_rate": 3.5e-06,
"loss": 10.0647,
"step": 7
},
{
"epoch": 0.02691337258200168,
"grad_norm": 7.250229835510254,
"learning_rate": 4.000000000000001e-06,
"loss": 11.0152,
"step": 8
},
{
"epoch": 0.030277544154751892,
"grad_norm": 7.7119245529174805,
"learning_rate": 4.5e-06,
"loss": 11.2444,
"step": 9
},
{
"epoch": 0.0336417157275021,
"grad_norm": 7.244583606719971,
"learning_rate": 5e-06,
"loss": 10.8585,
"step": 10
},
{
"epoch": 0.03700588730025231,
"grad_norm": 7.843494415283203,
"learning_rate": 4.99847706754774e-06,
"loss": 10.627,
"step": 11
},
{
"epoch": 0.040370058873002525,
"grad_norm": 8.349863052368164,
"learning_rate": 4.993910125649561e-06,
"loss": 11.6061,
"step": 12
},
{
"epoch": 0.04373423044575273,
"grad_norm": 7.974443435668945,
"learning_rate": 4.986304738420684e-06,
"loss": 10.578,
"step": 13
},
{
"epoch": 0.04709840201850295,
"grad_norm": 8.698466300964355,
"learning_rate": 4.975670171853926e-06,
"loss": 11.2472,
"step": 14
},
{
"epoch": 0.050462573591253154,
"grad_norm": 8.997441291809082,
"learning_rate": 4.962019382530521e-06,
"loss": 11.7005,
"step": 15
},
{
"epoch": 0.05382674516400336,
"grad_norm": 9.786170959472656,
"learning_rate": 4.9453690018345144e-06,
"loss": 12.2871,
"step": 16
},
{
"epoch": 0.057190916736753576,
"grad_norm": 9.312055587768555,
"learning_rate": 4.925739315689991e-06,
"loss": 12.1815,
"step": 17
},
{
"epoch": 0.060555088309503784,
"grad_norm": 9.848001480102539,
"learning_rate": 4.903154239845798e-06,
"loss": 12.2101,
"step": 18
},
{
"epoch": 0.063919259882254,
"grad_norm": 10.315635681152344,
"learning_rate": 4.8776412907378845e-06,
"loss": 12.0979,
"step": 19
},
{
"epoch": 0.0672834314550042,
"grad_norm": 11.100384712219238,
"learning_rate": 4.849231551964771e-06,
"loss": 12.8286,
"step": 20
},
{
"epoch": 0.07064760302775441,
"grad_norm": 11.478273391723633,
"learning_rate": 4.817959636416969e-06,
"loss": 13.5465,
"step": 21
},
{
"epoch": 0.07401177460050462,
"grad_norm": 11.831793785095215,
"learning_rate": 4.783863644106502e-06,
"loss": 13.9233,
"step": 22
},
{
"epoch": 0.07737594617325484,
"grad_norm": 12.055624008178711,
"learning_rate": 4.746985115747918e-06,
"loss": 13.4583,
"step": 23
},
{
"epoch": 0.08074011774600505,
"grad_norm": 12.33119010925293,
"learning_rate": 4.707368982147318e-06,
"loss": 13.772,
"step": 24
},
{
"epoch": 0.08410428931875526,
"grad_norm": 14.89177131652832,
"learning_rate": 4.665063509461098e-06,
"loss": 15.4565,
"step": 25
},
{
"epoch": 0.08746846089150546,
"grad_norm": 14.688849449157715,
"learning_rate": 4.620120240391065e-06,
"loss": 15.1635,
"step": 26
},
{
"epoch": 0.09083263246425567,
"grad_norm": 14.966446876525879,
"learning_rate": 4.572593931387604e-06,
"loss": 15.1824,
"step": 27
},
{
"epoch": 0.0941968040370059,
"grad_norm": 15.037090301513672,
"learning_rate": 4.522542485937369e-06,
"loss": 15.248,
"step": 28
},
{
"epoch": 0.0975609756097561,
"grad_norm": 14.603853225708008,
"learning_rate": 4.470026884016805e-06,
"loss": 16.433,
"step": 29
},
{
"epoch": 0.10092514718250631,
"grad_norm": 16.51859474182129,
"learning_rate": 4.415111107797445e-06,
"loss": 16.8033,
"step": 30
},
{
"epoch": 0.10428931875525652,
"grad_norm": 16.52759552001953,
"learning_rate": 4.357862063693486e-06,
"loss": 16.493,
"step": 31
},
{
"epoch": 0.10765349032800672,
"grad_norm": 17.3706111907959,
"learning_rate": 4.2983495008466285e-06,
"loss": 16.9004,
"step": 32
},
{
"epoch": 0.11101766190075694,
"grad_norm": 17.978242874145508,
"learning_rate": 4.236645926147493e-06,
"loss": 17.7257,
"step": 33
},
{
"epoch": 0.11438183347350715,
"grad_norm": 17.74814224243164,
"learning_rate": 4.172826515897146e-06,
"loss": 15.9999,
"step": 34
},
{
"epoch": 0.11774600504625736,
"grad_norm": 21.516483306884766,
"learning_rate": 4.106969024216348e-06,
"loss": 19.2002,
"step": 35
},
{
"epoch": 0.12111017661900757,
"grad_norm": 21.676025390625,
"learning_rate": 4.039153688314146e-06,
"loss": 19.3753,
"step": 36
},
{
"epoch": 0.12447434819175777,
"grad_norm": 20.633501052856445,
"learning_rate": 3.969463130731183e-06,
"loss": 17.3614,
"step": 37
},
{
"epoch": 0.127838519764508,
"grad_norm": 23.484853744506836,
"learning_rate": 3.897982258676867e-06,
"loss": 18.6916,
"step": 38
},
{
"epoch": 0.1312026913372582,
"grad_norm": 22.21061897277832,
"learning_rate": 3.824798160583012e-06,
"loss": 18.4351,
"step": 39
},
{
"epoch": 0.1345668629100084,
"grad_norm": 22.049198150634766,
"learning_rate": 3.7500000000000005e-06,
"loss": 18.0943,
"step": 40
},
{
"epoch": 0.13793103448275862,
"grad_norm": 24.594404220581055,
"learning_rate": 3.6736789069647273e-06,
"loss": 19.8341,
"step": 41
},
{
"epoch": 0.14129520605550883,
"grad_norm": 23.650936126708984,
"learning_rate": 3.595927866972694e-06,
"loss": 20.1564,
"step": 42
},
{
"epoch": 0.14465937762825903,
"grad_norm": 24.20736312866211,
"learning_rate": 3.516841607689501e-06,
"loss": 19.0633,
"step": 43
},
{
"epoch": 0.14802354920100924,
"grad_norm": 24.75364112854004,
"learning_rate": 3.436516483539781e-06,
"loss": 20.2374,
"step": 44
},
{
"epoch": 0.15138772077375945,
"grad_norm": 25.638423919677734,
"learning_rate": 3.3550503583141726e-06,
"loss": 20.1314,
"step": 45
},
{
"epoch": 0.15475189234650968,
"grad_norm": 24.455801010131836,
"learning_rate": 3.272542485937369e-06,
"loss": 20.3837,
"step": 46
},
{
"epoch": 0.1581160639192599,
"grad_norm": 28.880544662475586,
"learning_rate": 3.189093389542498e-06,
"loss": 20.9076,
"step": 47
},
{
"epoch": 0.1614802354920101,
"grad_norm": 28.061445236206055,
"learning_rate": 3.1048047389991693e-06,
"loss": 21.1376,
"step": 48
},
{
"epoch": 0.1648444070647603,
"grad_norm": 30.0025577545166,
"learning_rate": 3.019779227044398e-06,
"loss": 22.3604,
"step": 49
},
{
"epoch": 0.16820857863751051,
"grad_norm": 32.991703033447266,
"learning_rate": 2.9341204441673267e-06,
"loss": 23.079,
"step": 50
},
{
"epoch": 0.16820857863751051,
"eval_loss": 1.5404291152954102,
"eval_runtime": 57.1121,
"eval_samples_per_second": 8.772,
"eval_steps_per_second": 2.206,
"step": 50
},
{
"epoch": 0.17157275021026072,
"grad_norm": 5.8676066398620605,
"learning_rate": 2.847932752400164e-06,
"loss": 4.9187,
"step": 51
},
{
"epoch": 0.17493692178301093,
"grad_norm": 6.816829681396484,
"learning_rate": 2.761321158169134e-06,
"loss": 5.2629,
"step": 52
},
{
"epoch": 0.17830109335576114,
"grad_norm": 7.70700740814209,
"learning_rate": 2.6743911843603134e-06,
"loss": 6.4171,
"step": 53
},
{
"epoch": 0.18166526492851134,
"grad_norm": 7.824767589569092,
"learning_rate": 2.587248741756253e-06,
"loss": 6.0923,
"step": 54
},
{
"epoch": 0.18502943650126155,
"grad_norm": 8.090392112731934,
"learning_rate": 2.5e-06,
"loss": 6.6339,
"step": 55
},
{
"epoch": 0.1883936080740118,
"grad_norm": 8.604589462280273,
"learning_rate": 2.4127512582437486e-06,
"loss": 6.1605,
"step": 56
},
{
"epoch": 0.191757779646762,
"grad_norm": 8.222731590270996,
"learning_rate": 2.325608815639687e-06,
"loss": 6.1605,
"step": 57
},
{
"epoch": 0.1951219512195122,
"grad_norm": 10.205119132995605,
"learning_rate": 2.238678841830867e-06,
"loss": 7.0509,
"step": 58
},
{
"epoch": 0.1984861227922624,
"grad_norm": 10.104853630065918,
"learning_rate": 2.1520672475998374e-06,
"loss": 6.9668,
"step": 59
},
{
"epoch": 0.20185029436501262,
"grad_norm": 10.174232482910156,
"learning_rate": 2.0658795558326745e-06,
"loss": 7.273,
"step": 60
},
{
"epoch": 0.20521446593776282,
"grad_norm": 11.0505952835083,
"learning_rate": 1.9802207729556023e-06,
"loss": 7.1026,
"step": 61
},
{
"epoch": 0.20857863751051303,
"grad_norm": 10.55052661895752,
"learning_rate": 1.895195261000831e-06,
"loss": 7.1445,
"step": 62
},
{
"epoch": 0.21194280908326324,
"grad_norm": 10.593603134155273,
"learning_rate": 1.8109066104575023e-06,
"loss": 6.6755,
"step": 63
},
{
"epoch": 0.21530698065601345,
"grad_norm": 10.96327018737793,
"learning_rate": 1.7274575140626318e-06,
"loss": 7.2853,
"step": 64
},
{
"epoch": 0.21867115222876365,
"grad_norm": 10.896140098571777,
"learning_rate": 1.6449496416858285e-06,
"loss": 6.8225,
"step": 65
},
{
"epoch": 0.2220353238015139,
"grad_norm": 10.829171180725098,
"learning_rate": 1.56348351646022e-06,
"loss": 7.2135,
"step": 66
},
{
"epoch": 0.2253994953742641,
"grad_norm": 12.62612247467041,
"learning_rate": 1.4831583923105e-06,
"loss": 8.1609,
"step": 67
},
{
"epoch": 0.2287636669470143,
"grad_norm": 12.466354370117188,
"learning_rate": 1.4040721330273063e-06,
"loss": 7.5541,
"step": 68
},
{
"epoch": 0.2321278385197645,
"grad_norm": 12.358624458312988,
"learning_rate": 1.3263210930352737e-06,
"loss": 8.0098,
"step": 69
},
{
"epoch": 0.23549201009251472,
"grad_norm": 13.029424667358398,
"learning_rate": 1.2500000000000007e-06,
"loss": 7.1127,
"step": 70
},
{
"epoch": 0.23885618166526493,
"grad_norm": 12.36991024017334,
"learning_rate": 1.1752018394169882e-06,
"loss": 7.7343,
"step": 71
},
{
"epoch": 0.24222035323801513,
"grad_norm": 14.253127098083496,
"learning_rate": 1.1020177413231334e-06,
"loss": 8.1448,
"step": 72
},
{
"epoch": 0.24558452481076534,
"grad_norm": 14.902377128601074,
"learning_rate": 1.0305368692688175e-06,
"loss": 8.5399,
"step": 73
},
{
"epoch": 0.24894869638351555,
"grad_norm": 13.825746536254883,
"learning_rate": 9.608463116858544e-07,
"loss": 8.2775,
"step": 74
},
{
"epoch": 0.2523128679562658,
"grad_norm": 14.28534984588623,
"learning_rate": 8.930309757836517e-07,
"loss": 8.3315,
"step": 75
},
{
"epoch": 0.255677039529016,
"grad_norm": 14.561500549316406,
"learning_rate": 8.271734841028553e-07,
"loss": 8.2985,
"step": 76
},
{
"epoch": 0.2590412111017662,
"grad_norm": 16.219362258911133,
"learning_rate": 7.633540738525066e-07,
"loss": 9.6124,
"step": 77
},
{
"epoch": 0.2624053826745164,
"grad_norm": 16.25676155090332,
"learning_rate": 7.016504991533727e-07,
"loss": 9.5415,
"step": 78
},
{
"epoch": 0.2657695542472666,
"grad_norm": 15.985312461853027,
"learning_rate": 6.421379363065142e-07,
"loss": 8.8435,
"step": 79
},
{
"epoch": 0.2691337258200168,
"grad_norm": 17.515792846679688,
"learning_rate": 5.848888922025553e-07,
"loss": 9.9004,
"step": 80
},
{
"epoch": 0.27249789739276703,
"grad_norm": 18.003767013549805,
"learning_rate": 5.299731159831953e-07,
"loss": 9.6217,
"step": 81
},
{
"epoch": 0.27586206896551724,
"grad_norm": 17.7401180267334,
"learning_rate": 4.774575140626317e-07,
"loss": 10.091,
"step": 82
},
{
"epoch": 0.27922624053826745,
"grad_norm": 18.93708038330078,
"learning_rate": 4.27406068612396e-07,
"loss": 10.3955,
"step": 83
},
{
"epoch": 0.28259041211101765,
"grad_norm": 20.82859992980957,
"learning_rate": 3.798797596089351e-07,
"loss": 10.6501,
"step": 84
},
{
"epoch": 0.28595458368376786,
"grad_norm": 20.40983009338379,
"learning_rate": 3.3493649053890325e-07,
"loss": 11.2433,
"step": 85
},
{
"epoch": 0.28931875525651807,
"grad_norm": 20.346174240112305,
"learning_rate": 2.9263101785268253e-07,
"loss": 11.8944,
"step": 86
},
{
"epoch": 0.2926829268292683,
"grad_norm": 21.919641494750977,
"learning_rate": 2.53014884252083e-07,
"loss": 11.9275,
"step": 87
},
{
"epoch": 0.2960470984020185,
"grad_norm": 21.59522819519043,
"learning_rate": 2.1613635589349756e-07,
"loss": 11.1671,
"step": 88
},
{
"epoch": 0.2994112699747687,
"grad_norm": 23.782527923583984,
"learning_rate": 1.8204036358303173e-07,
"loss": 13.3871,
"step": 89
},
{
"epoch": 0.3027754415475189,
"grad_norm": 22.470487594604492,
"learning_rate": 1.507684480352292e-07,
"loss": 12.2082,
"step": 90
},
{
"epoch": 0.30613961312026916,
"grad_norm": 22.669078826904297,
"learning_rate": 1.223587092621162e-07,
"loss": 12.4097,
"step": 91
},
{
"epoch": 0.30950378469301937,
"grad_norm": 23.26660919189453,
"learning_rate": 9.684576015420277e-08,
"loss": 13.0533,
"step": 92
},
{
"epoch": 0.3128679562657696,
"grad_norm": 25.632230758666992,
"learning_rate": 7.426068431000883e-08,
"loss": 14.406,
"step": 93
},
{
"epoch": 0.3162321278385198,
"grad_norm": 23.212289810180664,
"learning_rate": 5.463099816548578e-08,
"loss": 12.672,
"step": 94
},
{
"epoch": 0.31959629941127,
"grad_norm": 26.72962188720703,
"learning_rate": 3.798061746947995e-08,
"loss": 14.1783,
"step": 95
},
{
"epoch": 0.3229604709840202,
"grad_norm": 26.62249183654785,
"learning_rate": 2.4329828146074096e-08,
"loss": 14.9785,
"step": 96
},
{
"epoch": 0.3263246425567704,
"grad_norm": 24.253948211669922,
"learning_rate": 1.3695261579316776e-08,
"loss": 13.4523,
"step": 97
},
{
"epoch": 0.3296888141295206,
"grad_norm": 27.36448860168457,
"learning_rate": 6.089874350439507e-09,
"loss": 15.0062,
"step": 98
},
{
"epoch": 0.3330529857022708,
"grad_norm": 31.766481399536133,
"learning_rate": 1.5229324522605949e-09,
"loss": 15.6437,
"step": 99
},
{
"epoch": 0.33641715727502103,
"grad_norm": 31.894515991210938,
"learning_rate": 0.0,
"loss": 17.4361,
"step": 100
},
{
"epoch": 0.33641715727502103,
"eval_loss": 1.1555566787719727,
"eval_runtime": 57.1133,
"eval_samples_per_second": 8.772,
"eval_steps_per_second": 2.206,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.53913198493696e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}