{
  "best_metric": 0.587303638458252,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.6984866123399301,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009313154831199068,
      "grad_norm": 4.13957405090332,
      "learning_rate": 2e-05,
      "loss": 1.6206,
      "step": 1
    },
    {
      "epoch": 0.009313154831199068,
      "eval_loss": 2.42331862449646,
      "eval_runtime": 16.7951,
      "eval_samples_per_second": 86.156,
      "eval_steps_per_second": 5.418,
      "step": 1
    },
    {
      "epoch": 0.018626309662398137,
      "grad_norm": 5.211028575897217,
      "learning_rate": 4e-05,
      "loss": 1.9336,
      "step": 2
    },
    {
      "epoch": 0.027939464493597205,
      "grad_norm": 5.437460899353027,
      "learning_rate": 6e-05,
      "loss": 1.9453,
      "step": 3
    },
    {
      "epoch": 0.037252619324796274,
      "grad_norm": 4.105167865753174,
      "learning_rate": 8e-05,
      "loss": 1.6002,
      "step": 4
    },
    {
      "epoch": 0.046565774155995346,
      "grad_norm": 3.570579767227173,
      "learning_rate": 0.0001,
      "loss": 1.3408,
      "step": 5
    },
    {
      "epoch": 0.05587892898719441,
      "grad_norm": 2.802839756011963,
      "learning_rate": 9.997266286704631e-05,
      "loss": 1.1321,
      "step": 6
    },
    {
      "epoch": 0.06519208381839348,
      "grad_norm": 2.4726405143737793,
      "learning_rate": 9.989068136093873e-05,
      "loss": 1.0462,
      "step": 7
    },
    {
      "epoch": 0.07450523864959255,
      "grad_norm": 1.6714907884597778,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.9939,
      "step": 8
    },
    {
      "epoch": 0.08381839348079162,
      "grad_norm": 1.7399799823760986,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.9237,
      "step": 9
    },
    {
      "epoch": 0.09313154831199069,
      "grad_norm": 1.7758259773254395,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.9164,
      "step": 10
    },
    {
      "epoch": 0.10244470314318975,
      "grad_norm": 1.526700735092163,
      "learning_rate": 9.901899829374047e-05,
      "loss": 0.8874,
      "step": 11
    },
    {
      "epoch": 0.11175785797438882,
      "grad_norm": 1.484175682067871,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.8658,
      "step": 12
    },
    {
      "epoch": 0.1210710128055879,
      "grad_norm": 2.3066554069519043,
      "learning_rate": 9.826044551386744e-05,
      "loss": 0.8771,
      "step": 13
    },
    {
      "epoch": 0.13038416763678695,
      "grad_norm": 1.369391679763794,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.7882,
      "step": 14
    },
    {
      "epoch": 0.13969732246798602,
      "grad_norm": 2.3046209812164307,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.8022,
      "step": 15
    },
    {
      "epoch": 0.1490104772991851,
      "grad_norm": 1.3936975002288818,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.7166,
      "step": 16
    },
    {
      "epoch": 0.15832363213038417,
      "grad_norm": 1.3665108680725098,
      "learning_rate": 9.611448774886924e-05,
      "loss": 0.7556,
      "step": 17
    },
    {
      "epoch": 0.16763678696158324,
      "grad_norm": 0.9268532991409302,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.7559,
      "step": 18
    },
    {
      "epoch": 0.1769499417927823,
      "grad_norm": 1.043634057044983,
      "learning_rate": 9.473646649103818e-05,
      "loss": 0.741,
      "step": 19
    },
    {
      "epoch": 0.18626309662398138,
      "grad_norm": 1.2843759059906006,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.7239,
      "step": 20
    },
    {
      "epoch": 0.19557625145518046,
      "grad_norm": 1.1267271041870117,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.7646,
      "step": 21
    },
    {
      "epoch": 0.2048894062863795,
      "grad_norm": 1.0400360822677612,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.7485,
      "step": 22
    },
    {
      "epoch": 0.21420256111757857,
      "grad_norm": 1.1732821464538574,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.7245,
      "step": 23
    },
    {
      "epoch": 0.22351571594877764,
      "grad_norm": 1.28049898147583,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.7087,
      "step": 24
    },
    {
      "epoch": 0.23282887077997672,
      "grad_norm": 1.5667166709899902,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.734,
      "step": 25
    },
    {
      "epoch": 0.23282887077997672,
      "eval_loss": 0.7290714979171753,
      "eval_runtime": 16.8674,
      "eval_samples_per_second": 85.787,
      "eval_steps_per_second": 5.395,
      "step": 25
    },
    {
      "epoch": 0.2421420256111758,
      "grad_norm": 1.6958057880401611,
      "learning_rate": 8.842005554284296e-05,
      "loss": 0.7597,
      "step": 26
    },
    {
      "epoch": 0.25145518044237486,
      "grad_norm": 1.23319411277771,
      "learning_rate": 8.73410738492077e-05,
      "loss": 0.6973,
      "step": 27
    },
    {
      "epoch": 0.2607683352735739,
      "grad_norm": 0.9983011484146118,
      "learning_rate": 8.622126023955446e-05,
      "loss": 0.6683,
      "step": 28
    },
    {
      "epoch": 0.270081490104773,
      "grad_norm": 0.9224321842193604,
      "learning_rate": 8.506183921362443e-05,
      "loss": 0.6433,
      "step": 29
    },
    {
      "epoch": 0.27939464493597205,
      "grad_norm": 1.100515604019165,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.7131,
      "step": 30
    },
    {
      "epoch": 0.28870779976717115,
      "grad_norm": 0.9684303402900696,
      "learning_rate": 8.262928807620843e-05,
      "loss": 0.6402,
      "step": 31
    },
    {
      "epoch": 0.2980209545983702,
      "grad_norm": 0.9174941182136536,
      "learning_rate": 8.135881792367686e-05,
      "loss": 0.6461,
      "step": 32
    },
    {
      "epoch": 0.3073341094295693,
      "grad_norm": 0.998300313949585,
      "learning_rate": 8.005405736415126e-05,
      "loss": 0.6634,
      "step": 33
    },
    {
      "epoch": 0.31664726426076834,
      "grad_norm": 1.0989651679992676,
      "learning_rate": 7.871643313414718e-05,
      "loss": 0.6501,
      "step": 34
    },
    {
      "epoch": 0.3259604190919674,
      "grad_norm": 1.152661919593811,
      "learning_rate": 7.734740790612136e-05,
      "loss": 0.7261,
      "step": 35
    },
    {
      "epoch": 0.3352735739231665,
      "grad_norm": 1.1657490730285645,
      "learning_rate": 7.594847868906076e-05,
      "loss": 0.6728,
      "step": 36
    },
    {
      "epoch": 0.3445867287543655,
      "grad_norm": 1.2655208110809326,
      "learning_rate": 7.452117519152542e-05,
      "loss": 0.6361,
      "step": 37
    },
    {
      "epoch": 0.3538998835855646,
      "grad_norm": 1.10227632522583,
      "learning_rate": 7.30670581489344e-05,
      "loss": 0.6442,
      "step": 38
    },
    {
      "epoch": 0.36321303841676367,
      "grad_norm": 0.8832430243492126,
      "learning_rate": 7.158771761692464e-05,
      "loss": 0.6495,
      "step": 39
    },
    {
      "epoch": 0.37252619324796277,
      "grad_norm": 0.7977732419967651,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.6015,
      "step": 40
    },
    {
      "epoch": 0.3818393480791618,
      "grad_norm": 0.8318562507629395,
      "learning_rate": 6.855986244591104e-05,
      "loss": 0.6,
      "step": 41
    },
    {
      "epoch": 0.3911525029103609,
      "grad_norm": 0.8057540655136108,
      "learning_rate": 6.701465872208216e-05,
      "loss": 0.6115,
      "step": 42
    },
    {
      "epoch": 0.40046565774155995,
      "grad_norm": 0.8961812853813171,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.6151,
      "step": 43
    },
    {
      "epoch": 0.409778812572759,
      "grad_norm": 0.9129841923713684,
      "learning_rate": 6.387014543809223e-05,
      "loss": 0.5908,
      "step": 44
    },
    {
      "epoch": 0.4190919674039581,
      "grad_norm": 0.970668613910675,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.6279,
      "step": 45
    },
    {
      "epoch": 0.42840512223515714,
      "grad_norm": 0.9627373814582825,
      "learning_rate": 6.066498153718735e-05,
      "loss": 0.6181,
      "step": 46
    },
    {
      "epoch": 0.43771827706635624,
      "grad_norm": 0.997525691986084,
      "learning_rate": 5.90440267166055e-05,
      "loss": 0.5777,
      "step": 47
    },
    {
      "epoch": 0.4470314318975553,
      "grad_norm": 1.2201300859451294,
      "learning_rate": 5.74131823855921e-05,
      "loss": 0.6627,
      "step": 48
    },
    {
      "epoch": 0.4563445867287544,
      "grad_norm": 1.1989392042160034,
      "learning_rate": 5.577423184847932e-05,
      "loss": 0.6586,
      "step": 49
    },
    {
      "epoch": 0.46565774155995343,
      "grad_norm": 1.3301013708114624,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.6635,
      "step": 50
    },
    {
      "epoch": 0.46565774155995343,
      "eval_loss": 0.6151813268661499,
      "eval_runtime": 16.7916,
      "eval_samples_per_second": 86.174,
      "eval_steps_per_second": 5.419,
      "step": 50
    },
    {
      "epoch": 0.47497089639115253,
      "grad_norm": 2.323246717453003,
      "learning_rate": 5.247918773366112e-05,
      "loss": 0.6403,
      "step": 51
    },
    {
      "epoch": 0.4842840512223516,
      "grad_norm": 1.077490210533142,
      "learning_rate": 5.0826697238317935e-05,
      "loss": 0.5904,
      "step": 52
    },
    {
      "epoch": 0.4935972060535506,
      "grad_norm": 0.8611325621604919,
      "learning_rate": 4.917330276168208e-05,
      "loss": 0.5742,
      "step": 53
    },
    {
      "epoch": 0.5029103608847497,
      "grad_norm": 1.0277608633041382,
      "learning_rate": 4.7520812266338885e-05,
      "loss": 0.594,
      "step": 54
    },
    {
      "epoch": 0.5122235157159488,
      "grad_norm": 1.1187914609909058,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.5995,
      "step": 55
    },
    {
      "epoch": 0.5215366705471478,
      "grad_norm": 0.8779079914093018,
      "learning_rate": 4.4225768151520694e-05,
      "loss": 0.602,
      "step": 56
    },
    {
      "epoch": 0.5308498253783469,
      "grad_norm": 0.9306710362434387,
      "learning_rate": 4.2586817614407895e-05,
      "loss": 0.5815,
      "step": 57
    },
    {
      "epoch": 0.540162980209546,
      "grad_norm": 1.022081971168518,
      "learning_rate": 4.095597328339452e-05,
      "loss": 0.6049,
      "step": 58
    },
    {
      "epoch": 0.5494761350407451,
      "grad_norm": 1.087207555770874,
      "learning_rate": 3.933501846281267e-05,
      "loss": 0.6495,
      "step": 59
    },
    {
      "epoch": 0.5587892898719441,
      "grad_norm": 1.0766098499298096,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.6144,
      "step": 60
    },
    {
      "epoch": 0.5681024447031432,
      "grad_norm": 1.2028203010559082,
      "learning_rate": 3.612985456190778e-05,
      "loss": 0.6065,
      "step": 61
    },
    {
      "epoch": 0.5774155995343423,
      "grad_norm": 1.1321920156478882,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.5828,
      "step": 62
    },
    {
      "epoch": 0.5867287543655413,
      "grad_norm": 1.5199047327041626,
      "learning_rate": 3.298534127791785e-05,
      "loss": 0.6298,
      "step": 63
    },
    {
      "epoch": 0.5960419091967404,
      "grad_norm": 1.229564905166626,
      "learning_rate": 3.144013755408895e-05,
      "loss": 0.5927,
      "step": 64
    },
    {
      "epoch": 0.6053550640279395,
      "grad_norm": 0.8759309649467468,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.5443,
      "step": 65
    },
    {
      "epoch": 0.6146682188591386,
      "grad_norm": 0.7552090287208557,
      "learning_rate": 2.8412282383075363e-05,
      "loss": 0.5487,
      "step": 66
    },
    {
      "epoch": 0.6239813736903376,
      "grad_norm": 0.9531745314598083,
      "learning_rate": 2.693294185106562e-05,
      "loss": 0.5292,
      "step": 67
    },
    {
      "epoch": 0.6332945285215367,
      "grad_norm": 1.0337250232696533,
      "learning_rate": 2.547882480847461e-05,
      "loss": 0.5255,
      "step": 68
    },
    {
      "epoch": 0.6426076833527358,
      "grad_norm": 0.9133326411247253,
      "learning_rate": 2.405152131093926e-05,
      "loss": 0.552,
      "step": 69
    },
    {
      "epoch": 0.6519208381839348,
      "grad_norm": 0.99112468957901,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.5296,
      "step": 70
    },
    {
      "epoch": 0.6612339930151339,
      "grad_norm": 1.1152122020721436,
      "learning_rate": 2.128356686585282e-05,
      "loss": 0.5799,
      "step": 71
    },
    {
      "epoch": 0.670547147846333,
      "grad_norm": 0.9779994487762451,
      "learning_rate": 1.9945942635848748e-05,
      "loss": 0.5775,
      "step": 72
    },
    {
      "epoch": 0.6798603026775321,
      "grad_norm": 0.9947242736816406,
      "learning_rate": 1.8641182076323148e-05,
      "loss": 0.5926,
      "step": 73
    },
    {
      "epoch": 0.689173457508731,
      "grad_norm": 1.2468385696411133,
      "learning_rate": 1.7370711923791567e-05,
      "loss": 0.5915,
      "step": 74
    },
    {
      "epoch": 0.6984866123399301,
      "grad_norm": 1.1480826139450073,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.5552,
      "step": 75
    },
    {
      "epoch": 0.6984866123399301,
      "eval_loss": 0.587303638458252,
      "eval_runtime": 16.8051,
      "eval_samples_per_second": 86.105,
      "eval_steps_per_second": 5.415,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.768391340490752e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}