{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0023866348448687,
  "eval_steps": 27,
  "global_step": 105,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00954653937947494,
      "grad_norm": 0.557092547416687,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.8067,
      "step": 1
    },
    {
      "epoch": 0.00954653937947494,
      "eval_loss": 0.8745781183242798,
      "eval_runtime": 24.2118,
      "eval_samples_per_second": 7.31,
      "eval_steps_per_second": 0.95,
      "step": 1
    },
    {
      "epoch": 0.01909307875894988,
      "grad_norm": 0.3518330752849579,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.8173,
      "step": 2
    },
    {
      "epoch": 0.028639618138424822,
      "grad_norm": 0.44602614641189575,
      "learning_rate": 6e-06,
      "loss": 0.8109,
      "step": 3
    },
    {
      "epoch": 0.03818615751789976,
      "grad_norm": 0.3439026474952698,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.8386,
      "step": 4
    },
    {
      "epoch": 0.0477326968973747,
      "grad_norm": 0.6216179728507996,
      "learning_rate": 1e-05,
      "loss": 0.8368,
      "step": 5
    },
    {
      "epoch": 0.057279236276849645,
      "grad_norm": 0.5188976526260376,
      "learning_rate": 1.2e-05,
      "loss": 0.8412,
      "step": 6
    },
    {
      "epoch": 0.06682577565632458,
      "grad_norm": 0.56082683801651,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 0.8185,
      "step": 7
    },
    {
      "epoch": 0.07637231503579953,
      "grad_norm": 0.3061634302139282,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.8269,
      "step": 8
    },
    {
      "epoch": 0.08591885441527446,
      "grad_norm": 0.40400129556655884,
      "learning_rate": 1.8e-05,
      "loss": 0.9036,
      "step": 9
    },
    {
      "epoch": 0.0954653937947494,
      "grad_norm": 0.3948858976364136,
      "learning_rate": 2e-05,
      "loss": 1.0401,
      "step": 10
    },
    {
      "epoch": 0.10501193317422435,
      "grad_norm": 0.4475814402103424,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.7965,
      "step": 11
    },
    {
      "epoch": 0.11455847255369929,
      "grad_norm": 0.6639652252197266,
      "learning_rate": 2.4e-05,
      "loss": 0.8854,
      "step": 12
    },
    {
      "epoch": 0.12410501193317422,
      "grad_norm": 0.3040810823440552,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 0.823,
      "step": 13
    },
    {
      "epoch": 0.13365155131264916,
      "grad_norm": 0.42495399713516235,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.8349,
      "step": 14
    },
    {
      "epoch": 0.1431980906921241,
      "grad_norm": 0.3658050000667572,
      "learning_rate": 3e-05,
      "loss": 0.9066,
      "step": 15
    },
    {
      "epoch": 0.15274463007159905,
      "grad_norm": 0.5003547668457031,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.915,
      "step": 16
    },
    {
      "epoch": 0.162291169451074,
      "grad_norm": 0.36432796716690063,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 0.8575,
      "step": 17
    },
    {
      "epoch": 0.1718377088305489,
      "grad_norm": 0.5415899157524109,
      "learning_rate": 3.6e-05,
      "loss": 0.9126,
      "step": 18
    },
    {
      "epoch": 0.18138424821002386,
      "grad_norm": 0.47764062881469727,
      "learning_rate": 3.8e-05,
      "loss": 0.7483,
      "step": 19
    },
    {
      "epoch": 0.1909307875894988,
      "grad_norm": 0.7140861749649048,
      "learning_rate": 4e-05,
      "loss": 0.7709,
      "step": 20
    },
    {
      "epoch": 0.20047732696897375,
      "grad_norm": 0.5826449394226074,
      "learning_rate": 4.2e-05,
      "loss": 0.7296,
      "step": 21
    },
    {
      "epoch": 0.2100238663484487,
      "grad_norm": 0.5641223788261414,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.7602,
      "step": 22
    },
    {
      "epoch": 0.21957040572792363,
      "grad_norm": 0.3947891592979431,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.7261,
      "step": 23
    },
    {
      "epoch": 0.22911694510739858,
      "grad_norm": 0.5210841298103333,
      "learning_rate": 4.8e-05,
      "loss": 0.7717,
      "step": 24
    },
    {
      "epoch": 0.2386634844868735,
      "grad_norm": 0.5490278005599976,
      "learning_rate": 5e-05,
      "loss": 0.7453,
      "step": 25
    },
    {
      "epoch": 0.24821002386634844,
      "grad_norm": 0.37281742691993713,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 0.8168,
      "step": 26
    },
    {
      "epoch": 0.2577565632458234,
      "grad_norm": 0.44266802072525024,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 0.7166,
      "step": 27
    },
    {
      "epoch": 0.2577565632458234,
      "eval_loss": 0.766373872756958,
      "eval_runtime": 23.8576,
      "eval_samples_per_second": 7.419,
      "eval_steps_per_second": 0.964,
      "step": 27
    },
    {
      "epoch": 0.26730310262529833,
      "grad_norm": 0.3665648400783539,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 0.767,
      "step": 28
    },
    {
      "epoch": 0.27684964200477324,
      "grad_norm": 0.8591362237930298,
      "learning_rate": 5.8e-05,
      "loss": 0.705,
      "step": 29
    },
    {
      "epoch": 0.2863961813842482,
      "grad_norm": 0.41430267691612244,
      "learning_rate": 6e-05,
      "loss": 0.8619,
      "step": 30
    },
    {
      "epoch": 0.29594272076372313,
      "grad_norm": 0.39334383606910706,
      "learning_rate": 6.2e-05,
      "loss": 0.6811,
      "step": 31
    },
    {
      "epoch": 0.3054892601431981,
      "grad_norm": 0.40349045395851135,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.7721,
      "step": 32
    },
    {
      "epoch": 0.315035799522673,
      "grad_norm": 1.0046414136886597,
      "learning_rate": 6.6e-05,
      "loss": 0.6848,
      "step": 33
    },
    {
      "epoch": 0.324582338902148,
      "grad_norm": 0.3854268789291382,
      "learning_rate": 6.800000000000001e-05,
      "loss": 0.66,
      "step": 34
    },
    {
      "epoch": 0.3341288782816229,
      "grad_norm": 0.4841301441192627,
      "learning_rate": 7e-05,
      "loss": 0.7041,
      "step": 35
    },
    {
      "epoch": 0.3436754176610978,
      "grad_norm": 0.5415286421775818,
      "learning_rate": 7.2e-05,
      "loss": 0.7231,
      "step": 36
    },
    {
      "epoch": 0.3532219570405728,
      "grad_norm": 0.5308293104171753,
      "learning_rate": 7.4e-05,
      "loss": 0.7049,
      "step": 37
    },
    {
      "epoch": 0.3627684964200477,
      "grad_norm": 0.6722102761268616,
      "learning_rate": 7.6e-05,
      "loss": 0.74,
      "step": 38
    },
    {
      "epoch": 0.3723150357995227,
      "grad_norm": 0.32239794731140137,
      "learning_rate": 7.800000000000001e-05,
      "loss": 0.7882,
      "step": 39
    },
    {
      "epoch": 0.3818615751789976,
      "grad_norm": 0.45458799600601196,
      "learning_rate": 8e-05,
      "loss": 0.7267,
      "step": 40
    },
    {
      "epoch": 0.3914081145584726,
      "grad_norm": 0.3064737319946289,
      "learning_rate": 8.2e-05,
      "loss": 0.7723,
      "step": 41
    },
    {
      "epoch": 0.4009546539379475,
      "grad_norm": 0.30180808901786804,
      "learning_rate": 8.4e-05,
      "loss": 0.7049,
      "step": 42
    },
    {
      "epoch": 0.4105011933174224,
      "grad_norm": 0.29743000864982605,
      "learning_rate": 8.6e-05,
      "loss": 0.7717,
      "step": 43
    },
    {
      "epoch": 0.4200477326968974,
      "grad_norm": 0.27107685804367065,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.6435,
      "step": 44
    },
    {
      "epoch": 0.4295942720763723,
      "grad_norm": 0.5112220048904419,
      "learning_rate": 9e-05,
      "loss": 0.6855,
      "step": 45
    },
    {
      "epoch": 0.43914081145584727,
      "grad_norm": 0.31432729959487915,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.7694,
      "step": 46
    },
    {
      "epoch": 0.4486873508353222,
      "grad_norm": 0.28413301706314087,
      "learning_rate": 9.4e-05,
      "loss": 0.6846,
      "step": 47
    },
    {
      "epoch": 0.45823389021479716,
      "grad_norm": 0.24350856244564056,
      "learning_rate": 9.6e-05,
      "loss": 0.6338,
      "step": 48
    },
    {
      "epoch": 0.4677804295942721,
      "grad_norm": 0.4011653959751129,
      "learning_rate": 9.8e-05,
      "loss": 0.7449,
      "step": 49
    },
    {
      "epoch": 0.477326968973747,
      "grad_norm": 1.8651820421218872,
      "learning_rate": 0.0001,
      "loss": 0.7579,
      "step": 50
    },
    {
      "epoch": 0.48687350835322196,
      "grad_norm": 0.35909950733184814,
      "learning_rate": 9.991845519630678e-05,
      "loss": 0.7355,
      "step": 51
    },
    {
      "epoch": 0.4964200477326969,
      "grad_norm": 0.3429948091506958,
      "learning_rate": 9.967408676742751e-05,
      "loss": 0.769,
      "step": 52
    },
    {
      "epoch": 0.5059665871121718,
      "grad_norm": 0.2979097068309784,
      "learning_rate": 9.926769179238466e-05,
      "loss": 0.7101,
      "step": 53
    },
    {
      "epoch": 0.5155131264916468,
      "grad_norm": 0.4369233250617981,
      "learning_rate": 9.870059584711668e-05,
      "loss": 0.7202,
      "step": 54
    },
    {
      "epoch": 0.5155131264916468,
      "eval_loss": 0.727745771408081,
      "eval_runtime": 24.7767,
      "eval_samples_per_second": 7.144,
      "eval_steps_per_second": 0.928,
      "step": 54
    },
    {
      "epoch": 0.5250596658711217,
      "grad_norm": 0.30109623074531555,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.7224,
      "step": 55
    },
    {
      "epoch": 0.5346062052505967,
      "grad_norm": 0.3661574125289917,
      "learning_rate": 9.709221818197624e-05,
      "loss": 0.6922,
      "step": 56
    },
    {
      "epoch": 0.5441527446300716,
      "grad_norm": 0.6971948146820068,
      "learning_rate": 9.60561826557425e-05,
      "loss": 0.6352,
      "step": 57
    },
    {
      "epoch": 0.5536992840095465,
      "grad_norm": 0.3875482976436615,
      "learning_rate": 9.486992143456792e-05,
      "loss": 0.7978,
      "step": 58
    },
    {
      "epoch": 0.5632458233890215,
      "grad_norm": 0.36422282457351685,
      "learning_rate": 9.353730385598887e-05,
      "loss": 0.6756,
      "step": 59
    },
    {
      "epoch": 0.5727923627684964,
      "grad_norm": 0.2757071852684021,
      "learning_rate": 9.206267664155907e-05,
      "loss": 0.6597,
      "step": 60
    },
    {
      "epoch": 0.5823389021479713,
      "grad_norm": 0.41333040595054626,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.7087,
      "step": 61
    },
    {
      "epoch": 0.5918854415274463,
      "grad_norm": 0.2851635217666626,
      "learning_rate": 8.870708053195413e-05,
      "loss": 0.6639,
      "step": 62
    },
    {
      "epoch": 0.6014319809069213,
      "grad_norm": 0.8846507668495178,
      "learning_rate": 8.683705689382024e-05,
      "loss": 0.7019,
      "step": 63
    },
    {
      "epoch": 0.6109785202863962,
      "grad_norm": 0.4758082926273346,
      "learning_rate": 8.484687843276469e-05,
      "loss": 0.7133,
      "step": 64
    },
    {
      "epoch": 0.6205250596658711,
      "grad_norm": 0.5413456559181213,
      "learning_rate": 8.274303669726426e-05,
      "loss": 0.6502,
      "step": 65
    },
    {
      "epoch": 0.630071599045346,
      "grad_norm": 0.2852914333343506,
      "learning_rate": 8.053239398177191e-05,
      "loss": 0.7473,
      "step": 66
    },
    {
      "epoch": 0.639618138424821,
      "grad_norm": 0.2875913381576538,
      "learning_rate": 7.822216094333847e-05,
      "loss": 0.7285,
      "step": 67
    },
    {
      "epoch": 0.649164677804296,
      "grad_norm": 0.38858604431152344,
      "learning_rate": 7.58198730819481e-05,
      "loss": 0.7041,
      "step": 68
    },
    {
      "epoch": 0.6587112171837709,
      "grad_norm": 0.3244612216949463,
      "learning_rate": 7.333336616128369e-05,
      "loss": 0.7474,
      "step": 69
    },
    {
      "epoch": 0.6682577565632458,
      "grad_norm": 0.422497421503067,
      "learning_rate": 7.077075065009433e-05,
      "loss": 0.7319,
      "step": 70
    },
    {
      "epoch": 0.6778042959427207,
      "grad_norm": 0.5892928838729858,
      "learning_rate": 6.814038526753205e-05,
      "loss": 0.7233,
      "step": 71
    },
    {
      "epoch": 0.6873508353221957,
      "grad_norm": 0.36724957823753357,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.719,
      "step": 72
    },
    {
      "epoch": 0.6968973747016707,
      "grad_norm": 0.31456321477890015,
      "learning_rate": 6.271091670967436e-05,
      "loss": 0.7036,
      "step": 73
    },
    {
      "epoch": 0.7064439140811456,
      "grad_norm": 0.3656250536441803,
      "learning_rate": 5.992952333228728e-05,
      "loss": 0.7342,
      "step": 74
    },
    {
      "epoch": 0.7159904534606205,
      "grad_norm": 0.46808162331581116,
      "learning_rate": 5.7115741913664264e-05,
      "loss": 0.7315,
      "step": 75
    },
    {
      "epoch": 0.7255369928400954,
      "grad_norm": 0.24579700827598572,
      "learning_rate": 5.427875042394199e-05,
      "loss": 0.7165,
      "step": 76
    },
    {
      "epoch": 0.7350835322195705,
      "grad_norm": 0.3572556674480438,
      "learning_rate": 5.142780253968481e-05,
      "loss": 0.8029,
      "step": 77
    },
    {
      "epoch": 0.7446300715990454,
      "grad_norm": 0.32164067029953003,
      "learning_rate": 4.85721974603152e-05,
      "loss": 0.7031,
      "step": 78
    },
    {
      "epoch": 0.7541766109785203,
      "grad_norm": 0.4904593825340271,
      "learning_rate": 4.5721249576058027e-05,
      "loss": 0.7183,
      "step": 79
    },
    {
      "epoch": 0.7637231503579952,
      "grad_norm": 0.3569331765174866,
      "learning_rate": 4.288425808633575e-05,
      "loss": 0.6614,
      "step": 80
    },
    {
      "epoch": 0.7732696897374701,
      "grad_norm": 0.25237053632736206,
      "learning_rate": 4.007047666771274e-05,
      "loss": 0.5782,
      "step": 81
    },
    {
      "epoch": 0.7732696897374701,
      "eval_loss": 0.716273307800293,
      "eval_runtime": 23.8764,
      "eval_samples_per_second": 7.413,
      "eval_steps_per_second": 0.963,
      "step": 81
    },
    {
      "epoch": 0.7828162291169452,
      "grad_norm": 0.5811595916748047,
      "learning_rate": 3.728908329032567e-05,
      "loss": 0.751,
      "step": 82
    },
    {
      "epoch": 0.7923627684964201,
      "grad_norm": 0.3307061493396759,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.6591,
      "step": 83
    },
    {
      "epoch": 0.801909307875895,
      "grad_norm": 0.37130656838417053,
      "learning_rate": 3.1859614732467954e-05,
      "loss": 0.6976,
      "step": 84
    },
    {
      "epoch": 0.8114558472553699,
      "grad_norm": 0.34512534737586975,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 0.6458,
      "step": 85
    },
    {
      "epoch": 0.8210023866348448,
      "grad_norm": 0.7749953866004944,
      "learning_rate": 2.6666633838716314e-05,
      "loss": 0.6685,
      "step": 86
    },
    {
      "epoch": 0.8305489260143198,
      "grad_norm": 0.4207349419593811,
      "learning_rate": 2.418012691805191e-05,
      "loss": 0.7796,
      "step": 87
    },
    {
      "epoch": 0.8400954653937948,
      "grad_norm": 0.4382290244102478,
      "learning_rate": 2.1777839056661554e-05,
      "loss": 0.7064,
      "step": 88
    },
    {
      "epoch": 0.8496420047732697,
      "grad_norm": 0.2827201783657074,
      "learning_rate": 1.946760601822809e-05,
      "loss": 0.7097,
      "step": 89
    },
    {
      "epoch": 0.8591885441527446,
      "grad_norm": 1.0254180431365967,
      "learning_rate": 1.725696330273575e-05,
      "loss": 0.7281,
      "step": 90
    },
    {
      "epoch": 0.8687350835322196,
      "grad_norm": 0.36662358045578003,
      "learning_rate": 1.5153121567235335e-05,
      "loss": 0.6881,
      "step": 91
    },
    {
      "epoch": 0.8782816229116945,
      "grad_norm": 0.4404708445072174,
      "learning_rate": 1.3162943106179749e-05,
      "loss": 0.7104,
      "step": 92
    },
    {
      "epoch": 0.8878281622911695,
      "grad_norm": 0.35020115971565247,
      "learning_rate": 1.1292919468045877e-05,
      "loss": 0.8352,
      "step": 93
    },
    {
      "epoch": 0.8973747016706444,
      "grad_norm": 0.31341487169265747,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.747,
      "step": 94
    },
    {
      "epoch": 0.9069212410501193,
      "grad_norm": 0.29987096786499023,
      "learning_rate": 7.937323358440935e-06,
      "loss": 0.7966,
      "step": 95
    },
    {
      "epoch": 0.9164677804295943,
      "grad_norm": 0.32762154936790466,
      "learning_rate": 6.462696144011149e-06,
      "loss": 0.7366,
      "step": 96
    },
    {
      "epoch": 0.9260143198090692,
      "grad_norm": 0.7294496893882751,
      "learning_rate": 5.13007856543209e-06,
      "loss": 0.6375,
      "step": 97
    },
    {
      "epoch": 0.9355608591885441,
      "grad_norm": 0.4568740725517273,
      "learning_rate": 3.9438173442575e-06,
      "loss": 0.6917,
      "step": 98
    },
    {
      "epoch": 0.9451073985680191,
      "grad_norm": 0.37320995330810547,
      "learning_rate": 2.9077818180237693e-06,
      "loss": 0.6043,
      "step": 99
    },
    {
      "epoch": 0.954653937947494,
      "grad_norm": 0.4177183508872986,
      "learning_rate": 2.0253513192751373e-06,
      "loss": 0.643,
      "step": 100
    },
    {
      "epoch": 0.964200477326969,
      "grad_norm": 0.3155246078968048,
      "learning_rate": 1.2994041528833266e-06,
      "loss": 0.6766,
      "step": 101
    },
    {
      "epoch": 0.9737470167064439,
      "grad_norm": 0.23985552787780762,
      "learning_rate": 7.323082076153509e-07,
      "loss": 0.6213,
      "step": 102
    },
    {
      "epoch": 0.9832935560859188,
      "grad_norm": 0.3547389805316925,
      "learning_rate": 3.2591323257248893e-07,
      "loss": 0.7614,
      "step": 103
    },
    {
      "epoch": 0.9928400954653938,
      "grad_norm": 0.31457066535949707,
      "learning_rate": 8.15448036932176e-08,
      "loss": 0.6257,
      "step": 104
    },
    {
      "epoch": 1.0023866348448687,
      "grad_norm": 0.42900577187538147,
      "learning_rate": 0.0,
      "loss": 0.6607,
      "step": 105
    }
  ],
  "logging_steps": 1,
  "max_steps": 105,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.231853181907763e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}