{ |
|
"best_metric": 0.1996617466211319, |
|
"best_model_checkpoint": "miner_id_24/checkpoint-200", |
|
"epoch": 1.7660044150110377, |
|
"eval_steps": 25, |
|
"global_step": 200, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.008830022075055188, |
|
"grad_norm": 0.539530336856842, |
|
"learning_rate": 2.9999999999999997e-05, |
|
"loss": 0.7436, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.008830022075055188, |
|
"eval_loss": 0.7710736989974976, |
|
"eval_runtime": 8.9925, |
|
"eval_samples_per_second": 5.56, |
|
"eval_steps_per_second": 0.778, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.017660044150110375, |
|
"grad_norm": 0.6179248094558716, |
|
"learning_rate": 5.9999999999999995e-05, |
|
"loss": 0.8066, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.026490066225165563, |
|
"grad_norm": 0.6645671725273132, |
|
"learning_rate": 8.999999999999999e-05, |
|
"loss": 0.8396, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.03532008830022075, |
|
"grad_norm": 0.6200567483901978, |
|
"learning_rate": 0.00011999999999999999, |
|
"loss": 0.7884, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.04415011037527594, |
|
"grad_norm": 0.5380943417549133, |
|
"learning_rate": 0.00015, |
|
"loss": 0.5853, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.052980132450331126, |
|
"grad_norm": 0.5036277174949646, |
|
"learning_rate": 0.00017999999999999998, |
|
"loss": 0.5389, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.06181015452538632, |
|
"grad_norm": 0.6514653563499451, |
|
"learning_rate": 0.00020999999999999998, |
|
"loss": 0.4683, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.0706401766004415, |
|
"grad_norm": 0.9316419959068298, |
|
"learning_rate": 0.00023999999999999998, |
|
"loss": 0.54, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.07947019867549669, |
|
"grad_norm": 0.5196866989135742, |
|
"learning_rate": 0.00027, |
|
"loss": 0.4545, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.08830022075055188, |
|
"grad_norm": 0.5413261651992798, |
|
"learning_rate": 0.0003, |
|
"loss": 0.4244, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.09713024282560706, |
|
"grad_norm": 0.5664142966270447, |
|
"learning_rate": 0.0002999794957488703, |
|
"loss": 0.3903, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.10596026490066225, |
|
"grad_norm": 0.6020936965942383, |
|
"learning_rate": 0.0002999179886011389, |
|
"loss": 0.4023, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.11479028697571744, |
|
"grad_norm": 0.49377548694610596, |
|
"learning_rate": 0.0002998154953722457, |
|
"loss": 0.3985, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.12362030905077263, |
|
"grad_norm": 0.39252859354019165, |
|
"learning_rate": 0.00029967204408281613, |
|
"loss": 0.3617, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.13245033112582782, |
|
"grad_norm": 0.4053530991077423, |
|
"learning_rate": 0.00029948767395100045, |
|
"loss": 0.3259, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.141280353200883, |
|
"grad_norm": 0.4019186794757843, |
|
"learning_rate": 0.0002992624353817517, |
|
"loss": 0.3568, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.15011037527593818, |
|
"grad_norm": 0.38053813576698303, |
|
"learning_rate": 0.0002989963899530457, |
|
"loss": 0.3454, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.15894039735099338, |
|
"grad_norm": 0.3768708407878876, |
|
"learning_rate": 0.00029868961039904624, |
|
"loss": 0.3432, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.16777041942604856, |
|
"grad_norm": 0.33955276012420654, |
|
"learning_rate": 0.00029834218059022024, |
|
"loss": 0.3089, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.17660044150110377, |
|
"grad_norm": 0.31420886516571045, |
|
"learning_rate": 0.00029795419551040833, |
|
"loss": 0.2576, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.18543046357615894, |
|
"grad_norm": 0.3766714036464691, |
|
"learning_rate": 0.00029752576123085736, |
|
"loss": 0.3017, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.19426048565121412, |
|
"grad_norm": 0.4495747983455658, |
|
"learning_rate": 0.0002970569948812214, |
|
"loss": 0.2717, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.20309050772626933, |
|
"grad_norm": 0.3679252564907074, |
|
"learning_rate": 0.0002965480246175399, |
|
"loss": 0.2564, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.2119205298013245, |
|
"grad_norm": 0.35804858803749084, |
|
"learning_rate": 0.0002959989895872009, |
|
"loss": 0.2278, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.22075055187637968, |
|
"grad_norm": 0.49645286798477173, |
|
"learning_rate": 0.0002954100398908995, |
|
"loss": 0.2242, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.22075055187637968, |
|
"eval_loss": 0.3220010995864868, |
|
"eval_runtime": 8.6165, |
|
"eval_samples_per_second": 5.803, |
|
"eval_steps_per_second": 0.812, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.22958057395143489, |
|
"grad_norm": 0.3857850730419159, |
|
"learning_rate": 0.0002947813365416023, |
|
"loss": 0.2466, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.23841059602649006, |
|
"grad_norm": 0.5409634113311768, |
|
"learning_rate": 0.0002941130514205272, |
|
"loss": 0.4729, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.24724061810154527, |
|
"grad_norm": 4.009927749633789, |
|
"learning_rate": 0.0002934053672301536, |
|
"loss": 1.4493, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.2560706401766004, |
|
"grad_norm": 0.3887677788734436, |
|
"learning_rate": 0.00029265847744427303, |
|
"loss": 0.4336, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.26490066225165565, |
|
"grad_norm": 0.3323811888694763, |
|
"learning_rate": 0.00029187258625509513, |
|
"loss": 0.4117, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.2737306843267108, |
|
"grad_norm": 0.2985091209411621, |
|
"learning_rate": 0.00029104790851742417, |
|
"loss": 0.3453, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.282560706401766, |
|
"grad_norm": 0.3181455731391907, |
|
"learning_rate": 0.0002901846696899191, |
|
"loss": 0.3032, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.2913907284768212, |
|
"grad_norm": 0.27217257022857666, |
|
"learning_rate": 0.00028928310577345606, |
|
"loss": 0.2274, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.30022075055187636, |
|
"grad_norm": 0.2923267185688019, |
|
"learning_rate": 0.0002883434632466077, |
|
"loss": 0.2747, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.3090507726269316, |
|
"grad_norm": 0.31327348947525024, |
|
"learning_rate": 0.00028736599899825856, |
|
"loss": 0.2876, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.31788079470198677, |
|
"grad_norm": 0.2787221670150757, |
|
"learning_rate": 0.00028635098025737434, |
|
"loss": 0.2623, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.32671081677704195, |
|
"grad_norm": 0.2709098756313324, |
|
"learning_rate": 0.00028529868451994384, |
|
"loss": 0.2678, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.3355408388520971, |
|
"grad_norm": 0.32480549812316895, |
|
"learning_rate": 0.0002842093994731145, |
|
"loss": 0.2934, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.3443708609271523, |
|
"grad_norm": 0.2852940559387207, |
|
"learning_rate": 0.00028308342291654174, |
|
"loss": 0.2727, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.35320088300220753, |
|
"grad_norm": 0.27713730931282043, |
|
"learning_rate": 0.00028192106268097334, |
|
"loss": 0.2662, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.3620309050772627, |
|
"grad_norm": 0.2732914090156555, |
|
"learning_rate": 0.00028072263654409154, |
|
"loss": 0.24, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.3708609271523179, |
|
"grad_norm": 0.2701897919178009, |
|
"learning_rate": 0.0002794884721436361, |
|
"loss": 0.2617, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.37969094922737306, |
|
"grad_norm": 0.2648429572582245, |
|
"learning_rate": 0.00027821890688783083, |
|
"loss": 0.2488, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.38852097130242824, |
|
"grad_norm": 0.29421964287757874, |
|
"learning_rate": 0.0002769142878631403, |
|
"loss": 0.278, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.3973509933774834, |
|
"grad_norm": 0.25156694650650024, |
|
"learning_rate": 0.00027557497173937923, |
|
"loss": 0.248, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.40618101545253865, |
|
"grad_norm": 0.24106387794017792, |
|
"learning_rate": 0.000274201324672203, |
|
"loss": 0.2162, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.41501103752759383, |
|
"grad_norm": 0.27839741110801697, |
|
"learning_rate": 0.00027279372220300385, |
|
"loss": 0.2568, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.423841059602649, |
|
"grad_norm": 0.233811616897583, |
|
"learning_rate": 0.0002713525491562421, |
|
"loss": 0.1938, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.4326710816777042, |
|
"grad_norm": 0.23097911477088928, |
|
"learning_rate": 0.00026987819953423867, |
|
"loss": 0.1853, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.44150110375275936, |
|
"grad_norm": 0.2746119499206543, |
|
"learning_rate": 0.00026837107640945905, |
|
"loss": 0.233, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.44150110375275936, |
|
"eval_loss": 0.26192525029182434, |
|
"eval_runtime": 8.6079, |
|
"eval_samples_per_second": 5.809, |
|
"eval_steps_per_second": 0.813, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.4503311258278146, |
|
"grad_norm": 0.26269274950027466, |
|
"learning_rate": 0.0002668315918143169, |
|
"loss": 0.1918, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.45916114790286977, |
|
"grad_norm": 0.4410167336463928, |
|
"learning_rate": 0.00026526016662852886, |
|
"loss": 0.2198, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.46799116997792495, |
|
"grad_norm": 0.3116855025291443, |
|
"learning_rate": 0.00026365723046405023, |
|
"loss": 0.2459, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.4768211920529801, |
|
"grad_norm": 0.37757059931755066, |
|
"learning_rate": 0.0002620232215476231, |
|
"loss": 0.2896, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.4856512141280353, |
|
"grad_norm": 0.4453379511833191, |
|
"learning_rate": 0.0002603585866009697, |
|
"loss": 0.3244, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.49448123620309054, |
|
"grad_norm": 1.5678483247756958, |
|
"learning_rate": 0.00025866378071866334, |
|
"loss": 0.6317, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.5033112582781457, |
|
"grad_norm": 0.3457544147968292, |
|
"learning_rate": 0.00025693926724370956, |
|
"loss": 0.3821, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.5121412803532008, |
|
"grad_norm": 0.3128604292869568, |
|
"learning_rate": 0.00025518551764087326, |
|
"loss": 0.3191, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.5209713024282561, |
|
"grad_norm": 0.28137052059173584, |
|
"learning_rate": 0.00025340301136778483, |
|
"loss": 0.3323, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.5298013245033113, |
|
"grad_norm": 0.2762452960014343, |
|
"learning_rate": 0.00025159223574386114, |
|
"loss": 0.3163, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.5386313465783664, |
|
"grad_norm": 0.26939550042152405, |
|
"learning_rate": 0.0002497536858170772, |
|
"loss": 0.2338, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.5474613686534217, |
|
"grad_norm": 0.25849199295043945, |
|
"learning_rate": 0.00024788786422862526, |
|
"loss": 0.24, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.5562913907284768, |
|
"grad_norm": 0.2425060272216797, |
|
"learning_rate": 0.00024599528107549745, |
|
"loss": 0.234, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.565121412803532, |
|
"grad_norm": 0.23768416047096252, |
|
"learning_rate": 0.00024407645377103054, |
|
"loss": 0.2171, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.5739514348785872, |
|
"grad_norm": 0.2728182077407837, |
|
"learning_rate": 0.00024213190690345018, |
|
"loss": 0.227, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.5827814569536424, |
|
"grad_norm": 0.23953858017921448, |
|
"learning_rate": 0.00024016217209245374, |
|
"loss": 0.2238, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.5916114790286976, |
|
"grad_norm": 0.25809717178344727, |
|
"learning_rate": 0.00023816778784387094, |
|
"loss": 0.2426, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.6004415011037527, |
|
"grad_norm": 0.28087127208709717, |
|
"learning_rate": 0.0002361492994024415, |
|
"loss": 0.2699, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.609271523178808, |
|
"grad_norm": 0.25422003865242004, |
|
"learning_rate": 0.0002341072586027509, |
|
"loss": 0.2469, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.6181015452538632, |
|
"grad_norm": 0.271660178899765, |
|
"learning_rate": 0.00023204222371836405, |
|
"loss": 0.2638, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.6269315673289183, |
|
"grad_norm": 0.25974762439727783, |
|
"learning_rate": 0.00022995475930919905, |
|
"loss": 0.2447, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.6357615894039735, |
|
"grad_norm": 0.23339006304740906, |
|
"learning_rate": 0.00022784543606718227, |
|
"loss": 0.2318, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.6445916114790287, |
|
"grad_norm": 0.2516346871852875, |
|
"learning_rate": 0.00022571483066022657, |
|
"loss": 0.2679, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.6534216335540839, |
|
"grad_norm": 0.2606465518474579, |
|
"learning_rate": 0.0002235635255745762, |
|
"loss": 0.2452, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.6622516556291391, |
|
"grad_norm": 0.2597995698451996, |
|
"learning_rate": 0.00022139210895556104, |
|
"loss": 0.1828, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.6622516556291391, |
|
"eval_loss": 0.23728808760643005, |
|
"eval_runtime": 9.0028, |
|
"eval_samples_per_second": 5.554, |
|
"eval_steps_per_second": 0.778, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.6710816777041942, |
|
"grad_norm": 0.2870580852031708, |
|
"learning_rate": 0.00021920117444680317, |
|
"loss": 0.222, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.6799116997792495, |
|
"grad_norm": 0.24285611510276794, |
|
"learning_rate": 0.00021699132102792097, |
|
"loss": 0.1982, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.6887417218543046, |
|
"grad_norm": 0.24280601739883423, |
|
"learning_rate": 0.0002147631528507739, |
|
"loss": 0.2086, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.6975717439293598, |
|
"grad_norm": 0.275769978761673, |
|
"learning_rate": 0.00021251727907429355, |
|
"loss": 0.2052, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.7064017660044151, |
|
"grad_norm": 0.2648524045944214, |
|
"learning_rate": 0.0002102543136979454, |
|
"loss": 0.204, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.7152317880794702, |
|
"grad_norm": 0.34556815028190613, |
|
"learning_rate": 0.0002079748753938678, |
|
"loss": 0.2676, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.7240618101545254, |
|
"grad_norm": 0.3545214831829071, |
|
"learning_rate": 0.0002056795873377331, |
|
"loss": 0.2618, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.7328918322295805, |
|
"grad_norm": 0.3948529064655304, |
|
"learning_rate": 0.00020336907703837748, |
|
"loss": 0.2681, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.7417218543046358, |
|
"grad_norm": 1.6499781608581543, |
|
"learning_rate": 0.00020104397616624645, |
|
"loss": 0.6606, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.7505518763796909, |
|
"grad_norm": 0.2808334529399872, |
|
"learning_rate": 0.00019870492038070252, |
|
"loss": 0.344, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.7593818984547461, |
|
"grad_norm": 0.27674341201782227, |
|
"learning_rate": 0.0001963525491562421, |
|
"loss": 0.3014, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.7682119205298014, |
|
"grad_norm": 0.2440793216228485, |
|
"learning_rate": 0.0001939875056076697, |
|
"loss": 0.3069, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.7770419426048565, |
|
"grad_norm": 0.2647138237953186, |
|
"learning_rate": 0.00019161043631427666, |
|
"loss": 0.28, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.7858719646799117, |
|
"grad_norm": 0.2215111255645752, |
|
"learning_rate": 0.00018922199114307294, |
|
"loss": 0.2274, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.7947019867549668, |
|
"grad_norm": 0.20203767716884613, |
|
"learning_rate": 0.00018682282307111987, |
|
"loss": 0.1834, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.8035320088300221, |
|
"grad_norm": 0.23644301295280457, |
|
"learning_rate": 0.00018441358800701273, |
|
"loss": 0.244, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.8123620309050773, |
|
"grad_norm": 0.23923274874687195, |
|
"learning_rate": 0.00018199494461156203, |
|
"loss": 0.1987, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.8211920529801324, |
|
"grad_norm": 0.2625834047794342, |
|
"learning_rate": 0.000179567554117722, |
|
"loss": 0.2483, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.8300220750551877, |
|
"grad_norm": 0.2455959916114807, |
|
"learning_rate": 0.00017713208014981648, |
|
"loss": 0.2222, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.8388520971302428, |
|
"grad_norm": 0.2274852693080902, |
|
"learning_rate": 0.00017468918854211007, |
|
"loss": 0.2276, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.847682119205298, |
|
"grad_norm": 0.23596301674842834, |
|
"learning_rate": 0.00017223954715677627, |
|
"loss": 0.2226, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.8565121412803532, |
|
"grad_norm": 0.25137948989868164, |
|
"learning_rate": 0.00016978382570131034, |
|
"loss": 0.2432, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.8653421633554084, |
|
"grad_norm": 0.25432929396629333, |
|
"learning_rate": 0.00016732269554543794, |
|
"loss": 0.2066, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.8741721854304636, |
|
"grad_norm": 0.2482401430606842, |
|
"learning_rate": 0.00016485682953756942, |
|
"loss": 0.2212, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.8830022075055187, |
|
"grad_norm": 0.2436574399471283, |
|
"learning_rate": 0.00016238690182084986, |
|
"loss": 0.2298, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.8830022075055187, |
|
"eval_loss": 0.22081802785396576, |
|
"eval_runtime": 8.4385, |
|
"eval_samples_per_second": 5.925, |
|
"eval_steps_per_second": 0.83, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.891832229580574, |
|
"grad_norm": 0.2530083656311035, |
|
"learning_rate": 0.0001599135876488549, |
|
"loss": 0.2319, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.9006622516556292, |
|
"grad_norm": 0.21293362975120544, |
|
"learning_rate": 0.00015743756320098332, |
|
"loss": 0.1978, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.9094922737306843, |
|
"grad_norm": 0.25216004252433777, |
|
"learning_rate": 0.0001549595053975962, |
|
"loss": 0.1811, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.9183222958057395, |
|
"grad_norm": 0.23433512449264526, |
|
"learning_rate": 0.00015248009171495378, |
|
"loss": 0.2014, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.9271523178807947, |
|
"grad_norm": 0.20752263069152832, |
|
"learning_rate": 0.00015, |
|
"loss": 0.1645, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.9359823399558499, |
|
"grad_norm": 0.2603313624858856, |
|
"learning_rate": 0.00014751990828504622, |
|
"loss": 0.191, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.9448123620309051, |
|
"grad_norm": 0.27333831787109375, |
|
"learning_rate": 0.00014504049460240375, |
|
"loss": 0.2247, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.9536423841059603, |
|
"grad_norm": 0.24938559532165527, |
|
"learning_rate": 0.00014256243679901663, |
|
"loss": 0.1789, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.9624724061810155, |
|
"grad_norm": 0.2354554533958435, |
|
"learning_rate": 0.00014008641235114508, |
|
"loss": 0.1606, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.9713024282560706, |
|
"grad_norm": 0.3264496922492981, |
|
"learning_rate": 0.00013761309817915014, |
|
"loss": 0.2777, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.9801324503311258, |
|
"grad_norm": 0.3939518332481384, |
|
"learning_rate": 0.00013514317046243058, |
|
"loss": 0.3138, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.9889624724061811, |
|
"grad_norm": 1.1526501178741455, |
|
"learning_rate": 0.00013267730445456208, |
|
"loss": 0.5539, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.9977924944812362, |
|
"grad_norm": 0.2903338074684143, |
|
"learning_rate": 0.00013021617429868963, |
|
"loss": 0.2596, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 1.0066225165562914, |
|
"grad_norm": 0.6565661430358887, |
|
"learning_rate": 0.00012776045284322368, |
|
"loss": 0.5067, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 1.0154525386313467, |
|
"grad_norm": 0.2586728036403656, |
|
"learning_rate": 0.00012531081145788987, |
|
"loss": 0.2531, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 1.0242825607064017, |
|
"grad_norm": 0.2610194683074951, |
|
"learning_rate": 0.00012286791985018355, |
|
"loss": 0.2808, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 1.033112582781457, |
|
"grad_norm": 0.2583560049533844, |
|
"learning_rate": 0.00012043244588227796, |
|
"loss": 0.2729, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 1.0419426048565121, |
|
"grad_norm": 0.217752605676651, |
|
"learning_rate": 0.00011800505538843798, |
|
"loss": 0.2058, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 1.0507726269315674, |
|
"grad_norm": 0.2030157893896103, |
|
"learning_rate": 0.00011558641199298727, |
|
"loss": 0.1815, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 1.0596026490066226, |
|
"grad_norm": 0.20954853296279907, |
|
"learning_rate": 0.00011317717692888012, |
|
"loss": 0.1792, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.0684326710816776, |
|
"grad_norm": 0.18091771006584167, |
|
"learning_rate": 0.00011077800885692702, |
|
"loss": 0.1662, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 1.0772626931567328, |
|
"grad_norm": 0.20627768337726593, |
|
"learning_rate": 0.00010838956368572334, |
|
"loss": 0.1777, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 1.086092715231788, |
|
"grad_norm": 0.21561631560325623, |
|
"learning_rate": 0.0001060124943923303, |
|
"loss": 0.197, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 1.0949227373068433, |
|
"grad_norm": 0.24049685895442963, |
|
"learning_rate": 0.0001036474508437579, |
|
"loss": 0.1965, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 1.1037527593818985, |
|
"grad_norm": 0.2354094386100769, |
|
"learning_rate": 0.00010129507961929748, |
|
"loss": 0.1869, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 1.1037527593818985, |
|
"eval_loss": 0.21981161832809448, |
|
"eval_runtime": 7.9161, |
|
"eval_samples_per_second": 6.316, |
|
"eval_steps_per_second": 0.884, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 1.1125827814569536, |
|
"grad_norm": 0.24173709750175476, |
|
"learning_rate": 9.895602383375353e-05, |
|
"loss": 0.2063, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 1.1214128035320088, |
|
"grad_norm": 0.24440215528011322, |
|
"learning_rate": 9.663092296162251e-05, |
|
"loss": 0.1866, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 1.130242825607064, |
|
"grad_norm": 0.23964616656303406, |
|
"learning_rate": 9.432041266226686e-05, |
|
"loss": 0.1952, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 1.1390728476821192, |
|
"grad_norm": 0.23612673580646515, |
|
"learning_rate": 9.202512460613219e-05, |
|
"loss": 0.1978, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 1.1479028697571745, |
|
"grad_norm": 0.21302524209022522, |
|
"learning_rate": 8.97456863020546e-05, |
|
"loss": 0.1789, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.1567328918322295, |
|
"grad_norm": 0.22610829770565033, |
|
"learning_rate": 8.748272092570646e-05, |
|
"loss": 0.1921, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 1.1655629139072847, |
|
"grad_norm": 0.2158009558916092, |
|
"learning_rate": 8.523684714922608e-05, |
|
"loss": 0.1716, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 1.17439293598234, |
|
"grad_norm": 0.2384001463651657, |
|
"learning_rate": 8.300867897207903e-05, |
|
"loss": 0.1488, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 1.1832229580573952, |
|
"grad_norm": 0.20479005575180054, |
|
"learning_rate": 8.079882555319684e-05, |
|
"loss": 0.1429, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 1.1920529801324504, |
|
"grad_norm": 0.21124428510665894, |
|
"learning_rate": 7.860789104443896e-05, |
|
"loss": 0.1542, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 1.2008830022075054, |
|
"grad_norm": 0.2595142126083374, |
|
"learning_rate": 7.643647442542382e-05, |
|
"loss": 0.1562, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 1.2097130242825607, |
|
"grad_norm": 0.24693676829338074, |
|
"learning_rate": 7.428516933977347e-05, |
|
"loss": 0.1826, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 1.218543046357616, |
|
"grad_norm": 0.2386799454689026, |
|
"learning_rate": 7.215456393281776e-05, |
|
"loss": 0.1453, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 1.2273730684326711, |
|
"grad_norm": 0.292263388633728, |
|
"learning_rate": 7.004524069080096e-05, |
|
"loss": 0.2029, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 1.2362030905077264, |
|
"grad_norm": 0.30869174003601074, |
|
"learning_rate": 6.795777628163599e-05, |
|
"loss": 0.2001, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.2450331125827814, |
|
"grad_norm": 0.7546313405036926, |
|
"learning_rate": 6.58927413972491e-05, |
|
"loss": 0.33, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 1.2538631346578366, |
|
"grad_norm": 0.28588002920150757, |
|
"learning_rate": 6.385070059755846e-05, |
|
"loss": 0.1978, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 1.2626931567328918, |
|
"grad_norm": 0.3258229196071625, |
|
"learning_rate": 6.183221215612904e-05, |
|
"loss": 0.2734, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 1.271523178807947, |
|
"grad_norm": 0.2730714678764343, |
|
"learning_rate": 5.983782790754623e-05, |
|
"loss": 0.2314, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 1.280353200883002, |
|
"grad_norm": 0.2608177959918976, |
|
"learning_rate": 5.786809309654982e-05, |
|
"loss": 0.208, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 1.2891832229580573, |
|
"grad_norm": 0.20529748499393463, |
|
"learning_rate": 5.592354622896944e-05, |
|
"loss": 0.156, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 1.2980132450331126, |
|
"grad_norm": 0.19379666447639465, |
|
"learning_rate": 5.40047189245025e-05, |
|
"loss": 0.1629, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 1.3068432671081678, |
|
"grad_norm": 0.21285969018936157, |
|
"learning_rate": 5.211213577137469e-05, |
|
"loss": 0.1852, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 1.315673289183223, |
|
"grad_norm": 0.21158519387245178, |
|
"learning_rate": 5.024631418292274e-05, |
|
"loss": 0.171, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 1.3245033112582782, |
|
"grad_norm": 0.25292232632637024, |
|
"learning_rate": 4.840776425613886e-05, |
|
"loss": 0.2063, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.3245033112582782, |
|
"eval_loss": 0.21060962975025177, |
|
"eval_runtime": 8.6102, |
|
"eval_samples_per_second": 5.807, |
|
"eval_steps_per_second": 0.813, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.3333333333333333, |
|
"grad_norm": 0.22895404696464539, |
|
"learning_rate": 4.659698863221513e-05, |
|
"loss": 0.1954, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 1.3421633554083885, |
|
"grad_norm": 0.26738283038139343, |
|
"learning_rate": 4.481448235912671e-05, |
|
"loss": 0.1996, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 1.3509933774834437, |
|
"grad_norm": 0.23517753183841705, |
|
"learning_rate": 4.306073275629044e-05, |
|
"loss": 0.1866, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 1.359823399558499, |
|
"grad_norm": 0.24005506932735443, |
|
"learning_rate": 4.133621928133665e-05, |
|
"loss": 0.1938, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 1.368653421633554, |
|
"grad_norm": 0.2558877468109131, |
|
"learning_rate": 3.964141339903026e-05, |
|
"loss": 0.1885, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.3774834437086092, |
|
"grad_norm": 0.22536706924438477, |
|
"learning_rate": 3.797677845237696e-05, |
|
"loss": 0.1748, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 1.3863134657836644, |
|
"grad_norm": 0.24991802871227264, |
|
"learning_rate": 3.634276953594982e-05, |
|
"loss": 0.187, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 1.3951434878587197, |
|
"grad_norm": 0.22741112112998962, |
|
"learning_rate": 3.473983337147118e-05, |
|
"loss": 0.1728, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 1.403973509933775, |
|
"grad_norm": 0.247371643781662, |
|
"learning_rate": 3.316840818568315e-05, |
|
"loss": 0.1947, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 1.4128035320088301, |
|
"grad_norm": 0.2145814299583435, |
|
"learning_rate": 3.162892359054098e-05, |
|
"loss": 0.1477, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.4216335540838851, |
|
"grad_norm": 0.20137929916381836, |
|
"learning_rate": 3.0121800465761293e-05, |
|
"loss": 0.147, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 1.4304635761589404, |
|
"grad_norm": 0.22576574981212616, |
|
"learning_rate": 2.8647450843757897e-05, |
|
"loss": 0.1609, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 1.4392935982339956, |
|
"grad_norm": 0.20561394095420837, |
|
"learning_rate": 2.7206277796996144e-05, |
|
"loss": 0.1319, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 1.4481236203090508, |
|
"grad_norm": 0.22655843198299408, |
|
"learning_rate": 2.5798675327796993e-05, |
|
"loss": 0.1745, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 1.4569536423841059, |
|
"grad_norm": 0.24106654524803162, |
|
"learning_rate": 2.4425028260620715e-05, |
|
"loss": 0.1623, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.465783664459161, |
|
"grad_norm": 0.25789502263069153, |
|
"learning_rate": 2.3085712136859668e-05, |
|
"loss": 0.1694, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 1.4746136865342163, |
|
"grad_norm": 0.26895081996917725, |
|
"learning_rate": 2.178109311216913e-05, |
|
"loss": 0.1587, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 1.4834437086092715, |
|
"grad_norm": 0.30352866649627686, |
|
"learning_rate": 2.0511527856363912e-05, |
|
"loss": 0.1995, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 1.4922737306843268, |
|
"grad_norm": 0.5954450964927673, |
|
"learning_rate": 1.927736345590839e-05, |
|
"loss": 0.3586, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 1.501103752759382, |
|
"grad_norm": 0.21726396679878235, |
|
"learning_rate": 1.8078937319026654e-05, |
|
"loss": 0.2243, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.5099337748344372, |
|
"grad_norm": 0.22568368911743164, |
|
"learning_rate": 1.6916577083458228e-05, |
|
"loss": 0.2319, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 1.5187637969094923, |
|
"grad_norm": 0.23746433854103088, |
|
"learning_rate": 1.579060052688548e-05, |
|
"loss": 0.2414, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 1.5275938189845475, |
|
"grad_norm": 0.21537818014621735, |
|
"learning_rate": 1.4701315480056164e-05, |
|
"loss": 0.2029, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 1.5364238410596025, |
|
"grad_norm": 0.22447596490383148, |
|
"learning_rate": 1.3649019742625623e-05, |
|
"loss": 0.1825, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 1.5452538631346577, |
|
"grad_norm": 0.2091474086046219, |
|
"learning_rate": 1.2634001001741373e-05, |
|
"loss": 0.1776, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.5452538631346577, |
|
"eval_loss": 0.19788633286952972, |
|
"eval_runtime": 7.9222, |
|
"eval_samples_per_second": 6.311, |
|
"eval_steps_per_second": 0.884, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.554083885209713, |
|
"grad_norm": 0.22721268236637115, |
|
"learning_rate": 1.1656536753392287e-05, |
|
"loss": 0.2137, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 1.5629139072847682, |
|
"grad_norm": 0.20839567482471466, |
|
"learning_rate": 1.0716894226543953e-05, |
|
"loss": 0.1749, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 1.5717439293598234, |
|
"grad_norm": 0.21761561930179596, |
|
"learning_rate": 9.815330310080887e-06, |
|
"loss": 0.1772, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 1.5805739514348787, |
|
"grad_norm": 0.22803503274917603, |
|
"learning_rate": 8.952091482575824e-06, |
|
"loss": 0.1855, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 1.589403973509934, |
|
"grad_norm": 0.21269898116588593, |
|
"learning_rate": 8.127413744904804e-06, |
|
"loss": 0.1775, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.598233995584989, |
|
"grad_norm": 0.22217035293579102, |
|
"learning_rate": 7.34152255572697e-06, |
|
"loss": 0.2023, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 1.6070640176600441, |
|
"grad_norm": 0.21203316748142242, |
|
"learning_rate": 6.594632769846353e-06, |
|
"loss": 0.1833, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 1.6158940397350994, |
|
"grad_norm": 0.20570416748523712, |
|
"learning_rate": 5.886948579472778e-06, |
|
"loss": 0.1535, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 1.6247240618101544, |
|
"grad_norm": 0.22212745249271393, |
|
"learning_rate": 5.218663458397715e-06, |
|
"loss": 0.1816, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 1.6335540838852096, |
|
"grad_norm": 0.22477659583091736, |
|
"learning_rate": 4.589960109100444e-06, |
|
"loss": 0.1934, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.6423841059602649, |
|
"grad_norm": 0.21524512767791748, |
|
"learning_rate": 4.001010412799138e-06, |
|
"loss": 0.1755, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 1.65121412803532, |
|
"grad_norm": 0.2027374505996704, |
|
"learning_rate": 3.451975382460109e-06, |
|
"loss": 0.1684, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 1.6600441501103753, |
|
"grad_norm": 0.1885063648223877, |
|
"learning_rate": 2.9430051187785962e-06, |
|
"loss": 0.1393, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 1.6688741721854305, |
|
"grad_norm": 0.20242398977279663, |
|
"learning_rate": 2.4742387691426445e-06, |
|
"loss": 0.1542, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 1.6777041942604858, |
|
"grad_norm": 0.20245841145515442, |
|
"learning_rate": 2.0458044895916513e-06, |
|
"loss": 0.1246, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.6865342163355408, |
|
"grad_norm": 0.19838440418243408, |
|
"learning_rate": 1.6578194097797258e-06, |
|
"loss": 0.15, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 1.695364238410596, |
|
"grad_norm": 0.19168749451637268, |
|
"learning_rate": 1.3103896009537207e-06, |
|
"loss": 0.1339, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 1.7041942604856513, |
|
"grad_norm": 0.21052022278308868, |
|
"learning_rate": 1.0036100469542786e-06, |
|
"loss": 0.1397, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 1.7130242825607063, |
|
"grad_norm": 0.24240228533744812, |
|
"learning_rate": 7.375646182482875e-07, |
|
"loss": 0.1545, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 1.7218543046357615, |
|
"grad_norm": 0.2707175314426422, |
|
"learning_rate": 5.123260489995229e-07, |
|
"loss": 0.1654, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.7306843267108167, |
|
"grad_norm": 0.3442803621292114, |
|
"learning_rate": 3.2795591718381975e-07, |
|
"loss": 0.2205, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 1.739514348785872, |
|
"grad_norm": 0.7022460103034973, |
|
"learning_rate": 1.8450462775428942e-07, |
|
"loss": 0.3136, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 1.7483443708609272, |
|
"grad_norm": 0.190536230802536, |
|
"learning_rate": 8.201139886109264e-08, |
|
"loss": 0.2159, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 1.7571743929359824, |
|
"grad_norm": 0.21792356669902802, |
|
"learning_rate": 2.0504251129649374e-08, |
|
"loss": 0.2479, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 1.7660044150110377, |
|
"grad_norm": 0.22573994100093842, |
|
"learning_rate": 0.0, |
|
"loss": 0.2372, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.7660044150110377, |
|
"eval_loss": 0.1996617466211319, |
|
"eval_runtime": 8.6103, |
|
"eval_samples_per_second": 5.807, |
|
"eval_steps_per_second": 0.813, |
|
"step": 200 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 200, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 2, |
|
"save_steps": 50, |
|
"stateful_callbacks": { |
|
"EarlyStoppingCallback": { |
|
"args": { |
|
"early_stopping_patience": 1, |
|
"early_stopping_threshold": 0.0 |
|
}, |
|
"attributes": { |
|
"early_stopping_patience_counter": 0 |
|
} |
|
}, |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 4.3349111537821286e+17, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |