{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.22796352583586627,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00303951367781155,
      "grad_norm": 13.49870491027832,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 3.1121,
      "step": 1
    },
    {
      "epoch": 0.00303951367781155,
      "eval_loss": 4.797636032104492,
      "eval_runtime": 36.7942,
      "eval_samples_per_second": 3.778,
      "eval_steps_per_second": 1.902,
      "step": 1
    },
    {
      "epoch": 0.0060790273556231,
      "grad_norm": 12.006007194519043,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.7642,
      "step": 2
    },
    {
      "epoch": 0.00911854103343465,
      "grad_norm": 11.162591934204102,
      "learning_rate": 5e-05,
      "loss": 3.077,
      "step": 3
    },
    {
      "epoch": 0.0121580547112462,
      "grad_norm": 8.176030158996582,
      "learning_rate": 4.997620553954645e-05,
      "loss": 2.8574,
      "step": 4
    },
    {
      "epoch": 0.015197568389057751,
      "grad_norm": 6.960273265838623,
      "learning_rate": 4.990486745229364e-05,
      "loss": 2.5954,
      "step": 5
    },
    {
      "epoch": 0.0182370820668693,
      "grad_norm": 7.929886817932129,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 2.6525,
      "step": 6
    },
    {
      "epoch": 0.02127659574468085,
      "grad_norm": 6.948627948760986,
      "learning_rate": 4.962019382530521e-05,
      "loss": 2.2277,
      "step": 7
    },
    {
      "epoch": 0.0243161094224924,
      "grad_norm": 6.242570877075195,
      "learning_rate": 4.940740017799833e-05,
      "loss": 1.5744,
      "step": 8
    },
    {
      "epoch": 0.02735562310030395,
      "grad_norm": 7.785113334655762,
      "learning_rate": 4.914814565722671e-05,
      "loss": 2.0428,
      "step": 9
    },
    {
      "epoch": 0.030395136778115502,
      "grad_norm": 7.158974647521973,
      "learning_rate": 4.884292376870567e-05,
      "loss": 1.4865,
      "step": 10
    },
    {
      "epoch": 0.03343465045592705,
      "grad_norm": 7.481812953948975,
      "learning_rate": 4.849231551964771e-05,
      "loss": 1.6615,
      "step": 11
    },
    {
      "epoch": 0.0364741641337386,
      "grad_norm": 13.22747802734375,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 2.5028,
      "step": 12
    },
    {
      "epoch": 0.03951367781155015,
      "grad_norm": 7.720076560974121,
      "learning_rate": 4.765769467591625e-05,
      "loss": 1.1715,
      "step": 13
    },
    {
      "epoch": 0.0425531914893617,
      "grad_norm": 6.172296047210693,
      "learning_rate": 4.717527082945554e-05,
      "loss": 0.7897,
      "step": 14
    },
    {
      "epoch": 0.04559270516717325,
      "grad_norm": 7.666730880737305,
      "learning_rate": 4.665063509461097e-05,
      "loss": 1.42,
      "step": 15
    },
    {
      "epoch": 0.0486322188449848,
      "grad_norm": 8.414482116699219,
      "learning_rate": 4.608478614532215e-05,
      "loss": 1.0577,
      "step": 16
    },
    {
      "epoch": 0.05167173252279635,
      "grad_norm": 8.490312576293945,
      "learning_rate": 4.54788011072248e-05,
      "loss": 1.4698,
      "step": 17
    },
    {
      "epoch": 0.0547112462006079,
      "grad_norm": 6.877030372619629,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 1.1734,
      "step": 18
    },
    {
      "epoch": 0.057750759878419454,
      "grad_norm": 7.7711052894592285,
      "learning_rate": 4.415111107797445e-05,
      "loss": 1.1899,
      "step": 19
    },
    {
      "epoch": 0.060790273556231005,
      "grad_norm": 6.355740547180176,
      "learning_rate": 4.34319334202531e-05,
      "loss": 1.0894,
      "step": 20
    },
    {
      "epoch": 0.06382978723404255,
      "grad_norm": 5.492956161499023,
      "learning_rate": 4.267766952966369e-05,
      "loss": 0.5664,
      "step": 21
    },
    {
      "epoch": 0.0668693009118541,
      "grad_norm": 5.8868303298950195,
      "learning_rate": 4.188975519039151e-05,
      "loss": 0.7452,
      "step": 22
    },
    {
      "epoch": 0.06990881458966565,
      "grad_norm": 5.961940288543701,
      "learning_rate": 4.1069690242163484e-05,
      "loss": 0.8316,
      "step": 23
    },
    {
      "epoch": 0.0729483282674772,
      "grad_norm": 10.402515411376953,
      "learning_rate": 4.021903572521802e-05,
      "loss": 1.2375,
      "step": 24
    },
    {
      "epoch": 0.07598784194528875,
      "grad_norm": 9.500027656555176,
      "learning_rate": 3.933941090877615e-05,
      "loss": 1.34,
      "step": 25
    },
    {
      "epoch": 0.07598784194528875,
      "eval_loss": 1.3861757516860962,
      "eval_runtime": 37.3804,
      "eval_samples_per_second": 3.719,
      "eval_steps_per_second": 1.873,
      "step": 25
    },
    {
      "epoch": 0.0790273556231003,
      "grad_norm": 10.619952201843262,
      "learning_rate": 3.84324902086706e-05,
      "loss": 0.971,
      "step": 26
    },
    {
      "epoch": 0.08206686930091185,
      "grad_norm": 8.14050579071045,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.4617,
      "step": 27
    },
    {
      "epoch": 0.0851063829787234,
      "grad_norm": 8.265830993652344,
      "learning_rate": 3.654371533087586e-05,
      "loss": 0.9466,
      "step": 28
    },
    {
      "epoch": 0.08814589665653495,
      "grad_norm": 14.07198429107666,
      "learning_rate": 3.556545654351749e-05,
      "loss": 1.1971,
      "step": 29
    },
    {
      "epoch": 0.0911854103343465,
      "grad_norm": 7.206356525421143,
      "learning_rate": 3.456708580912725e-05,
      "loss": 0.5795,
      "step": 30
    },
    {
      "epoch": 0.09422492401215805,
      "grad_norm": 6.850846290588379,
      "learning_rate": 3.355050358314172e-05,
      "loss": 0.7625,
      "step": 31
    },
    {
      "epoch": 0.0972644376899696,
      "grad_norm": 18.45955467224121,
      "learning_rate": 3.251764498760683e-05,
      "loss": 0.9912,
      "step": 32
    },
    {
      "epoch": 0.10030395136778116,
      "grad_norm": 20.52401351928711,
      "learning_rate": 3.147047612756302e-05,
      "loss": 1.1835,
      "step": 33
    },
    {
      "epoch": 0.1033434650455927,
      "grad_norm": 7.496253490447998,
      "learning_rate": 3.0410990348452573e-05,
      "loss": 0.512,
      "step": 34
    },
    {
      "epoch": 0.10638297872340426,
      "grad_norm": 8.358292579650879,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 0.7951,
      "step": 35
    },
    {
      "epoch": 0.1094224924012158,
      "grad_norm": 6.15350866317749,
      "learning_rate": 2.8263154805501297e-05,
      "loss": 0.8166,
      "step": 36
    },
    {
      "epoch": 0.11246200607902736,
      "grad_norm": 8.419435501098633,
      "learning_rate": 2.717889356869146e-05,
      "loss": 1.0166,
      "step": 37
    },
    {
      "epoch": 0.11550151975683891,
      "grad_norm": 7.933197975158691,
      "learning_rate": 2.6090484684133404e-05,
      "loss": 1.1009,
      "step": 38
    },
    {
      "epoch": 0.11854103343465046,
      "grad_norm": 8.382092475891113,
      "learning_rate": 2.5e-05,
      "loss": 0.7886,
      "step": 39
    },
    {
      "epoch": 0.12158054711246201,
      "grad_norm": 6.936803817749023,
      "learning_rate": 2.3909515315866605e-05,
      "loss": 0.9169,
      "step": 40
    },
    {
      "epoch": 0.12462006079027356,
      "grad_norm": 8.673044204711914,
      "learning_rate": 2.2821106431308544e-05,
      "loss": 0.8285,
      "step": 41
    },
    {
      "epoch": 0.1276595744680851,
      "grad_norm": 7.593755722045898,
      "learning_rate": 2.173684519449872e-05,
      "loss": 0.9036,
      "step": 42
    },
    {
      "epoch": 0.13069908814589665,
      "grad_norm": 34.3929557800293,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 1.1941,
      "step": 43
    },
    {
      "epoch": 0.1337386018237082,
      "grad_norm": 16.547935485839844,
      "learning_rate": 1.958900965154743e-05,
      "loss": 1.3811,
      "step": 44
    },
    {
      "epoch": 0.13677811550151975,
      "grad_norm": 32.15914535522461,
      "learning_rate": 1.852952387243698e-05,
      "loss": 1.1523,
      "step": 45
    },
    {
      "epoch": 0.1398176291793313,
      "grad_norm": 12.116189956665039,
      "learning_rate": 1.7482355012393177e-05,
      "loss": 0.9528,
      "step": 46
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 22.938390731811523,
      "learning_rate": 1.6449496416858284e-05,
      "loss": 2.451,
      "step": 47
    },
    {
      "epoch": 0.1458966565349544,
      "grad_norm": 41.893733978271484,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 1.3534,
      "step": 48
    },
    {
      "epoch": 0.14893617021276595,
      "grad_norm": 29.780670166015625,
      "learning_rate": 1.443454345648252e-05,
      "loss": 1.8623,
      "step": 49
    },
    {
      "epoch": 0.1519756838905775,
      "grad_norm": 17.589799880981445,
      "learning_rate": 1.3456284669124158e-05,
      "loss": 1.9448,
      "step": 50
    },
    {
      "epoch": 0.1519756838905775,
      "eval_loss": 1.0934056043624878,
      "eval_runtime": 37.3691,
      "eval_samples_per_second": 3.72,
      "eval_steps_per_second": 1.873,
      "step": 50
    },
    {
      "epoch": 0.15501519756838905,
      "grad_norm": 17.6353759765625,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 1.4377,
      "step": 51
    },
    {
      "epoch": 0.1580547112462006,
      "grad_norm": 15.932900428771973,
      "learning_rate": 1.1567509791329401e-05,
      "loss": 1.1834,
      "step": 52
    },
    {
      "epoch": 0.16109422492401215,
      "grad_norm": 13.163067817687988,
      "learning_rate": 1.0660589091223855e-05,
      "loss": 1.5885,
      "step": 53
    },
    {
      "epoch": 0.1641337386018237,
      "grad_norm": 11.45263385772705,
      "learning_rate": 9.780964274781984e-06,
      "loss": 0.9741,
      "step": 54
    },
    {
      "epoch": 0.16717325227963525,
      "grad_norm": 8.3353910446167,
      "learning_rate": 8.930309757836517e-06,
      "loss": 0.6441,
      "step": 55
    },
    {
      "epoch": 0.1702127659574468,
      "grad_norm": 7.865023612976074,
      "learning_rate": 8.110244809608495e-06,
      "loss": 0.658,
      "step": 56
    },
    {
      "epoch": 0.17325227963525835,
      "grad_norm": 6.3831095695495605,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 0.8165,
      "step": 57
    },
    {
      "epoch": 0.1762917933130699,
      "grad_norm": 5.159278869628906,
      "learning_rate": 6.568066579746901e-06,
      "loss": 0.665,
      "step": 58
    },
    {
      "epoch": 0.17933130699088146,
      "grad_norm": 4.828774452209473,
      "learning_rate": 5.848888922025553e-06,
      "loss": 0.647,
      "step": 59
    },
    {
      "epoch": 0.182370820668693,
      "grad_norm": 5.115424156188965,
      "learning_rate": 5.166166492719124e-06,
      "loss": 0.47,
      "step": 60
    },
    {
      "epoch": 0.18541033434650456,
      "grad_norm": 3.0081770420074463,
      "learning_rate": 4.521198892775203e-06,
      "loss": 0.4042,
      "step": 61
    },
    {
      "epoch": 0.1884498480243161,
      "grad_norm": 3.4292054176330566,
      "learning_rate": 3.9152138546778625e-06,
      "loss": 0.5876,
      "step": 62
    },
    {
      "epoch": 0.19148936170212766,
      "grad_norm": 5.665149211883545,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 0.6981,
      "step": 63
    },
    {
      "epoch": 0.1945288753799392,
      "grad_norm": 5.063657283782959,
      "learning_rate": 2.8247291705444575e-06,
      "loss": 0.8123,
      "step": 64
    },
    {
      "epoch": 0.19756838905775076,
      "grad_norm": 5.569523334503174,
      "learning_rate": 2.3423053240837515e-06,
      "loss": 0.7608,
      "step": 65
    },
    {
      "epoch": 0.2006079027355623,
      "grad_norm": 5.494087219238281,
      "learning_rate": 1.9030116872178316e-06,
      "loss": 0.6362,
      "step": 66
    },
    {
      "epoch": 0.20364741641337386,
      "grad_norm": 3.7691006660461426,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 0.4503,
      "step": 67
    },
    {
      "epoch": 0.2066869300911854,
      "grad_norm": 2.7975122928619385,
      "learning_rate": 1.1570762312943295e-06,
      "loss": 0.5325,
      "step": 68
    },
    {
      "epoch": 0.20972644376899696,
      "grad_norm": 5.918449401855469,
      "learning_rate": 8.51854342773295e-07,
      "loss": 0.5773,
      "step": 69
    },
    {
      "epoch": 0.2127659574468085,
      "grad_norm": 5.920218467712402,
      "learning_rate": 5.925998220016659e-07,
      "loss": 0.731,
      "step": 70
    },
    {
      "epoch": 0.21580547112462006,
      "grad_norm": 6.536037445068359,
      "learning_rate": 3.7980617469479953e-07,
      "loss": 0.6718,
      "step": 71
    },
    {
      "epoch": 0.2188449848024316,
      "grad_norm": 6.299208164215088,
      "learning_rate": 2.1387846565474045e-07,
      "loss": 0.7229,
      "step": 72
    },
    {
      "epoch": 0.22188449848024316,
      "grad_norm": 6.822452545166016,
      "learning_rate": 9.513254770636137e-08,
      "loss": 0.7534,
      "step": 73
    },
    {
      "epoch": 0.22492401215805471,
      "grad_norm": 7.545007705688477,
      "learning_rate": 2.3794460453555047e-08,
      "loss": 0.7793,
      "step": 74
    },
    {
      "epoch": 0.22796352583586627,
      "grad_norm": 8.794013977050781,
      "learning_rate": 0.0,
      "loss": 0.8974,
      "step": 75
    },
    {
      "epoch": 0.22796352583586627,
      "eval_loss": 0.7993611693382263,
      "eval_runtime": 37.3913,
      "eval_samples_per_second": 3.717,
      "eval_steps_per_second": 1.872,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4882650284032e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}