{
  "best_metric": 6.946237564086914,
  "best_model_checkpoint": "./results/models/checkpoint-45320",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 45320,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05516328331862312,
      "grad_norm": 0.3359375,
      "learning_rate": 0.004994483671668138,
      "loss": 7.1096,
      "step": 500
    },
    {
      "epoch": 0.11032656663724624,
      "grad_norm": 0.2890625,
      "learning_rate": 0.004988967343336276,
      "loss": 6.9824,
      "step": 1000
    },
    {
      "epoch": 0.16548984995586938,
      "grad_norm": 0.369140625,
      "learning_rate": 0.004983451015004413,
      "loss": 6.971,
      "step": 1500
    },
    {
      "epoch": 0.22065313327449249,
      "grad_norm": 0.8046875,
      "learning_rate": 0.004977934686672551,
      "loss": 7.0129,
      "step": 2000
    },
    {
      "epoch": 0.2758164165931156,
      "grad_norm": 4.59375,
      "learning_rate": 0.004972418358340689,
      "loss": 7.0571,
      "step": 2500
    },
    {
      "epoch": 0.33097969991173876,
      "grad_norm": 4.03125,
      "learning_rate": 0.004966902030008827,
      "loss": 7.0532,
      "step": 3000
    },
    {
      "epoch": 0.3861429832303619,
      "grad_norm": 3.75,
      "learning_rate": 0.0049613857016769635,
      "loss": 7.0486,
      "step": 3500
    },
    {
      "epoch": 0.44130626654898497,
      "grad_norm": 9.125,
      "learning_rate": 0.004955869373345101,
      "loss": 7.0385,
      "step": 4000
    },
    {
      "epoch": 0.4964695498676081,
      "grad_norm": 9.1875,
      "learning_rate": 0.00495035304501324,
      "loss": 7.0304,
      "step": 4500
    },
    {
      "epoch": 0.5516328331862312,
      "grad_norm": 12.625,
      "learning_rate": 0.004944836716681377,
      "loss": 7.023,
      "step": 5000
    },
    {
      "epoch": 0.6067961165048543,
      "grad_norm": 8.6875,
      "learning_rate": 0.0049393203883495145,
      "loss": 7.0205,
      "step": 5500
    },
    {
      "epoch": 0.6619593998234775,
      "grad_norm": 7.6875,
      "learning_rate": 0.004933804060017652,
      "loss": 7.025,
      "step": 6000
    },
    {
      "epoch": 0.7171226831421006,
      "grad_norm": 4.96875,
      "learning_rate": 0.00492828773168579,
      "loss": 7.0285,
      "step": 6500
    },
    {
      "epoch": 0.7722859664607238,
      "grad_norm": 5.46875,
      "learning_rate": 0.004922771403353928,
      "loss": 7.025,
      "step": 7000
    },
    {
      "epoch": 0.8274492497793469,
      "grad_norm": 20.125,
      "learning_rate": 0.0049172550750220655,
      "loss": 7.0162,
      "step": 7500
    },
    {
      "epoch": 0.8826125330979699,
      "grad_norm": 7.1875,
      "learning_rate": 0.004911738746690203,
      "loss": 7.0154,
      "step": 8000
    },
    {
      "epoch": 0.9377758164165931,
      "grad_norm": 8.1875,
      "learning_rate": 0.004906222418358341,
      "loss": 7.0104,
      "step": 8500
    },
    {
      "epoch": 0.9929390997352162,
      "grad_norm": 6.71875,
      "learning_rate": 0.004900706090026478,
      "loss": 7.0374,
      "step": 9000
    },
    {
      "epoch": 1.0,
      "eval_loss": 7.0309014320373535,
      "eval_runtime": 9.4135,
      "eval_samples_per_second": 53.115,
      "eval_steps_per_second": 1.7,
      "step": 9064
    },
    {
      "epoch": 1.0481023830538394,
      "grad_norm": 3.5,
      "learning_rate": 0.0048951897616946165,
      "loss": 7.02,
      "step": 9500
    },
    {
      "epoch": 1.1032656663724625,
      "grad_norm": 10.5,
      "learning_rate": 0.004889673433362754,
      "loss": 7.0297,
      "step": 10000
    },
    {
      "epoch": 1.1584289496910856,
      "grad_norm": 8.875,
      "learning_rate": 0.004884157105030891,
      "loss": 7.0361,
      "step": 10500
    },
    {
      "epoch": 1.2135922330097086,
      "grad_norm": 3.5625,
      "learning_rate": 0.004878640776699029,
      "loss": 7.015,
      "step": 11000
    },
    {
      "epoch": 1.268755516328332,
      "grad_norm": 9.1875,
      "learning_rate": 0.0048731244483671676,
      "loss": 6.9996,
      "step": 11500
    },
    {
      "epoch": 1.323918799646955,
      "grad_norm": 13.625,
      "learning_rate": 0.0048676081200353044,
      "loss": 6.9975,
      "step": 12000
    },
    {
      "epoch": 1.379082082965578,
      "grad_norm": 6.34375,
      "learning_rate": 0.004862091791703442,
      "loss": 7.0081,
      "step": 12500
    },
    {
      "epoch": 1.4342453662842012,
      "grad_norm": 14.375,
      "learning_rate": 0.00485657546337158,
      "loss": 7.0008,
      "step": 13000
    },
    {
      "epoch": 1.4894086496028245,
      "grad_norm": 8.8125,
      "learning_rate": 0.004851059135039718,
      "loss": 6.9962,
      "step": 13500
    },
    {
      "epoch": 1.5445719329214476,
      "grad_norm": 22.625,
      "learning_rate": 0.0048455428067078555,
      "loss": 6.9928,
      "step": 14000
    },
    {
      "epoch": 1.5997352162400706,
      "grad_norm": 12.6875,
      "learning_rate": 0.004840026478375993,
      "loss": 6.9919,
      "step": 14500
    },
    {
      "epoch": 1.6548984995586937,
      "grad_norm": 53.75,
      "learning_rate": 0.004834510150044131,
      "loss": 6.9923,
      "step": 15000
    },
    {
      "epoch": 1.7100617828773168,
      "grad_norm": 9.9375,
      "learning_rate": 0.004828993821712269,
      "loss": 6.9966,
      "step": 15500
    },
    {
      "epoch": 1.7652250661959399,
      "grad_norm": 13.0625,
      "learning_rate": 0.004823477493380406,
      "loss": 6.9908,
      "step": 16000
    },
    {
      "epoch": 1.820388349514563,
      "grad_norm": 8.9375,
      "learning_rate": 0.004817961165048544,
      "loss": 6.9937,
      "step": 16500
    },
    {
      "epoch": 1.8755516328331863,
      "grad_norm": 42.0,
      "learning_rate": 0.004812444836716681,
      "loss": 6.9894,
      "step": 17000
    },
    {
      "epoch": 1.9307149161518093,
      "grad_norm": 17.5,
      "learning_rate": 0.004806928508384819,
      "loss": 6.9926,
      "step": 17500
    },
    {
      "epoch": 1.9858781994704324,
      "grad_norm": 30.75,
      "learning_rate": 0.004801412180052957,
      "loss": 6.9989,
      "step": 18000
    },
    {
      "epoch": 2.0,
      "eval_loss": 6.996521472930908,
      "eval_runtime": 8.6549,
      "eval_samples_per_second": 57.771,
      "eval_steps_per_second": 1.849,
      "step": 18128
    },
    {
      "epoch": 2.0410414827890557,
      "grad_norm": 12.125,
      "learning_rate": 0.004795895851721094,
      "loss": 6.9915,
      "step": 18500
    },
    {
      "epoch": 2.096204766107679,
      "grad_norm": 18.625,
      "learning_rate": 0.004790379523389232,
      "loss": 6.9839,
      "step": 19000
    },
    {
      "epoch": 2.151368049426302,
      "grad_norm": 21.25,
      "learning_rate": 0.00478486319505737,
      "loss": 6.9821,
      "step": 19500
    },
    {
      "epoch": 2.206531332744925,
      "grad_norm": 21.375,
      "learning_rate": 0.004779346866725508,
      "loss": 6.9842,
      "step": 20000
    },
    {
      "epoch": 2.261694616063548,
      "grad_norm": 16.625,
      "learning_rate": 0.004773830538393645,
      "loss": 6.9836,
      "step": 20500
    },
    {
      "epoch": 2.316857899382171,
      "grad_norm": 17.125,
      "learning_rate": 0.004768314210061783,
      "loss": 6.9837,
      "step": 21000
    },
    {
      "epoch": 2.372021182700794,
      "grad_norm": 8.9375,
      "learning_rate": 0.004762797881729921,
      "loss": 6.9822,
      "step": 21500
    },
    {
      "epoch": 2.4271844660194173,
      "grad_norm": 8.25,
      "learning_rate": 0.004757281553398059,
      "loss": 6.9791,
      "step": 22000
    },
    {
      "epoch": 2.4823477493380404,
      "grad_norm": 8.375,
      "learning_rate": 0.0047517652250661955,
      "loss": 6.986,
      "step": 22500
    },
    {
      "epoch": 2.537511032656664,
      "grad_norm": 10.6875,
      "learning_rate": 0.004746248896734333,
      "loss": 6.9807,
      "step": 23000
    },
    {
      "epoch": 2.592674315975287,
      "grad_norm": 14.375,
      "learning_rate": 0.004740732568402472,
      "loss": 6.9822,
      "step": 23500
    },
    {
      "epoch": 2.64783759929391,
      "grad_norm": 10.625,
      "learning_rate": 0.004735216240070609,
      "loss": 6.9701,
      "step": 24000
    },
    {
      "epoch": 2.703000882612533,
      "grad_norm": 11.75,
      "learning_rate": 0.0047296999117387465,
      "loss": 6.9707,
      "step": 24500
    },
    {
      "epoch": 2.758164165931156,
      "grad_norm": 30.375,
      "learning_rate": 0.004724183583406884,
      "loss": 6.9792,
      "step": 25000
    },
    {
      "epoch": 2.8133274492497793,
      "grad_norm": 17.25,
      "learning_rate": 0.004718667255075022,
      "loss": 6.9803,
      "step": 25500
    },
    {
      "epoch": 2.8684907325684024,
      "grad_norm": 9.0,
      "learning_rate": 0.00471315092674316,
      "loss": 6.9752,
      "step": 26000
    },
    {
      "epoch": 2.9236540158870254,
      "grad_norm": 6.0,
      "learning_rate": 0.0047076345984112975,
      "loss": 6.9687,
      "step": 26500
    },
    {
      "epoch": 2.978817299205649,
      "grad_norm": 12.0625,
      "learning_rate": 0.004702118270079435,
      "loss": 6.9635,
      "step": 27000
    },
    {
      "epoch": 3.0,
      "eval_loss": 6.960501194000244,
      "eval_runtime": 8.9893,
      "eval_samples_per_second": 55.622,
      "eval_steps_per_second": 1.78,
      "step": 27192
    },
    {
      "epoch": 3.033980582524272,
      "grad_norm": 15.25,
      "learning_rate": 0.004696601941747573,
      "loss": 6.9586,
      "step": 27500
    },
    {
      "epoch": 3.089143865842895,
      "grad_norm": 12.1875,
      "learning_rate": 0.004691085613415711,
      "loss": 6.9644,
      "step": 28000
    },
    {
      "epoch": 3.144307149161518,
      "grad_norm": 12.125,
      "learning_rate": 0.0046855692850838486,
      "loss": 6.955,
      "step": 28500
    },
    {
      "epoch": 3.1994704324801413,
      "grad_norm": 11.3125,
      "learning_rate": 0.004680052956751986,
      "loss": 6.9499,
      "step": 29000
    },
    {
      "epoch": 3.2546337157987644,
      "grad_norm": 9.0625,
      "learning_rate": 0.004674536628420123,
      "loss": 6.9522,
      "step": 29500
    },
    {
      "epoch": 3.3097969991173875,
      "grad_norm": 10.25,
      "learning_rate": 0.004669020300088262,
      "loss": 6.952,
      "step": 30000
    },
    {
      "epoch": 3.3649602824360105,
      "grad_norm": 15.6875,
      "learning_rate": 0.0046635039717564,
      "loss": 6.9524,
      "step": 30500
    },
    {
      "epoch": 3.4201235657546336,
      "grad_norm": 18.875,
      "learning_rate": 0.0046579876434245365,
      "loss": 6.9502,
      "step": 31000
    },
    {
      "epoch": 3.4752868490732567,
      "grad_norm": 8.6875,
      "learning_rate": 0.004652471315092674,
      "loss": 6.9465,
      "step": 31500
    },
    {
      "epoch": 3.5304501323918798,
      "grad_norm": 16.375,
      "learning_rate": 0.004646954986760812,
      "loss": 6.9453,
      "step": 32000
    },
    {
      "epoch": 3.585613415710503,
      "grad_norm": 20.5,
      "learning_rate": 0.00464143865842895,
      "loss": 6.9477,
      "step": 32500
    },
    {
      "epoch": 3.6407766990291264,
      "grad_norm": 15.5625,
      "learning_rate": 0.0046359223300970875,
      "loss": 6.9507,
      "step": 33000
    },
    {
      "epoch": 3.6959399823477495,
      "grad_norm": 47.5,
      "learning_rate": 0.004630406001765225,
      "loss": 6.9513,
      "step": 33500
    },
    {
      "epoch": 3.7511032656663725,
      "grad_norm": 17.125,
      "learning_rate": 0.004624889673433363,
      "loss": 6.9489,
      "step": 34000
    },
    {
      "epoch": 3.8062665489849956,
      "grad_norm": 190.0,
      "learning_rate": 0.004619373345101501,
      "loss": 6.9486,
      "step": 34500
    },
    {
      "epoch": 3.8614298323036187,
      "grad_norm": 12.375,
      "learning_rate": 0.0046138570167696385,
      "loss": 6.954,
      "step": 35000
    },
    {
      "epoch": 3.9165931156222418,
      "grad_norm": 12.25,
      "learning_rate": 0.004608340688437776,
      "loss": 6.945,
      "step": 35500
    },
    {
      "epoch": 3.971756398940865,
      "grad_norm": 9.6875,
      "learning_rate": 0.004602824360105914,
      "loss": 6.9449,
      "step": 36000
    },
    {
      "epoch": 4.0,
      "eval_loss": 6.9535441398620605,
      "eval_runtime": 9.4952,
      "eval_samples_per_second": 52.658,
      "eval_steps_per_second": 1.685,
      "step": 36256
    },
    {
      "epoch": 4.026919682259488,
      "grad_norm": 13.375,
      "learning_rate": 0.004597308031774051,
      "loss": 6.9449,
      "step": 36500
    },
    {
      "epoch": 4.0820829655781115,
      "grad_norm": 12.0625,
      "learning_rate": 0.0045917917034421895,
      "loss": 6.9412,
      "step": 37000
    },
    {
      "epoch": 4.1372462488967345,
      "grad_norm": 12.8125,
      "learning_rate": 0.004586275375110326,
      "loss": 6.9405,
      "step": 37500
    },
    {
      "epoch": 4.192409532215358,
      "grad_norm": 10.6875,
      "learning_rate": 0.004580759046778464,
      "loss": 6.9472,
      "step": 38000
    },
    {
      "epoch": 4.247572815533981,
      "grad_norm": 12.625,
      "learning_rate": 0.004575242718446602,
      "loss": 6.9417,
      "step": 38500
    },
    {
      "epoch": 4.302736098852604,
      "grad_norm": 7.96875,
      "learning_rate": 0.00456972639011474,
      "loss": 6.9397,
      "step": 39000
    },
    {
      "epoch": 4.357899382171227,
      "grad_norm": 9.5625,
      "learning_rate": 0.004564210061782877,
      "loss": 6.9399,
      "step": 39500
    },
    {
      "epoch": 4.41306266548985,
      "grad_norm": 8.9375,
      "learning_rate": 0.004558693733451015,
      "loss": 6.9393,
      "step": 40000
    },
    {
      "epoch": 4.468225948808473,
      "grad_norm": 12.875,
      "learning_rate": 0.004553177405119153,
      "loss": 6.9348,
      "step": 40500
    },
    {
      "epoch": 4.523389232127096,
      "grad_norm": 20.125,
      "learning_rate": 0.004547661076787291,
      "loss": 6.94,
      "step": 41000
    },
    {
      "epoch": 4.578552515445719,
      "grad_norm": 12.0,
      "learning_rate": 0.0045421447484554275,
      "loss": 6.9417,
      "step": 41500
    },
    {
      "epoch": 4.633715798764342,
      "grad_norm": 14.6875,
      "learning_rate": 0.004536628420123566,
      "loss": 6.9362,
      "step": 42000
    },
    {
      "epoch": 4.688879082082965,
      "grad_norm": 12.3125,
      "learning_rate": 0.004531112091791704,
      "loss": 6.9365,
      "step": 42500
    },
    {
      "epoch": 4.744042365401588,
      "grad_norm": 9.1875,
      "learning_rate": 0.004525595763459841,
      "loss": 6.9306,
      "step": 43000
    },
    {
      "epoch": 4.7992056487202115,
      "grad_norm": 36.5,
      "learning_rate": 0.0045200794351279786,
      "loss": 6.9657,
      "step": 43500
    },
    {
      "epoch": 4.854368932038835,
      "grad_norm": 33.25,
      "learning_rate": 0.004514563106796117,
      "loss": 6.953,
      "step": 44000
    },
    {
      "epoch": 4.9095322153574585,
      "grad_norm": 20.375,
      "learning_rate": 0.004509046778464254,
      "loss": 6.9506,
      "step": 44500
    },
    {
      "epoch": 4.964695498676081,
      "grad_norm": 7.875,
      "learning_rate": 0.004503530450132392,
      "loss": 6.9484,
      "step": 45000
    },
    {
      "epoch": 5.0,
      "eval_loss": 6.946237564086914,
      "eval_runtime": 8.659,
      "eval_samples_per_second": 57.744,
      "eval_steps_per_second": 1.848,
      "step": 45320
    }
  ],
  "logging_steps": 500,
  "max_steps": 453200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.553070427093514e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
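
The state above is the trainer_state.json that the Hugging Face Trainer writes next to each checkpoint. Its fields are consistent with a run using an initial learning rate of 5e-3 on the default linear decay (each logged learning_rate equals 0.005 * (1 - step / 453200)), a batch size of 32, 50 epochs, per-epoch evaluation, and early stopping with patience 3. The following is a minimal sketch of such a configuration, inferred from the recorded fields rather than taken from the original training script; the eval_strategy argument name assumes a recent transformers release:

# Sketch of a Trainer configuration consistent with the state above;
# hyperparameters are inferred from the recorded fields, not known exactly.
from transformers import TrainingArguments, EarlyStoppingCallback

args = TrainingArguments(
    output_dir="./results/models",    # checkpoints land under this directory
    learning_rate=5e-3,               # matches the linear decay in log_history
    per_device_train_batch_size=32,   # matches "train_batch_size": 32
    num_train_epochs=50,              # matches "num_train_epochs": 50
    logging_steps=500,                # matches "logging_steps": 500
    eval_strategy="epoch",            # evals logged at steps 9064, 18128, ...
    save_strategy="epoch",
    load_best_model_at_end=True,      # required for early stopping
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)

# Produces the "EarlyStoppingCallback" entry under "stateful_callbacks"
# when passed to Trainer via callbacks=[early_stop].
early_stop = EarlyStoppingCallback(
    early_stopping_patience=3,
    early_stopping_threshold=0.0,
)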
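
To consume a state file like this one, split log_history into training records (which carry "loss") and evaluation records (which carry "eval_loss", the metric that best_metric tracks here). A minimal sketch, assuming the file sits at the checkpoint path recorded in best_model_checkpoint:

# Minimal sketch: read trainer_state.json and summarize the loss curves.
import json

with open("./results/models/checkpoint-45320/trainer_state.json") as f:
    state = json.load(f)

# Training logs are emitted every "logging_steps"; eval logs once per epoch.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.4f} (step {e['step']})")

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])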