{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8942544153811759,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017885088307623517,
      "grad_norm": 2.1336276531219482,
      "learning_rate": 2e-05,
      "loss": 2.0684,
      "step": 10
    },
    {
      "epoch": 0.035770176615247035,
      "grad_norm": 1.5654758214950562,
      "learning_rate": 4e-05,
      "loss": 1.7055,
      "step": 20
    },
    {
      "epoch": 0.05365526492287056,
      "grad_norm": 1.2047432661056519,
      "learning_rate": 6e-05,
      "loss": 0.9471,
      "step": 30
    },
    {
      "epoch": 0.07154035323049407,
      "grad_norm": 0.1950729787349701,
      "learning_rate": 8e-05,
      "loss": 0.1249,
      "step": 40
    },
    {
      "epoch": 0.0894254415381176,
      "grad_norm": 0.09746120870113373,
      "learning_rate": 0.0001,
      "loss": 0.0457,
      "step": 50
    },
    {
      "epoch": 0.0894254415381176,
      "eval_loss": 0.04336774721741676,
      "eval_runtime": 342.8992,
      "eval_samples_per_second": 5.798,
      "eval_steps_per_second": 2.899,
      "step": 50
    },
    {
      "epoch": 0.10731052984574112,
      "grad_norm": 0.10309002548456192,
      "learning_rate": 9.777777777777778e-05,
      "loss": 0.0417,
      "step": 60
    },
    {
      "epoch": 0.12519561815336464,
      "grad_norm": 0.10960160195827484,
      "learning_rate": 9.555555555555557e-05,
      "loss": 0.038,
      "step": 70
    },
    {
      "epoch": 0.14308070646098814,
      "grad_norm": 0.1561357080936432,
      "learning_rate": 9.333333333333334e-05,
      "loss": 0.032,
      "step": 80
    },
    {
      "epoch": 0.16096579476861167,
      "grad_norm": 0.0917748436331749,
      "learning_rate": 9.111111111111112e-05,
      "loss": 0.0213,
      "step": 90
    },
    {
      "epoch": 0.1788508830762352,
      "grad_norm": 0.06082587316632271,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.0156,
      "step": 100
    },
    {
      "epoch": 0.1788508830762352,
      "eval_loss": 0.010981782339513302,
      "eval_runtime": 343.0451,
      "eval_samples_per_second": 5.795,
      "eval_steps_per_second": 2.898,
      "step": 100
    },
    {
      "epoch": 0.1967359713838587,
      "grad_norm": 0.11666049808263779,
      "learning_rate": 8.666666666666667e-05,
      "loss": 0.0085,
      "step": 110
    },
    {
      "epoch": 0.21462105969148224,
      "grad_norm": 0.05114075168967247,
      "learning_rate": 8.444444444444444e-05,
      "loss": 0.0038,
      "step": 120
    },
    {
      "epoch": 0.23250614799910574,
      "grad_norm": 0.03657279163599014,
      "learning_rate": 8.222222222222222e-05,
      "loss": 0.0031,
      "step": 130
    },
    {
      "epoch": 0.25039123630672927,
      "grad_norm": 0.013491852208971977,
      "learning_rate": 8e-05,
      "loss": 0.0029,
      "step": 140
    },
    {
      "epoch": 0.2682763246143528,
      "grad_norm": 0.006547242868691683,
      "learning_rate": 7.777777777777778e-05,
      "loss": 0.0029,
      "step": 150
    },
    {
      "epoch": 0.2682763246143528,
      "eval_loss": 0.002737525850534439,
      "eval_runtime": 342.9108,
      "eval_samples_per_second": 5.797,
      "eval_steps_per_second": 2.899,
      "step": 150
    },
    {
      "epoch": 0.2861614129219763,
      "grad_norm": 0.005285318940877914,
      "learning_rate": 7.555555555555556e-05,
      "loss": 0.0028,
      "step": 160
    },
    {
      "epoch": 0.3040465012295998,
      "grad_norm": 0.006457226816564798,
      "learning_rate": 7.333333333333333e-05,
      "loss": 0.0028,
      "step": 170
    },
    {
      "epoch": 0.32193158953722334,
      "grad_norm": 0.0023280009627342224,
      "learning_rate": 7.111111111111112e-05,
      "loss": 0.0027,
      "step": 180
    },
    {
      "epoch": 0.33981667784484687,
      "grad_norm": 0.0014506684383377433,
      "learning_rate": 6.88888888888889e-05,
      "loss": 0.0028,
      "step": 190
    },
    {
      "epoch": 0.3577017661524704,
      "grad_norm": 0.002698718337342143,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.0028,
      "step": 200
    },
    {
      "epoch": 0.3577017661524704,
      "eval_loss": 0.0027305446565151215,
      "eval_runtime": 342.6882,
      "eval_samples_per_second": 5.801,
      "eval_steps_per_second": 2.901,
      "step": 200
    },
    {
      "epoch": 0.3755868544600939,
      "grad_norm": 0.003260605735704303,
      "learning_rate": 6.444444444444446e-05,
      "loss": 0.0028,
      "step": 210
    },
    {
      "epoch": 0.3934719427677174,
      "grad_norm": 0.002418739954009652,
      "learning_rate": 6.222222222222222e-05,
      "loss": 0.0028,
      "step": 220
    },
    {
      "epoch": 0.41135703107534094,
      "grad_norm": 0.001470407354645431,
      "learning_rate": 6e-05,
      "loss": 0.0028,
      "step": 230
    },
    {
      "epoch": 0.42924211938296447,
      "grad_norm": 0.004997240845113993,
      "learning_rate": 5.7777777777777776e-05,
      "loss": 0.0027,
      "step": 240
    },
    {
      "epoch": 0.44712720769058795,
      "grad_norm": 0.0029559540562331676,
      "learning_rate": 5.555555555555556e-05,
      "loss": 0.0028,
      "step": 250
    },
    {
      "epoch": 0.44712720769058795,
      "eval_loss": 0.0027349325828254223,
      "eval_runtime": 342.8604,
      "eval_samples_per_second": 5.798,
      "eval_steps_per_second": 2.899,
      "step": 250
    },
    {
      "epoch": 0.4650122959982115,
      "grad_norm": 0.004728947766125202,
      "learning_rate": 5.333333333333333e-05,
      "loss": 0.0028,
      "step": 260
    },
    {
      "epoch": 0.482897384305835,
      "grad_norm": 0.003956568893045187,
      "learning_rate": 5.111111111111111e-05,
      "loss": 0.0027,
      "step": 270
    },
    {
      "epoch": 0.5007824726134585,
      "grad_norm": 0.0033387644216418266,
      "learning_rate": 4.888888888888889e-05,
      "loss": 0.0028,
      "step": 280
    },
    {
      "epoch": 0.518667560921082,
      "grad_norm": 0.0044438643380999565,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.0028,
      "step": 290
    },
    {
      "epoch": 0.5365526492287056,
      "grad_norm": 0.002528380835428834,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.0028,
      "step": 300
    },
    {
      "epoch": 0.5365526492287056,
      "eval_loss": 0.0027327719144523144,
      "eval_runtime": 342.796,
      "eval_samples_per_second": 5.799,
      "eval_steps_per_second": 2.9,
      "step": 300
    },
    {
      "epoch": 0.5544377375363291,
      "grad_norm": 0.003869276260957122,
      "learning_rate": 4.222222222222222e-05,
      "loss": 0.0028,
      "step": 310
    },
    {
      "epoch": 0.5723228258439526,
      "grad_norm": 0.0063848476856946945,
      "learning_rate": 4e-05,
      "loss": 0.0027,
      "step": 320
    },
    {
      "epoch": 0.5902079141515761,
      "grad_norm": 0.00741184875369072,
      "learning_rate": 3.777777777777778e-05,
      "loss": 0.0027,
      "step": 330
    },
    {
      "epoch": 0.6080930024591996,
      "grad_norm": 0.007532441522926092,
      "learning_rate": 3.555555555555556e-05,
      "loss": 0.0028,
      "step": 340
    },
    {
      "epoch": 0.6259780907668232,
      "grad_norm": 0.0035441338550299406,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.0028,
      "step": 350
    },
    {
      "epoch": 0.6259780907668232,
      "eval_loss": 0.00272406660951674,
      "eval_runtime": 342.7543,
      "eval_samples_per_second": 5.8,
      "eval_steps_per_second": 2.9,
      "step": 350
    },
    {
      "epoch": 0.6438631790744467,
      "grad_norm": 0.002798376139253378,
      "learning_rate": 3.111111111111111e-05,
      "loss": 0.0028,
      "step": 360
    },
    {
      "epoch": 0.6617482673820702,
      "grad_norm": 0.0012276788474991918,
      "learning_rate": 2.8888888888888888e-05,
      "loss": 0.0028,
      "step": 370
    },
    {
      "epoch": 0.6796333556896937,
      "grad_norm": 0.0022674233186990023,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.0027,
      "step": 380
    },
    {
      "epoch": 0.6975184439973172,
      "grad_norm": 0.004862118978053331,
      "learning_rate": 2.4444444444444445e-05,
      "loss": 0.0028,
      "step": 390
    },
    {
      "epoch": 0.7154035323049408,
      "grad_norm": 0.0016343879979103804,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.0027,
      "step": 400
    },
    {
      "epoch": 0.7154035323049408,
      "eval_loss": 0.0027308915741741657,
      "eval_runtime": 343.0129,
      "eval_samples_per_second": 5.796,
      "eval_steps_per_second": 2.898,
      "step": 400
    },
    {
      "epoch": 0.7332886206125643,
      "grad_norm": 0.0016705788439139724,
      "learning_rate": 2e-05,
      "loss": 0.0027,
      "step": 410
    },
    {
      "epoch": 0.7511737089201878,
      "grad_norm": 0.0037323199212551117,
      "learning_rate": 1.777777777777778e-05,
      "loss": 0.0028,
      "step": 420
    },
    {
      "epoch": 0.7690587972278113,
      "grad_norm": 0.002069295383989811,
      "learning_rate": 1.5555555555555555e-05,
      "loss": 0.0028,
      "step": 430
    },
    {
      "epoch": 0.7869438855354348,
      "grad_norm": 0.00587103795260191,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.0027,
      "step": 440
    },
    {
      "epoch": 0.8048289738430584,
      "grad_norm": 0.004777251742780209,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.0027,
      "step": 450
    },
    {
      "epoch": 0.8048289738430584,
      "eval_loss": 0.0027228216640651226,
      "eval_runtime": 342.6715,
      "eval_samples_per_second": 5.801,
      "eval_steps_per_second": 2.901,
      "step": 450
    },
    {
      "epoch": 0.8227140621506819,
      "grad_norm": 0.004516826942563057,
      "learning_rate": 8.88888888888889e-06,
      "loss": 0.0028,
      "step": 460
    },
    {
      "epoch": 0.8405991504583054,
      "grad_norm": 0.0023202409502118826,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.0027,
      "step": 470
    },
    {
      "epoch": 0.8584842387659289,
      "grad_norm": 0.003505381755530834,
      "learning_rate": 4.444444444444445e-06,
      "loss": 0.0028,
      "step": 480
    },
    {
      "epoch": 0.8763693270735524,
      "grad_norm": 0.004354626871645451,
      "learning_rate": 2.2222222222222225e-06,
      "loss": 0.0027,
      "step": 490
    },
    {
      "epoch": 0.8942544153811759,
      "grad_norm": 0.0022386154159903526,
      "learning_rate": 0.0,
      "loss": 0.0027,
      "step": 500
    },
    {
      "epoch": 0.8942544153811759,
      "eval_loss": 0.0027302990201860666,
      "eval_runtime": 342.9424,
      "eval_samples_per_second": 5.797,
      "eval_steps_per_second": 2.898,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.710113009369088e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}