{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.8674136321195145,
  "eval_steps": 50,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "eval_loss": 22.8203182220459,
      "eval_runtime": 3.0437,
      "eval_samples_per_second": 32.854,
      "eval_steps_per_second": 4.271,
      "step": 50
    },
    {
      "epoch": 0.09,
      "eval_loss": 21.029277801513672,
      "eval_runtime": 3.063,
      "eval_samples_per_second": 32.648,
      "eval_steps_per_second": 4.244,
      "step": 100
    },
    {
      "epoch": 0.14,
      "eval_loss": 20.389341354370117,
      "eval_runtime": 3.0675,
      "eval_samples_per_second": 32.6,
      "eval_steps_per_second": 4.238,
      "step": 150
    },
    {
      "epoch": 0.19,
      "eval_loss": 19.8281307220459,
      "eval_runtime": 3.071,
      "eval_samples_per_second": 32.563,
      "eval_steps_per_second": 4.233,
      "step": 200
    },
    {
      "epoch": 0.23,
      "eval_loss": 18.816303253173828,
      "eval_runtime": 3.0799,
      "eval_samples_per_second": 32.468,
      "eval_steps_per_second": 4.221,
      "step": 250
    },
    {
      "epoch": 0.28,
      "eval_loss": 18.291460037231445,
      "eval_runtime": 3.0838,
      "eval_samples_per_second": 32.427,
      "eval_steps_per_second": 4.216,
      "step": 300
    },
    {
      "epoch": 0.33,
      "eval_loss": 17.624303817749023,
      "eval_runtime": 3.0609,
      "eval_samples_per_second": 32.67,
      "eval_steps_per_second": 4.247,
      "step": 350
    },
    {
      "epoch": 0.37,
      "eval_loss": 17.041908264160156,
      "eval_runtime": 3.0724,
      "eval_samples_per_second": 32.547,
      "eval_steps_per_second": 4.231,
      "step": 400
    },
    {
      "epoch": 0.42,
      "eval_loss": 16.60989761352539,
      "eval_runtime": 3.0682,
      "eval_samples_per_second": 32.592,
      "eval_steps_per_second": 4.237,
      "step": 450
    },
    {
      "epoch": 0.47,
      "learning_rate": 8.75e-05,
      "loss": 20.2413,
      "step": 500
    },
    {
      "epoch": 0.47,
      "eval_loss": 15.918641090393066,
      "eval_runtime": 3.0677,
      "eval_samples_per_second": 32.598,
      "eval_steps_per_second": 4.238,
      "step": 500
    },
    {
      "epoch": 0.51,
      "eval_loss": 15.275712013244629,
      "eval_runtime": 3.0784,
      "eval_samples_per_second": 32.485,
      "eval_steps_per_second": 4.223,
      "step": 550
    },
    {
      "epoch": 0.56,
      "eval_loss": 14.903023719787598,
      "eval_runtime": 3.0814,
      "eval_samples_per_second": 32.453,
      "eval_steps_per_second": 4.219,
      "step": 600
    },
    {
      "epoch": 0.61,
      "eval_loss": 14.478271484375,
      "eval_runtime": 3.0668,
      "eval_samples_per_second": 32.608,
      "eval_steps_per_second": 4.239,
      "step": 650
    },
    {
      "epoch": 0.65,
      "eval_loss": 14.302887916564941,
      "eval_runtime": 3.0701,
      "eval_samples_per_second": 32.572,
      "eval_steps_per_second": 4.234,
      "step": 700
    },
    {
      "epoch": 0.7,
      "eval_loss": 13.83132553100586,
      "eval_runtime": 3.0703,
      "eval_samples_per_second": 32.57,
      "eval_steps_per_second": 4.234,
      "step": 750
    },
    {
      "epoch": 0.75,
      "eval_loss": 13.52160358428955,
      "eval_runtime": 3.0694,
      "eval_samples_per_second": 32.579,
      "eval_steps_per_second": 4.235,
      "step": 800
    },
    {
      "epoch": 0.79,
      "eval_loss": 12.77853012084961,
      "eval_runtime": 3.0776,
      "eval_samples_per_second": 32.493,
      "eval_steps_per_second": 4.224,
      "step": 850
    },
    {
      "epoch": 0.84,
      "eval_loss": 12.551227569580078,
      "eval_runtime": 3.0749,
      "eval_samples_per_second": 32.521,
      "eval_steps_per_second": 4.228,
      "step": 900
    },
    {
      "epoch": 0.89,
      "eval_loss": 12.139355659484863,
      "eval_runtime": 3.0686,
      "eval_samples_per_second": 32.588,
      "eval_steps_per_second": 4.236,
      "step": 950
    },
    {
      "epoch": 0.93,
      "learning_rate": 7.500000000000001e-05,
      "loss": 16.164,
      "step": 1000
    },
    {
      "epoch": 0.93,
      "eval_loss": 11.858689308166504,
      "eval_runtime": 3.0577,
      "eval_samples_per_second": 32.704,
      "eval_steps_per_second": 4.252,
      "step": 1000
    },
    {
      "epoch": 0.98,
      "eval_loss": 11.4910249710083,
      "eval_runtime": 3.071,
      "eval_samples_per_second": 32.562,
      "eval_steps_per_second": 4.233,
      "step": 1050
    },
    {
      "epoch": 1.03,
      "eval_loss": 11.197161674499512,
      "eval_runtime": 3.0645,
      "eval_samples_per_second": 32.632,
      "eval_steps_per_second": 4.242,
      "step": 1100
    },
    {
      "epoch": 1.07,
      "eval_loss": 10.903576850891113,
      "eval_runtime": 3.0551,
      "eval_samples_per_second": 32.732,
      "eval_steps_per_second": 4.255,
      "step": 1150
    },
    {
      "epoch": 1.12,
      "eval_loss": 10.947243690490723,
      "eval_runtime": 3.0615,
      "eval_samples_per_second": 32.664,
      "eval_steps_per_second": 4.246,
      "step": 1200
    },
    {
      "epoch": 1.17,
      "eval_loss": 10.560125350952148,
      "eval_runtime": 3.0709,
      "eval_samples_per_second": 32.563,
      "eval_steps_per_second": 4.233,
      "step": 1250
    },
    {
      "epoch": 1.21,
      "eval_loss": 10.332932472229004,
      "eval_runtime": 3.0883,
      "eval_samples_per_second": 32.38,
      "eval_steps_per_second": 4.209,
      "step": 1300
    },
    {
      "epoch": 1.26,
      "eval_loss": 9.66699504852295,
      "eval_runtime": 3.0701,
      "eval_samples_per_second": 32.572,
      "eval_steps_per_second": 4.234,
      "step": 1350
    },
    {
      "epoch": 1.31,
      "eval_loss": 9.532146453857422,
      "eval_runtime": 3.0603,
      "eval_samples_per_second": 32.676,
      "eval_steps_per_second": 4.248,
      "step": 1400
    },
    {
      "epoch": 1.35,
      "eval_loss": 9.379703521728516,
      "eval_runtime": 3.0723,
      "eval_samples_per_second": 32.549,
      "eval_steps_per_second": 4.231,
      "step": 1450
    },
    {
      "epoch": 1.4,
      "learning_rate": 6.25e-05,
      "loss": 12.3127,
      "step": 1500
    },
    {
      "epoch": 1.4,
      "eval_loss": 8.9696683883667,
      "eval_runtime": 3.0645,
      "eval_samples_per_second": 32.632,
      "eval_steps_per_second": 4.242,
      "step": 1500
    },
    {
      "epoch": 1.45,
      "eval_loss": 8.936990737915039,
      "eval_runtime": 3.0799,
      "eval_samples_per_second": 32.468,
      "eval_steps_per_second": 4.221,
      "step": 1550
    },
    {
      "epoch": 1.49,
      "eval_loss": 8.862835884094238,
      "eval_runtime": 3.0649,
      "eval_samples_per_second": 32.627,
      "eval_steps_per_second": 4.242,
      "step": 1600
    },
    {
      "epoch": 1.54,
      "eval_loss": 8.701496124267578,
      "eval_runtime": 3.0772,
      "eval_samples_per_second": 32.497,
      "eval_steps_per_second": 4.225,
      "step": 1650
    },
    {
      "epoch": 1.59,
      "eval_loss": 8.539968490600586,
      "eval_runtime": 3.0598,
      "eval_samples_per_second": 32.682,
      "eval_steps_per_second": 4.249,
      "step": 1700
    },
    {
      "epoch": 1.63,
      "eval_loss": 8.266422271728516,
      "eval_runtime": 3.0852,
      "eval_samples_per_second": 32.413,
      "eval_steps_per_second": 4.214,
      "step": 1750
    },
    {
      "epoch": 1.68,
      "eval_loss": 8.135841369628906,
      "eval_runtime": 3.0678,
      "eval_samples_per_second": 32.596,
      "eval_steps_per_second": 4.238,
      "step": 1800
    },
    {
      "epoch": 1.73,
      "eval_loss": 7.678401470184326,
      "eval_runtime": 3.0876,
      "eval_samples_per_second": 32.388,
      "eval_steps_per_second": 4.21,
      "step": 1850
    },
    {
      "epoch": 1.77,
      "eval_loss": 7.719874382019043,
      "eval_runtime": 3.0607,
      "eval_samples_per_second": 32.672,
      "eval_steps_per_second": 4.247,
      "step": 1900
    },
    {
      "epoch": 1.82,
      "eval_loss": 7.3947248458862305,
      "eval_runtime": 3.0791,
      "eval_samples_per_second": 32.477,
      "eval_steps_per_second": 4.222,
      "step": 1950
    },
    {
      "epoch": 1.87,
      "learning_rate": 5e-05,
      "loss": 10.9628,
      "step": 2000
    },
    {
      "epoch": 1.87,
      "eval_loss": 7.449285984039307,
      "eval_runtime": 3.0645,
      "eval_samples_per_second": 32.631,
      "eval_steps_per_second": 4.242,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 4000,
  "num_train_epochs": 4,
  "save_steps": 1000,
  "total_flos": 2.738687031115776e+16,
  "trial_name": null,
  "trial_params": null
}