{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 110,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 0.0,
      "loss": 6.3097,
      "step": 1
    },
    {
      "epoch": 0.18,
      "learning_rate": 1e-05,
      "loss": 0.6836,
      "step": 2
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.5849625007211564e-05,
      "loss": 0.7616,
      "step": 3
    },
    {
      "epoch": 0.36,
      "learning_rate": 2e-05,
      "loss": 0.3457,
      "step": 4
    },
    {
      "epoch": 0.45,
      "learning_rate": 2e-05,
      "loss": 0.6649,
      "step": 5
    },
    {
      "epoch": 0.55,
      "learning_rate": 2e-05,
      "loss": 0.3251,
      "step": 6
    },
    {
      "epoch": 0.64,
      "learning_rate": 2e-05,
      "loss": 0.556,
      "step": 7
    },
    {
      "epoch": 0.73,
      "learning_rate": 2e-05,
      "loss": 0.4171,
      "step": 8
    },
    {
      "epoch": 0.82,
      "learning_rate": 2e-05,
      "loss": 0.3769,
      "step": 9
    },
    {
      "epoch": 0.91,
      "learning_rate": 2e-05,
      "loss": 0.3902,
      "step": 10
    },
    {
      "epoch": 1.0,
      "learning_rate": 2e-05,
      "loss": 0.333,
      "step": 11
    },
    {
      "epoch": 1.09,
      "learning_rate": 2e-05,
      "loss": 0.2863,
      "step": 12
    },
    {
      "epoch": 1.18,
      "learning_rate": 2e-05,
      "loss": 0.383,
      "step": 13
    },
    {
      "epoch": 1.27,
      "learning_rate": 2e-05,
      "loss": 0.3927,
      "step": 14
    },
    {
      "epoch": 1.36,
      "learning_rate": 2e-05,
      "loss": 0.3603,
      "step": 15
    },
    {
      "epoch": 1.45,
      "learning_rate": 2e-05,
      "loss": 0.33,
      "step": 16
    },
    {
      "epoch": 1.55,
      "learning_rate": 2e-05,
      "loss": 0.2814,
      "step": 17
    },
    {
      "epoch": 1.64,
      "learning_rate": 2e-05,
      "loss": 0.3283,
      "step": 18
    },
    {
      "epoch": 1.73,
      "learning_rate": 2e-05,
      "loss": 0.3627,
      "step": 19
    },
    {
      "epoch": 1.82,
      "learning_rate": 2e-05,
      "loss": 0.3356,
      "step": 20
    },
    {
      "epoch": 1.91,
      "learning_rate": 2e-05,
      "loss": 0.2905,
      "step": 21
    },
    {
      "epoch": 2.0,
      "learning_rate": 2e-05,
      "loss": 0.2514,
      "step": 22
    },
    {
      "epoch": 2.09,
      "learning_rate": 2e-05,
      "loss": 0.2527,
      "step": 23
    },
    {
      "epoch": 2.18,
      "learning_rate": 2e-05,
      "loss": 0.2567,
      "step": 24
    },
    {
      "epoch": 2.27,
      "learning_rate": 2e-05,
      "loss": 0.2435,
      "step": 25
    },
    {
      "epoch": 2.36,
      "learning_rate": 2e-05,
      "loss": 0.1736,
      "step": 26
    },
    {
      "epoch": 2.45,
      "learning_rate": 2e-05,
      "loss": 0.2297,
      "step": 27
    },
    {
      "epoch": 2.55,
      "learning_rate": 2e-05,
      "loss": 0.3149,
      "step": 28
    },
    {
      "epoch": 2.64,
      "learning_rate": 2e-05,
      "loss": 0.2916,
      "step": 29
    },
    {
      "epoch": 2.73,
      "learning_rate": 2e-05,
      "loss": 0.2716,
      "step": 30
    },
    {
      "epoch": 2.82,
      "learning_rate": 2e-05,
      "loss": 0.2821,
      "step": 31
    },
    {
      "epoch": 2.91,
      "learning_rate": 2e-05,
      "loss": 0.2358,
      "step": 32
    },
    {
      "epoch": 3.0,
      "learning_rate": 2e-05,
      "loss": 0.1992,
      "step": 33
    },
    {
      "epoch": 3.09,
      "learning_rate": 2e-05,
      "loss": 0.2099,
      "step": 34
    },
    {
      "epoch": 3.18,
      "learning_rate": 2e-05,
      "loss": 0.1803,
      "step": 35
    },
    {
      "epoch": 3.27,
      "learning_rate": 2e-05,
      "loss": 0.1684,
      "step": 36
    },
    {
      "epoch": 3.36,
      "learning_rate": 2e-05,
      "loss": 0.1741,
      "step": 37
    },
    {
      "epoch": 3.45,
      "learning_rate": 2e-05,
      "loss": 0.1489,
      "step": 38
    },
    {
      "epoch": 3.55,
      "learning_rate": 2e-05,
      "loss": 0.1206,
      "step": 39
    },
    {
      "epoch": 3.64,
      "learning_rate": 2e-05,
      "loss": 0.1229,
      "step": 40
    },
    {
      "epoch": 3.73,
      "learning_rate": 2e-05,
      "loss": 0.1768,
      "step": 41
    },
    {
      "epoch": 3.82,
      "learning_rate": 2e-05,
      "loss": 0.2359,
      "step": 42
    },
    {
      "epoch": 3.91,
      "learning_rate": 2e-05,
      "loss": 0.1988,
      "step": 43
    },
    {
      "epoch": 4.0,
      "learning_rate": 2e-05,
      "loss": 0.1007,
      "step": 44
    },
    {
      "epoch": 4.09,
      "learning_rate": 2e-05,
      "loss": 0.0862,
      "step": 45
    },
    {
      "epoch": 4.18,
      "learning_rate": 2e-05,
      "loss": 0.0499,
      "step": 46
    },
    {
      "epoch": 4.27,
      "learning_rate": 2e-05,
      "loss": 0.1121,
      "step": 47
    },
    {
      "epoch": 4.36,
      "learning_rate": 2e-05,
      "loss": 0.1171,
      "step": 48
    },
    {
      "epoch": 4.45,
      "learning_rate": 2e-05,
      "loss": 0.0602,
      "step": 49
    },
    {
      "epoch": 4.55,
      "learning_rate": 2e-05,
      "loss": 0.1707,
      "step": 50
    },
    {
      "epoch": 4.64,
      "learning_rate": 2e-05,
      "loss": 0.1781,
      "step": 51
    },
    {
      "epoch": 4.73,
      "learning_rate": 2e-05,
      "loss": 0.2078,
      "step": 52
    },
    {
      "epoch": 4.82,
      "learning_rate": 2e-05,
      "loss": 0.0712,
      "step": 53
    },
    {
      "epoch": 4.91,
      "learning_rate": 2e-05,
      "loss": 0.095,
      "step": 54
    },
    {
      "epoch": 5.0,
      "learning_rate": 2e-05,
      "loss": 0.0861,
      "step": 55
    },
    {
      "epoch": 5.09,
      "learning_rate": 2e-05,
      "loss": 0.0746,
      "step": 56
    },
    {
      "epoch": 5.18,
      "learning_rate": 2e-05,
      "loss": 0.0783,
      "step": 57
    },
    {
      "epoch": 5.27,
      "learning_rate": 2e-05,
      "loss": 0.084,
      "step": 58
    },
    {
      "epoch": 5.36,
      "learning_rate": 2e-05,
      "loss": 0.0676,
      "step": 59
    },
    {
      "epoch": 5.45,
      "learning_rate": 2e-05,
      "loss": 0.061,
      "step": 60
    },
    {
      "epoch": 5.55,
      "learning_rate": 2e-05,
      "loss": 0.0978,
      "step": 61
    },
    {
      "epoch": 5.64,
      "learning_rate": 2e-05,
      "loss": 0.0711,
      "step": 62
    },
    {
      "epoch": 5.73,
      "learning_rate": 2e-05,
      "loss": 0.1171,
      "step": 63
    },
    {
      "epoch": 5.82,
      "learning_rate": 2e-05,
      "loss": 0.08,
      "step": 64
    },
    {
      "epoch": 5.91,
      "learning_rate": 2e-05,
      "loss": 0.0684,
      "step": 65
    },
    {
      "epoch": 6.0,
      "learning_rate": 2e-05,
      "loss": 0.0481,
      "step": 66
    },
    {
      "epoch": 6.09,
      "learning_rate": 2e-05,
      "loss": 0.0073,
      "step": 67
    },
    {
      "epoch": 6.18,
      "learning_rate": 2e-05,
      "loss": 0.0563,
      "step": 68
    },
    {
      "epoch": 6.27,
      "learning_rate": 2e-05,
      "loss": 0.0255,
      "step": 69
    },
    {
      "epoch": 6.36,
      "learning_rate": 2e-05,
      "loss": 0.0696,
      "step": 70
    },
    {
      "epoch": 6.45,
      "learning_rate": 2e-05,
      "loss": 0.0765,
      "step": 71
    },
    {
      "epoch": 6.55,
      "learning_rate": 2e-05,
      "loss": 0.0109,
      "step": 72
    },
    {
      "epoch": 6.64,
      "learning_rate": 2e-05,
      "loss": 0.0341,
      "step": 73
    },
    {
      "epoch": 6.73,
      "learning_rate": 2e-05,
      "loss": 0.0641,
      "step": 74
    },
    {
      "epoch": 6.82,
      "learning_rate": 2e-05,
      "loss": 0.1228,
      "step": 75
    },
    {
      "epoch": 6.91,
      "learning_rate": 2e-05,
      "loss": 0.0827,
      "step": 76
    },
    {
      "epoch": 7.0,
      "learning_rate": 2e-05,
      "loss": 0.0406,
      "step": 77
    },
    {
      "epoch": 7.09,
      "learning_rate": 2e-05,
      "loss": 0.0545,
      "step": 78
    },
    {
      "epoch": 7.18,
      "learning_rate": 2e-05,
      "loss": 0.0592,
      "step": 79
    },
    {
      "epoch": 7.27,
      "learning_rate": 2e-05,
      "loss": 0.0684,
      "step": 80
    },
    {
      "epoch": 7.36,
      "learning_rate": 2e-05,
      "loss": 0.0492,
      "step": 81
    },
    {
      "epoch": 7.45,
      "learning_rate": 2e-05,
      "loss": 0.0737,
      "step": 82
    },
    {
      "epoch": 7.55,
      "learning_rate": 2e-05,
      "loss": 0.0837,
      "step": 83
    },
    {
      "epoch": 7.64,
      "learning_rate": 2e-05,
      "loss": 0.0534,
      "step": 84
    },
    {
      "epoch": 7.73,
      "learning_rate": 2e-05,
      "loss": 0.0535,
      "step": 85
    },
    {
      "epoch": 7.82,
      "learning_rate": 2e-05,
      "loss": 0.0884,
      "step": 86
    },
    {
      "epoch": 7.91,
      "learning_rate": 2e-05,
      "loss": 0.0774,
      "step": 87
    },
    {
      "epoch": 8.0,
      "learning_rate": 2e-05,
      "loss": 0.0364,
      "step": 88
    },
    {
      "epoch": 8.09,
      "learning_rate": 2e-05,
      "loss": 0.0125,
      "step": 89
    },
    {
      "epoch": 8.18,
      "learning_rate": 2e-05,
      "loss": 0.0412,
      "step": 90
    },
    {
      "epoch": 8.27,
      "learning_rate": 2e-05,
      "loss": 0.0517,
      "step": 91
    },
    {
      "epoch": 8.36,
      "learning_rate": 2e-05,
      "loss": 0.0257,
      "step": 92
    },
    {
      "epoch": 8.45,
      "learning_rate": 2e-05,
      "loss": 0.038,
      "step": 93
    },
    {
      "epoch": 8.55,
      "learning_rate": 2e-05,
      "loss": 0.0745,
      "step": 94
    },
    {
      "epoch": 8.64,
      "learning_rate": 2e-05,
      "loss": 0.0161,
      "step": 95
    },
    {
      "epoch": 8.73,
      "learning_rate": 2e-05,
      "loss": 0.0536,
      "step": 96
    },
    {
      "epoch": 8.82,
      "learning_rate": 2e-05,
      "loss": 0.0205,
      "step": 97
    },
    {
      "epoch": 8.91,
      "learning_rate": 2e-05,
      "loss": 0.0299,
      "step": 98
    },
    {
      "epoch": 9.0,
      "learning_rate": 2e-05,
      "loss": 0.0165,
      "step": 99
    },
    {
      "epoch": 9.09,
      "learning_rate": 2e-05,
      "loss": 0.037,
      "step": 100
    },
    {
      "epoch": 9.18,
      "learning_rate": 2e-05,
      "loss": 0.0063,
      "step": 101
    },
    {
      "epoch": 9.27,
      "learning_rate": 2e-05,
      "loss": 0.0218,
      "step": 102
    },
    {
      "epoch": 9.36,
      "learning_rate": 2e-05,
      "loss": 0.0193,
      "step": 103
    },
    {
      "epoch": 9.45,
      "learning_rate": 2e-05,
      "loss": 0.0014,
      "step": 104
    },
    {
      "epoch": 9.55,
      "learning_rate": 2e-05,
      "loss": 0.0017,
      "step": 105
    },
    {
      "epoch": 9.64,
      "learning_rate": 2e-05,
      "loss": 0.0097,
      "step": 106
    },
    {
      "epoch": 9.73,
      "learning_rate": 2e-05,
      "loss": 0.0068,
      "step": 107
    },
    {
      "epoch": 9.82,
      "learning_rate": 2e-05,
      "loss": 0.0034,
      "step": 108
    },
    {
      "epoch": 9.91,
      "learning_rate": 2e-05,
      "loss": 0.0533,
      "step": 109
    },
    {
      "epoch": 10.0,
      "learning_rate": 2e-05,
      "loss": 0.0278,
      "step": 110
    },
    {
      "epoch": 10.0,
      "step": 110,
      "total_flos": 3159825776640.0,
      "train_loss": 0.21176275113089518,
      "train_runtime": 3825.7954,
      "train_samples_per_second": 3.461,
      "train_steps_per_second": 0.029
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 110,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 50000,
  "total_flos": 3159825776640.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}