{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 4232,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19,
      "grad_norm": 6.645576477050781,
      "learning_rate": 1.9810964083175805e-05,
      "loss": 0.5588,
      "step": 100
    },
    {
      "epoch": 0.38,
      "grad_norm": 4.662398815155029,
      "learning_rate": 1.962192816635161e-05,
      "loss": 0.3737,
      "step": 200
    },
    {
      "epoch": 0.57,
      "grad_norm": 5.282985687255859,
      "learning_rate": 1.9432892249527412e-05,
      "loss": 0.2863,
      "step": 300
    },
    {
      "epoch": 0.76,
      "grad_norm": 1.5854848623275757,
      "learning_rate": 1.9243856332703215e-05,
      "loss": 0.323,
      "step": 400
    },
    {
      "epoch": 0.95,
      "grad_norm": 19.09914779663086,
      "learning_rate": 1.905482041587902e-05,
      "loss": 0.2956,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.20477035641670227,
      "eval_runtime": 4.245,
      "eval_samples_per_second": 91.638,
      "eval_steps_per_second": 6.596,
      "step": 529
    },
    {
      "epoch": 1.13,
      "grad_norm": 26.59105682373047,
      "learning_rate": 1.8865784499054822e-05,
      "loss": 0.2269,
      "step": 600
    },
    {
      "epoch": 1.32,
      "grad_norm": 9.394390106201172,
      "learning_rate": 1.8676748582230626e-05,
      "loss": 0.2393,
      "step": 700
    },
    {
      "epoch": 1.51,
      "grad_norm": 13.779593467712402,
      "learning_rate": 1.848771266540643e-05,
      "loss": 0.2135,
      "step": 800
    },
    {
      "epoch": 1.7,
      "grad_norm": 0.6053704619407654,
      "learning_rate": 1.8298676748582232e-05,
      "loss": 0.2241,
      "step": 900
    },
    {
      "epoch": 1.89,
      "grad_norm": 16.190296173095703,
      "learning_rate": 1.8109640831758036e-05,
      "loss": 0.2355,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.21921110153198242,
      "eval_runtime": 4.2197,
      "eval_samples_per_second": 92.187,
      "eval_steps_per_second": 6.636,
      "step": 1058
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.10688259452581406,
      "learning_rate": 1.792060491493384e-05,
      "loss": 0.1909,
      "step": 1100
    },
    {
      "epoch": 2.27,
      "grad_norm": 29.609710693359375,
      "learning_rate": 1.7731568998109643e-05,
      "loss": 0.1637,
      "step": 1200
    },
    {
      "epoch": 2.46,
      "grad_norm": 0.16163112223148346,
      "learning_rate": 1.7542533081285446e-05,
      "loss": 0.1828,
      "step": 1300
    },
    {
      "epoch": 2.65,
      "grad_norm": 2.127821207046509,
      "learning_rate": 1.735349716446125e-05,
      "loss": 0.1827,
      "step": 1400
    },
    {
      "epoch": 2.84,
      "grad_norm": 0.030160142108798027,
      "learning_rate": 1.7164461247637053e-05,
      "loss": 0.1528,
      "step": 1500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.2590792179107666,
      "eval_runtime": 4.2371,
      "eval_samples_per_second": 91.808,
      "eval_steps_per_second": 6.608,
      "step": 1587
    },
    {
      "epoch": 3.02,
      "grad_norm": 0.0637928918004036,
      "learning_rate": 1.6975425330812856e-05,
      "loss": 0.1574,
      "step": 1600
    },
    {
      "epoch": 3.21,
      "grad_norm": 0.01419603731483221,
      "learning_rate": 1.678638941398866e-05,
      "loss": 0.0918,
      "step": 1700
    },
    {
      "epoch": 3.4,
      "grad_norm": 9.31396770477295,
      "learning_rate": 1.6597353497164463e-05,
      "loss": 0.1175,
      "step": 1800
    },
    {
      "epoch": 3.59,
      "grad_norm": 0.023506687954068184,
      "learning_rate": 1.6408317580340267e-05,
      "loss": 0.1094,
      "step": 1900
    },
    {
      "epoch": 3.78,
      "grad_norm": 13.4344482421875,
      "learning_rate": 1.621928166351607e-05,
      "loss": 0.1103,
      "step": 2000
    },
    {
      "epoch": 3.97,
      "grad_norm": 30.01926040649414,
      "learning_rate": 1.6030245746691873e-05,
      "loss": 0.1398,
      "step": 2100
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.33959057927131653,
      "eval_runtime": 4.2467,
      "eval_samples_per_second": 91.601,
      "eval_steps_per_second": 6.593,
      "step": 2116
    },
    {
      "epoch": 4.16,
      "grad_norm": 0.12888406217098236,
      "learning_rate": 1.5841209829867677e-05,
      "loss": 0.0536,
      "step": 2200
    },
    {
      "epoch": 4.35,
      "grad_norm": 0.06800971180200577,
      "learning_rate": 1.565217391304348e-05,
      "loss": 0.1039,
      "step": 2300
    },
    {
      "epoch": 4.54,
      "grad_norm": 2.495684862136841,
      "learning_rate": 1.5463137996219284e-05,
      "loss": 0.0886,
      "step": 2400
    },
    {
      "epoch": 4.73,
      "grad_norm": 3.7909159660339355,
      "learning_rate": 1.5274102079395087e-05,
      "loss": 0.0766,
      "step": 2500
    },
    {
      "epoch": 4.91,
      "grad_norm": 0.025901077315211296,
      "learning_rate": 1.5085066162570889e-05,
      "loss": 0.0916,
      "step": 2600
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.3040069341659546,
      "eval_runtime": 4.2484,
      "eval_samples_per_second": 91.564,
      "eval_steps_per_second": 6.591,
      "step": 2645
    },
    {
      "epoch": 5.1,
      "grad_norm": 0.011547481641173363,
      "learning_rate": 1.4896030245746694e-05,
      "loss": 0.0644,
      "step": 2700
    },
    {
      "epoch": 5.29,
      "grad_norm": 0.026049258187413216,
      "learning_rate": 1.4706994328922497e-05,
      "loss": 0.0542,
      "step": 2800
    },
    {
      "epoch": 5.48,
      "grad_norm": 0.35288381576538086,
      "learning_rate": 1.45179584120983e-05,
      "loss": 0.0512,
      "step": 2900
    },
    {
      "epoch": 5.67,
      "grad_norm": 0.023430563509464264,
      "learning_rate": 1.4328922495274103e-05,
      "loss": 0.0491,
      "step": 3000
    },
    {
      "epoch": 5.86,
      "grad_norm": 0.0032355256844311953,
      "learning_rate": 1.4139886578449906e-05,
      "loss": 0.0326,
      "step": 3100
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.3605019450187683,
      "eval_runtime": 4.2462,
      "eval_samples_per_second": 91.611,
      "eval_steps_per_second": 6.594,
      "step": 3174
    },
    {
      "epoch": 6.05,
      "grad_norm": 0.1259104311466217,
      "learning_rate": 1.395085066162571e-05,
      "loss": 0.0665,
      "step": 3200
    },
    {
      "epoch": 6.24,
      "grad_norm": 0.08823427557945251,
      "learning_rate": 1.3761814744801514e-05,
      "loss": 0.0252,
      "step": 3300
    },
    {
      "epoch": 6.43,
      "grad_norm": 101.22339630126953,
      "learning_rate": 1.3572778827977318e-05,
      "loss": 0.0416,
      "step": 3400
    },
    {
      "epoch": 6.62,
      "grad_norm": 0.033835213631391525,
      "learning_rate": 1.338374291115312e-05,
      "loss": 0.0465,
      "step": 3500
    },
    {
      "epoch": 6.81,
      "grad_norm": 0.15451325476169586,
      "learning_rate": 1.3194706994328923e-05,
      "loss": 0.0358,
      "step": 3600
    },
    {
      "epoch": 6.99,
      "grad_norm": 0.0031314522493630648,
      "learning_rate": 1.3005671077504726e-05,
      "loss": 0.0344,
      "step": 3700
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.41286924481391907,
      "eval_runtime": 4.2519,
      "eval_samples_per_second": 91.488,
      "eval_steps_per_second": 6.585,
      "step": 3703
    },
    {
      "epoch": 7.18,
      "grad_norm": 0.0014451502356678247,
      "learning_rate": 1.281663516068053e-05,
      "loss": 0.0148,
      "step": 3800
    },
    {
      "epoch": 7.37,
      "grad_norm": 17.679460525512695,
      "learning_rate": 1.2627599243856335e-05,
      "loss": 0.0309,
      "step": 3900
    },
    {
      "epoch": 7.56,
      "grad_norm": 0.03577155992388725,
      "learning_rate": 1.2438563327032138e-05,
      "loss": 0.0235,
      "step": 4000
    },
    {
      "epoch": 7.75,
      "grad_norm": 28.639484405517578,
      "learning_rate": 1.224952741020794e-05,
      "loss": 0.0296,
      "step": 4100
    },
    {
      "epoch": 7.94,
      "grad_norm": 0.00975144561380148,
      "learning_rate": 1.2060491493383744e-05,
      "loss": 0.0218,
      "step": 4200
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.3556581735610962,
      "eval_runtime": 4.245,
      "eval_samples_per_second": 91.637,
      "eval_steps_per_second": 6.596,
      "step": 4232
    }
  ],
  "logging_steps": 100,
  "max_steps": 10580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 4.321523491876685e+16,
  "train_batch_size": 14,
  "trial_name": null,
  "trial_params": null
}