{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9936305732484076,
  "eval_steps": 500,
  "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012738853503184714,
      "grad_norm": 7.021888236025707,
      "learning_rate": 2.5e-06,
      "loss": 1.1226,
      "step": 1
    },
    {
      "epoch": 0.06369426751592357,
      "grad_norm": 8.657087918321864,
      "learning_rate": 1.25e-05,
      "loss": 1.1459,
      "step": 5
    },
    {
      "epoch": 0.12738853503184713,
      "grad_norm": 1.0650511236817741,
      "learning_rate": 1.9959742939952393e-05,
      "loss": 0.9058,
      "step": 10
    },
    {
      "epoch": 0.1910828025477707,
      "grad_norm": 0.6764320700876563,
      "learning_rate": 1.9510565162951538e-05,
      "loss": 0.8611,
      "step": 15
    },
    {
      "epoch": 0.25477707006369427,
      "grad_norm": 0.6246578199704307,
      "learning_rate": 1.8584487936018663e-05,
      "loss": 0.8437,
      "step": 20
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 0.6107481502237895,
      "learning_rate": 1.7227948638273918e-05,
      "loss": 0.837,
      "step": 25
    },
    {
      "epoch": 0.3821656050955414,
      "grad_norm": 0.569359163431419,
      "learning_rate": 1.5508969814521026e-05,
      "loss": 0.8131,
      "step": 30
    },
    {
      "epoch": 0.445859872611465,
      "grad_norm": 0.6306438149635343,
      "learning_rate": 1.3513748240813429e-05,
      "loss": 0.8169,
      "step": 35
    },
    {
      "epoch": 0.5095541401273885,
      "grad_norm": 0.5969805073752579,
      "learning_rate": 1.1342332658176556e-05,
      "loss": 0.7991,
      "step": 40
    },
    {
      "epoch": 0.5732484076433121,
      "grad_norm": 0.5842201624755365,
      "learning_rate": 9.103606910965666e-06,
      "loss": 0.8228,
      "step": 45
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 0.5539821078021181,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.8094,
      "step": 50
    },
    {
      "epoch": 0.7006369426751592,
      "grad_norm": 0.6841832868254093,
      "learning_rate": 4.87100722594094e-06,
      "loss": 0.8048,
      "step": 55
    },
    {
      "epoch": 0.7643312101910829,
      "grad_norm": 0.5435122880502232,
      "learning_rate": 3.089373510131354e-06,
      "loss": 0.7957,
      "step": 60
    },
    {
      "epoch": 0.8280254777070064,
      "grad_norm": 0.5461217619161284,
      "learning_rate": 1.6542674627869738e-06,
      "loss": 0.8107,
      "step": 65
    },
    {
      "epoch": 0.89171974522293,
      "grad_norm": 0.5714446690787659,
      "learning_rate": 6.37651293602628e-07,
      "loss": 0.8012,
      "step": 70
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 0.5789472224989608,
      "learning_rate": 9.0502382320653e-08,
      "loss": 0.8019,
      "step": 75
    },
    {
      "epoch": 0.9936305732484076,
      "eval_loss": 0.8284274339675903,
      "eval_runtime": 9.6304,
      "eval_samples_per_second": 51.919,
      "eval_steps_per_second": 1.661,
      "step": 78
    },
    {
      "epoch": 0.9936305732484076,
      "step": 78,
      "total_flos": 11234023833600.0,
      "train_loss": 0.8443797582235092,
      "train_runtime": 656.715,
      "train_samples_per_second": 15.227,
      "train_steps_per_second": 0.119
    }
  ],
  "logging_steps": 5,
  "max_steps": 78,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 11234023833600.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}