{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.003926303287297427,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.9263032872974274e-05,
      "eval_loss": 3.7315213680267334,
      "eval_runtime": 773.5499,
      "eval_samples_per_second": 27.727,
      "eval_steps_per_second": 6.932,
      "step": 1
    },
    {
      "epoch": 0.00011778909861892282,
      "grad_norm": 31.5966854095459,
      "learning_rate": 3e-05,
      "loss": 3.5372,
      "step": 3
    },
    {
      "epoch": 0.00023557819723784564,
      "grad_norm": 11.457754135131836,
      "learning_rate": 6e-05,
      "loss": 1.4271,
      "step": 6
    },
    {
      "epoch": 0.00035336729585676844,
      "grad_norm": 0.03152690827846527,
      "learning_rate": 9e-05,
      "loss": 0.0468,
      "step": 9
    },
    {
      "epoch": 0.00035336729585676844,
      "eval_loss": 0.0013787680072709918,
      "eval_runtime": 777.5058,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.896,
      "step": 9
    },
    {
      "epoch": 0.0004711563944756913,
      "grad_norm": 0.14033684134483337,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.0189,
      "step": 12
    },
    {
      "epoch": 0.0005889454930946141,
      "grad_norm": 0.008016275241971016,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.0107,
      "step": 15
    },
    {
      "epoch": 0.0007067345917135369,
      "grad_norm": 0.9549577236175537,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.0004,
      "step": 18
    },
    {
      "epoch": 0.0007067345917135369,
      "eval_loss": 0.0004475166497286409,
      "eval_runtime": 777.492,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.897,
      "step": 18
    },
    {
      "epoch": 0.0008245236903324598,
      "grad_norm": 0.030346207320690155,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.0001,
      "step": 21
    },
    {
      "epoch": 0.0009423127889513826,
      "grad_norm": 0.0005559443379752338,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.0010601018875703054,
      "grad_norm": 6.331143231363967e-05,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.0010601018875703054,
      "eval_loss": 0.000494947365950793,
      "eval_runtime": 777.5031,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.896,
      "step": 27
    },
    {
      "epoch": 0.0011778909861892282,
      "grad_norm": 4.403312050271779e-05,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.001295680084808151,
      "grad_norm": 3.843701051664539e-05,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.0014134691834270738,
      "grad_norm": 4.2941730498569086e-05,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.0014134691834270738,
      "eval_loss": 0.0005166567279957235,
      "eval_runtime": 777.5045,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.896,
      "step": 36
    },
    {
      "epoch": 0.0015312582820459966,
      "grad_norm": 3.606522659538314e-05,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.0016490473806649196,
      "grad_norm": 3.24067659676075e-05,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.0017668364792838424,
      "grad_norm": 2.977346593979746e-05,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.0017668364792838424,
      "eval_loss": 0.0005258838646113873,
      "eval_runtime": 777.4979,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.896,
      "step": 45
    },
    {
      "epoch": 0.0018846255779027652,
      "grad_norm": 3.105065115960315e-05,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.0020024146765216877,
      "grad_norm": 3.0599792808061466e-05,
      "learning_rate": 5.695865504800327e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 0.0021202037751406107,
      "grad_norm": 3.959710738854483e-05,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.0021202037751406107,
      "eval_loss": 0.0005288115935400128,
      "eval_runtime": 777.4935,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.897,
      "step": 54
    },
    {
      "epoch": 0.0022379928737595338,
      "grad_norm": 3.3493728551547974e-05,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.0023557819723784563,
      "grad_norm": 3.4933964343508705e-05,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.0024735710709973793,
      "grad_norm": 2.990081520692911e-05,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.0024735710709973793,
      "eval_loss": 0.0005307694664224982,
      "eval_runtime": 777.489,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.897,
      "step": 63
    },
    {
      "epoch": 0.002591360169616302,
      "grad_norm": 3.213492163922638e-05,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 0.002709149268235225,
      "grad_norm": 2.8474241844378412e-05,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.0028269383668541475,
      "grad_norm": 3.170351919834502e-05,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.0028269383668541475,
      "eval_loss": 0.0005312738940119743,
      "eval_runtime": 777.5031,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.896,
      "step": 72
    },
    {
      "epoch": 0.0029447274654730705,
      "grad_norm": 3.209977876394987e-05,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.003062516564091993,
      "grad_norm": 2.9874341635149904e-05,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.0,
      "step": 78
    },
    {
      "epoch": 0.003180305662710916,
      "grad_norm": 3.113800266874023e-05,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 0.0,
      "step": 81
    },
    {
      "epoch": 0.003180305662710916,
      "eval_loss": 0.0005310006672516465,
      "eval_runtime": 777.5014,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.896,
      "step": 81
    },
    {
      "epoch": 0.003298094761329839,
      "grad_norm": 3.5468474379740655e-05,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.0,
      "step": 84
    },
    {
      "epoch": 0.0034158838599487617,
      "grad_norm": 2.9161385100451298e-05,
      "learning_rate": 5.060297685041659e-06,
      "loss": 0.0,
      "step": 87
    },
    {
      "epoch": 0.0035336729585676847,
      "grad_norm": 2.8734202714986168e-05,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.0,
      "step": 90
    },
    {
      "epoch": 0.0035336729585676847,
      "eval_loss": 0.0005303475772961974,
      "eval_runtime": 777.4997,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.896,
      "step": 90
    },
    {
      "epoch": 0.0036514620571866073,
      "grad_norm": 2.96364141831873e-05,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.0,
      "step": 93
    },
    {
      "epoch": 0.0037692511558055303,
      "grad_norm": 3.082916373386979e-05,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.0,
      "step": 96
    },
    {
      "epoch": 0.003887040254424453,
      "grad_norm": 2.8683465643553063e-05,
      "learning_rate": 3.04586490452119e-08,
      "loss": 0.0,
      "step": 99
    },
    {
      "epoch": 0.003887040254424453,
      "eval_loss": 0.0005314745358191431,
      "eval_runtime": 777.4915,
      "eval_samples_per_second": 27.586,
      "eval_steps_per_second": 6.897,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.02538779410432e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}