{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0198373338623289,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007934933544931561,
      "grad_norm": 48.7606315612793,
      "learning_rate": 6.666666666666667e-05,
      "loss": 29.6105,
      "step": 1
    },
    {
      "epoch": 0.0007934933544931561,
      "eval_loss": 3.7225465774536133,
      "eval_runtime": 212.1774,
      "eval_samples_per_second": 5.005,
      "eval_steps_per_second": 2.503,
      "step": 1
    },
    {
      "epoch": 0.0015869867089863122,
      "grad_norm": 46.967803955078125,
      "learning_rate": 0.00013333333333333334,
      "loss": 29.9806,
      "step": 2
    },
    {
      "epoch": 0.0023804800634794686,
      "grad_norm": 50.5672492980957,
      "learning_rate": 0.0002,
      "loss": 26.9378,
      "step": 3
    },
    {
      "epoch": 0.0031739734179726245,
      "grad_norm": 46.023780822753906,
      "learning_rate": 0.0001999048221581858,
      "loss": 21.9639,
      "step": 4
    },
    {
      "epoch": 0.003967466772465781,
      "grad_norm": 31.295907974243164,
      "learning_rate": 0.00019961946980917456,
      "loss": 10.6674,
      "step": 5
    },
    {
      "epoch": 0.004760960126958937,
      "grad_norm": 23.777172088623047,
      "learning_rate": 0.00019914448613738106,
      "loss": 6.2121,
      "step": 6
    },
    {
      "epoch": 0.005554453481452093,
      "grad_norm": 23.954105377197266,
      "learning_rate": 0.00019848077530122083,
      "loss": 4.3349,
      "step": 7
    },
    {
      "epoch": 0.006347946835945249,
      "grad_norm": 7.861544132232666,
      "learning_rate": 0.00019762960071199333,
      "loss": 3.1486,
      "step": 8
    },
    {
      "epoch": 0.007141440190438405,
      "grad_norm": 6.988774299621582,
      "learning_rate": 0.00019659258262890683,
      "loss": 2.6602,
      "step": 9
    },
    {
      "epoch": 0.007934933544931562,
      "grad_norm": 32.20113754272461,
      "learning_rate": 0.0001953716950748227,
      "loss": 4.0596,
      "step": 10
    },
    {
      "epoch": 0.008728426899424718,
      "grad_norm": 14.095051765441895,
      "learning_rate": 0.00019396926207859084,
      "loss": 2.995,
      "step": 11
    },
    {
      "epoch": 0.009521920253917874,
      "grad_norm": 13.5059232711792,
      "learning_rate": 0.0001923879532511287,
      "loss": 3.2571,
      "step": 12
    },
    {
      "epoch": 0.010315413608411029,
      "grad_norm": 7.761572360992432,
      "learning_rate": 0.000190630778703665,
      "loss": 3.0002,
      "step": 13
    },
    {
      "epoch": 0.011108906962904185,
      "grad_norm": 4.439308166503906,
      "learning_rate": 0.00018870108331782217,
      "loss": 2.6362,
      "step": 14
    },
    {
      "epoch": 0.011902400317397342,
      "grad_norm": 6.415318489074707,
      "learning_rate": 0.00018660254037844388,
      "loss": 3.1925,
      "step": 15
    },
    {
      "epoch": 0.012695893671890498,
      "grad_norm": 6.409482955932617,
      "learning_rate": 0.0001843391445812886,
      "loss": 3.062,
      "step": 16
    },
    {
      "epoch": 0.013489387026383654,
      "grad_norm": 3.2289886474609375,
      "learning_rate": 0.0001819152044288992,
      "loss": 2.8807,
      "step": 17
    },
    {
      "epoch": 0.01428288038087681,
      "grad_norm": 7.170831680297852,
      "learning_rate": 0.00017933533402912354,
      "loss": 3.1952,
      "step": 18
    },
    {
      "epoch": 0.015076373735369967,
      "grad_norm": 8.997602462768555,
      "learning_rate": 0.0001766044443118978,
      "loss": 2.9534,
      "step": 19
    },
    {
      "epoch": 0.015869867089863123,
      "grad_norm": 18.465681076049805,
      "learning_rate": 0.0001737277336810124,
      "loss": 6.0253,
      "step": 20
    },
    {
      "epoch": 0.01666336044435628,
      "grad_norm": 6.664717674255371,
      "learning_rate": 0.00017071067811865476,
      "loss": 3.1157,
      "step": 21
    },
    {
      "epoch": 0.017456853798849436,
      "grad_norm": 6.979747772216797,
      "learning_rate": 0.00016755902076156604,
      "loss": 3.0816,
      "step": 22
    },
    {
      "epoch": 0.018250347153342592,
      "grad_norm": 4.373898506164551,
      "learning_rate": 0.00016427876096865394,
      "loss": 2.921,
      "step": 23
    },
    {
      "epoch": 0.01904384050783575,
      "grad_norm": 3.7028324604034424,
      "learning_rate": 0.00016087614290087208,
      "loss": 2.8931,
      "step": 24
    },
    {
      "epoch": 0.0198373338623289,
      "grad_norm": 2.8346545696258545,
      "learning_rate": 0.0001573576436351046,
      "loss": 2.8573,
      "step": 25
    },
    {
      "epoch": 0.0198373338623289,
      "eval_loss": 0.37545695900917053,
      "eval_runtime": 213.9659,
      "eval_samples_per_second": 4.963,
      "eval_steps_per_second": 2.482,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.26729100951552e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}