{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.004363477691720301,
  "eval_steps": 500,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004363477691720301,
      "grad_norm": 6.069176197052002,
      "learning_rate": 0.0001,
      "loss": 3.7339,
      "step": 1
    },
    {
      "epoch": 0.0008726955383440602,
      "grad_norm": 5.544577598571777,
      "learning_rate": 0.0002,
      "loss": 3.7163,
      "step": 2
    },
    {
      "epoch": 0.0013090433075160903,
      "grad_norm": 4.14764404296875,
      "learning_rate": 0.0001923879532511287,
      "loss": 3.3414,
      "step": 3
    },
    {
      "epoch": 0.0017453910766881205,
      "grad_norm": 6.233680725097656,
      "learning_rate": 0.00017071067811865476,
      "loss": 3.8764,
      "step": 4
    },
    {
      "epoch": 0.0021817388458601505,
      "grad_norm": NaN,
      "learning_rate": 0.00017071067811865476,
      "loss": 2.2109,
      "step": 5
    },
    {
      "epoch": 0.0026180866150321805,
      "grad_norm": 4.936144828796387,
      "learning_rate": 0.000138268343236509,
      "loss": 3.3856,
      "step": 6
    },
    {
      "epoch": 0.003054434384204211,
      "grad_norm": NaN,
      "learning_rate": 0.000138268343236509,
      "loss": 2.5814,
      "step": 7
    },
    {
      "epoch": 0.003490782153376241,
      "grad_norm": 4.964231491088867,
      "learning_rate": 0.0001,
      "loss": 3.3529,
      "step": 8
    },
    {
      "epoch": 0.003927129922548271,
      "grad_norm": 5.627495765686035,
      "learning_rate": 6.173165676349103e-05,
      "loss": 3.4286,
      "step": 9
    },
    {
      "epoch": 0.004363477691720301,
      "grad_norm": 4.651506423950195,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 3.0882,
      "step": 10
    },
    {
      "epoch": 0.004363477691720301,
      "eval_loss": 2.8977768421173096,
      "eval_runtime": 33.2733,
      "eval_samples_per_second": 14.516,
      "eval_steps_per_second": 14.516,
      "step": 10
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1849564248145920.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}