{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8298755186721992,
"eval_steps": 500,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"grad_norm": 59.103092193603516,
"learning_rate": 5e-05,
"loss": 3.2072,
"step": 500
},
{
"epoch": 0.21,
"grad_norm": 4.993757247924805,
"learning_rate": 4.726477024070022e-05,
"loss": 1.7111,
"step": 1000
},
{
"epoch": 0.31,
"grad_norm": 21.068601608276367,
"learning_rate": 4.452954048140044e-05,
"loss": 1.1954,
"step": 1500
},
{
"epoch": 0.41,
"grad_norm": 46.41459274291992,
"learning_rate": 4.179431072210066e-05,
"loss": 0.8376,
"step": 2000
},
{
"epoch": 0.52,
"grad_norm": 0.6010065078735352,
"learning_rate": 3.9059080962800876e-05,
"loss": 0.6233,
"step": 2500
},
{
"epoch": 0.62,
"grad_norm": 57.68833541870117,
"learning_rate": 3.6323851203501094e-05,
"loss": 0.5353,
"step": 3000
},
{
"epoch": 0.73,
"grad_norm": 0.1653015911579132,
"learning_rate": 3.358862144420131e-05,
"loss": 0.3499,
"step": 3500
},
{
"epoch": 0.83,
"grad_norm": 8.111839294433594,
"learning_rate": 3.085339168490153e-05,
"loss": 0.3051,
"step": 4000
}
],
"logging_steps": 500,
"max_steps": 9640,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 4000,
"total_flos": 8668418408448000.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}