{
"best_metric": 0.7881951332092285,
"best_model_checkpoint": "./Zeroshot/01-12-23-NousResearch-Nous-Hermes-Llama2-13b_multilang-dataset-3.0.3-portuguese-2_epochs-10_batch_2/checkpoints/checkpoint-6773",
"epoch": 2.0,
"global_step": 6773,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"learning_rate": 5.8948611931482576e-05,
"loss": 1.3183,
"step": 500
},
{
"epoch": 0.3,
"learning_rate": 0.00011801535735380982,
"loss": 0.9343,
"step": 1000
},
{
"epoch": 0.44,
"learning_rate": 0.00017708210277613703,
"loss": 0.9005,
"step": 1500
},
{
"epoch": 0.59,
"learning_rate": 0.00023614884819846425,
"loss": 0.8859,
"step": 2000
},
{
"epoch": 0.74,
"learning_rate": 0.0002952155936207915,
"loss": 0.8707,
"step": 2500
},
{
"epoch": 0.89,
"learning_rate": 0.00035416420555227406,
"loss": 0.8523,
"step": 3000
},
{
"epoch": 1.0,
"eval_loss": 0.8459745645523071,
"eval_runtime": 133.0397,
"eval_samples_per_second": 11.32,
"eval_steps_per_second": 1.421,
"step": 3386
},
{
"epoch": 1.03,
"learning_rate": 0.00039998666871418663,
"loss": 0.8395,
"step": 3500
},
{
"epoch": 1.18,
"learning_rate": 0.00039960207649127196,
"loss": 0.8052,
"step": 4000
},
{
"epoch": 1.33,
"learning_rate": 0.0003986872720605752,
"loss": 0.8036,
"step": 4500
},
{
"epoch": 1.48,
"learning_rate": 0.00039724468545932536,
"loss": 0.7902,
"step": 5000
},
{
"epoch": 1.62,
"learning_rate": 0.0003952781486968895,
"loss": 0.7818,
"step": 5500
},
{
"epoch": 1.77,
"learning_rate": 0.00039279288557562877,
"loss": 0.7834,
"step": 6000
},
{
"epoch": 1.92,
"learning_rate": 0.0003897954978146717,
"loss": 0.7686,
"step": 6500
},
{
"epoch": 2.0,
"eval_loss": 0.7881951332092285,
"eval_runtime": 132.7462,
"eval_samples_per_second": 11.345,
"eval_steps_per_second": 1.424,
"step": 6773
}
],
"max_steps": 33860,
"num_train_epochs": 10,
"total_flos": 3.0202162838237184e+17,
"trial_name": null,
"trial_params": null
}