{
  "best_metric": 0.8459745645523071,
  "best_model_checkpoint": "./Zeroshot/01-12-23-NousResearch-Nous-Hermes-Llama2-13b_multilang-dataset-3.0.3-portuguese-2_epochs-10_batch_2/checkpoints/checkpoint-3386",
  "epoch": 0.9998523549387273,
  "global_step": 3386,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "learning_rate": 5.8948611931482576e-05,
      "loss": 1.3183,
      "step": 500
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00011801535735380982,
      "loss": 0.9343,
      "step": 1000
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00017708210277613703,
      "loss": 0.9005,
      "step": 1500
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00023614884819846425,
      "loss": 0.8859,
      "step": 2000
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0002952155936207915,
      "loss": 0.8707,
      "step": 2500
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00035416420555227406,
      "loss": 0.8523,
      "step": 3000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.8459745645523071,
      "eval_runtime": 133.0397,
      "eval_samples_per_second": 11.32,
      "eval_steps_per_second": 1.421,
      "step": 3386
    }
  ],
  "max_steps": 33860,
  "num_train_epochs": 10,
  "total_flos": 1.510076088815616e+17,
  "trial_name": null,
  "trial_params": null
}