{
"best_metric": 0.8982431888580322,
"best_model_checkpoint": "data/Llama-31-8B_task-1_60-samples_config-2_full/checkpoint-43",
"epoch": 21.91304347826087,
"eval_steps": 500,
"global_step": 63,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.34782608695652173,
"grad_norm": 2.058223247528076,
"learning_rate": 1e-05,
"loss": 2.5057,
"step": 1
},
{
"epoch": 0.6956521739130435,
"grad_norm": 2.0094869136810303,
"learning_rate": 2e-05,
"loss": 2.5391,
"step": 2
},
{
"epoch": 0.6956521739130435,
"eval_loss": 2.391584873199463,
"eval_runtime": 4.8261,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 2
},
{
"epoch": 1.391304347826087,
"grad_norm": 1.6555781364440918,
"learning_rate": 4e-05,
"loss": 2.4703,
"step": 4
},
{
"epoch": 1.7391304347826086,
"eval_loss": 2.20740008354187,
"eval_runtime": 4.8274,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 5
},
{
"epoch": 2.0869565217391304,
"grad_norm": 1.380189299583435,
"learning_rate": 6e-05,
"loss": 2.3343,
"step": 6
},
{
"epoch": 2.782608695652174,
"grad_norm": 1.1684484481811523,
"learning_rate": 8e-05,
"loss": 2.1328,
"step": 8
},
{
"epoch": 2.782608695652174,
"eval_loss": 1.9536972045898438,
"eval_runtime": 4.8279,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 8
},
{
"epoch": 3.4782608695652173,
"grad_norm": 1.248038649559021,
"learning_rate": 0.0001,
"loss": 1.9709,
"step": 10
},
{
"epoch": 3.8260869565217392,
"eval_loss": 1.6768689155578613,
"eval_runtime": 4.8282,
"eval_samples_per_second": 2.485,
"eval_steps_per_second": 2.485,
"step": 11
},
{
"epoch": 4.173913043478261,
"grad_norm": 0.9004043936729431,
"learning_rate": 9.987820251299122e-05,
"loss": 1.7679,
"step": 12
},
{
"epoch": 4.869565217391305,
"grad_norm": 0.9594781994819641,
"learning_rate": 9.951340343707852e-05,
"loss": 1.5704,
"step": 14
},
{
"epoch": 4.869565217391305,
"eval_loss": 1.3962249755859375,
"eval_runtime": 4.8269,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 14
},
{
"epoch": 5.565217391304348,
"grad_norm": 1.0566002130508423,
"learning_rate": 9.890738003669029e-05,
"loss": 1.3765,
"step": 16
},
{
"epoch": 5.913043478260869,
"eval_loss": 1.1357998847961426,
"eval_runtime": 4.8302,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 17
},
{
"epoch": 6.260869565217392,
"grad_norm": 0.7590504288673401,
"learning_rate": 9.806308479691595e-05,
"loss": 1.131,
"step": 18
},
{
"epoch": 6.956521739130435,
"grad_norm": 0.5426605343818665,
"learning_rate": 9.698463103929542e-05,
"loss": 1.0594,
"step": 20
},
{
"epoch": 6.956521739130435,
"eval_loss": 1.0274940729141235,
"eval_runtime": 4.825,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 20
},
{
"epoch": 7.6521739130434785,
"grad_norm": 0.3678481876850128,
"learning_rate": 9.567727288213005e-05,
"loss": 0.9969,
"step": 22
},
{
"epoch": 8.0,
"eval_loss": 0.9876609444618225,
"eval_runtime": 4.821,
"eval_samples_per_second": 2.489,
"eval_steps_per_second": 2.489,
"step": 23
},
{
"epoch": 8.347826086956522,
"grad_norm": 0.33574286103248596,
"learning_rate": 9.414737964294636e-05,
"loss": 0.9485,
"step": 24
},
{
"epoch": 8.695652173913043,
"eval_loss": 0.9700170159339905,
"eval_runtime": 4.8279,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 25
},
{
"epoch": 9.043478260869565,
"grad_norm": 0.3618321716785431,
"learning_rate": 9.24024048078213e-05,
"loss": 0.9357,
"step": 26
},
{
"epoch": 9.73913043478261,
"grad_norm": 0.3733964264392853,
"learning_rate": 9.045084971874738e-05,
"loss": 0.8932,
"step": 28
},
{
"epoch": 9.73913043478261,
"eval_loss": 0.9503232836723328,
"eval_runtime": 4.8207,
"eval_samples_per_second": 2.489,
"eval_steps_per_second": 2.489,
"step": 28
},
{
"epoch": 10.434782608695652,
"grad_norm": 0.34359118342399597,
"learning_rate": 8.83022221559489e-05,
"loss": 0.8815,
"step": 30
},
{
"epoch": 10.782608695652174,
"eval_loss": 0.9331014752388,
"eval_runtime": 4.8274,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 31
},
{
"epoch": 11.130434782608695,
"grad_norm": 0.3720068037509918,
"learning_rate": 8.596699001693255e-05,
"loss": 0.8892,
"step": 32
},
{
"epoch": 11.826086956521738,
"grad_norm": 0.3460403382778168,
"learning_rate": 8.345653031794292e-05,
"loss": 0.8229,
"step": 34
},
{
"epoch": 11.826086956521738,
"eval_loss": 0.9215609431266785,
"eval_runtime": 4.8299,
"eval_samples_per_second": 2.485,
"eval_steps_per_second": 2.485,
"step": 34
},
{
"epoch": 12.521739130434783,
"grad_norm": 0.37291157245635986,
"learning_rate": 8.07830737662829e-05,
"loss": 0.8136,
"step": 36
},
{
"epoch": 12.869565217391305,
"eval_loss": 0.9111137390136719,
"eval_runtime": 4.8323,
"eval_samples_per_second": 2.483,
"eval_steps_per_second": 2.483,
"step": 37
},
{
"epoch": 13.217391304347826,
"grad_norm": 0.44780686497688293,
"learning_rate": 7.795964517353735e-05,
"loss": 0.8011,
"step": 38
},
{
"epoch": 13.91304347826087,
"grad_norm": 0.4574354887008667,
"learning_rate": 7.500000000000001e-05,
"loss": 0.7507,
"step": 40
},
{
"epoch": 13.91304347826087,
"eval_loss": 0.9021480679512024,
"eval_runtime": 4.8292,
"eval_samples_per_second": 2.485,
"eval_steps_per_second": 2.485,
"step": 40
},
{
"epoch": 14.608695652173914,
"grad_norm": 0.4815647602081299,
"learning_rate": 7.191855733945387e-05,
"loss": 0.7373,
"step": 42
},
{
"epoch": 14.956521739130435,
"eval_loss": 0.8982431888580322,
"eval_runtime": 4.8279,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 43
},
{
"epoch": 15.304347826086957,
"grad_norm": 0.4997287690639496,
"learning_rate": 6.873032967079561e-05,
"loss": 0.7148,
"step": 44
},
{
"epoch": 16.0,
"grad_norm": 0.5747138857841492,
"learning_rate": 6.545084971874738e-05,
"loss": 0.6959,
"step": 46
},
{
"epoch": 16.0,
"eval_loss": 0.9020419716835022,
"eval_runtime": 4.8185,
"eval_samples_per_second": 2.49,
"eval_steps_per_second": 2.49,
"step": 46
},
{
"epoch": 16.695652173913043,
"grad_norm": 0.672699511051178,
"learning_rate": 6.209609477998338e-05,
"loss": 0.6651,
"step": 48
},
{
"epoch": 16.695652173913043,
"eval_loss": 0.9060425758361816,
"eval_runtime": 4.821,
"eval_samples_per_second": 2.489,
"eval_steps_per_second": 2.489,
"step": 48
},
{
"epoch": 17.391304347826086,
"grad_norm": 0.6403476595878601,
"learning_rate": 5.868240888334653e-05,
"loss": 0.6589,
"step": 50
},
{
"epoch": 17.73913043478261,
"eval_loss": 0.9118922352790833,
"eval_runtime": 4.8216,
"eval_samples_per_second": 2.489,
"eval_steps_per_second": 2.489,
"step": 51
},
{
"epoch": 18.08695652173913,
"grad_norm": 0.7273726463317871,
"learning_rate": 5.522642316338268e-05,
"loss": 0.5916,
"step": 52
},
{
"epoch": 18.782608695652176,
"grad_norm": 0.7468767166137695,
"learning_rate": 5.174497483512506e-05,
"loss": 0.5782,
"step": 54
},
{
"epoch": 18.782608695652176,
"eval_loss": 0.9264171123504639,
"eval_runtime": 4.824,
"eval_samples_per_second": 2.488,
"eval_steps_per_second": 2.488,
"step": 54
},
{
"epoch": 19.47826086956522,
"grad_norm": 0.7150223851203918,
"learning_rate": 4.825502516487497e-05,
"loss": 0.585,
"step": 56
},
{
"epoch": 19.82608695652174,
"eval_loss": 0.9372181296348572,
"eval_runtime": 4.8257,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 57
},
{
"epoch": 20.17391304347826,
"grad_norm": 0.7036177515983582,
"learning_rate": 4.477357683661734e-05,
"loss": 0.4927,
"step": 58
},
{
"epoch": 20.869565217391305,
"grad_norm": 0.6790146231651306,
"learning_rate": 4.131759111665349e-05,
"loss": 0.511,
"step": 60
},
{
"epoch": 20.869565217391305,
"eval_loss": 0.9603924751281738,
"eval_runtime": 4.8248,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 60
},
{
"epoch": 21.565217391304348,
"grad_norm": 0.5890304446220398,
"learning_rate": 3.790390522001662e-05,
"loss": 0.4767,
"step": 62
},
{
"epoch": 21.91304347826087,
"eval_loss": 0.9806472659111023,
"eval_runtime": 4.8246,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 63
},
{
"epoch": 21.91304347826087,
"step": 63,
"total_flos": 1.5148012584042496e+16,
"train_loss": 1.081045771402026,
"train_runtime": 1186.9402,
"train_samples_per_second": 1.938,
"train_steps_per_second": 0.084
}
],
"logging_steps": 2,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5148012584042496e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}