{
"best_metric": 1.1525942087173462,
"best_model_checkpoint": "data/Llama-31-8B_task-3_60-samples_config-2_full_auto/checkpoint-48",
"epoch": 24.0,
"eval_steps": 500,
"global_step": 69,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.34782608695652173,
"grad_norm": 0.5371779203414917,
"learning_rate": 1e-05,
"loss": 1.6963,
"step": 1
},
{
"epoch": 0.6956521739130435,
"grad_norm": 0.5044246315956116,
"learning_rate": 2e-05,
"loss": 1.6601,
"step": 2
},
{
"epoch": 0.6956521739130435,
"eval_loss": 1.669729232788086,
"eval_runtime": 11.2977,
"eval_samples_per_second": 1.062,
"eval_steps_per_second": 1.062,
"step": 2
},
{
"epoch": 1.391304347826087,
"grad_norm": 0.4555090665817261,
"learning_rate": 4e-05,
"loss": 1.6649,
"step": 4
},
{
"epoch": 1.7391304347826086,
"eval_loss": 1.6259647607803345,
"eval_runtime": 11.2907,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 5
},
{
"epoch": 2.0869565217391304,
"grad_norm": 0.467232882976532,
"learning_rate": 6e-05,
"loss": 1.6544,
"step": 6
},
{
"epoch": 2.782608695652174,
"grad_norm": 0.44558843970298767,
"learning_rate": 8e-05,
"loss": 1.591,
"step": 8
},
{
"epoch": 2.782608695652174,
"eval_loss": 1.5467677116394043,
"eval_runtime": 11.2921,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 8
},
{
"epoch": 3.4782608695652173,
"grad_norm": 0.3453204333782196,
"learning_rate": 0.0001,
"loss": 1.4992,
"step": 10
},
{
"epoch": 3.8260869565217392,
"eval_loss": 1.4664047956466675,
"eval_runtime": 11.2912,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 11
},
{
"epoch": 4.173913043478261,
"grad_norm": 0.2386687695980072,
"learning_rate": 9.987820251299122e-05,
"loss": 1.4747,
"step": 12
},
{
"epoch": 4.869565217391305,
"grad_norm": 0.240106001496315,
"learning_rate": 9.951340343707852e-05,
"loss": 1.4061,
"step": 14
},
{
"epoch": 4.869565217391305,
"eval_loss": 1.396276831626892,
"eval_runtime": 11.2935,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 14
},
{
"epoch": 5.565217391304348,
"grad_norm": 0.22406244277954102,
"learning_rate": 9.890738003669029e-05,
"loss": 1.352,
"step": 16
},
{
"epoch": 5.913043478260869,
"eval_loss": 1.3312901258468628,
"eval_runtime": 11.2861,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 17
},
{
"epoch": 6.260869565217392,
"grad_norm": 0.2621766924858093,
"learning_rate": 9.806308479691595e-05,
"loss": 1.3078,
"step": 18
},
{
"epoch": 6.956521739130435,
"grad_norm": 0.2611936032772064,
"learning_rate": 9.698463103929542e-05,
"loss": 1.2367,
"step": 20
},
{
"epoch": 6.956521739130435,
"eval_loss": 1.2646236419677734,
"eval_runtime": 11.289,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 20
},
{
"epoch": 7.6521739130434785,
"grad_norm": 0.2356899231672287,
"learning_rate": 9.567727288213005e-05,
"loss": 1.2127,
"step": 22
},
{
"epoch": 8.0,
"eval_loss": 1.2216287851333618,
"eval_runtime": 11.2874,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 23
},
{
"epoch": 8.347826086956522,
"grad_norm": 0.176839679479599,
"learning_rate": 9.414737964294636e-05,
"loss": 1.1571,
"step": 24
},
{
"epoch": 8.695652173913043,
"eval_loss": 1.2051867246627808,
"eval_runtime": 11.2903,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 25
},
{
"epoch": 9.043478260869565,
"grad_norm": 0.17497973144054413,
"learning_rate": 9.24024048078213e-05,
"loss": 1.1346,
"step": 26
},
{
"epoch": 9.73913043478261,
"grad_norm": 0.15325738489627838,
"learning_rate": 9.045084971874738e-05,
"loss": 1.1165,
"step": 28
},
{
"epoch": 9.73913043478261,
"eval_loss": 1.189988613128662,
"eval_runtime": 11.2861,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 28
},
{
"epoch": 10.434782608695652,
"grad_norm": 0.13827162981033325,
"learning_rate": 8.83022221559489e-05,
"loss": 1.124,
"step": 30
},
{
"epoch": 10.782608695652174,
"eval_loss": 1.1786916255950928,
"eval_runtime": 11.2912,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 31
},
{
"epoch": 11.130434782608695,
"grad_norm": 0.15808305144309998,
"learning_rate": 8.596699001693255e-05,
"loss": 1.0669,
"step": 32
},
{
"epoch": 11.826086956521738,
"grad_norm": 0.14512917399406433,
"learning_rate": 8.345653031794292e-05,
"loss": 1.0947,
"step": 34
},
{
"epoch": 11.826086956521738,
"eval_loss": 1.1694222688674927,
"eval_runtime": 11.286,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 34
},
{
"epoch": 12.521739130434783,
"grad_norm": 0.14316336810588837,
"learning_rate": 8.07830737662829e-05,
"loss": 1.0606,
"step": 36
},
{
"epoch": 12.869565217391305,
"eval_loss": 1.163387417793274,
"eval_runtime": 11.2897,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 37
},
{
"epoch": 13.217391304347826,
"grad_norm": 0.14829201996326447,
"learning_rate": 7.795964517353735e-05,
"loss": 1.0616,
"step": 38
},
{
"epoch": 13.91304347826087,
"grad_norm": 0.16047731041908264,
"learning_rate": 7.500000000000001e-05,
"loss": 1.0621,
"step": 40
},
{
"epoch": 13.91304347826087,
"eval_loss": 1.1572819948196411,
"eval_runtime": 11.2875,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 40
},
{
"epoch": 14.608695652173914,
"grad_norm": 0.13741867244243622,
"learning_rate": 7.191855733945387e-05,
"loss": 1.0235,
"step": 42
},
{
"epoch": 14.956521739130435,
"eval_loss": 1.1549540758132935,
"eval_runtime": 11.2917,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 43
},
{
"epoch": 15.304347826086957,
"grad_norm": 0.14022833108901978,
"learning_rate": 6.873032967079561e-05,
"loss": 1.0307,
"step": 44
},
{
"epoch": 16.0,
"grad_norm": 0.15961027145385742,
"learning_rate": 6.545084971874738e-05,
"loss": 1.0274,
"step": 46
},
{
"epoch": 16.0,
"eval_loss": 1.1531068086624146,
"eval_runtime": 11.2899,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 46
},
{
"epoch": 16.695652173913043,
"grad_norm": 0.1557525396347046,
"learning_rate": 6.209609477998338e-05,
"loss": 0.9827,
"step": 48
},
{
"epoch": 16.695652173913043,
"eval_loss": 1.1525942087173462,
"eval_runtime": 11.2879,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 48
},
{
"epoch": 17.391304347826086,
"grad_norm": 0.17726318538188934,
"learning_rate": 5.868240888334653e-05,
"loss": 0.9959,
"step": 50
},
{
"epoch": 17.73913043478261,
"eval_loss": 1.153599500656128,
"eval_runtime": 11.2898,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 51
},
{
"epoch": 18.08695652173913,
"grad_norm": 0.17308309674263,
"learning_rate": 5.522642316338268e-05,
"loss": 1.0118,
"step": 52
},
{
"epoch": 18.782608695652176,
"grad_norm": 0.16349872946739197,
"learning_rate": 5.174497483512506e-05,
"loss": 0.9813,
"step": 54
},
{
"epoch": 18.782608695652176,
"eval_loss": 1.1575570106506348,
"eval_runtime": 11.2886,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 54
},
{
"epoch": 19.47826086956522,
"grad_norm": 0.18308573961257935,
"learning_rate": 4.825502516487497e-05,
"loss": 0.9571,
"step": 56
},
{
"epoch": 19.82608695652174,
"eval_loss": 1.160021185874939,
"eval_runtime": 11.2899,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 57
},
{
"epoch": 20.17391304347826,
"grad_norm": 0.1963604986667633,
"learning_rate": 4.477357683661734e-05,
"loss": 0.964,
"step": 58
},
{
"epoch": 20.869565217391305,
"grad_norm": 0.19509056210517883,
"learning_rate": 4.131759111665349e-05,
"loss": 0.9413,
"step": 60
},
{
"epoch": 20.869565217391305,
"eval_loss": 1.1618882417678833,
"eval_runtime": 11.2899,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 60
},
{
"epoch": 21.565217391304348,
"grad_norm": 0.2059006690979004,
"learning_rate": 3.790390522001662e-05,
"loss": 0.9355,
"step": 62
},
{
"epoch": 21.91304347826087,
"eval_loss": 1.1651769876480103,
"eval_runtime": 11.2932,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 63
},
{
"epoch": 22.26086956521739,
"grad_norm": 0.21040895581245422,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.9214,
"step": 64
},
{
"epoch": 22.956521739130434,
"grad_norm": 0.24565249681472778,
"learning_rate": 3.12696703292044e-05,
"loss": 0.9063,
"step": 66
},
{
"epoch": 22.956521739130434,
"eval_loss": 1.1697865724563599,
"eval_runtime": 11.2884,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 66
},
{
"epoch": 23.652173913043477,
"grad_norm": 0.23310421407222748,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.8949,
"step": 68
},
{
"epoch": 24.0,
"eval_loss": 1.173593521118164,
"eval_runtime": 11.2871,
"eval_samples_per_second": 1.063,
"eval_steps_per_second": 1.063,
"step": 69
},
{
"epoch": 24.0,
"step": 69,
"total_flos": 8.84265884623831e+16,
"train_loss": 1.1593545882598213,
"train_runtime": 3262.9387,
"train_samples_per_second": 0.705,
"train_steps_per_second": 0.031
}
],
"logging_steps": 2,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.84265884623831e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}