|
{
  "best_metric": 0.915133535861969,
  "best_model_checkpoint": "data/Mistral-7B_task-1_120-samples_config-1_full/checkpoint-55",
  "epoch": 12.0,
  "eval_steps": 500,
  "global_step": 132,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 2.566887855529785,
      "learning_rate": 1.818181818181818e-06,
      "loss": 2.0791,
      "step": 1
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 2.830773115158081,
      "learning_rate": 3.636363636363636e-06,
      "loss": 2.1354,
      "step": 2
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 2.6159067153930664,
      "learning_rate": 7.272727272727272e-06,
      "loss": 2.1214,
      "step": 4
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 2.952951669692993,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 2.0604,
      "step": 6
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 2.1884078979492188,
      "learning_rate": 1.4545454545454545e-05,
      "loss": 1.9963,
      "step": 8
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 2.105299949645996,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 1.9008,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.7749409675598145,
      "eval_runtime": 9.6515,
      "eval_samples_per_second": 2.487,
      "eval_steps_per_second": 2.487,
      "step": 11
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 1.8031773567199707,
      "learning_rate": 2.1818181818181818e-05,
      "loss": 1.846,
      "step": 12
    },
    {
      "epoch": 1.2727272727272727,
      "grad_norm": 1.4205222129821777,
      "learning_rate": 2.5454545454545454e-05,
      "loss": 1.7576,
      "step": 14
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 1.291684865951538,
      "learning_rate": 2.909090909090909e-05,
      "loss": 1.5778,
      "step": 16
    },
    {
      "epoch": 1.6363636363636362,
      "grad_norm": 1.338912844657898,
      "learning_rate": 3.272727272727273e-05,
      "loss": 1.5468,
      "step": 18
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 1.7353410720825195,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 1.3755,
      "step": 20
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.1533783674240112,
      "learning_rate": 4e-05,
      "loss": 1.2368,
      "step": 22
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.204591155052185,
      "eval_runtime": 9.6569,
      "eval_samples_per_second": 2.485,
      "eval_steps_per_second": 2.485,
      "step": 22
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 1.1981287002563477,
      "learning_rate": 4.3636363636363636e-05,
      "loss": 1.1336,
      "step": 24
    },
    {
      "epoch": 2.3636363636363638,
      "grad_norm": 1.0418447256088257,
      "learning_rate": 4.7272727272727275e-05,
      "loss": 1.1206,
      "step": 26
    },
    {
      "epoch": 2.5454545454545454,
      "grad_norm": 0.9300098419189453,
      "learning_rate": 5.090909090909091e-05,
      "loss": 0.9848,
      "step": 28
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 0.876102864742279,
      "learning_rate": 5.4545454545454546e-05,
      "loss": 0.9623,
      "step": 30
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 0.8831844329833984,
      "learning_rate": 5.818181818181818e-05,
      "loss": 0.9597,
      "step": 32
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.976677417755127,
      "eval_runtime": 9.648,
      "eval_samples_per_second": 2.488,
      "eval_steps_per_second": 2.488,
      "step": 33
    },
    {
      "epoch": 3.090909090909091,
      "grad_norm": 0.8121486306190491,
      "learning_rate": 6.181818181818182e-05,
      "loss": 0.9711,
      "step": 34
    },
    {
      "epoch": 3.2727272727272725,
      "grad_norm": 0.7559528946876526,
      "learning_rate": 6.545454545454546e-05,
      "loss": 0.8857,
      "step": 36
    },
    {
      "epoch": 3.4545454545454546,
      "grad_norm": 0.7981972098350525,
      "learning_rate": 6.90909090909091e-05,
      "loss": 0.8503,
      "step": 38
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.8330462574958801,
      "learning_rate": 7.272727272727273e-05,
      "loss": 0.8669,
      "step": 40
    },
    {
      "epoch": 3.8181818181818183,
      "grad_norm": 1.0780049562454224,
      "learning_rate": 7.636363636363637e-05,
      "loss": 0.8486,
      "step": 42
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.0033646821975708,
      "learning_rate": 8e-05,
      "loss": 0.8364,
      "step": 44
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.9267880916595459,
      "eval_runtime": 9.6473,
      "eval_samples_per_second": 2.488,
      "eval_steps_per_second": 2.488,
      "step": 44
    },
    {
      "epoch": 4.181818181818182,
      "grad_norm": 0.9593367576599121,
      "learning_rate": 8.363636363636364e-05,
      "loss": 0.7425,
      "step": 46
    },
    {
      "epoch": 4.363636363636363,
      "grad_norm": 1.122158408164978,
      "learning_rate": 8.727272727272727e-05,
      "loss": 0.7308,
      "step": 48
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 1.0962390899658203,
      "learning_rate": 9.090909090909092e-05,
      "loss": 0.7428,
      "step": 50
    },
    {
      "epoch": 4.7272727272727275,
      "grad_norm": 1.0357098579406738,
      "learning_rate": 9.454545454545455e-05,
      "loss": 0.7131,
      "step": 52
    },
    {
      "epoch": 4.909090909090909,
      "grad_norm": 1.1160370111465454,
      "learning_rate": 9.818181818181818e-05,
      "loss": 0.6751,
      "step": 54
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.915133535861969,
      "eval_runtime": 9.6465,
      "eval_samples_per_second": 2.488,
      "eval_steps_per_second": 2.488,
      "step": 55
    },
    {
      "epoch": 5.090909090909091,
      "grad_norm": 1.1304196119308472,
      "learning_rate": 9.999899300364532e-05,
      "loss": 0.6196,
      "step": 56
    },
    {
      "epoch": 5.2727272727272725,
      "grad_norm": 1.3260724544525146,
      "learning_rate": 9.99909372761763e-05,
      "loss": 0.5389,
      "step": 58
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 2.0141329765319824,
      "learning_rate": 9.997482711915927e-05,
      "loss": 0.5146,
      "step": 60
    },
    {
      "epoch": 5.636363636363637,
      "grad_norm": 1.6033296585083008,
      "learning_rate": 9.99506651282272e-05,
      "loss": 0.5495,
      "step": 62
    },
    {
      "epoch": 5.818181818181818,
      "grad_norm": 1.500605821609497,
      "learning_rate": 9.991845519630678e-05,
      "loss": 0.4957,
      "step": 64
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.3610997200012207,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.492,
      "step": 66
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.96142578125,
      "eval_runtime": 9.6466,
      "eval_samples_per_second": 2.488,
      "eval_steps_per_second": 2.488,
      "step": 66
    },
    {
      "epoch": 6.181818181818182,
      "grad_norm": 1.1826406717300415,
      "learning_rate": 9.982991356370404e-05,
      "loss": 0.395,
      "step": 68
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 1.3635964393615723,
      "learning_rate": 9.977359612865423e-05,
      "loss": 0.3639,
      "step": 70
    },
    {
      "epoch": 6.545454545454545,
      "grad_norm": 1.9806699752807617,
      "learning_rate": 9.970925928158274e-05,
      "loss": 0.3793,
      "step": 72
    },
    {
      "epoch": 6.7272727272727275,
      "grad_norm": 2.0373482704162598,
      "learning_rate": 9.963691338830044e-05,
      "loss": 0.386,
      "step": 74
    },
    {
      "epoch": 6.909090909090909,
      "grad_norm": 1.5265707969665527,
      "learning_rate": 9.955657010501806e-05,
      "loss": 0.3136,
      "step": 76
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.0793741941452026,
      "eval_runtime": 9.6816,
      "eval_samples_per_second": 2.479,
      "eval_steps_per_second": 2.479,
      "step": 77
    },
    {
      "epoch": 7.090909090909091,
      "grad_norm": 1.2268736362457275,
      "learning_rate": 9.946824237646824e-05,
      "loss": 0.288,
      "step": 78
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 1.1878007650375366,
      "learning_rate": 9.937194443381972e-05,
      "loss": 0.2682,
      "step": 80
    },
    {
      "epoch": 7.454545454545454,
      "grad_norm": 1.615853190422058,
      "learning_rate": 9.926769179238466e-05,
      "loss": 0.2332,
      "step": 82
    },
    {
      "epoch": 7.636363636363637,
      "grad_norm": 1.8352854251861572,
      "learning_rate": 9.915550124911866e-05,
      "loss": 0.2292,
      "step": 84
    },
    {
      "epoch": 7.818181818181818,
      "grad_norm": 1.9822676181793213,
      "learning_rate": 9.903539087991462e-05,
      "loss": 0.2604,
      "step": 86
    },
    {
      "epoch": 8.0,
      "grad_norm": 2.0019595623016357,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.2439,
      "step": 88
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.2905783653259277,
      "eval_runtime": 9.6438,
      "eval_samples_per_second": 2.489,
      "eval_steps_per_second": 2.489,
      "step": 88
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 1.3164584636688232,
      "learning_rate": 9.877148934427037e-05,
      "loss": 0.1722,
      "step": 90
    },
    {
      "epoch": 8.363636363636363,
      "grad_norm": 1.6368311643600464,
      "learning_rate": 9.862774069706346e-05,
      "loss": 0.177,
      "step": 92
    },
    {
      "epoch": 8.545454545454545,
      "grad_norm": 1.649357795715332,
      "learning_rate": 9.847615725553456e-05,
      "loss": 0.1658,
      "step": 94
    },
    {
      "epoch": 8.727272727272727,
      "grad_norm": 1.3624839782714844,
      "learning_rate": 9.831676344247342e-05,
      "loss": 0.1729,
      "step": 96
    },
    {
      "epoch": 8.909090909090908,
      "grad_norm": 1.5218144655227661,
      "learning_rate": 9.814958493905963e-05,
      "loss": 0.1635,
      "step": 98
    },
    {
      "epoch": 9.0,
      "eval_loss": 1.2869492769241333,
      "eval_runtime": 9.6504,
      "eval_samples_per_second": 2.487,
      "eval_steps_per_second": 2.487,
      "step": 99
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 1.094982624053955,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.1559,
      "step": 100
    },
    {
      "epoch": 9.272727272727273,
      "grad_norm": 1.2318857908248901,
      "learning_rate": 9.779198285281325e-05,
      "loss": 0.1275,
      "step": 102
    },
    {
      "epoch": 9.454545454545455,
      "grad_norm": 2.4848134517669678,
      "learning_rate": 9.760161688604008e-05,
      "loss": 0.1259,
      "step": 104
    },
    {
      "epoch": 9.636363636363637,
      "grad_norm": 1.4462814331054688,
      "learning_rate": 9.740358145174998e-05,
      "loss": 0.1264,
      "step": 106
    },
    {
      "epoch": 9.818181818181818,
      "grad_norm": 1.9087587594985962,
      "learning_rate": 9.719790845697533e-05,
      "loss": 0.1368,
      "step": 108
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.2537627220153809,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.1396,
      "step": 110
    },
    {
      "epoch": 10.0,
      "eval_loss": 1.3566713333129883,
      "eval_runtime": 9.651,
      "eval_samples_per_second": 2.487,
      "eval_steps_per_second": 2.487,
      "step": 110
    },
    {
      "epoch": 10.181818181818182,
      "grad_norm": 1.6255460977554321,
      "learning_rate": 9.676378356149734e-05,
      "loss": 0.1033,
      "step": 112
    },
    {
      "epoch": 10.363636363636363,
      "grad_norm": 1.0529149770736694,
      "learning_rate": 9.653540160603956e-05,
      "loss": 0.1009,
      "step": 114
    },
    {
      "epoch": 10.545454545454545,
      "grad_norm": 1.5140531063079834,
      "learning_rate": 9.629952196931901e-05,
      "loss": 0.1062,
      "step": 116
    },
    {
      "epoch": 10.727272727272727,
      "grad_norm": 1.8233051300048828,
      "learning_rate": 9.60561826557425e-05,
      "loss": 0.1153,
      "step": 118
    },
    {
      "epoch": 10.909090909090908,
      "grad_norm": 1.4265450239181519,
      "learning_rate": 9.580542287160348e-05,
      "loss": 0.116,
      "step": 120
    },
    {
      "epoch": 11.0,
      "eval_loss": 1.34817636013031,
      "eval_runtime": 9.6478,
      "eval_samples_per_second": 2.488,
      "eval_steps_per_second": 2.488,
      "step": 121
    },
    {
      "epoch": 11.090909090909092,
      "grad_norm": 0.8790718913078308,
      "learning_rate": 9.554728301876526e-05,
      "loss": 0.0999,
      "step": 122
    },
    {
      "epoch": 11.272727272727273,
      "grad_norm": 0.858034074306488,
      "learning_rate": 9.528180468815155e-05,
      "loss": 0.0895,
      "step": 124
    },
    {
      "epoch": 11.454545454545455,
      "grad_norm": 0.832482099533081,
      "learning_rate": 9.50090306530454e-05,
      "loss": 0.089,
      "step": 126
    },
    {
      "epoch": 11.636363636363637,
      "grad_norm": 1.222090244293213,
      "learning_rate": 9.472900486219769e-05,
      "loss": 0.0922,
      "step": 128
    },
    {
      "epoch": 11.818181818181818,
      "grad_norm": 1.581915020942688,
      "learning_rate": 9.444177243274618e-05,
      "loss": 0.1027,
      "step": 130
    },
    {
      "epoch": 12.0,
      "grad_norm": 1.0929337739944458,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.104,
      "step": 132
    },
    {
      "epoch": 12.0,
      "eval_loss": 1.4276927709579468,
      "eval_runtime": 9.6483,
      "eval_samples_per_second": 2.487,
      "eval_steps_per_second": 2.487,
      "step": 132
    },
    {
      "epoch": 12.0,
      "step": 132,
      "total_flos": 1.5907229681057792e+16,
      "train_loss": 0.6500745423589692,
      "train_runtime": 1244.1938,
      "train_samples_per_second": 3.536,
      "train_steps_per_second": 0.442
    }
  ],
  "logging_steps": 2,
  "max_steps": 550,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5907229681057792e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}