Yi-6B-ruozhiba-1e-5-50 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 550,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 9.090909090909091e-06,
"loss": 2.3833,
"step": 1
},
{
"epoch": 0.07,
"learning_rate": 3.6363636363636364e-05,
"loss": 2.4676,
"step": 4
},
{
"epoch": 0.15,
"learning_rate": 7.272727272727273e-05,
"loss": 2.2207,
"step": 8
},
{
"epoch": 0.22,
"learning_rate": 0.00010909090909090909,
"loss": 2.1377,
"step": 12
},
{
"epoch": 0.29,
"learning_rate": 0.00014545454545454546,
"loss": 1.9925,
"step": 16
},
{
"epoch": 0.36,
"learning_rate": 0.00018181818181818183,
"loss": 1.9958,
"step": 20
},
{
"epoch": 0.44,
"learning_rate": 0.00021818181818181818,
"loss": 1.8572,
"step": 24
},
{
"epoch": 0.51,
"learning_rate": 0.0002545454545454545,
"loss": 1.9631,
"step": 28
},
{
"epoch": 0.58,
"learning_rate": 0.0002909090909090909,
"loss": 1.8855,
"step": 32
},
{
"epoch": 0.65,
"learning_rate": 0.00032727272727272726,
"loss": 1.8563,
"step": 36
},
{
"epoch": 0.73,
"learning_rate": 0.00036363636363636367,
"loss": 1.77,
"step": 40
},
{
"epoch": 0.8,
"learning_rate": 0.0004,
"loss": 1.8378,
"step": 44
},
{
"epoch": 0.87,
"learning_rate": 0.00043636363636363637,
"loss": 1.9476,
"step": 48
},
{
"epoch": 0.95,
"learning_rate": 0.0004727272727272727,
"loss": 1.8713,
"step": 52
},
{
"epoch": 1.0,
"gpt4_scores": 0.5359999999999999,
"step": 55
},
{
"epoch": 1.0,
"std": 0.0580868315541483,
"step": 55
},
{
"epoch": 1.0,
"eval_loss": 1.8388524055480957,
"eval_runtime": 4.9686,
"eval_samples_per_second": 4.629,
"eval_steps_per_second": 1.208,
"step": 55
},
{
"epoch": 1.02,
"learning_rate": 0.0004999949650182266,
"loss": 1.7168,
"step": 56
},
{
"epoch": 1.09,
"learning_rate": 0.0004998741355957963,
"loss": 1.6773,
"step": 60
},
{
"epoch": 1.16,
"learning_rate": 0.0004995922759815339,
"loss": 1.6162,
"step": 64
},
{
"epoch": 1.24,
"learning_rate": 0.0004991495678185201,
"loss": 1.5625,
"step": 68
},
{
"epoch": 1.31,
"learning_rate": 0.0004985462964079136,
"loss": 1.4995,
"step": 72
},
{
"epoch": 1.38,
"learning_rate": 0.0004977828505250904,
"loss": 1.4863,
"step": 76
},
{
"epoch": 1.45,
"learning_rate": 0.0004968597221690986,
"loss": 1.4958,
"step": 80
},
{
"epoch": 1.53,
"learning_rate": 0.0004957775062455933,
"loss": 1.6001,
"step": 84
},
{
"epoch": 1.6,
"learning_rate": 0.0004945369001834514,
"loss": 1.5667,
"step": 88
},
{
"epoch": 1.67,
"learning_rate": 0.0004931387034853173,
"loss": 1.4828,
"step": 92
},
{
"epoch": 1.75,
"learning_rate": 0.0004915838172123671,
"loss": 1.4508,
"step": 96
},
{
"epoch": 1.82,
"learning_rate": 0.0004898732434036243,
"loss": 1.4932,
"step": 100
},
{
"epoch": 1.89,
"learning_rate": 0.0004880080844302004,
"loss": 1.4789,
"step": 104
},
{
"epoch": 1.96,
"learning_rate": 0.0004859895422848767,
"loss": 1.3297,
"step": 108
},
{
"epoch": 2.04,
"learning_rate": 0.00048381891780748665,
"loss": 1.0117,
"step": 112
},
{
"epoch": 2.11,
"learning_rate": 0.0004814976098465951,
"loss": 0.7652,
"step": 116
},
{
"epoch": 2.18,
"learning_rate": 0.0004790271143580174,
"loss": 0.8033,
"step": 120
},
{
"epoch": 2.25,
"learning_rate": 0.0004764090234407577,
"loss": 0.6905,
"step": 124
},
{
"epoch": 2.33,
"learning_rate": 0.0004736450243109884,
"loss": 0.7312,
"step": 128
},
{
"epoch": 2.4,
"learning_rate": 0.00047073689821473173,
"loss": 0.761,
"step": 132
},
{
"epoch": 2.47,
"learning_rate": 0.00046768651927994433,
"loss": 0.6884,
"step": 136
},
{
"epoch": 2.55,
"learning_rate": 0.0004644958533087443,
"loss": 0.623,
"step": 140
},
{
"epoch": 2.62,
"learning_rate": 0.0004611669565105596,
"loss": 0.7938,
"step": 144
},
{
"epoch": 2.69,
"learning_rate": 0.00045770197417701366,
"loss": 0.7256,
"step": 148
},
{
"epoch": 2.76,
"learning_rate": 0.00045410313929940244,
"loss": 0.8236,
"step": 152
},
{
"epoch": 2.84,
"learning_rate": 0.00045037277112965383,
"loss": 0.9053,
"step": 156
},
{
"epoch": 2.91,
"learning_rate": 0.0004465132736856969,
"loss": 0.8822,
"step": 160
},
{
"epoch": 2.98,
"learning_rate": 0.00044252713420220394,
"loss": 0.847,
"step": 164
},
{
"epoch": 3.05,
"learning_rate": 0.00043841692152770415,
"loss": 0.3486,
"step": 168
},
{
"epoch": 3.13,
"learning_rate": 0.00043418528446910123,
"loss": 0.3387,
"step": 172
},
{
"epoch": 3.2,
"learning_rate": 0.0004298349500846628,
"loss": 0.3827,
"step": 176
},
{
"epoch": 3.27,
"learning_rate": 0.00042536872192658034,
"loss": 0.3494,
"step": 180
},
{
"epoch": 3.35,
"learning_rate": 0.00042078947823423365,
"loss": 0.3773,
"step": 184
},
{
"epoch": 3.42,
"learning_rate": 0.0004161001700793231,
"loss": 0.327,
"step": 188
},
{
"epoch": 3.49,
"learning_rate": 0.00041130381946406574,
"loss": 0.4188,
"step": 192
},
{
"epoch": 3.56,
"learning_rate": 0.0004064035173736804,
"loss": 0.3763,
"step": 196
},
{
"epoch": 3.64,
"learning_rate": 0.00040140242178441667,
"loss": 0.3837,
"step": 200
},
{
"epoch": 3.71,
"learning_rate": 0.0003963037556284129,
"loss": 0.4003,
"step": 204
},
{
"epoch": 3.78,
"learning_rate": 0.0003911108047166924,
"loss": 0.32,
"step": 208
},
{
"epoch": 3.85,
"learning_rate": 0.00038582691562163827,
"loss": 0.3738,
"step": 212
},
{
"epoch": 3.93,
"learning_rate": 0.0003804554935203115,
"loss": 0.3584,
"step": 216
},
{
"epoch": 4.0,
"learning_rate": 0.000375,
"loss": 0.3469,
"step": 220
},
{
"epoch": 4.07,
"learning_rate": 0.0003694639508274158,
"loss": 0.1782,
"step": 224
},
{
"epoch": 4.15,
"learning_rate": 0.0003638509136829758,
"loss": 0.1614,
"step": 228
},
{
"epoch": 4.22,
"learning_rate": 0.00035816450586162706,
"loss": 0.2127,
"step": 232
},
{
"epoch": 4.29,
"learning_rate": 0.00035240839194169884,
"loss": 0.173,
"step": 236
},
{
"epoch": 4.36,
"learning_rate": 0.00034658628142328216,
"loss": 0.1831,
"step": 240
},
{
"epoch": 4.44,
"learning_rate": 0.00034070192633766023,
"loss": 0.2235,
"step": 244
},
{
"epoch": 4.51,
"learning_rate": 0.0003347591188293301,
"loss": 0.1765,
"step": 248
},
{
"epoch": 4.58,
"learning_rate": 0.00032876168871217323,
"loss": 0.1828,
"step": 252
},
{
"epoch": 4.65,
"learning_rate": 0.00032271350100134975,
"loss": 0.1773,
"step": 256
},
{
"epoch": 4.73,
"learning_rate": 0.0003166184534225087,
"loss": 0.1496,
"step": 260
},
{
"epoch": 4.8,
"learning_rate": 0.0003104804738999169,
"loss": 0.176,
"step": 264
},
{
"epoch": 4.87,
"learning_rate": 0.00030430351802512693,
"loss": 0.1545,
"step": 268
},
{
"epoch": 4.95,
"learning_rate": 0.00029809156650781527,
"loss": 0.1631,
"step": 272
},
{
"epoch": 5.0,
"gpt4_scores": 0.6274,
"step": 275
},
{
"epoch": 5.0,
"std": 0.04891773502524415,
"step": 275
},
{
"epoch": 5.0,
"eval_loss": 2.9787437915802,
"eval_runtime": 4.9451,
"eval_samples_per_second": 4.651,
"eval_steps_per_second": 1.213,
"step": 275
},
{
"epoch": 5.02,
"learning_rate": 0.0002918486226104327,
"loss": 0.123,
"step": 276
},
{
"epoch": 5.09,
"learning_rate": 0.00028557870956832135,
"loss": 0.1251,
"step": 280
},
{
"epoch": 5.16,
"learning_rate": 0.0002792858679969596,
"loss": 0.0895,
"step": 284
},
{
"epoch": 5.24,
"learning_rate": 0.0002729741532880069,
"loss": 0.0952,
"step": 288
},
{
"epoch": 5.31,
"learning_rate": 0.000266647632995826,
"loss": 0.0914,
"step": 292
},
{
"epoch": 5.38,
"learning_rate": 0.00026031038421616684,
"loss": 0.1204,
"step": 296
},
{
"epoch": 5.45,
"learning_rate": 0.000253966490958702,
"loss": 0.1004,
"step": 300
},
{
"epoch": 5.53,
"learning_rate": 0.00024762004151510585,
"loss": 0.1251,
"step": 304
},
{
"epoch": 5.6,
"learning_rate": 0.00024127512582437484,
"loss": 0.0999,
"step": 308
},
{
"epoch": 5.67,
"learning_rate": 0.00023493583283708543,
"loss": 0.0932,
"step": 312
},
{
"epoch": 5.75,
"learning_rate": 0.00022860624788029015,
"loss": 0.0786,
"step": 316
},
{
"epoch": 5.82,
"learning_rate": 0.00022229045002474727,
"loss": 0.0984,
"step": 320
},
{
"epoch": 5.89,
"learning_rate": 0.000215992509456184,
"loss": 0.0769,
"step": 324
},
{
"epoch": 5.96,
"learning_rate": 0.000209716484852284,
"loss": 0.0765,
"step": 328
},
{
"epoch": 6.0,
"gpt4_scores": 0.5669999999999998,
"step": 330
},
{
"epoch": 6.0,
"std": 0.05915420526048846,
"step": 330
},
{
"epoch": 6.0,
"eval_loss": 3.0907390117645264,
"eval_runtime": 4.9453,
"eval_samples_per_second": 4.651,
"eval_steps_per_second": 1.213,
"step": 330
},
{
"epoch": 6.04,
"learning_rate": 0.0002034664207670925,
"loss": 0.0632,
"step": 332
},
{
"epoch": 6.11,
"learning_rate": 0.0001972463450245226,
"loss": 0.0566,
"step": 336
},
{
"epoch": 6.18,
"learning_rate": 0.00019106026612264316,
"loss": 0.054,
"step": 340
},
{
"epoch": 6.25,
"learning_rate": 0.00018491217065042198,
"loss": 0.0482,
"step": 344
},
{
"epoch": 6.33,
"learning_rate": 0.00017880602071858692,
"loss": 0.0482,
"step": 348
},
{
"epoch": 6.4,
"learning_rate": 0.00017274575140626317,
"loss": 0.0575,
"step": 352
},
{
"epoch": 6.47,
"learning_rate": 0.00016673526822502983,
"loss": 0.0506,
"step": 356
},
{
"epoch": 6.55,
"learning_rate": 0.00016077844460203207,
"loss": 0.0536,
"step": 360
},
{
"epoch": 6.62,
"learning_rate": 0.00015487911938376925,
"loss": 0.0503,
"step": 364
},
{
"epoch": 6.69,
"learning_rate": 0.00014904109436216883,
"loss": 0.0507,
"step": 368
},
{
"epoch": 6.76,
"learning_rate": 0.00014326813182453956,
"loss": 0.0572,
"step": 372
},
{
"epoch": 6.84,
"learning_rate": 0.0001375639521289836,
"loss": 0.0575,
"step": 376
},
{
"epoch": 6.91,
"learning_rate": 0.00013193223130682935,
"loss": 0.0546,
"step": 380
},
{
"epoch": 6.98,
"learning_rate": 0.00012637659869363084,
"loss": 0.0489,
"step": 384
},
{
"epoch": 7.0,
"gpt4_scores": 0.5259999999999999,
"step": 385
},
{
"epoch": 7.0,
"std": 0.05718811065247741,
"step": 385
},
{
"epoch": 7.0,
"eval_loss": 3.2638301849365234,
"eval_runtime": 4.9708,
"eval_samples_per_second": 4.627,
"eval_steps_per_second": 1.207,
"step": 385
},
{
"epoch": 7.05,
"learning_rate": 0.00012090063459025954,
"loss": 0.0396,
"step": 388
},
{
"epoch": 7.13,
"learning_rate": 0.0001155078679555969,
"loss": 0.0441,
"step": 392
},
{
"epoch": 7.2,
"learning_rate": 0.00011020177413231333,
"loss": 0.0413,
"step": 396
},
{
"epoch": 7.27,
"learning_rate": 0.00010498577260720049,
"loss": 0.0436,
"step": 400
},
{
"epoch": 7.35,
"learning_rate": 9.986322480749927e-05,
"loss": 0.0532,
"step": 404
},
{
"epoch": 7.42,
"learning_rate": 9.483743193464408e-05,
"loss": 0.0375,
"step": 408
},
{
"epoch": 7.49,
"learning_rate": 8.991163283681945e-05,
"loss": 0.0431,
"step": 412
},
{
"epoch": 7.56,
"learning_rate": 8.508900192169963e-05,
"loss": 0.0449,
"step": 416
},
{
"epoch": 7.64,
"learning_rate": 8.037264711071699e-05,
"loss": 0.0502,
"step": 420
},
{
"epoch": 7.71,
"learning_rate": 7.576560783617667e-05,
"loss": 0.038,
"step": 424
},
{
"epoch": 7.78,
"learning_rate": 7.127085308250913e-05,
"loss": 0.0483,
"step": 428
},
{
"epoch": 7.85,
"learning_rate": 6.689127947292231e-05,
"loss": 0.0411,
"step": 432
},
{
"epoch": 7.93,
"learning_rate": 6.262970940268654e-05,
"loss": 0.0529,
"step": 436
},
{
"epoch": 8.0,
"learning_rate": 5.848888922025553e-05,
"loss": 0.0559,
"step": 440
},
{
"epoch": 8.0,
"gpt4_scores": 0.574,
"step": 440
},
{
"epoch": 8.0,
"std": 0.056200355870759375,
"step": 440
},
{
"epoch": 8.0,
"eval_loss": 3.374971389770508,
"eval_runtime": 4.9393,
"eval_samples_per_second": 4.657,
"eval_steps_per_second": 1.215,
"step": 440
},
{
"epoch": 8.07,
"learning_rate": 5.4471487457395216e-05,
"loss": 0.0389,
"step": 444
},
{
"epoch": 8.15,
"learning_rate": 5.058009310946118e-05,
"loss": 0.0445,
"step": 448
},
{
"epoch": 8.22,
"learning_rate": 4.6817213966933034e-05,
"loss": 0.0467,
"step": 452
},
{
"epoch": 8.29,
"learning_rate": 4.318527499928074e-05,
"loss": 0.0357,
"step": 456
},
{
"epoch": 8.36,
"learning_rate": 3.968661679220467e-05,
"loss": 0.0469,
"step": 460
},
{
"epoch": 8.44,
"learning_rate": 3.632349403925664e-05,
"loss": 0.0503,
"step": 464
},
{
"epoch": 8.51,
"learning_rate": 3.309807408881269e-05,
"loss": 0.0419,
"step": 468
},
{
"epoch": 8.58,
"learning_rate": 3.0012435547336736e-05,
"loss": 0.0437,
"step": 472
},
{
"epoch": 8.65,
"learning_rate": 2.7068566939831645e-05,
"loss": 0.0388,
"step": 476
},
{
"epoch": 8.73,
"learning_rate": 2.4268365428344735e-05,
"loss": 0.0379,
"step": 480
},
{
"epoch": 8.8,
"learning_rate": 2.1613635589349755e-05,
"loss": 0.0442,
"step": 484
},
{
"epoch": 8.87,
"learning_rate": 1.9106088250797264e-05,
"loss": 0.0529,
"step": 488
},
{
"epoch": 8.95,
"learning_rate": 1.674733938957873e-05,
"loss": 0.0424,
"step": 492
},
{
"epoch": 9.0,
"gpt4_scores": 0.586,
"step": 495
},
{
"epoch": 9.0,
"std": 0.05261254603229158,
"step": 495
},
{
"epoch": 9.0,
"eval_loss": 3.401412010192871,
"eval_runtime": 4.9704,
"eval_samples_per_second": 4.627,
"eval_steps_per_second": 1.207,
"step": 495
},
{
"epoch": 9.02,
"learning_rate": 1.4538909090118846e-05,
"loss": 0.0488,
"step": 496
},
{
"epoch": 9.09,
"learning_rate": 1.2482220564763668e-05,
"loss": 0.0406,
"step": 500
},
{
"epoch": 9.16,
"learning_rate": 1.0578599236598707e-05,
"loss": 0.0387,
"step": 504
},
{
"epoch": 9.24,
"learning_rate": 8.829271885286095e-06,
"loss": 0.0413,
"step": 508
},
{
"epoch": 9.31,
"learning_rate": 7.235365856472442e-06,
"loss": 0.0512,
"step": 512
},
{
"epoch": 9.38,
"learning_rate": 5.797908335276214e-06,
"loss": 0.0451,
"step": 516
},
{
"epoch": 9.45,
"learning_rate": 4.517825684323323e-06,
"loss": 0.0385,
"step": 520
},
{
"epoch": 9.53,
"learning_rate": 3.3959428467570664e-06,
"loss": 0.0377,
"step": 524
},
{
"epoch": 9.6,
"learning_rate": 2.4329828146074094e-06,
"loss": 0.0434,
"step": 528
},
{
"epoch": 9.67,
"learning_rate": 1.6295661628624448e-06,
"loss": 0.0442,
"step": 532
},
{
"epoch": 9.75,
"learning_rate": 9.862106495415469e-07,
"loss": 0.0418,
"step": 536
},
{
"epoch": 9.82,
"learning_rate": 5.033308820289185e-07,
"loss": 0.0457,
"step": 540
},
{
"epoch": 9.89,
"learning_rate": 1.8123804988159908e-07,
"loss": 0.0428,
"step": 544
},
{
"epoch": 9.96,
"learning_rate": 2.0139724285161975e-08,
"loss": 0.0449,
"step": 548
},
{
"epoch": 10.0,
"step": 550,
"total_flos": 1.879358707856179e+16,
"train_loss": 0.0,
"train_runtime": 13.7041,
"train_samples_per_second": 158.347,
"train_steps_per_second": 40.134
}
],
"logging_steps": 4,
"max_steps": 550,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 55,
"total_flos": 1.879358707856179e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
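
The JSON above can be inspected programmatically. Below is a minimal sketch (not part of the repository) showing one way to load this trainer state and summarize the logged train and eval losses; the local filename/path is an assumption, not something recorded in the checkpoint.

# Sketch only: assumes trainer_state.json has been downloaded to the working directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each entry in log_history is a dict; train steps carry "loss", eval steps carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"logged train points: {len(train)}, eval points: {len(evals)}")
if train:
    print("last logged train loss:", train[-1][1])
for step, loss in evals:
    print(f"eval_loss at step {step}: {loss:.4f}")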