{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00909090909090909,
"grad_norm": 3.072246789932251,
"learning_rate": 0.0004984848484848485,
"loss": 3.4024,
"step": 1
},
{
"epoch": 0.01818181818181818,
"grad_norm": 2.9938416481018066,
"learning_rate": 0.000496969696969697,
"loss": 3.4175,
"step": 2
},
{
"epoch": 0.02727272727272727,
"grad_norm": 2.8850042819976807,
"learning_rate": 0.0004954545454545455,
"loss": 3.406,
"step": 3
},
{
"epoch": 0.03636363636363636,
"grad_norm": 3.1597700119018555,
"learning_rate": 0.000493939393939394,
"loss": 3.1781,
"step": 4
},
{
"epoch": 0.045454545454545456,
"grad_norm": 2.873157262802124,
"learning_rate": 0.0004924242424242425,
"loss": 3.3714,
"step": 5
},
{
"epoch": 0.05454545454545454,
"grad_norm": 2.6407289505004883,
"learning_rate": 0.0004909090909090909,
"loss": 3.2687,
"step": 6
},
{
"epoch": 0.06363636363636363,
"grad_norm": 2.6007120609283447,
"learning_rate": 0.0004893939393939393,
"loss": 2.4782,
"step": 7
},
{
"epoch": 0.07272727272727272,
"grad_norm": 3.4426627159118652,
"learning_rate": 0.00048787878787878784,
"loss": 3.4055,
"step": 8
},
{
"epoch": 0.08181818181818182,
"grad_norm": 2.847586154937744,
"learning_rate": 0.0004863636363636364,
"loss": 2.997,
"step": 9
},
{
"epoch": 0.09090909090909091,
"grad_norm": 2.890317916870117,
"learning_rate": 0.0004848484848484849,
"loss": 2.5766,
"step": 10
},
{
"epoch": 0.1,
"grad_norm": 2.904961109161377,
"learning_rate": 0.00048333333333333334,
"loss": 2.8253,
"step": 11
},
{
"epoch": 0.10909090909090909,
"grad_norm": 3.9057822227478027,
"learning_rate": 0.00048181818181818184,
"loss": 3.203,
"step": 12
},
{
"epoch": 0.11818181818181818,
"grad_norm": 3.1521217823028564,
"learning_rate": 0.0004803030303030303,
"loss": 2.6697,
"step": 13
},
{
"epoch": 0.12727272727272726,
"grad_norm": 2.8621177673339844,
"learning_rate": 0.0004787878787878788,
"loss": 2.7983,
"step": 14
},
{
"epoch": 0.13636363636363635,
"grad_norm": 3.0851597785949707,
"learning_rate": 0.0004772727272727273,
"loss": 2.9337,
"step": 15
},
{
"epoch": 0.14545454545454545,
"grad_norm": 3.1367125511169434,
"learning_rate": 0.0004757575757575758,
"loss": 2.7065,
"step": 16
},
{
"epoch": 0.15454545454545454,
"grad_norm": 3.4545798301696777,
"learning_rate": 0.0004742424242424243,
"loss": 2.5313,
"step": 17
},
{
"epoch": 0.16363636363636364,
"grad_norm": 3.1472649574279785,
"learning_rate": 0.0004727272727272727,
"loss": 2.823,
"step": 18
},
{
"epoch": 0.17272727272727273,
"grad_norm": 3.5677640438079834,
"learning_rate": 0.0004712121212121212,
"loss": 3.0701,
"step": 19
},
{
"epoch": 0.18181818181818182,
"grad_norm": 2.7954583168029785,
"learning_rate": 0.0004696969696969697,
"loss": 2.717,
"step": 20
},
{
"epoch": 0.19090909090909092,
"grad_norm": 2.7427642345428467,
"learning_rate": 0.0004681818181818182,
"loss": 2.5426,
"step": 21
},
{
"epoch": 0.2,
"grad_norm": 2.8571345806121826,
"learning_rate": 0.00046666666666666666,
"loss": 2.7008,
"step": 22
},
{
"epoch": 0.20909090909090908,
"grad_norm": null,
"learning_rate": 0.00046666666666666666,
"loss": 2.7254,
"step": 23
},
{
"epoch": 0.21818181818181817,
"grad_norm": 2.6938774585723877,
"learning_rate": 0.00046515151515151516,
"loss": 2.0546,
"step": 24
},
{
"epoch": 0.22727272727272727,
"grad_norm": 3.1118974685668945,
"learning_rate": 0.00046363636363636366,
"loss": 2.5798,
"step": 25
},
{
"epoch": 0.23636363636363636,
"grad_norm": 2.9915614128112793,
"learning_rate": 0.0004621212121212121,
"loss": 2.2886,
"step": 26
},
{
"epoch": 0.24545454545454545,
"grad_norm": 3.370804786682129,
"learning_rate": 0.00046060606060606066,
"loss": 3.0413,
"step": 27
},
{
"epoch": 0.2545454545454545,
"grad_norm": 3.392153739929199,
"learning_rate": 0.0004590909090909091,
"loss": 3.1874,
"step": 28
},
{
"epoch": 0.2636363636363636,
"grad_norm": 2.894507646560669,
"learning_rate": 0.0004575757575757576,
"loss": 2.8216,
"step": 29
},
{
"epoch": 0.2727272727272727,
"grad_norm": 2.8790500164031982,
"learning_rate": 0.00045606060606060605,
"loss": 2.5802,
"step": 30
},
{
"epoch": 0.2818181818181818,
"grad_norm": 2.648164987564087,
"learning_rate": 0.00045454545454545455,
"loss": 2.5944,
"step": 31
},
{
"epoch": 0.2909090909090909,
"grad_norm": 2.658068895339966,
"learning_rate": 0.000453030303030303,
"loss": 2.8305,
"step": 32
},
{
"epoch": 0.3,
"grad_norm": 3.077828884124756,
"learning_rate": 0.00045151515151515154,
"loss": 2.5191,
"step": 33
},
{
"epoch": 0.3090909090909091,
"grad_norm": 2.7916836738586426,
"learning_rate": 0.00045000000000000004,
"loss": 2.548,
"step": 34
},
{
"epoch": 0.3181818181818182,
"grad_norm": 2.780879497528076,
"learning_rate": 0.0004484848484848485,
"loss": 2.6919,
"step": 35
},
{
"epoch": 0.32727272727272727,
"grad_norm": 2.858031988143921,
"learning_rate": 0.000446969696969697,
"loss": 2.2139,
"step": 36
},
{
"epoch": 0.33636363636363636,
"grad_norm": 2.5262515544891357,
"learning_rate": 0.00044545454545454543,
"loss": 2.6746,
"step": 37
},
{
"epoch": 0.34545454545454546,
"grad_norm": 2.7128827571868896,
"learning_rate": 0.000443939393939394,
"loss": 2.9608,
"step": 38
},
{
"epoch": 0.35454545454545455,
"grad_norm": 2.1641690731048584,
"learning_rate": 0.00044242424242424243,
"loss": 2.3184,
"step": 39
},
{
"epoch": 0.36363636363636365,
"grad_norm": 2.511988878250122,
"learning_rate": 0.00044090909090909093,
"loss": 2.2914,
"step": 40
},
{
"epoch": 0.37272727272727274,
"grad_norm": 2.837374448776245,
"learning_rate": 0.0004393939393939394,
"loss": 2.8177,
"step": 41
},
{
"epoch": 0.38181818181818183,
"grad_norm": 2.4641802310943604,
"learning_rate": 0.00043787878787878787,
"loss": 3.0899,
"step": 42
},
{
"epoch": 0.39090909090909093,
"grad_norm": 2.5493505001068115,
"learning_rate": 0.00043636363636363637,
"loss": 2.6168,
"step": 43
},
{
"epoch": 0.4,
"grad_norm": 2.844992160797119,
"learning_rate": 0.00043484848484848487,
"loss": 2.0643,
"step": 44
},
{
"epoch": 0.4090909090909091,
"grad_norm": 2.301405429840088,
"learning_rate": 0.00043333333333333337,
"loss": 2.1776,
"step": 45
},
{
"epoch": 0.41818181818181815,
"grad_norm": 2.246461868286133,
"learning_rate": 0.0004318181818181818,
"loss": 2.1135,
"step": 46
},
{
"epoch": 0.42727272727272725,
"grad_norm": 2.560056686401367,
"learning_rate": 0.0004303030303030303,
"loss": 2.8383,
"step": 47
},
{
"epoch": 0.43636363636363634,
"grad_norm": 2.7504286766052246,
"learning_rate": 0.00042878787878787876,
"loss": 2.369,
"step": 48
},
{
"epoch": 0.44545454545454544,
"grad_norm": 2.3253281116485596,
"learning_rate": 0.00042727272727272726,
"loss": 2.1502,
"step": 49
},
{
"epoch": 0.45454545454545453,
"grad_norm": 2.799269676208496,
"learning_rate": 0.0004257575757575758,
"loss": 2.6997,
"step": 50
},
{
"epoch": 0.4636363636363636,
"grad_norm": 2.5673718452453613,
"learning_rate": 0.00042424242424242425,
"loss": 2.8942,
"step": 51
},
{
"epoch": 0.4727272727272727,
"grad_norm": 2.344376802444458,
"learning_rate": 0.00042272727272727275,
"loss": 2.7204,
"step": 52
},
{
"epoch": 0.4818181818181818,
"grad_norm": 2.5352869033813477,
"learning_rate": 0.0004212121212121212,
"loss": 2.5996,
"step": 53
},
{
"epoch": 0.4909090909090909,
"grad_norm": 2.6235299110412598,
"learning_rate": 0.0004196969696969697,
"loss": 2.7003,
"step": 54
},
{
"epoch": 0.5,
"grad_norm": 2.1709656715393066,
"learning_rate": 0.00041818181818181814,
"loss": 2.2072,
"step": 55
},
{
"epoch": 0.509090909090909,
"grad_norm": 2.1792054176330566,
"learning_rate": 0.0004166666666666667,
"loss": 1.994,
"step": 56
},
{
"epoch": 0.5181818181818182,
"grad_norm": 2.7915806770324707,
"learning_rate": 0.0004151515151515152,
"loss": 2.7364,
"step": 57
},
{
"epoch": 0.5272727272727272,
"grad_norm": 2.67014217376709,
"learning_rate": 0.00041363636363636364,
"loss": 2.419,
"step": 58
},
{
"epoch": 0.5363636363636364,
"grad_norm": 2.3936879634857178,
"learning_rate": 0.00041212121212121214,
"loss": 2.594,
"step": 59
},
{
"epoch": 0.5454545454545454,
"grad_norm": 2.2096967697143555,
"learning_rate": 0.0004106060606060606,
"loss": 2.1546,
"step": 60
},
{
"epoch": 0.5545454545454546,
"grad_norm": 2.4055185317993164,
"learning_rate": 0.00040909090909090913,
"loss": 2.6045,
"step": 61
},
{
"epoch": 0.5636363636363636,
"grad_norm": 2.4559690952301025,
"learning_rate": 0.0004075757575757576,
"loss": 2.3182,
"step": 62
},
{
"epoch": 0.5727272727272728,
"grad_norm": 2.608903408050537,
"learning_rate": 0.0004060606060606061,
"loss": 2.3894,
"step": 63
},
{
"epoch": 0.5818181818181818,
"grad_norm": 2.3189706802368164,
"learning_rate": 0.0004045454545454546,
"loss": 2.0284,
"step": 64
},
{
"epoch": 0.5909090909090909,
"grad_norm": 2.2709004878997803,
"learning_rate": 0.000403030303030303,
"loss": 2.1272,
"step": 65
},
{
"epoch": 0.6,
"grad_norm": 2.2770652770996094,
"learning_rate": 0.0004015151515151515,
"loss": 2.2473,
"step": 66
},
{
"epoch": 0.6090909090909091,
"grad_norm": 2.5721359252929688,
"learning_rate": 0.0004,
"loss": 3.1117,
"step": 67
},
{
"epoch": 0.6181818181818182,
"grad_norm": 2.5537078380584717,
"learning_rate": 0.0003984848484848485,
"loss": 2.3253,
"step": 68
},
{
"epoch": 0.6272727272727273,
"grad_norm": 2.4199492931365967,
"learning_rate": 0.00039696969696969696,
"loss": 2.3951,
"step": 69
},
{
"epoch": 0.6363636363636364,
"grad_norm": 2.334942579269409,
"learning_rate": 0.00039545454545454546,
"loss": 1.9247,
"step": 70
},
{
"epoch": 0.6454545454545455,
"grad_norm": 4.504452705383301,
"learning_rate": 0.0003939393939393939,
"loss": 2.286,
"step": 71
},
{
"epoch": 0.6545454545454545,
"grad_norm": 2.5388824939727783,
"learning_rate": 0.0003924242424242424,
"loss": 2.4846,
"step": 72
},
{
"epoch": 0.6636363636363637,
"grad_norm": null,
"learning_rate": 0.0003924242424242424,
"loss": 2.3651,
"step": 73
},
{
"epoch": 0.6727272727272727,
"grad_norm": 2.6247713565826416,
"learning_rate": 0.00039090909090909096,
"loss": 2.4366,
"step": 74
},
{
"epoch": 0.6818181818181818,
"grad_norm": 2.6662254333496094,
"learning_rate": 0.0003893939393939394,
"loss": 2.4153,
"step": 75
},
{
"epoch": 0.6909090909090909,
"grad_norm": 2.6559085845947266,
"learning_rate": 0.0003878787878787879,
"loss": 2.2564,
"step": 76
},
{
"epoch": 0.7,
"grad_norm": 2.5082592964172363,
"learning_rate": 0.00038636363636363635,
"loss": 2.2927,
"step": 77
},
{
"epoch": 0.7090909090909091,
"grad_norm": 2.240920066833496,
"learning_rate": 0.00038484848484848485,
"loss": 2.1802,
"step": 78
},
{
"epoch": 0.7181818181818181,
"grad_norm": 2.321823835372925,
"learning_rate": 0.00038333333333333334,
"loss": 2.2249,
"step": 79
},
{
"epoch": 0.7272727272727273,
"grad_norm": 2.708307981491089,
"learning_rate": 0.00038181818181818184,
"loss": 2.074,
"step": 80
},
{
"epoch": 0.7363636363636363,
"grad_norm": 3.075338840484619,
"learning_rate": 0.00038030303030303034,
"loss": 2.4737,
"step": 81
},
{
"epoch": 0.7454545454545455,
"grad_norm": 2.4267024993896484,
"learning_rate": 0.0003787878787878788,
"loss": 2.2937,
"step": 82
},
{
"epoch": 0.7545454545454545,
"grad_norm": 2.877577543258667,
"learning_rate": 0.0003772727272727273,
"loss": 2.5973,
"step": 83
},
{
"epoch": 0.7636363636363637,
"grad_norm": 2.5287859439849854,
"learning_rate": 0.00037575757575757573,
"loss": 2.2226,
"step": 84
},
{
"epoch": 0.7727272727272727,
"grad_norm": 2.423067331314087,
"learning_rate": 0.0003742424242424243,
"loss": 1.9636,
"step": 85
},
{
"epoch": 0.7818181818181819,
"grad_norm": 2.6509439945220947,
"learning_rate": 0.00037272727272727273,
"loss": 2.8285,
"step": 86
},
{
"epoch": 0.7909090909090909,
"grad_norm": 2.527987480163574,
"learning_rate": 0.00037121212121212123,
"loss": 2.267,
"step": 87
},
{
"epoch": 0.8,
"grad_norm": 2.641253709793091,
"learning_rate": 0.00036969696969696967,
"loss": 3.0696,
"step": 88
},
{
"epoch": 0.8090909090909091,
"grad_norm": 2.3204126358032227,
"learning_rate": 0.00036818181818181817,
"loss": 2.621,
"step": 89
},
{
"epoch": 0.8181818181818182,
"grad_norm": 2.6353261470794678,
"learning_rate": 0.00036666666666666667,
"loss": 2.3641,
"step": 90
},
{
"epoch": 0.8272727272727273,
"grad_norm": 2.0846307277679443,
"learning_rate": 0.00036515151515151517,
"loss": 2.4313,
"step": 91
},
{
"epoch": 0.8363636363636363,
"grad_norm": 2.263862371444702,
"learning_rate": 0.00036363636363636367,
"loss": 2.1305,
"step": 92
},
{
"epoch": 0.8454545454545455,
"grad_norm": 2.4628946781158447,
"learning_rate": 0.0003621212121212121,
"loss": 2.3846,
"step": 93
},
{
"epoch": 0.8545454545454545,
"grad_norm": 2.1154558658599854,
"learning_rate": 0.0003606060606060606,
"loss": 2.0915,
"step": 94
},
{
"epoch": 0.8636363636363636,
"grad_norm": 2.421992540359497,
"learning_rate": 0.00035909090909090906,
"loss": 1.966,
"step": 95
},
{
"epoch": 0.8727272727272727,
"grad_norm": 2.6157567501068115,
"learning_rate": 0.0003575757575757576,
"loss": 2.0816,
"step": 96
},
{
"epoch": 0.8818181818181818,
"grad_norm": 2.487288236618042,
"learning_rate": 0.0003560606060606061,
"loss": 2.0444,
"step": 97
},
{
"epoch": 0.8909090909090909,
"grad_norm": 2.3463704586029053,
"learning_rate": 0.00035454545454545455,
"loss": 2.3975,
"step": 98
},
{
"epoch": 0.9,
"grad_norm": 2.7266664505004883,
"learning_rate": 0.00035303030303030305,
"loss": 2.2822,
"step": 99
},
{
"epoch": 0.9090909090909091,
"grad_norm": 2.4807024002075195,
"learning_rate": 0.0003515151515151515,
"loss": 2.1119,
"step": 100
},
{
"epoch": 0.9181818181818182,
"grad_norm": 2.7550277709960938,
"learning_rate": 0.00035,
"loss": 2.2461,
"step": 101
},
{
"epoch": 0.9272727272727272,
"grad_norm": 2.43615984916687,
"learning_rate": 0.0003484848484848485,
"loss": 2.559,
"step": 102
},
{
"epoch": 0.9363636363636364,
"grad_norm": 3.006021499633789,
"learning_rate": 0.000346969696969697,
"loss": 2.2031,
"step": 103
},
{
"epoch": 0.9454545454545454,
"grad_norm": 2.3321893215179443,
"learning_rate": 0.00034545454545454544,
"loss": 2.2653,
"step": 104
},
{
"epoch": 0.9545454545454546,
"grad_norm": 2.7566874027252197,
"learning_rate": 0.00034393939393939394,
"loss": 2.1273,
"step": 105
},
{
"epoch": 0.9636363636363636,
"grad_norm": 2.574964761734009,
"learning_rate": 0.00034242424242424244,
"loss": 2.1953,
"step": 106
},
{
"epoch": 0.9727272727272728,
"grad_norm": 2.2862184047698975,
"learning_rate": 0.0003409090909090909,
"loss": 1.9793,
"step": 107
},
{
"epoch": 0.9818181818181818,
"grad_norm": 2.3291995525360107,
"learning_rate": 0.00033939393939393943,
"loss": 2.0028,
"step": 108
},
{
"epoch": 0.990909090909091,
"grad_norm": 2.4766077995300293,
"learning_rate": 0.0003378787878787879,
"loss": 2.6716,
"step": 109
},
{
"epoch": 1.0,
"grad_norm": 2.5020952224731445,
"learning_rate": 0.0003363636363636364,
"loss": 2.3641,
"step": 110
},
{
"epoch": 1.0,
"eval_f1": 0.8828,
"eval_gen_len": 47.7636,
"eval_loss": 2.201944589614868,
"eval_precision": 0.8806,
"eval_recall": 0.8852,
"eval_rouge1": 0.4172,
"eval_rouge2": 0.1774,
"eval_rougeL": 0.3518,
"eval_rougeLsum": 0.386,
"eval_runtime": 22.5664,
"eval_samples_per_second": 4.874,
"eval_steps_per_second": 0.62,
"step": 110
},
{
"epoch": 1.009090909090909,
"grad_norm": 2.647275686264038,
"learning_rate": 0.0003348484848484848,
"loss": 2.3777,
"step": 111
},
{
"epoch": 1.018181818181818,
"grad_norm": 2.123640298843384,
"learning_rate": 0.0003333333333333333,
"loss": 1.861,
"step": 112
},
{
"epoch": 1.0272727272727273,
"grad_norm": 2.4780056476593018,
"learning_rate": 0.0003318181818181819,
"loss": 2.2539,
"step": 113
},
{
"epoch": 1.0363636363636364,
"grad_norm": 2.6516036987304688,
"learning_rate": 0.0003303030303030303,
"loss": 2.806,
"step": 114
},
{
"epoch": 1.0454545454545454,
"grad_norm": 2.273198366165161,
"learning_rate": 0.0003287878787878788,
"loss": 1.894,
"step": 115
},
{
"epoch": 1.0545454545454545,
"grad_norm": 2.6385703086853027,
"learning_rate": 0.00032727272727272726,
"loss": 2.3756,
"step": 116
},
{
"epoch": 1.0636363636363637,
"grad_norm": 2.1708078384399414,
"learning_rate": 0.00032575757575757576,
"loss": 1.7245,
"step": 117
},
{
"epoch": 1.0727272727272728,
"grad_norm": 2.353435516357422,
"learning_rate": 0.0003242424242424242,
"loss": 2.2521,
"step": 118
},
{
"epoch": 1.0818181818181818,
"grad_norm": 2.38700532913208,
"learning_rate": 0.00032272727272727276,
"loss": 2.4595,
"step": 119
},
{
"epoch": 1.0909090909090908,
"grad_norm": 2.301133632659912,
"learning_rate": 0.00032121212121212126,
"loss": 2.5753,
"step": 120
},
{
"epoch": 1.1,
"grad_norm": 2.515345811843872,
"learning_rate": 0.0003196969696969697,
"loss": 2.1911,
"step": 121
},
{
"epoch": 1.1090909090909091,
"grad_norm": 2.5848662853240967,
"learning_rate": 0.0003181818181818182,
"loss": 2.0735,
"step": 122
},
{
"epoch": 1.1181818181818182,
"grad_norm": 3.129497528076172,
"learning_rate": 0.00031666666666666665,
"loss": 2.4778,
"step": 123
},
{
"epoch": 1.1272727272727272,
"grad_norm": 2.623422145843506,
"learning_rate": 0.00031515151515151515,
"loss": 2.3883,
"step": 124
},
{
"epoch": 1.1363636363636362,
"grad_norm": 2.2395179271698,
"learning_rate": 0.00031363636363636365,
"loss": 2.2265,
"step": 125
},
{
"epoch": 1.1454545454545455,
"grad_norm": 2.5940821170806885,
"learning_rate": 0.00031212121212121214,
"loss": 2.2186,
"step": 126
},
{
"epoch": 1.1545454545454545,
"grad_norm": 3.4867501258850098,
"learning_rate": 0.0003106060606060606,
"loss": 2.1858,
"step": 127
},
{
"epoch": 1.1636363636363636,
"grad_norm": 2.4735608100891113,
"learning_rate": 0.0003090909090909091,
"loss": 2.1304,
"step": 128
},
{
"epoch": 1.1727272727272728,
"grad_norm": 2.619661331176758,
"learning_rate": 0.0003075757575757576,
"loss": 2.5575,
"step": 129
},
{
"epoch": 1.1818181818181819,
"grad_norm": 2.6737751960754395,
"learning_rate": 0.00030606060606060603,
"loss": 2.5957,
"step": 130
},
{
"epoch": 1.190909090909091,
"grad_norm": 2.519474506378174,
"learning_rate": 0.0003045454545454546,
"loss": 2.1605,
"step": 131
},
{
"epoch": 1.2,
"grad_norm": 2.309298276901245,
"learning_rate": 0.00030303030303030303,
"loss": 2.3122,
"step": 132
},
{
"epoch": 1.209090909090909,
"grad_norm": 2.2344257831573486,
"learning_rate": 0.00030151515151515153,
"loss": 1.9541,
"step": 133
},
{
"epoch": 1.2181818181818183,
"grad_norm": 2.624655246734619,
"learning_rate": 0.0003,
"loss": 2.2801,
"step": 134
},
{
"epoch": 1.2272727272727273,
"grad_norm": 2.513366222381592,
"learning_rate": 0.00029848484848484847,
"loss": 2.5152,
"step": 135
},
{
"epoch": 1.2363636363636363,
"grad_norm": 2.4093875885009766,
"learning_rate": 0.000296969696969697,
"loss": 2.2779,
"step": 136
},
{
"epoch": 1.2454545454545454,
"grad_norm": 2.115041971206665,
"learning_rate": 0.00029545454545454547,
"loss": 1.7012,
"step": 137
},
{
"epoch": 1.2545454545454544,
"grad_norm": 2.436295509338379,
"learning_rate": 0.00029393939393939397,
"loss": 2.0279,
"step": 138
},
{
"epoch": 1.2636363636363637,
"grad_norm": 2.2901837825775146,
"learning_rate": 0.0002924242424242424,
"loss": 2.2395,
"step": 139
},
{
"epoch": 1.2727272727272727,
"grad_norm": 2.7776708602905273,
"learning_rate": 0.0002909090909090909,
"loss": 2.5537,
"step": 140
},
{
"epoch": 1.2818181818181817,
"grad_norm": 2.3812379837036133,
"learning_rate": 0.00028939393939393936,
"loss": 2.4604,
"step": 141
},
{
"epoch": 1.290909090909091,
"grad_norm": 2.4496119022369385,
"learning_rate": 0.0002878787878787879,
"loss": 1.9706,
"step": 142
},
{
"epoch": 1.3,
"grad_norm": 2.4268991947174072,
"learning_rate": 0.00028636363636363636,
"loss": 2.5722,
"step": 143
},
{
"epoch": 1.309090909090909,
"grad_norm": 2.3241193294525146,
"learning_rate": 0.00028484848484848485,
"loss": 1.9134,
"step": 144
},
{
"epoch": 1.3181818181818181,
"grad_norm": 2.587451219558716,
"learning_rate": 0.00028333333333333335,
"loss": 2.5717,
"step": 145
},
{
"epoch": 1.3272727272727272,
"grad_norm": 2.5142734050750732,
"learning_rate": 0.0002818181818181818,
"loss": 2.2671,
"step": 146
},
{
"epoch": 1.3363636363636364,
"grad_norm": 2.3716535568237305,
"learning_rate": 0.0002803030303030303,
"loss": 2.1641,
"step": 147
},
{
"epoch": 1.3454545454545455,
"grad_norm": 2.928048849105835,
"learning_rate": 0.0002787878787878788,
"loss": 2.4632,
"step": 148
},
{
"epoch": 1.3545454545454545,
"grad_norm": 2.8642890453338623,
"learning_rate": 0.0002772727272727273,
"loss": 2.0992,
"step": 149
},
{
"epoch": 1.3636363636363638,
"grad_norm": 2.571286201477051,
"learning_rate": 0.00027575757575757574,
"loss": 2.0022,
"step": 150
},
{
"epoch": 1.3727272727272728,
"grad_norm": 2.6257894039154053,
"learning_rate": 0.00027424242424242424,
"loss": 2.3255,
"step": 151
},
{
"epoch": 1.3818181818181818,
"grad_norm": 2.7065744400024414,
"learning_rate": 0.00027272727272727274,
"loss": 2.3697,
"step": 152
},
{
"epoch": 1.3909090909090909,
"grad_norm": 2.388538122177124,
"learning_rate": 0.00027121212121212124,
"loss": 2.1528,
"step": 153
},
{
"epoch": 1.4,
"grad_norm": 3.155831813812256,
"learning_rate": 0.00026969696969696974,
"loss": 2.2429,
"step": 154
},
{
"epoch": 1.4090909090909092,
"grad_norm": 2.4925553798675537,
"learning_rate": 0.0002681818181818182,
"loss": 2.3352,
"step": 155
},
{
"epoch": 1.4181818181818182,
"grad_norm": 2.4231719970703125,
"learning_rate": 0.0002666666666666667,
"loss": 2.2673,
"step": 156
},
{
"epoch": 1.4272727272727272,
"grad_norm": 2.527735710144043,
"learning_rate": 0.0002651515151515151,
"loss": 1.7499,
"step": 157
},
{
"epoch": 1.4363636363636363,
"grad_norm": 2.4436838626861572,
"learning_rate": 0.0002636363636363636,
"loss": 2.2615,
"step": 158
},
{
"epoch": 1.4454545454545453,
"grad_norm": 2.401718854904175,
"learning_rate": 0.0002621212121212122,
"loss": 2.2756,
"step": 159
},
{
"epoch": 1.4545454545454546,
"grad_norm": 2.2467689514160156,
"learning_rate": 0.0002606060606060606,
"loss": 2.3878,
"step": 160
},
{
"epoch": 1.4636363636363636,
"grad_norm": 2.4564456939697266,
"learning_rate": 0.0002590909090909091,
"loss": 2.2045,
"step": 161
},
{
"epoch": 1.4727272727272727,
"grad_norm": 2.67653751373291,
"learning_rate": 0.00025757575757575756,
"loss": 1.7914,
"step": 162
},
{
"epoch": 1.481818181818182,
"grad_norm": 2.8810715675354004,
"learning_rate": 0.00025606060606060606,
"loss": 2.6104,
"step": 163
},
{
"epoch": 1.490909090909091,
"grad_norm": 2.9596915245056152,
"learning_rate": 0.0002545454545454545,
"loss": 1.9902,
"step": 164
},
{
"epoch": 1.5,
"grad_norm": 2.1952764987945557,
"learning_rate": 0.00025303030303030306,
"loss": 1.717,
"step": 165
},
{
"epoch": 1.509090909090909,
"grad_norm": 2.576174259185791,
"learning_rate": 0.0002515151515151515,
"loss": 2.3907,
"step": 166
},
{
"epoch": 1.518181818181818,
"grad_norm": 2.540867567062378,
"learning_rate": 0.00025,
"loss": 2.2214,
"step": 167
},
{
"epoch": 1.5272727272727273,
"grad_norm": 2.2856028079986572,
"learning_rate": 0.0002484848484848485,
"loss": 1.8133,
"step": 168
},
{
"epoch": 1.5363636363636364,
"grad_norm": 2.8178927898406982,
"learning_rate": 0.000246969696969697,
"loss": 2.199,
"step": 169
},
{
"epoch": 1.5454545454545454,
"grad_norm": 2.3742241859436035,
"learning_rate": 0.00024545454545454545,
"loss": 1.9071,
"step": 170
},
{
"epoch": 1.5545454545454547,
"grad_norm": 2.286177396774292,
"learning_rate": 0.00024393939393939392,
"loss": 2.3051,
"step": 171
},
{
"epoch": 1.5636363636363635,
"grad_norm": 2.524489164352417,
"learning_rate": 0.00024242424242424245,
"loss": 2.0333,
"step": 172
},
{
"epoch": 1.5727272727272728,
"grad_norm": 3.2371888160705566,
"learning_rate": 0.00024090909090909092,
"loss": 2.056,
"step": 173
},
{
"epoch": 1.5818181818181818,
"grad_norm": 2.74900221824646,
"learning_rate": 0.0002393939393939394,
"loss": 2.3619,
"step": 174
},
{
"epoch": 1.5909090909090908,
"grad_norm": 2.49453067779541,
"learning_rate": 0.0002378787878787879,
"loss": 2.191,
"step": 175
},
{
"epoch": 1.6,
"grad_norm": 2.836361885070801,
"learning_rate": 0.00023636363636363636,
"loss": 2.4605,
"step": 176
},
{
"epoch": 1.6090909090909091,
"grad_norm": 3.4644951820373535,
"learning_rate": 0.00023484848484848486,
"loss": 2.116,
"step": 177
},
{
"epoch": 1.6181818181818182,
"grad_norm": 3.0246944427490234,
"learning_rate": 0.00023333333333333333,
"loss": 2.1507,
"step": 178
},
{
"epoch": 1.6272727272727274,
"grad_norm": 2.28613018989563,
"learning_rate": 0.00023181818181818183,
"loss": 2.0915,
"step": 179
},
{
"epoch": 1.6363636363636362,
"grad_norm": 2.576760768890381,
"learning_rate": 0.00023030303030303033,
"loss": 2.2536,
"step": 180
},
{
"epoch": 1.6454545454545455,
"grad_norm": 2.953038454055786,
"learning_rate": 0.0002287878787878788,
"loss": 2.0687,
"step": 181
},
{
"epoch": 1.6545454545454545,
"grad_norm": 2.397510528564453,
"learning_rate": 0.00022727272727272727,
"loss": 1.7358,
"step": 182
},
{
"epoch": 1.6636363636363636,
"grad_norm": 3.0446624755859375,
"learning_rate": 0.00022575757575757577,
"loss": 2.0852,
"step": 183
},
{
"epoch": 1.6727272727272728,
"grad_norm": 2.5387020111083984,
"learning_rate": 0.00022424242424242424,
"loss": 2.6556,
"step": 184
},
{
"epoch": 1.6818181818181817,
"grad_norm": 2.7849063873291016,
"learning_rate": 0.00022272727272727272,
"loss": 2.2982,
"step": 185
},
{
"epoch": 1.690909090909091,
"grad_norm": 2.275547504425049,
"learning_rate": 0.00022121212121212121,
"loss": 2.247,
"step": 186
},
{
"epoch": 1.7,
"grad_norm": 2.217078685760498,
"learning_rate": 0.0002196969696969697,
"loss": 2.01,
"step": 187
},
{
"epoch": 1.709090909090909,
"grad_norm": 2.437875747680664,
"learning_rate": 0.00021818181818181818,
"loss": 2.3097,
"step": 188
},
{
"epoch": 1.7181818181818183,
"grad_norm": 1.9528652429580688,
"learning_rate": 0.00021666666666666668,
"loss": 1.7132,
"step": 189
},
{
"epoch": 1.7272727272727273,
"grad_norm": 2.658073663711548,
"learning_rate": 0.00021515151515151516,
"loss": 2.2414,
"step": 190
},
{
"epoch": 1.7363636363636363,
"grad_norm": 2.47921085357666,
"learning_rate": 0.00021363636363636363,
"loss": 2.1501,
"step": 191
},
{
"epoch": 1.7454545454545456,
"grad_norm": 2.813053607940674,
"learning_rate": 0.00021212121212121213,
"loss": 2.2338,
"step": 192
},
{
"epoch": 1.7545454545454544,
"grad_norm": 2.383352041244507,
"learning_rate": 0.0002106060606060606,
"loss": 2.0248,
"step": 193
},
{
"epoch": 1.7636363636363637,
"grad_norm": 2.23240327835083,
"learning_rate": 0.00020909090909090907,
"loss": 2.4407,
"step": 194
},
{
"epoch": 1.7727272727272727,
"grad_norm": 2.3927600383758545,
"learning_rate": 0.0002075757575757576,
"loss": 2.1969,
"step": 195
},
{
"epoch": 1.7818181818181817,
"grad_norm": 2.6979734897613525,
"learning_rate": 0.00020606060606060607,
"loss": 2.3632,
"step": 196
},
{
"epoch": 1.790909090909091,
"grad_norm": 3.1346116065979004,
"learning_rate": 0.00020454545454545457,
"loss": 1.7359,
"step": 197
},
{
"epoch": 1.8,
"grad_norm": 2.7446019649505615,
"learning_rate": 0.00020303030303030304,
"loss": 2.2057,
"step": 198
},
{
"epoch": 1.809090909090909,
"grad_norm": 2.7223236560821533,
"learning_rate": 0.0002015151515151515,
"loss": 2.4566,
"step": 199
},
{
"epoch": 1.8181818181818183,
"grad_norm": 2.570932388305664,
"learning_rate": 0.0002,
"loss": 2.2638,
"step": 200
},
{
"epoch": 1.8272727272727272,
"grad_norm": 2.2431604862213135,
"learning_rate": 0.00019848484848484848,
"loss": 2.1802,
"step": 201
},
{
"epoch": 1.8363636363636364,
"grad_norm": 3.257647752761841,
"learning_rate": 0.00019696969696969695,
"loss": 2.2361,
"step": 202
},
{
"epoch": 1.8454545454545455,
"grad_norm": 2.444171905517578,
"learning_rate": 0.00019545454545454548,
"loss": 1.8914,
"step": 203
},
{
"epoch": 1.8545454545454545,
"grad_norm": 2.2748937606811523,
"learning_rate": 0.00019393939393939395,
"loss": 2.0162,
"step": 204
},
{
"epoch": 1.8636363636363638,
"grad_norm": 2.450481653213501,
"learning_rate": 0.00019242424242424242,
"loss": 1.8713,
"step": 205
},
{
"epoch": 1.8727272727272726,
"grad_norm": 2.7834432125091553,
"learning_rate": 0.00019090909090909092,
"loss": 2.1062,
"step": 206
},
{
"epoch": 1.8818181818181818,
"grad_norm": 2.4121110439300537,
"learning_rate": 0.0001893939393939394,
"loss": 2.0578,
"step": 207
},
{
"epoch": 1.8909090909090909,
"grad_norm": 2.533564567565918,
"learning_rate": 0.00018787878787878787,
"loss": 2.0605,
"step": 208
},
{
"epoch": 1.9,
"grad_norm": 2.543336868286133,
"learning_rate": 0.00018636363636363636,
"loss": 2.083,
"step": 209
},
{
"epoch": 1.9090909090909092,
"grad_norm": 3.0417320728302,
"learning_rate": 0.00018484848484848484,
"loss": 2.1015,
"step": 210
},
{
"epoch": 1.9181818181818182,
"grad_norm": 3.0233654975891113,
"learning_rate": 0.00018333333333333334,
"loss": 2.1515,
"step": 211
},
{
"epoch": 1.9272727272727272,
"grad_norm": 3.2592718601226807,
"learning_rate": 0.00018181818181818183,
"loss": 2.3158,
"step": 212
},
{
"epoch": 1.9363636363636365,
"grad_norm": 2.5174355506896973,
"learning_rate": 0.0001803030303030303,
"loss": 2.0665,
"step": 213
},
{
"epoch": 1.9454545454545453,
"grad_norm": 2.331172466278076,
"learning_rate": 0.0001787878787878788,
"loss": 2.2434,
"step": 214
},
{
"epoch": 1.9545454545454546,
"grad_norm": 2.3686776161193848,
"learning_rate": 0.00017727272727272728,
"loss": 2.3329,
"step": 215
},
{
"epoch": 1.9636363636363636,
"grad_norm": 2.747772693634033,
"learning_rate": 0.00017575757575757575,
"loss": 2.4027,
"step": 216
},
{
"epoch": 1.9727272727272727,
"grad_norm": 3.5569443702697754,
"learning_rate": 0.00017424242424242425,
"loss": 2.0921,
"step": 217
},
{
"epoch": 1.981818181818182,
"grad_norm": 2.4337990283966064,
"learning_rate": 0.00017272727272727272,
"loss": 1.8793,
"step": 218
},
{
"epoch": 1.990909090909091,
"grad_norm": 3.418469190597534,
"learning_rate": 0.00017121212121212122,
"loss": 2.6085,
"step": 219
},
{
"epoch": 2.0,
"grad_norm": 2.9401915073394775,
"learning_rate": 0.00016969696969696972,
"loss": 2.2228,
"step": 220
},
{
"epoch": 2.0,
"eval_f1": 0.8846,
"eval_gen_len": 48.1182,
"eval_loss": 2.1040308475494385,
"eval_precision": 0.882,
"eval_recall": 0.8875,
"eval_rouge1": 0.419,
"eval_rouge2": 0.1789,
"eval_rougeL": 0.3477,
"eval_rougeLsum": 0.3827,
"eval_runtime": 22.661,
"eval_samples_per_second": 4.854,
"eval_steps_per_second": 0.618,
"step": 220
},
{
"epoch": 2.0090909090909093,
"grad_norm": 2.243396520614624,
"learning_rate": 0.0001681818181818182,
"loss": 1.6725,
"step": 221
},
{
"epoch": 2.018181818181818,
"grad_norm": 2.3622076511383057,
"learning_rate": 0.00016666666666666666,
"loss": 2.0792,
"step": 222
},
{
"epoch": 2.0272727272727273,
"grad_norm": 2.4615345001220703,
"learning_rate": 0.00016515151515151516,
"loss": 2.1534,
"step": 223
},
{
"epoch": 2.036363636363636,
"grad_norm": 3.332016706466675,
"learning_rate": 0.00016363636363636363,
"loss": 2.151,
"step": 224
},
{
"epoch": 2.0454545454545454,
"grad_norm": 2.830742597579956,
"learning_rate": 0.0001621212121212121,
"loss": 2.4115,
"step": 225
},
{
"epoch": 2.0545454545454547,
"grad_norm": 2.9332048892974854,
"learning_rate": 0.00016060606060606063,
"loss": 2.8091,
"step": 226
},
{
"epoch": 2.0636363636363635,
"grad_norm": 3.2479031085968018,
"learning_rate": 0.0001590909090909091,
"loss": 2.1244,
"step": 227
},
{
"epoch": 2.0727272727272728,
"grad_norm": 2.493685007095337,
"learning_rate": 0.00015757575757575757,
"loss": 2.1421,
"step": 228
},
{
"epoch": 2.081818181818182,
"grad_norm": 2.0491628646850586,
"learning_rate": 0.00015606060606060607,
"loss": 1.7688,
"step": 229
},
{
"epoch": 2.090909090909091,
"grad_norm": 2.582287549972534,
"learning_rate": 0.00015454545454545454,
"loss": 2.0157,
"step": 230
},
{
"epoch": 2.1,
"grad_norm": 2.326449155807495,
"learning_rate": 0.00015303030303030302,
"loss": 1.833,
"step": 231
},
{
"epoch": 2.109090909090909,
"grad_norm": 2.128833770751953,
"learning_rate": 0.00015151515151515152,
"loss": 1.7071,
"step": 232
},
{
"epoch": 2.118181818181818,
"grad_norm": 2.6494805812835693,
"learning_rate": 0.00015,
"loss": 1.8511,
"step": 233
},
{
"epoch": 2.1272727272727274,
"grad_norm": 2.407592296600342,
"learning_rate": 0.0001484848484848485,
"loss": 1.8951,
"step": 234
},
{
"epoch": 2.1363636363636362,
"grad_norm": 2.171020030975342,
"learning_rate": 0.00014696969696969698,
"loss": 1.8288,
"step": 235
},
{
"epoch": 2.1454545454545455,
"grad_norm": 2.6327128410339355,
"learning_rate": 0.00014545454545454546,
"loss": 2.3277,
"step": 236
},
{
"epoch": 2.1545454545454543,
"grad_norm": 2.796823740005493,
"learning_rate": 0.00014393939393939396,
"loss": 1.9495,
"step": 237
},
{
"epoch": 2.1636363636363636,
"grad_norm": 2.510796308517456,
"learning_rate": 0.00014242424242424243,
"loss": 2.0122,
"step": 238
},
{
"epoch": 2.172727272727273,
"grad_norm": 2.5371253490448,
"learning_rate": 0.0001409090909090909,
"loss": 2.1992,
"step": 239
},
{
"epoch": 2.1818181818181817,
"grad_norm": 2.458181858062744,
"learning_rate": 0.0001393939393939394,
"loss": 2.5883,
"step": 240
},
{
"epoch": 2.190909090909091,
"grad_norm": 2.3969123363494873,
"learning_rate": 0.00013787878787878787,
"loss": 1.7977,
"step": 241
},
{
"epoch": 2.2,
"grad_norm": 2.271862506866455,
"learning_rate": 0.00013636363636363637,
"loss": 1.4961,
"step": 242
},
{
"epoch": 2.209090909090909,
"grad_norm": 2.7577078342437744,
"learning_rate": 0.00013484848484848487,
"loss": 2.6418,
"step": 243
},
{
"epoch": 2.2181818181818183,
"grad_norm": 3.0357556343078613,
"learning_rate": 0.00013333333333333334,
"loss": 2.2361,
"step": 244
},
{
"epoch": 2.227272727272727,
"grad_norm": 3.5949878692626953,
"learning_rate": 0.0001318181818181818,
"loss": 2.1915,
"step": 245
},
{
"epoch": 2.2363636363636363,
"grad_norm": 2.8536949157714844,
"learning_rate": 0.0001303030303030303,
"loss": 2.2827,
"step": 246
},
{
"epoch": 2.2454545454545456,
"grad_norm": 2.9661660194396973,
"learning_rate": 0.00012878787878787878,
"loss": 2.2646,
"step": 247
},
{
"epoch": 2.2545454545454544,
"grad_norm": 2.6501827239990234,
"learning_rate": 0.00012727272727272725,
"loss": 2.2011,
"step": 248
},
{
"epoch": 2.2636363636363637,
"grad_norm": 2.4860503673553467,
"learning_rate": 0.00012575757575757575,
"loss": 2.4671,
"step": 249
},
{
"epoch": 2.2727272727272725,
"grad_norm": 2.6340763568878174,
"learning_rate": 0.00012424242424242425,
"loss": 2.5151,
"step": 250
},
{
"epoch": 2.2818181818181817,
"grad_norm": 1.978316307067871,
"learning_rate": 0.00012272727272727272,
"loss": 1.6343,
"step": 251
},
{
"epoch": 2.290909090909091,
"grad_norm": 2.898264169692993,
"learning_rate": 0.00012121212121212122,
"loss": 1.6661,
"step": 252
},
{
"epoch": 2.3,
"grad_norm": 2.3824710845947266,
"learning_rate": 0.0001196969696969697,
"loss": 2.0863,
"step": 253
},
{
"epoch": 2.309090909090909,
"grad_norm": 2.5257728099823,
"learning_rate": 0.00011818181818181818,
"loss": 2.2026,
"step": 254
},
{
"epoch": 2.3181818181818183,
"grad_norm": 2.5643668174743652,
"learning_rate": 0.00011666666666666667,
"loss": 1.994,
"step": 255
},
{
"epoch": 2.327272727272727,
"grad_norm": 3.254385471343994,
"learning_rate": 0.00011515151515151516,
"loss": 2.1402,
"step": 256
},
{
"epoch": 2.3363636363636364,
"grad_norm": 2.2531135082244873,
"learning_rate": 0.00011363636363636364,
"loss": 2.4741,
"step": 257
},
{
"epoch": 2.3454545454545457,
"grad_norm": 4.917692184448242,
"learning_rate": 0.00011212121212121212,
"loss": 2.0093,
"step": 258
},
{
"epoch": 2.3545454545454545,
"grad_norm": 2.2988479137420654,
"learning_rate": 0.00011060606060606061,
"loss": 2.2858,
"step": 259
},
{
"epoch": 2.3636363636363638,
"grad_norm": 2.2871227264404297,
"learning_rate": 0.00010909090909090909,
"loss": 2.325,
"step": 260
},
{
"epoch": 2.3727272727272726,
"grad_norm": 2.498077392578125,
"learning_rate": 0.00010757575757575758,
"loss": 1.8679,
"step": 261
},
{
"epoch": 2.381818181818182,
"grad_norm": 2.622990608215332,
"learning_rate": 0.00010606060606060606,
"loss": 1.9478,
"step": 262
},
{
"epoch": 2.390909090909091,
"grad_norm": 4.280647277832031,
"learning_rate": 0.00010454545454545454,
"loss": 1.4923,
"step": 263
},
{
"epoch": 2.4,
"grad_norm": 2.2783660888671875,
"learning_rate": 0.00010303030303030303,
"loss": 2.3224,
"step": 264
},
{
"epoch": 2.409090909090909,
"grad_norm": 2.2206008434295654,
"learning_rate": 0.00010151515151515152,
"loss": 1.9765,
"step": 265
},
{
"epoch": 2.418181818181818,
"grad_norm": 2.0535147190093994,
"learning_rate": 0.0001,
"loss": 1.99,
"step": 266
},
{
"epoch": 2.4272727272727272,
"grad_norm": 2.354729652404785,
"learning_rate": 9.848484848484848e-05,
"loss": 2.2821,
"step": 267
},
{
"epoch": 2.4363636363636365,
"grad_norm": 2.695911169052124,
"learning_rate": 9.696969696969698e-05,
"loss": 1.9721,
"step": 268
},
{
"epoch": 2.4454545454545453,
"grad_norm": 2.2769014835357666,
"learning_rate": 9.545454545454546e-05,
"loss": 2.0998,
"step": 269
},
{
"epoch": 2.4545454545454546,
"grad_norm": 2.3782975673675537,
"learning_rate": 9.393939393939393e-05,
"loss": 2.1317,
"step": 270
},
{
"epoch": 2.463636363636364,
"grad_norm": 2.41587495803833,
"learning_rate": 9.242424242424242e-05,
"loss": 2.0085,
"step": 271
},
{
"epoch": 2.4727272727272727,
"grad_norm": 2.0826306343078613,
"learning_rate": 9.090909090909092e-05,
"loss": 1.7122,
"step": 272
},
{
"epoch": 2.481818181818182,
"grad_norm": 3.2027394771575928,
"learning_rate": 8.93939393939394e-05,
"loss": 1.9247,
"step": 273
},
{
"epoch": 2.4909090909090907,
"grad_norm": 2.588639974594116,
"learning_rate": 8.787878787878787e-05,
"loss": 2.3237,
"step": 274
},
{
"epoch": 2.5,
"grad_norm": 2.358501672744751,
"learning_rate": 8.636363636363636e-05,
"loss": 2.1826,
"step": 275
},
{
"epoch": 2.509090909090909,
"grad_norm": 2.538494348526001,
"learning_rate": 8.484848484848486e-05,
"loss": 1.9181,
"step": 276
},
{
"epoch": 2.518181818181818,
"grad_norm": 2.2839136123657227,
"learning_rate": 8.333333333333333e-05,
"loss": 1.9576,
"step": 277
},
{
"epoch": 2.5272727272727273,
"grad_norm": 2.2792277336120605,
"learning_rate": 8.181818181818182e-05,
"loss": 2.0411,
"step": 278
},
{
"epoch": 2.536363636363636,
"grad_norm": 3.6436235904693604,
"learning_rate": 8.030303030303031e-05,
"loss": 2.3181,
"step": 279
},
{
"epoch": 2.5454545454545454,
"grad_norm": 2.2795042991638184,
"learning_rate": 7.878787878787879e-05,
"loss": 1.9551,
"step": 280
},
{
"epoch": 2.5545454545454547,
"grad_norm": 2.5352325439453125,
"learning_rate": 7.727272727272727e-05,
"loss": 1.9835,
"step": 281
},
{
"epoch": 2.5636363636363635,
"grad_norm": 2.5468242168426514,
"learning_rate": 7.575757575757576e-05,
"loss": 1.9274,
"step": 282
},
{
"epoch": 2.5727272727272728,
"grad_norm": 2.5645673274993896,
"learning_rate": 7.424242424242426e-05,
"loss": 2.0234,
"step": 283
},
{
"epoch": 2.581818181818182,
"grad_norm": 2.5357186794281006,
"learning_rate": 7.272727272727273e-05,
"loss": 2.0772,
"step": 284
},
{
"epoch": 2.590909090909091,
"grad_norm": 2.257965564727783,
"learning_rate": 7.121212121212121e-05,
"loss": 2.123,
"step": 285
},
{
"epoch": 2.6,
"grad_norm": 2.6818959712982178,
"learning_rate": 6.96969696969697e-05,
"loss": 1.8595,
"step": 286
},
{
"epoch": 2.6090909090909093,
"grad_norm": 2.531693458557129,
"learning_rate": 6.818181818181818e-05,
"loss": 1.9458,
"step": 287
},
{
"epoch": 2.618181818181818,
"grad_norm": 2.8416342735290527,
"learning_rate": 6.666666666666667e-05,
"loss": 2.209,
"step": 288
},
{
"epoch": 2.6272727272727274,
"grad_norm": 2.5356521606445312,
"learning_rate": 6.515151515151516e-05,
"loss": 1.8766,
"step": 289
},
{
"epoch": 2.6363636363636362,
"grad_norm": 2.3233187198638916,
"learning_rate": 6.363636363636363e-05,
"loss": 2.1223,
"step": 290
},
{
"epoch": 2.6454545454545455,
"grad_norm": 2.911799907684326,
"learning_rate": 6.212121212121213e-05,
"loss": 2.217,
"step": 291
},
{
"epoch": 2.6545454545454543,
"grad_norm": 2.856112241744995,
"learning_rate": 6.060606060606061e-05,
"loss": 2.7215,
"step": 292
},
{
"epoch": 2.6636363636363636,
"grad_norm": 2.6321911811828613,
"learning_rate": 5.909090909090909e-05,
"loss": 2.0893,
"step": 293
},
{
"epoch": 2.672727272727273,
"grad_norm": 2.3121089935302734,
"learning_rate": 5.757575757575758e-05,
"loss": 2.0218,
"step": 294
},
{
"epoch": 2.6818181818181817,
"grad_norm": 2.717296600341797,
"learning_rate": 5.606060606060606e-05,
"loss": 2.532,
"step": 295
},
{
"epoch": 2.690909090909091,
"grad_norm": 2.3445937633514404,
"learning_rate": 5.4545454545454546e-05,
"loss": 2.1638,
"step": 296
},
{
"epoch": 2.7,
"grad_norm": 2.4560205936431885,
"learning_rate": 5.303030303030303e-05,
"loss": 2.1792,
"step": 297
},
{
"epoch": 2.709090909090909,
"grad_norm": 2.5110905170440674,
"learning_rate": 5.151515151515152e-05,
"loss": 1.7745,
"step": 298
},
{
"epoch": 2.7181818181818183,
"grad_norm": 2.1710031032562256,
"learning_rate": 5e-05,
"loss": 1.7763,
"step": 299
},
{
"epoch": 2.7272727272727275,
"grad_norm": 2.8132150173187256,
"learning_rate": 4.848484848484849e-05,
"loss": 2.7144,
"step": 300
},
{
"epoch": 2.7363636363636363,
"grad_norm": 2.755683422088623,
"learning_rate": 4.6969696969696966e-05,
"loss": 1.8518,
"step": 301
},
{
"epoch": 2.7454545454545456,
"grad_norm": 3.0009915828704834,
"learning_rate": 4.545454545454546e-05,
"loss": 1.9233,
"step": 302
},
{
"epoch": 2.7545454545454544,
"grad_norm": 2.8329362869262695,
"learning_rate": 4.393939393939394e-05,
"loss": 1.9821,
"step": 303
},
{
"epoch": 2.7636363636363637,
"grad_norm": 2.642076253890991,
"learning_rate": 4.242424242424243e-05,
"loss": 1.8232,
"step": 304
},
{
"epoch": 2.7727272727272725,
"grad_norm": 2.3849098682403564,
"learning_rate": 4.090909090909091e-05,
"loss": 1.5814,
"step": 305
},
{
"epoch": 2.7818181818181817,
"grad_norm": 2.4465572834014893,
"learning_rate": 3.939393939393939e-05,
"loss": 2.0538,
"step": 306
},
{
"epoch": 2.790909090909091,
"grad_norm": 3.058110475540161,
"learning_rate": 3.787878787878788e-05,
"loss": 2.4959,
"step": 307
},
{
"epoch": 2.8,
"grad_norm": 2.4234743118286133,
"learning_rate": 3.6363636363636364e-05,
"loss": 1.9271,
"step": 308
},
{
"epoch": 2.809090909090909,
"grad_norm": 2.6342692375183105,
"learning_rate": 3.484848484848485e-05,
"loss": 2.4818,
"step": 309
},
{
"epoch": 2.8181818181818183,
"grad_norm": 3.9711482524871826,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.7856,
"step": 310
},
{
"epoch": 2.827272727272727,
"grad_norm": 2.6276450157165527,
"learning_rate": 3.1818181818181814e-05,
"loss": 2.3267,
"step": 311
},
{
"epoch": 2.8363636363636364,
"grad_norm": 2.395639181137085,
"learning_rate": 3.0303030303030306e-05,
"loss": 2.0063,
"step": 312
},
{
"epoch": 2.8454545454545457,
"grad_norm": 3.2870113849639893,
"learning_rate": 2.878787878787879e-05,
"loss": 2.1394,
"step": 313
},
{
"epoch": 2.8545454545454545,
"grad_norm": 2.2852306365966797,
"learning_rate": 2.7272727272727273e-05,
"loss": 1.905,
"step": 314
},
{
"epoch": 2.8636363636363638,
"grad_norm": 2.724724292755127,
"learning_rate": 2.575757575757576e-05,
"loss": 1.8821,
"step": 315
},
{
"epoch": 2.8727272727272726,
"grad_norm": 3.5306527614593506,
"learning_rate": 2.4242424242424244e-05,
"loss": 2.2191,
"step": 316
},
{
"epoch": 2.881818181818182,
"grad_norm": 2.3539886474609375,
"learning_rate": 2.272727272727273e-05,
"loss": 2.0281,
"step": 317
},
{
"epoch": 2.8909090909090907,
"grad_norm": 2.4188969135284424,
"learning_rate": 2.1212121212121215e-05,
"loss": 2.5453,
"step": 318
},
{
"epoch": 2.9,
"grad_norm": 2.5099847316741943,
"learning_rate": 1.9696969696969697e-05,
"loss": 2.3246,
"step": 319
},
{
"epoch": 2.909090909090909,
"grad_norm": 2.6091346740722656,
"learning_rate": 1.8181818181818182e-05,
"loss": 1.7609,
"step": 320
},
{
"epoch": 2.918181818181818,
"grad_norm": 2.5419886112213135,
"learning_rate": 1.6666666666666667e-05,
"loss": 2.2443,
"step": 321
},
{
"epoch": 2.9272727272727272,
"grad_norm": 2.336390972137451,
"learning_rate": 1.5151515151515153e-05,
"loss": 1.734,
"step": 322
},
{
"epoch": 2.9363636363636365,
"grad_norm": 2.4999842643737793,
"learning_rate": 1.3636363636363637e-05,
"loss": 1.9539,
"step": 323
},
{
"epoch": 2.9454545454545453,
"grad_norm": 2.4849205017089844,
"learning_rate": 1.2121212121212122e-05,
"loss": 2.2472,
"step": 324
},
{
"epoch": 2.9545454545454546,
"grad_norm": 2.6928179264068604,
"learning_rate": 1.0606060606060607e-05,
"loss": 2.0897,
"step": 325
},
{
"epoch": 2.963636363636364,
"grad_norm": 2.8431129455566406,
"learning_rate": 9.090909090909091e-06,
"loss": 2.443,
"step": 326
},
{
"epoch": 2.9727272727272727,
"grad_norm": 2.494403839111328,
"learning_rate": 7.5757575757575764e-06,
"loss": 1.8585,
"step": 327
},
{
"epoch": 2.981818181818182,
"grad_norm": 2.491305112838745,
"learning_rate": 6.060606060606061e-06,
"loss": 1.8932,
"step": 328
},
{
"epoch": 2.990909090909091,
"grad_norm": 2.3088715076446533,
"learning_rate": 4.5454545454545455e-06,
"loss": 1.7301,
"step": 329
},
{
"epoch": 3.0,
"grad_norm": 2.533508539199829,
"learning_rate": 3.0303030303030305e-06,
"loss": 2.0174,
"step": 330
},
{
"epoch": 3.0,
"eval_f1": 0.887,
"eval_gen_len": 47.8091,
"eval_loss": 2.0939812660217285,
"eval_precision": 0.8838,
"eval_recall": 0.8904,
"eval_rouge1": 0.4298,
"eval_rouge2": 0.1915,
"eval_rougeL": 0.3559,
"eval_rougeLsum": 0.3956,
"eval_runtime": 22.6842,
"eval_samples_per_second": 4.849,
"eval_steps_per_second": 0.617,
"step": 330
},
{
"epoch": 3.0,
"step": 330,
"total_flos": 366452498497536.0,
"train_loss": 2.2660372549837287,
"train_runtime": 171.2707,
"train_samples_per_second": 15.397,
"train_steps_per_second": 1.927
}
],
"logging_steps": 1,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 366452498497536.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}