{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.035964035964036,
  "global_step": 2038,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0, "learning_rate": 5.417624521072796e-05, "loss": 4.4188, "step": 5 },
    { "epoch": 0.01, "learning_rate": 0.00010835249042145592, "loss": 4.4951, "step": 10 },
    { "epoch": 0.01, "learning_rate": 0.00016252873563218388, "loss": 4.2222, "step": 15 },
    { "epoch": 0.02, "learning_rate": 0.00021670498084291185, "loss": 4.2715, "step": 20 },
    { "epoch": 0.02, "learning_rate": 0.00027088122605363983, "loss": 3.9803, "step": 25 },
    { "epoch": 0.03, "learning_rate": 0.00032505747126436777, "loss": 4.2492, "step": 30 },
    { "epoch": 0.03, "learning_rate": 0.00037923371647509576, "loss": 4.1401, "step": 35 },
    { "epoch": 0.04, "learning_rate": 0.0004334099616858237, "loss": 4.114, "step": 40 },
    { "epoch": 0.04, "learning_rate": 0.00048758620689655173, "loss": 4.0481, "step": 45 },
    { "epoch": 0.05, "learning_rate": 0.0005417624521072797, "loss": 4.1815, "step": 50 },
    { "epoch": 0.05, "learning_rate": 0.0005959386973180077, "loss": 3.8259, "step": 55 },
    { "epoch": 0.06, "learning_rate": 0.0006501149425287355, "loss": 4.1996, "step": 60 },
    { "epoch": 0.06, "learning_rate": 0.0007042911877394635, "loss": 4.0807, "step": 65 },
    { "epoch": 0.07, "learning_rate": 0.0007584674329501915, "loss": 3.9835, "step": 70 },
    { "epoch": 0.07, "learning_rate": 0.0008126436781609194, "loss": 3.8874, "step": 75 },
    { "epoch": 0.08, "learning_rate": 0.0008668199233716474, "loss": 4.0532, "step": 80 },
    { "epoch": 0.08, "learning_rate": 0.0009209961685823754, "loss": 3.8632, "step": 85 },
    { "epoch": 0.09, "learning_rate": 0.0009751724137931035, "loss": 3.9985, "step": 90 },
    { "epoch": 0.09, "learning_rate": 0.0010293486590038313, "loss": 4.0549, "step": 95 },
    { "epoch": 0.1, "learning_rate": 0.0010835249042145593, "loss": 3.8613, "step": 100 },
    { "epoch": 0.1, "learning_rate": 0.0011377011494252873, "loss": 3.9426, "step": 105 },
    { "epoch": 0.11, "learning_rate": 0.0011918773946360153, "loss": 3.8803, "step": 110 },
    { "epoch": 0.11, "learning_rate": 0.0012460536398467433, "loss": 3.955, "step": 115 },
    { "epoch": 0.12, "learning_rate": 0.001300229885057471, "loss": 3.979, "step": 120 },
    { "epoch": 0.12, "learning_rate": 0.001354406130268199, "loss": 3.8574, "step": 125 },
    { "epoch": 0.13, "learning_rate": 0.001408582375478927, "loss": 3.7432, "step": 130 },
    { "epoch": 0.13, "learning_rate": 0.0014627586206896553, "loss": 4.024, "step": 135 },
    { "epoch": 0.14, "learning_rate": 0.001516934865900383, "loss": 4.0934, "step": 140 },
    { "epoch": 0.14, "learning_rate": 0.001571111111111111, "loss": 3.6876, "step": 145 },
    { "epoch": 0.15, "learning_rate": 0.0016252873563218388, "loss": 3.6832, "step": 150 },
    { "epoch": 0.15, "learning_rate": 0.001679463601532567, "loss": 3.5089, "step": 155 },
    { "epoch": 0.16, "learning_rate": 0.0017336398467432948, "loss": 3.6489, "step": 160 },
    { "epoch": 0.16, "learning_rate": 0.001787816091954023, "loss": 3.744, "step": 165 },
    { "epoch": 0.17, "learning_rate": 0.0018419923371647507, "loss": 3.4325, "step": 170 },
    { "epoch": 0.17, "learning_rate": 0.0018961685823754787, "loss": 3.5246, "step": 175 },
    { "epoch": 0.18, "learning_rate": 0.001950344827586207, "loss": 3.6095, "step": 180 },
    { "epoch": 0.18, "learning_rate": 0.0020045210727969347, "loss": 3.4681, "step": 185 },
    { "epoch": 0.19, "learning_rate": 0.0020586973180076627, "loss": 3.0709, "step": 190 },
    { "epoch": 0.19, "learning_rate": 0.0021128735632183907, "loss": 3.3848, "step": 195 },
    { "epoch": 0.2, "learning_rate": 0.0021670498084291187, "loss": 3.1883, "step": 200 },
    { "epoch": 0.2, "learning_rate": 0.0022212260536398467, "loss": 2.8688, "step": 205 },
    { "epoch": 0.21, "learning_rate": 0.0022754022988505746, "loss": 2.9804, "step": 210 },
    { "epoch": 0.21, "learning_rate": 0.002329578544061302, "loss": 2.78, "step": 215 },
    { "epoch": 0.22, "learning_rate": 0.0023837547892720306, "loss": 2.4824, "step": 220 },
    { "epoch": 0.22, "learning_rate": 0.002437931034482758, "loss": 2.4343, "step": 225 },
    { "epoch": 0.23, "learning_rate": 0.0024921072796934866, "loss": 2.2343, "step": 230 },
    { "epoch": 0.23, "learning_rate": 0.0025462835249042146, "loss": 2.0473, "step": 235 },
    { "epoch": 0.24, "learning_rate": 0.002600459770114942, "loss": 2.0828, "step": 240 },
    { "epoch": 0.24, "learning_rate": 0.0026546360153256706, "loss": 1.944, "step": 245 },
    { "epoch": 0.25, "learning_rate": 0.002708812260536398, "loss": 1.8739, "step": 250 },
    { "epoch": 0.25, "learning_rate": 0.002762988505747126, "loss": 1.8151, "step": 255 },
    { "epoch": 0.26, "learning_rate": 0.002817164750957854, "loss": 1.7999, "step": 260 },
    { "epoch": 0.26, "learning_rate": 0.002821634214969049, "loss": 1.6782, "step": 265 },
    { "epoch": 0.27, "learning_rate": 0.00281367698368036, "loss": 1.7889, "step": 270 },
    { "epoch": 0.27, "learning_rate": 0.002805719752391671, "loss": 1.732, "step": 275 },
    { "epoch": 0.28, "learning_rate": 0.0027977625211029825, "loss": 1.6743, "step": 280 },
    { "epoch": 0.28, "learning_rate": 0.0027898052898142935, "loss": 1.5363, "step": 285 },
    { "epoch": 0.29, "learning_rate": 0.002781848058525605, "loss": 1.6643, "step": 290 },
    { "epoch": 0.29, "learning_rate": 0.002773890827236916, "loss": 1.6517, "step": 295 },
    { "epoch": 0.3, "learning_rate": 0.002765933595948227, "loss": 1.6323, "step": 300 },
    { "epoch": 0.3, "learning_rate": 0.0027579763646595383, "loss": 1.6194, "step": 305 },
    { "epoch": 0.31, "learning_rate": 0.0027500191333708493, "loss": 1.6597, "step": 310 },
    { "epoch": 0.31, "learning_rate": 0.0027420619020821608, "loss": 1.6295, "step": 315 },
    { "epoch": 0.32, "learning_rate": 0.0027341046707934718, "loss": 1.6804, "step": 320 },
    { "epoch": 0.32, "learning_rate": 0.002726147439504783, "loss": 1.6055, "step": 325 },
    { "epoch": 0.33, "learning_rate": 0.002718190208216094, "loss": 1.6157, "step": 330 },
    { "epoch": 0.33, "learning_rate": 0.0027102329769274056, "loss": 1.6294, "step": 335 },
    { "epoch": 0.34, "learning_rate": 0.002702275745638717, "loss": 1.7494, "step": 340 },
    { "epoch": 0.34, "learning_rate": 0.002694318514350028, "loss": 1.588, "step": 345 },
    { "epoch": 0.35, "learning_rate": 0.002686361283061339, "loss": 1.7233, "step": 350 },
    { "epoch": 0.35, "learning_rate": 0.0026784040517726505, "loss": 1.6158, "step": 355 },
    { "epoch": 0.36, "learning_rate": 0.0026704468204839615, "loss": 1.6256, "step": 360 },
    { "epoch": 0.36, "learning_rate": 0.002662489589195273, "loss": 1.6494, "step": 365 },
    { "epoch": 0.37, "learning_rate": 0.002654532357906584, "loss": 1.647, "step": 370 },
    { "epoch": 0.37, "learning_rate": 0.002646575126617895, "loss": 1.5843, "step": 375 },
    { "epoch": 0.38, "learning_rate": 0.0026386178953292064, "loss": 1.6872, "step": 380 },
    { "epoch": 0.38, "learning_rate": 0.0026306606640405174, "loss": 1.551, "step": 385 },
    { "epoch": 0.39, "learning_rate": 0.002622703432751829, "loss": 1.5284, "step": 390 },
    { "epoch": 0.39, "learning_rate": 0.00261474620146314, "loss": 1.6645, "step": 395 },
    { "epoch": 0.4, "learning_rate": 0.002606788970174451, "loss": 1.5748, "step": 400 },
    { "epoch": 0.4, "learning_rate": 0.0025988317388857623, "loss": 1.6338, "step": 405 },
    { "epoch": 0.41, "learning_rate": 0.0025908745075970733, "loss": 1.6677, "step": 410 },
    { "epoch": 0.41, "learning_rate": 0.0025829172763083847, "loss": 1.6507, "step": 415 },
    { "epoch": 0.42, "learning_rate": 0.002574960045019696, "loss": 1.6592, "step": 420 },
    { "epoch": 0.42, "learning_rate": 0.002567002813731007, "loss": 1.593, "step": 425 },
    { "epoch": 0.43, "learning_rate": 0.0025590455824423186, "loss": 1.5918, "step": 430 },
    { "epoch": 0.43, "learning_rate": 0.0025510883511536296, "loss": 1.5243, "step": 435 },
    { "epoch": 0.44, "learning_rate": 0.002543131119864941, "loss": 1.5591, "step": 440 },
    { "epoch": 0.44, "learning_rate": 0.002535173888576252, "loss": 1.5922, "step": 445 },
    { "epoch": 0.45, "learning_rate": 0.002527216657287563, "loss": 1.5745, "step": 450 },
    { "epoch": 0.45, "learning_rate": 0.0025192594259988745, "loss": 1.6005, "step": 455 },
    { "epoch": 0.46, "learning_rate": 0.0025113021947101855, "loss": 1.5417, "step": 460 },
    { "epoch": 0.46, "learning_rate": 0.002503344963421497, "loss": 1.5977, "step": 465 },
    { "epoch": 0.47, "learning_rate": 0.002495387732132808, "loss": 1.6464, "step": 470 },
    { "epoch": 0.47, "learning_rate": 0.002487430500844119, "loss": 1.5063, "step": 475 },
    { "epoch": 0.48, "learning_rate": 0.0024794732695554303, "loss": 1.6109, "step": 480 },
    { "epoch": 0.48, "learning_rate": 0.0024715160382667413, "loss": 1.5821, "step": 485 },
    { "epoch": 0.49, "learning_rate": 0.0024635588069780528, "loss": 1.5636, "step": 490 },
    { "epoch": 0.49, "learning_rate": 0.0024556015756893638, "loss": 1.4746, "step": 495 },
    { "epoch": 0.5, "learning_rate": 0.002447644344400675, "loss": 1.56, "step": 500 },
    { "epoch": 0.5, "learning_rate": 0.002439687113111986, "loss": 1.6408, "step": 505 },
    { "epoch": 0.51, "learning_rate": 0.0024317298818232976, "loss": 1.6337, "step": 510 },
    { "epoch": 0.51, "learning_rate": 0.0024237726505346086, "loss": 1.5576, "step": 515 },
    { "epoch": 0.52, "learning_rate": 0.00241581541924592, "loss": 1.6573, "step": 520 },
    { "epoch": 0.52, "learning_rate": 0.002407858187957231, "loss": 1.5688, "step": 525 },
    { "epoch": 0.53, "learning_rate": 0.0023999009566685425, "loss": 1.6501, "step": 530 },
    { "epoch": 0.53, "learning_rate": 0.0023919437253798535, "loss": 1.529, "step": 535 },
    { "epoch": 0.54, "learning_rate": 0.002383986494091165, "loss": 1.5988, "step": 540 },
    { "epoch": 0.54, "learning_rate": 0.002376029262802476, "loss": 1.6495, "step": 545 },
    { "epoch": 0.55, "learning_rate": 0.002368072031513787, "loss": 1.6037, "step": 550 },
    { "epoch": 0.55, "learning_rate": 0.0023601148002250984, "loss": 1.6214, "step": 555 },
    { "epoch": 0.56, "learning_rate": 0.0023521575689364094, "loss": 1.5713, "step": 560 },
    { "epoch": 0.56, "learning_rate": 0.002344200337647721, "loss": 1.6279, "step": 565 },
    { "epoch": 0.57, "learning_rate": 0.002336243106359032, "loss": 1.5442, "step": 570 },
    { "epoch": 0.57, "learning_rate": 0.002328285875070343, "loss": 1.6052, "step": 575 },
    { "epoch": 0.58, "learning_rate": 0.0023203286437816543, "loss": 1.6117, "step": 580 },
    { "epoch": 0.58, "learning_rate": 0.0023123714124929653, "loss": 1.5666, "step": 585 },
    { "epoch": 0.59, "learning_rate": 0.0023044141812042767, "loss": 1.5963, "step": 590 },
    { "epoch": 0.59, "learning_rate": 0.0022964569499155877, "loss": 1.5817, "step": 595 },
    { "epoch": 0.6, "learning_rate": 0.002288499718626899, "loss": 1.5122, "step": 600 },
    { "epoch": 0.6, "learning_rate": 0.0022805424873382106, "loss": 1.6607, "step": 605 },
    { "epoch": 0.61, "learning_rate": 0.0022725852560495216, "loss": 1.5135, "step": 610 },
    { "epoch": 0.61, "learning_rate": 0.002264628024760833, "loss": 1.6543, "step": 615 },
    { "epoch": 0.62, "learning_rate": 0.002256670793472144, "loss": 1.6807, "step": 620 },
    { "epoch": 0.62, "learning_rate": 0.002248713562183455, "loss": 1.4292, "step": 625 },
    { "epoch": 0.63, "learning_rate": 0.0022407563308947664, "loss": 1.5581, "step": 630 },
    { "epoch": 0.63, "learning_rate": 0.0022327990996060774, "loss": 1.5805, "step": 635 },
    { "epoch": 0.64, "learning_rate": 0.002224841868317389, "loss": 1.5441, "step": 640 },
    { "epoch": 0.64, "learning_rate": 0.0022168846370287, "loss": 1.4829, "step": 645 },
    { "epoch": 0.65, "learning_rate": 0.002208927405740011, "loss": 1.5682, "step": 650 },
    { "epoch": 0.65, "learning_rate": 0.0022009701744513223, "loss": 1.5293, "step": 655 },
    { "epoch": 0.66, "learning_rate": 0.0021930129431626333, "loss": 1.5393, "step": 660 },
    { "epoch": 0.66, "learning_rate": 0.0021850557118739448, "loss": 1.5362, "step": 665 },
    { "epoch": 0.67, "learning_rate": 0.0021770984805852558, "loss": 1.5934, "step": 670 },
    { "epoch": 0.67, "learning_rate": 0.0021691412492965668, "loss": 1.5107, "step": 675 },
    { "epoch": 0.68, "learning_rate": 0.002161184018007878, "loss": 1.5264, "step": 680 },
    { "epoch": 0.68, "learning_rate": 0.0021532267867191896, "loss": 1.5978, "step": 685 },
    { "epoch": 0.69, "learning_rate": 0.0021452695554305006, "loss": 1.6122, "step": 690 },
    { "epoch": 0.69, "learning_rate": 0.002137312324141812, "loss": 1.5131, "step": 695 },
    { "epoch": 0.7, "learning_rate": 0.002129355092853123, "loss": 1.5823, "step": 700 },
    { "epoch": 0.7, "learning_rate": 0.0021213978615644345, "loss": 1.5021, "step": 705 },
    { "epoch": 0.71, "learning_rate": 0.0021134406302757455, "loss": 1.537, "step": 710 },
    { "epoch": 0.71, "learning_rate": 0.002105483398987057, "loss": 1.5478, "step": 715 },
    { "epoch": 0.72, "learning_rate": 0.002097526167698368, "loss": 1.5272, "step": 720 },
    { "epoch": 0.72, "learning_rate": 0.002089568936409679, "loss": 1.508, "step": 725 },
    { "epoch": 0.73, "learning_rate": 0.0020816117051209904, "loss": 1.609, "step": 730 },
    { "epoch": 0.73, "learning_rate": 0.0020736544738323014, "loss": 1.549, "step": 735 },
    { "epoch": 0.74, "learning_rate": 0.002065697242543613, "loss": 1.5333, "step": 740 },
    { "epoch": 0.74, "learning_rate": 0.002057740011254924, "loss": 1.5664, "step": 745 },
    { "epoch": 0.75, "learning_rate": 0.002049782779966235, "loss": 1.5201, "step": 750 },
    { "epoch": 0.75, "learning_rate": 0.0020418255486775463, "loss": 1.5582, "step": 755 },
    { "epoch": 0.76, "learning_rate": 0.0020338683173888573, "loss": 1.5936, "step": 760 },
    { "epoch": 0.76, "learning_rate": 0.0020259110861001687, "loss": 1.5531, "step": 765 },
    { "epoch": 0.77, "learning_rate": 0.0020179538548114797, "loss": 1.6008, "step": 770 },
    { "epoch": 0.77, "learning_rate": 0.002009996623522791, "loss": 1.6433, "step": 775 },
    { "epoch": 0.78, "learning_rate": 0.002002039392234102, "loss": 1.5423, "step": 780 },
    { "epoch": 0.78, "learning_rate": 0.0019940821609454136, "loss": 1.6291, "step": 785 },
    { "epoch": 0.79, "learning_rate": 0.001986124929656725, "loss": 1.496, "step": 790 },
    { "epoch": 0.79, "learning_rate": 0.001978167698368036, "loss": 1.5821, "step": 795 },
    { "epoch": 0.8, "learning_rate": 0.001970210467079347, "loss": 1.6126, "step": 800 },
    { "epoch": 0.8, "learning_rate": 0.0019622532357906584, "loss": 1.6114, "step": 805 },
    { "epoch": 0.81, "learning_rate": 0.0019542960045019694, "loss": 1.4821, "step": 810 },
    { "epoch": 0.81, "learning_rate": 0.0019463387732132809, "loss": 1.5948, "step": 815 },
    { "epoch": 0.82, "learning_rate": 0.0019383815419245919, "loss": 1.6362, "step": 820 },
    { "epoch": 0.82, "learning_rate": 0.0019304243106359029, "loss": 1.4812, "step": 825 },
    { "epoch": 0.83, "learning_rate": 0.0019224670793472143, "loss": 1.5479, "step": 830 },
    { "epoch": 0.83, "learning_rate": 0.0019145098480585253, "loss": 1.4893, "step": 835 },
    { "epoch": 0.84, "learning_rate": 0.0019065526167698368, "loss": 1.6329, "step": 840 },
    { "epoch": 0.84, "learning_rate": 0.0018985953854811478, "loss": 1.5183, "step": 845 },
    { "epoch": 0.85, "learning_rate": 0.001890638154192459, "loss": 1.4427, "step": 850 },
    { "epoch": 0.85, "learning_rate": 0.0018826809229037704, "loss": 1.5404, "step": 855 },
    { "epoch": 0.86, "learning_rate": 0.0018747236916150814, "loss": 1.5218, "step": 860 },
    { "epoch": 0.86, "learning_rate": 0.0018667664603263928, "loss": 1.5174, "step": 865 },
    { "epoch": 0.87, "learning_rate": 0.0018588092290377038, "loss": 1.5274, "step": 870 },
    { "epoch": 0.87, "learning_rate": 0.0018508519977490148, "loss": 1.4394, "step": 875 },
    { "epoch": 0.88, "learning_rate": 0.0018428947664603263, "loss": 1.4837, "step": 880 },
    { "epoch": 0.88, "learning_rate": 0.0018349375351716373, "loss": 1.4938, "step": 885 },
    { "epoch": 0.89, "learning_rate": 0.0018269803038829487, "loss": 1.4845, "step": 890 },
    { "epoch": 0.89, "learning_rate": 0.00181902307259426, "loss": 1.5218, "step": 895 },
    { "epoch": 0.9, "learning_rate": 0.001811065841305571, "loss": 1.385, "step": 900 },
    { "epoch": 0.9, "learning_rate": 0.0018031086100168824, "loss": 1.518, "step": 905 },
    { "epoch": 0.91, "learning_rate": 0.0017951513787281934, "loss": 1.5217, "step": 910 },
    { "epoch": 0.91, "learning_rate": 0.0017871941474395048, "loss": 1.5088, "step": 915 },
    { "epoch": 0.92, "learning_rate": 0.0017792369161508158, "loss": 1.5665, "step": 920 },
    { "epoch": 0.92, "learning_rate": 0.0017712796848621268, "loss": 1.521, "step": 925 },
    { "epoch": 0.93, "learning_rate": 0.0017633224535734382, "loss": 1.5092, "step": 930 },
    { "epoch": 0.93, "learning_rate": 0.0017553652222847495, "loss": 1.6314, "step": 935 },
    { "epoch": 0.94, "learning_rate": 0.0017474079909960607, "loss": 1.6241, "step": 940 },
    { "epoch": 0.94, "learning_rate": 0.001739450759707372, "loss": 1.5724, "step": 945 },
    { "epoch": 0.95, "learning_rate": 0.001731493528418683, "loss": 1.4219, "step": 950 },
    { "epoch": 0.95, "learning_rate": 0.0017235362971299943, "loss": 1.5695, "step": 955 },
    { "epoch": 0.96, "learning_rate": 0.0017155790658413053, "loss": 1.4847, "step": 960 },
    { "epoch": 0.96, "learning_rate": 0.0017076218345526168, "loss": 1.4517, "step": 965 },
    { "epoch": 0.97, "learning_rate": 0.0016996646032639278, "loss": 1.5844, "step": 970 },
    { "epoch": 0.97, "learning_rate": 0.001691707371975239, "loss": 1.4831, "step": 975 },
    { "epoch": 0.98, "learning_rate": 0.0016837501406865502, "loss": 1.5493, "step": 980 },
    { "epoch": 0.98, "learning_rate": 0.0016757929093978614, "loss": 1.5352, "step": 985 },
    { "epoch": 0.99, "learning_rate": 0.0016678356781091727, "loss": 1.5711, "step": 990 },
    { "epoch": 0.99, "learning_rate": 0.0016598784468204839, "loss": 1.5873, "step": 995 },
    { "epoch": 1.0, "learning_rate": 0.0016519212155317949, "loss": 1.574, "step": 1000 },
    { "epoch": 1.0, "learning_rate": 0.0016439639842431063, "loss": 1.5327, "step": 1005 },
    { "epoch": 1.01, "learning_rate": 0.0016360067529544173, "loss": 1.4916, "step": 1010 },
    { "epoch": 1.01, "learning_rate": 0.0016280495216657287, "loss": 1.413, "step": 1015 },
    { "epoch": 1.02, "learning_rate": 0.0016200922903770397, "loss": 1.4399, "step": 1020 },
    { "epoch": 1.02, "learning_rate": 0.001612135059088351, "loss": 1.4758, "step": 1025 },
    { "epoch": 1.03, "learning_rate": 0.0016041778277996622, "loss": 1.58, "step": 1030 },
    { "epoch": 1.03, "learning_rate": 0.0015962205965109734, "loss": 1.5144, "step": 1035 },
    { "epoch": 1.04, "learning_rate": 0.0015882633652222848, "loss": 1.5135, "step": 1040 },
    { "epoch": 1.04, "learning_rate": 0.0015803061339335958, "loss": 1.5407, "step": 1045 },
    { "epoch": 1.05, "learning_rate": 0.0015723489026449068, "loss": 1.5207, "step": 1050 },
    { "epoch": 1.05, "learning_rate": 0.0015643916713562183, "loss": 1.439, "step": 1055 },
    { "epoch": 1.06, "learning_rate": 0.0015564344400675293, "loss": 1.4517, "step": 1060 },
    { "epoch": 1.06, "learning_rate": 0.0015484772087788407, "loss": 1.5128, "step": 1065 },
    { "epoch": 1.07, "learning_rate": 0.0015405199774901517, "loss": 1.4634, "step": 1070 },
    { "epoch": 1.07, "learning_rate": 0.001532562746201463, "loss": 1.5415, "step": 1075 },
    { "epoch": 1.08, "learning_rate": 0.0015246055149127744, "loss": 1.5151, "step": 1080 },
    { "epoch": 1.08, "learning_rate": 0.0015166482836240854, "loss": 1.4921, "step": 1085 },
    { "epoch": 1.09, "learning_rate": 0.0015086910523353968, "loss": 1.5596, "step": 1090 },
    { "epoch": 1.09, "learning_rate": 0.0015007338210467078, "loss": 1.5972, "step": 1095 },
    { "epoch": 1.1, "learning_rate": 0.0014927765897580188, "loss": 1.4749, "step": 1100 },
    { "epoch": 1.1, "learning_rate": 0.0014848193584693302, "loss": 1.5279, "step": 1105 },
    { "epoch": 1.11, "learning_rate": 0.0014768621271806412, "loss": 1.5179, "step": 1110 },
    { "epoch": 1.11, "learning_rate": 0.0014689048958919527, "loss": 1.4831, "step": 1115 },
    { "epoch": 1.12, "learning_rate": 0.001460947664603264, "loss": 1.5519, "step": 1120 },
    { "epoch": 1.12, "learning_rate": 0.001452990433314575, "loss": 1.3897, "step": 1125 },
    { "epoch": 1.13, "learning_rate": 0.0014450332020258863, "loss": 1.5673, "step": 1130 },
    { "epoch": 1.13, "learning_rate": 0.0014370759707371973, "loss": 1.4551, "step": 1135 },
    { "epoch": 1.14, "learning_rate": 0.0014291187394485088, "loss": 1.5422, "step": 1140 },
    { "epoch": 1.14, "learning_rate": 0.0014211615081598198, "loss": 1.5741, "step": 1145 },
    { "epoch": 1.15, "learning_rate": 0.001413204276871131, "loss": 1.5678, "step": 1150 },
    { "epoch": 1.15, "learning_rate": 0.0014052470455824422, "loss": 1.5133, "step": 1155 },
    { "epoch": 1.16, "learning_rate": 0.0013972898142937534, "loss": 1.4879, "step": 1160 },
    { "epoch": 1.16, "learning_rate": 0.0013893325830050646, "loss": 1.4945, "step": 1165 },
    { "epoch": 1.17, "learning_rate": 0.0013813753517163759, "loss": 1.6049, "step": 1170 },
    { "epoch": 1.17, "learning_rate": 0.001373418120427687, "loss": 1.4963, "step": 1175 },
    { "epoch": 1.18, "learning_rate": 0.0013654608891389983, "loss": 1.4837, "step": 1180 },
    { "epoch": 1.18, "learning_rate": 0.0013575036578503093, "loss": 1.5779, "step": 1185 },
    { "epoch": 1.19, "learning_rate": 0.0013495464265616205, "loss": 1.4238, "step": 1190 },
    { "epoch": 1.19, "learning_rate": 0.0013415891952729317, "loss": 1.6119, "step": 1195 },
    { "epoch": 1.2, "learning_rate": 0.001333631963984243, "loss": 1.4393, "step": 1200 },
    { "epoch": 1.2, "learning_rate": 0.0013256747326955542, "loss": 1.5923, "step": 1205 },
    { "epoch": 1.21, "learning_rate": 0.0013177175014068654, "loss": 1.4509, "step": 1210 },
    { "epoch": 1.21, "learning_rate": 0.0013097602701181766, "loss": 1.5335, "step": 1215 },
    { "epoch": 1.22, "learning_rate": 0.0013018030388294878, "loss": 1.5637, "step": 1220 },
    { "epoch": 1.22, "learning_rate": 0.001293845807540799, "loss": 1.5095, "step": 1225 },
    { "epoch": 1.23, "learning_rate": 0.0012858885762521103, "loss": 1.4885, "step": 1230 },
    { "epoch": 1.23, "learning_rate": 0.0012779313449634215, "loss": 1.5555, "step": 1235 },
    { "epoch": 1.24, "learning_rate": 0.0012699741136747325, "loss": 1.5119, "step": 1240 },
    { "epoch": 1.24, "learning_rate": 0.0012620168823860437, "loss": 1.5499, "step": 1245 },
    { "epoch": 1.25, "learning_rate": 0.001254059651097355, "loss": 1.4255, "step": 1250 },
    { "epoch": 1.25, "learning_rate": 0.0012461024198086661, "loss": 1.5216, "step": 1255 },
    { "epoch": 1.26, "learning_rate": 0.0012381451885199776, "loss": 1.48, "step": 1260 },
    { "epoch": 1.26, "learning_rate": 0.0012301879572312886, "loss": 1.4991, "step": 1265 },
    { "epoch": 1.27, "learning_rate": 0.0012222307259425998, "loss": 1.459, "step": 1270 },
    { "epoch": 1.27, "learning_rate": 0.001214273494653911, "loss": 1.4944, "step": 1275 },
    { "epoch": 1.28, "learning_rate": 0.0012063162633652222, "loss": 1.5351, "step": 1280 },
    { "epoch": 1.28, "learning_rate": 0.0011983590320765335, "loss": 1.5548, "step": 1285 },
    { "epoch": 1.29, "learning_rate": 0.0011904018007878445, "loss": 1.4821, "step": 1290 },
    { "epoch": 1.29, "learning_rate": 0.0011824445694991557, "loss": 1.5329, "step": 1295 },
    { "epoch": 1.3, "learning_rate": 0.001174487338210467, "loss": 1.5326, "step": 1300 },
    { "epoch": 1.3, "learning_rate": 0.0011665301069217783, "loss": 1.5971, "step": 1305 },
    { "epoch": 1.31, "learning_rate": 0.0011585728756330895, "loss": 1.5427, "step": 1310 },
    { "epoch": 1.31, "learning_rate": 0.0011506156443444005, "loss": 1.4637, "step": 1315 },
    { "epoch": 1.32, "learning_rate": 0.0011426584130557118, "loss": 1.5118, "step": 1320 },
    { "epoch": 1.32, "learning_rate": 0.001134701181767023, "loss": 1.5831, "step": 1325 },
    { "epoch": 1.33, "learning_rate": 0.0011267439504783342, "loss": 1.5563, "step": 1330 },
    { "epoch": 1.33, "learning_rate": 0.0011187867191896454, "loss": 1.618, "step": 1335 },
    { "epoch": 1.34, "learning_rate": 0.0011108294879009564, "loss": 1.5305, "step": 1340 },
    { "epoch": 1.34, "learning_rate": 0.0011028722566122679, "loss": 1.5213, "step": 1345 },
    { "epoch": 1.35, "learning_rate": 0.001094915025323579, "loss": 1.4506, "step": 1350 },
    { "epoch": 1.35, "learning_rate": 0.0010869577940348903, "loss": 1.4775, "step": 1355 },
    { "epoch": 1.36, "learning_rate": 0.0010790005627462015, "loss": 1.5724, "step": 1360 },
    { "epoch": 1.36, "learning_rate": 0.0010710433314575125, "loss": 1.529, "step": 1365 },
    { "epoch": 1.37, "learning_rate": 0.0010630861001688237, "loss": 1.5512, "step": 1370 },
    { "epoch": 1.37, "learning_rate": 0.001055128868880135, "loss": 1.5542, "step": 1375 },
    { "epoch": 1.38, "learning_rate": 0.0010471716375914462, "loss": 1.4235, "step": 1380 },
    { "epoch": 1.38, "learning_rate": 0.0010392144063027574, "loss": 1.4504, "step": 1385 },
    { "epoch": 1.39, "learning_rate": 0.0010312571750140686, "loss": 1.5948, "step": 1390 },
    { "epoch": 1.39, "learning_rate": 0.0010232999437253798, "loss": 1.4997, "step": 1395 },
    { "epoch": 1.4, "learning_rate": 0.001015342712436691, "loss": 1.5144, "step": 1400 },
    { "epoch": 1.4, "learning_rate": 0.0010073854811480023, "loss": 1.4337, "step": 1405 },
    { "epoch": 1.41, "learning_rate": 0.0009994282498593135, "loss": 1.4679, "step": 1410 },
    { "epoch": 1.41, "learning_rate": 0.0009914710185706245, "loss": 1.5111, "step": 1415 },
    { "epoch": 1.42, "learning_rate": 0.0009835137872819357, "loss": 1.5423, "step": 1420 },
    { "epoch": 1.42, "learning_rate": 0.000975556555993247, "loss": 1.5303, "step": 1425 },
    { "epoch": 1.43, "learning_rate": 0.0009675993247045582, "loss": 1.6125, "step": 1430 },
    { "epoch": 1.43, "learning_rate": 0.0009596420934158695, "loss": 1.4798, "step": 1435 },
    { "epoch": 1.44, "learning_rate": 0.0009516848621271805, "loss": 1.4338, "step": 1440 },
    { "epoch": 1.44, "learning_rate": 0.0009437276308384918, "loss": 1.5114, "step": 1445 },
    { "epoch": 1.45, "learning_rate": 0.000935770399549803, "loss": 1.4716, "step": 1450 },
    { "epoch": 1.45, "learning_rate": 0.0009278131682611142, "loss": 1.5409, "step": 1455 },
    { "epoch": 1.46, "learning_rate": 0.0009198559369724254, "loss": 1.549, "step": 1460 },
    { "epoch": 1.46, "learning_rate": 0.0009118987056837366, "loss": 1.5123, "step": 1465 },
    { "epoch": 1.47, "learning_rate": 0.0009039414743950478, "loss": 1.4828, "step": 1470 },
    { "epoch": 1.47, "learning_rate": 0.000895984243106359, "loss": 1.5086, "step": 1475 },
    { "epoch": 1.48, "learning_rate": 0.0008880270118176702, "loss": 1.5353, "step": 1480 },
    { "epoch": 1.48, "learning_rate": 0.0008800697805289814, "loss": 1.4679, "step": 1485 },
    { "epoch": 1.49, "learning_rate": 0.0008721125492402925, "loss": 1.5139, "step": 1490 },
    { "epoch": 1.49, "learning_rate": 0.0008641553179516038, "loss": 1.5272, "step": 1495 },
    { "epoch": 1.5, "learning_rate": 0.000856198086662915, "loss": 1.5476, "step": 1500 },
    { "epoch": 1.5, "learning_rate": 0.0008482408553742262, "loss": 1.5384, "step": 1505 },
    { "epoch": 1.51, "learning_rate": 0.0008402836240855374, "loss": 1.4994, "step": 1510 },
    { "epoch": 1.51, "learning_rate": 0.0008323263927968485, "loss": 1.4601, "step": 1515 },
    { "epoch": 1.52, "learning_rate": 0.0008243691615081597, "loss": 1.5025, "step": 1520 },
    { "epoch": 1.52, "learning_rate": 0.000816411930219471, "loss": 1.5673, "step": 1525 },
    { "epoch": 1.53, "learning_rate": 0.0008084546989307822, "loss": 1.4701, "step": 1530 },
    { "epoch": 1.53, "learning_rate": 0.0008004974676420934, "loss": 1.5013, "step": 1535 },
    { "epoch": 1.54, "learning_rate": 0.0007925402363534045, "loss": 1.4872, "step": 1540 },
    { "epoch": 1.54, "learning_rate": 0.0007845830050647157, "loss": 1.4978, "step": 1545 },
    { "epoch": 1.55, "learning_rate": 0.000776625773776027, "loss": 1.4915, "step": 1550 },
    { "epoch": 1.55, "learning_rate": 0.0007686685424873382, "loss": 1.4778, "step": 1555 },
    { "epoch": 1.56, "learning_rate": 0.0007607113111986494, "loss": 1.466, "step": 1560 },
    { "epoch": 1.56, "learning_rate": 0.0007527540799099605, "loss": 1.5417, "step": 1565 },
    { "epoch": 1.57, "learning_rate": 0.0007447968486212717, "loss": 1.5491, "step": 1570 },
    { "epoch": 1.57, "learning_rate": 0.0007368396173325829, "loss": 1.5508, "step": 1575 },
    { "epoch": 1.58, "learning_rate": 0.0007288823860438941, "loss": 1.5231, "step": 1580 },
    { "epoch": 1.58, "learning_rate": 0.0007209251547552054, "loss": 1.4769, "step": 1585 },
    { "epoch": 1.59, "learning_rate": 0.0007129679234665165, "loss": 1.4474, "step": 1590 },
    { "epoch": 1.59, "learning_rate": 0.0007050106921778277, "loss": 1.419, "step": 1595 },
    { "epoch": 1.6, "learning_rate": 0.0006970534608891389, "loss": 1.4688, "step": 1600 },
    { "epoch": 1.6, "learning_rate": 0.0006890962296004501, "loss": 1.5495, "step": 1605 },
    { "epoch": 1.61, "learning_rate": 0.0006811389983117614, "loss": 1.5018, "step": 1610 },
    { "epoch": 1.61, "learning_rate": 0.0006731817670230726, "loss": 1.4739, "step": 1615 },
    { "epoch": 1.62, "learning_rate": 0.0006652245357343837, "loss": 1.5871, "step": 1620 },
    { "epoch": 1.62, "learning_rate": 0.0006572673044456949, "loss": 1.5003, "step": 1625 },
    { "epoch": 1.63, "learning_rate": 0.0006493100731570061, "loss": 1.4942, "step": 1630 },
    { "epoch": 1.63, "learning_rate": 0.0006413528418683173, "loss": 1.4569, "step": 1635 },
    { "epoch": 1.64, "learning_rate": 0.0006333956105796286, "loss": 1.5094, "step": 1640 },
    { "epoch": 1.64, "learning_rate": 0.0006254383792909397, "loss": 1.556, "step": 1645 },
    { "epoch": 1.65, "learning_rate": 0.0006174811480022509, "loss": 1.4795, "step": 1650 },
    { "epoch": 1.65, "learning_rate": 0.0006095239167135622, "loss": 1.4231, "step": 1655 },
    { "epoch": 1.66, "learning_rate": 0.0006015666854248733, "loss": 1.5737, "step": 1660 },
    { "epoch": 1.66, "learning_rate": 0.0005936094541361845, "loss": 1.4034, "step": 1665 },
    { "epoch": 1.67, "learning_rate": 0.0005856522228474956, "loss": 1.4919, "step": 1670 },
    { "epoch": 1.67, "learning_rate": 0.000577694991558807, "loss": 1.5163, "step": 1675 },
    { "epoch": 1.68, "learning_rate": 0.0005697377602701182, "loss": 1.4146, "step": 1680 },
    { "epoch": 1.68, "learning_rate": 0.0005617805289814293, "loss": 1.5115, "step": 1685 },
    { "epoch": 1.69, "learning_rate": 0.0005538232976927405, "loss": 1.4961, "step": 1690 },
    { "epoch": 1.69, "learning_rate": 0.0005458660664040517, "loss": 1.4497, "step": 1695 },
    { "epoch": 1.7, "learning_rate": 0.000537908835115363, "loss": 1.5232, "step": 1700 },
    { "epoch": 1.7, "learning_rate": 0.0005299516038266742, "loss": 1.454, "step": 1705 },
    { "epoch": 1.71, "learning_rate": 0.0005219943725379853, "loss": 1.4386, "step": 1710 },
    { "epoch": 1.71, "learning_rate": 0.0005140371412492965, "loss": 1.53, "step": 1715 },
    { "epoch": 1.72, "learning_rate": 0.0005060799099606077, "loss": 1.4589, "step": 1720 },
    { "epoch": 1.72, "learning_rate": 0.0004981226786719189, "loss": 1.4737, "step": 1725 },
    { "epoch": 1.73, "learning_rate": 0.0004901654473832302, "loss": 1.4965, "step": 1730 },
    { "epoch": 1.73, "learning_rate": 0.0004822082160945413, "loss": 1.5197, "step": 1735 },
    { "epoch": 1.74, "learning_rate": 0.00047425098480585254, "loss": 1.5132, "step": 1740 },
    { "epoch": 1.74, "learning_rate": 0.0004662937535171637, "loss": 1.4366, "step": 1745 },
    { "epoch": 1.75, "learning_rate": 0.0004583365222284749, "loss": 1.4455, "step": 1750 },
    { "epoch": 1.75, "learning_rate": 0.00045037929093978614, "loss": 1.5245, "step": 1755 },
    { "epoch": 1.76, "learning_rate": 0.0004424220596510973, "loss": 1.471, "step": 1760 },
    { "epoch": 1.76, "learning_rate": 0.0004344648283624085, "loss": 1.5327, "step": 1765 },
    { "epoch": 1.77, "learning_rate": 0.0004265075970737197, "loss": 1.5326, "step": 1770 },
    { "epoch": 1.77, "learning_rate": 0.0004185503657850309, "loss": 1.5458, "step": 1775 },
    { "epoch": 1.78, "learning_rate": 0.00041059313449634213, "loss": 1.4876, "step": 1780 },
    { "epoch": 1.78, "learning_rate": 0.0004026359032076533, "loss": 1.5073, "step": 1785 },
    { "epoch": 1.79, "learning_rate": 0.0003946786719189645, "loss": 1.397, "step": 1790 },
    { "epoch": 1.79, "learning_rate": 0.00038672144063027573, "loss": 1.5706, "step": 1795 },
    { "epoch": 1.8, "learning_rate": 0.0003787642093415869, "loss": 1.4266, "step": 1800 },
    { "epoch": 1.8, "learning_rate": 0.0003708069780528981, "loss": 1.5257, "step": 1805 },
    { "epoch": 1.81, "learning_rate": 0.0003628497467642093, "loss": 1.471, "step": 1810 },
    { "epoch": 1.81, "learning_rate": 0.0003548925154755205, "loss": 1.489, "step": 1815 },
    { "epoch": 1.82, "learning_rate": 0.0003469352841868317, "loss": 1.4412, "step": 1820 },
    { "epoch": 1.82, "learning_rate": 0.0003389780528981429, "loss": 1.462, "step": 1825 },
    { "epoch": 1.83, "learning_rate": 0.0003310208216094541, "loss": 1.5112, "step": 1830 },
    { "epoch": 1.83, "learning_rate": 0.0003230635903207653, "loss": 1.4571, "step": 1835 },
    { "epoch": 1.84, "learning_rate": 0.00031510635903207653, "loss": 1.5007, "step": 1840 },
    { "epoch": 1.84, "learning_rate": 0.0003071491277433877, "loss": 1.4701, "step": 1845 },
    { "epoch": 1.85, "learning_rate": 0.0002991918964546989, "loss": 1.4797, "step": 1850 },
    { "epoch": 1.85, "learning_rate": 0.0002912346651660101, "loss": 1.5134, "step": 1855 },
    { "epoch": 1.86, "learning_rate": 0.0002832774338773213, "loss": 1.3855, "step": 1860 },
    { "epoch": 1.86, "learning_rate": 0.0002753202025886325, "loss": 1.5209, "step": 1865 },
    { "epoch": 1.87, "learning_rate": 0.0002673629712999437, "loss": 1.5193, "step": 1870 },
    { "epoch": 1.87, "learning_rate": 0.0002594057400112549, "loss": 1.5011, "step": 1875 },
    { "epoch": 1.88, "learning_rate": 0.0002514485087225661, "loss": 1.4923, "step": 1880 },
    { "epoch": 1.88, "learning_rate": 0.0002434912774338773, "loss": 1.5298, "step": 1885 },
    { "epoch": 1.89, "learning_rate": 0.0002355340461451885, "loss": 1.4577, "step": 1890 },
    { "epoch": 1.89, "learning_rate": 0.0002275768148564997, "loss": 1.5495, "step": 1895 },
    { "epoch": 1.9, "learning_rate": 0.00021961958356781088, "loss": 1.4312, "step": 1900 },
    { "epoch": 1.9, "learning_rate": 0.00021166235227912213, "loss": 1.5179, "step": 1905 },
    { "epoch": 1.91, "learning_rate": 0.00020370512099043332, "loss": 1.5787, "step": 1910 },
    { "epoch": 1.91, "learning_rate": 0.0001957478897017445, "loss": 1.5253, "step": 1915 },
    { "epoch": 1.92, "learning_rate": 0.0001877906584130557, "loss": 1.4374, "step": 1920 },
    { "epoch": 1.92, "learning_rate": 0.0001798334271243669, "loss": 1.501, "step": 1925 },
    { "epoch": 1.93, "learning_rate": 0.00017187619583567808, "loss": 1.5137, "step": 1930 },
    { "epoch": 1.93, "learning_rate": 0.0001639189645469893, "loss": 1.4689, "step": 1935 },
    { "epoch": 1.94, "learning_rate": 0.0001559617332583005, "loss": 1.4655, "step": 1940 },
    { "epoch": 1.94, "learning_rate": 0.00014800450196961168, "loss": 1.4459, "step": 1945 },
    { "epoch": 1.95, "learning_rate": 0.0001400472706809229, "loss": 1.4885, "step": 1950 },
    { "epoch": 1.95, "learning_rate": 0.0001320900393922341, "loss": 1.4831, "step": 1955 },
    { "epoch": 1.96, "learning_rate": 0.0001241328081035453, "loss": 1.4613, "step": 1960 },
    { "epoch": 1.96, "learning_rate": 0.00011617557681485649, "loss": 1.4329, "step": 1965 },
    { "epoch": 1.97, "learning_rate": 0.00010821834552616768, "loss": 1.4125, "step": 1970 },
    { "epoch": 1.97, "learning_rate": 0.00010026111423747889, "loss": 1.4342, "step": 1975 },
    { "epoch": 1.98, "learning_rate": 9.230388294879008e-05, "loss": 1.501, "step": 1980 },
    { "epoch": 1.98, "learning_rate": 8.434665166010128e-05, "loss": 1.6016, "step": 1985 },
    { "epoch": 1.99, "learning_rate": 7.638942037141249e-05, "loss": 1.4295, "step": 1990 },
    { "epoch": 1.99, "learning_rate": 6.843218908272369e-05, "loss": 1.4612, "step": 1995 },
    { "epoch": 2.0, "learning_rate": 6.047495779403489e-05, "loss": 1.4502, "step": 2000 },
    { "epoch": 2.0, "learning_rate": 5.251772650534609e-05, "loss": 1.452, "step": 2005 },
    { "epoch": 2.01, "learning_rate": 4.456049521665728e-05, "loss": 1.4154, "step": 2010 },
    { "epoch": 2.01, "learning_rate": 3.660326392796848e-05, "loss": 1.4977, "step": 2015 },
    { "epoch": 2.02, "learning_rate": 2.8646032639279683e-05, "loss": 1.4938, "step": 2020 },
    { "epoch": 2.02, "learning_rate": 2.068880135059088e-05, "loss": 1.4416, "step": 2025 },
    { "epoch": 2.03, "learning_rate": 1.2731570061902082e-05, "loss": 1.4756, "step": 2030 },
    { "epoch": 2.03, "learning_rate": 4.7743387732132805e-06, "loss": 1.5734, "step": 2035 },
    { "epoch": 2.04, "step": 2038, "total_flos": 1.8123372100594497e+22, "train_loss": 1.7846279670726564, "train_runtime": 13858.2887, "train_samples_per_second": 2409.431, "train_steps_per_second": 0.147 }
  ],
  "max_steps": 2038,
  "num_train_epochs": 3,
  "total_flos": 1.8123372100594497e+22,
  "trial_name": null,
  "trial_params": null
}