{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.5855145213338115,
  "eval_steps": 2000,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 2.5916264057159424,
      "learning_rate": 1e-06,
      "loss": 1.0776,
      "step": 100
    },
    {
      "epoch": 0.07,
      "grad_norm": 3.6381211280822754,
      "learning_rate": 9.898989898989898e-07,
      "loss": 0.8345,
      "step": 200
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.9932849407196045,
      "learning_rate": 9.7989898989899e-07,
      "loss": 0.6863,
      "step": 300
    },
    {
      "epoch": 0.14,
      "grad_norm": 2.8963606357574463,
      "learning_rate": 9.697979797979798e-07,
      "loss": 0.6659,
      "step": 400
    },
    {
      "epoch": 0.18,
      "grad_norm": 4.000125885009766,
      "learning_rate": 9.598989898989899e-07,
      "loss": 0.6552,
      "step": 500
    },
    {
      "epoch": 0.22,
      "grad_norm": 3.7767927646636963,
      "learning_rate": 9.497979797979798e-07,
      "loss": 0.6583,
      "step": 600
    },
    {
      "epoch": 0.25,
      "grad_norm": 3.204336166381836,
      "learning_rate": 9.396969696969696e-07,
      "loss": 0.6229,
      "step": 700
    },
    {
      "epoch": 0.29,
      "grad_norm": 3.798582077026367,
      "learning_rate": 9.295959595959596e-07,
      "loss": 0.6138,
      "step": 800
    },
    {
      "epoch": 0.32,
      "grad_norm": 3.92914080619812,
      "learning_rate": 9.194949494949495e-07,
      "loss": 0.6266,
      "step": 900
    },
    {
      "epoch": 0.36,
      "grad_norm": 3.982314109802246,
      "learning_rate": 9.093939393939394e-07,
      "loss": 0.6311,
      "step": 1000
    },
    {
      "epoch": 0.39,
      "grad_norm": 3.385096311569214,
      "learning_rate": 8.992929292929292e-07,
      "loss": 0.6141,
      "step": 1100
    },
    {
      "epoch": 0.43,
      "grad_norm": 3.015211343765259,
      "learning_rate": 8.891919191919191e-07,
      "loss": 0.6369,
      "step": 1200
    },
    {
      "epoch": 0.47,
      "grad_norm": 5.285658359527588,
      "learning_rate": 8.790909090909091e-07,
      "loss": 0.6442,
      "step": 1300
    },
    {
      "epoch": 0.5,
      "grad_norm": 4.69854736328125,
      "learning_rate": 8.68989898989899e-07,
      "loss": 0.6419,
      "step": 1400
    },
    {
      "epoch": 0.54,
      "grad_norm": 2.95841121673584,
      "learning_rate": 8.58989898989899e-07,
      "loss": 0.6575,
      "step": 1500
    },
    {
      "epoch": 0.57,
      "grad_norm": 2.4997267723083496,
      "learning_rate": 8.488888888888888e-07,
      "loss": 0.6346,
      "step": 1600
    },
    {
      "epoch": 0.61,
      "grad_norm": 4.126640319824219,
      "learning_rate": 8.387878787878787e-07,
      "loss": 0.6005,
      "step": 1700
    },
    {
      "epoch": 0.65,
      "grad_norm": 3.040174961090088,
      "learning_rate": 8.286868686868687e-07,
      "loss": 0.6434,
      "step": 1800
    },
    {
      "epoch": 0.68,
      "grad_norm": 4.870083332061768,
      "learning_rate": 8.185858585858586e-07,
      "loss": 0.6202,
      "step": 1900
    },
    {
      "epoch": 0.72,
      "grad_norm": 4.307583808898926,
      "learning_rate": 8.084848484848484e-07,
      "loss": 0.6552,
      "step": 2000
    },
    {
      "epoch": 0.72,
      "eval_loss": 0.746256411075592,
      "eval_runtime": 199.9457,
      "eval_samples_per_second": 5.001,
      "eval_steps_per_second": 1.25,
      "step": 2000
    },
    {
      "epoch": 0.75,
      "grad_norm": 4.793485164642334,
      "learning_rate": 7.983838383838384e-07,
      "loss": 0.6,
      "step": 2100
    },
    {
      "epoch": 0.79,
      "grad_norm": 5.155008316040039,
      "learning_rate": 7.882828282828282e-07,
      "loss": 0.5989,
      "step": 2200
    },
    {
      "epoch": 0.82,
      "grad_norm": 3.4713668823242188,
      "learning_rate": 7.781818181818182e-07,
      "loss": 0.589,
      "step": 2300
    },
    {
      "epoch": 0.86,
      "grad_norm": 4.512465476989746,
      "learning_rate": 7.68080808080808e-07,
      "loss": 0.5949,
      "step": 2400
    },
    {
      "epoch": 0.9,
      "grad_norm": 2.854623556137085,
      "learning_rate": 7.579797979797979e-07,
      "loss": 0.6103,
      "step": 2500
    },
    {
      "epoch": 0.93,
      "grad_norm": 6.94374418258667,
      "learning_rate": 7.478787878787879e-07,
      "loss": 0.6236,
      "step": 2600
    },
    {
      "epoch": 0.97,
      "grad_norm": 5.974214553833008,
      "learning_rate": 7.377777777777777e-07,
      "loss": 0.5915,
      "step": 2700
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.472714424133301,
      "learning_rate": 7.276767676767677e-07,
      "loss": 0.6023,
      "step": 2800
    },
    {
      "epoch": 1.04,
      "grad_norm": 5.157464504241943,
      "learning_rate": 7.175757575757575e-07,
      "loss": 0.6137,
      "step": 2900
    },
    {
      "epoch": 1.08,
      "grad_norm": 5.155125617980957,
      "learning_rate": 7.074747474747474e-07,
      "loss": 0.5723,
      "step": 3000
    },
    {
      "epoch": 1.11,
      "grad_norm": 2.872635841369629,
      "learning_rate": 6.973737373737374e-07,
      "loss": 0.5835,
      "step": 3100
    },
    {
      "epoch": 1.15,
      "grad_norm": 4.128993034362793,
      "learning_rate": 6.872727272727273e-07,
      "loss": 0.5777,
      "step": 3200
    },
    {
      "epoch": 1.18,
      "grad_norm": 3.2823545932769775,
      "learning_rate": 6.771717171717171e-07,
      "loss": 0.5875,
      "step": 3300
    },
    {
      "epoch": 1.22,
      "grad_norm": 3.7050082683563232,
      "learning_rate": 6.67070707070707e-07,
      "loss": 0.5901,
      "step": 3400
    },
    {
      "epoch": 1.25,
      "grad_norm": 5.737078666687012,
      "learning_rate": 6.56969696969697e-07,
      "loss": 0.5913,
      "step": 3500
    },
    {
      "epoch": 1.29,
      "grad_norm": 2.425701141357422,
      "learning_rate": 6.468686868686868e-07,
      "loss": 0.5759,
      "step": 3600
    },
    {
      "epoch": 1.33,
      "grad_norm": 3.5726475715637207,
      "learning_rate": 6.367676767676767e-07,
      "loss": 0.5976,
      "step": 3700
    },
    {
      "epoch": 1.36,
      "grad_norm": 5.098260879516602,
      "learning_rate": 6.266666666666667e-07,
      "loss": 0.5851,
      "step": 3800
    },
    {
      "epoch": 1.4,
      "grad_norm": 4.929934501647949,
      "learning_rate": 6.165656565656565e-07,
      "loss": 0.6006,
      "step": 3900
    },
    {
      "epoch": 1.43,
      "grad_norm": 4.153616428375244,
      "learning_rate": 6.064646464646465e-07,
      "loss": 0.5933,
      "step": 4000
    },
    {
      "epoch": 1.43,
      "eval_loss": 0.7265416383743286,
      "eval_runtime": 200.0654,
      "eval_samples_per_second": 4.998,
      "eval_steps_per_second": 1.25,
      "step": 4000
    },
    {
      "epoch": 1.47,
      "grad_norm": 4.094910144805908,
      "learning_rate": 5.963636363636363e-07,
      "loss": 0.6005,
      "step": 4100
    },
    {
      "epoch": 1.51,
      "grad_norm": 3.998939275741577,
      "learning_rate": 5.862626262626262e-07,
      "loss": 0.5747,
      "step": 4200
    },
    {
      "epoch": 1.54,
      "grad_norm": 3.7930455207824707,
      "learning_rate": 5.761616161616162e-07,
      "loss": 0.5879,
      "step": 4300
    },
    {
      "epoch": 1.58,
      "grad_norm": 2.8265774250030518,
      "learning_rate": 5.660606060606061e-07,
      "loss": 0.5867,
      "step": 4400
    },
    {
      "epoch": 1.61,
      "grad_norm": 5.012646198272705,
      "learning_rate": 5.559595959595959e-07,
      "loss": 0.5882,
      "step": 4500
    },
    {
      "epoch": 1.65,
      "grad_norm": 4.642397880554199,
      "learning_rate": 5.458585858585858e-07,
      "loss": 0.5623,
      "step": 4600
    },
    {
      "epoch": 1.69,
      "grad_norm": 4.442890167236328,
      "learning_rate": 5.357575757575758e-07,
      "loss": 0.6009,
      "step": 4700
    },
    {
      "epoch": 1.72,
      "grad_norm": 4.405778884887695,
      "learning_rate": 5.256565656565657e-07,
      "loss": 0.5962,
      "step": 4800
    },
    {
      "epoch": 1.76,
      "grad_norm": 4.690769672393799,
      "learning_rate": 5.155555555555555e-07,
      "loss": 0.5562,
      "step": 4900
    },
    {
      "epoch": 1.79,
      "grad_norm": 6.986325263977051,
      "learning_rate": 5.055555555555555e-07,
      "loss": 0.5812,
      "step": 5000
    },
    {
      "epoch": 1.83,
      "grad_norm": 3.599179983139038,
      "learning_rate": 4.954545454545454e-07,
      "loss": 0.5907,
      "step": 5100
    },
    {
      "epoch": 1.86,
      "grad_norm": 4.54085111618042,
      "learning_rate": 4.853535353535353e-07,
      "loss": 0.5971,
      "step": 5200
    },
    {
      "epoch": 1.9,
      "grad_norm": 4.154399871826172,
      "learning_rate": 4.752525252525252e-07,
      "loss": 0.5774,
      "step": 5300
    },
    {
      "epoch": 1.94,
      "grad_norm": 4.425224781036377,
      "learning_rate": 4.6515151515151513e-07,
      "loss": 0.5625,
      "step": 5400
    },
    {
      "epoch": 1.97,
      "grad_norm": 4.346124172210693,
      "learning_rate": 4.55050505050505e-07,
      "loss": 0.5645,
      "step": 5500
    },
    {
      "epoch": 2.01,
      "grad_norm": 4.570713043212891,
      "learning_rate": 4.449494949494949e-07,
      "loss": 0.587,
      "step": 5600
    },
    {
      "epoch": 2.04,
      "grad_norm": 3.669879674911499,
      "learning_rate": 4.3484848484848483e-07,
      "loss": 0.5609,
      "step": 5700
    },
    {
      "epoch": 2.08,
      "grad_norm": 5.310702800750732,
      "learning_rate": 4.2474747474747474e-07,
      "loss": 0.5487,
      "step": 5800
    },
    {
      "epoch": 2.12,
      "grad_norm": 10.60392951965332,
      "learning_rate": 4.1464646464646466e-07,
      "loss": 0.5603,
      "step": 5900
    },
    {
      "epoch": 2.15,
      "grad_norm": 3.6110637187957764,
      "learning_rate": 4.045454545454545e-07,
      "loss": 0.596,
      "step": 6000
    },
    {
      "epoch": 2.15,
      "eval_loss": 0.7302116751670837,
      "eval_runtime": 199.3626,
      "eval_samples_per_second": 5.016,
      "eval_steps_per_second": 1.254,
      "step": 6000
    },
    {
      "epoch": 2.19,
      "grad_norm": 5.180839538574219,
      "learning_rate": 3.9444444444444444e-07,
      "loss": 0.5581,
      "step": 6100
    },
    {
      "epoch": 2.22,
      "grad_norm": 6.653825283050537,
      "learning_rate": 3.843434343434343e-07,
      "loss": 0.5536,
      "step": 6200
    },
    {
      "epoch": 2.26,
      "grad_norm": 5.317089080810547,
      "learning_rate": 3.7424242424242427e-07,
      "loss": 0.5769,
      "step": 6300
    },
    {
      "epoch": 2.29,
      "grad_norm": 5.519111633300781,
      "learning_rate": 3.6414141414141413e-07,
      "loss": 0.5359,
      "step": 6400
    },
    {
      "epoch": 2.33,
      "grad_norm": 4.846569061279297,
      "learning_rate": 3.5404040404040405e-07,
      "loss": 0.5857,
      "step": 6500
    },
    {
      "epoch": 2.37,
      "grad_norm": 4.538639545440674,
      "learning_rate": 3.439393939393939e-07,
      "loss": 0.5757,
      "step": 6600
    },
    {
      "epoch": 2.4,
      "grad_norm": 5.268352031707764,
      "learning_rate": 3.3383838383838383e-07,
      "loss": 0.5481,
      "step": 6700
    },
    {
      "epoch": 2.44,
      "grad_norm": 4.480013370513916,
      "learning_rate": 3.237373737373737e-07,
      "loss": 0.5682,
      "step": 6800
    },
    {
      "epoch": 2.47,
      "grad_norm": 5.44348669052124,
      "learning_rate": 3.1363636363636366e-07,
      "loss": 0.5796,
      "step": 6900
    },
    {
      "epoch": 2.51,
      "grad_norm": 6.241185665130615,
      "learning_rate": 3.035353535353535e-07,
      "loss": 0.5931,
      "step": 7000
    },
    {
      "epoch": 2.55,
      "grad_norm": 5.187110424041748,
      "learning_rate": 2.9343434343434344e-07,
      "loss": 0.5696,
      "step": 7100
    },
    {
      "epoch": 2.58,
      "grad_norm": 6.06135368347168,
      "learning_rate": 2.833333333333333e-07,
      "loss": 0.5466,
      "step": 7200
    },
    {
      "epoch": 2.62,
      "grad_norm": 5.4311628341674805,
      "learning_rate": 2.732323232323232e-07,
      "loss": 0.5409,
      "step": 7300
    },
    {
      "epoch": 2.65,
      "grad_norm": 5.2975263595581055,
      "learning_rate": 2.631313131313131e-07,
      "loss": 0.5577,
      "step": 7400
    },
    {
      "epoch": 2.69,
      "grad_norm": 5.464794158935547,
      "learning_rate": 2.5303030303030305e-07,
      "loss": 0.584,
      "step": 7500
    },
    {
      "epoch": 2.72,
      "grad_norm": 5.726445198059082,
      "learning_rate": 2.429292929292929e-07,
      "loss": 0.5689,
      "step": 7600
    },
    {
      "epoch": 2.76,
      "grad_norm": 4.5637736320495605,
      "learning_rate": 2.3282828282828283e-07,
      "loss": 0.5704,
      "step": 7700
    },
    {
      "epoch": 2.8,
      "grad_norm": 5.696423530578613,
      "learning_rate": 2.2272727272727272e-07,
      "loss": 0.5689,
      "step": 7800
    },
    {
      "epoch": 2.83,
      "grad_norm": 5.203188896179199,
      "learning_rate": 2.1262626262626264e-07,
      "loss": 0.554,
      "step": 7900
    },
    {
      "epoch": 2.87,
      "grad_norm": 5.128230571746826,
      "learning_rate": 2.0252525252525253e-07,
      "loss": 0.5412,
      "step": 8000
    },
    {
      "epoch": 2.87,
      "eval_loss": 0.7274453043937683,
      "eval_runtime": 199.1299,
      "eval_samples_per_second": 5.022,
      "eval_steps_per_second": 1.255,
      "step": 8000
    },
    {
      "epoch": 2.9,
      "grad_norm": 5.771433353424072,
      "learning_rate": 1.9242424242424241e-07,
      "loss": 0.5662,
      "step": 8100
    },
    {
      "epoch": 2.94,
      "grad_norm": 4.031991004943848,
      "learning_rate": 1.8232323232323233e-07,
      "loss": 0.553,
      "step": 8200
    },
    {
      "epoch": 2.98,
      "grad_norm": 5.155855655670166,
      "learning_rate": 1.7222222222222222e-07,
      "loss": 0.5825,
      "step": 8300
    },
    {
      "epoch": 3.01,
      "grad_norm": 6.224823951721191,
      "learning_rate": 1.621212121212121e-07,
      "loss": 0.5891,
      "step": 8400
    },
    {
      "epoch": 3.05,
      "grad_norm": 5.529131889343262,
      "learning_rate": 1.5202020202020203e-07,
      "loss": 0.5318,
      "step": 8500
    },
    {
      "epoch": 3.08,
      "grad_norm": 4.826006889343262,
      "learning_rate": 1.4191919191919192e-07,
      "loss": 0.5817,
      "step": 8600
    },
    {
      "epoch": 3.12,
      "grad_norm": 4.968801498413086,
      "learning_rate": 1.318181818181818e-07,
      "loss": 0.5448,
      "step": 8700
    },
    {
      "epoch": 3.16,
      "grad_norm": 5.9340715408325195,
      "learning_rate": 1.2171717171717172e-07,
      "loss": 0.5697,
      "step": 8800
    },
    {
      "epoch": 3.19,
      "grad_norm": 6.170198440551758,
      "learning_rate": 1.1161616161616161e-07,
      "loss": 0.5539,
      "step": 8900
    },
    {
      "epoch": 3.23,
      "grad_norm": 3.5836563110351562,
      "learning_rate": 1.0151515151515151e-07,
      "loss": 0.5514,
      "step": 9000
    },
    {
      "epoch": 3.26,
      "grad_norm": 5.903813362121582,
      "learning_rate": 9.141414141414142e-08,
      "loss": 0.556,
      "step": 9100
    },
    {
      "epoch": 3.3,
      "grad_norm": 3.9017932415008545,
      "learning_rate": 8.13131313131313e-08,
      "loss": 0.5558,
      "step": 9200
    },
    {
      "epoch": 3.33,
      "grad_norm": 6.613101959228516,
      "learning_rate": 7.121212121212121e-08,
      "loss": 0.5476,
      "step": 9300
    },
    {
      "epoch": 3.37,
      "grad_norm": 6.56988525390625,
      "learning_rate": 6.121212121212121e-08,
      "loss": 0.5665,
      "step": 9400
    },
    {
      "epoch": 3.41,
      "grad_norm": 6.609213829040527,
      "learning_rate": 5.1111111111111114e-08,
      "loss": 0.5468,
      "step": 9500
    },
    {
      "epoch": 3.44,
      "grad_norm": 5.318997859954834,
      "learning_rate": 4.101010101010101e-08,
      "loss": 0.571,
      "step": 9600
    },
    {
      "epoch": 3.48,
      "grad_norm": 4.767919540405273,
      "learning_rate": 3.090909090909091e-08,
      "loss": 0.5523,
      "step": 9700
    },
    {
      "epoch": 3.51,
      "grad_norm": 5.016121864318848,
      "learning_rate": 2.0808080808080806e-08,
      "loss": 0.5431,
      "step": 9800
    },
    {
      "epoch": 3.55,
      "grad_norm": 5.048821926116943,
      "learning_rate": 1.0707070707070706e-08,
      "loss": 0.5418,
      "step": 9900
    },
    {
      "epoch": 3.59,
      "grad_norm": 4.946585655212402,
      "learning_rate": 6.06060606060606e-10,
      "loss": 0.5754,
      "step": 10000
    },
    {
      "epoch": 3.59,
      "eval_loss": 0.7410796880722046,
      "eval_runtime": 204.7897,
      "eval_samples_per_second": 4.883,
      "eval_steps_per_second": 1.221,
      "step": 10000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 2000,
  "total_flos": 9.426795635681526e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}