{
"best_metric": 0.6213592233009708,
"best_model_checkpoint": "videomae-base-finetuned-kinetics-finetuned-conflab-traj-direction-rh-v2/checkpoint-1053",
"epoch": 8.103513770180436,
"eval_steps": 500,
"global_step": 1053,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00949667616334283,
"grad_norm": 8.752718925476074,
"learning_rate": 4.716981132075472e-06,
"loss": 2.1131,
"step": 10
},
{
"epoch": 0.01899335232668566,
"grad_norm": 7.10194206237793,
"learning_rate": 9.433962264150944e-06,
"loss": 2.031,
"step": 20
},
{
"epoch": 0.02849002849002849,
"grad_norm": 8.048168182373047,
"learning_rate": 1.4150943396226415e-05,
"loss": 2.0382,
"step": 30
},
{
"epoch": 0.03798670465337132,
"grad_norm": 8.191485404968262,
"learning_rate": 1.8867924528301888e-05,
"loss": 1.9895,
"step": 40
},
{
"epoch": 0.04748338081671415,
"grad_norm": 7.475042819976807,
"learning_rate": 2.358490566037736e-05,
"loss": 1.9052,
"step": 50
},
{
"epoch": 0.05698005698005698,
"grad_norm": 12.284913063049316,
"learning_rate": 2.830188679245283e-05,
"loss": 1.9474,
"step": 60
},
{
"epoch": 0.06647673314339982,
"grad_norm": 6.110039234161377,
"learning_rate": 3.30188679245283e-05,
"loss": 1.8688,
"step": 70
},
{
"epoch": 0.07597340930674264,
"grad_norm": 9.754578590393066,
"learning_rate": 3.7735849056603776e-05,
"loss": 1.9115,
"step": 80
},
{
"epoch": 0.08547008547008547,
"grad_norm": 7.101430892944336,
"learning_rate": 4.245283018867925e-05,
"loss": 1.9359,
"step": 90
},
{
"epoch": 0.0949667616334283,
"grad_norm": 5.658616065979004,
"learning_rate": 4.716981132075472e-05,
"loss": 1.9613,
"step": 100
},
{
"epoch": 0.10446343779677113,
"grad_norm": 4.824208736419678,
"learning_rate": 4.978880675818374e-05,
"loss": 1.9728,
"step": 110
},
{
"epoch": 0.1120607787274454,
"eval_accuracy": 0.2912621359223301,
"eval_loss": 1.8081567287445068,
"eval_runtime": 37.3417,
"eval_samples_per_second": 5.517,
"eval_steps_per_second": 0.696,
"step": 118
},
{
"epoch": 1.0018993352326686,
"grad_norm": 5.8929443359375,
"learning_rate": 4.9260823653643085e-05,
"loss": 1.8685,
"step": 120
},
{
"epoch": 1.0113960113960114,
"grad_norm": 5.137537956237793,
"learning_rate": 4.8732840549102435e-05,
"loss": 1.8045,
"step": 130
},
{
"epoch": 1.0208926875593543,
"grad_norm": 8.457473754882812,
"learning_rate": 4.820485744456177e-05,
"loss": 1.7704,
"step": 140
},
{
"epoch": 1.030389363722697,
"grad_norm": 6.545841693878174,
"learning_rate": 4.767687434002112e-05,
"loss": 1.7769,
"step": 150
},
{
"epoch": 1.03988603988604,
"grad_norm": 11.467026710510254,
"learning_rate": 4.7148891235480466e-05,
"loss": 1.664,
"step": 160
},
{
"epoch": 1.0493827160493827,
"grad_norm": 7.759367942810059,
"learning_rate": 4.662090813093981e-05,
"loss": 1.6638,
"step": 170
},
{
"epoch": 1.0588793922127255,
"grad_norm": 4.53931188583374,
"learning_rate": 4.609292502639916e-05,
"loss": 1.6305,
"step": 180
},
{
"epoch": 1.0683760683760684,
"grad_norm": 11.055715560913086,
"learning_rate": 4.55649419218585e-05,
"loss": 1.6884,
"step": 190
},
{
"epoch": 1.0778727445394112,
"grad_norm": 7.864542007446289,
"learning_rate": 4.503695881731785e-05,
"loss": 1.5855,
"step": 200
},
{
"epoch": 1.087369420702754,
"grad_norm": 10.435840606689453,
"learning_rate": 4.45089757127772e-05,
"loss": 1.4554,
"step": 210
},
{
"epoch": 1.0968660968660968,
"grad_norm": 9.551925659179688,
"learning_rate": 4.398099260823654e-05,
"loss": 1.6277,
"step": 220
},
{
"epoch": 1.1063627730294396,
"grad_norm": 8.910355567932129,
"learning_rate": 4.3453009503695884e-05,
"loss": 1.5903,
"step": 230
},
{
"epoch": 1.1120607787274455,
"eval_accuracy": 0.3786407766990291,
"eval_loss": 1.803798794746399,
"eval_runtime": 30.5748,
"eval_samples_per_second": 6.738,
"eval_steps_per_second": 0.85,
"step": 236
},
{
"epoch": 2.003798670465337,
"grad_norm": 5.9250712394714355,
"learning_rate": 4.292502639915523e-05,
"loss": 1.7366,
"step": 240
},
{
"epoch": 2.01329534662868,
"grad_norm": 7.3831400871276855,
"learning_rate": 4.239704329461457e-05,
"loss": 1.4794,
"step": 250
},
{
"epoch": 2.022792022792023,
"grad_norm": 4.820956230163574,
"learning_rate": 4.186906019007392e-05,
"loss": 1.2516,
"step": 260
},
{
"epoch": 2.0322886989553655,
"grad_norm": 10.172005653381348,
"learning_rate": 4.1341077085533265e-05,
"loss": 1.3548,
"step": 270
},
{
"epoch": 2.0417853751187085,
"grad_norm": 14.752180099487305,
"learning_rate": 4.081309398099261e-05,
"loss": 1.2371,
"step": 280
},
{
"epoch": 2.051282051282051,
"grad_norm": 12.396346092224121,
"learning_rate": 4.028511087645195e-05,
"loss": 1.4009,
"step": 290
},
{
"epoch": 2.060778727445394,
"grad_norm": 11.691078186035156,
"learning_rate": 3.97571277719113e-05,
"loss": 1.2658,
"step": 300
},
{
"epoch": 2.0702754036087367,
"grad_norm": 7.867719650268555,
"learning_rate": 3.9229144667370646e-05,
"loss": 1.3029,
"step": 310
},
{
"epoch": 2.07977207977208,
"grad_norm": 11.82673454284668,
"learning_rate": 3.870116156282999e-05,
"loss": 1.1971,
"step": 320
},
{
"epoch": 2.0892687559354224,
"grad_norm": 12.339221954345703,
"learning_rate": 3.817317845828934e-05,
"loss": 1.3162,
"step": 330
},
{
"epoch": 2.0987654320987654,
"grad_norm": 14.504555702209473,
"learning_rate": 3.764519535374868e-05,
"loss": 1.4248,
"step": 340
},
{
"epoch": 2.1082621082621085,
"grad_norm": 12.908592224121094,
"learning_rate": 3.711721224920803e-05,
"loss": 1.2371,
"step": 350
},
{
"epoch": 2.1120607787274452,
"eval_accuracy": 0.4563106796116505,
"eval_loss": 1.466732382774353,
"eval_runtime": 28.3624,
"eval_samples_per_second": 7.263,
"eval_steps_per_second": 0.917,
"step": 354
},
{
"epoch": 3.005698005698006,
"grad_norm": 10.089714050292969,
"learning_rate": 3.658922914466738e-05,
"loss": 0.9885,
"step": 360
},
{
"epoch": 3.0151946818613484,
"grad_norm": 6.925798416137695,
"learning_rate": 3.6061246040126714e-05,
"loss": 0.8912,
"step": 370
},
{
"epoch": 3.0246913580246915,
"grad_norm": 7.01603889465332,
"learning_rate": 3.5533262935586064e-05,
"loss": 0.78,
"step": 380
},
{
"epoch": 3.034188034188034,
"grad_norm": 7.25798225402832,
"learning_rate": 3.500527983104541e-05,
"loss": 0.6707,
"step": 390
},
{
"epoch": 3.043684710351377,
"grad_norm": 11.455845832824707,
"learning_rate": 3.447729672650475e-05,
"loss": 0.8491,
"step": 400
},
{
"epoch": 3.0531813865147197,
"grad_norm": 13.49953556060791,
"learning_rate": 3.3949313621964095e-05,
"loss": 0.7744,
"step": 410
},
{
"epoch": 3.0626780626780628,
"grad_norm": 19.157188415527344,
"learning_rate": 3.3421330517423445e-05,
"loss": 0.83,
"step": 420
},
{
"epoch": 3.0721747388414054,
"grad_norm": 15.591106414794922,
"learning_rate": 3.289334741288279e-05,
"loss": 0.8751,
"step": 430
},
{
"epoch": 3.0816714150047484,
"grad_norm": 15.52799129486084,
"learning_rate": 3.236536430834213e-05,
"loss": 0.868,
"step": 440
},
{
"epoch": 3.091168091168091,
"grad_norm": 15.363070487976074,
"learning_rate": 3.183738120380148e-05,
"loss": 0.7792,
"step": 450
},
{
"epoch": 3.100664767331434,
"grad_norm": 9.656035423278809,
"learning_rate": 3.130939809926082e-05,
"loss": 0.6143,
"step": 460
},
{
"epoch": 3.1101614434947766,
"grad_norm": 11.785117149353027,
"learning_rate": 3.078141499472017e-05,
"loss": 0.9635,
"step": 470
},
{
"epoch": 3.1120607787274452,
"eval_accuracy": 0.5631067961165048,
"eval_loss": 1.418686032295227,
"eval_runtime": 23.7661,
"eval_samples_per_second": 8.668,
"eval_steps_per_second": 1.094,
"step": 472
},
{
"epoch": 4.007597340930674,
"grad_norm": 5.675172805786133,
"learning_rate": 3.0253431890179517e-05,
"loss": 0.5573,
"step": 480
},
{
"epoch": 4.017094017094017,
"grad_norm": 5.014197826385498,
"learning_rate": 2.972544878563886e-05,
"loss": 0.4382,
"step": 490
},
{
"epoch": 4.02659069325736,
"grad_norm": 8.953272819519043,
"learning_rate": 2.9197465681098207e-05,
"loss": 0.639,
"step": 500
},
{
"epoch": 4.036087369420703,
"grad_norm": 10.838029861450195,
"learning_rate": 2.8669482576557548e-05,
"loss": 0.493,
"step": 510
},
{
"epoch": 4.045584045584046,
"grad_norm": 8.463229179382324,
"learning_rate": 2.8141499472016898e-05,
"loss": 0.4703,
"step": 520
},
{
"epoch": 4.055080721747388,
"grad_norm": 8.104616165161133,
"learning_rate": 2.7613516367476245e-05,
"loss": 0.3912,
"step": 530
},
{
"epoch": 4.064577397910731,
"grad_norm": 13.097394943237305,
"learning_rate": 2.7085533262935585e-05,
"loss": 0.4528,
"step": 540
},
{
"epoch": 4.074074074074074,
"grad_norm": 11.679549217224121,
"learning_rate": 2.6557550158394935e-05,
"loss": 0.5598,
"step": 550
},
{
"epoch": 4.083570750237417,
"grad_norm": 17.88136100769043,
"learning_rate": 2.6029567053854276e-05,
"loss": 0.4933,
"step": 560
},
{
"epoch": 4.09306742640076,
"grad_norm": 4.025656700134277,
"learning_rate": 2.5501583949313622e-05,
"loss": 0.4895,
"step": 570
},
{
"epoch": 4.102564102564102,
"grad_norm": 18.150617599487305,
"learning_rate": 2.497360084477297e-05,
"loss": 0.5344,
"step": 580
},
{
"epoch": 4.112060778727446,
"grad_norm": 10.502995491027832,
"learning_rate": 2.4445617740232313e-05,
"loss": 0.3984,
"step": 590
},
{
"epoch": 4.112060778727446,
"eval_accuracy": 0.5631067961165048,
"eval_loss": 1.398964285850525,
"eval_runtime": 20.6856,
"eval_samples_per_second": 9.959,
"eval_steps_per_second": 1.257,
"step": 590
},
{
"epoch": 5.009496676163343,
"grad_norm": 7.37868070602417,
"learning_rate": 2.391763463569166e-05,
"loss": 0.3346,
"step": 600
},
{
"epoch": 5.018993352326686,
"grad_norm": 5.786819934844971,
"learning_rate": 2.3389651531151003e-05,
"loss": 0.2937,
"step": 610
},
{
"epoch": 5.028490028490029,
"grad_norm": 8.739676475524902,
"learning_rate": 2.286166842661035e-05,
"loss": 0.2478,
"step": 620
},
{
"epoch": 5.037986704653371,
"grad_norm": 3.41898512840271,
"learning_rate": 2.2333685322069694e-05,
"loss": 0.2988,
"step": 630
},
{
"epoch": 5.047483380816714,
"grad_norm": 13.979087829589844,
"learning_rate": 2.180570221752904e-05,
"loss": 0.2672,
"step": 640
},
{
"epoch": 5.056980056980057,
"grad_norm": 9.218252182006836,
"learning_rate": 2.1277719112988384e-05,
"loss": 0.2932,
"step": 650
},
{
"epoch": 5.0664767331434,
"grad_norm": 8.248359680175781,
"learning_rate": 2.074973600844773e-05,
"loss": 0.2845,
"step": 660
},
{
"epoch": 5.075973409306743,
"grad_norm": 12.172591209411621,
"learning_rate": 2.0221752903907075e-05,
"loss": 0.2125,
"step": 670
},
{
"epoch": 5.085470085470085,
"grad_norm": 4.901180267333984,
"learning_rate": 1.9693769799366422e-05,
"loss": 0.2387,
"step": 680
},
{
"epoch": 5.094966761633429,
"grad_norm": 3.2035112380981445,
"learning_rate": 1.9165786694825765e-05,
"loss": 0.1137,
"step": 690
},
{
"epoch": 5.104463437796771,
"grad_norm": 1.4330121278762817,
"learning_rate": 1.863780359028511e-05,
"loss": 0.1962,
"step": 700
},
{
"epoch": 5.112060778727446,
"eval_accuracy": 0.5825242718446602,
"eval_loss": 1.4243208169937134,
"eval_runtime": 20.9667,
"eval_samples_per_second": 9.825,
"eval_steps_per_second": 1.24,
"step": 708
},
{
"epoch": 6.001899335232668,
"grad_norm": 17.324275970458984,
"learning_rate": 1.810982048574446e-05,
"loss": 0.2794,
"step": 710
},
{
"epoch": 6.011396011396012,
"grad_norm": 13.693547248840332,
"learning_rate": 1.7581837381203803e-05,
"loss": 0.2361,
"step": 720
},
{
"epoch": 6.020892687559354,
"grad_norm": 1.044037938117981,
"learning_rate": 1.7053854276663146e-05,
"loss": 0.0796,
"step": 730
},
{
"epoch": 6.030389363722697,
"grad_norm": 1.7120157480239868,
"learning_rate": 1.6525871172122493e-05,
"loss": 0.1323,
"step": 740
},
{
"epoch": 6.0398860398860394,
"grad_norm": 16.39497184753418,
"learning_rate": 1.5997888067581837e-05,
"loss": 0.1033,
"step": 750
},
{
"epoch": 6.049382716049383,
"grad_norm": 0.9011927247047424,
"learning_rate": 1.5469904963041184e-05,
"loss": 0.082,
"step": 760
},
{
"epoch": 6.0588793922127255,
"grad_norm": 19.976755142211914,
"learning_rate": 1.4941921858500529e-05,
"loss": 0.1568,
"step": 770
},
{
"epoch": 6.068376068376068,
"grad_norm": 1.4492021799087524,
"learning_rate": 1.4413938753959874e-05,
"loss": 0.1059,
"step": 780
},
{
"epoch": 6.077872744539412,
"grad_norm": 10.073105812072754,
"learning_rate": 1.388595564941922e-05,
"loss": 0.0908,
"step": 790
},
{
"epoch": 6.087369420702754,
"grad_norm": 12.8700532913208,
"learning_rate": 1.3357972544878563e-05,
"loss": 0.1215,
"step": 800
},
{
"epoch": 6.096866096866097,
"grad_norm": 0.7424122095108032,
"learning_rate": 1.2829989440337912e-05,
"loss": 0.1036,
"step": 810
},
{
"epoch": 6.106362773029439,
"grad_norm": 4.486748218536377,
"learning_rate": 1.2302006335797255e-05,
"loss": 0.1952,
"step": 820
},
{
"epoch": 6.112060778727446,
"eval_accuracy": 0.6019417475728155,
"eval_loss": 1.557113528251648,
"eval_runtime": 56.6502,
"eval_samples_per_second": 3.636,
"eval_steps_per_second": 0.459,
"step": 826
},
{
"epoch": 7.003798670465337,
"grad_norm": 18.093158721923828,
"learning_rate": 1.17740232312566e-05,
"loss": 0.1356,
"step": 830
},
{
"epoch": 7.01329534662868,
"grad_norm": 0.3082960247993469,
"learning_rate": 1.1246040126715946e-05,
"loss": 0.0692,
"step": 840
},
{
"epoch": 7.022792022792022,
"grad_norm": 0.9313774108886719,
"learning_rate": 1.0718057022175291e-05,
"loss": 0.0197,
"step": 850
},
{
"epoch": 7.032288698955366,
"grad_norm": 0.9466642141342163,
"learning_rate": 1.0190073917634636e-05,
"loss": 0.0202,
"step": 860
},
{
"epoch": 7.0417853751187085,
"grad_norm": 7.768035888671875,
"learning_rate": 9.662090813093982e-06,
"loss": 0.0544,
"step": 870
},
{
"epoch": 7.051282051282051,
"grad_norm": 0.7909216284751892,
"learning_rate": 9.134107708553327e-06,
"loss": 0.0614,
"step": 880
},
{
"epoch": 7.060778727445394,
"grad_norm": 0.6131118535995483,
"learning_rate": 8.606124604012672e-06,
"loss": 0.0458,
"step": 890
},
{
"epoch": 7.070275403608737,
"grad_norm": 0.46935775876045227,
"learning_rate": 8.078141499472017e-06,
"loss": 0.0309,
"step": 900
},
{
"epoch": 7.07977207977208,
"grad_norm": 4.931636333465576,
"learning_rate": 7.5501583949313625e-06,
"loss": 0.0386,
"step": 910
},
{
"epoch": 7.089268755935422,
"grad_norm": 0.7869710326194763,
"learning_rate": 7.022175290390708e-06,
"loss": 0.0599,
"step": 920
},
{
"epoch": 7.098765432098766,
"grad_norm": 18.208322525024414,
"learning_rate": 6.494192185850054e-06,
"loss": 0.1256,
"step": 930
},
{
"epoch": 7.1082621082621085,
"grad_norm": 6.396451473236084,
"learning_rate": 5.966209081309398e-06,
"loss": 0.0319,
"step": 940
},
{
"epoch": 7.112060778727446,
"eval_accuracy": 0.587378640776699,
"eval_loss": 1.5844324827194214,
"eval_runtime": 38.1637,
"eval_samples_per_second": 5.398,
"eval_steps_per_second": 0.681,
"step": 944
},
{
"epoch": 8.005698005698006,
"grad_norm": 0.25447767972946167,
"learning_rate": 5.438225976768744e-06,
"loss": 0.0605,
"step": 950
},
{
"epoch": 8.015194681861349,
"grad_norm": 3.442044973373413,
"learning_rate": 4.910242872228089e-06,
"loss": 0.0193,
"step": 960
},
{
"epoch": 8.024691358024691,
"grad_norm": 0.15950489044189453,
"learning_rate": 4.382259767687434e-06,
"loss": 0.0443,
"step": 970
},
{
"epoch": 8.034188034188034,
"grad_norm": 0.21153709292411804,
"learning_rate": 3.854276663146779e-06,
"loss": 0.0152,
"step": 980
},
{
"epoch": 8.043684710351377,
"grad_norm": 0.30688220262527466,
"learning_rate": 3.326293558606125e-06,
"loss": 0.0275,
"step": 990
},
{
"epoch": 8.05318138651472,
"grad_norm": 0.7708584070205688,
"learning_rate": 2.79831045406547e-06,
"loss": 0.0195,
"step": 1000
},
{
"epoch": 8.062678062678062,
"grad_norm": 1.1436554193496704,
"learning_rate": 2.2703273495248154e-06,
"loss": 0.0098,
"step": 1010
},
{
"epoch": 8.072174738841406,
"grad_norm": 1.9318220615386963,
"learning_rate": 1.7423442449841606e-06,
"loss": 0.0597,
"step": 1020
},
{
"epoch": 8.081671415004749,
"grad_norm": 3.5386385917663574,
"learning_rate": 1.2143611404435059e-06,
"loss": 0.0213,
"step": 1030
},
{
"epoch": 8.091168091168091,
"grad_norm": 0.16293883323669434,
"learning_rate": 6.863780359028511e-07,
"loss": 0.0118,
"step": 1040
},
{
"epoch": 8.100664767331434,
"grad_norm": 2.2753586769104004,
"learning_rate": 1.5839493136219642e-07,
"loss": 0.0203,
"step": 1050
},
{
"epoch": 8.103513770180436,
"eval_accuracy": 0.6213592233009708,
"eval_loss": 1.579399824142456,
"eval_runtime": 23.3894,
"eval_samples_per_second": 8.807,
"eval_steps_per_second": 1.112,
"step": 1053
},
{
"epoch": 8.103513770180436,
"step": 1053,
"total_flos": 1.0477471675667448e+19,
"train_loss": 0.7528787313023864,
"train_runtime": 2130.6056,
"train_samples_per_second": 3.954,
"train_steps_per_second": 0.494
},
{
"epoch": 8.103513770180436,
"eval_accuracy": 0.551219512195122,
"eval_loss": 1.7062230110168457,
"eval_runtime": 32.2785,
"eval_samples_per_second": 6.351,
"eval_steps_per_second": 0.805,
"step": 1053
},
{
"epoch": 8.103513770180436,
"eval_accuracy": 0.551219512195122,
"eval_loss": 1.7062228918075562,
"eval_runtime": 25.2134,
"eval_samples_per_second": 8.131,
"eval_steps_per_second": 1.031,
"step": 1053
}
],
"logging_steps": 10,
"max_steps": 1053,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0477471675667448e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}