{
"best_metric": 0.5063291139240507,
"best_model_checkpoint": "videomae-base-finetuned-kinetics-finetuned-caer-final\\checkpoint-146",
"epoch": 1.4383561643835616,
"eval_steps": 500,
"global_step": 146,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"grad_norm": 5.686412811279297,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.6544,
"step": 10
},
{
"epoch": 0.14,
"grad_norm": 5.574044704437256,
"learning_rate": 4.809160305343512e-05,
"loss": 1.5654,
"step": 20
},
{
"epoch": 0.21,
"grad_norm": 5.577368259429932,
"learning_rate": 4.4274809160305345e-05,
"loss": 1.5835,
"step": 30
},
{
"epoch": 0.27,
"grad_norm": 6.698699474334717,
"learning_rate": 4.0458015267175576e-05,
"loss": 1.531,
"step": 40
},
{
"epoch": 0.34,
"grad_norm": 7.928526878356934,
"learning_rate": 3.66412213740458e-05,
"loss": 1.5016,
"step": 50
},
{
"epoch": 0.41,
"grad_norm": 9.12410831451416,
"learning_rate": 3.282442748091603e-05,
"loss": 1.3787,
"step": 60
},
{
"epoch": 0.48,
"grad_norm": 7.112976551055908,
"learning_rate": 2.900763358778626e-05,
"loss": 1.48,
"step": 70
},
{
"epoch": 0.55,
"grad_norm": 6.422354698181152,
"learning_rate": 2.5190839694656487e-05,
"loss": 1.4128,
"step": 80
},
{
"epoch": 0.56,
"eval_accuracy": 0.35443037974683544,
"eval_loss": 1.4815402030944824,
"eval_runtime": 28.5428,
"eval_samples_per_second": 2.768,
"eval_steps_per_second": 0.49,
"step": 82
},
{
"epoch": 1.05,
"grad_norm": 6.360805511474609,
"learning_rate": 2.1374045801526718e-05,
"loss": 1.3173,
"step": 90
},
{
"epoch": 1.12,
"grad_norm": 8.944757461547852,
"learning_rate": 1.7557251908396945e-05,
"loss": 1.1789,
"step": 100
},
{
"epoch": 1.19,
"grad_norm": 8.334917068481445,
"learning_rate": 1.3740458015267178e-05,
"loss": 1.2477,
"step": 110
},
{
"epoch": 1.26,
"grad_norm": 11.71667194366455,
"learning_rate": 9.923664122137405e-06,
"loss": 1.1704,
"step": 120
},
{
"epoch": 1.33,
"grad_norm": 11.985795021057129,
"learning_rate": 6.106870229007634e-06,
"loss": 1.0841,
"step": 130
},
{
"epoch": 1.4,
"grad_norm": 7.384586334228516,
"learning_rate": 2.2900763358778625e-06,
"loss": 1.0255,
"step": 140
},
{
"epoch": 1.44,
"eval_accuracy": 0.5063291139240507,
"eval_loss": 1.3321611881256104,
"eval_runtime": 28.5122,
"eval_samples_per_second": 2.771,
"eval_steps_per_second": 0.491,
"step": 146
},
{
"epoch": 1.44,
"step": 146,
"total_flos": 1.0865973925262131e+18,
"train_loss": 1.3585324875295979,
"train_runtime": 492.8068,
"train_samples_per_second": 1.778,
"train_steps_per_second": 0.296
},
{
"epoch": 1.44,
"eval_accuracy": 0.5,
"eval_loss": 1.355814814567566,
"eval_runtime": 18.5285,
"eval_samples_per_second": 3.022,
"eval_steps_per_second": 0.54,
"step": 146
},
{
"epoch": 1.44,
"eval_accuracy": 0.5063291139240507,
"eval_loss": 1.3321610689163208,
"eval_runtime": 31.1651,
"eval_samples_per_second": 2.535,
"eval_steps_per_second": 0.449,
"step": 146
}
],
"logging_steps": 10,
"max_steps": 146,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 1.0865973925262131e+18,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}