{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.7091093802912294,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13545546901456146,
"grad_norm": 1574869.2083632639,
"learning_rate": 4.77416440831075e-05,
"loss": 1353.2703,
"step": 50
},
{
"epoch": 0.2709109380291229,
"grad_norm": 574636.4713829944,
"learning_rate": 4.5483288166214996e-05,
"loss": 494.2715,
"step": 100
},
{
"epoch": 0.4063664070436844,
"grad_norm": 165921.43125685613,
"learning_rate": 4.3224932249322496e-05,
"loss": 355.075,
"step": 150
},
{
"epoch": 0.5418218760582458,
"grad_norm": 1054175.2125193765,
"learning_rate": 4.0966576332429996e-05,
"loss": 298.0039,
"step": 200
},
{
"epoch": 0.6772773450728073,
"grad_norm": 304903.3231599474,
"learning_rate": 3.870822041553749e-05,
"loss": 256.8294,
"step": 250
},
{
"epoch": 0.8127328140873687,
"grad_norm": 761760.3140334514,
"learning_rate": 3.644986449864499e-05,
"loss": 184.8965,
"step": 300
},
{
"epoch": 0.9481882831019303,
"grad_norm": 154977.28987328513,
"learning_rate": 3.419150858175248e-05,
"loss": 71.5309,
"step": 350
},
{
"epoch": 1.0836437521164917,
"grad_norm": 66782.93493039718,
"learning_rate": 3.193315266485998e-05,
"loss": 35.7074,
"step": 400
},
{
"epoch": 1.2190992211310532,
"grad_norm": 80713.24189086114,
"learning_rate": 2.9674796747967482e-05,
"loss": 14.7926,
"step": 450
},
{
"epoch": 1.3545546901456147,
"grad_norm": 158997.17335227018,
"learning_rate": 2.741644083107498e-05,
"loss": 9.5767,
"step": 500
},
{
"epoch": 1.490010159160176,
"grad_norm": 113.74950562638243,
"learning_rate": 2.515808491418248e-05,
"loss": 5.2432,
"step": 550
},
{
"epoch": 1.6254656281747377,
"grad_norm": 27075.61142018447,
"learning_rate": 2.2899728997289975e-05,
"loss": 3.0958,
"step": 600
},
{
"epoch": 1.760921097189299,
"grad_norm": 10278.977215554642,
"learning_rate": 2.0641373080397472e-05,
"loss": 2.0576,
"step": 650
},
{
"epoch": 1.8963765662038605,
"grad_norm": 12596.77587898223,
"learning_rate": 1.8383017163504972e-05,
"loss": 1.2601,
"step": 700
},
{
"epoch": 2.031832035218422,
"grad_norm": 13479.113907510653,
"learning_rate": 1.6124661246612465e-05,
"loss": 0.8444,
"step": 750
},
{
"epoch": 2.1672875042329833,
"grad_norm": 30111.248342855248,
"learning_rate": 1.3866305329719963e-05,
"loss": 0.6148,
"step": 800
},
{
"epoch": 2.302742973247545,
"grad_norm": 116.05101187578147,
"learning_rate": 1.1607949412827462e-05,
"loss": 0.3569,
"step": 850
},
{
"epoch": 2.4381984422621064,
"grad_norm": 4598.6273543174375,
"learning_rate": 9.34959349593496e-06,
"loss": 0.2227,
"step": 900
},
{
"epoch": 2.5736539112766676,
"grad_norm": 1998.2329181500259,
"learning_rate": 7.0912375790424575e-06,
"loss": 0.1308,
"step": 950
},
{
"epoch": 2.7091093802912294,
"grad_norm": 2452.7106262167913,
"learning_rate": 4.832881662149955e-06,
"loss": 0.0662,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 1107,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.507046875496448e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}