{
"best_metric": 24.489795918367346,
"best_model_checkpoint": "/content/drive/MyDrive/vavista_kannada_collection_sales_bot_v2/checkpoint-900",
"epoch": 14.285714285714286,
"eval_steps": 100,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4,
"grad_norm": 54.585182189941406,
"learning_rate": 3.8e-07,
"loss": 2.0199,
"step": 25
},
{
"epoch": 0.79,
"grad_norm": 38.83250427246094,
"learning_rate": 8.8e-07,
"loss": 1.7912,
"step": 50
},
{
"epoch": 1.19,
"grad_norm": 33.85667037963867,
"learning_rate": 1.3800000000000001e-06,
"loss": 1.2745,
"step": 75
},
{
"epoch": 1.59,
"grad_norm": 16.32790184020996,
"learning_rate": 1.8800000000000002e-06,
"loss": 0.7215,
"step": 100
},
{
"epoch": 1.59,
"eval_loss": 0.5508027672767639,
"eval_runtime": 174.0594,
"eval_samples_per_second": 1.419,
"eval_steps_per_second": 0.046,
"eval_wer": 116.3265306122449,
"step": 100
},
{
"epoch": 1.98,
"grad_norm": 9.59360408782959,
"learning_rate": 2.38e-06,
"loss": 0.4407,
"step": 125
},
{
"epoch": 2.38,
"grad_norm": 7.552663326263428,
"learning_rate": 2.88e-06,
"loss": 0.3329,
"step": 150
},
{
"epoch": 2.78,
"grad_norm": 7.011340141296387,
"learning_rate": 3.3800000000000007e-06,
"loss": 0.2694,
"step": 175
},
{
"epoch": 3.17,
"grad_norm": 5.541098117828369,
"learning_rate": 3.88e-06,
"loss": 0.2487,
"step": 200
},
{
"epoch": 3.17,
"eval_loss": 0.2373683899641037,
"eval_runtime": 132.9698,
"eval_samples_per_second": 1.858,
"eval_steps_per_second": 0.06,
"eval_wer": 71.68367346938776,
"step": 200
},
{
"epoch": 3.57,
"grad_norm": 4.505865573883057,
"learning_rate": 4.38e-06,
"loss": 0.1927,
"step": 225
},
{
"epoch": 3.97,
"grad_norm": 5.797987461090088,
"learning_rate": 4.880000000000001e-06,
"loss": 0.1946,
"step": 250
},
{
"epoch": 4.37,
"grad_norm": 4.06318998336792,
"learning_rate": 5.380000000000001e-06,
"loss": 0.161,
"step": 275
},
{
"epoch": 4.76,
"grad_norm": 3.223536252975464,
"learning_rate": 5.8800000000000005e-06,
"loss": 0.139,
"step": 300
},
{
"epoch": 4.76,
"eval_loss": 0.18038798868656158,
"eval_runtime": 87.9591,
"eval_samples_per_second": 2.808,
"eval_steps_per_second": 0.091,
"eval_wer": 40.05102040816326,
"step": 300
},
{
"epoch": 5.16,
"grad_norm": 5.675058841705322,
"learning_rate": 6.380000000000001e-06,
"loss": 0.1185,
"step": 325
},
{
"epoch": 5.56,
"grad_norm": 3.9274826049804688,
"learning_rate": 6.88e-06,
"loss": 0.1022,
"step": 350
},
{
"epoch": 5.95,
"grad_norm": 3.3899903297424316,
"learning_rate": 7.3800000000000005e-06,
"loss": 0.1012,
"step": 375
},
{
"epoch": 6.35,
"grad_norm": 2.7243378162384033,
"learning_rate": 7.88e-06,
"loss": 0.0712,
"step": 400
},
{
"epoch": 6.35,
"eval_loss": 0.16518759727478027,
"eval_runtime": 45.1355,
"eval_samples_per_second": 5.472,
"eval_steps_per_second": 0.177,
"eval_wer": 29.591836734693878,
"step": 400
},
{
"epoch": 6.75,
"grad_norm": 5.021995544433594,
"learning_rate": 8.380000000000001e-06,
"loss": 0.0718,
"step": 425
},
{
"epoch": 7.14,
"grad_norm": 2.2155253887176514,
"learning_rate": 8.880000000000001e-06,
"loss": 0.0687,
"step": 450
},
{
"epoch": 7.54,
"grad_norm": 3.375765562057495,
"learning_rate": 9.38e-06,
"loss": 0.0506,
"step": 475
},
{
"epoch": 7.94,
"grad_norm": 2.747950553894043,
"learning_rate": 9.88e-06,
"loss": 0.0448,
"step": 500
},
{
"epoch": 7.94,
"eval_loss": 0.15877747535705566,
"eval_runtime": 45.849,
"eval_samples_per_second": 5.387,
"eval_steps_per_second": 0.174,
"eval_wer": 26.53061224489796,
"step": 500
},
{
"epoch": 8.33,
"grad_norm": 2.5800018310546875,
"learning_rate": 9.620000000000001e-06,
"loss": 0.0336,
"step": 525
},
{
"epoch": 8.73,
"grad_norm": 2.498263359069824,
"learning_rate": 9.12e-06,
"loss": 0.0264,
"step": 550
},
{
"epoch": 9.13,
"grad_norm": 1.941878318786621,
"learning_rate": 8.62e-06,
"loss": 0.0295,
"step": 575
},
{
"epoch": 9.52,
"grad_norm": 2.1652817726135254,
"learning_rate": 8.120000000000002e-06,
"loss": 0.0186,
"step": 600
},
{
"epoch": 9.52,
"eval_loss": 0.17014923691749573,
"eval_runtime": 45.3455,
"eval_samples_per_second": 5.447,
"eval_steps_per_second": 0.176,
"eval_wer": 27.29591836734694,
"step": 600
},
{
"epoch": 9.92,
"grad_norm": 1.3376543521881104,
"learning_rate": 7.620000000000001e-06,
"loss": 0.02,
"step": 625
},
{
"epoch": 10.32,
"grad_norm": 1.175929069519043,
"learning_rate": 7.1200000000000004e-06,
"loss": 0.013,
"step": 650
},
{
"epoch": 10.71,
"grad_norm": 1.1778250932693481,
"learning_rate": 6.620000000000001e-06,
"loss": 0.0115,
"step": 675
},
{
"epoch": 11.11,
"grad_norm": 1.2574666738510132,
"learning_rate": 6.120000000000001e-06,
"loss": 0.0115,
"step": 700
},
{
"epoch": 11.11,
"eval_loss": 0.17647403478622437,
"eval_runtime": 45.0453,
"eval_samples_per_second": 5.483,
"eval_steps_per_second": 0.178,
"eval_wer": 25.637755102040817,
"step": 700
},
{
"epoch": 11.51,
"grad_norm": 0.981600284576416,
"learning_rate": 5.620000000000001e-06,
"loss": 0.0068,
"step": 725
},
{
"epoch": 11.9,
"grad_norm": 1.208804726600647,
"learning_rate": 5.12e-06,
"loss": 0.0097,
"step": 750
},
{
"epoch": 12.3,
"grad_norm": 0.9865374565124512,
"learning_rate": 4.620000000000001e-06,
"loss": 0.0058,
"step": 775
},
{
"epoch": 12.7,
"grad_norm": 0.5761059522628784,
"learning_rate": 4.12e-06,
"loss": 0.0057,
"step": 800
},
{
"epoch": 12.7,
"eval_loss": 0.1846890151500702,
"eval_runtime": 45.0489,
"eval_samples_per_second": 5.483,
"eval_steps_per_second": 0.178,
"eval_wer": 26.27551020408163,
"step": 800
},
{
"epoch": 13.1,
"grad_norm": 0.34698066115379333,
"learning_rate": 3.62e-06,
"loss": 0.0049,
"step": 825
},
{
"epoch": 13.49,
"grad_norm": 0.2580834925174713,
"learning_rate": 3.12e-06,
"loss": 0.0036,
"step": 850
},
{
"epoch": 13.89,
"grad_norm": 0.6976760625839233,
"learning_rate": 2.6200000000000003e-06,
"loss": 0.0043,
"step": 875
},
{
"epoch": 14.29,
"grad_norm": 0.257933646440506,
"learning_rate": 2.12e-06,
"loss": 0.0031,
"step": 900
},
{
"epoch": 14.29,
"eval_loss": 0.18790897727012634,
"eval_runtime": 45.02,
"eval_samples_per_second": 5.486,
"eval_steps_per_second": 0.178,
"eval_wer": 24.489795918367346,
"step": 900
}
],
"logging_steps": 25,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 16,
"save_steps": 100,
"total_flos": 7.0350926118912e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}