{
  "best_metric": 0.2397383749485016,
  "best_model_checkpoint": "./Mistral/15-03-24-Weni-ZeroShot-3.4.12-Mistral-7b-DPO-1.0.0_ZeroShot DPO Training a improved dataset and best hyperparameters found so far-2_max_steps-144_batch_32_2024-03-15_ppid_9/checkpoint-100",
  "epoch": 2.0408163265306123,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.41,
      "grad_norm": 5.192111492156982,
      "learning_rate": 0.0001937984496124031,
      "logits/chosen": -0.9390562176704407,
      "logits/rejected": -0.9053889513015747,
      "logps/chosen": -21.715065002441406,
      "logps/rejected": -29.212356567382812,
      "loss": 0.4668,
      "rewards/accuracies": 0.659375011920929,
      "rewards/chosen": -0.06483893096446991,
      "rewards/margins": 1.010897159576416,
      "rewards/rejected": -1.075736165046692,
      "step": 20
    },
    {
      "epoch": 0.82,
      "grad_norm": 1.8516901731491089,
      "learning_rate": 0.00016279069767441862,
      "logits/chosen": -0.5763565301895142,
      "logits/rejected": -0.4915798306465149,
      "logps/chosen": -21.638835906982422,
      "logps/rejected": -44.50564956665039,
      "loss": 0.2617,
      "rewards/accuracies": 0.8968750238418579,
      "rewards/chosen": -0.01356862299144268,
      "rewards/margins": 2.59334135055542,
      "rewards/rejected": -2.606910228729248,
      "step": 40
    },
    {
      "epoch": 1.02,
      "eval_logits/chosen": -0.6646511554718018,
      "eval_logits/rejected": -0.5983995795249939,
      "eval_logps/chosen": -21.368825912475586,
      "eval_logps/rejected": -46.24407196044922,
      "eval_loss": 0.2652554214000702,
      "eval_rewards/accuracies": 0.8522727489471436,
      "eval_rewards/chosen": -0.04463845491409302,
      "eval_rewards/margins": 2.807713747024536,
      "eval_rewards/rejected": -2.8523521423339844,
      "eval_runtime": 81.9688,
      "eval_samples_per_second": 2.123,
      "eval_steps_per_second": 0.268,
      "step": 50
    },
    {
      "epoch": 1.22,
      "grad_norm": 3.232189178466797,
      "learning_rate": 0.0001317829457364341,
      "logits/chosen": -0.6450687050819397,
      "logits/rejected": -0.5619121193885803,
      "logps/chosen": -20.606281280517578,
      "logps/rejected": -48.98772430419922,
      "loss": 0.2055,
      "rewards/accuracies": 0.909375011920929,
      "rewards/chosen": 0.04113447293639183,
      "rewards/margins": 3.0845394134521484,
      "rewards/rejected": -3.0434048175811768,
      "step": 60
    },
    {
      "epoch": 1.63,
      "grad_norm": 2.522237777709961,
      "learning_rate": 0.00010077519379844962,
      "logits/chosen": -0.4938630163669586,
      "logits/rejected": -0.43368759751319885,
      "logps/chosen": -17.037982940673828,
      "logps/rejected": -50.90436553955078,
      "loss": 0.1921,
      "rewards/accuracies": 0.917187511920929,
      "rewards/chosen": 0.40310320258140564,
      "rewards/margins": 3.6394691467285156,
      "rewards/rejected": -3.236365795135498,
      "step": 80
    },
    {
      "epoch": 2.04,
      "grad_norm": 2.4511005878448486,
      "learning_rate": 7.131782945736435e-05,
      "logits/chosen": -0.4601399898529053,
      "logits/rejected": -0.41903334856033325,
      "logps/chosen": -22.506694793701172,
      "logps/rejected": -49.0537223815918,
      "loss": 0.2375,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -0.1375463902950287,
      "rewards/margins": 2.9313812255859375,
      "rewards/rejected": -3.068927764892578,
      "step": 100
    },
    {
      "epoch": 2.04,
      "eval_logits/chosen": -0.4721185564994812,
      "eval_logits/rejected": -0.438131719827652,
      "eval_logps/chosen": -19.791969299316406,
      "eval_logps/rejected": -41.700191497802734,
      "eval_loss": 0.2397383749485016,
      "eval_rewards/accuracies": 0.8636363744735718,
      "eval_rewards/chosen": 0.11304693669080734,
      "eval_rewards/margins": 2.5110111236572266,
      "eval_rewards/rejected": -2.3979642391204834,
      "eval_runtime": 81.8963,
      "eval_samples_per_second": 2.125,
      "eval_steps_per_second": 0.269,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 144,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}