{"model": "meta-llama/Llama-2-7b-hf", "data_path": "10k/", "project_name": "llm_llama", "train_split": "train", "valid_split": null, "text_column": "text", "rejected_text_column": "rejected", "lr": 0.001, "epochs": 100, "batch_size": 16, "warmup_ratio": 0.1, "gradient_accumulation": 4, "optimizer": "adamw_torch", "scheduler": "linear", "weight_decay": 0.01, "max_grad_norm": 1.0, "seed": 42, "add_eos_token": false, "block_size": 1024, "use_peft": true, "lora_r": 16, "lora_alpha": 32, "lora_dropout": 0.1, "logging_steps": -1, "evaluation_strategy": "epoch", "save_total_limit": 10, "save_strategy": "epoch", "auto_find_batch_size": false, "fp16": true, "push_to_hub": true, "use_int8": false, "model_max_length": 1024, "repo_id": "Username6568/cnn_10k_llama2", "use_int4": true, "trainer": "default", "target_modules": "q_proj,v_proj", "merge_adapter": true, "username": null, "use_flash_attention_2": false, "log": "wandb", "disable_gradient_checkpointing": false, "model_ref": null, "dpo_beta": 0.1, "prompt_text_column": "prompt"}