---
# Fine-tuning configuration (keys match HuggingFace TrainingArguments-style
# hyperparameters — presumably consumed by a Trainer script; confirm against
# the training code).
# NOTE(review): the original file had a trailing " | |" (markdown-table
# extraction residue) on every line, which turned each value into a string
# such as "4 | |" instead of the intended int/bool. Residue removed so each
# scalar parses with its intended type.

base_model_name: meta-llama/Llama-2-7b-hf  # HF hub id of the base checkpoint
batch_size: 4
cot: true  # chain-of-thought flag, matching the COT dataset below
dataset_name: BENBENBENb/ARC1000COT
epochs: 20
eval_strategy: epoch  # evaluate once per epoch
learning_rate: 0.0001
logging_steps: 1  # log every step
output_dir: brettbbb/llama_finetune_arc_20_cot
seed: 42
warmup_steps: 5