## Train Config

```yaml
base_model: allganize/Llama-3-Alpha-Ko-8B-Instruct
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: ?
    type: alpaca
dataset_prepared_path:
val_set_size: 0
output_dir: ./outputs/qlora-out

adapter: qlora
lora_model_dir:

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
  - q_proj
  - v_proj
lora_target_linear: true
lora_fan_in_fan_out:
lora_modules_to_save:
  - embed_tokens
  - lm_head

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 3
optimizer: paged_adamw_32bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 100
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.01
neftune_noise_alpha: 5
fsdp:
fsdp_config:
special_tokens:
  pad_token: "<|end_of_text|>"
```
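The config follows the Axolotl format: the base model is loaded in 4-bit (`load_in_4bit: true`) and a LoRA adapter (r=32, alpha=16) is trained over `q_proj`/`v_proj` plus all linear layers (`lora_target_linear: true`), with `embed_tokens` and `lm_head` also kept trainable. Training is typically launched with `accelerate launch -m axolotl.cli.train <config>.yml`. Below is a minimal inference sketch for the resulting adapter, not part of the original card: the adapter path mirrors `output_dir` above, and the Alpaca-style prompt is an assumption based on `type: alpaca` in the dataset config.

```python
# Minimal sketch: attach the trained QLoRA adapter to the bf16 base model
# with PEFT. ADAPTER_DIR is taken from output_dir in the config above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "allganize/Llama-3-Alpha-Ko-8B-Instruct"  # base_model in the config
ADAPTER_DIR = "./outputs/qlora-out"                 # output_dir in the config

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base = AutoModelForCausalLM.from_pretrained(
    BASE_ID,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)
model.eval()

# Alpaca-style prompt, assumed from `type: alpaca` in the dataset config.
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nIntroduce yourself briefly.\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```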

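For deployment without a `peft` dependency, the adapter can be folded into the base weights. A standalone sketch under the same assumptions; `MERGED_DIR` is a hypothetical output location:

```python
# Merge the QLoRA adapter into the base model and save a plain checkpoint.
# merge_and_unload() is the standard PEFT API for LoRA adapters.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "allganize/Llama-3-Alpha-Ko-8B-Instruct"
ADAPTER_DIR = "./outputs/qlora-out"  # output_dir from the config
MERGED_DIR = "./outputs/merged"      # hypothetical destination

base = AutoModelForCausalLM.from_pretrained(BASE_ID, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)
merged = model.merge_and_unload()    # fold LoRA deltas into the base weights
merged.save_pretrained(MERGED_DIR)
AutoTokenizer.from_pretrained(BASE_ID).save_pretrained(MERGED_DIR)
```

Because `lora_modules_to_save` retrains `embed_tokens` and `lm_head` in full, the merged checkpoint carries those fine-tuned weights as well.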