|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Tokenizer
tokenizer:
  _component_: torchtune.models.llama3.llama3_s_tokenizer
  path: ../model_zoo/tokenizer.model
  max_seq_len: 1024
|
|
|
|
|
# Dataset
dataset:
  _component_: torchtune.datasets.chat_dataset
  source: homebrewltd/instruction-speech-whispervq-v2
  conversation_style: openai
  max_seq_len: 1024
  split: train
  train_on_input: True

seed: 42
shuffle: True
|
|
|
# Model arguments
model:
  _component_: torchtune.models.llama3_1.llama3_1_s_8b
|
|
|
checkpointer:
  _component_: torchtune.utils.FullModelHFCheckpointerSaveSteps
  checkpoint_dir: ../model_zoo/llama3.1-s-base-2024-08-17
  checkpoint_files: [
    model-00001-of-00004.safetensors,
    model-00002-of-00004.safetensors,
    model-00003-of-00004.safetensors,
    model-00004-of-00004.safetensors,
  ]
  recipe_checkpoint: null
  output_dir: ../model_zoo/llama3-s-instruct
  model_type: LLAMA3

resume_from_checkpoint: False
# Checkpoint cadence consumed by the SaveSteps checkpointer/recipe:
# save every 1000 steps, keep at most 3 rolling checkpoints.
save_every_n_steps: 1000
max_checkpoints: 3
|
|
|
# Fine-tuning arguments
batch_size: 16
epochs: 5
max_steps_per_epoch: null
gradient_accumulation_steps: 1
compile: False
|
|
|
optimizer:
  _component_: torch.optim.AdamW
  weight_decay: 0.005
  lr: 1e-4
  fused: True

lr_scheduler:
  _component_: torchtune.modules.get_cosine_schedule_with_warmup
  num_warmup_steps: 80
|
|
|
loss:
  _component_: torch.nn.CrossEntropyLoss

fsdp:
  cpu_offload: False
|
|
|
|
|
# Environment
device: cuda
dtype: bf16

# Memory management
enable_activation_checkpointing: True
memory_efficient_fsdp_wrap: True
ac_mode: 'selective'
|
|
|
|
|
|
|
# Logging
metric_logger:
  _component_: torchtune.utils.metric_logging.DiskLogger
  # ${output_dir} resolves via OmegaConf interpolation to the top-level
  # output_dir defined below.
  log_dir: ${output_dir}

output_dir: ../model_zoo/Llama3-instruct-log/
log_every_n_steps: 1
log_peak_memory_stats: False