# Run 1: Llama-2-7B, 12k steps, 4096-token context, per-GPU batch 16 with grad accum 32, warmup 0.02, checkpoint every 250 steps.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 32 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.02 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set1.log
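
# Rough throughput math for run 1 (assuming one data-parallel rank per GPU under FSDP
# and sequences packed to the full 4096-token context): 16 seqs/GPU x 32 accum x 8 GPUs
# = 4096 sequences (~16.8M tokens) per optimizer step, ~201B tokens over 12k steps.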
|
|
|
|
|
# Run 2: Llama-2-7B, 24k steps, 4096-token context, per-GPU batch 16 with grad accum 16, warmup 0.04, checkpoint every 500 steps.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 24000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 500 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set2.log
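
# Run 2 math under the same assumptions: 16 x 16 x 8 = 2048 sequences (~8.4M tokens) per
# optimizer step; with 24k steps the total token budget (~201B) roughly matches run 1,
# just with half the global batch and twice the steps.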
|
|
|
|
|
|
|
# Run 3: Llama-2-7B, 24k steps, 4096-token context, per-GPU batch 8 with grad accum 32, checkpoint every 500 steps.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 24000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 32 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 500 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set3.log
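
# Run 3 keeps run 2's global batch (8 x 32 x 8 = 2048 sequences per step) but halves the
# per-GPU micro-batch and doubles gradient accumulation, which lowers peak activation
# memory per GPU at the cost of more forward/backward passes per optimizer step.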
|
|
|
# Run 4: Llama-2-7B, 12k steps, 2048-token context, per-GPU batch 32 with grad accum 8, checkpoint every 250 steps.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> pretrain_set4.log
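
# Run 4 math under the same assumptions: 32 x 8 x 8 = 2048 sequences per step, but at the
# 2048-token context that is ~4.2M tokens per step, i.e. ~50B tokens over 12k steps.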
|
|
|
# Run 5: LLaMA-7B baseline (yahma/llama-7b-hf) with run 4's schedule; 12k steps, 2048-token context, per-GPU batch 32 with grad accum 8.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path yahma/llama-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama_pretrain \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> pretrain_set5.log
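
# Run 5 shares run 4's throughput math (~2048 sequences, ~4.2M tokens per step). Note
# that yahma/llama-7b-hf is a LLaMA-1 7B checkpoint with a 2048-token native context,
# which lines up with --model_max_length 2048, and it writes to output_medllama_pretrain
# so its checkpoints stay separate from the Llama-2 runs.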