Upload pretrain_streaming.sh
pretrain_streaming.sh  ADDED  (+136 -0)
#!/usr/bin/env bash
# Five sequential pretraining launches of fastchat/train/pretrain_streaming_mem.py,
# each on 8 GPUs with FSDP full sharding, appending stdout+stderr to its own log.

# export WANDB_MODE=offline

# openlm-research/open_llama_3b
# --num_train_epochs 1 \

# Run 1: Llama-2-7B, 4096-token context, 12k steps, checkpoint every 250 steps.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 32 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.02 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set1.log
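
# Illustrative aside, not part of the original upload: effective batch sizes
# implied by the flags in the five runs below (8 processes per node throughout):
#   run 1:   8 * 16 * 32 = 4096 seqs/step at 4096 tokens (~16.8M tokens/step)
#   run 2:   8 * 16 * 16 = 2048 seqs/step at 4096 tokens (~8.4M tokens/step)
#   run 3:   8 *  8 * 32 = 2048 seqs/step at 4096 tokens (run 2's global batch,
#            trading per-device batch for more gradient accumulation)
#   run 4/5: 8 * 32 *  8 = 2048 seqs/step at 2048 tokens (~4.2M tokens/step)
GPUS=8; BS=16; ACCUM=32; SEQ=4096; STEPS=12000
echo "run 1: $((GPUS * BS * ACCUM)) seqs/step," \
     "~$((GPUS * BS * ACCUM * SEQ / 1000000))M tokens/step," \
     "~$((GPUS * BS * ACCUM * SEQ * STEPS / 1000000000))B tokens over $STEPS steps"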

# Run 2: same model and context; gradient accumulation halved to 16, 24k steps,
# warmup ratio 0.04, checkpoint every 500 steps.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 24000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 500 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set2.log

# Run 3: per-device batch 8 with grad-accum 32 (same global batch as run 2), 24k steps.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 24000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 32 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 500 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set3.log

# Run 4: 2048-token context, per-device batch 32 with grad-accum 8, 12k steps,
# checkpoint every 250 steps.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> pretrain_set4.log

# Run 5: same recipe as run 4 but on LLaMA-1 7B (yahma/llama-7b-hf), writing to
# a separate output directory.
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path yahma/llama-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama_pretrain \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> pretrain_set5.log
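
# Illustrative aside, not part of the original upload: runs 1-4 share one
# --output_dir, and the five commands run strictly back to back. If a launch is
# interrupted, a minimal sketch for restarting from the newest checkpoint,
# assuming pretrain_streaming_mem.py parses the standard transformers
# TrainingArguments (as FastChat's training scripts do), which include
# --resume_from_checkpoint:
OUT=/workspace/medvicuna/output_medllama2_pretrain
LATEST=$(ls -d "$OUT"/checkpoint-* 2>/dev/null | sort -V | tail -n 1)
echo "latest checkpoint: ${LATEST:-none}"
# Then re-run the matching torchrun command above with one extra flag:
#   --resume_from_checkpoint "$LATEST"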