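#!/bin/bash
# Continued-pretraining runs launched with FastChat's streaming pretraining
# script (fastchat/train/pretrain_streaming_mem.py) over the
# pretrain_data_170G corpus. Each "set" below is a separate single-node,
# 8-GPU torchrun + FSDP configuration; the runs execute sequentially and
# append stdout/stderr to their own pretrain_setN.log. Evaluation is
# disabled (--evaluation_strategy "no"); checkpoints are saved by step count.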
# export WANDB_MODE=offline

# Alternative base model: openlm-research/open_llama_3b
# Alternative to --max_steps (epoch-based training):
#     --num_train_epochs 1 \
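# Set 1: Llama-2-7B, 12,000 steps at sequence length 4096.
# Per-device batch 16 x grad accumulation 32 x 8 GPUs = 4096 sequences per
# optimizer step; 2% warmup; checkpoint every 250 steps.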
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 32 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.02 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set1.log


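# Set 2: Llama-2-7B, 24,000 steps at sequence length 4096.
# Per-device batch 16 x grad accumulation 16 x 8 GPUs = 2048 sequences per
# optimizer step; 4% warmup; checkpoint every 500 steps.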
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 24000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 500 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set2.log



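# Set 3: Llama-2-7B, 24,000 steps at sequence length 4096.
# Per-device batch 8 x grad accumulation 32 x 8 GPUs = 2048 sequences per
# optimizer step (same global batch as Set 2, lower per-GPU memory).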
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 24000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 32 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 500 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True &>> pretrain_set3.log

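# Set 4: Llama-2-7B, 12,000 steps at the shorter sequence length 2048.
# Per-device batch 32 x grad accumulation 8 x 8 GPUs = 2048 sequences per
# optimizer step; checkpoint every 250 steps.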
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path NousResearch/Llama-2-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama2_pretrain \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> pretrain_set4.log

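# Set 5: same schedule as Set 4 but starting from LLaMA-7B
# (yahma/llama-7b-hf) and writing checkpoints to output_medllama_pretrain.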
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/pretrain_streaming_mem.py \
    --model_name_or_path yahma/llama-7b-hf \
    --train_file_dir /workspace/medvicuna/pretrain_data_170G \
    --cache_dir /workspace/.cache \
    --bf16 True \
    --max_steps 12000 \
    --output_dir /workspace/medvicuna/output_medllama_pretrain \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --eval_steps 4500 \
    --save_strategy "steps" \
    --save_steps 250 \
    --save_total_limit 1000 \
    --learning_rate 5e-5 \
    --weight_decay 0.1 \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> pretrain_set5.log