#!/bin/bash

# LLaVA-style multimodal pretraining launch script (DeepSpeed ZeRO-3, single node, 2 GPUs).
# Backbone LLM: gemma-7b-it; vision tower: EVA-ViT-g with a BLIP-2 Q-Former checkpoint.
# With --tune_mm_mlp_adapter True, this stage trains only the mlp2x_gelu projector.
# Effective global batch size: 32 per device x 2 GPUs x 1 accumulation step = 64.
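# For reference, a minimal sketch of the ZeRO-3 config referenced below via
# --deepspeed ./scripts/zero3.json. This mirrors the shape of the stock LLaVA
# zero3.json ("auto" values are filled in by the HuggingFace Trainer integration
# at launch); the repo's actual file may differ, so it is only written if missing.
if [ ! -f ./scripts/zero3.json ]; then
cat > ./scripts/zero3.json <<'EOF'
{
    "bf16": { "enabled": "auto" },
    "fp16": { "enabled": "auto" },
    "train_micro_batch_size_per_gpu": "auto",
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "zero_optimization": {
        "stage": 3,
        "overlap_comm": true,
        "contiguous_gradients": true,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "stage3_gather_16bit_weights_on_model_save": true
    }
}
EOF
fi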
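# Optional preflight sketch: fail fast if any of the hard-coded model/config
# paths used by the command below is missing (paths are copied verbatim from
# the flags; nothing here is new).
for p in \
    /mnt/bn/algo-masp-nas-2/xiangchen/model/gemma-7b-it \
    /mnt/bn/algo-masp-nas-2/xiangchen/repo/LLaVA/llava/configs/pretrain_debug.yaml \
    /mnt/bn/data-tns-algo-masp/baiyi.by/masp/model/eva_vit_g.pth \
    /mnt/bn/data-tns-algo-masp/baiyi.by/masp/model/blip2_pretrained_flant5xxl.pth; do
    if [ ! -e "$p" ]; then
        echo "Missing required path: $p" >&2
        exit 1
    fi
done
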
deepspeed --num_nodes=1 --num_gpus=2 --master_port=25004 llava/train/train_mem.py \
    --deepspeed ./scripts/zero3.json \
    --model_name_or_path /mnt/bn/algo-masp-nas-2/xiangchen/model/gemma-7b-it \
    --version plain \
    --dataset_config /mnt/bn/algo-masp-nas-2/xiangchen/repo/LLaVA/llava/configs/pretrain_debug.yaml \
    --vision_tower eva-vit-g \
    --vit_model_path /mnt/bn/data-tns-algo-masp/baiyi.by/masp/model/eva_vit_g.pth \
    --qformer_model_path /mnt/bn/data-tns-algo-masp/baiyi.by/masp/model/blip2_pretrained_flant5xxl.pth \
    --mm_projector_type mlp2x_gelu \
    --tune_mm_mlp_adapter True \
    --mm_vision_select_layer -2 \
    --mm_use_start_end True \
    --mm_use_patch_token False \
    --image_aspect_ratio pad \
    --bf16 True \
    --output_dir ./checkpoints/llava-v1.5-13b-pretrain-with-token \
    --num_train_epochs 1 \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 24000 \
    --save_total_limit 1 \
    --learning_rate 1e-3 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --model_max_length 4096 \
    --gradient_checkpointing True \
    --dataloader_num_workers 1 \
    --lazy_preprocess True \
    --report_to none