#!/bin/bash
# llavaguard/scripts/run_instructblip_attack.sh
set -x
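# Target model, path to the Vicuna-13B-v1.1 checkpoint, and the GPU to run inference on.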
MODEL=instructblip
MODEL_PATH=/workingdir/models_hf/lmsys/vicuna-13b-v1.1
GPU_ID=2
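# Run the attack in three evaluation settings: unconstrained, constrained, and question answering (qna).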
for TASK in unconstrained constrained qna; do
    INFERENCE_FILE="outputs/${MODEL}/inference_${MODEL}_attack_${TASK}"
    METRIC_FILE="outputs/${MODEL}/metric_${MODEL}_attack_${TASK}"
    SUMMARY_FILE="outputs/${MODEL}/summary_${MODEL}_attack_${TASK}"
if [ "${TASK}" = "constrained" ]; then
echo "Running constrained"
python instructblip_constrained_inference.py --gpu-id ${GPU_ID} \
--model_path ${MODEL_PATH} \
--output_file ${INFERENCE_FILE} \
--do_attack
elif [ "${TASK}" = "unconstrained" ]; then
echo "Running unconstrained"
python instructblip_unconstrained_inference.py --gpu-id ${GPU_ID} \
--model_path ${MODEL_PATH} \
--output_file ${INFERENCE_FILE} \
--do_attack
elif [ "${TASK}" = "qna" ]; then
echo "Running qna"
python instructblip_qna.py --gpu-id ${GPU_ID} \
--model_path ${MODEL_PATH} \
--output_file ${INFERENCE_FILE} \
--do_attack
else
echo "Wrong Implementation"
exit 1
fi
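    # Score the saved generations for this task, then aggregate the scores into a summary file.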
    CUDA_VISIBLE_DEVICES=${GPU_ID} python get_metric.py --input ${INFERENCE_FILE} \
        --output ${METRIC_FILE} \
        --perplexity ${SUMMARY_FILE} \
        --load_existing_generation \
        --device cuda
    python cal_metrics.py --input ${METRIC_FILE} \
        --output ${SUMMARY_FILE}
done