#!/bin/bash
# llavaguard/scripts/run_minigpt_safety_patch.sh
# Sweep safety-patch inference + metric computation for MiniGPT-4.
# Trace every command as it runs — useful when auditing long sweep logs.
set -x

# Model under test and the CUDA device id passed to the inference scripts.
MODEL=minigpt4
GPU_ID=1
# For every (task, patch-mode) pair: run the matching inference script, then
# compute per-sample metrics and a summary over its output file.
for TASK in unconstrained constrained qna; do
for SAFETY_PATCH_MODE in heuristic optimized; do
INFERENCE_FILE="outputs/${MODEL}/inference_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"
METRIC_FILE="outputs/${MODEL}/metric_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"
SUMMARY_FILE="outputs/${MODEL}/summary_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"
IMAGE_SAFETY_PATCH=safety_patch/safety_patch.pt
TEXT_SAFETY_PATCH="safety_patch/text_patch_${SAFETY_PATCH_MODE}"
if [ "${TASK}" = "constrained" ]; then
echo "Running constrained"
# FIX: was hard-coded '--gpu-id 2' and '--safety_patch_mode heuristic';
# the 'optimized' sweep iterations previously reran the heuristic patch.
python minigpt_constrained_inference.py --gpu-id "${GPU_ID}" \
    --image_safety_patch "${IMAGE_SAFETY_PATCH}" \
    --text_safety_patch "${TEXT_SAFETY_PATCH}" \
    --output_file "${INFERENCE_FILE}" \
    --safety_patch_mode "${SAFETY_PATCH_MODE}"
elif [ "${TASK}" = "unconstrained" ]; then
# FIX: this branch previously retested "constrained" (copy-paste bug),
# so the unconstrained task always fell through to the error branch.
echo "Running unconstrained"
python minigpt_unconstrained_inference.py --gpu-id "${GPU_ID}" \
    --image_path "${TASK}_attack_images/adversarial_" \
    --image_safety_patch "${IMAGE_SAFETY_PATCH}" \
    --text_safety_patch "${TEXT_SAFETY_PATCH}" \
    --output_file "${INFERENCE_FILE}" \
    --safety_patch_mode "${SAFETY_PATCH_MODE}"
elif [ "${TASK}" = "qna" ]; then
echo "Running qna"
python minigpt_qna.py --gpu-id "${GPU_ID}" \
    --image_path "${TASK}_attack_images/adversarial_" \
    --image_safety_patch "${IMAGE_SAFETY_PATCH}" \
    --text_safety_patch "${TEXT_SAFETY_PATCH}" \
    --output_file "${INFERENCE_FILE}" \
    --safety_patch_mode "${SAFETY_PATCH_MODE}"
else
echo "Wrong Implementation" >&2
exit 1
fi
# Score the inference output, then aggregate into the summary.
# FIX: a stray trailing '\' after '--device cuda' used to splice the
# 'python cal_metrics.py ...' line into get_metric.py's argument list,
# so cal_metrics.py never actually ran.
CUDA_VISIBLE_DEVICES=1,2 python get_metric.py --input "${INFERENCE_FILE}" \
    --output "${METRIC_FILE}" \
    --perplexity "${SUMMARY_FILE}" \
    --device cuda
python cal_metrics.py --input "${METRIC_FILE}" \
    --output "${SUMMARY_FILE}"
done
done