File size: 2,070 Bytes
5ca4e86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
# Orchestrates safety-patch inference + metric evaluation for InstructBLIP
# across all task / patch-mode combinations.
#
# Strict mode: abort on the first failing command, on use of an unset
# variable, and on any failure inside a pipeline. Without this, a failed
# inference run would be silently ignored and metrics would be computed
# from missing or stale output files.
set -euo pipefail
set -x  # trace every command for debugging/log inspection

readonly MODEL=instructblip
readonly MODEL_PATH=/workingdir/models_hf/lmsys/vicuna-13b-v1.1
readonly GPU_ID=2

# Make sure the per-model output directory exists before anything writes to it.
mkdir -p "outputs/${MODEL}"

# For every (task, patch-mode) pair: run the matching inference script,
# then compute and summarize metrics from its output.
for TASK in unconstrained constrained qna; do
  for SAFETY_PATCH_MODE in heuristic optimized; do

    # Per-run artifact paths, keyed by task and patch mode.
    INFERENCE_FILE="outputs/${MODEL}/inference_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"
    METRIC_FILE="outputs/${MODEL}/metric_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"
    SUMMARY_FILE="outputs/${MODEL}/summary_${TASK}_${MODEL}_${SAFETY_PATCH_MODE}"

    IMAGE_SAFETY_PATCH="safety_patch/safety_patch.pt"
    TEXT_SAFETY_PATCH="safety_patch/text_patch_${SAFETY_PATCH_MODE}"

    # All expansions are quoted (SC2086): paths or modes containing spaces
    # or glob characters must not be word-split by the shell.
    # Each inference run is checked explicitly so that metric computation
    # never proceeds on a missing/partial inference file.
    if [ "${TASK}" = "constrained" ]; then
      echo "Running constrained"
      python instructblip_constrained_inference.py --gpu-id "${GPU_ID}" \
        --model_path "${MODEL_PATH}" \
        --image_safety_patch "${IMAGE_SAFETY_PATCH}" \
        --text_safety_patch "${TEXT_SAFETY_PATCH}" \
        --output_file "${INFERENCE_FILE}" \
        --safety_patch_mode "${SAFETY_PATCH_MODE}" \
        || { echo "constrained inference failed (mode=${SAFETY_PATCH_MODE})" >&2; exit 1; }

    elif [ "${TASK}" = "unconstrained" ]; then
      echo "Running unconstrained"

      python instructblip_unconstrained_inference.py --gpu-id "${GPU_ID}" \
        --model_path "${MODEL_PATH}" \
        --image_safety_patch "${IMAGE_SAFETY_PATCH}" \
        --text_safety_patch "${TEXT_SAFETY_PATCH}" \
        --output_file "${INFERENCE_FILE}" \
        --safety_patch_mode "${SAFETY_PATCH_MODE}" \
        || { echo "unconstrained inference failed (mode=${SAFETY_PATCH_MODE})" >&2; exit 1; }

    elif [ "${TASK}" = "qna" ]; then
      echo "Running qna"

      # NOTE: qna takes --image_path instead of --model_path; the prefix is
      # completed by the script itself (adversarial_<index> presumably —
      # confirm against instructblip_qna.py).
      python instructblip_qna.py --gpu-id "${GPU_ID}" \
        --image_path "${TASK}_attack_images/adversarial_" \
        --image_safety_patch "${IMAGE_SAFETY_PATCH}" \
        --text_safety_patch "${TEXT_SAFETY_PATCH}" \
        --output_file "${INFERENCE_FILE}" \
        --safety_patch_mode "${SAFETY_PATCH_MODE}" \
        || { echo "qna inference failed (mode=${SAFETY_PATCH_MODE})" >&2; exit 1; }
    else
      # Unreachable given the loop list above; kept as a guard against edits.
      echo "Unknown TASK: ${TASK}" >&2
      exit 1
    fi

    # Score the generated inference file on the selected GPU.
    CUDA_VISIBLE_DEVICES="${GPU_ID}" python get_metric.py --input "${INFERENCE_FILE}" \
      --output "${METRIC_FILE}" \
      --perplexity "${SUMMARY_FILE}" \
      --device cuda \
      --load_existing_generation

    # Aggregate per-sample metrics into the final summary.
    python cal_metrics.py --input "${METRIC_FILE}" \
      --output "${SUMMARY_FILE}"

  done
done