Update my_model/config/kbvqa_config.py
my_model/config/kbvqa_config.py
CHANGED
@@ -3,9 +3,9 @@ import os
 # Model and Tokenizer Settings
 KBVQA_MODEL_NAME_7b = "m7mdal7aj/fine_tuned_llama_2_7b_chat_OKVQA"
 KBVQA_MODEL_NAME_13b = "m7mdal7aj/fine_tuned_llama_2_13b_chat_OKVQA"
-QUANTIZATION = '4bit'
-MAX_CONTEXT_WINDOW = 4000
-ADD_EOS_TOKEN = False
+QUANTIZATION = '4bit'  # '8bit' can be used as well.
+MAX_CONTEXT_WINDOW = 4000  # keeping 96 tokens as a margin
+ADD_EOS_TOKEN = False  # We do not need the model to add the default special tokens, because we already added them, along with new extra ones, in the prompt engineering module.
 TRUST_REMOTE = False
 USE_FAST = True
 LOW_CPU_MEM_USAGE = True
@@ -13,5 +13,5 @@ LOW_CPU_MEM_USAGE = True
 # Access Token
 HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
 
-# SYS Prompt
+# SYS Prompt Designed
 SYSTEM_PROMPT = "You are a helpful, respectful and honest assistant for visual question answering. You are provided with a caption of an image and a list of objects detected in the image, along with their bounding boxes and level of certainty; you will output an answer to the given question in no more than one sentence. Use logical reasoning to reach the answer, but do not output your reasoning process unless asked for it. If provided, you will use the [CAP] and [/CAP] tags to indicate the beginning and end of the caption respectively. If provided, you will use the [OBJ] and [/OBJ] tags to indicate the beginning and end of the list of detected objects in the image along with their bounding boxes respectively. If provided, you will use the [QES] and [/QES] tags to indicate the beginning and end of the question respectively."
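For context, here is a minimal sketch of how these settings might be consumed when loading the model and building prompts, assuming the standard transformers/bitsandbytes APIs. The load_kbvqa_model and build_prompt helpers are hypothetical, added here only for illustration; they are not the repository's actual loading or prompt-engineering code.

# Hypothetical sketch: wiring kbvqa_config values into a model/tokenizer loader.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

from my_model.config import kbvqa_config as config


def load_kbvqa_model(model_name: str = config.KBVQA_MODEL_NAME_13b):
    # Map the QUANTIZATION string ('4bit' or '8bit') to a bitsandbytes config.
    if config.QUANTIZATION == '4bit':
        bnb_config = BitsAndBytesConfig(load_in_4bit=True,
                                        bnb_4bit_compute_dtype=torch.float16)
    elif config.QUANTIZATION == '8bit':
        bnb_config = BitsAndBytesConfig(load_in_8bit=True)
    else:
        bnb_config = None  # full precision

    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        use_fast=config.USE_FAST,
        add_eos_token=config.ADD_EOS_TOKEN,
        trust_remote_code=config.TRUST_REMOTE,
        token=config.HUGGINGFACE_TOKEN,
    )

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        low_cpu_mem_usage=config.LOW_CPU_MEM_USAGE,
        trust_remote_code=config.TRUST_REMOTE,
        token=config.HUGGINGFACE_TOKEN,
        device_map="auto",
    )
    return model, tokenizer


def build_prompt(question: str, caption: str = None, objects: str = None) -> str:
    # Wrap each input in the tags the SYSTEM_PROMPT describes: [CAP], [OBJ], [QES].
    parts = [config.SYSTEM_PROMPT]
    if caption:
        parts.append(f"[CAP]{caption}[/CAP]")
    if objects:
        parts.append(f"[OBJ]{objects}[/OBJ]")
    parts.append(f"[QES]{question}[/QES]")
    return "\n".join(parts)

MAX_CONTEXT_WINDOW = 4000 would then cap the combined prompt and generation length, leaving 96 tokens of headroom below Llama-2's 4096-token context limit.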