Update my_model/tabs/run_inference.py
my_model/tabs/run_inference.py (CHANGED, +10 -10)
@@ -48,7 +48,7 @@ class InferenceRunner(StateManager):
         """
         free_gpu_resources()
         answer = model.generate_answer(question, caption, detected_objects_str)
-        prompt_length =
+        prompt_length = model.current_prompt_length
         free_gpu_resources()
         return answer, prompt_length
 
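The hunk above replaces the previous prompt_length assignment with the value the model records during generation, so answer_question can return the answer and the prompt length together. Below is a minimal sketch of a wrapper that exposes such an attribute, assuming a Hugging Face-style model and tokenizer; the class name, prompt template, and generation settings are illustrative, since the diff only confirms generate_answer(question, caption, detected_objects_str) and current_prompt_length.

# Illustrative sketch only: assumes a Hugging Face-style model/tokenizer pair.
# The diff confirms generate_answer(...) and current_prompt_length; everything
# else here (class name, prompt template, generation settings) is hypothetical.
class AnswerModelSketch:
    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer
        self.current_prompt_length = 0  # refreshed on every generate_answer call

    def generate_answer(self, question: str, caption: str, detected_objects_str: str) -> str:
        prompt = (
            f"Caption: {caption}\n"
            f"Detected objects: {detected_objects_str}\n"
            f"Question: {question}\nAnswer:"
        )
        inputs = self.tokenizer(prompt, return_tensors="pt")
        # Record the tokenized prompt length so callers (answer_question above)
        # can return it alongside the generated answer.
        self.current_prompt_length = inputs["input_ids"].shape[-1]
        output_ids = self.model.generate(**inputs, max_new_tokens=32)
        # Decode only the tokens generated after the prompt.
        return self.tokenizer.decode(
            output_ids[0, self.current_prompt_length:], skip_special_tokens=True
        )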
@@ -113,17 +113,17 @@ class InferenceRunner(StateManager):
                 # Use the selected sample question or the custom question
                 question = custom_question if selected_question == "Custom question..." else selected_question
 
-                if not question:
-                    nested_col22.warning("Please select or enter a question.")
+                # if not question:
+                #     nested_col22.warning("Please select or enter a question.")
+                # else:
+                if question in [q for q, _ in qa_history]:
+                    nested_col22.warning("This question has already been answered.")
                 else:
-                    if question in [q for q, _ in qa_history]:
-                        nested_col22.warning("This question has already been answered.")
-                    else:
-                        if nested_col22.button('Get Answer', key=f'answer_{image_key}', disabled=self.is_widget_disabled):
+                    if nested_col22.button('Get Answer', key=f'answer_{image_key}', disabled=self.is_widget_disabled):
 
-                            answer, prompt_length = self.answer_question(image_data['caption'], image_data['detected_objects_str'], question, kbvqa)
-                            st.session_state['loading_in_progress'] = False
-                            self.add_to_qa_history(image_key, question, answer, prompt_length)
+                        answer, prompt_length = self.answer_question(image_data['caption'], image_data['detected_objects_str'], question, kbvqa)
+                        st.session_state['loading_in_progress'] = False
+                        self.add_to_qa_history(image_key, question, answer, prompt_length)
 
                 # Display Q&A history and prompts lengths for each image
                 for num, (q, a, p) in enumerate(qa_history):
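This hunk comments out the empty-question guard and dedents the rest: a duplicate-question check against qa_history, then a "Get Answer" button that calls answer_question, clears the loading flag, and appends to the history. Below is a small standalone sketch of that bookkeeping, assuming qa_history holds the (question, answer, prompt_length) triples that the display loop unpacks; the helper names are hypothetical, not the app's actual methods. Note that with triples, the two-element unpack in [q for q, _ in qa_history] would raise a ValueError, so the sketch matches on the question text with a wildcard unpack.

# Standalone sketch, not the app's implementation: illustrates the duplicate
# check and the history append, assuming (question, answer, prompt_length) triples.
QAEntry = tuple[str, str, int]

def already_answered(question: str, qa_history: list[QAEntry]) -> bool:
    # Match on the question text only; *_ absorbs answer and prompt_length,
    # so the check works regardless of the tuple arity.
    return any(q == question for q, *_ in qa_history)

def record_answer(qa_history: list[QAEntry], question: str, answer: str, prompt_length: int) -> None:
    if not already_answered(question, qa_history):
        qa_history.append((question, answer, prompt_length))

In the tab itself the history appears to be kept per image, since add_to_qa_history also takes image_key, so the same check would run against that image's own list.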