import streamlit as st
import torch
import bitsandbytes
import accelerate
import scipy
import copy
import time
from typing import Tuple, Dict, List, Union
from streamlit.delta_generator import DeltaGenerator
from PIL import Image
import torch.nn as nn
import pandas as pd
from my_model.detector.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities.gen_utilities import free_gpu_resources
from my_model.state_manager import StateManager
from my_model.config import inference_config as config
class InferenceRunner(StateManager):
"""
Manages the user interface and interactions for running inference using the Streamlit-based Knowledge-Based Visual
Question Answering (KBVQA) application.
This class handles image uploads, displays sample images, and facilitates the question-answering process using the
KBVQA model.
Inherits from the StateManager class.
"""
def __init__(self) -> None:
"""
Initializes the InferenceRunner instance, setting up the necessary state.
"""
super().__init__()
def answer_question(self, caption: str, detected_objects_str: str, question: str) -> Tuple[str, int]:
"""
Generates an answer to a user's question based on the image's caption and detected objects.
Args:
caption (str): Caption generated for the image.
detected_objects_str (str): String representation of detected objects in the image.
question (str): User's question about the image.
Returns:
Tuple[str, int]: A tuple containing the answer to the question and the prompt length.
"""
free_gpu_resources()
answer = st.session_state.kbvqa.generate_answer(question, caption, detected_objects_str)
prompt_length = st.session_state.kbvqa.current_prompt_length
free_gpu_resources()
return answer, prompt_length
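    # Example (hypothetical values; the detected-objects string format is an assumption):
    #   answer, prompt_length = runner.answer_question(
    #       caption="a man riding a horse on a beach",
    #       detected_objects_str="person: 0.98, horse: 0.95",
    #       question="What is the man doing?")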
def display_sample_images(self) -> None:
"""
Displays sample images as clickable thumbnails for the user to select.
Returns:
None
"""
self.col1.write("Choose from sample images:")
cols = self.col1.columns(len(config.SAMPLE_IMAGES))
for idx, sample_image_path in enumerate(config.SAMPLE_IMAGES):
with cols[idx]:
image = Image.open(sample_image_path)
image_for_display = self.resize_image(sample_image_path, 80, 80)
st.image(image_for_display)
if st.button(f'Select Sample Image {idx + 1}', key=f'sample_{idx + 1}'):
self.process_new_image(sample_image_path, image)
def handle_image_upload(self) -> None:
"""
Provides an image uploader widget for the user to upload their own images.
Returns:
None
"""
uploaded_image = self.col1.file_uploader("Or upload an Image", type=["png", "jpg", "jpeg"])
if uploaded_image is not None:
self.process_new_image(uploaded_image.name, Image.open(uploaded_image))
def display_image_and_analysis(self, image_key: str, image_data: Dict, nested_col21: DeltaGenerator,
nested_col22: DeltaGenerator) -> None:
"""
Displays the uploaded or selected image and provides an option to analyze the image.
Args:
image_key (str): Unique key identifying the image.
image_data (Dict): Data associated with the image.
nested_col21 (DeltaGenerator): Column for displaying the image.
nested_col22 (DeltaGenerator): Column for displaying the analysis button.
Returns:
None
"""
image_for_display = self.resize_image(image_data['image'], 600)
nested_col21.image(image_for_display, caption=f'Uploaded Image: {image_key[-11:]}')
self.handle_analysis_button(image_key, image_data, nested_col22)
def handle_analysis_button(self, image_key: str, image_data: Dict, nested_col22: DeltaGenerator) -> None:
"""
Provides an 'Analyze Image' button and processes the image analysis upon click.
Args:
image_key (str): Unique key identifying the image.
image_data (Dict): Data associated with the image.
nested_col22 (DeltaGenerator): Column for displaying the analysis button.
Returns:
None
"""
if not image_data['analysis_done'] or self.settings_changed or self.confidance_change:
nested_col22.text("Please click 'Analyze Image'..")
analyze_button_key = f'analyze_{image_key}_{st.session_state.detection_model}_' \
f'{st.session_state.confidence_level}'
with nested_col22:
if st.button('Analyze Image', key=analyze_button_key, on_click=self.disable_widgets,
disabled=self.is_widget_disabled):
with st.spinner('Analyzing the image...'):
caption, detected_objects_str, image_with_boxes = self.analyze_image(image_data['image'])
self.update_image_data(image_key, caption, detected_objects_str, True)
st.session_state['loading_in_progress'] = False
def handle_question_answering(self, image_key: str, image_data: Dict, nested_col22: DeltaGenerator) -> None:
"""
Manages the question-answering interface for each image.
Args:
image_key (str): Unique key identifying the image.
image_data (Dict): Data associated with the image.
nested_col22 (DeltaGenerator): Column for displaying the question-answering interface.
Returns:
None
"""
if image_data['analysis_done']:
self.display_question_answering_interface(image_key, image_data, nested_col22)
if self.settings_changed or self.confidance_change:
nested_col22.warning("Confidence level changed, please click 'Analyze Image' each time you change it.")
def display_question_answering_interface(self, image_key: str, image_data: Dict,
nested_col22: DeltaGenerator) -> None:
"""
Displays the interface for question answering, including sample questions and a custom question input.
Args:
image_key (str): Unique key identifying the image.
image_data (Dict): Data associated with the image.
nested_col22 (DeltaGenerator): The column where the interface will be displayed.
Returns:
None
"""
sample_questions = config.SAMPLE_QUESTIONS.get(image_key, [])
selected_question = nested_col22.selectbox("Select a sample question or type your own:",
["Custom question..."] + sample_questions,
key=f'sample_question_{image_key}')
# Display custom question input only if "Custom question..." is selected
question = selected_question
if selected_question == "Custom question...":
custom_question = nested_col22.text_input("Or ask your own question:", key=f'custom_question_{image_key}')
question = custom_question
self.process_question(image_key, question, image_data, nested_col22)
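        # Render the accumulated question/answer history (with prompt lengths) for this image.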
qa_history = image_data.get('qa_history', [])
for num, (q, a, p) in enumerate(qa_history):
nested_col22.text(f"Q{num + 1}: {q}\nA{num + 1}: {a}\nPrompt Length: {p}\n")
def process_question(self, image_key: str, question: str, image_data: Dict, nested_col22: DeltaGenerator) -> None:
"""
Processes the user's question, generates an answer, and updates the question-answer history.
This method checks if the question is new or if settings have changed, and if so, generates an answer using the
KBVQA model.
It then updates the question-answer history for the image.
Args:
image_key (str): Unique key identifying the image.
question (str): The question asked by the user.
image_data (Dict): Data associated with the image.
nested_col22 (DeltaGenerator): The column where the answer will be displayed.
Returns:
None
"""
qa_history = image_data.get('qa_history', [])
if question and (
question not in [q for q, _, _ in qa_history] or self.settings_changed or self.confidance_change):
if nested_col22.button('Get Answer', key=f'answer_{image_key}', disabled=self.is_widget_disabled):
answer, prompt_length = self.answer_question(image_data['caption'], image_data['detected_objects_str'],
question)
self.add_to_qa_history(image_key, question, answer, prompt_length)
def image_qa_app(self) -> None:
"""
Main application interface for image-based question answering.
This method orchestrates the display of sample images, handles image uploads, and facilitates the
question-answering process.
It iterates through each image in the session state, displaying the image and providing interfaces for image
analysis and question answering.
Returns:
None
"""
self.display_sample_images()
self.handle_image_upload()
# self.display_session_state(self.col1)
with self.col2:
for image_key, image_data in self.get_images_data().items():
with st.container():
nested_col21, nested_col22 = st.columns([0.65, 0.35])
self.display_image_and_analysis(image_key, image_data, nested_col21, nested_col22)
self.handle_question_answering(image_key, image_data, nested_col22)
def run_inference(self) -> None:
"""
Sets up widgets and manages the inference process, including model loading and reloading, based on user
interactions.
This method orchestrates the overall flow of the inference process.
Returns:
None
"""
        self.set_up_widgets()  # Inherited from the StateManager class
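        # Flags selecting which (re)load path runs once the buttons below are evaluated.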
load_fine_tuned_model = False
fine_tuned_model_already_loaded = False
reload_kbvqa = False
reload_detection_model = False
force_reload_full_model = False
# self.update_prev_state()
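        # Label the button "Reload Model" when the detection model or the answering method differs from the previously loaded one.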
st.session_state.button_label = (
"Reload Model" if (self.is_model_loaded and
st.session_state.kbvqa.detection_model != st.session_state['detection_model']) or
(st.session_state['previous_state']['method'] is not None and
st.session_state['method'] != st.session_state['previous_state']['method'])
else "Load Model"
)
#if self.is_model_loaded and self.settings_changed:
if st.session_state.button_label == "Reload Model":
self.col1.warning("Model settings have changed, please reload the model.. ")
with self.col1:
if st.session_state.method == "7b-Fine-Tuned Model" or st.session_state.method == "13b-Fine-Tuned Model":
with st.container():
nested_col11, nested_col12 = st.columns([0.5, 0.5])
if nested_col11.button(st.session_state.button_label, on_click=self.disable_widgets,
disabled=self.is_widget_disabled):
if st.session_state.button_label == "Load Model":
if self.is_model_loaded:
free_gpu_resources()
fine_tuned_model_already_loaded = True
else:
load_fine_tuned_model = True
elif st.session_state.button_label == "Reload Model" and st.session_state['method'] != \
                            st.session_state['previous_state']['method']:  # check if the model size has changed
force_reload_full_model = True
elif (self.is_model_loaded and st.session_state.kbvqa.detection_model !=
st.session_state['detection_model']):
reload_detection_model = True
if nested_col12.button("Force Reload", on_click=self.disable_widgets,
disabled=self.is_widget_disabled):
force_reload_full_model = True
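                    # Dispatch the selected (re)load action and update the session-state timing and progress flags.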
if load_fine_tuned_model:
t1 = time.time()
free_gpu_resources()
self.load_model()
st.session_state['time_taken_to_load_model'] = int(time.time() - t1)
st.session_state['loading_in_progress'] = False
elif fine_tuned_model_already_loaded:
free_gpu_resources()
self.col1.text("Model already loaded and no settings were changed:)")
st.session_state['loading_in_progress'] = False
elif reload_detection_model:
free_gpu_resources()
self.reload_detection_model()
st.session_state['loading_in_progress'] = False
elif force_reload_full_model:
free_gpu_resources()
t1 = time.time()
self.force_reload_model()
st.session_state['time_taken_to_load_model'] = int(time.time() - t1)
st.session_state['loading_in_progress'] = False
st.session_state['model_loaded'] = True
elif st.session_state.method == "Vision-Language Embeddings Alignment":
self.col1.warning(
                    f'The model using {st.session_state.method} is designed but requires large-scale data and '
                    f'multiple high-end GPUs; its implementation will be explored in the future.')
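            # Show the selected and previous method and, when a model is loaded, the underlying KBVQA model name.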
st.write(st.session_state['method'])
st.write(st.session_state['previous_state']['method'])
if st.session_state['kbvqa'] is not None:
st.write(st.session_state['kbvqa'].kbvqa_model_name)
if self.is_model_loaded:
free_gpu_resources()
st.session_state['loading_in_progress'] = False
self.update_prev_state()
        self.image_qa_app()  # This is the main Q/A application.
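

# --- Usage sketch (illustrative; not part of the original application flow) ---
# A minimal example of how InferenceRunner might be wired into a Streamlit entry
# point. The `main` function and the __main__ guard below are assumptions for
# illustration; StateManager is expected to have prepared the page layout and
# session state before run_inference() renders the widgets.
def main() -> None:
    runner = InferenceRunner()   # sets up the inherited StateManager state
    runner.run_inference()       # renders widgets and drives the image Q/A flow


if __name__ == "__main__":
    main()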