import streamlit as st
import torch
import bitsandbytes  # needed by transformers for 8-bit model loading
import accelerate    # needed by transformers for automatic device placement (device_map)
import scipy
from PIL import Image
import torch.nn as nn
from transformers import Blip2Processor, Blip2ForConditionalGeneration, InstructBlipProcessor, InstructBlipForConditionalGeneration
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities import free_gpu_resources
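
# Streamlit demo app for knowledge-based visual question answering (KB-VQA):
# a navigation sidebar switches between the home page, OK-VQA dataset analysis,
# evaluation results, image-based Q&A inference, the dissertation report
# download, and an object-detection section.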



# Placeholder implementations; note that these definitions shadow the my_model
# imports above until the real modules are wired in.
def load_caption_model():
    st.write("Placeholder for load_caption_model function")
    return None, None

def answer_question(image, question, model, processor):
    return "Placeholder answer for the question"

def detect_and_draw_objects(image, model_name, threshold):
    return image, "Detected objects"

def get_caption(image):
    return "Generated caption for the image"

def free_gpu_resources():
    pass

# Main function
def main():
    st.sidebar.title("Navigation")
    selection = st.sidebar.radio("Go to", ["Home", "Dataset Analysis", "Evaluation Results", "Run Inference", "Dissertation Report", "Object Detection"])

    if selection == "Home":
        display_home()
    elif selection == "Dissertation Report":
        display_dissertation_report()
    elif selection == "Evaluation Results":
        display_evaluation_results()
    elif selection == "Dataset Analysis":
        display_dataset_analysis()
    elif selection == "Run Inference":
        run_inference()
    elif selection == "Object Detection":
        run_object_detection()

def display_home():
    st.title("MultiModal Learning for Knowledge-Based Visual Question Answering")
    st.write("Home page content goes here...")

def display_dissertation_report():
    st.title("Dissertation Report")
    st.write("Click the link below to view the PDF.")
    st.download_button(
        label="Download PDF",
        data=open("Files/Dissertation Report.pdf", "rb"),
        file_name="example.pdf",
        mime="application/octet-stream"
    )

def display_evaluation_results():
    st.title("Evaluation Results")
    st.write("This is a Place Holder until the contents are uploaded.")

def display_dataset_analysis():
    st.title("OK-VQA Dataset Analysis")
    st.write("This is a Place Holder until the contents are uploaded.")

def run_inference():
    st.title("Image-based Q&A App")
    # Image-based Q&A functionality
    image_qa_app()

def run_object_detection():
    st.title("Object Detection")
    # Object detection functionality is not implemented yet; an illustrative
    # sketch using the placeholder detect_and_draw_objects follows.
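    # Illustrative sketch (not the project's implementation): wire this section
    # to the detect_and_draw_objects placeholder defined above. The uploader key,
    # model name, and threshold widget are assumptions for demonstration only.
    uploaded_image = st.file_uploader("Upload an image for detection", type=["png", "jpg", "jpeg"], key="detection_image")
    threshold = st.slider("Detection confidence threshold", 0.0, 1.0, 0.5)
    if uploaded_image is not None:
        image = Image.open(uploaded_image)
        annotated_image, detected_objects = detect_and_draw_objects(image, "detic", threshold)
        st.image(annotated_image, caption="Detected objects", use_column_width=True)
        st.write(detected_objects)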

def image_qa_app():
    # Initialize session state for storing images and their Q&A histories
    if 'images_qa_history' not in st.session_state:
        st.session_state['images_qa_history'] = []

    # Button to clear all data
    if st.button('Clear All'):
        st.session_state['images_qa_history'] = []
        st.experimental_rerun()

    # Image uploader
    uploaded_image = st.file_uploader("Upload an Image", type=["png", "jpg", "jpeg"])

    if uploaded_image is not None:
        image = Image.open(uploaded_image)
        current_image_key = uploaded_image.name  # Use image name as a unique key

        # Check if the image is already in the history
        if not any(info['image_key'] == current_image_key for info in st.session_state['images_qa_history']):
            st.session_state['images_qa_history'].append({
                'image_key': current_image_key,
                'image': image,
                'qa_history': []
            })

        # Display all images and their Q&A histories
        for image_info in st.session_state['images_qa_history']:
            st.image(image_info['image'], caption='Uploaded Image.', use_column_width=True)
            for q, a in image_info['qa_history']:
                st.text(f"Q: {q}\nA: {a}\n")

            # If the current image is being processed
            if image_info['image_key'] == current_image_key:
                # Unique keys for each widget
                question_key = f"question_{current_image_key}"
                button_key = f"button_{current_image_key}"

                # Question input for the current image
                question = st.text_input("Ask a question about this image:", key=question_key)

                # Get Answer button for the current image
                if st.button('Get Answer', key=button_key):
                    # Process the image and question
                    answer = get_answer(image_info['image'], question)  # placeholder implementation defined below
                    image_info['qa_history'].append((question, answer))
                    st.experimental_rerun()  # Rerun to update the display

def get_answer(image, question):
    # Implement the logic to process the image and question, and return the answer
    return "Sample answer based on the image and question."

if __name__ == "__main__":
    main()
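
# To launch locally (the filename is an assumption): streamlit run app.py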