Spaces:
Runtime error
Runtime error
ziyadsuper2017
committed on
Commit
•
b557897
1
Parent(s):
aa719f7
Update app.py
Browse files
app.py
CHANGED
@@ -1,113 +1,92 @@
|
|
1 |
import streamlit as st
|
2 |
-
|
3 |
-
import
|
4 |
-
|
5 |
|
6 |
-
# Database setup
|
7 |
-
conn = sqlite3.connect('chat_history.db')
|
8 |
-
c = conn.cursor()
|
9 |
|
10 |
-
|
11 |
-
CREATE TABLE IF NOT EXISTS history
|
12 |
-
(role TEXT, message TEXT)
|
13 |
-
''')
|
14 |
|
15 |
-
#
|
16 |
-
api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
|
17 |
genai.configure(api_key=api_key)
|
18 |
|
19 |
generation_config = {
|
20 |
-
|
21 |
-
|
22 |
}
|
23 |
|
24 |
safety_settings = []
|
25 |
|
26 |
-
#
|
27 |
-
st.
|
|
|
|
|
|
|
|
|
28 |
|
29 |
-
|
|
|
|
|
|
|
30 |
|
31 |
-
|
32 |
-
|
33 |
-
else:
|
34 |
-
role = "model"
|
35 |
|
36 |
-
|
37 |
-
|
38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
|
40 |
-
#
|
41 |
-
user_input = st.text_area("", height=5)
|
42 |
-
if user_input:
|
43 |
-
chat_history.append({"role": role, "parts": [{"text": user_input}]})
|
44 |
-
if role == "user":
|
45 |
-
|
46 |
-
# Model code
|
47 |
-
model_name = "gemini-pro"
|
48 |
-
model = genai.GenerativeModel(
|
49 |
-
model_name=model_name,
|
50 |
-
generation_config=generation_config,
|
51 |
-
safety_settings=safety_settings
|
52 |
-
)
|
53 |
-
|
54 |
-
response = model.generate_content(chat_history)
|
55 |
-
response_text = response.text
|
56 |
-
chat_history.append({"role": "model", "parts": [{"text": response_text}]})
|
57 |
-
|
58 |
-
st.session_state["chat_history"] = chat_history
|
59 |
-
|
60 |
-
for message in chat_history:
|
61 |
-
r, t = message["role"], message["parts"][0]["text"]
|
62 |
-
st.markdown(f"**{r.title()}:** {t}")
|
63 |
-
if st.button("Display History"):
|
64 |
-
c.execute("SELECT * FROM history")
|
65 |
-
rows = c.fetchall()
|
66 |
-
|
67 |
-
for row in rows:
|
68 |
-
st.markdown(f"**{row[0].title()}:** {row[1]}")
|
69 |
-
|
70 |
-
# Save chat history to database
|
71 |
-
for message in chat_history:
|
72 |
-
c.execute("INSERT INTO history VALUES (?, ?)",
|
73 |
-
(message["role"], message["parts"][0]["text"]))
|
74 |
-
conn.commit()
|
75 |
-
|
76 |
-
conn.close()
|
77 |
-
|
78 |
-
# Separate section for image uploading
|
79 |
-
st.title("Image Description Generator")
|
80 |
-
|
81 |
-
# Change the file_uploader to accept multiple files
|
82 |
-
uploaded_files = st.file_uploader("Upload one or more images here", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
|
83 |
-
|
84 |
-
# Text input for asking questions about the images
|
85 |
-
image_question = st.text_input("Ask something about the images:")
|
86 |
-
|
87 |
-
# Check if the user has entered a question
|
88 |
-
if image_question:
|
89 |
-
# Create a list of image parts from the uploaded files
|
90 |
image_parts = []
|
91 |
for uploaded_file in uploaded_files:
|
|
|
|
|
|
|
92 |
image_parts.append({
|
93 |
"mime_type": uploaded_file.type,
|
94 |
-
"data":
|
95 |
})
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
109 |
|
110 |
-
#
|
111 |
-
for
|
112 |
-
|
113 |
-
st.
|
|
|
1 |
import streamlit as st
|
2 |
+
from PIL import Image
|
3 |
+
import io
|
4 |
+
import base64
|
5 |
|
|
|
|
|
|
|
6 |
|
7 |
+
import google.generativeai as genai
|
|
|
|
|
|
|
8 |
|
9 |
+
# --- Gemini API configuration ---
# SECURITY NOTE(review): this API key is hard-coded and has been committed to a
# public repository — it should be revoked and supplied via secure storage.
# We read the environment first and fall back to the embedded key only so
# existing deployments keep working.
import os  # local import; the file's import block is outside this edit

api_key = os.environ.get("GOOGLE_API_KEY", "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM")
genai.configure(api_key=api_key)

# Generation parameters shared by every model call below.
generation_config = {
    "temperature": 0.9,         # fairly creative sampling
    "max_output_tokens": 3000,  # cap on reply length
}

# Empty list = keep the API's default safety thresholds.
safety_settings = []
|
19 |
|
20 |
+
# Keep the conversation in session state so it survives Streamlit reruns.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# --- Page layout ---
st.title("Gemini Chatbot")

# Render the conversation so far as read-only text areas.
for role, text in st.session_state.chat_history:
    st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True)

# Message box for the next user turn.
user_input = st.text_input("Enter your message here:")
|
|
|
|
|
34 |
|
35 |
+
# Image attachments for multimodal (vision) requests.
uploaded_files = st.file_uploader(
    "Upload images:",
    type=["png", "jpg", "jpeg"],
    accept_multiple_files=True,
)
|
37 |
+
|
38 |
+
def get_image_base64(image):
    """Serialize *image* to a JPEG data URL.

    The image is re-encoded as JPEG into an in-memory buffer, and the raw
    bytes are base64-encoded into a ``data:image/jpeg;base64,...`` string.
    """
    with io.BytesIO() as jpeg_buffer:
        image.save(jpeg_buffer, format="JPEG")
        encoded = base64.b64encode(jpeg_buffer.getvalue()).decode()
    return f"data:image/jpeg;base64,{encoded}"
|
44 |
+
|
45 |
+
# When the 'Send' button is clicked, record the user's turn (text and/or
# images), call the appropriate Gemini model, and store the reply.
if st.button("Send"):
    # Save user input to the chat history if it's not empty.
    if user_input.strip():
        st.session_state.chat_history.append(("user", user_input))

    # Convert each uploaded image into an inline-data part for the API.
    image_parts = []
    for uploaded_file in uploaded_files:
        bytes_data = uploaded_file.read()
        image = Image.open(io.BytesIO(bytes_data))
        image_base64 = get_image_base64(image)
        # NOTE(review): the Gemini SDK expects "data" to be raw bytes or a
        # plain base64 string; the "data:image/jpeg;base64," prefix produced
        # by get_image_base64 may be rejected — verify against the SDK docs.
        image_parts.append({
            "mime_type": uploaded_file.type,
            "data": image_base64
        })
        st.session_state.chat_history.append(("user", f"Uploaded image: {uploaded_file.name}"))

    # Prepare the prompts for the model.
    prompts = []
    if user_input.strip():
        prompts.append({"role": "user", "parts": [{"text": user_input}]})
    for image_part in image_parts:
        prompts.append({"role": "user", "parts": [image_part]})

    # Only call the API when there is something to send — previously a blank
    # "Send" click passed an empty prompt list straight to the model.
    if prompts:
        # Use the multimodal model only when images are attached.
        if image_parts:
            model = genai.GenerativeModel(
                model_name='gemini-pro-vision',
                generation_config=generation_config,
                safety_settings=safety_settings
            )
        else:
            model = genai.GenerativeModel(
                model_name='gemini-pro',
                generation_config=generation_config,
                safety_settings=safety_settings
            )
        response = model.generate_content(prompts)
        # BUG FIX: GenerateContentResponse is not subscriptable — the previous
        # `response['text']` raised TypeError at runtime (the Space's
        # "Runtime error").  Use the `.text` accessor instead.
        response_text = response.text

        # Save the model response to the chat history.
        st.session_state.chat_history.append(("model", response_text))
|
88 |
|
89 |
+
# Display the updated chat history.
# NOTE(review): this repeats the render loop from the top of the script, so
# after a send each earlier message appears twice in one run — confirm
# whether the top-of-page render should be removed instead.
for speaker, message_text in st.session_state.chat_history:
    st.text_area(f"{speaker.title()} says:", value=message_text, height=75, disabled=True)
|