ziyadsuper2017 committed
Commit b557897
1 parent: aa719f7

Update app.py

Files changed (1):
  1. app.py +71 -92

app.py CHANGED
@@ -1,113 +1,92 @@
 import streamlit as st
-import google.generativeai as genai
-import sqlite3
-from streamlit import file_uploader

-# Database setup
-conn = sqlite3.connect('chat_history.db')
-c = conn.cursor()

-c.execute('''
-    CREATE TABLE IF NOT EXISTS history
-    (role TEXT, message TEXT)
-''')

-# Generative AI setup
-api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
 genai.configure(api_key=api_key)

 generation_config = {
-    "temperature": 0.9,
-    "max_output_tokens": 3000
 }

 safety_settings = []

-# Streamlit UI
-st.title("Chatbot")

-chat_history = st.session_state.get("chat_history", [])

-if len(chat_history) % 2 == 0:
-    role = "user"
-else:
-    role = "model"

-for message in chat_history:
-    r, t = message["role"], message["parts"][0]["text"]
-    st.markdown(f"**{r.title()}:** {t}")

-# Use text_area for multiline input
-user_input = st.text_area("", height=5)
-if user_input:
-    chat_history.append({"role": role, "parts": [{"text": user_input}]})
-    if role == "user":

-        # Model code
-        model_name = "gemini-pro"
-        model = genai.GenerativeModel(
-            model_name=model_name,
-            generation_config=generation_config,
-            safety_settings=safety_settings
-        )

-        response = model.generate_content(chat_history)
-        response_text = response.text
-        chat_history.append({"role": "model", "parts": [{"text": response_text}]})

-    st.session_state["chat_history"] = chat_history

-    for message in chat_history:
-        r, t = message["role"], message["parts"][0]["text"]
-        st.markdown(f"**{r.title()}:** {t}")

-if st.button("Display History"):
-    c.execute("SELECT * FROM history")
-    rows = c.fetchall()

-    for row in rows:
-        st.markdown(f"**{row[0].title()}:** {row[1]}")

-# Save chat history to database
-for message in chat_history:
-    c.execute("INSERT INTO history VALUES (?, ?)",
-              (message["role"], message["parts"][0]["text"]))
-conn.commit()

-conn.close()

-# Separate section for image uploading
-st.title("Image Description Generator")

-# Change the file_uploader to accept multiple files
-uploaded_files = st.file_uploader("Upload one or more images here", type=["png", "jpg", "jpeg"], accept_multiple_files=True)

-# Text input for asking questions about the images
-image_question = st.text_input("Ask something about the images:")

-# Check if the user has entered a question
-if image_question:
-    # Create a list of image parts from the uploaded files
     image_parts = []
     for uploaded_file in uploaded_files:
         image_parts.append({
             "mime_type": uploaded_file.type,
-            "data": uploaded_file.read()
         })

-    # Create a prompt parts list with the question and the image parts
-    prompt_parts = [image_question] + image_parts

-    # Use the gemini-pro-vision model to generate a response
-    model = genai.GenerativeModel(
-        model_name="gemini-pro-vision",
-        generation_config=generation_config,
-        safety_settings=safety_settings
-    )

-    response = model.generate_content(prompt_parts)
-    st.markdown(f"**Model's answer:** {response.text}")

-# Loop through the uploaded files and display them
-for uploaded_file in uploaded_files:
-    # Display the image
-    st.image(uploaded_file)
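This commit also removes the SQLite layer: the rewritten app.py below keeps history only in st.session_state, so conversations are lost when the session ends. If persistence is still wanted, here is a minimal sketch that re-attaches it to the new tuple-based history, reusing the schema the removed code created (the save_message helper is hypothetical, not part of this commit):

```python
import sqlite3

# Same database file and schema as the removed code: (role TEXT, message TEXT).
# check_same_thread=False because Streamlit may rerun the script on another thread.
conn = sqlite3.connect("chat_history.db", check_same_thread=False)
conn.execute("CREATE TABLE IF NOT EXISTS history (role TEXT, message TEXT)")

def save_message(role: str, text: str) -> None:
    # Parameterized insert, mirroring the removed INSERT statement.
    conn.execute("INSERT INTO history VALUES (?, ?)", (role, text))
    conn.commit()

# Usage inside the Send handler, after each chat_history.append:
#   save_message("user", user_input)
#   save_message("model", response_text)
```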
 
 import streamlit as st
+from PIL import Image
+import io
+import base64

+import google.generativeai as genai

+# Configure the API key (should be set as an environment variable or secure storage in production)
+api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
 genai.configure(api_key=api_key)

 generation_config = {
+    "temperature": 0.9,
+    "max_output_tokens": 3000
 }

 safety_settings = []

+# Initialize session state
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = []
+
+# UI layout
+st.title("Gemini Chatbot")

+# Display the chat history
+for message in st.session_state.chat_history:
+    role, text = message
+    st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True)

+# Text input for the user to send messages
+user_input = st.text_input("Enter your message here:")

+# File uploader for images
+uploaded_files = st.file_uploader("Upload images:", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
+
+# Function to convert image to base64
+def get_image_base64(image):
+    buffered = io.BytesIO()
+    image.save(buffered, format="JPEG")
+    img_str = base64.b64encode(buffered.getvalue()).decode()
+    return f"data:image/jpeg;base64,{img_str}"
+
+# When the 'Send' button is clicked, process the input and generate a response
+if st.button("Send"):
+    # Save user input to the chat history if it's not empty
+    if user_input.strip():
+        st.session_state.chat_history.append(("user", user_input))

+    # Process and save uploaded images to the chat history
     image_parts = []
     for uploaded_file in uploaded_files:
+        bytes_data = uploaded_file.read()
+        image = Image.open(io.BytesIO(bytes_data))
+        image_base64 = get_image_base64(image)
         image_parts.append({
             "mime_type": uploaded_file.type,
+            "data": image_base64
         })
+        st.session_state.chat_history.append(("user", f"Uploaded image: {uploaded_file.name}"))
+
+    # Prepare the prompts for the model
+    prompts = []
+    if user_input.strip():
+        prompts.append({"role": "user", "parts": [{"text": user_input}]})
+    for image_part in image_parts:
+        prompts.append({"role": "user", "parts": [image_part]})
+
+    # Generate the response
+    if image_parts:
+        model = genai.GenerativeModel(
+            model_name='gemini-pro-vision',
+            generation_config=generation_config,
+            safety_settings=safety_settings
+        )
+    else:
+        model = genai.GenerativeModel(
+            model_name='gemini-pro',
+            generation_config=generation_config,
+            safety_settings=safety_settings
+        )
+    response = model.generate_content(prompts)
+    response_text = response.text  # the SDK exposes the reply via the .text attribute; subscripting the response raises a TypeError
+
+    # Save the model response to the chat history
+    st.session_state.chat_history.append(("model", response_text))

+# Display the updated chat history
+for message in st.session_state.chat_history:
+    role, text = message
+    st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True)
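The comment introduced in the new file concedes that the API key "should be set as an environment variable or secure storage in production", yet both versions of app.py hard-code the literal key, leaving it exposed in the repository history (it should be revoked and rotated). A minimal sketch of the environment-variable pattern, assuming a GOOGLE_API_KEY variable name (illustrative, not defined by this repo); Streamlit's st.secrets store is an equivalent option:

```python
import os

import streamlit as st
import google.generativeai as genai

# GOOGLE_API_KEY is an assumed variable name; adjust to the deployment's convention.
api_key = os.getenv("GOOGLE_API_KEY")

if not api_key:
    st.error("No API key configured; set the GOOGLE_API_KEY environment variable.")
    st.stop()  # end this script run rather than calling the API without credentials

genai.configure(api_key=api_key)
```

Two smaller review notes on the new version: the chat history is rendered both before and after the Send handler, so each message appears twice per rerun, and get_image_base64 returns a `data:image/jpeg;base64,...` URI while the SDK's inline-image `data` field expects raw image bytes, so passing `bytes_data` (or the PIL Image object itself, which the SDK also accepts) is likely the safer input.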