ziyadsuper2017 committed on
Commit
ce73371
1 Parent(s): f7d3fbb

Update app.py

Files changed (1)
  1. app.py +55 -55
app.py CHANGED
@@ -8,7 +8,7 @@ import uuid
 import google.generativeai as genai
 
 # Configure the API key (should be set as an environment variable or secure storage in production)
-api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM" # Replace with your actual API key
+api_key = "your_actual_api_key" # Replace with your actual API key
 genai.configure(api_key=api_key)
 
 generation_config = genai.GenerationConfig(
@@ -19,10 +19,12 @@ generation_config = genai.GenerationConfig(
 safety_settings = []
 
 # Initialize session state for chat history and file uploader key
+if 'chat_history' not in st.session_state:
+    st.session_state['chat_history'] = []
 if 'file_uploader_key' not in st.session_state:
     st.session_state['file_uploader_key'] = str(uuid.uuid4())
-if 'using_vision_model' not in st.session_state:
-    st.session_state['using_vision_model'] = False
+if 'last_model_used' not in st.session_state:
+    st.session_state['last_model_used'] = 'text'
 
 # UI layout
 st.title("Gemini Chatbot")
@@ -37,65 +39,50 @@ def get_image_base64(image):
 
 # Function to clear conversation
 def clear_conversation():
-    st.session_state['using_vision_model'] = False
+    st.session_state['chat_history'] = []
     st.session_state['file_uploader_key'] = str(uuid.uuid4())
+    st.session_state['last_model_used'] = 'text'
 
 # Function to send message and clear input
 def send_message():
     user_input = st.session_state.user_input
     uploaded_files = st.session_state.uploaded_files
 
-    # Check if an image has been uploaded or if we are continuing with the vision model
-    if uploaded_files or st.session_state['using_vision_model']:
-        st.session_state['using_vision_model'] = True
-        image_prompt = None
-
-        # Create the prompt for the vision model
-        if uploaded_files:
-            image = Image.open(uploaded_files[0]) # Only take the first image for simplicity
-            image_base64 = get_image_base64(image)
-            image_prompt = {
-                "role": "user",
-                "parts": [{"mime_type": uploaded_files[0].type, "data": image_base64}]
-            }
-        elif user_input:
-            # Text input after using vision model
-            image_prompt = {
-                "role": "user",
-                "parts": [{"text": user_input}]
-            }
-
-        # Use Gemini Pro Vision model for image-based interaction
-        vision_model = genai.GenerativeModel(
-            model_name='gemini-pro-vision',
-            generation_config=generation_config,
-            safety_settings=safety_settings
-        )
-
-        response = vision_model.generate_content([image_prompt])
-        response_text = response.text if hasattr(response, "text") else "No response text found."
-        st.write("AI: " + response_text)
-
-    # If no images are uploaded and we haven't used the vision model yet, use Gemini Pro model
-    elif user_input and not st.session_state['using_vision_model']:
-        text_prompt = {
-            "role": "user",
-            "parts": [{"text": user_input}]
-        }
-
-        text_model = genai.GenerativeModel(
-            model_name='gemini-pro',
-            generation_config=generation_config,
-            safety_settings=safety_settings
-        )
-
-        response = text_model.generate_content([text_prompt])
-        response_text = response.text if hasattr(response, "text") else "No response text found."
-        st.write("AI: " + response_text)
-
-    # Clear the user input and uploaded files
+    # Determine which model to use based on input type
+    model_name = 'gemini-pro-vision' if uploaded_files else 'gemini-pro'
+    st.session_state['last_model_used'] = 'vision' if uploaded_files else 'text'
+
+    # Prepare prompts for conversation
+    prompts = []
+    if user_input:
+        prompts.append({"role": "user", "parts": [{"text": user_input}]})
+
+    # Append images to prompts if uploaded
+    if uploaded_files:
+        for uploaded_file in uploaded_files:
+            prompts.append({"role": "user", "parts": [{"mime_type": uploaded_file.type, "data": get_image_base64(Image.open(uploaded_file))}]})
+
+    # Create a new list combining chat history with current prompts
+    combined_prompts = st.session_state['chat_history'] + prompts
+
+    # Use the appropriate model for interaction
+    model = genai.GenerativeModel(
+        model_name=model_name,
+        generation_config=generation_config,
+        safety_settings=safety_settings
+    )
+    response = model.generate_content(combined_prompts)
+    response_text = response.text if hasattr(response, "text") else "No response text found."
+
+    # Update chat history and display the model response
+    for prompt in prompts:
+        st.session_state['chat_history'].append(prompt)
+    st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})
+
+    # Clear the user input and reset the file uploader widget
     st.session_state.user_input = ''
     st.session_state.uploaded_files = []
+    st.session_state.file_uploader_key = str(uuid.uuid4())
 
 # Multiline text input for the user to send messages
 user_input = st.text_area("Enter your message here:", key="user_input")
@@ -104,7 +91,7 @@ user_input = st.text_area("Enter your message here:", key="user_input")
 uploaded_files = st.file_uploader(
     "Upload images:",
     type=["png", "jpg", "jpeg"],
-    accept_multiple_files=False, # For simplicity, we're only accepting one image at a time
+    accept_multiple_files=True,
    key=st.session_state.file_uploader_key
 )
 
@@ -112,4 +99,17 @@ uploaded_files = st.file_uploader(
 send_button = st.button("Send", on_click=send_message)
 
 # Button to clear the conversation
-clear_button = st.button("Clear Conversation", on_click=clear_conversation)
+clear_button = st.button("Clear Conversation", on_click=clear_conversation)
+
+# Display the chat history
+for entry in st.session_state['chat_history']:
+    role = entry["role"]
+    parts = entry["parts"][0]
+    if 'text' in parts:
+        st.markdown(f"{role.title()}: {parts['text']}")
+    elif 'data' in parts:
+        # Display the image
+        st.image(Image.open(io.BytesIO(base64.b64decode(parts['data']))), caption='Uploaded Image')
+
+# Ensure the file_uploader widget state is tied to the randomly generated key
+st.session_state.uploaded_files = uploaded_files
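Note: the diff above replaces a hardcoded key with a placeholder string, while the in-file comment already recommends reading the key from the environment or secure storage. A minimal sketch of that pattern, not part of this commit (the GOOGLE_API_KEY variable name is an assumption):

import os
import google.generativeai as genai

# Read the key from the environment instead of hardcoding it; GOOGLE_API_KEY is an assumed name
api_key = os.environ.get("GOOGLE_API_KEY")
if not api_key:
    raise RuntimeError("Set the GOOGLE_API_KEY environment variable before starting the app")
genai.configure(api_key=api_key)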