Spaces: Runtime error
Eric Michael Martinez committed
Commit • 1cd8783
1 Parent(s): 9058df5
adding chatbot
Files changed:
- app.py +87 -0
- assignment.ipynb +11 -12
app.py
ADDED
@@ -0,0 +1,87 @@
+from dotenv import load_dotenv
+
+load_dotenv() # take environment variables from .env.
+import gradio as gr
+import openai
+
+# Define a function to get the AI's reply using the OpenAI API
+def get_ai_reply(message, model="gpt-3.5-turbo", system_message=None, temperature=0, message_history=[]):
+    # Initialize the messages list
+    messages = []
+
+    # Add the system message to the messages list
+    if system_message is not None:
+        messages += [{"role": "system", "content": system_message}]
+
+    # Add the message history to the messages list
+    if message_history is not None:
+        messages += message_history
+
+    # Add the user's message to the messages list
+    messages += [{"role": "user", "content": message}]
+
+    # Make an API call to the OpenAI ChatCompletion endpoint with the model and messages
+    completion = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=temperature
+    )
+
+    # Extract and return the AI's response from the API response
+    return completion.choices[0].message.content.strip()
+
+# Define a function to handle the chat interaction with the AI model
+def chat(message, chatbot_messages, history_state):
+    # Initialize chatbot_messages and history_state if they are not provided
+    chatbot_messages = chatbot_messages or []
+    history_state = history_state or []
+
+    # Try to get the AI's reply using the get_ai_reply function
+    try:
+        prompt = """
+        You are bot created to simulate commands.
+
+        Simulate doing a command using this notation:
+        :: <command> ::
+
+        Simulate doing nothing with this notation:
+        :: does nothing ::
+        """
+        ai_reply = get_ai_reply(message, model="gpt-3.5-turbo", system_message=prompt.strip(), message_history=history_state)
+
+        # Append the user's message and the AI's reply to the chatbot_messages list
+        chatbot_messages.append((message, ai_reply))
+
+        # Append the user's message and the AI's reply to the history_state list
+        history_state.append({"role": "user", "content": message})
+        history_state.append({"role": "assistant", "content": ai_reply})
+
+        # Return None (empty out the user's message textbox), the updated chatbot_messages, and the updated history_state
+    except Exception as e:
+        # If an error occurs, raise a Gradio error
+        raise gr.Error(e)
+
+    return None, chatbot_messages, history_state
+
+# Define a function to launch the chatbot interface using Gradio
+def get_chatbot_app():
+    # Create the Gradio interface using the Blocks layout
+    with gr.Blocks() as app:
+        # Create a chatbot interface for the conversation
+        chatbot = gr.Chatbot(label="Conversation")
+        # Create a textbox for the user's message
+        message = gr.Textbox(label="Message")
+        # Create a state object to store the conversation history
+        history_state = gr.State()
+        # Create a button to send the user's message
+        btn = gr.Button(value="Send")
+
+        # Connect the send button to the chat function
+        btn.click(chat, inputs=[message, chatbot, history_state], outputs=[message, chatbot, history_state])
+    # Return the app
+    return app
+
+# Call the launch_chatbot function to start the chatbot interface using Gradio
+app = get_chatbot_app()
+app.queue() # this is to be able to queue multiple requests at once
+app.launch()
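For context (not part of the commit): get_ai_reply layers the system prompt, the prior history, and the new user message into a single list before calling the API. A minimal offline sketch of that payload, using a made-up stub history and no API call, so the hypothetical turns below are illustrative only:

# Illustrative sketch; the stub history is invented for this example.
system_message = "You are bot created to simulate commands."
message_history = [
    {"role": "user", "content": "simon says touch your head"},  # hypothetical turn
    {"role": "assistant", "content": ":: touches head ::"},     # hypothetical reply
]
message = "Touch your toes"

# Same layering order as get_ai_reply: system, then history, then the new message
messages = []
messages += [{"role": "system", "content": system_message}]
messages += message_history
messages += [{"role": "user", "content": message}]

# This list is the structure handed to openai.ChatCompletion.create(model=..., messages=...)
assert [m["role"] for m in messages] == ["system", "user", "assistant", "user"]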
assignment.ipynb
CHANGED
@@ -238,10 +238,9 @@
     "outputs": [],
     "source": [
      "# You don't need to change this, just run this cell\n",
-     "import openai\n",
      "from dotenv import load_dotenv\n",
-     "\n",
      "load_dotenv() # take environment variables from .env.\n",
+     "import openai\n",
      "\n",
      "# Define a function to get the AI's reply using the OpenAI API\n",
      "def get_ai_reply(message, model=\"gpt-3.5-turbo\", system_message=None, temperature=0, message_history=[]):\n",
@@ -337,8 +336,8 @@
      "input_2 = \"Touch your toes\"\n",
      "response_2 = get_ai_reply(input_2, system_message=prompt, message_history=history)\n",
      "\n",
-     "print(f\"Input 2 (same conversation): {
-     "print(f\"Output 2: {
+     "print(f\"Input 2 (same conversation): {input_2}\")\n",
+     "print(f\"Output 2: {response_2}\")\n",
      "\n",
      "history = [\n",
      "    {\"role\": \"user\", \"content\": input}, \n",
@@ -349,8 +348,8 @@
      "input_3 = \"simon says touch your toes\"\n",
      "response_3 = get_ai_reply(input_3, system_message=prompt, message_history=history)\n",
      "\n",
-     "print(f\"Input 3 (same conversation): {
-     "print(f\"Output 3: {
+     "print(f\"Input 3 (same conversation): {input_3}\")\n",
+     "print(f\"Output 3: {response_3}\")\n"
     ]
    },
    {
@@ -527,12 +526,12 @@
     "metadata": {},
     "outputs": [],
     "source": [
-     "import gradio as gr\n",
-     "import openai\n",
      "from dotenv import load_dotenv\n",
      "\n",
      "load_dotenv() # take environment variables from .env.\n",
-     "
+     "import gradio as gr\n",
+     "import openai\n",
+     "\n",
      "# Define a function to get the AI's reply using the OpenAI API\n",
      "def get_ai_reply(message, model=\"gpt-3.5-turbo\", system_message=None, temperature=0, message_history=[]):\n",
      "    # Initialize the messages list\n",
@@ -560,7 +559,7 @@
      "    return completion.choices[0].message.content.strip()\n",
      "\n",
      "# Define a function to handle the chat interaction with the AI model\n",
-     "def chat(
+     "def chat(message, chatbot_messages, history_state):\n",
      "    # Initialize chatbot_messages and history_state if they are not provided\n",
      "    chatbot_messages = chatbot_messages or []\n",
      "    history_state = history_state or []\n",
@@ -576,7 +575,7 @@
      "        Simulate doing nothing with this notation:\n",
      "        :: does nothing ::\n",
      "        \"\"\"\n",
-     "        ai_reply = get_ai_reply(message, model
+     "        ai_reply = get_ai_reply(message, model=\"gpt-3.5-turbo\", system_message=prompt.strip(), message_history=history_state)\n",
      "        \n",
      "        # Append the user's message and the AI's reply to the chatbot_messages list\n",
      "        chatbot_messages.append((message, ai_reply))\n",
@@ -613,7 +612,7 @@
      "# Call the launch_chatbot function to start the chatbot interface using Gradio\n",
      "app = get_chatbot_app()\n",
      "app.queue() # this is to be able to queue multiple requests at once\n",
-     "app.launch()"
+     "app.launch(share=True)"
     ]
    },
    {
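One behavioral difference between the two copies of this code: the notebook launches with share=True (a temporary public Gradio link, useful for local development), while app.py calls launch() with no arguments, since the Space itself serves the app. A minimal sketch, assuming the SPACE_ID environment variable that Hugging Face sets inside a running Space, of picking the mode at runtime:

# Sketch only (not part of the commit): enable a share link only outside a Space.
import os
import gradio as gr

with gr.Blocks() as demo:
    placeholder = gr.Textbox(label="placeholder")  # stand-in UI for the example

demo.queue()
demo.launch(share=os.getenv("SPACE_ID") is None)  # SPACE_ID is assumed set on Spaces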
|