Commit: record result

Files changed:
- app.py (+108, -8)
- chatbot_simulator.py (+1, -1)

app.py
CHANGED
@@ -1,19 +1,32 @@
 import gradio as gr
 from chatbot_simulator import ChatbotSimulation
-from
+from huggingface_hub import HfApi, create_repo
 from datasets import load_dataset
 import json_repair
 import random
 import os
+import re
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
-openai_api_key = os.getenv("OPENAI_API_KEY")
+openai_api_key = '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'#os.getenv("OPENAI_API_KEY")
+
+task_completed = 0
+task_completed_steps = None
+
+
+def is_task_incomplete(example):
+    return example["task_completed"] is None and example["task_completed_steps"] is None
+
+
+def is_task_given_up(example):
+    return example["task_completed"] == 0 and example["task_completed_steps"] == 0
 
 
 class AppSimulator:
     def __init__(self, openai_api_key):
         self.simulation = None
         self.openai_api_key = openai_api_key
+        self.app_name = None
 
     def initialize_simulator(self, sitemap_url, progress=gr.Progress(track_tqdm=True)):
         """Initialize the simulator with retries and elapsed time tracking."""
@@ -31,12 +44,24 @@ class AppSimulator:
             elif row['name'] == 'system_data':
                 system_data = json_repair.loads(row['value'])
 
-
-
-
-
-
-
+        self.app_name = app_name
+        human_result_url = "jjz5463/simulator_human_result"
+        synthetic_tasks = load_dataset(human_result_url, app_name, split='train')
+
+        incomplete_tasks = synthetic_tasks.filter(is_task_incomplete)
+        give_up_tasks = synthetic_tasks.filter(is_task_given_up)
+        if len(incomplete_tasks) > 0:
+            incomplete_task = incomplete_tasks[0]
+            task = incomplete_task["tasks"]
+            solution = incomplete_task["steps"]
+            user_data = incomplete_task["attributes"]["user_data"]
+        elif len(give_up_tasks) > 0:
+            give_up_task = give_up_tasks[0]
+            task = give_up_task["tasks"]
+            solution = give_up_task["steps"]
+            user_data = give_up_task["attributes"]["user_data"]
+        else:
+            return "All tasks in this app have been completed!"
 
         self.simulation = ChatbotSimulation(
             app_name=app_name,
@@ -68,9 +93,75 @@ simulator_app = AppSimulator(openai_api_key=openai_api_key)
 def chat(user_input, history):
     """Chat handler that validates input and interacts with the simulator."""
     response = simulator_app.chat_interaction(user_input, history)
+
+    # Initialize variables for task completion and steps
+
+    # Define the pattern for matching the response
+    pattern = r"Task completed! You took (\d+) steps\."
+    match = re.match(pattern, response)
+
+    global task_completed
+    global task_completed_steps
+    task_completed = 0
+    task_completed_steps = None
+    if match:
+        task_completed = 1
+        task_completed_steps = int(match.group(1))
+        human_result_url = "jjz5463/simulator_human_result"
+        app_name = simulator_app.app_name
+        synthetic_tasks = load_dataset(human_result_url, app_name, split='train')
+        incomplete_tasks = synthetic_tasks.filter(is_task_incomplete)
+        if len(incomplete_tasks) > 0:
+            incomplete_task_index = incomplete_tasks['__index_level_0__'][0]
+            synthetic_tasks = synthetic_tasks.map(
+                lambda example, idx: {
+                    "task_completed": task_completed if idx == incomplete_task_index else example["task_completed"],
+                    "task_completed_steps": task_completed_steps if idx == incomplete_task_index else example["task_completed_steps"],
+                },
+                with_indices=True,
+            )
+            # Push the updated dataset back to Hugging Face
+            synthetic_tasks.push_to_hub(human_result_url)
+
     return response
 
 
+def give_up():
+    """Handle the Give-Up action by marking the first incomplete task as abandoned."""
+    global task_completed, task_completed_steps
+    task_completed = 0
+    task_completed_steps = 0
+
+    # Access the app_name from the simulator instance
+    app_name = simulator_app.app_name
+    if not app_name:
+        return "Simulator has not been initialized with an app!"
+
+    # Load the human result dataset
+    human_result_url = "jjz5463/simulator_human_result"
+    synthetic_tasks = load_dataset(human_result_url, app_name, split='train')
+
+    # Find the first incomplete task
+    incomplete_tasks = synthetic_tasks.filter(is_task_incomplete)
+    if len(incomplete_tasks) > 0:
+        incomplete_task_index = incomplete_tasks['__index_level_0__'][0]
+
+        # Update the dataset to mark the task as abandoned
+        synthetic_tasks = synthetic_tasks.map(
+            lambda example, idx: {
+                "task_completed": task_completed if idx == incomplete_task_index else example["task_completed"],
+                "steps": task_completed_steps if idx == incomplete_task_index else example["steps"],
+            },
+            with_indices=True,
+        )
+
+        # Push the updated dataset back to Hugging Face
+        synthetic_tasks.push_to_hub(human_result_url)
+        return "Task marked as abandoned (Give-Up action)."
+    else:
+        return "No incomplete tasks found to abandon!"
+
+
 # Gradio Interface using ChatInterface
 with gr.Blocks(fill_height=True) as demo:
     gr.Markdown("## Simulator Setup")
@@ -85,6 +176,8 @@ with gr.Blocks(fill_height=True) as demo:
     # Chat interface to handle user interactions
     chat_interface = gr.ChatInterface(fn=chat, type='messages')
 
+    give_up_button = gr.Button("Give Up")
+
     # Define the callback function to initialize the simulator and update status
     def initialize_and_start_chat(sitemap):
         return simulator_app.initialize_simulator(sitemap) # Use progress tracking
@@ -96,5 +189,12 @@ with gr.Blocks(fill_height=True) as demo:
         outputs=status # Update only the status block
     )
 
+    # Set up the Give-Up button click to update the dataset
+    give_up_button.click(
+        fn=give_up,
+        inputs=[],
+        outputs=status # Update the status with the give-up message
+    )
+
 # Launch the app
 demo.launch()
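
Both chat() and give_up() in app.py record a result by rewriting one row of the jjz5463/simulator_human_result dataset and pushing it back to the Hub. Below is a minimal sketch of that row-update pattern pulled out of the Gradio handlers; it assumes a write-scoped HF token is available to push_to_hub(), that app_name is a valid config of the dataset, and that the split carries an __index_level_0__ column whose values line up with row positions (true when the dataset was built from a pandas DataFrame with a default index). The helper name mark_first_incomplete is only for illustration, not part of the commit.

    # Sketch only: the row-update pattern behind chat() and give_up().
    from datasets import load_dataset

    REPO_ID = "jjz5463/simulator_human_result"  # same dataset the app writes to

    def mark_first_incomplete(app_name, completed, steps):
        """Set task_completed / task_completed_steps on the first open task and push the result."""
        tasks = load_dataset(REPO_ID, app_name, split="train")
        pending = tasks.filter(
            lambda ex: ex["task_completed"] is None and ex["task_completed_steps"] is None
        )
        if len(pending) == 0:
            return tasks  # nothing left to record
        # Original row index of the first open task; assumed to match the row position seen by map().
        target = pending["__index_level_0__"][0]
        tasks = tasks.map(
            lambda ex, idx: {
                "task_completed": completed if idx == target else ex["task_completed"],
                "task_completed_steps": steps if idx == target else ex["task_completed_steps"],
            },
            with_indices=True,
        )
        tasks.push_to_hub(REPO_ID)  # needs a token with write access to the dataset repo
        return tasks

One design note: Dataset.push_to_hub writes to the dataset's default config unless a config_name is supplied, while the loads here pass app_name as the config, so the two sides have to agree on how the result repo is organized.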
chatbot_simulator.py
CHANGED
@@ -179,7 +179,7 @@ class ChatbotSimulation:
         return gpt_instruction
 
     def start_conversation(self):
-        greeting = f'\nWelcome to {self.app_name} simulator! Your task is: {self.task}
+        greeting = f'\nWelcome to {self.app_name} simulator! Your task is: {self.task} \n'
         system_prompt = self._generate_system_prompt()
         # GPT generates the page instructions
         self.conversation.append({"role": "system", "content": system_prompt})
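
For reference, the completion check that chat() in app.py performs on the simulator reply reduces to a small, testable function. This is a standalone sketch, not part of the commit: it assumes the reply begins with the exact phrase the pattern expects (re.match only anchors at the start of the string), and parse_completion is a hypothetical name.

    # Sketch only: the completion-detection step from chat(), isolated for clarity.
    import re

    COMPLETION_PATTERN = re.compile(r"Task completed! You took (\d+) steps\.")

    def parse_completion(response: str):
        """Return the step count if the simulator reply announces completion, else None."""
        match = COMPLETION_PATTERN.match(response)  # anchored at the start, like re.match in chat()
        return int(match.group(1)) if match else None

    assert parse_completion("Task completed! You took 7 steps.") == 7
    assert parse_completion("Sorry, that page does not exist.") is None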