acecalisto3 commited on
Commit
675c9d3
1 Parent(s): 924e1fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +307 -47
app.py CHANGED
@@ -1,48 +1,308 @@
1
- ===== Application Startup at 2024-07-13 21:48:51 =====
2
-
3
- Running on local URL: http://0.0.0.0:7860
4
-
5
- To create a public link, set `share=True` in `launch()`.
6
- Traceback (most recent call last):
7
- File "/usr/local/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py", line 304, in hf_raise_for_status
8
- response.raise_for_status()
9
- File "/usr/local/lib/python3.10/site-packages/requests/models.py", line 1024, in raise_for_status
10
- raise HTTPError(http_error_msg, response=self)
11
- requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1
12
-
13
- The above exception was the direct cause of the following exception:
14
-
15
- Traceback (most recent call last):
16
- File "/usr/local/lib/python3.10/site-packages/gradio/queueing.py", line 541, in process_events
17
- response = await route_utils.call_process_api(
18
- File "/usr/local/lib/python3.10/site-packages/gradio/route_utils.py", line 276, in call_process_api
19
- output = await app.get_blocks().process_api(
20
- File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 1928, in process_api
21
- result = await self.call_function(
22
- File "/usr/local/lib/python3.10/site-packages/gradio/blocks.py", line 1514, in call_function
23
- prediction = await anyio.to_thread.run_sync(
24
- File "/usr/local/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
25
- return await get_async_backend().run_sync_in_worker_thread(
26
- File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
27
- return await future
28
- File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
29
- result = context.run(func, *args)
30
- File "/usr/local/lib/python3.10/site-packages/gradio/utils.py", line 833, in wrapper
31
- response = f(*args, **kwargs)
32
- File "/home/user/app/app.py", line 276, in chat
33
- history, history_output = agent_interaction(
34
- File "/home/user/app/app.py", line 106, in agent_interaction
35
- response = run_llm(
36
- File "/home/user/app/app.py", line 77, in run_llm
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  resp = client.text_generation(
38
- File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 2061, in text_generation
39
- raise_text_generation_error(e)
40
- File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_common.py", line 460, in raise_text_generation_error
41
- raise http_error
42
- File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 2032, in text_generation
43
- bytes_output = self.post(json=payload, model=model, task="text-generation", stream=stream) # type: ignore
44
- File "/usr/local/lib/python3.10/site-packages/huggingface_hub/inference/_client.py", line 273, in post
45
- hf_raise_for_status(response)
46
- File "/usr/local/lib/python3.10/site-packages/huggingface_hub/utils/_errors.py", line 371, in hf_raise_for_status
47
- raise HfHubHTTPError(str(e), response=response) from e
48
- huggingface_hub.utils._errors.HfHubHTTPError: 500 Server Error: Internal Server Error for url: https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1 (Request ID: 41TZN5nlKL2We1-SnVOGU)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+ import random
4
+ from huggingface_hub import InferenceClient
5
+ import gradio as gr
6
+ from safe_search import safe_search
7
+ from i_search import google
8
+ from i_search import i_search as i_s
9
+ from datetime import datetime
10
+ import logging
11
+ import json
12
+
13
# --- Configuration ---
MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Hugging Face model id used for all inference calls
MAX_HISTORY_TURNS = 5  # Number of most-recent (user, bot) turns kept when formatting a prompt
VERBOSE = True  # When True, run_llm logs every prompt and response to the log file

# --- Logging Setup ---
# All application logs (seeds, prompts, agent thoughts/actions) go to a local file.
logging.basicConfig(
    filename="app.log",  # Name of the log file
    level=logging.INFO,  # Set the logging level (INFO, DEBUG, etc.)
    format="%(asctime)s - %(levelname)s - %(message)s",
)
24
+
25
# --- Agent Definitions ---
# Maps an agent name (shown in the UI dropdown) to:
#   description   - human-readable summary for reference
#   system_prompt - text injected ahead of the conversation by format_prompt()
agents = {
    "WEB_DEV": {
        "description": "Specialized in web development tasks.",
        "system_prompt": "You are a helpful AI assistant specializing in web development. You can generate code, answer questions, and provide guidance on web technologies.",
    },
    "AI_SYSTEM_PROMPT": {
        "description": "Focuses on generating system prompts for AI agents.",
        "system_prompt": "You are a helpful AI assistant that generates effective system prompts for AI agents. Your prompts should be clear, concise, and provide specific instructions.",
    },
    "PYTHON_CODE_DEV": {
        "description": "Expert in Python code development.",
        "system_prompt": "You are a helpful AI assistant specializing in Python code development. You can generate Python code, debug code, and answer questions about Python.",
    },
    "DATA_SCIENCE": {
        "description": "Expert in data science tasks.",
        "system_prompt": "You are a helpful AI assistant specializing in data science. You can analyze data, build models, and provide insights.",
    },
    "GAME_DEV": {
        "description": "Expert in game development tasks.",
        "system_prompt": "You are a helpful AI assistant specializing in game development. You can generate game logic, design levels, and provide guidance on game engines.",
    },
    # Add more agents as needed
}
49
+
50
# --- Function to format prompt with history ---
def format_prompt(message, history, agent_name, system_prompt, max_turns=None):
    """Build a Mixtral-style instruction prompt from the chat history.

    Args:
        message: The new user message to append as the final instruction.
        history: List of (user_prompt, bot_response) tuples.
        agent_name: Name of the active agent (currently unused here; kept
            for interface compatibility with callers).
        system_prompt: Optional system prompt prepended to the whole prompt.
        max_turns: How many most-recent history turns to include; defaults
            to the module-level MAX_HISTORY_TURNS.

    Returns:
        The formatted prompt string.
    """
    if max_turns is None:
        max_turns = MAX_HISTORY_TURNS
    prompt = " "
    for user_prompt, bot_response in history[-max_turns:]:
        # BUG FIX: the instruction close tag was truncated ("[/ ") which
        # produced malformed [INST]...[/INST] pairs for the Mixtral
        # instruction template.
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}"
    prompt += f"[INST] {message} [/INST]"

    # Add system prompt if provided
    if system_prompt:
        prompt = f"{system_prompt}\n\n{prompt}"

    return prompt
63
+
64
# --- Function to run the LLM with specified parameters ---
def run_llm(
    prompt,
    stop_sequences,
    max_tokens,
    temperature=0.7,
    top_p=0.8,
    repetition_penalty=1.5,
):
    """Send a text-generation request to the Hugging Face Inference API.

    Args:
        prompt: Fully formatted prompt string (see format_prompt).
        stop_sequences: Strings at which generation stops.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        repetition_penalty: Penalty applied to repeated tokens.

    Returns:
        The generated text returned by the inference endpoint.
    """
    seed = random.randint(1, 1111111111111111)
    logging.info(f"Seed: {seed}")  # Log the seed

    client = InferenceClient(MODEL_NAME)
    # BUG FIX: the seed was generated and logged but never passed to the
    # endpoint, so the logged seed could not reproduce a generation.
    resp = client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        stop_sequences=stop_sequences,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        seed=seed,
    )
    if VERBOSE:
        logging.info(f"Prompt: {prompt}")
        logging.info(f"Response: {resp}")
    return resp
89
+
90
# --- Function to handle agent interactions ---
def agent_interaction(
    purpose,
    message,
    agent_name,
    system_prompt,
    history,
    temperature,
    max_new_tokens,
    top_p,
    repetition_penalty,
):
    """Run one chat turn: format the prompt, query the LLM, record the exchange.

    Appends (message, response) to *history* in place and returns the updated
    history twice — once for the chatbot display and once for the state.
    """
    stop_words = ["observation:", "task:", "action:", "thought:"]

    formatted_prompt = format_prompt(message, history, agent_name, system_prompt)
    reply = run_llm(
        formatted_prompt,
        stop_sequences=stop_words,
        max_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )

    history.append((message, reply))
    return history, history
118
+
119
# --- Function to parse actions from LLM response ---
def parse_action(line):
    """Parse an ``action:`` line into ``(action_name, action_input)``.

    Expected line shapes (see the observation text in execute_action):
        "action: SEARCH action_input=https://example.com"
        "action: COMPLETE"

    BUG FIX: the previous implementation took the text *before* the first
    colon (always the literal word "action"), stripped "action" out of it,
    and therefore returned an empty action name for every line — so no
    action could ever dispatch in execute_action.

    Returns:
        Tuple (action_name, action_input); action_input is "" when absent.
    """
    # Drop the leading "action" label, keeping everything after the colon.
    content = line.split(":", 1)[1].strip() if ":" in line else line.strip()
    # Split off an optional "action_input=..." argument.
    action_name, _, action_input = content.partition("action_input=")
    return action_name.strip(), action_input.strip()
130
+
131
# --- Function to execute actions based on agent's response ---
def execute_action(purpose, task, history, action_name, action_input):
    """Dispatch on *action_name* and perform the corresponding step.

    Args:
        purpose: Overall purpose of the session (unused by current actions).
        task: Current task string; set to "END" on COMPLETE.
        history: Running transcript string; observations are appended to it.
        action_name: Parsed action name (e.g. "SEARCH", "COMPLETE").
        action_input: Parsed action argument (e.g. a URL for SEARCH).

    Returns:
        Tuple of (next_action_name, next_action_input, history, task).
    """
    logging.info(f"Executing Action: {action_name} - {action_input}")

    if action_name == "SEARCH":
        try:
            if "http" in action_input:
                # Strip angle brackets the model sometimes wraps URLs in.
                if "<" in action_input:
                    action_input = action_input.strip("<")
                if ">" in action_input:
                    action_input = action_input.strip(">")
                response = i_s(action_input)
                logging.info(f"Search Result: {response}")
                history += "observation: search result is: {}\n".format(response)
            else:
                history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
        except Exception as e:
            # Best-effort: surface the failure to the agent as an observation.
            history += "observation: {}\n".format(e)
        return "MAIN", None, history, task

    elif action_name == "COMPLETE":
        task = "END"
        return "COMPLETE", "COMPLETE", history, task

    elif action_name == "GENERATE_CODE":
        # Placeholder: code generation is not implemented yet.
        # BUG FIX: this branch previously ended in `pass` and fell off the
        # function, returning None and crashing the caller's 4-tuple unpack
        # in run_agent().
        history += "observation: GENERATE_CODE is not implemented yet.\n"
        return "MAIN", None, history, task

    elif action_name == "RUN_CODE":
        # Placeholder: code execution is not implemented yet (same fix as above).
        history += "observation: RUN_CODE is not implemented yet.\n"
        return "MAIN", None, history, task

    else:
        # Default action: return control to the MAIN loop unchanged.
        return "MAIN", action_input, history, task
176
+
177
# --- Function to handle the main loop of agent interaction ---
def run_agent(purpose, history):
    """Generator driving the think/act loop: repeatedly prompt the LLM for a
    thought and an action, execute the action, and yield the updated history
    string after every iteration until the task is set to "END".
    """
    task = None
    directory = "./"  # NOTE(review): never read — looks like leftover scaffolding
    # Flatten any list-shaped history into a plain transcript string;
    # from here on history is a str that observations are appended to.
    if history:
        history = str(history).strip("[]")
    if not history:
        history = ""

    # First iteration asks for a task update; afterwards the loop body
    # overwrites action_name from the parsed LLM response.
    action_name = "UPDATE-TASK" if task is None else "MAIN"
    action_input = None

    while True:
        logging.info(f"---")
        logging.info(f"Purpose: {purpose}")
        logging.info(f"Task: {task}")
        logging.info(f"---")
        logging.info(f"History: {history}")
        logging.info(f"---")

        # Get the agent's next action
        prompt = f"""
        You are a helpful AI assistant. You are working on the task: {task}
        Your current history is:
        {history}
        What is your next thought?
        thought:
        What is your next action?
        action:
        """

        response = run_llm(
            prompt,
            stop_sequences=["observation:", "task:", "action:", "thought:"],
            max_tokens=32000,
        )

        # Parse the action: record every "thought:" line, then stop at the
        # first "action:" line and extract its name/input.
        lines = response.strip().strip("\n").split("\n")
        for line in lines:
            if line.startswith("thought: "):
                history += "{}\n".format(line)
                logging.info(f"Thought: {line}")
            elif line.startswith("action: "):
                action_name, action_input = parse_action(line)
                logging.info(f"Action: {action_name} - {action_input}")
                history += "{}\n".format(line)
                break

        # Execute the action
        # NOTE(review): if execute_action returns None (its GENERATE_CODE /
        # RUN_CODE branches end in `pass`), this 4-tuple unpack raises
        # TypeError — confirm those branches always return a tuple.
        action_name, action_input, history, task = execute_action(
            purpose, task, history, action_name, action_input
        )

        # Yield the transcript so the UI can stream intermediate progress.
        yield (history)
        if task == "END":
            return (history)
234
+
235
# --- Gradio Interface ---
def main():
    """Build and launch the Gradio app: a chat panel with sampling controls,
    plus a placeholder project-explorer tab. Blocking call (runs the server).
    """
    with gr.Blocks() as demo:
        gr.Markdown("## FragMixt - No-Code Development Powerhouse")
        gr.Markdown("### Your AI-Powered Development Companion")

        # Chat Interface
        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")

        # Input Components
        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
        agent_name = gr.Dropdown(label="Agents", choices=list(agents.keys()), value=list(agents.keys())[0], interactive=True)
        system_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
        temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
        max_new_tokens = gr.Slider(label="Max new tokens", value=1048 * 10, minimum=0, maximum=1048 * 10, step=64, interactive=True, info="The maximum numbers of new tokens")
        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
        repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")

        # Button to submit the message
        submit_button = gr.Button(value="Send")

        # Project Explorer Tab (Placeholder)
        # NOTE(review): explore_button is never wired to a handler, so this
        # tab is display-only for now.
        with gr.Tab("Project Explorer"):
            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
            explore_button = gr.Button(value="Explore")
            project_output = gr.Textbox(label="File Tree", lines=20)

        # Chat App Logic Tab
        with gr.Tab("Chat App"):
            # Per-session chat history: list of (user, bot) tuples.
            history = gr.State([])
            # Example exchanges; defined but not attached to any component.
            examples = [
                ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
                ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
            ]

            def chat(purpose, message, agent_name, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history):
                """Send-button handler: run one agent turn and return the
                updated history for both the chatbot and the state."""
                # Get the system prompt for the selected agent
                # NOTE(review): this overwrites whatever was typed into the
                # System Prompt textbox — confirm that is intended.
                system_prompt = agents.get(agent_name, {}).get("system_prompt", "")

                # Run the agent interaction
                history, history_output = agent_interaction(
                    purpose,
                    message,
                    agent_name,
                    system_prompt,
                    history,
                    temperature,
                    max_new_tokens,
                    top_p,
                    repetition_penalty,
                )
                return history, history_output

            # Wire the Send button; input order must match chat()'s signature.
            submit_button.click(
                chat,
                inputs=[
                    purpose,
                    message,
                    agent_name,
                    system_prompt,
                    temperature,
                    max_new_tokens,
                    top_p,
                    repetition_penalty,
                    history,
                ],
                outputs=[chatbot, history],
            )

    demo.launch()

if __name__ == "__main__":
    main()