acecalisto3 committed
Commit 8f208cb
1 Parent(s): b70861c

Update app.py

Files changed (1):
  1. app.py +111 -271

app.py CHANGED
@@ -7,26 +7,25 @@ from datetime import datetime
 import logging
 
 import gradio as gr
-from huggingface_hub import InferenceClient, cached_download
 from safe_search import safe_search
 from i_search import google, i_search as i_s
 
 # --- Configuration ---
-VERBOSE = True  # Enable verbose logging
-MAX_HISTORY = 5  # Maximum history turns to keep
-MAX_TOKENS = 2048  # Maximum tokens for LLM responses
-TEMPERATURE = 0.7  # Temperature for LLM responses
-TOP_P = 0.8  # Top-p (nucleus sampling) for LLM responses
-REPETITION_PENALTY = 1.5  # Repetition penalty for LLM responses
-MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Name of the LLM model
 
-import os
-API_KEY = os.getenv("HUGGINGFACE_API_KEY")  # Ensure you set the HUGGINGFACE_API_KEY environment variable
 
 # --- Logging Setup ---
 logging.basicConfig(
-    filename="app.log",  # Name of the log file
-    level=logging.INFO,  # Set the logging level (INFO, DEBUG, etc.)
     format="%(asctime)s - %(levelname)s - %(message)s",
 )
 
@@ -86,13 +85,10 @@ thought:
 
 # --- Functions ---
 def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
-    """Formats the prompt for the LLM, including the message and relevant history."""
     prompt = " "
-    # Keep only the last 'max_history_turns' turns
     for user_prompt, bot_response in history[-max_history_turns:]:
-        prompt += f"[INST] {user_prompt} [/ "
-        prompt += f" {bot_response}"
-    prompt += f"[INST] {message} [/ "
     return prompt
 
 def run_llm(
@@ -101,21 +97,21 @@ def run_llm(
     purpose: str,
     **prompt_kwargs: Dict
 ) -> str:
-    """Runs the LLM with the given prompt and parameters."""
     seed = random.randint(1, 1111111111111111)
-    logging.info(f"Seed: {seed}")  # Log the seed
 
     content = PREFIX.format(
-        date_time_str=date_time_str,
         purpose=purpose,
         safe_search=safe_search,
     ) + prompt_template.format(**prompt_kwargs)
     if VERBOSE:
-        logging.info(LOG_PROMPT.format(content))  # Log the prompt
 
     resp = client.text_generation(content, max_new_tokens=MAX_TOKENS, stop_sequences=stop_tokens, temperature=TEMPERATURE, top_p=TOP_P, repetition_penalty=REPETITION_PENALTY)
     if VERBOSE:
-        logging.info(LOG_RESPONSE.format(resp))  # Log the response
     return resp
 
 def generate(
@@ -128,265 +124,59 @@ def generate(
     top_p: float = TOP_P,
     repetition_penalty: float = REPETITION_PENALTY,
 ) -> str:
-    """Generates text using the LLM."""
     content = PREFIX.format(
-        date_time_str=date_time_str,
-        purpose=purpose,
         safe_search=safe_search,
-    ) + prompt_template.format(**prompt_kwargs)
     if VERBOSE:
-        logging.info(LOG_PROMPT.format(content))  # Log the prompt
 
     stream = client.text_generation(content, stream=True, details=True, return_full_text=False, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, max_new_tokens=max_new_tokens)
-    resp = ""
-    for response in stream:
-        resp += response.token.text
-
-    if VERBOSE:
-        logging.info(LOG_RESPONSE.format(resp))  # Log the response
-    return resp
-
-def compress_history(purpose: str, task: str, history: List[Tuple[str, str]], directory: str) -> str:
-    """Compresses the history into a shorter summary."""
-    resp = run_llm(
-        COMPRESS_HISTORY_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        purpose=purpose,
-        task=task,
-        history="\n".join(f"[INST] {user_prompt} [/] {bot_response}" for user_prompt, bot_response in history),
-    )
-    history = "observation: {}\n".format(resp)
-    return history
-
-def call_search(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Performs a search based on the action input."""
-    logging.info(f"CALLING SEARCH: {action_input}")
-    try:
-        if "http" in action_input:
-            if "<" in action_input:
-                action_input = action_input.strip("<")
-            if ">" in action_input:
-                action_input = action_input.strip(">")
-
-            response = i_s(action_input)
-            logging.info(f"Search Result: {response}")
-            history.append(("observation: search result is: {}".format(response), ""))
-        else:
-            history.append(("observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n", ""))
-    except Exception as e:
-        history.append(("observation: {}\n".format(e), ""))
-    return "MAIN", None, history, task
-
-def call_main(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Handles the main agent interaction loop."""
-    logging.info(f"CALLING MAIN: {action_input}")
-    resp = run_llm(
-        ACTION_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        purpose=purpose,
-        task=task,
-        history="\n".join(f"[INST] {user_prompt} [/] {bot_response}" for user_prompt, bot_response in history),
-    )
-    lines = resp.strip().strip("\n").split("\n")
-    for line in lines:
-        if line == "":
-            continue
-        if line.startswith("thought: "):
-            history.append((line, ""))
-            logging.info(f"Thought: {line}")
-        elif line.startswith("action: "):
-            action_name, action_input = parse_action(line)
-            logging.info(f"Action: {action_name} - {action_input}")
-            history.append((line, ""))
-            if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                task = "END"
-                return action_name, action_input, history, task
-            else:
-                return action_name, action_input, history, task
-        else:
-            history.append((line, ""))
-            logging.info(f"Other Output: {line}")
-    return "MAIN", None, history, task
-
-def call_set_task(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Sets a new task for the agent."""
-    logging.info(f"CALLING SET_TASK: {action_input}")
-    task = run_llm(
-        TASK_PROMPT,
-        stop_tokens=[],
-        purpose=purpose,
-        task=task,
-        history="\n".join(f"[INST] {user_prompt} [/] {bot_response}" for user_prompt, bot_response in history),
-    ).strip("\n")
-    history.append(("observation: task has been updated to: {}".format(task), ""))
-    return "MAIN", None, history, task
-
-def end_fn(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Ends the agent interaction."""
-    logging.info(f"CALLING END_FN: {action_input}")
-    task = "END"
-    return "COMPLETE", "COMPLETE", history, task
-
-NAME_TO_FUNC: Dict[str, callable] = {
-    "MAIN": call_main,
-    "UPDATE-TASK": call_set_task,
-    "SEARCH": call_search,
-    "COMPLETE": end_fn,
-}
-
-def run_action(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_name: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
-    """Executes the specified action."""
-    logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
-    try:
-        if "RESPONSE" in action_name or "COMPLETE" in action_name:
-            action_name = "COMPLETE"
-            task = "END"
-            return action_name, "COMPLETE", history, task
-
-        # compress the history when it is long
-        if len(history) > MAX_HISTORY:
-            logging.info("COMPRESSING HISTORY")
-            history = compress_history(purpose, task, history, directory)
-        if not action_name in NAME_TO_FUNC:
-            action_name = "MAIN"
-        if action_name == "" or action_name is None:
-            action_name = "MAIN"
-        assert action_name in NAME_TO_FUNC
-
-        logging.info(f"RUN: {action_name} - {action_input}")
-        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
-    except Exception as e:
-        history.append(("observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n", ""))
-        logging.error(f"Error in run_action: {e}")
-        return "MAIN", None, history, task
-
-def run(purpose: str, history: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
-    """Main agent interaction loop."""
-    task = None
-    directory = "./"
-    if history:
-        history = str(history).strip("[]")
-    if not history:
-        history = []
-
-    action_name = "UPDATE-TASK" if task is None else "MAIN"
-    action_input = None
-    while True:
-        logging.info(f"---")
-        logging.info(f"Purpose: {purpose}")
-        logging.info(f"Task: {task}")
-        logging.info(f"---")
-        logging.info(f"History: {history}")
-        logging.info(f"---")
-
-        action_name, action_input, history, task = run_action(
-            purpose,
-            task,
-            history,
-            directory,
-            action_name,
-            action_input,
-        )
-        yield (history)
-        if task == "END":
-            return (history)
-
-################################################
-
-def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 5) -> str:
-    """Formats the prompt for the LLM, including the message and relevant history."""
-    prompt = " "
-    # Keep only the last 'max_history_turns' turns
-    for user_prompt, bot_response in history[-max_history_turns:]:
-        prompt += f"[INST] {user_prompt} [/ "
-        prompt += f" {bot_response}"
-    prompt += f"[INST] {message} [/ "
-    return prompt
-
-def parse_action(line: str) -> Tuple[str, str]:
-    """Parses the action line to get the action name and input."""
-    parts = line.split(":", 1)
-    if len(parts) == 2:
-        action_name = parts[0].replace("action", "").strip()
-        action_input = parts[1].strip()
-    else:
-        action_name = parts[0].replace("action", "").strip()
-        action_input = ""
-    return action_name, action_input
 
 def main():
-    """Main function to run the Gradio interface."""
-    global client, date_time_str
-    # Initialize the LLM client with your API key
-    try:
-        client = InferenceClient(
-            MODEL_NAME,
-            token=API_KEY  # Replace with your actual API key
-        )
-    except Exception as e:
-        logging.error(f"Error initializing LLM client: {e}")
-        print("Error initializing LLM client. Please check your API key.")
-        return
-
-    # Get the current date and time
-    date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
     with gr.Blocks() as demo:
         gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
-        gr.Markdown("### Your AI-Powered Development Companion")
-
-        # Chat Interface
-        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
-
-        # Input Components
-        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
-        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
-        agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
-        sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
-        temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
-        max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum numbers of new tokens")
-        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
-        repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
-
-        # Button to submit the message
-        submit_button = gr.Button(value="Send")
-
-        # Project Explorer Tab
-        with gr.Tab("Project Explorer"):
-            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
-            explore_button = gr.Button(value="Explore")
-            project_output = gr.Textbox(label="File Tree", lines=20)
-
-        # Chat App Logic Tab
-        with gr.Tab("Chat App"):
-            history = gr.State([])
-            examples = [
-                ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
-                ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
-                ["Generate a simple HTML page with a heading and a paragraph.", "```html\n<!DOCTYPE html>\n<html>\n<head>\n<title>My Simple Page</title>\n</head>\n<body>\n<h1>Welcome to my page!</h1>\n<p>This is a simple paragraph.</p>\n</body>\n</html>\n```"],
-                ["Create a basic SQL query to select all data from a table named 'users'.", "```sql\nSELECT * FROM users;\n```"],
-                ["Design a user interface for a mobile app that allows users to track their daily expenses.", "Here's a basic UI design for a mobile expense tracker app:\n\n**Screen 1: Home**\n- Top: App Name and Balance Display\n- Middle: List of Recent Transactions (Date, Description, Amount)\n- Bottom: Buttons for Add Expense, Add Income, View Categories\n\n**Screen 2: Add Expense**\n- Input fields for Date, Category, Description, Amount\n- Buttons for Save, Cancel\n\n**Screen 3: Expense Categories**\n- List of expense categories (e.g., Food, Transportation, Entertainment)\n- Option to add/edit categories\n\n**Screen 4: Reports**\n- Charts and graphs to visualize spending by category, date range, etc.\n- Filters to customize the reports"],
-            ]
 
         def chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
-            """Handles the chat interaction."""
             prompt = format_prompt(message, history)
             response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
             history.append((message, response))
@@ -394,9 +184,7 @@
 
         submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
 
-        # Project Explorer Logic
         def explore_project(project_path: str) -> str:
-            """Explores the project directory and returns a file tree."""
             try:
                 tree = subprocess.check_output(["tree", project_path]).decode("utf-8")
                 return tree
@@ -405,6 +193,58 @@
 
         explore_button.click(explore_project, inputs=[project_path], outputs=[project_output])
 
     demo.launch()
 
 if __name__ == "__main__":
 
 import logging
 
 import gradio as gr
+from huggingface_hub import InferenceClient
 from safe_search import safe_search
 from i_search import google, i_search as i_s
 
 # --- Configuration ---
+VERBOSE = True
+MAX_HISTORY = 5
+MAX_TOKENS = 2048
+TEMPERATURE = 0.7
+TOP_P = 0.8
+REPETITION_PENALTY = 1.5
+MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"
 
+API_KEY = os.getenv("HUGGINGFACE_API_KEY")
 
 # --- Logging Setup ---
 logging.basicConfig(
+    filename="app.log",
+    level=logging.INFO,
     format="%(asctime)s - %(levelname)s - %(message)s",
 )
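Note: the new `API_KEY = os.getenv("HUGGINGFACE_API_KEY")` line outlives the removal of the old `import os`, so it only works if `os` is already imported in the file's first six lines, which no hunk here shows. A sketch of the top-of-file imports the rest of the diff appears to rely on (assumed, not visible in the commit):

# Assumed imports above line 7 (not shown in any hunk of this diff):
import os           # os.getenv here; os.listdir / os.path / os.remove further down
import random       # random.randint in run_llm
import subprocess   # subprocess.check_output in explore_project
from typing import Dict, List, Tuple  # annotations throughout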
 
 # --- Functions ---
 def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
     prompt = " "
     for user_prompt, bot_response in history[-max_history_turns:]:
+        prompt += f"[INST] {user_prompt} [/INST] {bot_response} "
+    prompt += f"[INST] {message} [/INST]"
     return prompt
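For reference, the rewritten loop now closes each turn with Mixtral-Instruct's `[INST] ... [/INST]` markers instead of the truncated `[/ ` fragments it replaced. A standalone sketch of what the fixed function returns (same logic as the diff above):

from typing import List, Tuple

def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
    prompt = " "
    for user_prompt, bot_response in history[-max_history_turns:]:
        prompt += f"[INST] {user_prompt} [/INST] {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt

print(format_prompt("What changed?", [("Hello", "Hi there!")]))
# -> " [INST] Hello [/INST] Hi there! [INST] What changed? [/INST]"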
 
 def run_llm(
 
     purpose: str,
     **prompt_kwargs: Dict
 ) -> str:
     seed = random.randint(1, 1111111111111111)
+    logging.info(f"Seed: {seed}")
 
     content = PREFIX.format(
+        date_time_str=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
         purpose=purpose,
         safe_search=safe_search,
     ) + prompt_template.format(**prompt_kwargs)
     if VERBOSE:
+        logging.info(LOG_PROMPT.format(content=content))
 
+    client = InferenceClient(model=MODEL_NAME, token=API_KEY)
     resp = client.text_generation(content, max_new_tokens=MAX_TOKENS, stop_sequences=stop_tokens, temperature=TEMPERATURE, top_p=TOP_P, repetition_penalty=REPETITION_PENALTY)
     if VERBOSE:
+        logging.info(LOG_RESPONSE.format(resp=resp))
     return resp
 
 def generate(
 
     top_p: float = TOP_P,
     repetition_penalty: float = REPETITION_PENALTY,
 ) -> str:
     content = PREFIX.format(
+        date_time_str=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+        purpose=f"Generating response as {agent_name}",
         safe_search=safe_search,
+    ) + sys_prompt + "\n" + prompt
     if VERBOSE:
+        logging.info(LOG_PROMPT.format(content=content))
 
+    client = InferenceClient(model=MODEL_NAME, token=API_KEY)
     stream = client.text_generation(content, stream=True, details=True, return_full_text=False, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, max_new_tokens=max_new_tokens)
+    return "".join(chunk.text for chunk in stream)
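One caveat on the new one-line return: when `text_generation` is called with `stream=True, details=True`, huggingface_hub yields detailed stream responses whose text lives at `chunk.token.text` (the access pattern the deleted loop above used), so `chunk.text` is likely to raise `AttributeError` at runtime. A minimal accumulator matching that streaming API, as a sketch:

def collect_stream(stream) -> str:
    # With details=True each chunk is a TextGenerationStreamResponse;
    # the newly generated text for the step is chunk.token.text.
    return "".join(chunk.token.text for chunk in stream)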
 
 def main():
     with gr.Blocks() as demo:
         gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
+        gr.Markdown("### Your AI-Powered Development Companion")
 
+        with gr.Row():
+            with gr.Column(scale=3):
+                chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
+                message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+                submit_button = gr.Button(value="Send")
+
+            with gr.Column(scale=1):
+                purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
+                agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
+                sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+                temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
+                max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum numbers of new tokens")
+                top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+                repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
+
+        with gr.Tabs():
+            with gr.TabItem("Project Explorer"):
+                project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
+                explore_button = gr.Button(value="Explore")
+                project_output = gr.Textbox(label="File Tree", lines=20)
+
+            with gr.TabItem("Code Editor"):
+                code_editor = gr.Code(label="Code Editor", language="python")
+                run_code_button = gr.Button(value="Run Code")
+                code_output = gr.Textbox(label="Code Output", lines=10)
+
+            with gr.TabItem("File Management"):
+                file_list = gr.Dropdown(label="Select File", choices=[], interactive=True)
+                file_content = gr.Textbox(label="File Content", lines=20)
+                save_file_button = gr.Button(value="Save File")
+                create_file_button = gr.Button(value="Create New File")
+                delete_file_button = gr.Button(value="Delete File")
+
+        history = gr.State([])
 
         def chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
             prompt = format_prompt(message, history)
             response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
             history.append((message, response))
 
         submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
 
         def explore_project(project_path: str) -> str:
             try:
                 tree = subprocess.check_output(["tree", project_path]).decode("utf-8")
                 return tree
 
         explore_button.click(explore_project, inputs=[project_path], outputs=[project_output])
 
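`explore_project` shells out to the external `tree` binary, which is not installed in every Space image; when it is missing, `subprocess.check_output` raises `FileNotFoundError` (the `except` branch is elided from this view). A pure-Python fallback sketch; the helper name is illustrative, not part of the commit:

import os

def build_file_tree(project_path: str) -> str:
    # Pure-Python stand-in for the external `tree` command.
    lines = []
    for root, _dirs, files in os.walk(project_path):
        depth = os.path.relpath(root, project_path).count(os.sep)
        lines.append("    " * depth + os.path.basename(root) + "/")
        for name in sorted(files):
            lines.append("    " * (depth + 1) + name)
    return "\n".join(lines)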
+        def run_code(code: str) -> str:
+            try:
+                exec_globals = {}
+                exec(code, exec_globals)
+                output = exec_globals.get('__builtins__', {}).get('print', print)
+                return str(output)
+            except Exception as e:
+                return f"Error running code: {e}"
+
+        run_code_button.click(run_code, inputs=[code_editor], outputs=[code_output])
+
+        def load_file_list(project_path: str) -> List[str]:
+            try:
+                return [f for f in os.listdir(project_path) if os.path.isfile(os.path.join(project_path, f))]
+            except Exception as e:
+                return [f"Error loading file list: {e}"]
+
+        def load_file_content(project_path: str, file_name: str) -> str:
+            try:
+                with open(os.path.join(project_path, file_name), 'r') as file:
+                    return file.read()
+            except Exception as e:
+                return f"Error loading file content: {e}"
+
+        def save_file(project_path: str, file_name: str, content: str) -> str:
+            try:
+                with open(os.path.join(project_path, file_name), 'w') as file:
+                    file.write(content)
+                return f"File {file_name} saved successfully."
+            except Exception as e:
+                return f"Error saving file: {e}"
+
+        def create_file(project_path: str, file_name: str) -> str:
+            try:
+                open(os.path.join(project_path, file_name), 'a').close()
+                return f"File {file_name} created successfully."
+            except Exception as e:
+                return f"Error creating file: {e}"
+
+        def delete_file(project_path: str, file_name: str) -> str:
+            try:
+                os.remove(os.path.join(project_path, file_name))
+                return f"File {file_name} deleted successfully."
+            except Exception as e:
+                return f"Error deleting file: {e}"
+
+        project_path.change(load_file_list, inputs=[project_path], outputs=[file_list])
+        file_list.change(load_file_content, inputs=[project_path, file_list], outputs=[file_content])
+        save_file_button.click(save_file, inputs=[project_path, file_list, file_content], outputs=[gr.Textbox()])
+        create_file_button.click(create_file, inputs=[project_path, gr.Textbox(label="New File Name")], outputs=[gr.Textbox()])
+        delete_file_button.click(delete_file, inputs=[project_path, file_list], outputs=[gr.Textbox()])
+
     demo.launch()
 
 if __name__ == "__main__":
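A closing caution on the new `run_code` handler: `exec_globals.get('__builtins__', {}).get('print', print)` retrieves the built-in `print` function itself rather than anything the snippet printed, so `code_output` will show `<built-in function print>`; and bare `exec` runs user-supplied code unsandboxed in the Space. A sketch that at least captures stdout (hypothetical helper, still not a sandbox):

import contextlib
import io

def run_code_capturing_stdout(code: str) -> str:
    # Capture whatever the executed snippet prints; exec itself remains unsandboxed.
    buffer = io.StringIO()
    try:
        with contextlib.redirect_stdout(buffer):
            exec(code, {})
        return buffer.getvalue()
    except Exception as e:
        return f"Error running code: {e}"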