acecalisto3 commited on
Commit
c4177ba
1 Parent(s): 595a992

Update 1app.py

Browse files
Files changed (1) hide show
  1. 1app.py +429 -154
1app.py CHANGED
@@ -1,156 +1,431 @@
1
- from huggingface_hub import InferenceClient
2
- import gradio as gr
3
- import random
4
- import prompts # Ensure this module is correctly imported
5
-
6
- client = InferenceClient(
7
- "mistralai/Mixtral-8x7B-Instruct-v0.1"
8
- )
9
-
10
def format_prompt(message, history):
    """Build a Mixtral-style instruction prompt: past turns wrapped in [INST] tags, then the new message."""
    pieces = ["<s>"]
    for past_user, past_bot in history:
        pieces.append(f"[INST] {past_user} [/INST]")
        pieces.append(f" {past_bot}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
17
-
18
- agents = [
19
- "WEB_DEV",
20
- "AI_SYSTEM_PROMPT",
21
- "PYTHON_CODE_DEV",
22
- "CODE_REVIEW_ASSISTANT",
23
- "CONTENT_WRITER_EDITOR",
24
- "QUESTION_GENERATOR",
25
- "HUGGINGFACE_FILE_DEV",
26
- ]
27
-
28
- def generate(
29
- prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
30
- ):
31
- seed = random.randint(1, 1111111111111111)
32
-
33
- # Ensure correct agent selection
34
- agent = prompts.WEB_DEV_SYSTEM_PROMPT
35
- if agent_name == "WEB_DEV":
36
- agent = prompts.WEB_DEV_SYSTEM_PROMPT
37
- elif agent_name == "CODE_REVIEW_ASSISTANT":
38
- agent = prompts.CODE_REVIEW_ASSISTANT
39
- elif agent_name == "CONTENT_WRITER_EDITOR":
40
- agent = prompts.CONTENT_WRITER_EDITOR
41
- elif agent_name == "SOCIAL_MEDIA_MANAGER":
42
- agent = prompts.SOCIAL_MEDIA_MANAGER
43
- elif agent_name == "AI_SYSTEM_PROMPT":
44
- agent = prompts.AI_SYSTEM_PROMPT
45
- elif agent_name == "PYTHON_CODE_DEV":
46
- agent = prompts.PYTHON_CODE_DEV
47
- elif agent_name == "QUESTION_GENERATOR":
48
- agent = prompts.QUESTION_GENERATOR
49
- elif agent_name == "HUGGINGFACE_FILE_DEV":
50
- agent = prompts.HUGGINGFACE_FILE_DEV
51
-
52
- system_prompt = agent
53
- temperature = float(temperature)
54
- if temperature < 1e-2:
55
- temperature = 1e-2
56
- top_p = float(top_p)
57
-
58
- generate_kwargs = dict(
59
- temperature=temperature,
60
- max_new_tokens=max_new_tokens,
61
- top_p=top_p,
62
- repetition_penalty=repetition_penalty,
63
- do_sample=True,
64
- seed=seed,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
 
67
- formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
68
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
69
- output = ""
70
-
71
- for response in stream:
72
- output += response.token.text
73
- yield output
74
-
75
- additional_inputs = [
76
- gr.Dropdown(
77
- label="Agents",
78
- choices=[s for s in agents],
79
- value=agents[0],
80
- interactive=True,
81
- ),
82
- gr.Textbox(
83
- label="System Prompt",
84
- max_lines=1,
85
- interactive=True,
86
- ),
87
- gr.Slider(
88
- label="Temperature",
89
- value=0.9,
90
- minimum=0.0,
91
- maximum=1.0,
92
- step=0.05,
93
- interactive=True,
94
- info="Higher values produce more diverse outputs",
95
- ),
96
- gr.Slider(
97
- label="Max new tokens",
98
- value=256,
99
- minimum=0,
100
- maximum=1000,
101
- step=64,
102
- interactive=True,
103
- info="The maximum numbers of new tokens",
104
- ),
105
- gr.Slider(
106
- label="Top-p (nucleus sampling)",
107
- value=0.95,
108
- minimum=0.0,
109
- maximum=1,
110
- step=0.05,
111
- interactive=True,
112
- info="Higher values sample more low-probability tokens",
113
- ),
114
- gr.Slider(
115
- label="Repetition penalty",
116
- value=1.0,
117
- minimum=1.0,
118
- maximum=2.0,
119
- step=0.05,
120
- interactive=True,
121
- info="Penalize repeated tokens",
122
- ),
123
- ]
124
-
125
- examples = [
126
- ["Create a simple web application using Flask", agents[0], "", 0.9, 256, 0.95, 1.0],
127
- ["Generate a Python script to perform a linear regression analysis", agents[2], "", 0.9, 256, 0.95, 1.0],
128
- ["Create a Dockerfile for a Node.js application", agents[1], "", 0.9, 256, 0.95, 1.0],
129
- ["Write a shell script to automate the deployment of a web application to a server", agents[3], "", 0.9, 256, 0.95, 1.0],
130
- ["Generate a SQL query to retrieve the top 10 most popular products by sales", agents[4], "", 0.9, 256, 0.95, 1.0],
131
- ["Write a Python script to generate a random password with a given length and complexity", agents[2], "", 0.9, 256, 0.95, 1.0],
132
- ["Create a simple game in Unity using C#", agents[0], "", 0.9, 256, 0.95, 1.0],
133
- ["Generate a Java program to implement a binary search algorithm", agents[2], "", 0.9, 256, 0.95, 1.0],
134
- ["Write a shell script to monitor the CPU usage of a server", agents[1], "", 0.9, 256, 0.95, 1.0],
135
- ["Create a simple web application using React and Node.js", agents[0], "", 0.9, 256, 0.95, 1.0],
136
- ["Generate a Python script to perform a sentiment analysis on a given text", agents[2], "", 0.9, 256, 0.95, 1.0],
137
- ["Write a shell script to automate the backup of a MySQL database", agents[1], "", 0.9, 256, 0.95, 1.0],
138
- ["Create a simple game in Unreal Engine using C++", agents[3], "", 0.9, 256, 0.95, 1.0],
139
- ["Generate a Java program to implement a bubble sort algorithm", agents[2], "", 0.9, 256, 0.95, 1.0],
140
- ["Write a shell script to monitor the memory usage of a server", agents[1], "", 0.9, 256, 0.95, 1.0],
141
- ["Create a simple web application using Angular and Node.js", agents[0], "", 0.9, 256, 0.95, 1.0],
142
- ["Generate a Python script to perform a text classification on a given dataset", agents[2], "", 0.9, 256, 0.95, 1.0],
143
- ["Write a shell script to automate the installation of a software package on a server", agents[1], "", 0.9, 256, 0.95, 1.0],
144
- ["Create a simple game in Godot using GDScript", agents[3], "", 0.9, 256, 0.95, 1.0],
145
- ["Generate a Java program to implement a merge sort algorithm", agents[2], "", 0.9, 256, 0.95, 1.0],
146
- ["Write a shell script to automate the cleanup of temporary files on a server", agents[1], "", 0.9, 256, 0.95, 1.0],
147
- ]
148
-
149
- gr.ChatInterface(
150
- fn=generate,
151
- chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
152
- additional_inputs=additional_inputs,
153
- title="Mixtral 46.7B",
154
- examples=examples,
155
- concurrency_limit=20,
156
- ).launch(show_api=False)
 
1
import os
import subprocess
import streamlit as st
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import black
from pylint import lint
from io import StringIO

# Set Hugging Face repository URL and project root path
HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/Mistri"
PROJECT_ROOT = "projects"    # local directory holding user-created project folders
AGENT_DIRECTORY = "agents"   # local directory holding saved agent prompt/config files

# Global state for session management. Streamlit re-executes this script on
# every UI interaction, so anything that must persist lives in st.session_state.
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []        # list of (user_input, response) tuples
if 'terminal_history' not in st.session_state:
    st.session_state.terminal_history = []    # list of (command, output) tuples
if 'workspace_projects' not in st.session_state:
    st.session_state.workspace_projects = {}  # project_name -> {"files": [file_name, ...]}
if 'available_agents' not in st.session_state:
    st.session_state.available_agents = []    # agent names saved this session
if 'current_state' not in st.session_state:
    st.session_state.current_state = {
        'toolbox': {},
        'workspace_chat': {}
    }
# NOTE(review): `sys` (used in code_editor_interface) and `openai` (used in
# translate_code / generate_code) are never imported in this file — those
# functions raise NameError at runtime until the imports are added.
28
+
29
+ # Define AIAgent class
30
class AIAgent:
    """An agent with a name, a free-text description, and a list of skills."""

    def __init__(self, name, description, skills):
        self.name = name
        self.description = description
        self.skills = skills

    def create_agent_prompt(self):
        """Render the agent's system prompt, listing each skill as a bullet point."""
        bullet_list = "\n".join(f"* {item}" for item in self.skills)
        return f"""
As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
{bullet_list}
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
"""

    def autonomous_build(self, chat_history, workspace_projects):
        """
        Autonomous build logic based on chat history and workspace projects.

        Returns (summary, next_step) where next_step comes from a Hugging Face
        summarization pipeline run over the combined transcript.
        """
        transcript = "\n".join(f"User: {u}\nAgent: {a}" for u, a in chat_history)
        project_lines = "\n".join(f"{p}: {details}" for p, details in workspace_projects.items())
        summary = "Chat History:\n" + transcript
        summary += "\n\nWorkspace Projects:\n" + project_lines

        # Use a Hugging Face model for more advanced logic (e.g., a summarization model)
        summarizer = pipeline("summarization")
        next_step = summarizer(summary, max_length=50, min_length=25, do_sample=False)[0]['summary_text']

        return summary, next_step
57
+
58
+ # Function to save an agent's prompt to a file and commit to the Hugging Face repository
59
def save_agent_to_file(agent):
    """Saves the agent's prompt to a file locally and then commits to the Hugging Face repository."""
    os.makedirs(AGENT_DIRECTORY, exist_ok=True)
    prompt_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
    config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
    with open(prompt_path, "w") as fh:
        fh.write(agent.create_agent_prompt())
    with open(config_path, "w") as fh:
        fh.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
    st.session_state.available_agents.append(agent.name)

    commit_and_push_changes(f"Add agent {agent.name}")
72
+
73
+ # Function to load an agent's prompt from a file
74
def load_agent_prompt(agent_name):
    """Loads an agent prompt from a file; returns None when no file exists for the name."""
    file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
    if not os.path.exists(file_path):
        return None
    with open(file_path, "r") as fh:
        return fh.read()
83
+
84
+ # Function to create an agent from text input
85
def create_agent_from_text(name, text):
    """Create an AIAgent whose skills are the lines of *text*, persist it, and return its prompt."""
    agent = AIAgent(name, "AI agent created from text input.", text.split('\n'))
    save_agent_to_file(agent)
    return agent.create_agent_prompt()
90
+
91
+ # Chat interface using a selected agent
92
def chat_interface_with_agent(input_text, agent_name):
    """Generate a GPT-2 completion for *input_text*, primed with the saved prompt of *agent_name*.

    Returns an error string (rather than raising) when the agent file is missing
    or the model cannot be loaded.

    Fix over the original: a `pipeline("text-generation", ...)` object was built
    and never used — it wrapped the already-loaded model a second time on every
    call for nothing; removed.
    """
    agent_prompt = load_agent_prompt(agent_name)
    if agent_prompt is None:
        return f"Agent {agent_name} not found."

    # Load GPT-2 model (NOTE(review): reloaded on every call — consider caching).
    model_name = "gpt2"
    try:
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except EnvironmentError as e:
        return f"Error loading model: {e}"

    # Combine agent prompt with user input
    combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"

    # Truncate input for the model length limit, leaving headroom for the
    # 50 generated tokens.
    max_input_length = 900
    input_ids = tokenizer.encode(combined_input, return_tensors="pt")
    if input_ids.shape[1] > max_input_length:
        input_ids = input_ids[:, :max_input_length]

    # Generate chatbot response
    outputs = model.generate(
        input_ids,
        max_new_tokens=50,
        num_return_sequences=1,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; use EOS
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
121
+
122
+ # Workspace interface for creating projects
123
def workspace_interface(project_name):
    """Create a project directory under PROJECT_ROOT, register it in session state, and commit."""
    project_path = os.path.join(PROJECT_ROOT, project_name)
    os.makedirs(PROJECT_ROOT, exist_ok=True)
    if os.path.exists(project_path):
        return f"Project {project_name} already exists."
    os.makedirs(project_path)
    st.session_state.workspace_projects[project_name] = {"files": []}
    st.session_state.current_state['workspace_chat']['project_name'] = project_name
    commit_and_push_changes(f"Create project {project_name}")
    return f"Project {project_name} created successfully."
135
+
136
+ # Function to add code to the workspace
137
def add_code_to_workspace(project_name, code, file_name):
    """Write *code* into *file_name* of an existing project, record it, and commit."""
    project_path = os.path.join(PROJECT_ROOT, project_name)
    if not os.path.exists(project_path):
        return f"Project {project_name} does not exist."
    with open(os.path.join(project_path, file_name), "w") as fh:
        fh.write(code)
    st.session_state.workspace_projects[project_name]["files"].append(file_name)
    st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
    commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
    return f"Code added to {file_name} in project {project_name} successfully."
149
+
150
+ # Terminal interface with optional project context
151
def terminal_interface(command, project_name=None):
    """Run *command* in a shell — inside the project directory when *project_name* is given —
    cache the output in session state, and return stdout on success / stderr on failure.

    NOTE(review): shell=True deliberately executes raw user input; acceptable only
    because this tool is explicitly a terminal.
    """
    run_kwargs = dict(shell=True, capture_output=True, text=True)
    if project_name:
        project_path = os.path.join(PROJECT_ROOT, project_name)
        if not os.path.exists(project_path):
            return f"Project {project_name} does not exist."
        run_kwargs["cwd"] = project_path
    result = subprocess.run(command, **run_kwargs)
    output = result.stdout if result.returncode == 0 else result.stderr
    st.session_state.current_state['toolbox']['terminal_output'] = output
    return output
165
+
166
+ # Code editor interface for formatting and linting
167
def code_editor_interface(code):
    """Format *code* with black and lint it with pylint; return (formatted_code, lint_message).

    Fixes over the original:
    - `sys` was used without being imported anywhere in the file (NameError);
    - stdout/stderr were redirected to a StringIO and "restored" with
      `sys.stdout = sys.stdout` *after* the reassignment — a no-op that left the
      process's stdout permanently hijacked;
    - `lint.py_run` takes command-line arguments (i.e. a file path), not raw
      source text, so the code is written to a temporary file first.
    With return_std=True, py_run captures its own output, so no manual
    redirection is needed at all.
    """
    import sys
    import tempfile

    try:
        formatted_code = black.format_str(code, mode=black.FileMode())
    except black.NothingChanged:
        formatted_code = code

    # pylint wants a file path on its command line — write the source out first.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(code)
        tmp_path = tmp.name
    try:
        (pylint_stdout, pylint_stderr) = lint.py_run(tmp_path, return_std=True)
        lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()
    finally:
        os.remove(tmp_path)

    st.session_state.current_state['toolbox']['formatted_code'] = formatted_code
    st.session_state.current_state['toolbox']['lint_message'] = lint_message
    return formatted_code, lint_message
182
+
183
+ # Function to summarize text using a summarization pipeline
184
def summarize_text(text):
    """Summarize *text* with a Hugging Face summarization pipeline; cache and return the summary."""
    summarizer = pipeline("summarization")
    summary_text = summarizer(text, max_length=50, min_length=25, do_sample=False)[0]['summary_text']
    st.session_state.current_state['toolbox']['summary'] = summary_text
    return summary_text
189
+
190
+ # Function to perform sentiment analysis using a sentiment analysis pipeline
191
def sentiment_analysis(text):
    """Run a sentiment-analysis pipeline on *text*; cache and return the first result dict."""
    analyzer = pipeline("sentiment-analysis")
    first_result = analyzer(text)[0]
    st.session_state.current_state['toolbox']['sentiment'] = first_result
    return first_result
196
+
197
+ # Function to translate code using the OpenAI API
198
def translate_code(code, input_language, output_language):
    """Translate *code* from *input_language* to *output_language* via the OpenAI chat API.

    Raises ValueError for languages outside the supported set.

    Fixes over the original:
    - `language_extensions` was left empty ("ignore the specific languages right
      now"), so EVERY call raised ValueError before reaching the API — populated
      with a basic mapping;
    - the translated code was extracted from the response twice — deduplicated.
    NOTE(review): `openai` is used but never imported in this file; add
    `import openai` (plus API-key configuration) for this to run.
    """
    # Map supported languages to file extensions; doubles as the whitelist of
    # languages this function accepts.
    language_extensions = {
        "Python": ".py",
        "JavaScript": ".js",
        "TypeScript": ".ts",
        "Java": ".java",
        "C": ".c",
        "C++": ".cpp",
        "C#": ".cs",
        "Go": ".go",
        "Rust": ".rs",
        "Ruby": ".rb",
        "PHP": ".php",
        "Shell": ".sh",
    }

    # Handle edge cases: invalid input and unsupported programming languages.
    if input_language not in language_extensions:
        raise ValueError(f"Invalid input language: {input_language}")
    if output_language not in language_extensions:
        raise ValueError(f"Invalid output language: {output_language}")

    # Extensions are currently informational only (kept for future file handling).
    input_extension = language_extensions[input_language]
    output_extension = language_extensions[output_language]

    # Translate the code using the OpenAI API
    prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": "You are an expert software developer."},
            {"role": "user", "content": prompt}
        ]
    )
    translated_code = response.choices[0].message['content'].strip()
    st.session_state.current_state['toolbox']['translated_code'] = translated_code
    return translated_code
229
+
230
+ # Function to generate code based on a code idea using the OpenAI API
231
def generate_code(code_idea):
    """Ask the OpenAI chat API for a Python snippet implementing *code_idea*; cache and return it.

    NOTE(review): `openai` is never imported in this file — this raises NameError
    at runtime until `import openai` is added.
    """
    chat_messages = [
        {"role": "system", "content": "You are an expert software developer."},
        {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
    ]
    response = openai.ChatCompletion.create(model="gpt-4", messages=chat_messages)
    generated_code = response.choices[0].message['content'].strip()
    st.session_state.current_state['toolbox']['generated_code'] = generated_code
    return generated_code
242
+
243
+ # Function to commit and push changes to the Hugging Face repository
244
def commit_and_push_changes(commit_message):
    """Commits and pushes changes to the Hugging Face repository.

    Stops at the first failing command and reports it via st.error.

    Fix over the original: the commit message was interpolated into a shell
    string (`git commit -m '{msg}'` with shell=True), which broke on messages
    containing single quotes and allowed shell injection. Argument lists with
    shell=False pass the message through verbatim and safely.
    """
    commands = [
        ["git", "add", "."],
        ["git", "commit", "-m", commit_message],
        ["git", "push"],
    ]
    for command in commands:
        result = subprocess.run(command, capture_output=True, text=True)
        if result.returncode != 0:
            st.error(f"Error executing command '{' '.join(command)}': {result.stderr}")
            break
256
+
257
# Streamlit App
st.title("AI Agent Creator")

# Sidebar navigation — the selection decides which of the three top-level
# pages (if/elif branches below) renders on this script run.
st.sidebar.title("Navigation")
app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
263
+
264
# AI Agent Creator page: build an agent from a name plus newline-separated skills.
if app_mode == "AI Agent Creator":
    st.header("Create an AI Agent from Text")

    st.subheader("From Text")
    agent_name = st.text_input("Enter agent name:")
    text_input = st.text_area("Enter skills (one per line):")
    if st.button("Create Agent"):
        agent_prompt = create_agent_from_text(agent_name, text_input)
        st.success(f"Agent '{agent_name}' created and saved successfully.")
        # NOTE(review): save_agent_to_file (called inside create_agent_from_text)
        # already appends agent.name to available_agents — this second append
        # duplicates the entry.
        st.session_state.available_agents.append(agent_name)
275
+
276
# Tool Box page: a grab-bag of AI-powered utilities (chat, terminal, formatter,
# summarizer, sentiment, translation, code generation).
elif app_mode == "Tool Box":
    st.header("AI-Powered Tools")

    # Chat Interface — "@agent_name message" routes to a saved agent.
    st.subheader("Chat with CodeCraft")
    chat_input = st.text_area("Enter your message:")
    if st.button("Send"):
        if chat_input.startswith("@"):
            agent_name = chat_input.split(" ")[0][1:]  # Extract agent_name from @agent_name
            chat_input = " ".join(chat_input.split(" ")[1:])  # Remove agent_name from input
            chat_response = chat_interface_with_agent(chat_input, agent_name)
        else:
            # NOTE(review): chat_interface is not defined anywhere in this file —
            # this branch raises NameError at runtime.
            chat_response = chat_interface(chat_input)
        st.session_state.chat_history.append((chat_input, chat_response))
        st.write(f"CodeCraft: {chat_response}")

    # Terminal Interface (no project context here — see Workspace page for cwd-scoped runs)
    st.subheader("Terminal")
    terminal_input = st.text_input("Enter a command:")
    if st.button("Run"):
        terminal_output = terminal_interface(terminal_input)
        st.session_state.terminal_history.append((terminal_input, terminal_output))
        st.code(terminal_output, language="bash")

    # Code Editor Interface — black formatting + pylint report
    st.subheader("Code Editor")
    code_editor = st.text_area("Write your code:", height=300)
    if st.button("Format & Lint"):
        formatted_code, lint_message = code_editor_interface(code_editor)
        st.code(formatted_code, language="python")
        st.info(lint_message)

    # Text Summarization Tool
    st.subheader("Summarize Text")
    text_to_summarize = st.text_area("Enter text to summarize:")
    if st.button("Summarize"):
        summary = summarize_text(text_to_summarize)
        st.write(f"Summary: {summary}")

    # Sentiment Analysis Tool
    st.subheader("Sentiment Analysis")
    sentiment_text = st.text_area("Enter text for sentiment analysis:")
    if st.button("Analyze Sentiment"):
        sentiment = sentiment_analysis(sentiment_text)
        st.write(f"Sentiment: {sentiment}")

    # Text Translation Tool (Code Translation)
    st.subheader("Translate Code")
    code_to_translate = st.text_area("Enter code to translate:")
    source_language = st.text_input("Enter source language (e.g. 'Python'):")
    target_language = st.text_input("Enter target language (e.g. 'JavaScript'):")
    if st.button("Translate Code"):
        # Use a Hugging Face translation model
        # NOTE(review): "opus-mt-en-es" is an English→Spanish *natural language*
        # model by its name, regardless of target_language — presumably not what
        # "translate code" intends; verify the model choice and target_lang kwarg.
        translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")
        translated_code = translator(code_to_translate, target_lang=target_language)[0]['translation_text']
        st.code(translated_code, language=target_language.lower())

    # Code Generation
    st.subheader("Code Generation")
    code_idea = st.text_input("Enter your code idea:")
    if st.button("Generate Code"):
        # Use a Hugging Face code generation model
        generator = pipeline("text-generation", model="bigscience/T0_3B")
        generated_code = generator(code_idea, max_length=100, num_return_sequences=1, do_sample=True)[0]['generated_text']
        st.code(generated_code, language="python")

    # Display Preset Commands — documentation strings only; these are not executed.
    st.subheader("Preset Commands")
    preset_commands = {
        "Create a new project": "create_project('project_name')",
        "Add code to workspace": "add_code_to_workspace('project_name', 'code', 'file_name')",
        "Run terminal command": "terminal_interface('command', 'project_name')",
        "Generate code": "generate_code('code_idea')",
        "Summarize text": "summarize_text('text')",
        "Analyze sentiment": "sentiment_analysis('text')",
        "Translate code": "translate_code('code', 'source_language', 'target_language')",
    }
    for command_name, command in preset_commands.items():
        st.write(f"{command_name}: `{command}`")
356
+
357
# Workspace Chat App page: project creation, file addition, project-scoped
# terminal, chat, histories, and the "autonomous build" action.
elif app_mode == "Workspace Chat App":
    st.header("Workspace Chat App")

    # Project Workspace Creation
    st.subheader("Create a New Project")
    project_name = st.text_input("Enter project name:")
    if st.button("Create Project"):
        workspace_status = workspace_interface(project_name)
        st.success(workspace_status)

    # Add Code to Workspace (uses project_name from the field above)
    st.subheader("Add Code to Workspace")
    code_to_add = st.text_area("Enter code to add to workspace:")
    file_name = st.text_input("Enter file name (e.g. 'app.py'):")
    if st.button("Add Code"):
        add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
        st.success(add_code_status)

    # Terminal Interface with Project Context (cwd = the project directory)
    st.subheader("Terminal (Workspace Context)")
    terminal_input = st.text_input("Enter a command within the workspace:")
    if st.button("Run Command"):
        terminal_output = terminal_interface(terminal_input, project_name)
        st.code(terminal_output, language="bash")

    # Chat Interface for Guidance
    st.subheader("Chat with CodeCraft for Guidance")
    chat_input = st.text_area("Enter your message for guidance:")
    if st.button("Get Guidance"):
        # NOTE(review): chat_interface is not defined anywhere in this file —
        # pressing this button raises NameError at runtime.
        chat_response = chat_interface(chat_input)
        st.session_state.chat_history.append((chat_input, chat_response))
        st.write(f"CodeCraft: {chat_response}")

    # Display Chat History
    st.subheader("Chat History")
    for user_input, response in st.session_state.chat_history:
        st.write(f"User: {user_input}")
        st.write(f"CodeCraft: {response}")

    # Display Terminal History
    st.subheader("Terminal History")
    for command, output in st.session_state.terminal_history:
        st.write(f"Command: {command}")
        st.code(output, language="bash")

    # Display Projects and Files
    st.subheader("Workspace Projects")
    for project, details in st.session_state.workspace_projects.items():
        st.write(f"Project: {project}")
        for file in details['files']:
            st.write(f" - {file}")

    # Chat with AI Agents
    st.subheader("Chat with AI Agents")
    selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
    agent_chat_input = st.text_area("Enter your message for the agent:")
    if st.button("Send to Agent"):
        agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
        st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
        st.write(f"{selected_agent}: {agent_chat_response}")

    # Automate Build Process — builds a bare agent (no skills) and runs its
    # autonomous_build over the accumulated histories.
    st.subheader("Automate Build Process")
    if st.button("Automate"):
        agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
        summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
        st.write("Autonomous Build Summary:")
        st.write(summary)
        st.write("Next Step:")
        st.write(next_step)
428
 
429
# Display current state for debugging — rendered on every run, in all app modes.
st.sidebar.subheader("Current State")
st.sidebar.json(st.session_state.current_state)