acecalisto3 committed
Commit 550c01d
Parent: 8cf43f3

Update app.py

Files changed (1)
app.py +65 -67
app.py CHANGED
@@ -1,83 +1,81 @@
  from huggingface_hub import InferenceClient
- from huggingface_hub import HfApi
  import gradio as gr
  import random
  import prompts
-
  client = InferenceClient(
      "mistralai/Mixtral-8x7B-Instruct-v0.1"
  )

- # Initialize the Hugging Face API
- hf_api = HfApi()
-
  def format_prompt(message, history):
-     prompt = " "
      for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/
- Replit
-
- ]" prompt += f" {bot_response}[" prompt += f"[INST] {message} [/```]" return prompt
-
- agents =[ "WEB_DEV", "AI_SYSTEM_PROMPT", "PYTHON_CODE_DEV", "CODE_REVIEW_ASSISTANT", "CONTENT_WRITER_EDITOR", "SOCIAL_MEDIA_MANAGER", "MEME_GENERATOR", "QUESTION_GENERATOR", "IMAGE_GENERATOR", "HUGGINGFACE_FILE_DEV",

  ]

- def generate( prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, ): seed = random.randint(1,1111111111111111)
-
-     agent=prompts.WEB_DEV
-     if agent_name == "WEB_DEV":
-         agent = prompts.WEB_DEV_SYSTEM_PROMPT
-     if agent_name == "CODE_REVIEW_ASSISTANT":
-         agent = prompts.CODE_REVIEW_ASSISTANT
-     if agent_name == "CONTENT_WRITER_EDITOR":
-         agent = prompts.CONTENT_WRITER_EDITOR
-     if agent_name == "SOCIAL_MEDIA_MANAGER":
-         agent = prompts.SOCIAL_MEDIA_MANAGER
-     if agent_name == "AI_SYSTEM_PROMPT":
-         agent = prompts.AI_SYSTEM_PROMPT
-     if agent_name == "PYTHON_CODE_DEV":
-         agent = prompts.PYTHON_CODE_DEV
-     if agent_name == "MEME_GENERATOR":
-         agent = prompts.MEME_GENERATOR
-     if agent_name == "QUESTION_GENERATOR":
-         agent = prompts.QUESTION_GENERATOR
-     if agent_name == "IMAGE_GENERATOR":
-         agent = prompts.IMAGE_GENERATOR
-     if agent_name == "HUGGINGFACE_FILE_DEV":
-         agent = prompts.HUGGINGFACE_FILE_DEV
-     system_prompt=agent
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=seed,
-     )
-
-     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     output = ""
-
-     for response in stream:
-         output += response.token.text
-         yield output
-
-     # Send the generated text to the ai-app-factory space for further processing
-     ai_app_factory_url = "https://huggingface.co/spaces/acecalisto3/ai-app-factory"
-     ai_app_factory_response = hf_api.create_call(ai_app_factory_url, data={"text": output})
-
-     # Extract the processed text from the response
-     processed_text = ai_app_factory_response['response']['content']
-
-     # Return the processed text
-     return processed_text

  additional_inputs=[
      gr.Dropdown(
 
  from huggingface_hub import InferenceClient
  import gradio as gr
  import random
  import prompts
  client = InferenceClient(
      "mistralai/Mixtral-8x7B-Instruct-v0.1"
  )

  def format_prompt(message, history):
+     prompt = "<s>"
      for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
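
For reference, the updated format_prompt builds a Mixtral-style instruction string: after an opening <s> token, each prior turn becomes an [INST] user message [/INST] block followed by the bot response and </s>, and the new message is appended as a final [INST] block. A minimal standalone sketch (the example turns are hypothetical) shows the string it produces:

    # Standalone sketch of the updated format_prompt; example history is made up.
    def format_prompt(message, history):
        prompt = "<s>"
        for user_prompt, bot_response in history:
            prompt += f"[INST] {user_prompt} [/INST]"
            prompt += f" {bot_response}</s> "
        prompt += f"[INST] {message} [/INST]"
        return prompt

    history = [("Hi", "Hello! How can I help?")]  # hypothetical prior turn
    print(format_prompt("Write a haiku about Gradio", history))
    # <s>[INST] Hi [/INST] Hello! How can I help?</s> [INST] Write a haiku about Gradio [/INST]
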
+ agents =[
+     "WEB_DEV",
+     "AI_SYSTEM_PROMPT",
+     "PYTHON_CODE_DEV",
+     "CODE_REVIEW_ASSISTANT",
+     "CONTENT_WRITER_EDITOR",
+     "SOCIAL_MEDIA_MANAGER",
+     "MEME_GENERATOR",
+     "QUESTION_GENERATOR",
+     "IMAGE_GENERATOR",
+     "HUGGINGFACE_FILE_DEV",

  ]
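
Each name in this list is mapped to a system prompt from the prompts module by a chain of if statements inside generate() below. A minimal sketch of an equivalent dictionary lookup, assuming prompts exposes exactly the constants the if-chain references (this helper is hypothetical and not part of the commit):

    import prompts  # the Space's local prompts.py; only available inside this repo

    # Hypothetical helper mirroring the if-chain in generate() below.
    AGENT_PROMPTS = {
        "WEB_DEV": prompts.WEB_DEV_SYSTEM_PROMPT,
        "AI_SYSTEM_PROMPT": prompts.AI_SYSTEM_PROMPT,
        "PYTHON_CODE_DEV": prompts.PYTHON_CODE_DEV,
        "CODE_REVIEW_ASSISTANT": prompts.CODE_REVIEW_ASSISTANT,
        "CONTENT_WRITER_EDITOR": prompts.CONTENT_WRITER_EDITOR,
        "SOCIAL_MEDIA_MANAGER": prompts.SOCIAL_MEDIA_MANAGER,
        "MEME_GENERATOR": prompts.MEME_GENERATOR,
        "QUESTION_GENERATOR": prompts.QUESTION_GENERATOR,
        "IMAGE_GENERATOR": prompts.IMAGE_GENERATOR,
        "HUGGINGFACE_FILE_DEV": prompts.HUGGINGFACE_FILE_DEV,
    }

    def pick_agent_prompt(agent_name):
        # Same fallback as generate(): unknown names get prompts.WEB_DEV.
        return AGENT_PROMPTS.get(agent_name, prompts.WEB_DEV)
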
+ def generate(
+     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+ ):
+     seed = random.randint(1,1111111111111111)
+
+     agent=prompts.WEB_DEV
+     if agent_name == "WEB_DEV":
+         agent = prompts.WEB_DEV_SYSTEM_PROMPT
+     if agent_name == "CODE_REVIEW_ASSISTANT":
+         agent = prompts.CODE_REVIEW_ASSISTANT
+     if agent_name == "CONTENT_WRITER_EDITOR":
+         agent = prompts.CONTENT_WRITER_EDITOR
+     if agent_name == "SOCIAL_MEDIA_MANAGER":
+         agent = prompts.SOCIAL_MEDIA_MANAGER
+     if agent_name == "AI_SYSTEM_PROMPT":
+         agent = prompts.AI_SYSTEM_PROMPT
+     if agent_name == "PYTHON_CODE_DEV":
+         agent = prompts.PYTHON_CODE_DEV
+     if agent_name == "MEME_GENERATOR":
+         agent = prompts.MEME_GENERATOR
+     if agent_name == "QUESTION_GENERATOR":
+         agent = prompts.QUESTION_GENERATOR
+     if agent_name == "IMAGE_GENERATOR":
+         agent = prompts.IMAGE_GENERATOR
+     if agent_name == "HUGGINGFACE_FILE_DEV":
+         agent = prompts.HUGGINGFACE_FILE_DEV
+     system_prompt=agent
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=seed,
+     )
+
+     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
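
Because client.text_generation is called with stream=True and details=True, generate() is a generator: each streamed chunk carries one token in chunk.token.text, and the growing output string is yielded so Gradio can render partial text as it arrives. A rough standalone usage sketch of the same streaming call, not part of the commit, assuming huggingface_hub is installed and a Hugging Face token is available in the environment (the prompt below is hypothetical):

    from huggingface_hub import InferenceClient

    client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

    stream = client.text_generation(
        "<s>[INST] Say hello in one short sentence. [/INST]",  # hypothetical prompt
        max_new_tokens=64,
        temperature=0.9,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        seed=42,
        stream=True,
        details=True,
        return_full_text=False,
    )

    output = ""
    for chunk in stream:
        output += chunk.token.text  # each chunk carries one generated token
    print(output)
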

  additional_inputs=[
      gr.Dropdown(