Commit 12afc5d by akhaliq
Parent: eb6ddbd

add experimental tab

Files changed (2):
  1. app.py +4 -0
  2. app_experimental.py +162 -88
app.py CHANGED
@@ -16,8 +16,11 @@ from app_together import demo as demo_together
 from app_xai import demo as demo_grok
 from app_flux import demo as demo_flux
 from app_ltx_video import demo as demo_ltx_video
+from app_experimental import demo as demo_experimental
 
 with gr.Blocks(fill_height=True) as demo:
+    with gr.Tab("Experimental"):
+        demo_experimental.render()
     with gr.Tab("Meta Llama"):
         demo_sambanova.render()
         gr.Markdown(
@@ -60,6 +63,7 @@ with gr.Blocks(fill_height=True) as demo:
         demo_nvidia.render()
     with gr.Tab("Flux"):
         demo_flux.render()
+
 
 
 if __name__ == "__main__":
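Note: the new tab reuses the composition pattern already used throughout app.py — a gr.Blocks demo defined in another module is mounted into the parent app by calling .render() inside a gr.Tab context. A minimal self-contained sketch of that pattern (the two inner demos are hypothetical stand-ins, not apps from this repo):

    import gradio as gr

    # Sub-apps are ordinary Blocks built ahead of time...
    with gr.Blocks() as demo_a:
        gr.Markdown("Sub-app A")

    with gr.Blocks() as demo_b:
        gr.Markdown("Sub-app B")

    # ...and the parent app mounts each one inside its own tab.
    with gr.Blocks(fill_height=True) as demo:
        with gr.Tab("A"):
            demo_a.render()
        with gr.Tab("B"):
            demo_b.render()

    if __name__ == "__main__":
        demo.launch()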
app_experimental.py CHANGED
@@ -1,48 +1,31 @@
 import os
 import gradio as gr
-from typing import List, Dict
+from typing import List, Dict, Callable
 import random
-import time
-from utils import get_app
-
-# Import all the model registries (keeping existing imports)
-import anthropic_gradio
-import cerebras_gradio
-import dashscope_gradio
-import fireworks_gradio
-import gemini_gradio
-import groq_gradio
-import hyperbolic_gradio
-import mistral_gradio
-import nvidia_gradio
-import openai_gradio
-import perplexity_gradio
-import sambanova_gradio
-import together_gradio
-import xai_gradio
-
-# Define MODEL_REGISTRIES dictionary
-MODEL_REGISTRIES = {
-    "OpenAI": (openai_gradio.registry, os.getenv("OPENAI_API_KEY")),
-    "Anthropic": (anthropic_gradio.registry, os.getenv("ANTHROPIC_API_KEY")),
-    "Cerebras": (cerebras_gradio, os.getenv("CEREBRAS_API_KEY")),
-    "DashScope": (dashscope_gradio, os.getenv("DASHSCOPE_API_KEY")),
-    "Fireworks": (fireworks_gradio, os.getenv("FIREWORKS_API_KEY")),
-    "Gemini": (gemini_gradio, os.getenv("GEMINI_API_KEY")),
-    "Groq": (groq_gradio, os.getenv("GROQ_API_KEY")),
-    "Hyperbolic": (hyperbolic_gradio, os.getenv("HYPERBOLIC_API_KEY")),
-    "Mistral": (mistral_gradio, os.getenv("MISTRAL_API_KEY")),
-    "NVIDIA": (nvidia_gradio, os.getenv("NVIDIA_API_KEY")),
-    "SambaNova": (sambanova_gradio, os.getenv("SAMBANOVA_API_KEY")),
-    "Together": (together_gradio, os.getenv("TOGETHER_API_KEY")),
-    "XAI": (xai_gradio, os.getenv("XAI_API_KEY")),
-}
+import google.generativeai as genai
+from anthropic import Anthropic
+import openai
+from openai import OpenAI  # Add explicit OpenAI import
 
 def get_all_models():
     """Get all available models from the registries."""
     return [
-        "OpenAI: gpt-4o",  # From app_openai.py
-        "Anthropic: claude-3-5-sonnet-20241022",  # From app_claude.py
+        "SambaNova: Meta-Llama-3.2-1B-Instruct",
+        "SambaNova: Meta-Llama-3.2-3B-Instruct",
+        "SambaNova: Llama-3.2-11B-Vision-Instruct",
+        "SambaNova: Llama-3.2-90B-Vision-Instruct",
+        "SambaNova: Meta-Llama-3.1-8B-Instruct",
+        "SambaNova: Meta-Llama-3.1-70B-Instruct",
+        "SambaNova: Meta-Llama-3.1-405B-Instruct",
+        "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
+        "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
+        "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
+        "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
     ]
 
 def generate_discussion_prompt(original_question: str, previous_responses: List[str]) -> str:
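Every entry above follows the "Provider: model-id" convention; the consensus code later recovers the two halves with a single split on the first ": ". A small illustration using a value from the list:

    # Split only on the first ": ", since model ids can themselves
    # contain slashes and dashes (but not ": ").
    model = "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"
    provider, model_name = model.split(": ", 1)
    assert provider == "Hyperbolic"
    assert model_name == "Qwen/Qwen2.5-Coder-32B-Instruct"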
 
@@ -85,17 +68,66 @@ def chat_with_openai(model: str, messages: List[Dict], api_key: str) -> str:
     )
     return response.choices[0].message.content
 
-def chat_with_anthropic(model: str, messages: List[Dict], api_key: str) -> str:
-    from anthropic import Anthropic
+def chat_with_anthropic(messages: List[Dict], api_key: str) -> str:
+    """Chat with Anthropic's Claude model."""
     client = Anthropic(api_key=api_key)
-    # Convert messages to Anthropic format
-    prompt = "\n\n".join([f"{m['role']}: {m['content']}" for m in messages])
     response = client.messages.create(
-        model=model,
-        messages=[{"role": "user", "content": prompt}]
+        model="claude-3-sonnet-20240229",
+        messages=messages,
+        max_tokens=1024
     )
     return response.content[0].text
 
+def chat_with_gemini(messages: List[Dict], api_key: str) -> str:
+    """Chat with Gemini Pro model."""
+    genai.configure(api_key=api_key)
+    model = genai.GenerativeModel('gemini-pro')
+
+    # Convert messages to Gemini format
+    gemini_messages = []
+    for msg in messages:
+        role = "user" if msg["role"] == "user" else "model"
+        gemini_messages.append({"role": role, "parts": [msg["content"]]})
+
+    response = model.generate_content([m["parts"][0] for m in gemini_messages])
+    return response.text
+
+def chat_with_sambanova(messages: List[Dict], api_key: str, model_name: str = "Llama-3.2-90B-Vision-Instruct") -> str:
+    """Chat with SambaNova's models using their OpenAI-compatible API."""
+    client = openai.OpenAI(
+        api_key=api_key,
+        base_url="https://api.sambanova.ai/v1",
+    )
+
+    response = client.chat.completions.create(
+        model=model_name,  # Use the specific model name passed in
+        messages=messages,
+        temperature=0.1,
+        top_p=0.1
+    )
+    return response.choices[0].message.content
+
+def chat_with_hyperbolic(messages: List[Dict], api_key: str, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct") -> str:
+    """Chat with Hyperbolic's models using their OpenAI-compatible API."""
+    client = OpenAI(
+        api_key=api_key,
+        base_url="https://api.hyperbolic.xyz/v1"
+    )
+
+    # Add system message to the start of the messages list
+    full_messages = [
+        {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
+        *messages
+    ]
+
+    response = client.chat.completions.create(
+        model=model_name,  # Use the specific model name passed in
+        messages=full_messages,
+        temperature=0.7,
+        max_tokens=1024,
+    )
+    return response.choices[0].message.content
+
 def multi_model_consensus(
     question: str,
     selected_models: List[str],
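chat_with_sambanova and chat_with_hyperbolic differ only in base_url, default model, and sampling settings: both providers expose OpenAI-compatible endpoints, so the same client works for either. A possible consolidation, sketched here rather than part of the commit (the two base URLs are the ones used above; the generic helper name is hypothetical):

    from typing import Dict, List
    from openai import OpenAI

    def chat_with_openai_compatible(
        messages: List[Dict],
        api_key: str,
        base_url: str,    # "https://api.sambanova.ai/v1" or "https://api.hyperbolic.xyz/v1"
        model_name: str,
        **sampling,       # temperature, top_p, max_tokens, ...
    ) -> str:
        """One helper for any provider that speaks the OpenAI chat API."""
        client = OpenAI(api_key=api_key, base_url=base_url)
        response = client.chat.completions.create(
            model=model_name, messages=messages, **sampling
        )
        return response.choices[0].message.content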
 
@@ -113,31 +145,34 @@ def multi_model_consensus(
     initial_responses = []
     for i, model in enumerate(selected_models):
         provider, model_name = model.split(": ", 1)
-        registry_fn, api_key = MODEL_REGISTRIES[provider]
 
-        if not api_key:
-            continue
-
         try:
-            # Load the model using the registry function
-            predictor = gr.load(
-                name=model_name,
-                src=registry_fn,
-                token=api_key
-            )
-
-            # Format the request based on the provider
             if provider == "Anthropic":
-                response = predictor.predict(
+                api_key = os.getenv("ANTHROPIC_API_KEY")
+                response = chat_with_anthropic(
                     messages=[{"role": "user", "content": question}],
-                    max_tokens=1024,
-                    model=model_name,
-                    api_name="chat"
+                    api_key=api_key
+                )
+            elif provider == "SambaNova":
+                api_key = os.getenv("SAMBANOVA_API_KEY")
+                response = chat_with_sambanova(
+                    messages=[
+                        {"role": "system", "content": "You are a helpful assistant"},
+                        {"role": "user", "content": question}
+                    ],
+                    api_key=api_key
                 )
-            else:
-                response = predictor.predict(
-                    question,
-                    api_name="chat"
+            elif provider == "Hyperbolic":  # Add Hyperbolic case
+                api_key = os.getenv("HYPERBOLIC_API_KEY")
+                response = chat_with_hyperbolic(
+                    messages=[{"role": "user", "content": question}],
+                    api_key=api_key
+                )
+            else:  # Gemini
+                api_key = os.getenv("GEMINI_API_KEY")
+                response = chat_with_gemini(
+                    messages=[{"role": "user", "content": question}],
+                    api_key=api_key
                 )
 
             initial_responses.append(f"{model}: {response}")
 
@@ -154,38 +189,77 @@
         random.shuffle(selected_models)  # Randomize order each round
         for model in selected_models:
             provider, model_name = model.split(": ", 1)
-            registry, api_key = MODEL_REGISTRIES[provider]
 
-            if not api_key:
-                continue
-
             try:
                 discussion_prompt = generate_discussion_prompt(question, discussion_history)
-                response = registry.chat(
-                    model=model_name,
-                    messages=[{"role": "user", "content": discussion_prompt}],
-                    api_key=api_key
-                )
+                if provider == "Anthropic":
+                    api_key = os.getenv("ANTHROPIC_API_KEY")
+                    response = chat_with_anthropic(
+                        messages=[{"role": "user", "content": discussion_prompt}],
+                        api_key=api_key
+                    )
+                elif provider == "SambaNova":
+                    api_key = os.getenv("SAMBANOVA_API_KEY")
+                    response = chat_with_sambanova(
+                        messages=[
+                            {"role": "system", "content": "You are a helpful assistant"},
+                            {"role": "user", "content": discussion_prompt}
+                        ],
+                        api_key=api_key
+                    )
+                elif provider == "Hyperbolic":  # Add Hyperbolic case
+                    api_key = os.getenv("HYPERBOLIC_API_KEY")
+                    response = chat_with_hyperbolic(
+                        messages=[{"role": "user", "content": discussion_prompt}],
+                        api_key=api_key
+                    )
+                else:  # Gemini
+                    api_key = os.getenv("GEMINI_API_KEY")
+                    response = chat_with_gemini(
+                        messages=[{"role": "user", "content": discussion_prompt}],
+                        api_key=api_key
+                    )
+
                 round_responses.append(f"{model}: {response}")
                 discussion_history.append(f"Round {round_num + 1} - {model}:\n{response}")
                 chat_history.append((f"Round {round_num + 1} - {model}", response))
             except Exception as e:
                 chat_history.append((f"Error from {model} in round {round_num + 1}", str(e)))
 
-    # Final consensus - use the model that's shown most consistency
+    # Final consensus
     progress(0.9, desc="Building final consensus...")
-    # Use the first model for final consensus instead of two models
     model = selected_models[0]
     provider, model_name = model.split(": ", 1)
-    registry, api_key = MODEL_REGISTRIES[provider]
 
     try:
        consensus_prompt = generate_consensus_prompt(question, discussion_history)
-        final_consensus = registry.chat(
-            model=model_name,
-            messages=[{"role": "user", "content": consensus_prompt}],
-            api_key=api_key
-        )
+        if provider == "Anthropic":
+            api_key = os.getenv("ANTHROPIC_API_KEY")
+            final_consensus = chat_with_anthropic(
+                messages=[{"role": "user", "content": consensus_prompt}],
+                api_key=api_key
+            )
+        elif provider == "SambaNova":
+            api_key = os.getenv("SAMBANOVA_API_KEY")
+            final_consensus = chat_with_sambanova(
+                messages=[
+                    {"role": "system", "content": "You are a helpful assistant"},
+                    {"role": "user", "content": consensus_prompt}
+                ],
+                api_key=api_key
+            )
+        elif provider == "Hyperbolic":  # Add Hyperbolic case
+            api_key = os.getenv("HYPERBOLIC_API_KEY")
+            final_consensus = chat_with_hyperbolic(
+                messages=[{"role": "user", "content": consensus_prompt}],
+                api_key=api_key
+            )
+        else:  # Gemini
+            api_key = os.getenv("GEMINI_API_KEY")
+            final_consensus = chat_with_gemini(
+                messages=[{"role": "user", "content": consensus_prompt}],
+                api_key=api_key
+            )
    except Exception as e:
        final_consensus = f"Error getting consensus from {model}: {str(e)}"
 
@@ -198,22 +272,22 @@ with gr.Blocks() as demo:
     gr.Markdown("# Experimental Multi-Model Consensus Chat")
     gr.Markdown("""Select multiple models to collaborate on answering your question.
     The models will discuss with each other and attempt to reach a consensus.
-    Maximum 5 models can be selected at once.""")
+    Maximum 3 models can be selected at once.""")
 
     with gr.Row():
         with gr.Column():
             model_selector = gr.Dropdown(
                 choices=get_all_models(),
                 multiselect=True,
-                label="Select Models (max 5)",
-                info="Choose up to 5 models to participate in the discussion",
-                value=["OpenAI: gpt-4o", "Anthropic: claude-3-5-sonnet-20241022"],  # Updated model names
-                max_choices=5
+                label="Select Models (max 3)",
+                info="Choose up to 3 models to participate in the discussion",
+                value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
+                max_choices=3
             )
             rounds_slider = gr.Slider(
                 minimum=1,
-                maximum=5,
-                value=3,
+                maximum=2,
+                value=1,
                 step=1,
                 label="Discussion Rounds",
                 info="Number of rounds of discussion between models"
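With the caps lowered to three models and two rounds, the handler is cheap to smoke-test outside the UI. A hypothetical invocation — only question and selected_models are visible in the signature above, so the rounds parameter name is assumed from the slider, the return value is assumed to be the chat_history list of (speaker, message) tuples, and the relevant API keys must be set in the environment:

    history = multi_model_consensus(
        question="Compare the 8B and 70B Llama 3.1 variants for code review.",
        selected_models=[
            "SambaNova: Llama-3.2-90B-Vision-Instruct",
            "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
        ],
        rounds=1,  # assumed parameter name, mirroring the Discussion Rounds slider
    )
    for speaker, message in history:
        print(f"{speaker}: {message[:80]}")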