fix
app.py CHANGED
@@ -27,14 +27,14 @@ with gr.Blocks(fill_height=True) as demo:
         gemini_interface = gr.load(
             name=gemini_model.value,
             src=gemini_gradio.registry,
-
+            fill_height=True
         )

         def update_gemini_model(new_model):
             return gr.load(
                 name=new_model,
                 src=gemini_gradio.registry,
-
+                fill_height=True
             )

         gemini_model.change(
@@ -73,7 +73,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=model_choice.value,
             src=openai_gradio.registry,
             accept_token=True,
-
+            fill_height=True
         )

         def update_model(new_model):
@@ -81,7 +81,7 @@ with gr.Blocks(fill_height=True) as demo:
                 name=new_model,
                 src=openai_gradio.registry,
                 accept_token=True,
-
+                fill_height=True
             )

         model_choice.change(
@@ -108,7 +108,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=claude_model.value,
             src=anthropic_gradio.registry,
             accept_token=True,
-
+            fill_height=True
         )

         def update_claude_model(new_model):
@@ -116,7 +116,7 @@ with gr.Blocks(fill_height=True) as demo:
                 name=new_model,
                 src=anthropic_gradio.registry,
                 accept_token=True,
-
+                fill_height=True
             )

         claude_model.change(
@@ -146,7 +146,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=sambanova_gradio.registry,
             accept_token=True,
             multimodal=True,
-
+            fill_height=True
         )

         def update_llama_model(new_model):
@@ -155,7 +155,7 @@ with gr.Blocks(fill_height=True) as demo:
                 src=sambanova_gradio.registry,
                 accept_token=True,
                 multimodal=True,
-
+                fill_height=True
             )

         llama_model.change(
@@ -170,14 +170,14 @@ with gr.Blocks(fill_height=True) as demo:
             name='grok-beta',
             src=xai_gradio.registry,
             accept_token=True,
-
+            fill_height=True
         )
     with gr.Tab("Qwen2.5 72B"):
         gr.load(
             name='Qwen/Qwen2.5-72B-Instruct',
             src=hyperbolic_gradio.registry,
             accept_token=True,
-
+            fill_height=True
         )
         gr.Markdown("**Note:** You need to use a Hyperbolic API key from [Hyperbolic](https://app.hyperbolic.xyz/).")
     with gr.Tab("Perplexity"):
@@ -204,7 +204,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=perplexity_model.value,
             src=perplexity_gradio.registry,
             accept_token=True,
-
+            fill_height=True
         )

         def update_perplexity_model(new_model):
@@ -212,7 +212,7 @@ with gr.Blocks(fill_height=True) as demo:
                 name=new_model,
                 src=perplexity_gradio.registry,
                 accept_token=True,
-
+                fill_height=True
             )

         perplexity_model.change(
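Taken together, the commit adds fill_height=True to every gr.load(...) call in app.py, so each loaded chat interface stretches to fill the surrounding gr.Blocks(fill_height=True) container instead of rendering at its default height. A minimal sketch of the resulting pattern for one provider tab follows; the tab label, the dropdown choices, and the .change() argument wiring are assumptions for illustration only, since the diff shows the gr.load calls and handler names but not the rest of the file.

import gradio as gr
import gemini_gradio  # registry provider referenced in the diff; assumed installed

with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("Gemini"):  # hypothetical tab label
        gemini_model = gr.Dropdown(
            choices=["gemini-1.5-flash", "gemini-1.5-pro"],  # hypothetical model list
            value="gemini-1.5-flash",
            label="Gemini model",
        )
        # fill_height=True is forwarded to the loaded interface so it expands
        # to the full height of the Blocks container.
        gemini_interface = gr.load(
            name=gemini_model.value,
            src=gemini_gradio.registry,
            fill_height=True,
        )

        def update_gemini_model(new_model):
            # Rebuild the loaded interface for the newly selected model,
            # keeping the same fill_height behaviour.
            return gr.load(
                name=new_model,
                src=gemini_gradio.registry,
                fill_height=True,
            )

        # Assumed wiring: the diff shows gemini_model.change( but not its arguments.
        gemini_model.change(
            fn=update_gemini_model,
            inputs=gemini_model,
            outputs=gemini_interface,
        )

demo.launch()

The same pattern repeats for the OpenAI, Anthropic, SambaNova, xAI, Hyperbolic, and Perplexity tabs, with accept_token=True (and multimodal=True for SambaNova) passed alongside fill_height=True as shown in the diff.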