Update app.py
app.py
CHANGED
@@ -18,7 +18,7 @@ def wrap_text(text, width=90):

def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
    # Combine user input and system prompt
-    formatted_input = f"
+    formatted_input = f"{system_prompt} {user_input}"

    # Encode the input text
    encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
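Note on the first hunk: the fixed line builds one flat prompt string that the next statement encodes. A minimal, self-contained sketch of that step is below; it assumes the tokenizer comes from the ClaireLLM checkpoint named in the Space description, which this diff does not itself show.

from transformers import AutoTokenizer

# Assumed checkpoint (taken from the Space description, not from this diff)
tokenizer = AutoTokenizer.from_pretrained("OpenLLM-France/Claire-Mistral-7B-0.1")

system_prompt = "You are an expert medical analyst:"
user_input = "Summarize the key findings of this report."
formatted_input = f"{system_prompt} {user_input}"

# add_special_tokens=False keeps BOS/EOS markers out of the encoded prompt
encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
print(encodeds["input_ids"].shape)  # (1, sequence_length)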
@@ -55,9 +55,9 @@ class ChatBot:
    def __init__(self):
        self.history = []

-    def predict(self, user_input, system_prompt
+    def predict(self, user_input, system_prompt):
        # Combine user input and system prompt
-        formatted_input = f"
+        formatted_input = f"{system_prompt} {user_input}"

        # Encode user input
        user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
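Note on the second hunk: it restores the missing "):" on predict and the same prompt string. The diff ends at the encoding step, so the generate-and-decode continuation sketched below is an assumption about the rest of app.py, not code shown in this commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed module-level loading (checkpoint name taken from the Space description)
tokenizer = AutoTokenizer.from_pretrained("OpenLLM-France/Claire-Mistral-7B-0.1")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-France/Claire-Mistral-7B-0.1")

class ChatBot:
    def __init__(self):
        self.history = []

    def predict(self, user_input, system_prompt):
        # Combine user input and system prompt
        formatted_input = f"{system_prompt} {user_input}"
        # Encode user input
        user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
        # Assumed continuation: generate a reply, strip the prompt tokens, decode the rest
        with torch.no_grad():
            output_ids = model.generate(user_input_ids, max_new_tokens=128)
        reply = tokenizer.decode(output_ids[0][user_input_ids.shape[-1]:], skip_special_tokens=True)
        self.history.append((user_input, reply))
        return reply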
@@ -82,15 +82,14 @@ bot = ChatBot()

title = "👋🏻Welcome to Tonic's Claire Chat🚀"
description = "You can use this Space to test out the current model ([ClaireLLM](https://huggingface.co/OpenLLM-France/Claire-Mistral-7B-0.1)) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on [Discord to build together](https://discord.gg/nXx5wbX9)."
-examples = [["
-
+examples = [["[Estragon :] On va voir. Tiens. Ils prennent chacun un bout de la corde et tirent. La corde se casse. Ils manquent de tomber.", "[Vladimir] Fais voir quand même. (Estragon dénoue la corde qui maintient son pantalon.Celui-ci, beaucoup trop large, lui tombe autour des chevilles. Ils regardent la corde.) À la rigueur ça pourrait aller. Mais est-elle solide ?"]]
iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
-    inputs=["text", "text"
-    outputs="text",
+    inputs=[{"type": "text", "label": "Deuxieme partie d'un dialogue"}, {"type": "text", "label": "Premiere partie d'un dialogue"}],  # Labeled inputs
+    outputs={"type": "text", "label": "Claire LLM Dialogue"},  # Labeled output
    theme="ParityError/Anime"
)
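Note on the last hunk: gr.Interface normally takes component shortcuts (strings like "text") or component instances rather than plain dicts, so the dict form added here may not render the labels as intended. A sketch of the same labeled interface using gr.Textbox components is below; it assumes the bot, title, description, and examples objects defined earlier in app.py.

import gradio as gr

iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
    inputs=[
        gr.Textbox(label="Deuxieme partie d'un dialogue"),
        gr.Textbox(label="Premiere partie d'un dialogue"),
    ],
    outputs=gr.Textbox(label="Claire LLM Dialogue"),
    theme="ParityError/Anime",
)
iface.launch()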