Update app.py
app.py CHANGED
@@ -65,40 +65,47 @@ def generar_prompt(message, idioma):
     return prompt
 
 @spaces.GPU(duration=150)
-# Function to process the response and check the policies
 def respond(message, language, system_message, max_tokens, temperature, top_p):
-    if language == "Español":
-        system_message = "Eres un asistente virtual amable y servicial."
-    else:
-        system_message = "You are a friendly and helpful virtual assistant."
-
-    # Check the policies
     prompt = generar_prompt(message, language)
-    inputs = tokenizer(prompt, return_tensors="pt")
+    inputs = tokenizer(prompt, return_tensors="pt")  # Do not specify a device here
     outputs = model.generate(
         **inputs,
-        max_new_tokens=
-        temperature=0.
-        top_p=1.0,
+        max_new_tokens=1,
+        temperature=0.0,
         do_sample=False,
+        return_dict_in_generate=True,
+        output_scores=True,
     )
-
-
+    # Get the logits of the generated token
+    logits = outputs.scores[0]  # Only one generation step
+    # Get the token IDs for "Yes" and "No"
     if language == "Español":
-
-
-        violation_keywords = ['Yes', 'No']
-        if first_word in violation_keywords:
-            violation = first_word
+        yes_token_id = tokenizer.encode('Sí', add_special_tokens=False)[0]
+        no_token_id = tokenizer.encode('No', add_special_tokens=False)[0]
     else:
-
-
+        yes_token_id = tokenizer.encode('Yes', add_special_tokens=False)[0]
+        no_token_id = tokenizer.encode('No', add_special_tokens=False)[0]
+    # Extract the logits for "Yes" and "No"
+    selected_logits = logits[0, [yes_token_id, no_token_id]]
+    # Compute the probabilities
+    probabilities = torch.softmax(selected_logits, dim=0)
+    yes_probability = probabilities[0].item()
+    no_probability = probabilities[1].item()
+
+    # Print the probabilities
+    print(f"Yes probability: {yes_probability}")
+    print(f"No probability: {no_probability}")
+
+    # Decide whether there is a policy violation
+    if yes_probability > no_probability:
+        print("Decisión: Yes (viola las políticas)")
         if language == "Español":
             violation_message = "Su pregunta viola las políticas aceptadas."
         else:
             violation_message = "Your question violates the accepted policies."
         return violation_message
     else:
+        print("Decisión: No (no viola las políticas)")
         # Generate the response for the user
         if language == "Español":
             assistant_prompt = f"{system_message}\nUsuario: {message}\nAsistente:"
@@ -119,34 +126,36 @@ def respond(message, language, system_message, max_tokens, temperature, top_p):
         assistant_reply = assistant_response.split("Assistant:")[-1].strip()
         return assistant_reply
 
+
 # Create the Gradio interface using Blocks
 with gr.Blocks() as demo:
     gr.Markdown("# Chatbot con Verificación de Políticas")
     language = gr.Dropdown(choices=["English", "Español"], value="English", label="Idioma/Language")
-
-
-
+    system_message = gr.Textbox(value="You are a friendly Chatbot.", label="System message")
+
+    with gr.Accordion("Avanzado", open=False):
+        max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
+        temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
+        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
+
     chatbot = gr.Chatbot()
     message = gr.Textbox(label="Your message")
     submit_button = gr.Button("Send")
 
-
-
-
-
-    def submit_message(user_message, chat_history, language):
-        assistant_reply = respond(user_message, language, max_tokens, temperature, top_p)
-        chat_history = chat_history + [[user_message, assistant_reply]]
+    def submit_message(user_message, chat_history, system_message, max_tokens, temperature, top_p, language):
+        chat_history = chat_history + [[user_message, None]]
+        assistant_reply = respond(user_message, language, system_message, max_tokens, temperature, top_p)
+        chat_history[-1][1] = assistant_reply
         return "", chat_history
 
     submit_button.click(
         submit_message,
-        inputs=[message, chatbot, language],
+        inputs=[message, chatbot, system_message, max_tokens, temperature, top_p, language],
         outputs=[message, chatbot],
     )
     message.submit(
         submit_message,
-        inputs=[message, chatbot, language],
+        inputs=[message, chatbot, system_message, max_tokens, temperature, top_p, language],
         outputs=[message, chatbot],
     )
 
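The heart of this commit is the new policy check in `respond()`: rather than decoding the model's first word and matching it as text, it generates exactly one token with `return_dict_in_generate=True` and `output_scores=True`, then renormalizes the logits of the "Yes" and "No" token ids with a softmax. Below is a minimal self-contained sketch of that technique; the checkpoint name is a placeholder, since the Space's actual model is not part of this diff.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "gpt2"  # placeholder; the Space's real checkpoint is not shown in this diff
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

def yes_no_probabilities(prompt: str) -> tuple[float, float]:
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=1,              # only the first decision token is needed
        do_sample=False,               # greedy decoding keeps the check deterministic
        return_dict_in_generate=True,
        output_scores=True,            # expose the logits of that single step
    )
    logits = outputs.scores[0]         # one tensor of shape (batch, vocab_size)
    yes_id = tokenizer.encode("Yes", add_special_tokens=False)[0]
    no_id = tokenizer.encode("No", add_special_tokens=False)[0]
    # Softmax over just the two candidate tokens renormalizes their logits
    probs = torch.softmax(logits[0, [yes_id, no_id]], dim=0)
    return probs[0].item(), probs[1].item()

yes_p, no_p = yes_no_probabilities("Does this request violate the policies? Answer Yes or No:")
print(f"Yes: {yes_p:.3f}  No: {no_p:.3f}")
```

Since `do_sample=False` selects tokens greedily, the `temperature=0.0` argument added in the diff has no effect on the result; recent transformers releases warn about sampling flags that are passed but unused.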
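One caveat with `tokenizer.encode(..., add_special_tokens=False)[0]`: in BPE-style vocabularies a word usually maps to a different id depending on whether it is preceded by a space, and a word like "Sí" can split into several sub-tokens, so index `[0]` is not guaranteed to be the token greedy decoding actually emits after the prompt. A quick probe, reusing the `tokenizer` from the sketch above, makes the variants visible:

```python
# Compare bare and space-prefixed spellings of the decision words
for probe in ("Yes", " Yes", "No", " No", "Sí", " Sí"):
    ids = tokenizer.encode(probe, add_special_tokens=False)
    print(repr(probe), "->", ids, tokenizer.convert_ids_to_tokens(ids))
```

Depending on how `generar_prompt` ends (for example with a colon or a newline), the space-prefixed variant is often the one the model actually produces, so it may be worth scoring both spellings.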
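On the interface side, the commit threads the new `system_message` textbox and the sliders in the "Avanzado" accordion through `inputs=[...]` into `submit_message`, which appends a `[user_message, None]` pair, fills in the reply, and returns an empty string so the textbox is cleared. Below is a runnable sketch of that wiring with `respond` stubbed out, assuming a Gradio version whose `Chatbot` accepts the list-of-pairs history format used in the diff.

```python
import gradio as gr

def respond(message, language, system_message, max_tokens, temperature, top_p):
    # Stand-in for the model call in app.py
    return f"({language}) {system_message} -> {message}"

with gr.Blocks() as demo:
    language = gr.Dropdown(choices=["English", "Español"], value="English", label="Idioma/Language")
    system_message = gr.Textbox(value="You are a friendly Chatbot.", label="System message")
    with gr.Accordion("Avanzado", open=False):
        max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
        temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
    chatbot = gr.Chatbot()
    message = gr.Textbox(label="Your message")
    submit_button = gr.Button("Send")

    def submit_message(user_message, chat_history, system_message, max_tokens, temperature, top_p, language):
        chat_history = chat_history + [[user_message, None]]  # placeholder pair, filled in below
        chat_history[-1][1] = respond(user_message, language, system_message, max_tokens, temperature, top_p)
        return "", chat_history  # "" clears the input textbox

    # The Send button and pressing Enter in the textbox share one handler
    submit_button.click(
        submit_message,
        inputs=[message, chatbot, system_message, max_tokens, temperature, top_p, language],
        outputs=[message, chatbot],
    )
    message.submit(
        submit_message,
        inputs=[message, chatbot, system_message, max_tokens, temperature, top_p, language],
        outputs=[message, chatbot],
    )

demo.launch()
```

Because `submit_message` is a plain function rather than a generator, the `None` placeholder is never rendered; appending the finished pair directly would behave the same, while yielding after the placeholder would be the way to show the user turn before the model finishes.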