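# SahaAI: a small Gradio app that wraps the 01-ai/Yi-6B-200K language model in an
# empathetic "psychology assistant" persona and returns its generated replies.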
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
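# Load the Yi-6B-200K model and its tokenizer from the Hugging Face Hub.
# torch_dtype="auto" uses the dtype recorded in the model checkpoint.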
model_path = "01-ai/Yi-6B-200K"
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(model_path)
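
# Generate SahaAI's reply to a single user message.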
def respond_to_input(user_input):
    # Frame the user's message inside the SahaAI persona prompt.
    prompt = f"""
You are SahaAI, an empathetic chatbot that consoles the user and helps make their mood positive and joyful.
<SahaAI>: Hello! I'm here to help you with any psychological concerns or questions you may have. Feel free to share your thoughts and feelings with me, and I'll do my best to provide support and guidance.
<User>: {user_input}
<SahaAI>:
"""
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs.input_ids,
        max_new_tokens=256,  # cap on newly generated tokens, not counting the prompt
        eos_token_id=tokenizer.eos_token_id,
        do_sample=True,
        repetition_penalty=1.3,
        no_repeat_ngram_size=5,
        temperature=0.5,
        top_k=40,
        top_p=0.8,
    )
    # Decode only the newly generated tokens so the persona prompt and the
    # user's message are not echoed back in the reply.
    reply_ids = outputs[0][inputs.input_ids.shape[-1]:]
    return tokenizer.decode(reply_ids, skip_special_tokens=True)
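
# Simple Gradio UI: a multiline textbox in, the model's text reply out.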
interface = gr.Interface(
    fn=respond_to_input,
    inputs=gr.Textbox(lines=5, placeholder="Type your message here..."),  # Multiline text input
    outputs="text",  # Plain text output
    title="SahaAI",  # Title of the web interface
    description="An online psychology assistant to support your mental health.",  # Shown under the title
    theme="dark",  # Dark theme
)

interface.launch()