# urdu-llama / run.py
# Hugging Face Space file (scraped page header preserved below):
#   author: traversaal-ai — "Update run.py"
#   commit: 77bfe0b (verified) — raw / history / blame — 957 Bytes
import os
import random
import time

import gradio as gr
from huggingface_hub import InferenceClient
# Inference endpoint hosting the Urdu chatbot model.
endpoint_url = "https://ko60a2m26ylqgri6.us-east-1.aws.endpoints.huggingface.cloud"

# Read the access token from the environment: the original referenced an
# undefined module-level `hf_token`, which raised NameError at import time.
# NOTE(review): assumes the Space exposes the token as HF_TOKEN — confirm
# the secret name in the Space settings.
hf_token = os.environ.get("HF_TOKEN")

# Streaming Client
client = InferenceClient(endpoint_url, token=hf_token)
def generate_text(prompt):
    """Stream an Urdu chat response for *prompt* from the inference endpoint.

    Yields the accumulated generated text after every token so Gradio can
    render the reply incrementally.

    Args:
        prompt: The user's message, inserted into the instruction template.

    Yields:
        str: The response generated so far (grows with each token).
    """
    # Generation parameters. The original referenced a module-level
    # `gen_kwargs` that was never defined anywhere in the file, so the
    # first request raised NameError. Defined locally with conservative
    # defaults; "###" stops generation at the next template section marker.
    gen_kwargs = {
        "max_new_tokens": 512,
        "temperature": 0.7,
        "stop": ["###", "</s>"],
    }
    chat_prompt = f"""
### Instruction:
You are a chatbot. Chat in Urdu
### Input:
{prompt}
### Response:
""
"""
    stream = client.text_generation(chat_prompt, stream=True, details=True, **gen_kwargs)
    generated_text = ""
    for r in stream:
        # Skip special tokens (e.g. BOS/EOS) — they are not display text.
        if r.token.special:
            continue
        # Stop as soon as the model emits a configured stop sequence.
        if r.token.text in gen_kwargs["stop"]:
            break
        generated_text += r.token.text
        yield generated_text
# Wire the streaming generator into a minimal Gradio UI: a two-line
# prompt box in, plain streamed text out.
prompt_box = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
iface = gr.Interface(
    fn=generate_text,
    inputs=prompt_box,
    outputs="text",
    title="Urdu Chatbot",
    description="Ask me anything in Urdu!",
)
iface.launch()