import gradio as gr

# pip install bitsandbytes accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# to use 4bit use `load_in_4bit=True` instead
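# A minimal 4-bit alternative (a sketch, assuming bitsandbytes with NF4 support is
# installed; `bnb_4bit_compute_dtype` would also require `import torch`):
# quantization_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.bfloat16,
# )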
quantization_config = BitsAndBytesConfig(load_in_8bit=True)

checkpoint = "HuggingFaceTB/SmolLM-135M"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, quantization_config=quantization_config)

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the conversation as a list of chat messages.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Format the messages with the tokenizer's chat template (assumed to be
    # defined for this checkpoint) and move the inputs to the model's device.
    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(
        inputs,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    print(response)
    return response

""" | |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
""" | |
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()
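# Usage note (an assumption about running this file locally rather than as a hosted
# Space): `python app.py` starts the server and prints a local URL; passing
# `share=True` to `demo.launch()` additionally creates a temporary public link.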