from functools import lru_cache

import gradio as gr
from transformers import pipeline, AutoTokenizer


@lru_cache(maxsize=None)
def load_model(model_name):
    """Build and memoize a CPU text-generation pipeline for *model_name*.

    Caching is essential here: without it every chat request re-instantiates
    (and potentially re-downloads) the full model, adding seconds of latency
    per call on the CPU-only host.
    """
    return pipeline("text-generation", model=model_name, device="cpu")


def generate(
    model_name,
    system_input,
    user_input,
    temperature=0.4,
    top_p=0.25,
    top_k=7,
    max_new_tokens=256,
    repetition_penalty=1.0,
):
    """Run one chat turn through the selected model and return the raw output.

    Parameters mirror the Gradio controls. The returned string is the model's
    full ``generated_text`` (which includes the rendered prompt, as produced
    by the transformers text-generation pipeline).
    """
    pipe = load_model(model_name)

    # NOTE(review): this silently overrides the user's Repetition Penalty
    # slider for this one model — presumably a tuned value for Pythia-31M;
    # confirm that ignoring the slider here is intentional.
    if model_name == "Felladrin/Pythia-31M-Chat-v1":
        repetition_penalty = 1.0016

    # One-shot example exchange primes the model before the real user turn.
    message_template = [
        {
            "role": "system",
            "content": system_input,
        },
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi! How can I assist you today?"},
        {"role": "user", "content": user_input},
    ]

    prompt = pipe.tokenizer.apply_chat_template(
        message_template, tokenize=False, add_generation_prompt=True
    )
    outputs = pipe(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )
    return outputs[0]["generated_text"]


model_choices = [
    "Felladrin/Pythia-31M-Chat-v1",
    "Felladrin/Llama-160M-Chat-v1",
    "Felladrin/Smol-Llama-101M-Chat-v1",
    "Felladrin/TinyMistral-248M-SFT-v4",
]

# Default system prompt; the embedded "\n" preserves the original two-line text.
DEFAULT_SYSTEM_MESSAGE = (
    "You are a highly knowledgeable and friendly chatbot equipped with "
    "extensive information across various domains. Your goal is to understand "
    "and respond to user inquiries with accuracy and clarity. You're adept at "
    "providing detailed explanations, concise summaries, and insightful "
    "responses. \n"
    "Your interactions are always respectful, helpful, and focused on "
    "delivering the most relevant information to the user."
)

g = gr.Interface(
    fn=generate,
    inputs=[
        gr.components.Dropdown(
            choices=model_choices,
            label="Model",
            value=model_choices[0],
            interactive=True,
        ),
        gr.components.Textbox(
            lines=2, label="System Message", value=DEFAULT_SYSTEM_MESSAGE
        ),
        gr.components.Textbox(
            lines=2,
            label="User Message",
            value="How many planets are in our solar system?",
        ),
        gr.components.Slider(minimum=0, maximum=1, value=0.4, label="Temperature"),
        gr.components.Slider(minimum=0, maximum=1, value=0.25, label="Top p"),
        gr.components.Slider(minimum=0, maximum=100, step=1, value=7, label="Top k"),
        gr.components.Slider(
            minimum=1, maximum=1024, step=1, value=256, label="Max tokens"
        ),
        gr.components.Slider(
            minimum=1.0, maximum=2.0, step=0.001, value=1.0, label="Repetition Penalty"
        ),
    ],
    outputs=[gr.Textbox(lines=10, label="Output")],
    title="Chat with models fine-tuned by Felladrin",
    description=(
        "Note that the inference runs on CPU only, which may lead to slower "
        "outputs during periods of high demand."
    ),
    concurrency_limit=1,
)

g.launch(max_threads=2)