"""Gradio chat app that streams responses from Mixtral-8x7B-Instruct via the HF Inference API."""

import spaces
from huggingface_hub import InferenceClient
import gradio as gr

client = InferenceClient('mistralai/Mixtral-8x7B-Instruct-v0.1')


@spaces.GPU(duration=60)
def generate_response(chat, kwargs):
    """Stream a completion for the formatted `chat` prompt and return the full text.

    Args:
        chat: Prompt string already wrapped in Mixtral's [INST] ... [/INST] format.
        kwargs: Generation parameters forwarded to `text_generation`
            (temperature, max_new_tokens, etc.).

    Returns:
        The concatenated generated text.
    """
    # Accumulate streamed token texts; join at the end to avoid quadratic concat.
    pieces = []
    stream = client.text_generation(chat, **kwargs, stream=True, details=True, return_full_text=False)
    for response in stream:
        pieces.append(response.token.text)
    return ''.join(pieces)


def function(prompt, history):
    """ChatInterface callback: build the Mixtral instruction prompt and yield the reply.

    Args:
        prompt: The user's latest message.
        history: List of (user_prompt, bot_response) pairs from previous turns.

    Yields:
        The model's response, or '' if generation fails (best-effort UI behavior).
    """
    chat = ""
    for user_prompt, bot_response in history:
        chat += f"[INST] {user_prompt} [/INST] {bot_response} "
    chat += f"[INST] {prompt} [/INST]"
    kwargs = dict(
        temperature=0.80,
        max_new_tokens=2048,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,  # make sure sampling is enabled so temperature/top_p take effect
        seed=1337,
    )
    try:
        output = generate_response(chat, kwargs)
        yield output
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt/
        # SystemExit). Still yields '' so the UI degrades gracefully on API errors.
        yield ''


interface = gr.ChatInterface(
    fn=function,
    chatbot=gr.Chatbot(
        avatar_images=None,
        container=False,
        show_copy_button=True,
        layout='bubble',
        render_markdown=True,
        line_breaks=True,
    ),
    css='h1 {font-size:22px;} h2 {font-size:20px;} h3 {font-size:18px;} h4 {font-size:16px;}',
    autofocus=True,
    fill_height=True,
    analytics_enabled=False,
    submit_btn='Chat',
    stop_btn=None,
    retry_btn=None,
    undo_btn=None,
    clear_btn=None,
)
interface.launch()