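# Gradio chat demo: Mixtral-8x7B-Instruct served through the Hugging Face
# Inference API, running on a ZeroGPU Space.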
import spaces
from huggingface_hub import InferenceClient
import gradio as gr

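# Remote client for the hosted Mixtral-8x7B instruct model.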
client = InferenceClient('mistralai/Mixtral-8x7B-Instruct-v0.1')

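# Reserve a ZeroGPU worker for up to 60 seconds per call (Hugging Face Spaces).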
@spaces.GPU(duration=60)
def generate_response(chat, kwargs):
    # Stream tokens from the endpoint and concatenate them; the full
    # completion is returned in one piece once the stream ends.
    output = ''
    stream = client.text_generation(chat, **kwargs, stream=True, details=True, return_full_text=False)
    for response in stream:
        output += response.token.text
    return output

def function(prompt, history):
    # Build the Mixtral instruct prompt: each turn is wrapped in
    # [INST] ... [/INST] tags, with past turns closed by </s>.
    chat = "<s>"
    for user_prompt, bot_response in history:
        chat += f"[INST] {user_prompt} [/INST] {bot_response}</s> "
    chat += f"[INST] {prompt} [/INST]"
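    # Sampling settings; the fixed seed keeps generations reproducible.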
    kwargs = dict(
        temperature=0.80,
        max_new_tokens=2048,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,  # make sure sampling is actually used
        seed=1337
    )

    try:
        output = generate_response(chat, kwargs)
        yield output
    except Exception:
        # Fail quietly with an empty reply instead of crashing the UI.
        yield ''

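# Chat UI; the inline CSS only tones down heading sizes in rendered
# markdown replies.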
interface = gr.ChatInterface(
    fn=function,
    chatbot=gr.Chatbot(
        avatar_images=None,
        container=False,
        show_copy_button=True,
        layout='bubble',
        render_markdown=True,
        line_breaks=True
    ),
    css='h1 {font-size:22px;} h2 {font-size:20px;} h3 {font-size:18px;} h4 {font-size:16px;}',
    autofocus=True,
    fill_height=True,
    analytics_enabled=False,
    submit_btn='Chat',
    stop_btn=None,
    retry_btn=None,
    undo_btn=None,
    clear_btn=None
)

interface.launch()