"""Gradio chat demo for DictaLM 2.0 Instruct, streaming replies from an OpenAI-compatible API."""
import os
from typing import Dict, Generator, List, Optional, Tuple

import gradio as gr
import openai

API_URL = os.getenv('API_URL')
API_KEY = os.getenv('API_KEY')
oai_client = openai.OpenAI(api_key=API_KEY, base_url=API_URL)
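# API_URL and API_KEY are expected in the environment (e.g. as Space secrets); the
# OpenAI SDK is simply pointed at an OpenAI-compatible endpoint serving DictaLM.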

History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]
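# History is Gradio's list-of-(user, assistant)-pairs chat format;
# Messages is the OpenAI chat-completions format of role/content dicts.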

def clear_session() -> Tuple[str, History]:
    # Reset both the textbox and the chat history.
    return '', []

def history_to_messages(history: History) -> Messages:
    # Flatten (user, assistant) pairs into alternating OpenAI chat messages.
    messages = []
    for user_turn, assistant_turn in history:
        messages.append({'role': 'user', 'content': user_turn})
        messages.append({'role': 'assistant', 'content': assistant_turn})
    return messages

def messages_to_history(messages: Messages) -> History:
    # Pair alternating user/assistant messages back into (user, assistant) tuples.
    history = []
    for q, r in zip(messages[0::2], messages[1::2]):
        history.append((q['content'], r['content']))
    return history

def model_chat(query: Optional[str], history: Optional[History]) -> Generator[str, None, None]:
    # Gradio may pass None on the first call; normalise to empty values.
    if query is None:
        query = ''
    if history is None:
        history = []
    messages = history_to_messages(history)
    messages.append({'role': 'user', 'content': query})
    gen = oai_client.chat.completions.create(
        model='dicta-il/dictalm2.0-instruct',
        messages=messages,
        temperature=0.7,
        max_tokens=1024,
        top_p=0.9,
        stream=True
    )
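    # Accumulate streamed deltas and yield the running text so the chat UI updates incrementally.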
    full_response = ''
    for completion in gen:
        text = completion.choices[0].delta.content
        full_response += text or ''
        yield full_response
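
# A minimal usage sketch (hypothetical, outside of Gradio): each iteration yields a
# progressively longer partial response as tokens stream in, e.g.
#     for partial in model_chat('שלום', []):
#         print(partial)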
        
with gr.Blocks(css='''
    .gr-group {direction: rtl;}
    .chatbot{text-align:right;}
  .dicta-header {
    background-color: #f4f4f4;  /* Replace with desired background color */
    border-radius: 10px;
    padding: 20px;
    text-align: center;
    display: flex;
    flex-direction: row;
    align-items: center;
  }

  .dicta-logo {
    width: 150px; /* Replace with actual logo width as desired */
    height: auto;
    margin-bottom: 20px;
  }

  .dicta-intro-text {
    color: #333; /* Replace with desired text color */
    margin-bottom: 20px;
    text-align: center;
    display: flex;
    flex-direction: column;
    align-items: center;
    width: 100%;
  }
''') as demo:
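    # Bilingual (English/Hebrew) header card rendered above the chat interface.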
    gr.Markdown("""
<div class="dicta-header">
  <img src="file/dicta-logo.jpg" alt="Dicta Logo" class="dicta-logo" style="max-height: 75px">
  <div class="dicta-intro-text">
    <h1>DictaLM 2.0 - Instruct Chat Demo</h1>
    <p>Welcome to the interactive demo of DictaLM-2.0. Explore the capabilities of our model and see how it can assist with your tasks.</p>
    <p dir='rtl'>ברוכים הבאים לדמו האינטראקטיבי של DictaLM-2.0. חקרו את יכולות המודל שלנו וראו כיצד הוא יכול לסייע לכם במשימותיכם.</p>
  </div>
</div>
""")
    
    interface = gr.ChatInterface(model_chat)
    interface.chatbot.rtl = True
    interface.textbox.placeholder = "הכנס שאלה בעברית (או באנגלית!)"
    interface.textbox.rtl = True
    interface.textbox.text_align = 'right'
    interface.theme_css += '.gr-group {direction: rtl !important;}'
    


# api_open=False blocks direct API access to the queue; allowed_paths whitelists the logo file so Gradio can serve it.
demo.queue(api_open=False).launch(max_threads=10, height=800, share=False, allowed_paths=['dicta-logo.jpg'], server_port=7861)