ahuang11 committed
Commit
a7d2870
1 Parent(s): f473fb8

Update app.py

Files changed (1)
  1. app.py +174 -60
app.py CHANGED
@@ -1,68 +1,182 @@
-from context import query
-from openai import AsyncOpenAI
+import asyncio
+import random
+import sqlite3
+
+import os
 import panel as pn
+import pandas as pd
+from litellm import acompletion
+
+pn.extension("perspective")
+
+
+MODELS = [
+    "mistral/mistral-tiny",
+    "mistral/mistral-small",
+    "mistral/mistral-medium",
+    "mistral/mistral-large-latest",
+]
+
+VOTING_LABELS = [
+    "👈 A is better",
+    "🤗 About the same",
+    "😓 Both not good",
+    "👉 B is better",
+]
+
+
+def set_api_key(api_key):
+    os.environ["MISTRAL_API_KEY"] = api_key
+
+
+async def respond(content, user, instance):
+    """
+    Respond to the user in the chat interface.
+    """
+    try:
+        instance.disabled = True
+        chat_label = instance.name
+        if chat_model := chat_models.get(chat_label):
+            model = chat_model
+        else:
+            # remove past history up to new message
+            instance.objects = instance.objects[-1:]
+            header_a.object = f"## Model: A"
+            header_b.object = f"## Model: B"
+            model = chat_models[chat_label] = random.choice(MODELS)
+
+        messages = instance.serialize()
+        messages.append({"role": "user", "content": content})
+
+        response = await acompletion(
+            model=model, messages=messages, stream=True, max_tokens=128
+        )
+
+        message = None
+        async for chunk in response:
+            if not chunk.choices[0].delta["content"]:
+                continue
+            message = instance.stream(
+                chunk.choices[0].delta["content"], user="Assistant", message=message
+            )
+    finally:
+        instance.disabled = False
+
 
-# taken from fleet context
-SYSTEM_PROMPT = """
-You are an expert in Python libraries. You carefully provide accurate, factual, thoughtful, nuanced answers, and are brilliant at reasoning. If you think there might not be a correct answer, you say so.
-Each token you produce is another opportunity to use computation, therefore you always spend a few sentences explaining background context, assumptions, and step-by-step thinking BEFORE you try to answer a question.
-Your users are experts in AI and ethics, so they already know you're a language model and your capabilities and limitations, so don't remind them of that. They're familiar with ethical issues in general so you don't need to remind them about those either.
-Your users are also in a CLI environment. You are capable of writing and running code. DO NOT write hypothetical code. ALWAYS write real code that will execute and run end-to-end.
-
-Instructions:
-- Be objective, direct. Include literal information from the context, don't add any conclusion or subjective information.
-- When writing code, ALWAYS have some sort of output (like a print statement). If you're writing a function, call it at the end. Do not generate the output, because the user can run it themselves.
-- ALWAYS cite your sources. Context will be given to you after the text ### Context source_url ### with source_url being the url to the file. For example, ### Context https://example.com/docs/api.html#files ### will have a source_url of https://example.com/docs/api.html#files.
-- When you cite your source, please cite it as [num] with `num` starting at 1 and incrementing with each source cited (1, 2, 3, ...). At the bottom, have a newline-separated `num: source_url` at the end of the response. ALWAYS add a new line between sources or else the user won't be able to read it. DO NOT convert links into markdown, EVER! If you do that, the user will not be able to click on the links.
-For example:
-**Context 1**: https://example.com/docs/api.html#pdfs
-I'm a big fan of PDFs.
-**Context 2**: https://example.com/docs/api.html#csvs
-I'm a big fan of CSVs.
-### Prompt ###
-What is this person a big fan of?
-### Response ###
-This person is a big fan of PDFs[1] and CSVs[2].
-1: https://example.com/docs/api.html#pdfs
-2: https://example.com/docs/api.html#csvs
-"""
-
-pn.extension()
-
-MODEL = "gpt-3.5-turbo"
-
-
-async def answer(contents, user, instance):
-    # start with system prompt
-    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
-
-    # add context to the user input
-    context = ""
-    fleet_responses = query(contents, k=3)
-    for i, response in enumerate(fleet_responses):
-        context += f"\n\n**Context {i}**: {response['metadata']['url']}\n{response['metadata']['text']}"
-    instance.send(context, avatar="🛩️", user="Fleet Context", respond=False)
-
-    # get history of messages (skipping the intro message)
-    # and serialize fleet context messages as "user" role
-    messages.extend(
-        instance.serialize(role_names={"user": ["user", "Fleet Context"]})[1:]
+async def forward_message(content, user, instance):
+    """
+    Send the message to the other chat interface and respond to the user in both.
+    """
+    if instance is chat_interface_a:
+        other_instance = chat_interface_b
+    else:
+        other_instance = chat_interface_a
+    other_instance.append(pn.chat.ChatMessage(content, user=user))
+
+    coroutines = [
+        respond(content, user, chat_interface)
+        for chat_interface in (chat_interface_a, chat_interface_b)
+    ]
+    await asyncio.gather(*coroutines)
+
+
+def click_vote(event):
+    """
+    Count the votes and update the voting results.
+    """
+    if len(chat_models) == 0:
+        return
+
+    voting_label = event.obj.name
+    if voting_label == VOTING_LABELS[0]:
+        chat_model = chat_models[chat_interface_a.name]
+        voting_counts[chat_model] = voting_counts.get(chat_model, 0) + 1
+    elif voting_label == VOTING_LABELS[3]:
+        chat_model = chat_models[chat_interface_b.name]
+        voting_counts[chat_model] = voting_counts.get(chat_model, 0) + 1
+    elif voting_label == VOTING_LABELS[1]:
+        chat_model_a = chat_models[chat_interface_a.name]
+        chat_model_b = chat_models[chat_interface_b.name]
+        if chat_model_a == chat_model_b:
+            voting_counts[chat_model_a] = voting_counts.get(chat_model_a, 0) + 1
+        else:
+            voting_counts[chat_model_a] = voting_counts.get(chat_model_a, 0) + 1
+            voting_counts[chat_model_b] = voting_counts.get(chat_model_b, 0) + 1
+
+    header_a.object = f"## Model: {chat_models[chat_interface_a.name]}"
+    header_b.object = f"## Model: {chat_models[chat_interface_b.name]}"
+    for chat_label in set(chat_models.keys()):
+        chat_models.pop(chat_label)
+
+    perspective.object = (
+        pd.DataFrame(voting_counts, index=["Votes"])
+        .melt(var_name="Model", value_name="Votes")
+        .set_index("Model")
     )
+    with sqlite3.connect("voting_counts.db") as conn:
+        pd.DataFrame(voting_counts.items(), columns=["Model", "Votes"]).to_sql(
+            "voting_counts", conn, if_exists="replace", index=False
+        )
 
-    openai_response = await client.chat.completions.create(
-        model=MODEL, messages=messages, temperature=0.2, stream=True
+
+# initialize
+chat_models = {}
+with sqlite3.connect("voting_counts.db") as conn:
+    conn.execute(
+        "CREATE TABLE IF NOT EXISTS voting_counts (Model TEXT PRIMARY KEY, Votes INTEGER)"
+    )
+    voting_counts = (
+        pd.read_sql("SELECT * FROM voting_counts", conn)
+        .set_index("Model")["Votes"]
+        .to_dict()
     )
 
-    message = ""
-    async for chunk in openai_response:
-        token = chunk.choices[0].delta.content
-        if token:
-            message += token
-            yield message
+# header
+api_key_input = pn.widgets.PasswordInput(placeholder="Mistral API Key")
+pn.bind(set_api_key, api_key_input)
+
+# main
+tabs = pn.Tabs()
+
+# tab 1
+chat_interface_kwargs = dict(
+    callback=forward_message,
+    show_undo=False,
+    show_rerun=False,
+    show_clear=False,
+    show_stop=False,
+    show_button_name=False,
+    callback_exception="verbose",
+)
+header_a = pn.pane.Markdown("## Model: A")
+chat_interface_a = pn.chat.ChatInterface(
+    name="A", header=header_a, **chat_interface_kwargs
+)
+header_b = pn.pane.Markdown("## Model: B")
+chat_interface_b = pn.chat.ChatInterface(
+    name="B", header=header_b, **chat_interface_kwargs
+)
+
+button_kwargs = dict(sizing_mode="stretch_width")
+button_row = pn.Row()
+for voting_label in VOTING_LABELS:
+    button = pn.widgets.Button(name=voting_label, **button_kwargs)
+    button.on_click(click_vote)
+    button_row.append(button)
+
+tabs.append(("Chat", pn.Column(pn.Row(chat_interface_a, chat_interface_b), button_row)))
 
+# tab 2
+perspective = pn.pane.Perspective(
+    pd.DataFrame(voting_counts, index=["Votes"])
+    .melt(var_name="Model", value_name="Votes")
+    .set_index("Model"),
+    sizing_mode="stretch_both",
+    editable=False,
+)
+tabs.append(("Voting Results", perspective))
 
-client = AsyncOpenAI()
-intro_message = pn.chat.ChatMessage("Ask me anything about Python libraries!", user="System")
-chat_interface = pn.chat.ChatInterface(intro_message, callback=answer, callback_user="OpenAI")
-template = pn.template.FastListTemplate(main=[chat_interface], title="Panel UI of Fleet Context 🛩️")
-template.servable()
+# layout
+pn.template.FastListTemplate(
+    title="Mistral Chat Arena", header=[api_key_input], main=[tabs]
+).servable()
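
Since the new version of app.py persists tallies to voting_counts.db via to_sql() in click_vote(), a quick way to sanity-check the stored results outside the UI is a minimal sketch like the following (it assumes app.py has been run and at least one vote has been cast, so the table exists and is non-empty):

    import sqlite3

    import pandas as pd

    # Read back the vote tallies that click_vote() writes to voting_counts.db.
    with sqlite3.connect("voting_counts.db") as conn:
        print(pd.read_sql("SELECT * FROM voting_counts", conn))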