jijivski committed on
Commit
9882e38
1 Parent(s): e2bf898

hover and question _ppl

data/mata_df.csv ADDED
The diff for this file is too large to render. See raw diff
 
data/model_release_time.csv ADDED
@@ -0,0 +1,36 @@
+ Model,Release Date,model,MMLU,GSM8K,Humanities,SocialSciences,STEM,Other,Longbench
+ Baichuan2-13B-Base,2023-08-24,Baichuan2-13B-Base,58.1,52.7,51.54,66.2,47.89,65.24,62.55
+ Baichuan2-13B-Chat,2023-06-24,Baichuan2-13B-Chat,52.1,55.0,50.71,65.19,47.13,65.01,
+ Baichuan2-7B-Base,2023-08-24,Baichuan2-7B-Base,54.0,24.4,46.87,58.73,42.63,58.9,16.32
+ Baichuan2-7B-Chat,2023-08-24,Baichuan2-7B-Chat,52.9,32.0,46.44,58.82,41.93,59.22,32.22
+ Colossal-LLaMA-2-7b-base,2023-09-24,Colossal-LLaMA-2-7b-base,53.06,9.0,73.1,75.0,34.8,44.0,23.83
+ HF_RWKV_v5-Eagle-7B,2023-11-15,HF_RWKV_v5-Eagle-7B,33.04,9.3,32.58,34.94,28.29,36.66,19.33
+ Llama-2-13b-hf,2023-07-18,Llama-2-13b-hf,55.77,22.8,76.0,82.0,28.6,46.4,7.39
+ Llama-2-7b-hf,2023-07-18,Llama-2-7b-hf,46.87,14.4,70.2,65.0,38.4,42.2,15.29
+ Qwen-14B-Chat,2023-09-24,Qwen-14B-Chat,66.5,59.0,58.24,74.78,56.87,70.78,38.72
+ Qwen-1_8B,2023-11-30,Qwen-1_8B,45.3,32.0,40.77,50.93,37.04,51.92,35.4
+ Qwen-1_8B-Chat,2023-11-30,Qwen-1_8B-Chat,43.99,4.0,39.91,50.08,38.47,49.73,14.39
+ Qwen-7B,2023-09-24,Qwen-7B,59.84,44.9,75.4,80.0,37.5,48.2,45.53
+ Qwen-7B-Chat,2023-09-24,Qwen-7B-Chat,57.0,54.0,47.86,64.32,46.91,61.64,33.89
+ Skywork-13B-base,2023-10-22,Skywork-13B-base,62.1,55.0,56.62,70.13,47.19,67.69,23.48
+ TinyLlama-1.1B-Chat-v0.6,2023-11-24,TinyLlama-1.1B-Chat-v0.6,25.98,2.1,22.2,28.0,32.1,23.5,5.05
+ Yi-6B,2024-01-17,Yi-6B,64.11,12.1,83.0,85.0,42.9,45.8,38.89
+ Yi-6B-Chat,2024-01-17,Yi-6B-Chat,58.24,38.4,55.75,72.41,51.57,69.91,39.54
+ baichuan-13b-chat,2023-06-24,baichuan-13b-chat,52.1,0.1,43.95,56.48,38.19,56.2,9.29
+ baichuan-7b-chat,2023-09-24,baichuan-7b-chat,42.8,9.1,40.83,46.96,35.17,47.28,23.3
+ chatglm3-6b,2023-10-24,chatglm3-6b,61.4,72.0,46.12,59.12,42.82,56.49,
+ falcon-rw-1b,2023-04-24,falcon-rw-1b,25.28,0.5,29.2,28.0,29.5,28.9,5.31
+ interlm-20b,2023-09-18,interlm-20b,61.85,23.0,,,,,
+ internlm-chat-7b,2023-06-06,internlm-chat-7b,50.8,34.0,46.08,58.56,40.28,56.68,9.73
+ llama2-7b-chat-hf,2023-07-18,llama2-7b-chat-hf,48.32,45.5,72.5,72.0,30.4,43.4,19.58
+ llama_hf_7b,2023-02-18,llama_hf_7b,46.87,10.0,31.99,31.75,28.35,36.63,4.7
+ mistral-7b-v0.1,2023-09-18,mistral-7b-v0.1,64.16,37.8,83.0,86.0,48.2,55.4,32.2
+ opt-13b,2022-05-11,opt-13b,24.9,1.7,31.0,27.0,36.6,25.9,
+ opt-2.7b,2022-05-11,opt-2.7b,25.43,0.2,26.67,24.63,26.86,24.01,66.67
+ phi-1_5,2023-08-18,phi-1_5,43.89,12.4,46.8,64.0,38.4,41.0,8.07
+ phi-2,2023-12-24,phi-2,58.11,54.8,69.0,77.0,49.1,47.6,5.49
+ pythia-12b,2023-02-24,pythia-12b,26.76,1.7,30.4,29.0,33.0,31.9,
+ vicuna-7b-v1.5,2023-07-24,vicuna-7b-v1.5,50.82,8.1,71.3,76.0,44.6,42.8,20.39
+ xverse-13b,2023-08-06,xverse-13b,55.1,18.0,,,,,
+ zephyr-7b-beta,2023-10-26,zephyr-7b-beta,60.7,11.3,80.7,78.0,34.8,51.2,35.74
+ zhongjing-base,2023-09-24,zhongjing-base,48.23,26.0,75.4,69.0,39.3,45.2,3.36
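
The table above has one row per model: identifier, release date (the Model and model columns duplicate each other), benchmark scores in percent, and an empty trailing Longbench cell where no score was recorded. A minimal loading sketch, assuming pandas and that the script runs from the repo root, with column names taken from the header row above:

import pandas as pd

# Parse the release column as dates so models can be ordered chronologically
df = pd.read_csv('data/model_release_time.csv', parse_dates=['Release Date'])
df = df.sort_values('Release Date')
print(df[['Model', 'Release Date', 'MMLU', 'Longbench']].head())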
gradio_samples/gradio_hover.py ADDED
@@ -0,0 +1,93 @@
+ # import gradio as gr
+
+ # def generate_hoverable_html(text):
+ #     # Split the text into words
+ #     words = text.split()
+
+ #     # Create an HTML span element with hover info for each word
+ #     html_words = [
+ #         f'<span class="hoverable-word" data-info="Information about {word}">{word}</span>'
+ #         for word in words
+ #     ]
+
+ #     # Join the words back into a single string
+ #     hoverable_html = ' '.join(html_words)
+
+ #     # Add the CSS for the hover tooltip
+ #     custom_html = f"""
+ #     <style>
+ #     .hoverable-word {{
+ #         color: blue;
+ #         cursor: pointer;
+ #     }}
+ #     .hoverable-word:hover::after {{
+ #         content: attr(data-info);
+ #         color: white;
+ #         background-color: black;
+ #         padding: 4px;
+ #         margin-left: 8px;
+ #         position: absolute;
+ #     }}
+ #     </style>
+ #     <div>{hoverable_html}</div>
+ #     """
+
+ #     return custom_html
+
+ # # Build the Gradio interface
+ # with gr.Blocks() as demo:
+ #     with gr.Row():
+ #         text_input = gr.Textbox(label="Input Text", placeholder="Type here...")
+ #         output_html = gr.HTML()
+
+ #     # Wire the input, the handler function, and the output together
+ #     text_input.change(generate_hoverable_html, text_input, output_html)
+
+ # demo.launch()
+
+ import gradio as gr
+
+ def generate_hoverable_html(text):
+     # Split the text into words
+     words = text.split()
+     prob_dic = {'a': {'b': 0.1, 'c': 0.2}, 'b': {'a': 0.1, 'c': 0.2}}
+     # Create an HTML span element with hover info for each word;
+     # use .get() so words missing from prob_dic don't raise a KeyError
+     html_words = [
+         f'<span class="hoverable-word" data-info="{prob_dic.get(word, {})}">{word}</span>'
+         for word in words
+     ]
+
+     # Join the words back into a single string
+     hoverable_html = ' '.join(html_words)
+
+     # Add the CSS for the hover tooltip
+     custom_html = f"""
+     <style>
+     .hoverable-word {{
+         color: blue;
+         cursor: pointer;
+     }}
+     .hoverable-word:hover::after {{
+         content: attr(data-info);
+         color: white;
+         background-color: black;
+         padding: 4px;
+         margin-left: 8px;
+         position: absolute;
+     }}
+     </style>
+     <div>{hoverable_html}</div>
+     """
+
+     return custom_html
+
+ # Build the Gradio interface
+ with gr.Blocks() as demo:
+     with gr.Row():
+         text_input = gr.Textbox(label="Input Text", placeholder="Type here...")
+         output_html = gr.HTML()
+
+     # Wire the input, the handler function, and the output together
+     text_input.change(generate_hoverable_html, text_input, output_html)
+
+ demo.launch(debug=True)
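
The active demo above hard-codes prob_dic. A minimal sketch of how real next-token probabilities could populate such a mapping, assuming a Hugging Face causal LM; gpt2 is an arbitrary placeholder and token_prob_dict is a hypothetical helper, not part of this repo:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')  # placeholder model choice
model = AutoModelForCausalLM.from_pretrained('gpt2')

def token_prob_dict(text, top_k=5):
    """Map each token in the text to the model's top-k predictions for it."""
    ids = tokenizer(text, return_tensors='pt').input_ids
    with torch.no_grad():
        logits = model(ids).logits[0]  # (seq_len, vocab_size)
    probs = torch.softmax(logits, dim=-1)
    out = {}
    for pos in range(ids.shape[1] - 1):
        # The token actually at position pos+1, keyed to the distribution predicted at pos
        token = tokenizer.decode(int(ids[0, pos + 1]))
        top = torch.topk(probs[pos], top_k)
        out[token] = {tokenizer.decode(int(i)): round(float(p), 4)
                      for i, p in zip(top.indices, top.values)}
    return out  # Note: repeated tokens overwrite earlier entries

Note that generate_hoverable_html splits on whitespace, which will not match subword tokens exactly, so some word-to-token alignment would still be needed.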
gradio_samples/web_ui.py ADDED
@@ -0,0 +1,309 @@
+ import time
+
+ import gradio
+ import numpy as np
+ import torch
+ from transformers import LogitsProcessor
+
+ from modules import html_generator, shared
+
+ params = {
+     'active': True,
+     'color_by_perplexity': False,
+     'color_by_probability': False,
+     'ppl_scale': 15.0,  # No slider for this right now, because I don't think it really needs to be changed. Very large perplexity scores don't show up often.
+     'probability_dropdown': False,
+     'verbose': False  # For debugging mostly
+ }
+
+
+ class PerplexityLogits(LogitsProcessor):
+     def __init__(self, verbose=False):
+         self.generated_token_ids = []
+         self.selected_probs = []
+         self.top_token_ids_list = []
+         self.top_probs_list = []
+         self.perplexities_list = []
+         self.last_probs = None
+         self.verbose = verbose
+
+     def __call__(self, input_ids, scores):
+         # t0 = time.time()
+         probs = torch.softmax(scores, dim=-1, dtype=torch.float)
+         log_probs = torch.nan_to_num(torch.log(probs))  # Note: this converts log(0) nan to 0, and probs*log_probs keeps that 0 from affecting the perplexity.
+         entropy = -torch.sum(probs * log_probs)
+         entropy = entropy.cpu().numpy()
+         perplexity = round(float(np.exp(entropy)), 4)
+         self.perplexities_list.append(perplexity)
+         last_token_id = int(input_ids[0][-1].cpu().numpy().item())
+         # Store the generated tokens (not sure why this isn't accessible in the output endpoint!)
+         self.generated_token_ids.append(last_token_id)
+         # Get the last token's probability, and add it to the list if it wasn't there
+         if len(self.selected_probs) > 0:
+             # Is the selected token in the top tokens?
+             if self.verbose:
+                 print('Probs: Token after', shared.tokenizer.decode(last_token_id))
+                 print('Probs:', [shared.tokenizer.decode(token_id) for token_id in self.top_token_ids_list[-1][0]])
+                 print('Probs:', [round(float(prob), 4) for prob in self.top_probs_list[-1][0]])
+             if last_token_id in self.top_token_ids_list[-1][0]:
+                 idx = self.top_token_ids_list[-1][0].index(last_token_id)
+                 self.selected_probs.append(self.top_probs_list[-1][0][idx])
+             else:
+                 self.top_token_ids_list[-1][0].append(last_token_id)
+                 last_prob = round(float(self.last_probs[last_token_id]), 4)
+                 self.top_probs_list[-1][0].append(last_prob)
+                 self.selected_probs.append(last_prob)
+         else:
+             self.selected_probs.append(1.0)  # Placeholder for the last token of the prompt
+
+         if self.verbose:
+             pplbar = "-"
+             if not np.isnan(perplexity):
+                 pplbar = "*" * round(perplexity)
+             print(f"PPL: Token after {shared.tokenizer.decode(last_token_id)}\t{perplexity:.2f}\t{pplbar}")
+
+         # Get the top 5 probabilities
+         top_tokens_and_probs = torch.topk(probs, 5)
+         top_probs = top_tokens_and_probs.values.cpu().numpy().astype(float).tolist()
+         top_token_ids = top_tokens_and_probs.indices.cpu().numpy().astype(int).tolist()
+
+         self.top_token_ids_list.append(top_token_ids)
+         self.top_probs_list.append(top_probs)
+
+         probs = probs.cpu().numpy().flatten()
+         self.last_probs = probs  # Need to keep this as a reference for top probs
+
+         # t1 = time.time()
+         # print(f"PPL Processor: {(t1-t0):.3f} s")
+         # About 1 ms, though occasionally up to around 100 ms, not sure why...
+         # Doesn't actually modify the logits!
+         return scores
+
+
+ # Stores the perplexity and top probabilities
+ ppl_logits_processor = None
+
+
+ def logits_processor_modifier(logits_processor_list, input_ids):
+     global ppl_logits_processor
+     if params['active']:
+         ppl_logits_processor = PerplexityLogits(verbose=params['verbose'])
+         logits_processor_list.append(ppl_logits_processor)
+
+
+ def output_modifier(text):
+     global ppl_logits_processor
+     # t0 = time.time()
+
+     if not params['active']:
+         return text
+
+     # TODO: It's probably more efficient to do this above rather than modifying all these lists
+     # Remove the last element of perplexities_list, top_token_ids_list, top_tokens_list and top_probs_list, since everything is off by one because this extension runs before generation
+     perplexities = ppl_logits_processor.perplexities_list[:-1]
+     top_token_ids_list = ppl_logits_processor.top_token_ids_list[:-1]
+     top_tokens_list = [[shared.tokenizer.decode(token_id) for token_id in top_token_ids[0]] for top_token_ids in top_token_ids_list]
+     top_probs_list = ppl_logits_processor.top_probs_list[:-1]
+     # Remove the first element of generated_token_ids, generated_tokens and selected_probs, because they are for the last token of the prompt
+     gen_token_ids = ppl_logits_processor.generated_token_ids[1:]
+     gen_tokens = [shared.tokenizer.decode(token_id) for token_id in gen_token_ids]
+     sel_probs = ppl_logits_processor.selected_probs[1:]
+
+     end_part = '</div></div>' if params['probability_dropdown'] else '</span>'  # Helps with finding the index after replacing part of the text.
+
+     i = 0
+     for token, prob, ppl, top_tokens, top_probs in zip(gen_tokens, sel_probs, perplexities, top_tokens_list, top_probs_list):
+         color = 'ffffff'
+         if params['color_by_probability'] and params['color_by_perplexity']:
+             color = probability_perplexity_color_scale(prob, ppl)
+         elif params['color_by_perplexity']:
+             color = perplexity_color_scale(ppl)
+         elif params['color_by_probability']:
+             color = probability_color_scale(prob)
+         if token in text[i:]:
+             if params['probability_dropdown']:
+                 text = text[:i] + text[i:].replace(token, add_dropdown_html(token, color, top_tokens, top_probs[0], ppl), 1)
+             else:
+                 text = text[:i] + text[i:].replace(token, add_color_html(token, color), 1)
+             i += text[i:].find(end_part) + len(end_part)
+
+     # Use the full perplexity list for calculating the average here.
+     print('Average perplexity:', round(np.mean(ppl_logits_processor.perplexities_list[:-1]), 4))
+     # t1 = time.time()
+     # print(f"Modifier: {(t1-t0):.3f} s")
+     # About 50 ms
+     return text
+
+
+ def probability_color_scale(prob):
+     '''
+     Green-yellow-red color scale
+     '''
+     rv = 0
+     gv = 0
+     if prob <= 0.5:
+         rv = 'ff'
+         gv = hex(int(255 * prob * 2))[2:]
+         if len(gv) < 2:
+             gv = '0' * (2 - len(gv)) + gv
+     else:
+         rv = hex(int(255 - 255 * (prob - 0.5) * 2))[2:]
+         gv = 'ff'
+         if len(rv) < 2:
+             rv = '0' * (2 - len(rv)) + rv
+
+     return rv + gv + '00'
+
+
+ def perplexity_color_scale(ppl):
+     '''
+     Red component only, white for 0 perplexity (sorry if you're not in dark mode)
+     '''
+     value = hex(max(int(255.0 - params['ppl_scale'] * (float(ppl) - 1.0)), 0))[2:]
+     if len(value) < 2:
+         value = '0' * (2 - len(value)) + value
+
+     return 'ff' + value + value
+
+
+ def probability_perplexity_color_scale(prob, ppl):
+     '''
+     Green-yellow-red for probability and blue component for perplexity
+     '''
+     rv = 0
+     gv = 0
+     bv = hex(min(max(int(params['ppl_scale'] * (float(ppl) - 1.0)), 0), 255))[2:]
+     if len(bv) < 2:
+         bv = '0' * (2 - len(bv)) + bv
+
+     if prob <= 0.5:
+         rv = 'ff'
+         gv = hex(int(255 * prob * 2))[2:]
+         if len(gv) < 2:
+             gv = '0' * (2 - len(gv)) + gv
+     else:
+         rv = hex(int(255 - 255 * (prob - 0.5) * 2))[2:]
+         gv = 'ff'
+         if len(rv) < 2:
+             rv = '0' * (2 - len(rv)) + rv
+
+     return rv + gv + bv
+
+
+ def add_color_html(token, color):
+     return f'<span style="color: #{color}">{token}</span>'
+
+
+ # TODO: Major issue: Applying this to too many tokens will cause a permanent slowdown in generation speed until the messages are removed from the history.
+ # I think the issue is from HTML elements taking up space in the visible history, and things like history deepcopy add latency proportional to the size of the history.
+ # A potential solution might be to modify the main generation code to send just the internal text and not the visible history, to avoid moving too much around.
+ # I wonder if we can also avoid using deepcopy here.
+ def add_dropdown_html(token, color, top_tokens, top_probs, perplexity=0):
+     html = f'<div class="hoverable"><span style="color: #{color}">{token}</span><div class="dropdown"><table class="dropdown-content"><tbody>'
+     for token_option, prob in zip(top_tokens, top_probs):
+         # TODO: Bold for the selected token?
+         # Using divs prevented the problem of divs inside spans causing issues.
+         # Now the problem is that divs show the same whitespace of one space between every token.
+         # There is probably some way to fix this in CSS that I don't know about.
+         row_color = probability_color_scale(prob)
+         row_class = ' class="selected"' if token_option == token else ''
+         html += f'<tr{row_class}><td style="color: #{row_color}">{token_option}</td><td style="color: #{row_color}">{prob:.4f}</td></tr>'
+     if perplexity != 0:
+         ppl_color = perplexity_color_scale(perplexity)
+         html += f'<tr><td>Perplexity:</td><td style="color: #{ppl_color}">{perplexity:.4f}</td></tr>'
+     html += '</tbody></table></div></div>'
+     return html  # About 750 characters per token...
+
+
+ def custom_css():
+     return """
+     .dropdown {
+         display: none;
+         position: absolute;
+         z-index: 50;
+         background-color: var(--block-background-fill);
+         box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
+         width: max-content;
+         overflow: visible;
+         padding: 5px;
+         border-radius: 10px;
+         border: 1px solid var(--border-color-primary);
+     }
+
+     .dropdown-content {
+         border: none;
+         z-index: 50;
+     }
+
+     .dropdown-content tr.selected {
+         background-color: var(--block-label-background-fill);
+     }
+
+     .dropdown-content td {
+         color: var(--body-text-color);
+     }
+
+     .hoverable {
+         color: var(--body-text-color);
+         position: relative;
+         display: inline-block;
+         overflow: visible;
+         font-size: 15px;
+         line-height: 1.75;
+         margin: 0;
+         padding: 0;
+     }
+
+     .hoverable:hover .dropdown {
+         display: block;
+     }
+
+     pre {
+         white-space: pre-wrap;
+     }
+
+     /* TODO: This makes the hover menus extend outside the bounds of the chat area, which is good.
+        However, it also makes the scrollbar disappear, which is bad.
+        The scroll bar needs to still be present. So for now, we can't see dropdowns that extend past the edge of the chat area.
+     .chat {
+         overflow-y: auto;
+     }
+     */
+     """
+
+
+ # Monkeypatch applied to html_generator.py
+ # We simply don't render markdown into HTML. We wrap everything in <pre> tags to preserve whitespace
+ # formatting. If you're coloring tokens by perplexity or probability, or especially if you're using
+ # the probability dropdown, you probably care more about seeing the tokens the model actually outputted
+ # rather than rendering ```code blocks``` or *italics*.
+ def convert_to_markdown(string):
+     return '<pre>' + string + '</pre>'
+
+
+ html_generator.convert_to_markdown = convert_to_markdown
+
+
+ def ui():
+     def update_active_check(x):
+         params.update({'active': x})
+
+     def update_color_by_ppl_check(x):
+         params.update({'color_by_perplexity': x})
+
+     def update_color_by_prob_check(x):
+         params.update({'color_by_probability': x})
+
+     def update_prob_dropdown_check(x):
+         params.update({'probability_dropdown': x})
+
+     active_check = gradio.Checkbox(value=True, label="Compute probabilities and perplexity scores", info="Activate this extension. Note that this extension currently does not work with exllama or llama.cpp.")
+     color_by_ppl_check = gradio.Checkbox(value=False, label="Color by perplexity", info="Higher perplexity is more red. If also showing probability, higher perplexity has a larger blue component.")
+     color_by_prob_check = gradio.Checkbox(value=False, label="Color by probability", info="Green-yellow-red linear scale, with 100% green, 50% yellow, 0% red.")
+     prob_dropdown_check = gradio.Checkbox(value=False, label="Probability dropdown", info="Hover over a token to show a dropdown of top token probabilities. Currently slightly buggy with whitespace between tokens.")
+
+     active_check.change(update_active_check, active_check, None)
+     color_by_ppl_check.change(update_color_by_ppl_check, color_by_ppl_check, None)
+     color_by_prob_check.change(update_color_by_prob_check, color_by_prob_check, None)
+     prob_dropdown_check.change(update_prob_dropdown_check, prob_dropdown_check, None)
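
For intuition about what PerplexityLogits.__call__ above stores per step: it takes the Shannon entropy of the softmaxed logits and exponentiates it. A minimal worked example with a toy four-token vocabulary (the tensor values are illustrative only, not from the extension):

import numpy as np
import torch

scores = torch.tensor([[2.0, 1.0, 0.5, 0.1]])  # toy logits for one generation step
probs = torch.softmax(scores, dim=-1, dtype=torch.float)  # ~[0.574, 0.211, 0.128, 0.086]
log_probs = torch.nan_to_num(torch.log(probs))  # log(0) nan -> 0, as in the extension
entropy = -torch.sum(probs * log_probs)  # Shannon entropy, ~1.12 nats
perplexity = round(float(np.exp(entropy.cpu().numpy())), 4)  # exp(entropy), ~3.07
# 1.0 means the model is fully confident; a uniform distribution gives the vocabulary size (4 here)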