CISCai committed on
Commit af946e4
1 Parent(s): ee30551

Initial version

LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Sigbjørn Skjæret
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -9,6 +9,8 @@ app_file: app.py
  pinned: false
  license: mit
  short_description: Chat Template Editor
+ hf_oauth: true
+ hf_oauth_scopes:
+ - read-repos
+ - write-repos
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
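The `hf_oauth` metadata added above enables Hugging Face OAuth for the Space with read/write repository scopes. As a rough, illustrative sketch (not part of this commit), any Gradio handler that declares a `gr.OAuthToken` parameter then receives the signed-in user's token, which is how `app.py` below authenticates its Hub API calls:

```python
import gradio as gr
from huggingface_hub import HfApi

# Illustrative only: with hf_oauth enabled, Gradio injects the signed-in user's
# token (or None when not signed in) into handlers declaring gr.OAuthToken.
def whoami(oauth_token: gr.OAuthToken | None = None) -> str:
    if oauth_token is None:
        return "Not signed in"
    return HfApi().whoami(token = oauth_token.token)["name"]

with gr.Blocks() as demo:
    gr.LoginButton()
    user = gr.Textbox(label = "Signed-in user")
    gr.Button("Check").click(whoami, outputs = user)

if __name__ == "__main__":
    demo.launch()
```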
 
 
app.py ADDED
@@ -0,0 +1,1312 @@
1
+ import gradio as gr
2
+ import json
3
+ from difflib import Differ, unified_diff
4
+ from itertools import groupby
5
+ from gradio_huggingfacehub_search import HuggingfaceHubSearch
6
+ from huggingface_hub import HfApi, CommitOperationAdd
7
+ from transformers import PreTrainedTokenizerBase
8
+ from enum import StrEnum
9
+ from copy import deepcopy
10
+
11
+
12
+ hfapi = HfApi()
13
+
14
+
15
+ class ModelFiles(StrEnum):
16
+ TOKENIZER_CHAT_TEMPLATE = "tokenizer_chat_template.jinja"
17
+ TOKENIZER_CONFIG = "tokenizer_config.json"
18
+ TOKENIZER_INVERSE_TEMPLATE = "inverse_template.jinja"
19
+
20
+
21
+ example_labels = [
22
+ "Single user message",
23
+ "Single user message with system prompt",
24
+ "Longer conversation",
25
+ "Tool call",
26
+ "Tool call with response",
27
+ "Tool call with multiple responses",
28
+ "Tool call with complex tool definition",
29
+ "RAG call",
30
+ ]
31
+ example_values = [
32
+ [
33
+ "{}",
34
+ """[
35
+ {
36
+ "role": "user",
37
+ "content": "What is the capital of Norway?"
38
+ }
39
+ ]""",
40
+ ],
41
+ [
42
+ "{}",
43
+ """[
44
+ {
45
+ "role": "system",
46
+ "content": "You are a somewhat helpful AI."
47
+ },
48
+ {
49
+ "role": "user",
50
+ "content": "What is the capital of Norway?"
51
+ }
52
+ ]""",
53
+ ],
54
+ [
55
+ "{}",
56
+ """[
57
+ {
58
+ "role": "user",
59
+ "content": "What is the capital of Norway?"
60
+ },
61
+ {
62
+ "role": "assistant",
63
+ "content": "Oslo is the capital of Norway."
64
+ },
65
+ {
66
+ "role": "user",
67
+ "content": "What is the world famous sculpture park there called?"
68
+ },
69
+ {
70
+ "role": "assistant",
71
+ "content": "The world famous sculpture park in Oslo is called Vigelandsparken."
72
+ },
73
+ {
74
+ "role": "user",
75
+ "content": "What is the most famous sculpture in the park?"
76
+ }
77
+ ]""",
78
+ ],
79
+ [
80
+ """{
81
+ "tools": [
82
+ {
83
+ "type": "function",
84
+ "function": {
85
+ "name": "get_current_weather",
86
+ "description": "Get the current weather in a given location",
87
+ "parameters": {
88
+ "type": "object",
89
+ "properties": {
90
+ "location": {
91
+ "type": "string",
92
+ "description": "The city and state, e.g. San Francisco, CA"
93
+ },
94
+ "unit": {
95
+ "type": "string",
96
+ "enum": [ "celsius", "fahrenheit" ]
97
+ }
98
+ },
99
+ "required": [ "location" ]
100
+ }
101
+ }
102
+ }
103
+ ]
104
+ }""",
105
+ """[
106
+ {
107
+ "role": "user",
108
+ "content": "What's the weather like in Oslo?"
109
+ }
110
+ ]""",
111
+ ],
112
+ [
113
+ """{
114
+ "tools": [
115
+ {
116
+ "type": "function",
117
+ "function": {
118
+ "name": "get_current_weather",
119
+ "description": "Get the current weather in a given location",
120
+ "parameters": {
121
+ "type": "object",
122
+ "properties": {
123
+ "location": {
124
+ "type": "string",
125
+ "description": "The city and state, e.g. San Francisco, CA"
126
+ },
127
+ "unit": {
128
+ "type": "string",
129
+ "enum": [ "celsius", "fahrenheit" ]
130
+ }
131
+ },
132
+ "required": [ "location" ]
133
+ }
134
+ }
135
+ }
136
+ ]
137
+ }""",
138
+ """[
139
+ {
140
+ "role": "user",
141
+ "content": "What's the weather like in Oslo?"
142
+ },
143
+ {
144
+ "role": "assistant",
145
+ "content": null,
146
+ "tool_calls": [
147
+ {
148
+ "id": "toolcall1",
149
+ "type": "function",
150
+ "function": {
151
+ "name": "get_current_weather",
152
+ "arguments": {
153
+ "location": "Oslo, Norway",
154
+ "unit": "celsius"
155
+ }
156
+ }
157
+ }
158
+ ]
159
+ },
160
+ {
161
+ "role": "tool",
162
+ "content": "20",
163
+ "tool_call_id": "toolcall1"
164
+ }
165
+ ]""",
166
+ ],
167
+ [
168
+ """{
169
+ "tools": [
170
+ {
171
+ "type": "function",
172
+ "function": {
173
+ "name": "get_current_weather",
174
+ "description": "Get the current weather in a given location",
175
+ "parameters": {
176
+ "type": "object",
177
+ "properties": {
178
+ "location": {
179
+ "type": "string",
180
+ "description": "The city and state, e.g. San Francisco, CA"
181
+ },
182
+ "unit": {
183
+ "type": "string",
184
+ "enum": [ "celsius", "fahrenheit" ]
185
+ }
186
+ },
187
+ "required": [ "location" ]
188
+ }
189
+ }
190
+ }
191
+ ]
192
+ }""",
193
+ """[
194
+ {
195
+ "role": "user",
196
+ "content": "What's the weather like in Oslo and Stockholm?"
197
+ },
198
+ {
199
+ "role": "assistant",
200
+ "content": null,
201
+ "tool_calls": [
202
+ {
203
+ "id": "toolcall1",
204
+ "type": "function",
205
+ "function": {
206
+ "name": "get_current_weather",
207
+ "arguments": {
208
+ "location": "Oslo, Norway",
209
+ "unit": "celsius"
210
+ }
211
+ }
212
+ },
213
+ {
214
+ "id": "toolcall2",
215
+ "type": "function",
216
+ "function": {
217
+ "name": "get_current_weather",
218
+ "arguments": {
219
+ "location": "Stockholm, Sweden",
220
+ "unit": "celsius"
221
+ }
222
+ }
223
+ }
224
+ ]
225
+ },
226
+ {
227
+ "role": "tool",
228
+ "content": "20",
229
+ "tool_call_id": "toolcall1"
230
+ },
231
+ {
232
+ "role": "tool",
233
+ "content": "22",
234
+ "tool_call_id": "toolcall2"
235
+ }
236
+ ]""",
237
+ ],
238
+ [
239
+ """{
240
+ "tools": [
241
+ {
242
+ "type": "function",
243
+ "function": {
244
+ "name": "create_user",
245
+ "description": "creates a user",
246
+ "parameters": {
247
+ "type": "object",
248
+ "properties": {
249
+ "user": {
250
+ "title": "User",
251
+ "type": "object",
252
+ "properties": {
253
+ "user_id": {
254
+ "title": "User Id",
255
+ "description": "The unique identifier for a user",
256
+ "default": 0,
257
+ "type": "integer"
258
+ },
259
+ "name": {
260
+ "title": "Name",
261
+ "description": "The name of the user",
262
+ "type": "string"
263
+ },
264
+ "birthday": {
265
+ "type": "string",
266
+ "description": "The birthday of the user, e.g. 2022-01-01",
267
+ "pattern": "^([1-9] |1[0-9]| 2[0-9]|3[0-1])(.|-)([1-9] |1[0-2])(.|-|)20[0-9][0-9]$"
268
+ },
269
+ "email": {
270
+ "title": "Email",
271
+ "description": "The email address of the user",
272
+ "type": "string"
273
+ },
274
+ "friends": {
275
+ "title": "Friends",
276
+ "description": "List of friends of the user",
277
+ "type": "array",
278
+ "items": {"type": "string"}
279
+ }
280
+ },
281
+ "required": ["name", "email"]
282
+ }
283
+ },
284
+ "required": ["user"],
285
+ "definitions": {}
286
+ }
287
+ }
288
+ }
289
+ ]
290
+ }""",
291
+ """[
292
+ {
293
+ "role": "user",
294
+ "content": "Create a user for Test User (test@user.org), born January 1st 2000, with some random friends."
295
+ }
296
+ ]""",
297
+ ],
298
+ [
299
+ """{
300
+ "documents": [
301
+ {
302
+ "title": "Much ado about nothing",
303
+ "content": "Dolor sit amet..."
304
+ },
305
+ {
306
+ "title": "Less ado about something",
307
+ "content": "Lorem ipsum..."
308
+ }
309
+ ]
310
+ }""",
311
+ """[
312
+ {
313
+ "role": "user",
314
+ "content": "Write a brief summary of the following documents."
315
+ }
316
+ ]""",
317
+ ],
318
+ ]
319
+
320
+
321
+ pr_description_default = "### Changes\n* \n\n**Updated using [Chat Template Editor](https://huggingface.co/spaces/CISCai/chat-template-editor)**"
322
+
323
+
324
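+ # Helper around the tokenizer_config.json contents; normalizes the chat_template field between a single string (the "default" template) and a list of named templates.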
+ class TokenizerConfig():
325
+ def __init__(self, tokenizer_config: dict):
326
+ self._data = deepcopy(tokenizer_config)
327
+ self.chat_template = self._data.get("chat_template")
328
+
329
+ @property
330
+ def chat_template(self) -> str | list | None:
331
+ templates = [
332
+ {
333
+ "name": k,
334
+ "template": v,
335
+ }
336
+ for k, v in self.chat_templates.items() if v
337
+ ]
338
+
339
+ if not templates:
340
+ return None
341
+ elif len(templates) == 1 and templates[0]["name"] == "default":
342
+ return templates[0]["template"]
343
+ else:
344
+ return templates
345
+
346
+ @chat_template.setter
347
+ def chat_template(self, value: str | list | None):
348
+ if not value:
349
+ self.chat_templates.clear()
350
+ elif isinstance(value, str):
351
+ self.chat_templates = {
352
+ "default": value,
353
+ }
354
+ else:
355
+ self.chat_templates = {
356
+ t["name"]: t["template"]
357
+ for t in value
358
+ }
359
+
360
+ # @property
361
+ # def inverse_template(self) -> str | None:
362
+ # return self._data.get("inverse_template")
363
+
364
+ # @inverse_template.setter
365
+ # def inverse_template(self, value: str | None):
366
+ # if value:
367
+ # self._data["inverse_template"] = value
368
+ # elif "inverse_template" in self._data:
369
+ # del self._data["inverse_template"]
370
+
371
+ def json(self, indent: int | str | None = 4) -> str:
372
+ self._data["chat_template"] = self.chat_template
373
+
374
+ return json.dumps(self._data, ensure_ascii = False, indent = indent)
375
+
376
+
377
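+ # Infer the indentation of the original JSON (tab, 2-4 spaces, or None for single-line) so the re-serialized tokenizer_config.json keeps its original formatting.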
+ def get_json_indent(
378
+ json: str,
379
+ ) -> int | str | None:
380
+ nonl = json.replace('\r', '').replace('\n', '')
381
+ start = nonl.find("{")
382
+ first = nonl.find('"')
383
+ return "\t" if start >= 0 and nonl[start + 1] == "\t" else None if first == json.find('"') else min(max(first - start - 1, 2), 4)
384
+
385
+
386
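+ # Build HighlightedText segments from a character-level diff of two template strings, making tabs and newlines visible in the changed parts.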
+ def character_diff(
387
+ diff_title: str | None,
388
+ str_original: str,
389
+ str_updated: str,
390
+ ):
391
+ d = Differ()
392
+
393
+ title = [] if diff_title is None else [ (f"\n@@ {diff_title} @@\n", "@") ]
394
+ diffs = [
395
+ ("".join(map(lambda x: x[2:].replace("\t", "\u21e5").replace("\r", "\u240d\r").replace("\n", "\u240a\n") if x[0] != " " else x[2:], tokens)), group if group != " " else None) # .replace(" ", "\u2423")
396
+ for group, tokens in groupby(d.compare(str_updated, str_original), lambda x: x[0])
397
+ ]
398
+
399
+ return title + ([("No changes", "?")] if len(diffs) == 1 and diffs[0][1] is None else diffs)
400
+
401
+
402
+ with gr.Blocks(
403
+ ) as blocks:
404
+ with gr.Row():
405
+ hf_search = HuggingfaceHubSearch(
406
+ label = "Search Huggingface Hub",
407
+ placeholder = "Search for models on Huggingface",
408
+ search_type = "model",
409
+ sumbit_on_select = True,
410
+ scale = 2,
411
+ )
412
+
413
+ hf_branch = gr.Dropdown(
414
+ None,
415
+ label = "Branch",
416
+ scale = 1,
417
+ )
418
+
419
+ gr.LoginButton(
420
+ "Sign in for write access or gated/private repos",
421
+ scale = 1,
422
+ )
423
+
424
+ gr.Markdown(
425
+ """# Chat Template Editor
426
+
427
+ Any model repository with chat template(s) is supported (including GGUFs); however, note that all the model info is extracted using the Hugging Face API.
428
+ For GGUFs in particular, this means the chat template may deviate from the actual content of any given GGUF file, as only the default template from an arbitrary GGUF file is returned.
429
+
430
+ If you sign in and grant this editor write access, you will get the option to create a pull request with your changes (provided you have access to the repository).
431
+
432
+ You can freely edit and test GGUF chat template(s) (and are encouraged to do so), but you cannot commit any changes; it is recommended to use the [GGUF Editor](https://huggingface.co/spaces/CISCai/gguf-editor) to save the final result to a GGUF.
433
+ """,
434
+ )
435
+
436
+ with gr.Accordion("Commit Changes", open = False, visible = False) as pr_group:
437
+ with gr.Tabs() as pr_tabs:
438
+ with gr.Tab("Edit", id = "edit") as pr_edit_tab:
439
+ pr_title = gr.Textbox(
440
+ placeholder = "Title",
441
+ show_label = False,
442
+ max_lines = 1,
443
+ interactive = True,
444
+ )
445
+
446
+ pr_description = gr.Code(
447
+ label = "Description",
448
+ language = "markdown",
449
+ lines = 10,
450
+ max_lines = 10,
451
+ interactive = True,
452
+ )
453
+
454
+ with gr.Tab("Preview (with diffs)", id = "preview") as pr_preview_tab:
455
+ pr_preview_title = gr.Textbox(
456
+ show_label = False,
457
+ max_lines = 1,
458
+ interactive = False,
459
+ )
460
+
461
+ pr_preview_description = gr.Markdown(
462
+ label = "Description",
463
+ height = "13rem",
464
+ container = True,
465
+ )
466
+
467
+ pr_preview_diff = gr.HighlightedText(
468
+ label = "Diff",
469
+ combine_adjacent = True,
470
+ color_map = { "+": "red", "-": "green", "@": "blue", "?": "blue" },
471
+ interactive = False,
472
+ show_legend = False,
473
+ show_inline_category = False,
474
+ )
475
+
476
+ pr_submit = gr.Button(
477
+ "Create Pull Request",
478
+ variant = "huggingface",
479
+ interactive = False,
480
+ )
481
+ pr_submit.click(
482
+ lambda: gr.Button(
483
+ interactive = False,
484
+ ),
485
+ outputs = [
486
+ pr_submit,
487
+ ],
488
+ show_api = False,
489
+ )
490
+
491
+ with gr.Tabs() as template_tabs:
492
+ with gr.Tab("Edit", id = "edit") as edit_tab:
493
+ with gr.Accordion("Template Input", open = False):
494
+ chat_settings = gr.Code(
495
+ label = "Template Settings (kwargs)",
496
+ language = "json",
497
+ interactive = True,
498
+ render = False,
499
+ )
500
+
501
+ chat_messages = gr.Code(
502
+ label = "Template Messages",
503
+ language = "json",
504
+ interactive = True,
505
+ render = False,
506
+ )
507
+
508
+ example_input = gr.Examples(
509
+ examples = example_values,
510
+ example_labels = example_labels,
511
+ inputs = [
512
+ chat_settings,
513
+ chat_messages,
514
+ ],
515
+ )
516
+
517
+ chat_settings.render()
518
+ chat_messages.render()
519
+
520
+ chat_template = gr.Code(
521
+ label = "Chat Template (default)",
522
+ language = "jinja2",
523
+ interactive = True,
524
+ )
525
+
526
+ with gr.Accordion("Additional Templates", open = False):
527
+ inverse_template = gr.Code(
528
+ label = "Inverse Template",
529
+ language = "jinja2",
530
+ interactive = True,
531
+ visible = False,
532
+ )
533
+
534
+ chat_template_tool_use = gr.Code(
535
+ label = "Chat Template (tool_use)",
536
+ language = "jinja2",
537
+ interactive = True,
538
+ )
539
+
540
+ chat_template_rag = gr.Code(
541
+ label = "Chat Template (rag)",
542
+ language = "jinja2",
543
+ interactive = True,
544
+ )
545
+
546
+ with gr.Tab("Render", id = "render") as render_tab:
547
+ rendered_chat_template = gr.Textbox(
548
+ label = "Chat Prompt (default)",
549
+ interactive = False,
550
+ lines = 20,
551
+ show_copy_button = True,
552
+ )
553
+
554
+ with gr.Accordion("Additional Output", open = False):
555
+ rendered_inverse_template = gr.Code(
556
+ label = "Inverse Chat Messages",
557
+ language = "json",
558
+ interactive = False,
559
+ visible = False,
560
+ )
561
+
562
+ rendered_chat_template_tool_use = gr.Textbox(
563
+ label = "Chat Prompt (tool_use)",
564
+ interactive = False,
565
+ lines = 20,
566
+ show_copy_button = True,
567
+ )
568
+
569
+ rendered_chat_template_rag = gr.Textbox(
570
+ label = "Chat Prompt (rag)",
571
+ interactive = False,
572
+ lines = 20,
573
+ show_copy_button = True,
574
+ )
575
+
576
+ model_info = gr.State(
577
+ value = {},
578
+ )
579
+
580
+
581
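+ # Populate the branch dropdown with the repository's branches and any open pull request refs when a model is selected.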
+ @gr.on(
582
+ triggers = [
583
+ hf_search.submit,
584
+ ],
585
+ inputs = [
586
+ hf_search,
587
+ ],
588
+ outputs = [
589
+ hf_branch,
590
+ ],
591
+ show_api = False,
592
+ )
593
+ def get_branches(
594
+ repo: str,
595
+ oauth_token: gr.OAuthToken | None = None,
596
+ ):
597
+ branches = []
598
+
599
+ try:
600
+ refs = hfapi.list_repo_refs(
601
+ repo,
602
+ token = oauth_token.token if oauth_token else False,
603
+ )
604
+ branches = [b.name for b in refs.branches]
605
+
606
+ open_prs = hfapi.get_repo_discussions(
607
+ repo,
608
+ discussion_type = "pull_request",
609
+ discussion_status = "open",
610
+ token = oauth_token.token if oauth_token else False,
611
+ )
612
+ branches += [pr.git_reference for pr in open_prs]
613
+ except Exception as e:
614
+ pass
615
+
616
+ return {
617
+ hf_branch: gr.Dropdown(
618
+ branches or None,
619
+ value = "main" if "main" in branches else None,
620
+ ),
621
+ }
622
+
623
+
624
+ @gr.on(
625
+ triggers = [
626
+ pr_title.input,
627
+ ],
628
+ inputs = [
629
+ pr_title,
630
+ ],
631
+ outputs = [
632
+ pr_submit,
633
+ ],
634
+ show_api = False,
635
+ )
636
+ def enable_pr_submit(
637
+ title: str,
638
+ ):
639
+ return gr.Button(
640
+ interactive = bool(title)
641
+ )
642
+
643
+
644
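+ # Build the commit preview: pass the title/description through and diff tokenizer_config.json and the individual templates against their original contents.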
+ @gr.on(
645
+ triggers = [
646
+ pr_preview_tab.select,
647
+ ],
648
+ inputs = [
649
+ model_info,
650
+ pr_title,
651
+ pr_description,
652
+ chat_template,
653
+ chat_template_tool_use,
654
+ chat_template_rag,
655
+ inverse_template,
656
+ ],
657
+ outputs = [
658
+ pr_preview_title,
659
+ pr_preview_description,
660
+ pr_preview_diff,
661
+ ],
662
+ show_api = False,
663
+ )
664
+ def render_pr_preview(
665
+ info: dict,
666
+ title: str,
667
+ description: str,
668
+ template: str,
669
+ template_tool_use: str,
670
+ template_rag: str,
671
+ template_inverse: str,
672
+ ):
673
+ changes = []
674
+ org_template = ""
675
+ org_template_inverse = ""
676
+ org_template_tool_use = ""
677
+ org_template_rag = ""
678
+
679
+ tokenizer_file = info.get(ModelFiles.TOKENIZER_CONFIG, {})
680
+ org_config = tokenizer_file.get("data")
681
+
682
+ if org_config:
683
+ tokenizer_config = TokenizerConfig(tokenizer_file.get("content"))
684
+
685
+ org_template = tokenizer_config.chat_templates.get("default") or ""
686
+ org_template_tool_use = tokenizer_config.chat_templates.get("tool_use") or ""
687
+ org_template_rag = tokenizer_config.chat_templates.get("rag") or ""
688
+ # org_template_inverse = tokenizer_config.inverse_template or ""
689
+
690
+ tokenizer_config.chat_templates["default"] = template
691
+ tokenizer_config.chat_templates["tool_use"] = template_tool_use
692
+ tokenizer_config.chat_templates["rag"] = template_rag
693
+ # tokenizer_config.inverse_template = template_inverse
694
+
695
+ new_config = tokenizer_config.json(get_json_indent(org_config))
696
+ if org_config.endswith("\n"):
697
+ new_config += "\n"
698
+
699
+ changes += [
700
+ (token if token[1] in ("-", "+", "@") else token[1:].replace("\t", "\u21e5").replace("\r\n", "\u240d\u240a\r\n").replace("\r", "\u240d\r").replace("\n", "\u240a\n"), token[0] if token[0] != " " else None) # .replace(" ", "\u2423")
701
+ for token in unified_diff(new_config.splitlines(keepends = True), org_config.splitlines(keepends = True), fromfile = ModelFiles.TOKENIZER_CONFIG, tofile = ModelFiles.TOKENIZER_CONFIG)
702
+ ]
703
+
704
+ tokenizer_chat_template = info.get(ModelFiles.TOKENIZER_CHAT_TEMPLATE, {})
705
+ org_template = tokenizer_chat_template.get("data", org_template)
706
+
707
+ tokenizer_inverse_template = info.get(ModelFiles.TOKENIZER_INVERSE_TEMPLATE, {})
708
+ org_template_inverse = tokenizer_inverse_template.get("data", org_template_inverse)
709
+
710
+ if org_template or template:
711
+ changes += character_diff(f"Default Template{f' ({ModelFiles.TOKENIZER_CHAT_TEMPLATE})' if tokenizer_chat_template else ''}", org_template, template)
712
+
713
+ if org_template_inverse or template_inverse:
714
+ changes += character_diff(f"Inverse Template{f' ({ModelFiles.TOKENIZER_INVERSE_TEMPLATE})' if tokenizer_inverse_template else ''}", org_template_inverse, template_inverse)
715
+
716
+ if org_template_tool_use or template_tool_use:
717
+ changes += character_diff("Tool Use Template", org_template_tool_use, template_tool_use)
718
+
719
+ if org_template_rag or template_rag:
720
+ changes += character_diff("RAG Template", org_template_rag, template_rag)
721
+
722
+ return title, description, changes
723
+
724
+
725
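+ # Commit the edited templates: update tokenizer_config.json (and standalone template files if present), creating a new pull request or committing to the selected PR branch.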
+ @gr.on(
726
+ triggers = [
727
+ pr_submit.click,
728
+ ],
729
+ inputs = [
730
+ hf_search,
731
+ hf_branch,
732
+ model_info,
733
+ pr_title,
734
+ pr_description,
735
+ chat_template,
736
+ chat_template_tool_use,
737
+ chat_template_rag,
738
+ inverse_template,
739
+ ],
740
+ outputs = [
741
+ model_info,
742
+ hf_branch,
743
+ pr_title,
744
+ pr_preview_title,
745
+ pr_description,
746
+ pr_submit,
747
+ ],
748
+ show_api = False,
749
+ )
750
+ def submit_pull_request(
751
+ repo: str,
752
+ branch: str | None,
753
+ info: dict,
754
+ title: str,
755
+ description: str,
756
+ template: str,
757
+ template_tool_use: str,
758
+ template_rag: str,
759
+ template_inverse: str,
760
+ progress = gr.Progress(track_tqdm = True),
761
+ oauth_token: gr.OAuthToken | None = None,
762
+ ):
763
+ operations = []
764
+ pr_branch = branch if branch.startswith("refs/pr/") else None
765
+
766
+ tokenizer_file = info.get(ModelFiles.TOKENIZER_CONFIG, {})
767
+ if org_config := tokenizer_file.get("data"):
768
+ tokenizer_config = TokenizerConfig(tokenizer_file.get("content"))
769
+
770
+ tokenizer_config.chat_templates["default"] = template
771
+ tokenizer_config.chat_templates["tool_use"] = template_tool_use
772
+ tokenizer_config.chat_templates["rag"] = template_rag
773
+ # tokenizer_config.inverse_template = template_inverse
774
+
775
+ new_config = tokenizer_config.json(get_json_indent(org_config))
776
+ if org_config.endswith("\n"):
777
+ new_config += "\n"
778
+
779
+ if org_config != new_config:
780
+ operations.append(CommitOperationAdd(ModelFiles.TOKENIZER_CONFIG, new_config.encode("utf-8")))
781
+
782
+ tokenizer_chat_template = info.get(ModelFiles.TOKENIZER_CHAT_TEMPLATE, {})
783
+ if template_data := tokenizer_chat_template.get("data"):
784
+ if template_data != template:
785
+ operations.append(CommitOperationAdd(ModelFiles.TOKENIZER_CHAT_TEMPLATE, template.encode("utf-8")))
786
+
787
+ tokenizer_inverse_template = info.get(ModelFiles.TOKENIZER_INVERSE_TEMPLATE, {})
788
+ if template_data := tokenizer_inverse_template.get("data"):
789
+ if template_data != template_inverse:
790
+ operations.append(CommitOperationAdd(ModelFiles.TOKENIZER_INVERSE_TEMPLATE, template_inverse.encode("utf-8")))
791
+
792
+ if not operations:
793
+ gr.Info("No changes to commit...")
794
+ return gr.skip()
795
+
796
+ try:
797
+ commit = hfapi.create_commit(
798
+ repo,
799
+ operations,
800
+ revision = branch,
801
+ commit_message = title,
802
+ commit_description = description,
803
+ create_pr = False if pr_branch else True,
804
+ parent_commit = info.get("parent_commit"),
805
+ token = oauth_token.token if oauth_token else False,
806
+ )
807
+ except Exception as e:
808
+ gr.Warning(
809
+ message = str(e),
810
+ duration = None,
811
+ title = "Error committing changes",
812
+ )
813
+ return gr.skip()
814
+
815
+ info["parent_commit"] = commit.oid
816
+
817
+ if org_config:
818
+ tokenizer_file["data"] = new_config
819
+ tokenizer_file["content"] = json.loads(new_config)
820
+
821
+ if tokenizer_chat_template:
822
+ tokenizer_chat_template["data"] = template
823
+
824
+ if tokenizer_inverse_template:
825
+ tokenizer_inverse_template["data"] = template_inverse
826
+
827
+ branches = []
828
+
829
+ try:
830
+ refs = hfapi.list_repo_refs(
831
+ repo,
832
+ token = oauth_token.token if oauth_token else False,
833
+ )
834
+ branches = [b.name for b in refs.branches]
835
+
836
+ open_prs = hfapi.get_repo_discussions(
837
+ repo,
838
+ discussion_type = "pull_request",
839
+ discussion_status = "open",
840
+ token = oauth_token.token if oauth_token else False,
841
+ )
842
+ branches += [pr.git_reference for pr in open_prs]
843
+ except Exception as e:
844
+ pass
845
+
846
+ pr_created = commit.pr_revision if commit.pr_revision in branches else None
847
+
848
+ return {
849
+ model_info: info,
850
+ hf_branch: gr.skip() if pr_branch else gr.Dropdown(
851
+ branches or None,
852
+ value = pr_created or branch,
853
+ ),
854
+ pr_title: gr.skip() if pr_branch else gr.Textbox(
855
+ value = None,
856
+ placeholder = "Message" if pr_created else "Title",
857
+ label = commit.commit_message if pr_created else None,
858
+ show_label = True if pr_created else False,
859
+ ),
860
+ pr_preview_title: gr.skip() if pr_branch else gr.Textbox(
861
+ label = commit.commit_message if pr_created else None,
862
+ show_label = True if pr_created else False,
863
+ ),
864
+ pr_description: gr.Code(
865
+ value = pr_description_default,
866
+ ),
867
+ pr_submit: gr.skip() if pr_branch else gr.Button(
868
+ value = f"Commit to PR #{commit.pr_num}" if pr_created else "Create Pull Request",
869
+ ),
870
+ }
871
+
872
+
873
+ @gr.on(
874
+ triggers = [
875
+ hf_search.submit,
876
+ hf_branch.change,
877
+ ],
878
+ outputs = [
879
+ pr_tabs,
880
+ template_tabs,
881
+ ],
882
+ show_api = False,
883
+ )
884
+ def switch_to_edit_tabs():
885
+ return gr.Tabs(
886
+ selected = "edit",
887
+ ), gr.Tabs(
888
+ selected = "edit",
889
+ )
890
+
891
+
892
+ @gr.on(
893
+ triggers = [
894
+ chat_template.focus,
895
+ chat_template_tool_use.focus,
896
+ chat_template_rag.focus,
897
+ inverse_template.focus,
898
+ ],
899
+ outputs = [
900
+ pr_tabs,
901
+ ],
902
+ show_api = False,
903
+ )
904
+ def switch_to_edit_tab():
905
+ return gr.Tabs(
906
+ selected = "edit",
907
+ )
908
+
909
+
910
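+ # Load chat template(s), tokenizer settings and example messages from the Hub model info; for GGUF repos only the default template is available.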
+ def template_data_from_model_info(
911
+ repo: str,
912
+ branch: str | None,
913
+ oauth_token: gr.OAuthToken | None = None,
914
+ ):
915
+ try:
916
+ info = hfapi.model_info(
917
+ repo,
918
+ revision = branch,
919
+ expand = [
920
+ "config",
921
+ "disabled",
922
+ "gated",
923
+ "gguf",
924
+ "private",
925
+ "widgetData",
926
+ ],
927
+ token = oauth_token.token if oauth_token else False,
928
+ )
929
+ except Exception as e:
930
+ gr.Warning(
931
+ message = str(e),
932
+ title = "Error loading model info",
933
+ )
934
+ return {}, None, None, None, None, None, None
935
+
936
+ templates = info.gguf.get("chat_template") if info.gguf else info.config.get("tokenizer_config", {}).get("chat_template") if info.config else None
937
+ model_info = {
938
+ "gguf": bool(info.gguf),
939
+ "disabled": info.disabled,
940
+ "gated": info.gated,
941
+ "private": info.private,
942
+ }
943
+
944
+ template_messages = example_values[0][1]
945
+
946
+ template_tool_use = None
947
+ template_rag = None
948
+ template_inverse = None
949
+ template_kwargs = {
950
+ "add_generation_prompt": True,
951
+ "clean_up_tokenization_spaces": False,
952
+ "bos_token": "<|startoftext|>",
953
+ "eos_token": "<|im_end|>",
954
+ }
955
+ if info.config:
956
+ # template_inverse = info.config.get("tokenizer_config", {}).get("inverse_template")
957
+ for k, v in info.config.get("tokenizer_config", {}).items():
958
+ if k != "chat_template": # and k != "inverse_template":
959
+ template_kwargs[k] = v
960
+
961
+ if info.widget_data:
962
+ for data in info.widget_data:
963
+ if "messages" in data:
964
+ template_messages = json.dumps(data["messages"], ensure_ascii = False, indent = 4)
965
+ break
966
+
967
+ if isinstance(templates, list):
968
+ templates = { template["name"]: template["template"] for template in templates }
969
+ template_tool_use = templates.get("tool_use")
970
+ template_rag = templates.get("rag")
971
+ templates = templates.get("default")
972
+
973
+ return model_info, json.dumps(template_kwargs, ensure_ascii = False, indent = 4), template_messages, templates, template_tool_use, template_rag, template_inverse
974
+
975
+
976
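+ # Check whether the signed-in user can commit changes and, if so, download the raw tokenizer_config.json and any standalone template files so edits are applied to the exact original content.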
+ def template_data_from_model_files(
977
+ repo: str,
978
+ branch: str | None,
979
+ info: dict,
980
+ progress = gr.Progress(track_tqdm = True),
981
+ oauth_token: gr.OAuthToken | None = None,
982
+ ):
983
+ write_access = False
984
+
985
+ if info and oauth_token and hfapi.get_token_permission(oauth_token.token) == "write":
986
+ if info.get("gguf"):
987
+ gr.Warning("Repository contains GGUFs, use GGUF Editor if you want to commit changes...")
988
+ elif info.get("disabled"):
989
+ gr.Warning("Repository is disabled, committing changes is not possible...")
990
+ elif (gated := info.get("gated")) or (private := info.get("private")):
991
+ try:
992
+ hfapi.auth_check(
993
+ repo,
994
+ token = oauth_token.token if oauth_token else False,
995
+ )
996
+ except Exception as e:
997
+ if gated:
998
+ gr.Warning(f"Repository is gated with {gated} approval, you must request access to be able to make changes...")
999
+ elif private:
1000
+ gr.Warning("Repository is private, you must use proper credentials to be able to make changes...")
1001
+
1002
+ gr.Warning(str(e))
1003
+ else:
1004
+ write_access = True
1005
+ else:
1006
+ write_access = True
1007
+
1008
+ if write_access:
1009
+ if (write_access := hfapi.file_exists(
1010
+ repo,
1011
+ ModelFiles.TOKENIZER_CONFIG,
1012
+ revision = branch,
1013
+ token = oauth_token.token if oauth_token else False,
1014
+ )):
1015
+ try:
1016
+ commits = hfapi.list_repo_commits(
1017
+ repo,
1018
+ revision = branch,
1019
+ token = oauth_token.token if oauth_token else False,
1020
+ )
1021
+ parent_commit = commits[0].commit_id if commits else None
1022
+
1023
+ tokenizer_config_file = hfapi.hf_hub_download(
1024
+ repo,
1025
+ ModelFiles.TOKENIZER_CONFIG,
1026
+ revision = parent_commit or branch,
1027
+ token = oauth_token.token if oauth_token else False,
1028
+ )
1029
+
1030
+ tokenizer_chat_template = None
1031
+ if (hfapi.file_exists(
1032
+ repo,
1033
+ ModelFiles.TOKENIZER_CHAT_TEMPLATE,
1034
+ revision = branch,
1035
+ token = oauth_token.token if oauth_token else False,
1036
+ )):
1037
+ tokenizer_chat_template = hfapi.hf_hub_download(
1038
+ repo,
1039
+ ModelFiles.TOKENIZER_CHAT_TEMPLATE,
1040
+ revision = parent_commit or branch,
1041
+ token = oauth_token.token if oauth_token else False,
1042
+ )
1043
+
1044
+ tokenizer_inverse_template = None
1045
+ if (hfapi.file_exists(
1046
+ repo,
1047
+ ModelFiles.TOKENIZER_INVERSE_TEMPLATE,
1048
+ revision = branch,
1049
+ token = oauth_token.token if oauth_token else False,
1050
+ )):
1051
+ tokenizer_inverse_template = hfapi.hf_hub_download(
1052
+ repo,
1053
+ ModelFiles.TOKENIZER_INVERSE_TEMPLATE,
1054
+ revision = parent_commit or branch,
1055
+ token = oauth_token.token if oauth_token else False,
1056
+ )
1057
+ except Exception as e:
1058
+ gr.Warning(
1059
+ message = str(e),
1060
+ title = "Error downloading template files",
1061
+ )
1062
+ else:
1063
+ info["parent_commit"] = parent_commit
1064
+
1065
+ if tokenizer_config_file:
1066
+ with open(tokenizer_config_file, "r", encoding = "utf-8") as fp:
1067
+ config_content = fp.read()
1068
+ info[ModelFiles.TOKENIZER_CONFIG] = {
1069
+ "data": config_content,
1070
+ "content": json.loads(config_content),
1071
+ }
1072
+
1073
+ if tokenizer_chat_template:
1074
+ with open(tokenizer_chat_template, "r", encoding = "utf-8") as fp:
1075
+ template_data = fp.read()
1076
+ info[ModelFiles.TOKENIZER_CHAT_TEMPLATE] = {
1077
+ "data": template_data,
1078
+ }
1079
+
1080
+ if tokenizer_inverse_template:
1081
+ with open(tokenizer_inverse_template, "r", encoding = "utf-8") as fp:
1082
+ template_data = fp.read()
1083
+ info[ModelFiles.TOKENIZER_INVERSE_TEMPLATE] = {
1084
+ "data": template_data,
1085
+ }
1086
+ else:
1087
+ gr.Warning(f"No {ModelFiles.TOKENIZER_CONFIG} found in repository...")
1088
+
1089
+ pr_details = None
1090
+ if branch and branch.startswith("refs/pr/"):
1091
+ pr_num = branch.split("/")[-1]
1092
+
1093
+ if pr_num and pr_num.isdigit():
1094
+ pr_details = hfapi.get_discussion_details(
1095
+ repo,
1096
+ int(pr_num),
1097
+ token = oauth_token.token if oauth_token else False,
1098
+ )
1099
+
1100
+ return {
1101
+ model_info: info,
1102
+ pr_group: gr.Accordion(
1103
+ visible = write_access,
1104
+ ),
1105
+ pr_title: gr.Textbox(
1106
+ value = None,
1107
+ placeholder = "Message" if pr_details else "Title",
1108
+ label = pr_details.title if pr_details else None,
1109
+ show_label = True if pr_details else False,
1110
+ ),
1111
+ pr_preview_title: gr.Textbox(
1112
+ label = pr_details.title if pr_details else None,
1113
+ show_label = True if pr_details else False,
1114
+ ),
1115
+ pr_description: gr.Code(
1116
+ value = pr_description_default,
1117
+ ),
1118
+ pr_submit: gr.Button(
1119
+ value = f"Commit to PR #{pr_details.num}" if pr_details else "Create Pull Request",
1120
+ ),
1121
+ # chat_template: gr.skip() if ModelFiles.TOKENIZER_CHAT_TEMPLATE not in info else gr.Code(
1122
+ # value = info[ModelFiles.TOKENIZER_CHAT_TEMPLATE]["data"],
1123
+ # ),
1124
+ # inverse_template: gr.skip() if ModelFiles.TOKENIZER_INVERSE_TEMPLATE not in info else gr.Code(
1125
+ # value = info[ModelFiles.TOKENIZER_INVERSE_TEMPLATE]["data"],
1126
+ # ),
1127
+ }
1128
+
1129
+
1130
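+ # Merge the model's tokenizer settings into each example's kwargs so the example inputs match the selected model.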
+ def update_examples(
1131
+ settings: str,
1132
+ ):
1133
+ settings = json.loads(settings)
1134
+ examples = []
1135
+
1136
+ for example in example_values:
1137
+ x = example.copy()
1138
+ x0 = json.loads(x[0])
1139
+ x0.update(settings)
1140
+ x[0] = json.dumps(x0, ensure_ascii = False, indent = 4)
1141
+ examples.append(x)
1142
+
1143
+ return gr.Dataset(
1144
+ samples = examples,
1145
+ )
1146
+
1147
+
1148
+ gr.on(
1149
+ fn = template_data_from_model_info,
1150
+ triggers = [
1151
+ hf_search.submit,
1152
+ hf_branch.input,
1153
+ ],
1154
+ inputs = [
1155
+ hf_search,
1156
+ hf_branch,
1157
+ ],
1158
+ outputs = [
1159
+ model_info,
1160
+ chat_settings,
1161
+ chat_messages,
1162
+ chat_template,
1163
+ chat_template_tool_use,
1164
+ chat_template_rag,
1165
+ inverse_template,
1166
+ ],
1167
+ ).success(
1168
+ fn = update_examples,
1169
+ inputs = [
1170
+ chat_settings,
1171
+ ],
1172
+ outputs = [
1173
+ example_input.dataset,
1174
+ ],
1175
+ show_api = False,
1176
+ ).then(
1177
+ fn = template_data_from_model_files,
1178
+ inputs = [
1179
+ hf_search,
1180
+ hf_branch,
1181
+ model_info,
1182
+ ],
1183
+ outputs = [
1184
+ model_info,
1185
+ pr_group,
1186
+ pr_title,
1187
+ pr_preview_title,
1188
+ pr_description,
1189
+ pr_submit,
1190
+ # chat_template,
1191
+ # inverse_template,
1192
+ ],
1193
+ show_api = False,
1194
+ )
1195
+
1196
+
1197
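+ # Render the example messages through each template using a bare PreTrainedTokenizerBase built from the template settings.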
+ @gr.on(
1198
+ triggers = [
1199
+ render_tab.select,
1200
+ ],
1201
+ inputs = [
1202
+ chat_settings,
1203
+ chat_messages,
1204
+ chat_template,
1205
+ chat_template_tool_use,
1206
+ chat_template_rag,
1207
+ inverse_template,
1208
+ ],
1209
+ outputs = [
1210
+ rendered_chat_template,
1211
+ rendered_chat_template_tool_use,
1212
+ rendered_chat_template_rag,
1213
+ rendered_inverse_template,
1214
+ ],
1215
+ )
1216
+ def render_chat_templates(
1217
+ settings: str,
1218
+ messages: str,
1219
+ template: str,
1220
+ template_tool_use: str | None = None,
1221
+ template_rag: str | None = None,
1222
+ template_inverse: str | None = None,
1223
+ ):
1224
+ try:
1225
+ settings = json.loads(settings) if settings else {}
1226
+ except Exception as e:
1227
+ gr.Warning(
1228
+ message = str(e),
1229
+ duration = None,
1230
+ title = "Template Settings Error",
1231
+ )
1232
+ return gr.skip()
1233
+
1234
+ try:
1235
+ messages = json.loads(messages) if messages else []
1236
+ except Exception as e:
1237
+ gr.Warning(
1238
+ message = str(e),
1239
+ duration = None,
1240
+ title = "Template Messages Error",
1241
+ )
1242
+ return gr.skip()
1243
+
1244
+ if not isinstance(settings, dict):
1245
+ gr.Warning("Invalid Template Settings!")
1246
+ return gr.skip()
1247
+
1248
+ if not messages or not isinstance(messages, list) or not isinstance(messages[0], dict) or "role" not in messages[0]:
1249
+ gr.Warning("No Template Messages!")
1250
+ return gr.skip()
1251
+
1252
+ tools = settings.get("tools")
1253
+ documents = settings.get("documents")
1254
+ add_generation_prompt = settings.get("add_generation_prompt")
1255
+
1256
+ cleanup_settings = []
1257
+ for k in settings.keys():
1258
+ if k.endswith("_side") or k.endswith("_token") or k.endswith("_tokens") or k == "clean_up_tokenization_spaces":
1259
+ continue
1260
+
1261
+ cleanup_settings.append(k)
1262
+
1263
+ for cleanup in cleanup_settings:
1264
+ del settings[cleanup]
1265
+
1266
+ tokenizer = PreTrainedTokenizerBase(**settings)
1267
+ chat_output = None
1268
+ chat_tool_use_output = None
1269
+ chat_rag_output = None
1270
+ inverse_output = None
1271
+ try:
1272
+ chat_output = tokenizer.apply_chat_template(messages, tools = tools, documents = documents, chat_template = template, add_generation_prompt = add_generation_prompt, tokenize = False)
1273
+ except Exception as e:
1274
+ gr.Warning(
1275
+ message = str(e),
1276
+ duration = None,
1277
+ title = "Chat Template Error",
1278
+ )
1279
+ try:
1280
+ chat_tool_use_output = tokenizer.apply_chat_template(messages, tools = tools or [], chat_template = template_tool_use, add_generation_prompt = add_generation_prompt, tokenize = False) if template_tool_use else None
1281
+ except Exception as e:
1282
+ gr.Warning(
1283
+ message = str(e),
1284
+ duration = None,
1285
+ title = "Tool Use Template Error",
1286
+ )
1287
+ try:
1288
+ chat_rag_output = tokenizer.apply_chat_template(messages, documents = documents or [], chat_template = template_rag, add_generation_prompt = add_generation_prompt, tokenize = False) if template_rag else None
1289
+ except Exception as e:
1290
+ gr.Warning(
1291
+ message = str(e),
1292
+ duration = None,
1293
+ title = "RAG Template Error",
1294
+ )
1295
+ try:
1296
+ inverse_output = tokenizer.apply_inverse_template(messages, inverse_template = template_inverse) if template_inverse else None
1297
+ except Exception as e:
1298
+ gr.Warning(
1299
+ message = str(e),
1300
+ duration = None,
1301
+ title = "Inverse Template Error",
1302
+ )
1303
+
1304
+ return chat_output, chat_tool_use_output, chat_rag_output, json.dumps(inverse_output, ensure_ascii = False, indent = 4) if inverse_output is not None else None
1305
+
1306
+
1307
+ if __name__ == "__main__":
1308
+ blocks.queue(
1309
+ max_size = 10,
1310
+ default_concurrency_limit = 10,
1311
+ )
1312
+ blocks.launch()
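For reference, the rendering path in `render_chat_templates` above reduces to transformers' `apply_chat_template`; a minimal standalone sketch follows (the template, special tokens and message are illustrative, not taken from any particular model):

```python
from transformers import PreTrainedTokenizerBase

# A bare tokenizer carrying only special tokens, as the app builds from the
# "Template Settings" JSON; no vocabulary is needed just to render a template.
tokenizer = PreTrainedTokenizerBase(bos_token = "<|startoftext|>", eos_token = "<|im_end|>")

# Illustrative Jinja chat template and conversation.
template = (
    "{{ bos_token }}"
    "{% for message in messages %}{{ message['role'] }}: {{ message['content'] }}\n{% endfor %}"
    "{% if add_generation_prompt %}assistant:{% endif %}"
)
messages = [ { "role": "user", "content": "What is the capital of Norway?" } ]

prompt = tokenizer.apply_chat_template(
    messages,
    chat_template = template,        # template under test, overriding any stored one
    add_generation_prompt = True,
    tokenize = False,                # return the rendered prompt string, not token ids
)
print(prompt)
```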
gradio_huggingfacehub_search-0.0.8-py3-none-any.whl ADDED
Binary file (58.5 kB).
 
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ # gradio[oauth]==5.3.0
+ huggingface_hub==0.26.1
+ # gradio_huggingfacehub_search==0.0.8
+ transformers==4.45.2
+ ./gradio_huggingfacehub_search-0.0.8-py3-none-any.whl