CISCai committed on
Commit
17252e7
1 Parent(s): e54c92d

Update to gradio 5.x

Browse files
Files changed (3) hide show
  1. README.md +2 -1
  2. app.py +37 -15
  3. requirements.txt +4 -3
README.md CHANGED
@@ -4,7 +4,8 @@ emoji: 🏢
4
  colorFrom: blue
5
  colorTo: purple
6
  sdk: gradio
7
- sdk_version: 4.42.0
 
8
  app_file: app.py
9
  pinned: false
10
  license: mit
 
4
  colorFrom: blue
5
  colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 5.5.0
8
+ python_version: 3.11
9
  app_file: app.py
10
  pinned: false
11
  license: mit
app.py CHANGED
@@ -69,7 +69,9 @@ def human_readable_metadata(
69
  with gr.Blocks(
70
  ) as blocks:
71
  with gr.Tab("Editor"):
72
- with gr.Row():
 
 
73
  hf_search = HuggingfaceHubSearch(
74
  label = "Search Huggingface Hub",
75
  placeholder = "Search for models on Huggingface",
@@ -158,7 +160,7 @@ with gr.Blocks(
158
 
159
  meta_string = gr.Textbox(
160
  label = "String",
161
- info = "Enter to update value (Shift+Enter for new line)",
162
  visible = False,
163
  )
164
 
@@ -193,16 +195,16 @@ with gr.Blocks(
193
  ],
194
  datatype = ["str", "str", "str"],
195
  column_widths = ["35%", "15%", "50%"],
196
- wrap = False,
197
  interactive = False,
198
  visible = False,
199
  )
200
 
201
  with gr.Tab("Help"):
202
  gr.Markdown(
203
- """# Huggingface GGUF Editor
204
 
205
- An advanced GGUF editor, reading GGUF files directly from Huggingface repositories and applying changes to your own copies.
206
 
207
  Below you will find a collection of example use-cases to show you how to perform a few common GGUF editing operations:
208
  """,
@@ -489,7 +491,7 @@ Any framework based on `llama-cpp-python` will let you select which chat templat
489
  visible = True,
490
  ),
491
  example_string: dict(
492
- info = "Paste in the updated chat template or make changes here. Using an external Jinja2 editor is recommended",
493
  value = "{%- for message in messages %}\n {{- '<|' + message['role'] + '|>\\n' }}\n {{- message['content'] + eos_token }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|assistant|>\\n' }}\n{%- endif %}",
494
  visible = True,
495
  ),
@@ -836,7 +838,12 @@ Any framework based on `llama-cpp-python` will let you select which chat templat
836
  for data, human in deferred_updates:
837
  human[:] = human_readable_metadata(meta, *data)
838
  except Exception as e:
839
- raise gr.Error(e)
 
 
 
 
 
840
 
841
  yield {
842
  meta_state: meta,
@@ -968,7 +975,7 @@ Any framework based on `llama-cpp-python` will let you select which chat templat
968
  visible = True if is_number and do_lookup_token else False,
969
  ),
970
  meta_number: gr.Number(
971
- value = val if is_number and data is not None and not do_select_token else 0,
972
  precision = 10 if typ == GGUFValueType.FLOAT32 or typ == GGUFValueType.FLOAT64 else 0,
973
  interactive = False if do_select_token else True,
974
  visible = True if is_number and not do_token_type else False,
@@ -1113,6 +1120,9 @@ Any framework based on `llama-cpp-python` will let you select which chat templat
1113
  meta: MetadataState,
1114
  keyup: gr.KeyUpData,
1115
  ):
 
 
 
1116
  found = token_search(meta, keyup.input_value)
1117
 
1118
  return {
@@ -1145,12 +1155,20 @@ Any framework based on `llama-cpp-python` will let you select which chat templat
1145
  indices: list[int],
1146
  ):
1147
  if choice is None or choice < 0 or choice >= len(indices) or (token := indices[choice]) < 0:
1148
- raise gr.Error('Token not found')
 
 
 
 
1149
 
1150
  tokens = meta.key.get('tokenizer.ggml.tokens', (-1, []))[1]
1151
 
1152
  if token >= len(tokens):
1153
- raise gr.Error('Invalid token')
 
 
 
 
1154
 
1155
  data = meta.key.get(key, (-1, []))[1]
1156
 
@@ -1170,7 +1188,11 @@ Any framework based on `llama-cpp-python` will let you select which chat templat
1170
  ),
1171
  }
1172
  case _:
1173
- raise gr.Error('Invalid metadata key')
 
 
 
 
1174
 
1175
 
1176
  @gr.on(
@@ -1191,6 +1213,9 @@ Any framework based on `llama-cpp-python` will let you select which chat templat
1191
  meta: MetadataState,
1192
  keyup: gr.KeyUpData,
1193
  ):
 
 
 
1194
  found = token_search(meta, keyup.input_value)
1195
 
1196
  return {
@@ -1214,10 +1239,7 @@ Any framework based on `llama-cpp-python` will let you select which chat templat
1214
  if key:
1215
  gr.Warning('Missing required value type')
1216
 
1217
- return {
1218
- meta_changes: gr.HighlightedText(
1219
- ),
1220
- }
1221
 
1222
  if key in meta.rem:
1223
  meta.rem.remove(key)
 
69
  with gr.Blocks(
70
  ) as blocks:
71
  with gr.Tab("Editor"):
72
+ with gr.Row(
73
+ equal_height = True,
74
+ ):
75
  hf_search = HuggingfaceHubSearch(
76
  label = "Search Huggingface Hub",
77
  placeholder = "Search for models on Huggingface",
 
160
 
161
  meta_string = gr.Textbox(
162
  label = "String",
163
+ info = "Enter to update value (Shift+Enter for new line).",
164
  visible = False,
165
  )
166
 
 
195
  ],
196
  datatype = ["str", "str", "str"],
197
  column_widths = ["35%", "15%", "50%"],
198
+ wrap = True,
199
  interactive = False,
200
  visible = False,
201
  )
202
 
203
  with gr.Tab("Help"):
204
  gr.Markdown(
205
+ """# Hugging Face GGUF Editor
206
 
207
+ An advanced GGUF editor, reading GGUF files directly from Hugging Face repositories and applying changes to your own copies.
208
 
209
  Below you will find a collection of example use-cases to show you how to perform a few common GGUF editing operations:
210
  """,
 
491
  visible = True,
492
  ),
493
  example_string: dict(
494
+ info = "Paste in the updated chat template or make changes here. Using [Chat Template Editor](https://huggingface.co/spaces/CISCai/chat-template-editor) is recommended",
495
  value = "{%- for message in messages %}\n {{- '<|' + message['role'] + '|>\\n' }}\n {{- message['content'] + eos_token }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|assistant|>\\n' }}\n{%- endif %}",
496
  visible = True,
497
  ),
 
838
  for data, human in deferred_updates:
839
  human[:] = human_readable_metadata(meta, *data)
840
  except Exception as e:
841
+ gr.Warning(
842
+ title = 'Loading error!',
843
+ message = str(e),
844
+ duration = None,
845
+ )
846
+ return
847
 
848
  yield {
849
  meta_state: meta,
 
975
  visible = True if is_number and do_lookup_token else False,
976
  ),
977
  meta_number: gr.Number(
978
+ value = val if is_number and data is not None and not do_select_token else None,
979
  precision = 10 if typ == GGUFValueType.FLOAT32 or typ == GGUFValueType.FLOAT64 else 0,
980
  interactive = False if do_select_token else True,
981
  visible = True if is_number and not do_token_type else False,
 
1120
  meta: MetadataState,
1121
  keyup: gr.KeyUpData,
1122
  ):
1123
+ if not keyup.input_value:
1124
+ return gr.skip()
1125
+
1126
  found = token_search(meta, keyup.input_value)
1127
 
1128
  return {
 
1155
  indices: list[int],
1156
  ):
1157
  if choice is None or choice < 0 or choice >= len(indices) or (token := indices[choice]) < 0:
1158
+ gr.Warning(
1159
+ title = 'Error',
1160
+ message = 'Token not found',
1161
+ )
1162
+ return gr.skip()
1163
 
1164
  tokens = meta.key.get('tokenizer.ggml.tokens', (-1, []))[1]
1165
 
1166
  if token >= len(tokens):
1167
+ gr.Warning(
1168
+ title = 'Error',
1169
+ message = 'Invalid token',
1170
+ )
1171
+ return gr.skip()
1172
 
1173
  data = meta.key.get(key, (-1, []))[1]
1174
 
 
1188
  ),
1189
  }
1190
  case _:
1191
+ gr.Warning(
1192
+ title = 'Error',
1193
+ message = 'Invalid metadata key',
1194
+ )
1195
+ return gr.skip()
1196
 
1197
 
1198
  @gr.on(
 
1213
  meta: MetadataState,
1214
  keyup: gr.KeyUpData,
1215
  ):
1216
+ if not keyup.input_value:
1217
+ return gr.skip()
1218
+
1219
  found = token_search(meta, keyup.input_value)
1220
 
1221
  return {
 
1239
  if key:
1240
  gr.Warning('Missing required value type')
1241
 
1242
+ return gr.skip()
 
 
 
1243
 
1244
  if key in meta.rem:
1245
  meta.rem.remove(key)
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
- # gradio[oauth]==4.42.0
2
- huggingface_hub==0.25.0
3
- gradio_huggingfacehub_search==0.0.7
 
 
1
+ # gradio[oauth]==5.5.0
2
+ huggingface_hub==0.26.2
3
+ # gradio_huggingfacehub_search==0.0.8
4
+ https://huggingface.co/spaces/CISCai/chat-template-editor/resolve/main/gradio_huggingfacehub_search-0.0.8-py3-none-any.whl