zetavg committed on
Commit
320751d
1 Parent(s): 3a38e1d

add cancel timeout buttons and update ui

Browse files
llama_lora/ui/finetune_ui.py CHANGED
@@ -393,6 +393,8 @@ def do_abort_training():
393
 
394
 
395
  def finetune_ui():
 
 
396
  with gr.Blocks() as finetune_ui_blocks:
397
  with gr.Column(elem_id="finetune_ui_content"):
398
  with gr.Tab("Prepare"):
@@ -466,10 +468,13 @@ def finetune_ui():
466
  elem_id="dataset_plain_text_data_separator",
467
  placeholder=default_dataset_plain_text_data_separator,
468
  value=default_dataset_plain_text_data_separator)
469
- dataset_text_format.change(fn=handle_switch_dataset_text_format, inputs=[
470
- dataset_text_format], outputs=[dataset_plain_text_separators_group])
471
- dataset_text_load_sample_button.click(fn=load_sample_dataset_to_text_input, inputs=[
472
- dataset_text_format], outputs=[dataset_text])
 
 
 
473
  gr.Markdown(
474
  "💡 Switch to the \"Preview\" tab to verify that your inputs are correct.")
475
  with gr.Tab("Preview"):
@@ -484,14 +489,15 @@ def finetune_ui():
484
  )
485
  finetune_dataset_preview = gr.Dataframe(
486
  wrap=True, elem_id="finetune_dataset_preview")
487
- load_dataset_from.change(
488
- fn=handle_switch_dataset_source,
489
- inputs=[load_dataset_from],
490
- outputs=[
491
- dataset_text_input_group,
492
- dataset_from_data_dir_group
493
- ]
494
- )
 
495
 
496
  dataset_inputs = [
497
  template,
@@ -506,21 +512,23 @@ def finetune_ui():
506
  dataset_preview_inputs = dataset_inputs + \
507
  [finetune_dataset_preview_show_actual_prompt]
508
  for i in dataset_preview_inputs:
509
- i.change(
510
- fn=refresh_preview,
511
- inputs=dataset_preview_inputs,
512
- outputs=[finetune_dataset_preview,
513
- finetune_dataset_preview_info_message,
514
- dataset_from_text_message,
515
- dataset_from_data_dir_message
516
- ]
517
- )
518
-
519
- reload_selections_button.click(
 
520
  reload_selections,
521
  inputs=[template, dataset_from_data_dir],
522
  outputs=[template, dataset_from_data_dir],
523
  )
 
524
 
525
  max_seq_length = gr.Slider(
526
  minimum=1, maximum=4096, value=512,
@@ -621,6 +629,13 @@ def finetune_ui():
621
  inputs=None, outputs=None,
622
  cancels=[train_progress])
623
 
 
 
 
 
 
 
 
624
  finetune_ui_blocks.load(_js="""
625
  function finetune_ui_blocks_js() {
626
  // Auto load options
 
393
 
394
 
395
  def finetune_ui():
396
+ things_that_might_timeout = []
397
+
398
  with gr.Blocks() as finetune_ui_blocks:
399
  with gr.Column(elem_id="finetune_ui_content"):
400
  with gr.Tab("Prepare"):
 
468
  elem_id="dataset_plain_text_data_separator",
469
  placeholder=default_dataset_plain_text_data_separator,
470
  value=default_dataset_plain_text_data_separator)
471
+ things_that_might_timeout.append(
472
+ dataset_text_format.change(fn=handle_switch_dataset_text_format, inputs=[
473
+ dataset_text_format], outputs=[dataset_plain_text_separators_group]))
474
+
475
+ things_that_might_timeout.append(
476
+ dataset_text_load_sample_button.click(fn=load_sample_dataset_to_text_input, inputs=[
477
+ dataset_text_format], outputs=[dataset_text]))
478
  gr.Markdown(
479
  "💡 Switch to the \"Preview\" tab to verify that your inputs are correct.")
480
  with gr.Tab("Preview"):
 
489
  )
490
  finetune_dataset_preview = gr.Dataframe(
491
  wrap=True, elem_id="finetune_dataset_preview")
492
+ things_that_might_timeout.append(
493
+ load_dataset_from.change(
494
+ fn=handle_switch_dataset_source,
495
+ inputs=[load_dataset_from],
496
+ outputs=[
497
+ dataset_text_input_group,
498
+ dataset_from_data_dir_group
499
+ ]
500
+ ))
501
 
502
  dataset_inputs = [
503
  template,
 
512
  dataset_preview_inputs = dataset_inputs + \
513
  [finetune_dataset_preview_show_actual_prompt]
514
  for i in dataset_preview_inputs:
515
+ things_that_might_timeout.append(
516
+ i.change(
517
+ fn=refresh_preview,
518
+ inputs=dataset_preview_inputs,
519
+ outputs=[finetune_dataset_preview,
520
+ finetune_dataset_preview_info_message,
521
+ dataset_from_text_message,
522
+ dataset_from_data_dir_message
523
+ ]
524
+ ))
525
+
526
+ things_that_might_timeout.append(reload_selections_button.click(
527
  reload_selections,
528
  inputs=[template, dataset_from_data_dir],
529
  outputs=[template, dataset_from_data_dir],
530
  )
531
+ )
532
 
533
  max_seq_length = gr.Slider(
534
  minimum=1, maximum=4096, value=512,
 
629
  inputs=None, outputs=None,
630
  cancels=[train_progress])
631
 
632
+ stop_timeoutable_btn = gr.Button(
633
+ "stop not-responding elements",
634
+ elem_id="inference_stop_timeoutable_btn",
635
+ elem_classes="foot_stop_timeoutable_btn")
636
+ stop_timeoutable_btn.click(
637
+ fn=None, inputs=None, outputs=None, cancels=things_that_might_timeout)
638
+
639
  finetune_ui_blocks.load(_js="""
640
  function finetune_ui_blocks_js() {
641
  // Auto load options
llama_lora/ui/inference_ui.py CHANGED
@@ -175,6 +175,8 @@ def update_prompt_preview(prompt_template,
175
 
176
 
177
  def inference_ui():
 
 
178
  with gr.Blocks() as inference_ui_blocks:
179
  with gr.Row():
180
  lora_model = gr.Dropdown(
@@ -203,13 +205,20 @@ def inference_ui():
203
  placeholder="Tell me about alpecas and llamas.",
204
  elem_id="inference_variable_0"
205
  )
206
- variable_1 = gr.Textbox(lines=2, label="", visible=False, elem_id="inference_variable_1")
207
- variable_2 = gr.Textbox(lines=2, label="", visible=False, elem_id="inference_variable_2")
208
- variable_3 = gr.Textbox(lines=2, label="", visible=False, elem_id="inference_variable_3")
209
- variable_4 = gr.Textbox(lines=2, label="", visible=False, elem_id="inference_variable_4")
210
- variable_5 = gr.Textbox(lines=2, label="", visible=False, elem_id="inference_variable_5")
211
- variable_6 = gr.Textbox(lines=2, label="", visible=False, elem_id="inference_variable_6")
212
- variable_7 = gr.Textbox(lines=2, label="", visible=False, elem_id="inference_variable_7")
 
 
 
 
 
 
 
213
 
214
  with gr.Accordion("Preview", open=False, elem_id="inference_preview_prompt_container"):
215
  preview_prompt = gr.Textbox(
@@ -303,19 +312,22 @@ def inference_ui():
303
  interactive=False,
304
  elem_id="inference_raw_output")
305
 
306
- show_raw.change(
307
  fn=lambda show_raw: gr.Accordion.update(visible=show_raw),
308
  inputs=[show_raw],
309
  outputs=[raw_output_group])
 
310
 
311
- reload_selections_button.click(
312
  reload_selections,
313
  inputs=[lora_model, prompt_template],
314
  outputs=[lora_model, prompt_template],
315
  )
 
316
 
317
- prompt_template.change(fn=handle_prompt_template_change, inputs=[prompt_template], outputs=[
318
- variable_0, variable_1, variable_2, variable_3, variable_4, variable_5, variable_6, variable_7])
 
319
 
320
  generate_event = generate_btn.click(
321
  fn=do_inference,
@@ -339,9 +351,17 @@ def inference_ui():
339
  stop_btn.click(fn=None, inputs=None, outputs=None,
340
  cancels=[generate_event])
341
 
342
- update_prompt_preview_btn.click(fn=update_prompt_preview, inputs=[prompt_template,
343
- variable_0, variable_1, variable_2, variable_3,
344
- variable_4, variable_5, variable_6, variable_7,], outputs=preview_prompt)
 
 
 
 
 
 
 
 
345
 
346
  inference_ui_blocks.load(_js="""
347
  function inference_ui_blocks_js() {
 
175
 
176
 
177
  def inference_ui():
178
+ things_that_might_timeout = []
179
+
180
  with gr.Blocks() as inference_ui_blocks:
181
  with gr.Row():
182
  lora_model = gr.Dropdown(
 
205
  placeholder="Tell me about alpecas and llamas.",
206
  elem_id="inference_variable_0"
207
  )
208
+ variable_1 = gr.Textbox(
209
+ lines=2, label="", visible=False, elem_id="inference_variable_1")
210
+ variable_2 = gr.Textbox(
211
+ lines=2, label="", visible=False, elem_id="inference_variable_2")
212
+ variable_3 = gr.Textbox(
213
+ lines=2, label="", visible=False, elem_id="inference_variable_3")
214
+ variable_4 = gr.Textbox(
215
+ lines=2, label="", visible=False, elem_id="inference_variable_4")
216
+ variable_5 = gr.Textbox(
217
+ lines=2, label="", visible=False, elem_id="inference_variable_5")
218
+ variable_6 = gr.Textbox(
219
+ lines=2, label="", visible=False, elem_id="inference_variable_6")
220
+ variable_7 = gr.Textbox(
221
+ lines=2, label="", visible=False, elem_id="inference_variable_7")
222
 
223
  with gr.Accordion("Preview", open=False, elem_id="inference_preview_prompt_container"):
224
  preview_prompt = gr.Textbox(
 
312
  interactive=False,
313
  elem_id="inference_raw_output")
314
 
315
+ show_raw_change_event = show_raw.change(
316
  fn=lambda show_raw: gr.Accordion.update(visible=show_raw),
317
  inputs=[show_raw],
318
  outputs=[raw_output_group])
319
+ things_that_might_timeout.append(show_raw_change_event)
320
 
321
+ reload_selections_event = reload_selections_button.click(
322
  reload_selections,
323
  inputs=[lora_model, prompt_template],
324
  outputs=[lora_model, prompt_template],
325
  )
326
+ things_that_might_timeout.append(reload_selections_event)
327
 
328
+ prompt_template_change_event = prompt_template.change(fn=handle_prompt_template_change, inputs=[prompt_template], outputs=[
329
+ variable_0, variable_1, variable_2, variable_3, variable_4, variable_5, variable_6, variable_7])
330
+ things_that_might_timeout.append(prompt_template_change_event)
331
 
332
  generate_event = generate_btn.click(
333
  fn=do_inference,
 
351
  stop_btn.click(fn=None, inputs=None, outputs=None,
352
  cancels=[generate_event])
353
 
354
+ update_prompt_preview_event = update_prompt_preview_btn.click(fn=update_prompt_preview, inputs=[prompt_template,
355
+ variable_0, variable_1, variable_2, variable_3,
356
+ variable_4, variable_5, variable_6, variable_7,], outputs=preview_prompt)
357
+ things_that_might_timeout.append(update_prompt_preview_event)
358
+
359
+ stop_timeoutable_btn = gr.Button(
360
+ "stop not-responding elements",
361
+ elem_id="inference_stop_timeoutable_btn",
362
+ elem_classes="foot_stop_timeoutable_btn")
363
+ stop_timeoutable_btn.click(
364
+ fn=None, inputs=None, outputs=None, cancels=things_that_might_timeout)
365
 
366
  inference_ui_blocks.load(_js="""
367
  function inference_ui_blocks_js() {
llama_lora/ui/main_page.py CHANGED
@@ -395,6 +395,30 @@ def main_page_custom_css():
395
  }
396
  }
397
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
398
  .tippy-box[data-animation=scale-subtle][data-placement^=top]{transform-origin:bottom}.tippy-box[data-animation=scale-subtle][data-placement^=bottom]{transform-origin:top}.tippy-box[data-animation=scale-subtle][data-placement^=left]{transform-origin:right}.tippy-box[data-animation=scale-subtle][data-placement^=right]{transform-origin:left}.tippy-box[data-animation=scale-subtle][data-state=hidden]{transform:scale(.8);opacity:0}
399
  """
400
  return css
 
395
  }
396
  }
397
 
398
+ #tokenizer_encoded_tokens_input_textbox .codemirror-wrapper,
399
+ #tokenizer_decoded_text_input_textbox .codemirror-wrapper {
400
+ margin-bottom: -20px;
401
+ }
402
+
403
+ .foot_stop_timeoutable_btn {
404
+ align-self: flex-end;
405
+ border: 0 !important;
406
+ width: auto !important;
407
+ background: transparent !important;
408
+ box-shadow: none !important;
409
+ padding: 0 !important;
410
+ font-weight: 100 !important;
411
+ font-size: 80% !important;
412
+ text-decoration: underline;
413
+ opacity: 0.3;
414
+ }
415
+ .foot_stop_timeoutable_btn:hover {
416
+ opacity: 0.8;
417
+ }
418
+ .foot_stop_timeoutable_btn:active {
419
+ opacity: 1;
420
+ }
421
+
422
  .tippy-box[data-animation=scale-subtle][data-placement^=top]{transform-origin:bottom}.tippy-box[data-animation=scale-subtle][data-placement^=bottom]{transform-origin:top}.tippy-box[data-animation=scale-subtle][data-placement^=left]{transform-origin:right}.tippy-box[data-animation=scale-subtle][data-placement^=right]{transform-origin:left}.tippy-box[data-animation=scale-subtle][data-state=hidden]{transform:scale(.8);opacity:0}
423
  """
424
  return css
llama_lora/ui/tokenizer_ui.py CHANGED
@@ -31,6 +31,8 @@ def handle_encode(decoded_tokens):
31
 
32
 
33
  def tokenizer_ui():
 
 
34
  with gr.Blocks() as tokenizer_ui_blocks:
35
  with gr.Row():
36
  with gr.Column():
@@ -50,7 +52,6 @@ def tokenizer_ui():
50
  encode_btn = gr.Button("⬅️ Encode")
51
  decoded_tokens_error_message = gr.Markdown(
52
  "", visible=False, elem_classes="error-message")
53
- stop_btn = gr.Button("Stop")
54
 
55
  decoding = decode_btn.click(
56
  fn=handle_decode,
@@ -62,7 +63,15 @@ def tokenizer_ui():
62
  inputs=[decoded_tokens],
63
  outputs=[encoded_tokens, decoded_tokens_error_message],
64
  )
65
- stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[decoding, encoding])
 
 
 
 
 
 
 
 
66
 
67
  tokenizer_ui_blocks.load(_js="""
68
  function tokenizer_ui_blocks_js() {
 
31
 
32
 
33
  def tokenizer_ui():
34
+ things_that_might_timeout = []
35
+
36
  with gr.Blocks() as tokenizer_ui_blocks:
37
  with gr.Row():
38
  with gr.Column():
 
52
  encode_btn = gr.Button("⬅️ Encode")
53
  decoded_tokens_error_message = gr.Markdown(
54
  "", visible=False, elem_classes="error-message")
 
55
 
56
  decoding = decode_btn.click(
57
  fn=handle_decode,
 
63
  inputs=[decoded_tokens],
64
  outputs=[encoded_tokens, decoded_tokens_error_message],
65
  )
66
+ things_that_might_timeout.append(decoding)
67
+ things_that_might_timeout.append(encoding)
68
+
69
+ stop_timeoutable_btn = gr.Button(
70
+ "stop not-responding elements",
71
+ elem_id="inference_stop_timeoutable_btn",
72
+ elem_classes="foot_stop_timeoutable_btn")
73
+ stop_timeoutable_btn.click(
74
+ fn=None, inputs=None, outputs=None, cancels=things_that_might_timeout)
75
 
76
  tokenizer_ui_blocks.load(_js="""
77
  function tokenizer_ui_blocks_js() {