zetavg committed 27c4f46 (1 parent: 80c2789)
app.py CHANGED
@@ -42,7 +42,7 @@ def main(
     with gr.Blocks(title=get_page_title(), css=main_page_custom_css()) as demo:
         main_page()
 
-    demo.queue(concurrency_count=2).launch(server_name=server_name, share=share)
+    demo.queue(concurrency_count=1).launch(server_name=server_name, share=share)
 
 
 if __name__ == "__main__":
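For context, Gradio's queue processes pending events with a fixed number of workers, so dropping `concurrency_count` from 2 to 1 serializes requests instead of running two GPU-bound jobs at once. A minimal sketch of the pattern (the `slow_task` handler is hypothetical, not from this repo; `queue(concurrency_count=...)` is the Gradio 3.x API):

```python
import time

import gradio as gr


def slow_task(text):
    # stand-in for a GPU-bound job; with concurrency_count=1 a second
    # request waits in the queue until this one finishes
    time.sleep(5)
    return text.upper()


with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    gr.Button("Run").click(slow_task, inputs=inp, outputs=out)

# one worker: queued events are handled strictly one at a time
demo.queue(concurrency_count=1).launch()
```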
llama_lora/ui/finetune_ui.py CHANGED
@@ -360,7 +360,7 @@ Train data (first 10):
 
         Global.should_stop_training = False
 
-        return Global.train_fn(
+        results = Global.train_fn(
             get_base_model(), # base_model
             get_tokenizer(), # tokenizer
             os.path.join(Global.data_dir, "lora_models",
@@ -382,6 +382,8 @@ Train data (first 10):
             None, # resume_from_checkpoint
             training_callbacks # callbacks
         )
+        return "Done: " + str(results)
+
     except Exception as e:
         raise gr.Error(e)
 
@@ -529,7 +531,7 @@ def finetune_ui():
         with gr.Row():
             with gr.Column():
                 micro_batch_size = gr.Slider(
-                    minimum=1, maximum=100, step=1, value=4,
+                    minimum=1, maximum=100, step=1, value=8,
                     label="Micro Batch Size",
                     info="The number of examples in each mini-batch for gradient computation. A smaller micro_batch_size reduces memory usage but may increase training time."
                 )
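For context, Gradio renders whatever an event handler returns into its bound output component, so returning a plain `"Done: ..."` status string instead of the raw `Global.train_fn` result is what the status textbox ends up displaying. A minimal sketch of that wiring, assuming hypothetical names (`run_training`, `status_box`) rather than the repo's actual components:

```python
import gradio as gr


def run_training(micro_batch_size):
    # stand-in for the real training call; whatever this returns is
    # rendered into the output component bound below
    results = {"micro_batch_size": int(micro_batch_size), "final_loss": 0.123}
    return "Done: " + str(results)


with gr.Blocks() as demo:
    micro_batch_size = gr.Slider(
        minimum=1, maximum=100, step=1, value=8,
        label="Micro Batch Size")
    status_box = gr.Textbox(label="Status")
    # the returned string becomes the new value of status_box
    gr.Button("Train").click(run_training,
                             inputs=micro_batch_size,
                             outputs=status_box)

demo.launch()
```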
llama_lora/ui/main_page.py CHANGED
@@ -143,6 +143,11 @@ def main_page_custom_css():
     box-shadow: none;
 }
 
+#inference_output > .wrap {
+    /* allow users to select text while generation is still in progress */
+    pointer-events: none;
+}
+
 #dataset_plain_text_input_variables_separator textarea,
 #dataset_plain_text_input_and_output_separator textarea,
 #dataset_plain_text_data_separator textarea {
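For context, Gradio places a `.wrap` overlay on a component while its handler is still running; `pointer-events: none` lets clicks pass through that overlay so the partially generated text can still be selected. A minimal sketch of how such a rule is hooked up through `elem_id` and the `css` argument of `gr.Blocks` (component names here are hypothetical, not the repo's):

```python
import gradio as gr

custom_css = """
#inference_output > .wrap {
    /* let clicks pass through the busy overlay so text stays selectable */
    pointer-events: none;
}
"""

with gr.Blocks(css=custom_css) as demo:
    prompt = gr.Textbox(label="Prompt")
    # elem_id makes this component addressable from the custom CSS above
    output = gr.Textbox(label="Output", elem_id="inference_output")
    gr.Button("Generate").click(lambda p: p[::-1], inputs=prompt, outputs=output)

demo.launch()
```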