Commit 2f0a0ce
zetavg committed
Parent: d50497e
do not allow using base model directly, seems that it will break things
Switching LoRA models will not work afterwards; all will behave the same.
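The breakage described here matches a common PEFT pitfall: PeftModel.from_pretrained injects the adapter layers into the base model instance in place, so a shared, cached base model is mutated the first time a LoRA adapter is applied. A minimal sketch of that pattern, reusing the loader names from the diff; the function bodies, the lru_cache caching, and the model id are assumptions for illustration, not this Space's actual code:

from functools import lru_cache

from peft import PeftModel
from transformers import LlamaForCausalLM


@lru_cache(maxsize=1)
def get_base_model():
    # One shared instance, loaded once and reused across requests.
    # The model id is an assumption for illustration.
    return LlamaForCausalLM.from_pretrained("decapoda-research/llama-7b-hf")


def get_model_with_lora(lora_model_name: str):
    # PeftModel.from_pretrained mutates the shared base model in place
    # when it injects the adapter modules. Handing out the bare base
    # model afterwards, or wrapping it again for another adapter, can
    # serve stale LoRA weights: every "switch" then behaves the same.
    return PeftModel.from_pretrained(get_base_model(), lora_model_name)

Hence the change below: the inference UI no longer hands out the bare base model at all.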
llama_lora/ui/inference_ui.py (CHANGED)
@@ -95,9 +95,12 @@ def do_inference(
         )
         return
 
-    model = get_base_model()
+    # model = get_base_model()
     if lora_model_name != "None":
         model = get_model_with_lora(lora_model_name)
+    else:
+        raise ValueError("No LoRA model selected.")
+
     tokenizer = get_tokenizer()
 
     inputs = tokenizer(prompt, return_tensors="pt")
@@ -229,7 +232,7 @@ def reload_selections(current_lora_model, current_prompt_template):
 
     default_lora_models = ["tloen/alpaca-lora-7b"]
     available_lora_models = default_lora_models + get_available_lora_model_names()
-    available_lora_models = available_lora_models
+    available_lora_models = available_lora_models
 
     current_lora_model = current_lora_model or next(
         iter(available_lora_models), None)
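With the first hunk applied, a "None" selection fails fast instead of silently falling back to a base model instance that may already carry injected adapter weights. A sketch of the resulting control flow, condensed from the diff rather than copied from the full function:

def select_model(lora_model_name: str):
    # No base-model fallback anymore: anything other than a concrete
    # LoRA model name is rejected up front.
    if lora_model_name != "None":
        return get_model_with_lora(lora_model_name)
    raise ValueError("No LoRA model selected.")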
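The second hunk leaves the default-selection logic intact: next(iter(available_lora_models), None) picks the first available LoRA model, falling back to None only when the list is empty. A self-contained illustration of that idiom:

# next(iter(xs), default) returns the first element of xs,
# or the default when xs is empty.
available_lora_models = ["tloen/alpaca-lora-7b"]
current_lora_model = None

current_lora_model = current_lora_model or next(
    iter(available_lora_models), None)
assert current_lora_model == "tloen/alpaca-lora-7b"

assert next(iter([]), None) is None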