Update app.py
app.py CHANGED
@@ -22,7 +22,18 @@ def generate(
 
     prompt = pipe.tokenizer.apply_chat_template(message_template, tokenize=False, add_generation_prompt=True)
 
-
+    if model_name == "Felladrin/Pythia-31M-Chat-v1":
+        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=2, repetition_penalty=1.0016)
+    elif model_name == "Felladrin/Llama-68M-Chat-v1":
+        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.043)
+    elif model_name == "Felladrin/Smol-Llama-101M-Chat-v1":
+        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.105)
+    elif model_name == "Felladrin/Llama-160M-Chat-v1":
+        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=4, repetition_penalty=1.01)
+    elif model_name == "Felladrin/TinyMistral-248M-SFT-v4":
+        outputs = pipe(prompt, max_new_tokens=250, use_cache=True, penalty_alpha=0.5, top_k=5, repetition_penalty=1.001)
+    else:
+        outputs = pipe(prompt, max_new_tokens=250, do_sample=True, temperature=0.65, top_k=35, top_p=0.55, repetition_penalty=1.176)
 
     return outputs[0]["generated_text"]
 
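For context, the added branches select contrastive search (penalty_alpha=0.5 with a small, per-model top_k) for the five Felladrin checkpoints and fall back to plain sampling for any other model. Below is a minimal, table-driven restatement of the patched generate function. Only the decoding parameters come from the diff; the pipeline construction and the invocation at the end are illustrative assumptions, since the surrounding app.py code is not shown in this hunk.

from transformers import pipeline

# Decoding settings taken from the diff: contrastive search
# (penalty_alpha + top_k) tuned per Felladrin checkpoint.
CONTRASTIVE_SETTINGS = {
    "Felladrin/Pythia-31M-Chat-v1": {"top_k": 2, "repetition_penalty": 1.0016},
    "Felladrin/Llama-68M-Chat-v1": {"top_k": 4, "repetition_penalty": 1.043},
    "Felladrin/Smol-Llama-101M-Chat-v1": {"top_k": 4, "repetition_penalty": 1.105},
    "Felladrin/Llama-160M-Chat-v1": {"top_k": 4, "repetition_penalty": 1.01},
    "Felladrin/TinyMistral-248M-SFT-v4": {"top_k": 5, "repetition_penalty": 1.001},
}

def generate(message_template, model_name):
    # Hypothetical setup: the real Space presumably builds or caches `pipe`
    # elsewhere; it is constructed here so the sketch is self-contained.
    pipe = pipeline("text-generation", model=model_name)
    prompt = pipe.tokenizer.apply_chat_template(
        message_template, tokenize=False, add_generation_prompt=True
    )
    if model_name in CONTRASTIVE_SETTINGS:
        # penalty_alpha > 0 combined with top_k > 1 switches transformers'
        # generation to contrastive search.
        outputs = pipe(prompt, max_new_tokens=250, use_cache=True,
                       penalty_alpha=0.5, **CONTRASTIVE_SETTINGS[model_name])
    else:
        # Fallback for unlisted models: temperature/top-k/top-p sampling,
        # exactly as in the diff's else branch.
        outputs = pipe(prompt, max_new_tokens=250, do_sample=True,
                       temperature=0.65, top_k=35, top_p=0.55,
                       repetition_penalty=1.176)
    return outputs[0]["generated_text"]

# Hypothetical invocation with a chat-formatted message list.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain what a tokenizer does."},
]
print(generate(messages, "Felladrin/Llama-160M-Chat-v1"))

A dictionary keyed by model name keeps the per-model settings in one place, which behaves the same as the if/elif chain in the diff while making it easier to add or retune a checkpoint.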