Spaces:
Running
on
Zero
Running
on
Zero
khulaifi95
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -45,7 +45,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
|
|
45 |
terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")]
|
46 |
|
47 |
|
48 |
-
@spaces.GPU
|
49 |
def chat_llama3_8b(
|
50 |
message: str, history: list, temperature: float, max_new_tokens: int
|
51 |
) -> Generator[str, None, None]:
|
@@ -130,9 +130,6 @@ with gr.Blocks(fill_height=True, css=css) as demo:
|
|
130 |
),
|
131 |
],
|
132 |
examples=[
|
133 |
-
["How to setup a human base on Mars? Give short answer."],
|
134 |
-
["Explain theory of relativity to me like I’m 8 years old."],
|
135 |
-
["What is 9,000 * 9,000?"],
|
136 |
["The detonative temperature of this polypropylene is 2000°F."],
|
137 |
["The preparation method according to claim 1, characterized in that the SO2 accounts for 30 wt% and the Fe2O3 accounts for 70 wt%."],
|
138 |
],
|
|
|
45 |
terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")]
|
46 |
|
47 |
|
48 |
+
@spaces.GPU
|
49 |
def chat_llama3_8b(
|
50 |
message: str, history: list, temperature: float, max_new_tokens: int
|
51 |
) -> Generator[str, None, None]:
|
|
|
130 |
),
|
131 |
],
|
132 |
examples=[
|
|
|
|
|
|
|
133 |
["The detonative temperature of this polypropylene is 2000°F."],
|
134 |
["The preparation method according to claim 1, characterized in that the SO2 accounts for 30 wt% and the Fe2O3 accounts for 70 wt%."],
|
135 |
],
|