Spaces:
Running
Running
tudormunteanu
committed on
Commit
•
b8dc4ec
1
Parent(s):
881e6c7
trust remote code
Browse files
README.md
CHANGED
@@ -12,3 +12,9 @@ short_description: Hold me closer, tiny coder!
|
|
12 |
---
|
13 |
|
14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
---
|
13 |
|
14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
15 |
+
|
16 |
+
Test query:
|
17 |
+
|
18 |
+
|
19 |
+
```
|
20 |
+
write a simple Fizz Buzz function in Python.
|
app.py
CHANGED
@@ -1,5 +1,3 @@
|
|
1 |
-
# app.py
|
2 |
-
|
3 |
import os
|
4 |
import streamlit as st
|
5 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
@@ -20,11 +18,12 @@ def load_model(model_size: str = "32B"):
|
|
20 |
|
21 |
model_id = model_map.get(model_size, "Qwen/Qwen-7B") # default to 7B if size not found
|
22 |
|
23 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
24 |
model = AutoModelForCausalLM.from_pretrained(
|
25 |
model_id,
|
26 |
torch_dtype=torch.float16,
|
27 |
-
device_map="auto"
|
|
|
28 |
)
|
29 |
return model, tokenizer
|
30 |
|
|
|
|
|
|
|
1 |
import os
|
2 |
import streamlit as st
|
3 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
|
18 |
|
19 |
model_id = model_map.get(model_size, "Qwen/Qwen-7B") # default to 7B if size not found
|
20 |
|
21 |
+
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
|
22 |
model = AutoModelForCausalLM.from_pretrained(
|
23 |
model_id,
|
24 |
torch_dtype=torch.float16,
|
25 |
+
device_map="auto",
|
26 |
+
trust_remote_code=True
|
27 |
)
|
28 |
return model, tokenizer
|
29 |
|