Spaces:
Running
on
L40S
Running
on
L40S
ryanzhangfan
committed on
Commit
•
887782d
1
Parent(s):
6380db8
Update app.py
Browse files
app.py
CHANGED
@@ -24,6 +24,8 @@ subprocess.run(
|
|
24 |
shell=True,
|
25 |
)
|
26 |
|
|
|
|
|
27 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
28 |
|
29 |
# Model paths
|
@@ -39,7 +41,7 @@ gen_model = AutoModelForCausalLM.from_pretrained(
|
|
39 |
torch_dtype=torch.bfloat16,
|
40 |
attn_implementation="flash_attention_2",
|
41 |
trust_remote_code=True,
|
42 |
-
)
|
43 |
|
44 |
# Emu3-Chat model and processor
|
45 |
chat_model = AutoModelForCausalLM.from_pretrained(
|
@@ -48,7 +50,7 @@ chat_model = AutoModelForCausalLM.from_pretrained(
|
|
48 |
torch_dtype=torch.bfloat16,
|
49 |
attn_implementation="flash_attention_2",
|
50 |
trust_remote_code=True,
|
51 |
-
)
|
52 |
|
53 |
tokenizer = AutoTokenizer.from_pretrained(EMU_CHAT_HUB, trust_remote_code=True)
|
54 |
image_processor = AutoImageProcessor.from_pretrained(
|
@@ -56,11 +58,16 @@ image_processor = AutoImageProcessor.from_pretrained(
|
|
56 |
)
|
57 |
image_tokenizer = AutoModel.from_pretrained(
|
58 |
VQ_HUB, device_map="cpu", trust_remote_code=True
|
59 |
-
).eval()
|
60 |
processor = Emu3Processor(
|
61 |
image_processor, image_tokenizer, tokenizer
|
62 |
)
|
63 |
|
|
|
|
|
|
|
|
|
|
|
64 |
@spaces.GPU(duration=300)
|
65 |
def generate_image(prompt):
|
66 |
POSITIVE_PROMPT = " masterpiece, film grained, best quality."
|
|
|
24 |
shell=True,
|
25 |
)
|
26 |
|
27 |
+
print(gr.__version__)
|
28 |
+
|
29 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
30 |
|
31 |
# Model paths
|
|
|
41 |
torch_dtype=torch.bfloat16,
|
42 |
attn_implementation="flash_attention_2",
|
43 |
trust_remote_code=True,
|
44 |
+
)
|
45 |
|
46 |
# Emu3-Chat model and processor
|
47 |
chat_model = AutoModelForCausalLM.from_pretrained(
|
|
|
50 |
torch_dtype=torch.bfloat16,
|
51 |
attn_implementation="flash_attention_2",
|
52 |
trust_remote_code=True,
|
53 |
+
)
|
54 |
|
55 |
tokenizer = AutoTokenizer.from_pretrained(EMU_CHAT_HUB, trust_remote_code=True)
|
56 |
image_processor = AutoImageProcessor.from_pretrained(
|
|
|
58 |
)
|
59 |
image_tokenizer = AutoModel.from_pretrained(
|
60 |
VQ_HUB, device_map="cpu", trust_remote_code=True
|
61 |
+
).eval()
|
62 |
processor = Emu3Processor(
|
63 |
image_processor, image_tokenizer, tokenizer
|
64 |
)
|
65 |
|
66 |
+
print(device)
|
67 |
+
gen_model.to(device)
|
68 |
+
chat_model.to(device)
|
69 |
+
image_tokenizer.to(device)
|
70 |
+
|
71 |
@spaces.GPU(duration=300)
|
72 |
def generate_image(prompt):
|
73 |
POSITIVE_PROMPT = " masterpiece, film grained, best quality."
|