Increase queue size, link to ViTL14 HF model
app.py CHANGED
@@ -29,7 +29,7 @@ blip_model.eval()
 blip_model = blip_model.to(device)
 
 print("Loading CLIP model...")
-clip_model_name = 'ViT-L/14'
+clip_model_name = 'ViT-L/14' # https://huggingface.co/openai/clip-vit-large-patch14
 clip_model, clip_preprocess = clip.load(clip_model_name, device=device)
 clip_model.to(device).eval()
 
@@ -241,5 +241,5 @@ io = gr.Interface(
 article=article,
 examples=[['example01.jpg'], ['example02.jpg']]
 )
-io.queue(max_size=
+io.queue(max_size=32)
 io.launch()
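For reference, the changed line in the first hunk selects OpenAI's CLIP ViT-L/14 checkpoint, and the added comment links to the matching model card at https://huggingface.co/openai/clip-vit-large-patch14. A minimal, self-contained sketch of that loading step, assuming the openai/CLIP package (installed via pip install git+https://github.com/openai/CLIP.git) rather than the Space's full app.py:

# Sketch of the CLIP loading step from the first hunk; not the complete app.py.
import torch
import clip  # openai/CLIP package

device = "cuda" if torch.cuda.is_available() else "cpu"

print("Loading CLIP model...")
clip_model_name = 'ViT-L/14'  # https://huggingface.co/openai/clip-vit-large-patch14
clip_model, clip_preprocess = clip.load(clip_model_name, device=device)
clip_model.to(device).eval()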
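The second hunk caps Gradio's request queue at 32 pending requests before launching. A minimal sketch of how queue(max_size=...) fits around gr.Interface; the run function, inputs, and outputs below are placeholders, not the Space's actual BLIP/CLIP pipeline:

# Sketch of Gradio's request queue, as enabled in the second hunk.
import gradio as gr

def run(image):
    # Placeholder for the Space's real captioning/interrogation function.
    return "placeholder output"

io = gr.Interface(
    fn=run,
    inputs=gr.Image(type="pil"),
    outputs="text",
)

io.queue(max_size=32)  # hold at most 32 pending requests in the queue
io.launch()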