Update
- README.md +1 -1
- app.py +32 -19
- requirements.txt +8 -8
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🌖
 colorFrom: blue
 colorTo: pink
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.27.0
 python_version: 3.10.9
 app_file: app.py
 pinned: false
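The `sdk_version` bump pins the Space runtime to Gradio 3.27.0, which matches the `gradio==3.27.0` pin added to requirements.txt below; keeping the two in sync avoids the Space runtime and the installed package diverging.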
app.py
CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import os
 import string
 
 import gradio as gr
@@ -9,30 +10,42 @@ import PIL.Image
 import torch
 from transformers import AutoProcessor, Blip2ForConditionalGeneration
 
-DESCRIPTION = '# BLIP-2'
+DESCRIPTION = '# [BLIP-2](https://github.com/salesforce/LAVIS/tree/main/projects/blip2)'
+
+if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
+    DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
+
+if torch.cuda.is_available():
+    DESCRIPTION += '\n<p>Running on GPU 🔥</p>'
+else:
+    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.'
 
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 
 MODEL_ID_OPT_6_7B = 'Salesforce/blip2-opt-6.7b'
 MODEL_ID_FLAN_T5_XXL = 'Salesforce/blip2-flan-t5-xxl'
-model_dict = {
-    …
+
+if torch.cuda.is_available():
+    model_dict = {
+        #MODEL_ID_OPT_6_7B: {
+        #    'processor':
+        #    AutoProcessor.from_pretrained(MODEL_ID_OPT_6_7B),
+        #    'model':
+        #    Blip2ForConditionalGeneration.from_pretrained(MODEL_ID_OPT_6_7B,
+        #                                                  device_map='auto',
+        #                                                  load_in_8bit=True),
+        #},
+        MODEL_ID_FLAN_T5_XXL: {
+            'processor':
+            AutoProcessor.from_pretrained(MODEL_ID_FLAN_T5_XXL),
+            'model':
+            Blip2ForConditionalGeneration.from_pretrained(MODEL_ID_FLAN_T5_XXL,
+                                                          device_map='auto',
+                                                          load_in_8bit=True),
+        }
 }
+else:
+    model_dict = {}
 
 
 def generate_caption(model_id: str, image: PIL.Image.Image,
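The guarded `model_dict` now loads only the FLAN-T5-XXL variant, in 8-bit via bitsandbytes (`load_in_8bit=True`) with placement handled by accelerate (`device_map='auto'`); the OPT-6.7B entry is left commented out. The commit doesn't touch the body of `generate_caption`, so it isn't shown in the diff; as a rough sketch of how such a processor/model pair is typically used for captioning (the `caption_image` helper, the prompt-free call, and `max_new_tokens=32` are illustrative assumptions, not the Space's exact code):

import PIL.Image
import torch

def caption_image(model_id: str, image: PIL.Image.Image) -> str:
    # Illustrative only: look up the pair loaded into model_dict above.
    processor = model_dict[model_id]['processor']
    model = model_dict[model_id]['model']
    # The processor resizes and normalizes the image; half-precision inputs
    # pair with the 8-bit weights on GPU.
    inputs = processor(images=image, return_tensors='pt').to(device, torch.float16)
    generated_ids = model.generate(**inputs, max_new_tokens=32)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

At roughly one byte per parameter, 8-bit loading brings the ~12B-parameter BLIP-2 FLAN-T5-XXL checkpoint down to around 12 GB of GPU memory, versus about four times that in float32, which is what lets the demo fit on a single GPU.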
@@ -281,4 +294,4 @@ with gr.Blocks(css='style.css') as demo:
         queue=False,
     )
 
-demo.queue(
+demo.queue(api_open=False, max_size=10).launch()
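The launch line now configures the queue inline: `max_size=10` caps how many requests may wait at once, and `api_open=False` keeps the queue's REST endpoints closed so jobs can only be submitted through the UI. A minimal self-contained sketch of the same pattern, assuming the Gradio 3.27 Blocks API pinned below (the `echo` handler is a stand-in for the BLIP-2 callbacks wired up in the real app):

import gradio as gr

def echo(text: str) -> str:
    # Trivial stand-in for the captioning/chat handlers.
    return text

with gr.Blocks() as demo:
    inp = gr.Textbox(label='Input')
    out = gr.Textbox(label='Output')
    btn = gr.Button('Run')
    btn.click(fn=echo, inputs=inp, outputs=out)

# max_size=10: at most ten requests wait in the queue; later ones are rejected
# immediately instead of piling up. api_open=False: the queue's REST endpoints
# are not exposed to direct POSTs.
demo.queue(api_open=False, max_size=10).launch()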
requirements.txt
CHANGED
@@ -1,8 +1,8 @@
-accelerate==0.
-bitsandbytes==0.
-…
+accelerate==0.18.0
+bitsandbytes==0.38.1
+gradio==3.27.0
+huggingface-hub==0.13.4
+Pillow==9.5.0
+torch==2.0.0
+torchvision==0.15.1
+transformers==4.28.1
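Two of these pins back specific arguments in app.py: `bitsandbytes` provides the 8-bit kernels behind `load_in_8bit=True`, and `accelerate` handles the device placement implied by `device_map='auto'`. A quick check that a local environment matches (illustrative; the expected values are just the pins above):

# Sanity-check a local environment against the pins above.
import torch
import transformers

print('torch', torch.__version__)                # expect 2.0.0
print('transformers', transformers.__version__)  # expect 4.28.1
print('cuda:', torch.cuda.is_available())        # 8-bit loading needs a GPU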