TangJicheng committed on
Commit
bec475d
1 Parent(s): 0f497f0

clean code

Files changed (1)
  1. app.py +0 -16
app.py CHANGED
@@ -23,22 +23,6 @@ model = blip_decoder(pretrained=model_url, image_size=384, vit='large')
 model.eval()
 model = model.to(device)
 
-
-from models.blip_vqa import blip_vqa
-
-image_size_vq = 480
-transform_vq = transforms.Compose([
-    transforms.Resize((image_size_vq,image_size_vq),interpolation=InterpolationMode.BICUBIC),
-    transforms.ToTensor(),
-    transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
-    ])
-
-model_url_vq = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth'
-
-model_vq = blip_vqa(pretrained=model_url_vq, image_size=480, vit='base')
-model_vq.eval()
-model_vq = model_vq.to(device)
-
 def inference_image_caption(raw_image):
     image = transform(raw_image).unsqueeze(0).to(device)
     with torch.no_grad():