yeecin committed on
Commit
2280605
1 Parent(s): 86c445c

Upload app.py

Files changed (1)
app.py +15 -5
app.py CHANGED
@@ -1,14 +1,24 @@
+import os
 import gradio as gr
-from transformers import BlipProcessor, BlipForConditionalGeneration
+from transformers import BlipProcessor ,BlipForConditionalGeneration
 from PIL import Image
+from transformers import CLIPProcessor, ChineseCLIPVisionModel ,AutoProcessor
+
+# Set the HF_HOME and HF_ENDPOINT environment variables
+# os.environ['HF_HOME'] = 'D:/AI/OCR/img2text/models'
+# os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
 
-# Load the model and processor
-processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
 
+# model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+# processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+# Load the model and processor
+# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+# model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+processor = BlipProcessor.from_pretrained("IDEA-CCNL/Taiyi-BLIP-750M-Chinese")
+model = BlipForConditionalGeneration.from_pretrained("IDEA-CCNL/Taiyi-BLIP-750M-Chinese")
 def generate_caption(image):
     inputs = processor(image, return_tensors="pt")
-    outputs = model.generate(**inputs)
+    outputs = model(**inputs)
     description = processor.decode(outputs[0], skip_special_tokens=True)
     return description
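One caveat worth flagging in this hunk: the change from outputs = model.generate(**inputs) to outputs = model(**inputs) most likely breaks captioning. Calling a BlipForConditionalGeneration instance directly runs a single forward pass and returns logits rather than generated token IDs, so processor.decode(outputs[0], skip_special_tokens=True) no longer yields text. A minimal sketch of the working shape of this function, keeping the commit's Taiyi checkpoint (the Gradio wiring below is an assumption, since the rest of app.py falls outside this hunk):

import gradio as gr
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Checkpoint from the commit; the previous value,
# "Salesforce/blip-image-captioning-base", produces English captions instead.
MODEL_ID = "IDEA-CCNL/Taiyi-BLIP-750M-Chinese"
processor = BlipProcessor.from_pretrained(MODEL_ID)
model = BlipForConditionalGeneration.from_pretrained(MODEL_ID)

def generate_caption(image: Image.Image) -> str:
    inputs = processor(image, return_tensors="pt")
    # generate() runs autoregressive decoding and returns token IDs,
    # which is what processor.decode() expects; a bare model(**inputs)
    # forward pass returns logits instead.
    outputs = model.generate(**inputs)
    return processor.decode(outputs[0], skip_special_tokens=True)

# Hypothetical UI wiring -- the Gradio part of app.py is not shown in this diff.
demo = gr.Interface(fn=generate_caption, inputs=gr.Image(type="pil"), outputs="text")

if __name__ == "__main__":
    demo.launch()

Two smaller notes: the CLIPProcessor / ChineseCLIPVisionModel / AutoProcessor import is unused now that the OFA-Sys lines are commented out, and the commented HF_HOME / HF_ENDPOINT mirror settings only take effect if set before transformers and huggingface_hub are first imported (or exported in the shell), because the hub endpoint is read when the library initializes.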