ChirathD commited on
Commit
5815286
1 Parent(s): e81b7a6

Update handler.py

Files changed (1)
  handler.py (+47, -0)
handler.py CHANGED
@@ -0,0 +1,47 @@
+ from typing import Dict, Any
+ from io import BytesIO
+
+ import torch
+ from PIL import Image
+ from transformers import Blip2ForConditionalGeneration, Blip2Processor
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+
+ class EndpointHandler():
+     def __init__(self, path=""):
+         # Load the processor and model once, and move the model to the GPU if one is available.
+         self.processor = Blip2Processor.from_pretrained("ChirathD/Blip-2-test-1")
+         self.model = Blip2ForConditionalGeneration.from_pretrained("ChirathD/Blip-2-test-1").to(device)
+         self.model.eval()
+
+     def __call__(self, data: Any) -> Dict[str, Any]:
+         """
+         Args:
+             data (:obj:`dict`):
+                 Contains the input image bytes under "inputs" and optional generation
+                 parameters under "parameters".
+         Return:
+             A :obj:`dict` holding one list, e.g. {"captions": ["A hugging face at the office"]},
+             where each entry is the caption generated for the corresponding input image.
+         """
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", {})
+
+         # Decode the raw image bytes into PIL images.
+         raw_images = [Image.open(BytesIO(_img)) for _img in inputs]
+
+         # Preprocess the images and merge in any user-supplied generation parameters.
+         processed_image = self.processor(images=raw_images, return_tensors="pt")
+         processed_image["pixel_values"] = processed_image["pixel_values"].to(device)
+         processed_image = {**processed_image, **parameters}
+
+         with torch.no_grad():
+             out = self.model.generate(**processed_image)
+         captions = self.processor.batch_decode(out, skip_special_tokens=True)
+
+         return {"captions": captions}
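For reference, a minimal local smoke test of this handler could look like the sketch below. It assumes handler.py is importable from the working directory and that an example image file named cat.jpg exists there; both names, and the max_new_tokens value, are placeholders and not part of the commit.

# Minimal local smoke test for EndpointHandler (sketch; filenames are placeholders).
from handler import EndpointHandler

# Read one example image as raw bytes.
with open("cat.jpg", "rb") as f:
    image_bytes = f.read()

# The handler expects {"inputs": [<bytes>, ...], "parameters": {...}}.
handler = EndpointHandler()
result = handler({"inputs": [image_bytes], "parameters": {"max_new_tokens": 30}})
print(result)  # e.g. {"captions": ["a cat sitting on a couch"]}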