squanchd committed
Commit ca6c577
1 Parent(s): d9b1019
Files changed (2)
  1. handler.py +29 -0
  2. requirements.txt +2 -0
handler.py ADDED
@@ -0,0 +1,29 @@
+ from typing import Dict, Any
+ import numpy as np
+ from transformers import CLIPProcessor, CLIPModel
+ from PIL import Image
+ from io import BytesIO
+ import base64
+
+ class EndpointHandler():
+     def __init__(self, path=""):
+         # Preload all the elements you are going to need at inference.
+         self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+         self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         data args:
+             words (:obj:`list` of `str`): candidate texts for the CLIP processor
+             image (:obj:`str`): base64-encoded image bytes
+         Return:
+             A :obj:`dict` that will be serialized and returned
+         """
+         words = data.pop("words", data)
+         # Decode the base64 payload back into a PIL image.
+         image = Image.open(BytesIO(base64.b64decode(data["image"])))
+         inputs = self.processor(text=words, images=image, return_tensors="pt", padding=True)
+         outputs = self.model(**inputs)
+         # Flatten the pooled image embedding into a plain list so it is JSON-serializable.
+         embeddings = outputs.image_embeds.detach().numpy().flatten().tolist()
+         return {"embeddings": embeddings}
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ pillow
+ numpy
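
A minimal local smoke test of this handler might look like the sketch below. The image path and the candidate words are placeholders, not part of this commit, and it assumes the handler is driven the way a custom Inference Endpoints handler receives requests (a dict carrying base64-encoded image bytes). It also assumes transformers and torch are already installed, which is presumably why requirements.txt only adds pillow and numpy.

```python
# Hypothetical local test for handler.py; "cat.jpg" and the candidate
# words are placeholders.
import base64

from handler import EndpointHandler

handler = EndpointHandler()

# The handler expects base64-encoded image bytes under "image" and a
# list of candidate texts under "words".
with open("cat.jpg", "rb") as f:
    payload = {
        "words": ["a photo of a cat", "a photo of a dog"],
        "image": base64.b64encode(f.read()).decode("utf-8"),
    }

result = handler(payload)
print(len(result["embeddings"]))  # 512-dim image embedding for clip-vit-base-patch32
```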