Spaces:
Runtime error
Singularity666 committed
Commit • f9f1b17
1 Parent(s): ee11fe8

Upload 7 files
- app.py +51 -0
- gitignore.txt +7 -0
- main.py +335 -0
- requirements.txt +6 -0
- saved_text_embeddings.pt +3 -0
- testing_df.csv +0 -0
- weights.pt +3 -0
app.py
ADDED
@@ -0,0 +1,51 @@
import streamlit as st
import pickle
import pandas as pd
import torch
from PIL import Image
import numpy as np
from main import predict_caption, CLIPModel, get_text_embeddings


st.markdown(
    """
    <style>
    body {
        background-color: transparent;
    }
    </style>
    """,
    unsafe_allow_html=True,
)


device = torch.device("cpu")

testing_df = pd.read_csv("testing_df.csv")
model = CLIPModel().to(device)
model.load_state_dict(torch.load("weights.pt", map_location=torch.device('cpu')))
text_embeddings = torch.load('saved_text_embeddings.pt', map_location=device)


def show_predicted_caption(image):
    matches = predict_caption(
        image, model, text_embeddings, testing_df["caption"]
    )[0]
    return matches

st.title("Medical Image Captioning")
st.write("Upload an image to get a caption:")

uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)
    st.write("")

    if st.button("Generate Caption"):
        with st.spinner("Generating caption..."):
            image_np = np.array(image)
            caption = show_predicted_caption(image_np)
            st.success(f"Caption: {caption}")
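
For quick reference, the inference path that app.py wires up can also be exercised from a plain Python session. The sketch below is illustrative only and is not part of this commit; it assumes weights.pt, saved_text_embeddings.pt and testing_df.csv sit in the working directory, and "example.jpg" is a placeholder for any local image.

# Hypothetical smoke test mirroring app.py (not part of the upload).
# "example.jpg" is an assumed local image path.
import numpy as np
import pandas as pd
import torch
from PIL import Image

from main import CLIPModel, predict_caption

device = torch.device("cpu")
testing_df = pd.read_csv("testing_df.csv")

model = CLIPModel().to(device)
model.load_state_dict(torch.load("weights.pt", map_location=device))
text_embeddings = torch.load("saved_text_embeddings.pt", map_location=device)

# Same call app.py makes: top-1 caption match for the uploaded image
image = np.array(Image.open("example.jpg").convert("RGB"))
print(predict_caption(image, model, text_embeddings, testing_df["caption"])[0])
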
gitignore.txt
ADDED
@@ -0,0 +1,7 @@
__pycache__/
*.pyc
*.pyo
*.pyd
*~
.env
main.py
ADDED
@@ -0,0 +1,335 @@
from torch import nn
from tqdm.autonotebook import tqdm
from transformers import AutoTokenizer, AutoModel
from transformers import DistilBertModel, DistilBertConfig, DistilBertTokenizer
import albumentations as A
import cv2
import timm
import torch
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class CFG:
    debug = False
    image_path = '/content/content/new_images_v5'
    captions_path = '/content/content/all_data/new_caption.csv'
    batch_size = 12
    num_workers = 2
    head_lr = 1e-3
    image_encoder_lr = 1e-4
    text_encoder_lr = 1e-5
    weight_decay = 1e-3
    patience = 1
    factor = 0.8
    epochs = 2
    saved_model_clinical = '/content/content/new_weights.pt'
    trained_model = 'clinical_bert_weights.pt'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = 'resnet50'
    image_embedding = 2048
    text_encoder_model = "distilbert-base-uncased"
    clinical_encoder_model = "emilyalsentzer/Bio_ClinicalBERT"
    text_embedding = 768
    text_tokenizer = "distilbert-base-uncased"
    max_length = 200

    pretrained = True  # for both image encoder and text encoder
    trainable = True  # for both image encoder and text encoder
    temperature = 1.0

    # image size
    size = 224

    # for projection head; used for both image and text encoders
    num_projection_layers = 1
    projection_dim = 256
    dropout = 0.1


def build_loaders(dataframe, tokenizer, mode):
    transforms = get_transforms(mode=mode)
    dataset = CLIPDataset(
        dataframe["image"].values,
        dataframe["caption"].values,
        tokenizer=tokenizer,
        transforms=transforms,
    )

    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=CFG.batch_size,
        num_workers=CFG.num_workers,
        shuffle=True if mode == "train" else False,
    )
    return dataloader


class AvgMeter:
    def __init__(self, name="Metric"):
        self.name = name
        self.reset()

    def reset(self):
        self.avg, self.sum, self.count = [0] * 3

    def update(self, val, count=1):
        self.count += count
        self.sum += val * count
        self.avg = self.sum / self.count

    def __repr__(self):
        text = f"{self.name}: {self.avg:.4f}"
        return text

def get_lr(optimizer):
    for param_group in optimizer.param_groups:
        return param_group["lr"]


# Custom dataset object. Will tokenize text and apply transforms to images before yielding them.

class CLIPDataset(torch.utils.data.Dataset):
    def __init__(self, image_filenames, captions, tokenizer, transforms):
        """
        image_filenames and captions must have the same length; so, if there are
        multiple captions for each image, the image_filenames must have repetitive
        file names
        """

        self.image_filenames = image_filenames
        self.captions = list(captions)
        self.skippedImgCount = 0
        self.encoded_captions = tokenizer(
            list(captions), padding=True, truncation=True, max_length=CFG.max_length
        )
        self.transforms = transforms

    def __getitem__(self, idx):
        item = {
            key: torch.tensor(values[idx])
            for key, values in self.encoded_captions.items()
        }

        image = cv2.imread(f"{CFG.image_path}/{self.image_filenames[idx]}")
        if image is None:
            # Skip the current example and move to the next one
            self.skippedImgCount += 1
            return self.__getitem__((idx + 1) % len(self))

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = self.transforms(image=image)['image']
        item['image'] = torch.tensor(image).permute(2, 0, 1).float()
        item['caption'] = self.captions[idx]

        return item

    def __len__(self):
        return len(self.captions)


def get_transforms(mode="train"):
    if mode == "train":
        return A.Compose(
            [
                A.Resize(CFG.size, CFG.size, always_apply=True),
                A.Normalize(max_pixel_value=255.0, always_apply=True),
            ]
        )
    else:
        return A.Compose(
            [
                A.Resize(CFG.size, CFG.size, always_apply=True),
                A.Normalize(max_pixel_value=255.0, always_apply=True),
            ]
        )


class ImageEncoder(nn.Module):
    """
    Encode images to a fixed size vector
    """

    def __init__(
        self, model_name=CFG.model_name, pretrained=CFG.pretrained, trainable=CFG.trainable
    ):
        super().__init__()
        self.model = timm.create_model(
            model_name, pretrained, num_classes=0, global_pool="avg"
        )
        for p in self.model.parameters():
            p.requires_grad = trainable

    def forward(self, x):
        return self.model(x)


class TextEncoder(nn.Module):
    def __init__(self, model_name=CFG.text_encoder_model, pretrained=CFG.pretrained, trainable=CFG.trainable):
        super().__init__()
        if pretrained:
            # self.model = DistilBertModel.from_pretrained(model_name)

            # Use Bio-ClinicalBERT
            self.model = AutoModel.from_pretrained(CFG.clinical_encoder_model)

        else:
            self.model = DistilBertModel(config=DistilBertConfig())

        for p in self.model.parameters():
            p.requires_grad = trainable

        # we are using the CLS token hidden representation as the sentence's embedding
        self.target_token_idx = 0

    def forward(self, input_ids, attention_mask):
        output = self.model(input_ids=input_ids, attention_mask=attention_mask)
        last_hidden_state = output.last_hidden_state
        return last_hidden_state[:, self.target_token_idx, :]


# Get both image and text encodings into a same size matrix
class ProjectionHead(nn.Module):
    def __init__(
        self,
        embedding_dim,
        projection_dim=CFG.projection_dim,
        dropout=CFG.dropout
    ):
        super().__init__()
        self.projection = nn.Linear(embedding_dim, projection_dim)
        self.gelu = nn.GELU()
        self.fc = nn.Linear(projection_dim, projection_dim)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(projection_dim)

    def forward(self, x):
        projected = self.projection(x)
        x = self.gelu(projected)
        x = self.fc(x)
        x = self.dropout(x)
        x = x + projected
        x = self.layer_norm(x)
        return x


class CLIPModel(nn.Module):
    def __init__(
        self,
        temperature=CFG.temperature,
        image_embedding=CFG.image_embedding,
        text_embedding=CFG.text_embedding,
    ):
        super().__init__()
        self.image_encoder = ImageEncoder()
        self.text_encoder = TextEncoder()
        self.image_projection = ProjectionHead(embedding_dim=image_embedding)
        self.text_projection = ProjectionHead(embedding_dim=text_embedding)
        self.temperature = temperature

    def forward(self, batch):
        # Getting Image and Text Features
        image_features = self.image_encoder(batch["image"])
        text_features = self.text_encoder(
            input_ids=batch["input_ids"], attention_mask=batch["attention_mask"]
        )
        # Getting Image and Text Embeddings (with same dimension)
        image_embeddings = self.image_projection(image_features)
        text_embeddings = self.text_projection(text_features)

        # Calculating the Loss
        logits = (text_embeddings @ image_embeddings.T) / self.temperature
        images_similarity = image_embeddings @ image_embeddings.T
        texts_similarity = text_embeddings @ text_embeddings.T
        targets = F.softmax(
            (images_similarity + texts_similarity) / 2 * self.temperature, dim=-1
        )
        texts_loss = cross_entropy(logits, targets, reduction='none')
        images_loss = cross_entropy(logits.T, targets.T, reduction='none')
        loss = (images_loss + texts_loss) / 2.0  # shape: (batch_size)
        return loss.mean()


def cross_entropy(preds, targets, reduction='none'):
    log_softmax = nn.LogSoftmax(dim=-1)
    loss = (-targets * log_softmax(preds)).sum(1)
    if reduction == "none":
        return loss
    elif reduction == "mean":
        return loss.mean()


# INFERENCE CODE
def get_image_embeddings(image):
    # preprocess the image
    if image is None:
        print("Image not found!")
        return None
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = get_transforms("valid")(image=image)['image']
    image = image.reshape(3, 224, 224)
    model = CLIPModel().to(device)
    model.load_state_dict(torch.load('weights.pt', map_location=device))
    model.eval()

    with torch.no_grad():
        image_tensor = torch.from_numpy(image)
        image_features = model.image_encoder(image_tensor.unsqueeze(0).to(device))
        image_embeddings = model.image_projection(image_features)
        image_embeddings = F.normalize(image_embeddings, p=2, dim=-1)

    return image_embeddings


def predict_caption(image, model, text_embeddings, captions, n=1):
    # get the image embeddings
    image_embeddings = get_image_embeddings(image)
    if image_embeddings is None:
        return None

    # normalize the embeddings
    image_embeddings_n = F.normalize(image_embeddings, p=2, dim=-1)
    text_embeddings_n = F.normalize(text_embeddings, p=2, dim=-1)
    # calculate the dot product of image and text embeddings
    dot_similarity = image_embeddings_n @ text_embeddings_n.T

    # get the top n matches
    values, indices = torch.topk(dot_similarity.squeeze(0), n)
    indices = indices.cpu().numpy().tolist()
    matches = [captions[idx] for idx in indices]

    return matches


def get_text_embeddings(valid_df):
    tokenizer = AutoTokenizer.from_pretrained(CFG.clinical_encoder_model)
    valid_loader = build_loaders(valid_df, tokenizer, mode="valid")

    model = CLIPModel().to(device)
    model.load_state_dict(torch.load("weights.pt", map_location=device))
    model.eval()

    valid_text_embeddings = []
    with torch.no_grad():
        for batch in tqdm(valid_loader):
            text_features = model.text_encoder(
                input_ids=batch["input_ids"].to(device), attention_mask=batch["attention_mask"].to(device)
            )
            text_embeddings = model.text_projection(text_features)
            valid_text_embeddings.append(text_embeddings)

    return model, torch.cat(valid_text_embeddings)
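
saved_text_embeddings.pt is uploaded as a ready-made artifact; a plausible offline step for producing it with get_text_embeddings is sketched below. This script is an assumption rather than part of the commit, and it only runs if testing_df.csv carries both an image and a caption column and the referenced image files exist under CFG.image_path, since CLIPDataset loads images as well as captions.

# Hypothetical offline step for generating saved_text_embeddings.pt (not in this upload).
import pandas as pd
import torch

from main import get_text_embeddings

testing_df = pd.read_csv("testing_df.csv")  # assumed to hold "image" and "caption" columns
model, text_embeddings = get_text_embeddings(testing_df)
torch.save(text_embeddings, "saved_text_embeddings.pt")
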
requirements.txt
ADDED
@@ -0,0 +1,6 @@
torch
opencv-python==4.5.4.60
transformers
albumentations
timm
tqdm
saved_text_embeddings.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2da707e595ccab006a2159d26d55469c7f015ea4a4dbc645154972fa96a17cc5
size 7538453
testing_df.csv
ADDED
The diff for this file is too large to render.
See raw diff
weights.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7be01315d364ccdce0cc260f8b43c7381e3629e670017e5aaa9bcc6ca172eb34
size 531111517