ACCC1380 committed
Commit 87b20d0
Parent: ec8ca8b

Upload lora-scripts/sd-scripts/finetune/blip/blip.py with huggingface_hub

lora-scripts/sd-scripts/finetune/blip/blip.py ADDED
@@ -0,0 +1,245 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
'''
import warnings
warnings.filterwarnings("ignore")

# from models.vit import VisionTransformer, interpolate_pos_embed
# from models.med import BertConfig, BertModel, BertLMHeadModel
from blip.vit import VisionTransformer, interpolate_pos_embed
from blip.med import BertConfig, BertModel, BertLMHeadModel
from transformers import BertTokenizer

import torch
from torch import nn
import torch.nn.functional as F

import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file
from library.utils import setup_logging
setup_logging()
import logging
logger = logging.getLogger(__name__)


class BLIP_Base(nn.Module):
    def __init__(self,
                 med_config='configs/med_config.json',
                 image_size=224,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 ):
        """
        Args:
            med_config (str): path to the mixture-of-encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of the vision transformer ('base' or 'large')
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)

    def forward(self, image, caption, mode):
        assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal"
        text = self.tokenizer(caption, return_tensors="pt").to(image.device)

        if mode == 'image':
            # return image features
            image_embeds = self.visual_encoder(image)
            return image_embeds

        elif mode == 'text':
            # return text features
            text_output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask,
                                            return_dict=True, mode='text')
            return text_output.last_hidden_state

        elif mode == 'multimodal':
            # return multimodal features
            image_embeds = self.visual_encoder(image)
            image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

            # replace [CLS] with the [ENC] token to switch the text encoder into image-grounded mode
            text.input_ids[:, 0] = self.tokenizer.enc_token_id
            output = self.text_encoder(text.input_ids,
                                       attention_mask=text.attention_mask,
                                       encoder_hidden_states=image_embeds,
                                       encoder_attention_mask=image_atts,
                                       return_dict=True,
                                       )
            return output.last_hidden_state


class BLIP_Decoder(nn.Module):
    def __init__(self,
                 med_config='configs/med_config.json',
                 image_size=384,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 prompt='a picture of ',
                 ):
        """
        Args:
            med_config (str): path to the mixture-of-encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of the vision transformer ('base' or 'large')
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_decoder = BertLMHeadModel(config=med_config)

        self.prompt = prompt
        self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1

    def forward(self, image, caption):
        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

        text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device)
        text.input_ids[:, 0] = self.tokenizer.bos_token_id

        # mask out padding and the prompt prefix so they are ignored by the LM loss
        decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100)
        decoder_targets[:, :self.prompt_length] = -100

        decoder_output = self.text_decoder(text.input_ids,
                                           attention_mask=text.attention_mask,
                                           encoder_hidden_states=image_embeds,
                                           encoder_attention_mask=image_atts,
                                           labels=decoder_targets,
                                           return_dict=True,
                                           )
        loss_lm = decoder_output.loss

        return loss_lm

    def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0):
        image_embeds = self.visual_encoder(image)

        # recent versions of transformers seem to do repeat_interleave automatically
        # if not sample:
        #     image_embeds = image_embeds.repeat_interleave(num_beams, dim=0)

        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask": image_atts}

        prompt = [self.prompt] * image.size(0)
        input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
        # start generation from [DEC] (the BOS token) and drop the trailing [SEP] of the tokenized prompt
        input_ids[:, 0] = self.tokenizer.bos_token_id
        input_ids = input_ids[:, :-1]

        if sample:
            # nucleus sampling
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 do_sample=True,
                                                 top_p=top_p,
                                                 num_return_sequences=1,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=1.1,
                                                 **model_kwargs)
        else:
            # beam search
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 num_beams=num_beams,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=repetition_penalty,
                                                 **model_kwargs)

        captions = []
        for output in outputs:
            caption = self.tokenizer.decode(output, skip_special_tokens=True)
            # strip the prompt prefix from the decoded text
            captions.append(caption[len(self.prompt):])
        return captions


def blip_decoder(pretrained='', **kwargs):
    model = BLIP_Decoder(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert len(msg.missing_keys) == 0
    return model


def blip_feature_extractor(pretrained='', **kwargs):
    model = BLIP_Base(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert len(msg.missing_keys) == 0
    return model


def init_tokenizer():
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # [DEC] serves as the decoder's BOS token; [ENC] marks image-grounded text encoding
    tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
    tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
    return tokenizer


def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):
    assert vit in ['base', 'large'], "vit parameter must be base or large"
    # note: '0 or drop_path_rate' evaluates to drop_path_rate, while '0.1 or drop_path_rate'
    # always evaluates to 0.1, so the drop_path_rate argument only takes effect for the base model
    if vit == 'base':
        vision_width = 768
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
                                           num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
                                           drop_path_rate=0 or drop_path_rate
                                           )
    elif vit == 'large':
        vision_width = 1024
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
                                           num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
                                           drop_path_rate=0.1 or drop_path_rate
                                           )
    return visual_encoder, vision_width


def is_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def load_checkpoint(model, url_or_filename):
    if is_url(url_or_filename):
        cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
        checkpoint = torch.load(cached_file, map_location='cpu')
    elif os.path.isfile(url_or_filename):
        checkpoint = torch.load(url_or_filename, map_location='cpu')
    else:
        raise RuntimeError('checkpoint url or path is invalid')

    state_dict = checkpoint['model']

    # interpolate position embeddings to the current image resolution
    state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
    if 'visual_encoder_m.pos_embed' in model.state_dict().keys():
        state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
                                                                         model.visual_encoder_m)
    # drop checkpoint tensors whose shape does not match the current model
    for key in model.state_dict().keys():
        if key in state_dict.keys():
            if state_dict[key].shape != model.state_dict()[key].shape:
                del state_dict[key]

    msg = model.load_state_dict(state_dict, strict=False)
    logger.info('load checkpoint from %s' % url_or_filename)
    return model, msg
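
For reference, the sketch below shows how the blip_decoder factory and BLIP_Decoder.generate defined above are typically driven for captioning. It is illustrative only and not part of the uploaded file; the checkpoint path, image path, and the CLIP-style normalization constants are assumptions on my part, so substitute whatever BLIP caption weights, config, and preprocessing your setup uses.

# Illustrative usage sketch -- not part of blip.py. Paths and preprocessing
# constants below are assumptions; substitute your own checkpoint and config.
import torch
from PIL import Image
from torchvision import transforms
from blip.blip import blip_decoder

device = "cuda" if torch.cuda.is_available() else "cpu"
image_size = 384  # matches BLIP_Decoder's default

transform = transforms.Compose([
    transforms.Resize((image_size, image_size),
                      interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize((0.48145466, 0.4578275, 0.40821073),   # assumed CLIP-style means
                         (0.26862954, 0.26130258, 0.27577711)),  # assumed CLIP-style stds
])

# hypothetical checkpoint path -- a local .pth file or an http(s) URL both work,
# see load_checkpoint above
model = blip_decoder(pretrained="model_large_caption.pth",
                     image_size=image_size, vit="large",
                     med_config="configs/med_config.json")
model.eval().to(device)

image = transform(Image.open("example.jpg").convert("RGB")).unsqueeze(0).to(device)
with torch.no_grad():
    # beam search by default; pass sample=True, top_p=0.9 for nucleus sampling
    captions = model.generate(image, sample=False, num_beams=3,
                              max_length=75, min_length=5)
print(captions[0])

blip_feature_extractor works the same way: it returns a BLIP_Base whose forward(image, caption, mode) yields image, text, or multimodal features depending on mode.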