import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Attention(nn.Module):

    def __init__(self, input_size, hidden_size, num_classes):
        super(Attention, self).__init__()
        self.attention_cell = AttentionCell(input_size, hidden_size, num_classes)
        self.hidden_size = hidden_size
        self.num_classes = num_classes
        self.generator = nn.Linear(hidden_size, num_classes)

    def _char_to_onehot(self, input_char, onehot_dim=38):
        """ Convert a batch of character indices [batch_size] into one-hot vectors [batch_size x onehot_dim]. """
        input_char = input_char.unsqueeze(1)
        batch_size = input_char.size(0)
        one_hot = torch.FloatTensor(batch_size, onehot_dim).zero_().to(device)
        one_hot = one_hot.scatter_(1, input_char, 1)
        return one_hot
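    # Note: an equivalent encoding of the 1-D index tensor could also be obtained with
    # the built-in helper (assuming PyTorch >= 1.1), e.g.
    #     F.one_hot(input_char, num_classes=onehot_dim).float()
    # This is only a reference note; the explicit scatter_ above is what the module uses.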

    def forward(self, batch_H, text, is_train=True, batch_max_length=25):
        """
        input:
            batch_H : contextual feature H = hidden states of the encoder. [batch_size x num_steps x contextual_feature_channels]
            text : the text index of each image. [batch_size x (max_length + 1)]. +1 for the [GO] token; text[:, 0] = [GO].
        output:
            probs : probability distribution at each step. [batch_size x num_steps x num_classes]
        """
        batch_size = batch_H.size(0)
        num_steps = batch_max_length + 1  # +1 for the end-of-sentence token

        output_hiddens = torch.FloatTensor(batch_size, num_steps, self.hidden_size).fill_(0).to(device)
        hidden = (torch.FloatTensor(batch_size, self.hidden_size).fill_(0).to(device),
                  torch.FloatTensor(batch_size, self.hidden_size).fill_(0).to(device))

        if is_train:
            # Teacher forcing: condition each step on the ground-truth character of the previous step.
            for i in range(num_steps):
                char_onehots = self._char_to_onehot(text[:, i], onehot_dim=self.num_classes)
                hidden, alpha = self.attention_cell(hidden, batch_H, char_onehots)
                output_hiddens[:, i, :] = hidden[0]  # hidden state of the decoder LSTM cell
            probs = self.generator(output_hiddens)
        else:
            # Greedy decoding: feed back the most probable character from the previous step.
            targets = torch.LongTensor(batch_size).fill_(0).to(device)  # start from the [GO] token (index 0)
            probs = torch.FloatTensor(batch_size, num_steps, self.num_classes).fill_(0).to(device)

            for i in range(num_steps):
                char_onehots = self._char_to_onehot(targets, onehot_dim=self.num_classes)
                hidden, alpha = self.attention_cell(hidden, batch_H, char_onehots)
                probs_step = self.generator(hidden[0])
                probs[:, i, :] = probs_step
                _, next_input = probs_step.max(1)
                targets = next_input

        return probs  # [batch_size x num_steps x num_classes]


class AttentionCell(nn.Module):

    def __init__(self, input_size, hidden_size, num_embeddings):
        super(AttentionCell, self).__init__()
        self.i2h = nn.Linear(input_size, hidden_size, bias=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)
        self.score = nn.Linear(hidden_size, 1, bias=False)
        self.rnn = nn.LSTMCell(input_size + num_embeddings, hidden_size)
        self.hidden_size = hidden_size

    def forward(self, prev_hidden, batch_H, char_onehots):
        # Additive (Bahdanau-style) attention: score each encoder step against the previous decoder state.
        batch_H_proj = self.i2h(batch_H)                                  # [batch_size x num_steps x hidden_size]
        prev_hidden_proj = self.h2h(prev_hidden[0]).unsqueeze(1)          # [batch_size x 1 x hidden_size]
        e = self.score(torch.tanh(batch_H_proj + prev_hidden_proj))       # [batch_size x num_steps x 1]

        alpha = F.softmax(e, dim=1)                                       # attention weights over the encoder steps
        context = torch.bmm(alpha.permute(0, 2, 1), batch_H).squeeze(1)   # weighted sum of encoder features: [batch_size x input_size]
        concat_context = torch.cat([context, char_onehots], 1)            # [batch_size x (input_size + num_embeddings)]
        cur_hidden = self.rnn(concat_context, prev_hidden)
        return cur_hidden, alpha
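

# A minimal smoke test, kept as a sketch: the 32-channel encoder feature size, the
# hidden size of 256, and the 38 decoder classes below are illustrative assumptions,
# not values taken from the original file.
if __name__ == '__main__':
    batch_size, channels, hidden_size, num_classes, max_length = 2, 32, 256, 38, 25
    num_steps = max_length + 1

    attention = Attention(channels, hidden_size, num_classes).to(device)

    # Encoder output: [batch_size x num_steps x channels]
    batch_H = torch.randn(batch_size, num_steps, channels, device=device)
    # Text indices including the [GO] token at position 0: [batch_size x (max_length + 1)]
    text = torch.randint(0, num_classes, (batch_size, num_steps), device=device)

    # Training path (teacher forcing) and inference path (greedy decoding).
    probs_train = attention(batch_H, text, is_train=True, batch_max_length=max_length)
    probs_test = attention(batch_H, text, is_train=False, batch_max_length=max_length)
    print(probs_train.shape, probs_test.shape)  # both [2, 26, 38]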