# COMET2019_ConceptNet / original.py
import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
'''
Much of this code is taken from HuggingFace's OpenAI LM Implementation here:
https://github.com/huggingface/pytorch-openai-transformer-lm
'''


def gelu(x):
    # GELU approximation used by the original OpenAI GPT code.
    return (0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) *
            (x + 0.044715 * torch.pow(x, 3)))))


def swish(x):
    return x * torch.sigmoid(x)


# Activations are stored as plain callables so they can be applied directly to
# tensors (see MLP.forward). F.relu is used instead of the nn.ReLU module class
# so that all three entries behave the same way when called on a tensor.
ACT_FNS = {
    'relu': F.relu,
    'swish': swish,
    'gelu': gelu
}


class LayerNorm(nn.Module):
    """Construct a layernorm module in the OpenAI style
    (epsilon inside the square root)."""

    def __init__(self, n_state, e=1e-5):
        super(LayerNorm, self).__init__()
        self.g = nn.Parameter(torch.ones(n_state))
        self.b = nn.Parameter(torch.zeros(n_state))
        self.e = e

    def forward(self, x):
        # y = g * (x - mean(x)) / sqrt(var(x) + e) + b, normalized over the last dim
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.e)
        return self.g * x + self.b


class Conv1D(nn.Module):
    def __init__(self, nf, rf, nx):
        super(Conv1D, self).__init__()
        self.rf = rf
        self.nf = nf
        if rf == 1:  # faster 1x1 conv: a learned affine map over the last dimension
            w = torch.empty(nx, nf)
            nn.init.normal_(w, std=0.02)
            self.w = Parameter(w)
            self.b = Parameter(torch.zeros(nf))
        else:  # was used to train LM
            raise NotImplementedError

    def forward(self, x):
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            # Flatten all leading dims, compute x @ w + b, then restore the original shape
            x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x
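

# Illustrative note (not in the original source): with rf == 1, Conv1D behaves
# like nn.Linear(nx, nf), just with a transposed weight layout. A quick sketch,
# assuming a hidden size of 768 and the 3x projection used by Attention.c_attn:
#
#     proj = Conv1D(3 * 768, 1, 768)
#     x = torch.randn(2, 16, 768)          # (batch, seq, hidden)
#     assert proj(x).shape == (2, 16, 3 * 768)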


class Attention(nn.Module):
    def __init__(self, nx, n_ctx, cfg, scale=False):
        super(Attention, self).__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        assert n_state % cfg.nH == 0
        # Lower-triangular causal mask, shape (1, 1, n_ctx, n_ctx)
        self.register_buffer('b', torch.tril(torch.ones(
            n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = cfg.nH
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, 1, nx)  # joint query/key/value projection
        self.c_proj = Conv1D(n_state, 1, nx)      # output projection
        self.attn_dropout = nn.Dropout(cfg.adpt)
        self.resid_dropout = nn.Dropout(cfg.rdpt)

    # dimensions of w: (batch_size x num_heads x seq_length x seq_length)
    def _attn(self, q, k, v, sequence_mask):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))

        # Combine the causal mask with the (optional) padding mask, then push
        # masked-out positions to a large negative value before the softmax.
        b_subset = self.b[:, :, :w.size(-2), :w.size(-1)]
        if sequence_mask is not None:
            b_subset = b_subset * sequence_mask.view(
                sequence_mask.size(0), 1, -1)
            b_subset = b_subset.permute(1, 0, 2, 3)

        w = w * b_subset + -1e9 * (1 - b_subset)
        w = F.softmax(w, dim=-1)
        w = self.attn_dropout(w)
        return torch.matmul(w, v)

    def merge_heads(self, x):
        # (batch, heads, seq, head_dim) -> (batch, seq, heads * head_dim)
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        # (batch, seq, hidden) -> (batch, heads, seq, head_dim); keys are
        # transposed to (batch, heads, head_dim, seq) for the matmul in _attn
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x, sequence_mask):
        x = self.c_attn(x)
        # Split the joint projection into query, key, and value
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        a = self._attn(query, key, value, sequence_mask)
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        return a


class MLP(nn.Module):
    def __init__(self, n_state, cfg):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = cfg.hSize
        self.c_fc = Conv1D(n_state, 1, nx)
        self.c_proj = Conv1D(nx, 1, n_state)
        self.act = ACT_FNS[cfg.afn]
        self.dropout = nn.Dropout(cfg.rdpt)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, cfg, scale=False):
        super(Block, self).__init__()
        nx = cfg.hSize
        self.attn = Attention(nx, n_ctx, cfg, scale)
        self.ln_1 = LayerNorm(nx)
        self.mlp = MLP(4 * nx, cfg)
        self.ln_2 = LayerNorm(nx)

    def forward(self, x, sequence_mask):
        # Post-layernorm residual block, as in the original OpenAI GPT:
        # x + attention, LayerNorm, + MLP, LayerNorm.
        a = self.attn(x, sequence_mask)
        n = self.ln_1(x + a)
        m = self.mlp(n)
        h = self.ln_2(n + m)
        return h


class TransformerModel(nn.Module):
    """ Transformer model """

    def __init__(self, cfg, vocab=40990, n_ctx=512):
        super(TransformerModel, self).__init__()
        self.vocab = vocab
        self.embed = nn.Embedding(vocab, cfg.hSize)
        self.drop = nn.Dropout(cfg.edpt)
        block = Block(n_ctx, cfg, scale=True)
        self.h = nn.ModuleList([copy.deepcopy(block)
                                for _ in range(cfg.nL)])

        nn.init.normal_(self.embed.weight, std=0.02)

    def forward(self, x, sequence_mask):
        # x holds (token index, position index) pairs: (batch, seq, 2).
        # Position indices occupy the last n_ctx slots of the embedding table.
        x = x.view(-1, x.size(-2), x.size(-1))
        e = self.embed(x)
        # Add the position information to the input embeddings
        h = e.sum(dim=2)
        for block in self.h:
            h = block(h, sequence_mask)
        return h


class LMModel(nn.Module):
    """ Transformer with language model head only """

    def __init__(self, cfg, vocab=40990, n_ctx=512,
                 return_probs=False, return_acts=False):
        super(LMModel, self).__init__()
        self.transformer = TransformerModel(cfg, vocab=vocab, n_ctx=n_ctx)
        self.lm_head = LMHead(self.transformer, cfg, trunc_and_reshape=False)
        self.return_probs = return_probs
        self.return_acts = return_acts
        if self.return_probs or self.return_acts:
            # Mask out the position-embedding slots (the last n_ctx vocabulary
            # entries) so they are never predicted as tokens.
            pos_emb_mask = torch.zeros(1, 1, vocab)
            pos_emb_mask[:, :, -n_ctx:] = -1e12
            self.register_buffer('pos_emb_mask', pos_emb_mask)

    def forward(self, x, sequence_mask=None):
        h = self.transformer(x, sequence_mask)
        lm_logits = self.lm_head(h)
        if self.return_probs:
            lm_logits = F.softmax(lm_logits + self.pos_emb_mask, dim=-1)
        elif self.return_acts:
            lm_logits = lm_logits + self.pos_emb_mask
        return lm_logits


class LMHead(nn.Module):
    """ Language Model Head for the transformer """

    def __init__(self, model, cfg, trunc_and_reshape=True):
        super(LMHead, self).__init__()
        self.n_embd = cfg.hSize
        embed_shape = model.embed.weight.shape
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
        self.decoder.weight = model.embed.weight  # Tied weights
        self.trunc_and_reshape = trunc_and_reshape  # XD

    def forward(self, h):
        # Truncated Language modeling logits (we remove the last token)
        h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd) \
            if self.trunc_and_reshape else h  # XD
        lm_logits = self.decoder(h_trunc)
        return lm_logits


class dotdict(dict):
    """dot.notation access to dictionary attributes"""
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
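

# Illustrative note (not in the original source): dotdict exposes keys as
# attributes, e.g. dotdict({'hSize': 768}).hSize == 768. Because __getattr__ is
# dict.get, a missing key silently returns None rather than raising, so any
# config passed to the classes above must define every attribute they read.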


# NOTE: apart from 'afn', the classes above read differently named config
# attributes (cfg.hSize, cfg.nH, cfg.nL, cfg.edpt, cfg.adpt, cfg.rdpt), while
# DEFAULT_CONFIG keeps the original OpenAI/HuggingFace key names. Passing it
# in directly would make dotdict return None for those attributes, so supply a
# config that defines the names the classes expect (see the sketch below).
DEFAULT_CONFIG = dotdict({
    'n_embd': 768,
    'n_head': 12,
    'n_layer': 12,
    'embd_pdrop': 0.1,
    'attn_pdrop': 0.1,
    'resid_pdrop': 0.1,
    'afn': 'gelu',
    'clf_pdrop': 0.1})
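

# A minimal usage sketch (not part of the original file). It assumes a config
# with the attribute names the classes above actually read, and the OpenAI-GPT
# input layout in which every token is an (index, position) pair whose position
# indices live in the last n_ctx rows of the embedding table. The hyperparameter
# values simply mirror DEFAULT_CONFIG.
if __name__ == '__main__':
    cfg = dotdict({
        'hSize': 768,   # hidden / embedding size
        'nH': 12,       # attention heads
        'nL': 12,       # transformer blocks
        'edpt': 0.1,    # embedding dropout
        'adpt': 0.1,    # attention dropout
        'rdpt': 0.1,    # residual dropout
        'afn': 'gelu',  # activation function
    })
    vocab, n_ctx = 40990, 512
    model = LMModel(cfg, vocab=vocab, n_ctx=n_ctx)
    model.eval()

    batch_size, seq_len = 2, 16
    token_ids = torch.randint(0, vocab - n_ctx, (batch_size, seq_len))
    position_ids = torch.arange(vocab - n_ctx, vocab - n_ctx + seq_len)
    position_ids = position_ids.unsqueeze(0).expand(batch_size, seq_len)
    x = torch.stack([token_ids, position_ids], dim=-1)  # (batch, seq, 2)

    with torch.no_grad():
        lm_logits = model(x)  # (batch, seq, vocab)
    print(lm_logits.shape)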