import numpy as np
import torch


def get_2d_sincos_pos_embed(
    embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16
):
    """
    grid_size: int of the grid height and width
    return: pos_embed: [grid_size*grid_size, embed_dim] or
            [extra_tokens + grid_size*grid_size, embed_dim] (w/ or w/o extra cls tokens)
    """
    if isinstance(grid_size, int):
        grid_size = (grid_size, grid_size)

    # rescale the coordinates so the embedding stays comparable across resolutions
    grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale
    grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token and extra_tokens > 0:
        # prepend all-zero rows reserved for the cls/extra tokens
        pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
    return pos_embed
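
# Illustrative shape check (example values, not part of the original file):
# for a 16x16 patch grid with embed_dim=768,
#   get_2d_sincos_pos_embed(768, 16).shape == (256, 768)
# and with cls_token=True, extra_tokens=1 the result is (257, 768), whose first
# row is the all-zero embedding reserved for the cls token.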


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    if embed_dim % 2 != 0:
        raise ValueError("embed_dim must be divisible by 2")

    # use half of the dimensions to encode each spatial axis
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb
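
# Note (an observation, not from the original file): since the meshgrid above is
# built as meshgrid(grid_w, grid_h), grid[0] holds the w coordinates and grid[1]
# the h coordinates, so the emb_h/emb_w names are swapped relative to their
# content. The embedding is still well defined, because
# get_1d_sincos_pos_embed_from_grid flattens its input and every consumer sees
# the same consistent layout.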


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    if embed_dim % 2 != 0:
        raise ValueError("embed_dim must be divisible by 2")

    omega = np.arange(embed_dim // 2, dtype=np.float64)
    omega /= embed_dim / 2.0
    omega = 1.0 / 10000**omega  # (D/2,) geometrically decaying frequencies

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum("m,d->md", pos, omega)  # (M, D/2) outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb
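
# Worked example (illustrative values, not from the original file): for
# embed_dim = 4, omega = [1.0, 0.01], so a position m is encoded as
#   [sin(m), sin(0.01*m), cos(m), cos(0.01*m)]
# i.e. frequencies decay geometrically from 1 toward 1/10000 as the channel
# index grows, giving each position a unique multi-scale signature.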


try:
    from .curope import cuRoPE2D

    RoPE2D = cuRoPE2D
except ImportError:
    print('Warning, cannot find cuda-compiled version of RoPE2D, using a slow pytorch version instead')

    # Pure-PyTorch fallback. Defining the class inside the except block keeps
    # the CUDA implementation bound to RoPE2D whenever the import succeeds.
    class RoPE2D(torch.nn.Module):

        def __init__(self, freq=100.0, F0=1.0):
            super().__init__()
            self.base = freq
            self.F0 = F0
            self.cache = {}

        def get_cos_sin(self, D, seq_len, device, dtype):
            # cos/sin tables are memoized per (dim, length, device, dtype)
            if (D, seq_len, device, dtype) not in self.cache:
                inv_freq = 1.0 / (self.base ** (torch.arange(0, D, 2).float().to(device) / D))
                t = torch.arange(seq_len, device=device, dtype=inv_freq.dtype)
                freqs = torch.einsum("i,j->ij", t, inv_freq).to(dtype)
                freqs = torch.cat((freqs, freqs), dim=-1)  # (seq_len, D)
                cos = freqs.cos()
                sin = freqs.sin()
                self.cache[D, seq_len, device, dtype] = (cos, sin)
            return self.cache[D, seq_len, device, dtype]
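
        # Illustrative shapes (example values, not from the original file): for
        # D=32 and seq_len=196, get_cos_sin returns two (196, 32) tables; row p
        # holds cos/sin of p * inv_freq, with each frequency repeated twice so
        # the columns line up with the two halves produced by rotate_half below.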

        @staticmethod
        def rotate_half(x):
            # swap the two halves of the last dimension, negating the new first half
            x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
            return torch.cat((-x2, x1), dim=-1)
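
        # For example (illustrative): rotate_half([a, b, c, d]) == [-c, -d, a, b],
        # which pairs channel i with channel i + D/2 for the rotation below.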

        def apply_rope1d(self, tokens, pos1d, cos, sin):
            assert pos1d.ndim == 2
            # gather the cos/sin rows for each (batch, token) position and add a
            # singleton heads dimension so they broadcast over nheads
            cos = torch.nn.functional.embedding(pos1d, cos)[:, None, :, :]
            sin = torch.nn.functional.embedding(pos1d, sin)[:, None, :, :]
            return (tokens * cos) + (self.rotate_half(tokens) * sin)
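
        # In effect (worked equation, illustrative): with angle theta_i = p * inv_freq[i]
        # for a token at position p,
        #   out_i       = t_i       * cos(theta_i) - t_{i+D/2} * sin(theta_i)
        #   out_{i+D/2} = t_{i+D/2} * cos(theta_i) + t_i       * sin(theta_i)
        # i.e. each channel pair (i, i+D/2) is rotated by a position-dependent angle.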

        def forward(self, tokens, positions):
            """
            input:
                * tokens: batch_size x nheads x ntokens x dim
                * positions: batch_size x ntokens x 2 (y and x position of each token)
            output:
                * tokens after applying RoPE2D (batch_size x nheads x ntokens x dim)
            """
            # the embedding lookups in apply_rope1d need integer indices
            positions = positions.to(torch.int).to(tokens.device)
            assert tokens.size(3) % 2 == 0, "number of dimensions should be a multiple of two"
            D = tokens.size(3) // 2
            assert positions.ndim == 3 and positions.shape[-1] == 2  # batch, seq, 2
            cos, sin = self.get_cos_sin(D, int(positions.max()) + 1, tokens.device, tokens.dtype)

            # split the features in two halves: one rotated by the y position, one by x
            y, x = tokens.chunk(2, dim=-1)
            y = self.apply_rope1d(y, positions[:, :, 0], cos, sin)
            x = self.apply_rope1d(x, positions[:, :, 1], cos, sin)
            tokens = torch.cat((y, x), dim=-1)
            return tokens
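
# Usage sketch (illustrative values, not part of the original file): rotate the
# query/key tokens of an attention layer by their 2D patch coordinates. Both the
# CUDA and the fallback implementation share this interface.
#
#   rope = RoPE2D(freq=100.0)
#   tokens = torch.randn(2, 8, 196, 64)  # batch x nheads x ntokens x dim
#   ys, xs = torch.meshgrid(torch.arange(14), torch.arange(14), indexing="ij")
#   positions = torch.stack((ys, xs), dim=-1).reshape(1, 196, 2).expand(2, 196, 2)
#   tokens = rope(tokens, positions)  # same shape, positions now encoded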