repo_id | file_path | content | __index_level_0__
---|---|---|---|
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/scheduler/plateau_lr.py | """ Plateau Scheduler
Adapts the PyTorch ReduceLROnPlateau scheduler and adds support for warmup and LR noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import List
import torch
from .scheduler import Scheduler
class PlateauLRScheduler(Scheduler):
"""Decay the LR by a factor every time the validation loss plateaus."""
def __init__(
self,
optimizer,
decay_rate=0.1,
patience_t=10,
verbose=True,
threshold=1e-4,
cooldown_t=0,
warmup_t=0,
warmup_lr_init=0,
lr_min=0,
mode='max',
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize=True,
):
super().__init__(
optimizer,
'lr',
noise_range_t=noise_range_t,
noise_type=noise_type,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer,
patience=patience_t,
factor=decay_rate,
verbose=verbose,
threshold=threshold,
cooldown=cooldown_t,
mode=mode,
min_lr=lr_min
)
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
self.restore_lr = None
def state_dict(self):
return {
'best': self.lr_scheduler.best,
'last_epoch': self.lr_scheduler.last_epoch,
}
def load_state_dict(self, state_dict):
self.lr_scheduler.best = state_dict['best']
if 'last_epoch' in state_dict:
self.lr_scheduler.last_epoch = state_dict['last_epoch']
# override the base class step fn completely
def step(self, epoch, metric=None):
if epoch <= self.warmup_t:
lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps]
super().update_groups(lrs)
else:
if self.restore_lr is not None:
# restore actual LR from before our last noise perturbation before stepping base
for i, param_group in enumerate(self.optimizer.param_groups):
param_group['lr'] = self.restore_lr[i]
self.restore_lr = None
self.lr_scheduler.step(metric, epoch) # step the base scheduler
if self._is_apply_noise(epoch):
self._apply_noise(epoch)
def step_update(self, num_updates: int, metric: float = None):
return None
def _apply_noise(self, epoch):
noise = self._calculate_noise(epoch)
# apply the noise on top of previous LR, cache the old value so we can restore for normal
# stepping of base scheduler
restore_lr = []
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
restore_lr.append(old_lr)
new_lr = old_lr + old_lr * noise
param_group['lr'] = new_lr
self.restore_lr = restore_lr
    def _get_lr(self, t: int) -> List[float]:
assert False, 'should not be called as step is overridden'
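
if __name__ == '__main__':
    # Minimal usage sketch (runnable via `python -m timm.scheduler.plateau_lr`).
    # The toy model, hyper-parameters, and fake validation losses below are
    # illustrative assumptions only, not timm defaults or recommendations.
    import torch.nn as nn

    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = PlateauLRScheduler(
        optimizer,
        decay_rate=0.5,    # halve the LR when the metric plateaus
        patience_t=2,      # wait 2 epochs without improvement before decaying
        mode='min',        # the monitored metric is a loss, lower is better
        warmup_t=3,
        warmup_lr_init=1e-3,
    )
    fake_val_losses = [1.0, 0.9, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8]
    for epoch, val_loss in enumerate(fake_val_losses):
        # the epoch-level step drives both the warmup ramp and the plateau logic
        scheduler.step(epoch, metric=val_loss)
        print(f'epoch {epoch}: lr={optimizer.param_groups[0]["lr"]:.4f}')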
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/scheduler/cosine_lr.py | """ Cosine Scheduler
Cosine LR schedule with warmup, cycle/restarts, noise, k-decay.
Hacked together by / Copyright 2021 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
"""
Cosine decay with restarts.
This is described in the paper https://arxiv.org/abs/1608.03983.
Inspiration from
https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lr_min: float = 0.,
cycle_mul: float = 1.,
cycle_decay: float = 1.,
cycle_limit: int = 1,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
k_decay=1.0,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
assert t_initial > 0
assert lr_min >= 0
if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1:
_logger.warning(
"Cosine annealing scheduler will have no effect on the learning "
"rate since t_initial = t_mul = eta_mul = 1.")
self.t_initial = t_initial
self.lr_min = lr_min
self.cycle_mul = cycle_mul
self.cycle_decay = cycle_decay
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
self.k_decay = k_decay
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.cycle_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
t_i = self.cycle_mul ** i * self.t_initial
t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
gamma = self.cycle_decay ** i
lr_max_values = [v * gamma for v in self.base_values]
k = self.k_decay
if i < self.cycle_limit:
lrs = [
self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k))
for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_cycle_length(self, cycles=0):
cycles = max(1, cycles or self.cycle_limit)
if self.cycle_mul == 1.0:
return self.t_initial * cycles
else:
return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
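
if __name__ == '__main__':
    # Minimal usage sketch (runnable via `python -m timm.scheduler.cosine_lr`).
    # The parameter values are illustrative assumptions chosen to make the
    # warmup ramp, cosine decay, and restarts easy to see in the printout.
    import torch.nn as nn

    optimizer = torch.optim.SGD(nn.Linear(4, 2).parameters(), lr=0.1)
    scheduler = CosineLRScheduler(
        optimizer,
        t_initial=10,      # length of the first cosine cycle, in epochs
        lr_min=1e-5,
        cycle_mul=2.,      # each restart cycle is twice as long as the last
        cycle_decay=0.5,   # each restart peaks at half the previous max LR
        cycle_limit=3,
        warmup_t=3,
        warmup_lr_init=1e-4,
    )
    num_epochs = scheduler.get_cycle_length()  # total epochs spanned by the 3 cycles
    for epoch in range(num_epochs):
        scheduler.step(epoch)
        print(f'epoch {epoch}: lr={optimizer.param_groups[0]["lr"]:.5f}')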
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/scheduler/__init__.py | from .cosine_lr import CosineLRScheduler
from .multistep_lr import MultiStepLRScheduler
from .plateau_lr import PlateauLRScheduler
from .poly_lr import PolyLRScheduler
from .step_lr import StepLRScheduler
from .tanh_lr import TanhLRScheduler
from .scheduler_factory import create_scheduler, create_scheduler_v2, scheduler_kwargs
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/scheduler/step_lr.py | """ Step Scheduler
Basic step LR schedule with warmup, noise.
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import torch
from .scheduler import Scheduler
class StepLRScheduler(Scheduler):
"""
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
decay_t: float,
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=True,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
self.decay_t = decay_t
self.decay_rate = decay_rate
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values]
return lrs
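
if __name__ == '__main__':
    # Minimal usage sketch (runnable via `python -m timm.scheduler.step_lr`).
    # The values are illustrative assumptions: with decay_t=30 and decay_rate=0.1,
    # the LR is multiplied by 0.1 every 30 steps after the warmup prefix.
    import torch.nn as nn

    optimizer = torch.optim.SGD(nn.Linear(4, 2).parameters(), lr=0.1)
    scheduler = StepLRScheduler(optimizer, decay_t=30, decay_rate=0.1, warmup_t=5, warmup_lr_init=1e-4)
    for epoch in (0, 5, 29, 35, 65, 95):
        scheduler.step(epoch)
        print(f'epoch {epoch}: lr={optimizer.param_groups[0]["lr"]:.6f}')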
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/scheduler/multistep_lr.py | """ MultiStep LR Scheduler
Basic multi step LR schedule with warmup, noise.
"""
import torch
import bisect
from timm.scheduler.scheduler import Scheduler
from typing import List
class MultiStepLRScheduler(Scheduler):
"""
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
decay_t: List[int],
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=True,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
self.decay_t = decay_t
self.decay_rate = decay_rate
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def get_curr_decay_steps(self, t):
# find where in the array t goes,
# assumes self.decay_t is sorted
return bisect.bisect_right(self.decay_t, t + 1)
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values]
return lrs
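
if __name__ == '__main__':
    # Minimal usage sketch (runnable via `python -m timm.scheduler.multistep_lr`).
    # The milestone epochs are illustrative assumptions. Note the `t + 1` in
    # get_curr_decay_steps(): with step() called at the end of epoch t to set the
    # next value, the decayed LR is already in place when a milestone epoch starts.
    import torch.nn as nn

    optimizer = torch.optim.SGD(nn.Linear(4, 2).parameters(), lr=0.1)
    scheduler = MultiStepLRScheduler(optimizer, decay_t=[30, 60], decay_rate=0.1)
    for epoch in (0, 29, 30, 59, 60, 90):
        scheduler.step(epoch)
        print(f'epoch {epoch}: lr={optimizer.param_groups[0]["lr"]:.6f}')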
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/scheduler/scheduler.py | import abc
from abc import ABC
from typing import Any, Dict, List, Optional
import torch
class Scheduler(ABC):
""" Parameter Scheduler Base Class
A scheduler base class that can be used to schedule any optimizer parameter groups.
Unlike the builtin PyTorch schedulers, this is intended to be consistently called
* At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
* At the END of each optimizer update, after incrementing the update count, to calculate next update's value
The schedulers built on this should try to remain as stateless as possible (for simplicity).
This family of schedulers tries to avoid the confusion around the meaning of 'last_epoch'
and -1 values for special behaviour. All epoch and update counts must be tracked in the training
code and explicitly passed to the schedulers in the corresponding step or step_update call.
Based on ideas from:
* https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
* https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
param_group_field: str,
t_in_epochs: bool = True,
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize: bool = True,
) -> None:
self.optimizer = optimizer
self.param_group_field = param_group_field
self._initial_param_group_field = f"initial_{param_group_field}"
if initialize:
for i, group in enumerate(self.optimizer.param_groups):
if param_group_field not in group:
raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
group.setdefault(self._initial_param_group_field, group[param_group_field])
else:
for i, group in enumerate(self.optimizer.param_groups):
if self._initial_param_group_field not in group:
raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
self.metric = None # any point to having this for all?
self.t_in_epochs = t_in_epochs
self.noise_range_t = noise_range_t
self.noise_pct = noise_pct
self.noise_type = noise_type
self.noise_std = noise_std
self.noise_seed = noise_seed if noise_seed is not None else 42
self.update_groups(self.base_values)
def state_dict(self) -> Dict[str, Any]:
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.__dict__.update(state_dict)
@abc.abstractmethod
    def _get_lr(self, t: int) -> List[float]:
pass
    def _get_values(self, t: int, on_epoch: bool = True) -> Optional[List[float]]:
proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs)
if not proceed:
return None
return self._get_lr(t)
def step(self, epoch: int, metric: float = None) -> None:
self.metric = metric
values = self._get_values(epoch, on_epoch=True)
if values is not None:
values = self._add_noise(values, epoch)
self.update_groups(values)
def step_update(self, num_updates: int, metric: float = None):
self.metric = metric
values = self._get_values(num_updates, on_epoch=False)
if values is not None:
values = self._add_noise(values, num_updates)
self.update_groups(values)
def update_groups(self, values):
if not isinstance(values, (list, tuple)):
values = [values] * len(self.optimizer.param_groups)
for param_group, value in zip(self.optimizer.param_groups, values):
if 'lr_scale' in param_group:
param_group[self.param_group_field] = value * param_group['lr_scale']
else:
param_group[self.param_group_field] = value
def _add_noise(self, lrs, t):
if self._is_apply_noise(t):
noise = self._calculate_noise(t)
lrs = [v + v * noise for v in lrs]
return lrs
def _is_apply_noise(self, t) -> bool:
"""Return True if scheduler in noise range."""
apply_noise = False
if self.noise_range_t is not None:
if isinstance(self.noise_range_t, (list, tuple)):
apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
else:
apply_noise = t >= self.noise_range_t
return apply_noise
def _calculate_noise(self, t) -> float:
g = torch.Generator()
g.manual_seed(self.noise_seed + t)
if self.noise_type == 'normal':
while True:
# resample if noise out of percent limit, brute force but shouldn't spin much
noise = torch.randn(1, generator=g).item()
if abs(noise) < self.noise_pct:
return noise
else:
noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
return noise
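
if __name__ == '__main__':
    # Minimal sketch of subclassing the base Scheduler (runnable via
    # `python -m timm.scheduler.scheduler`). `LinearDecay` is a made-up toy
    # scheduler used only to show the two pieces a concrete implementation needs:
    # a _get_lr(t) override, plus step()/step_update() calls from the training loop.
    import torch.nn as nn

    class LinearDecay(Scheduler):
        """Toy scheduler: decay linearly from the initial LR to zero over total_t steps."""
        def __init__(self, optimizer, total_t, t_in_epochs=True):
            super().__init__(optimizer, param_group_field='lr', t_in_epochs=t_in_epochs)
            self.total_t = total_t

        def _get_lr(self, t):
            frac = max(0., 1. - t / self.total_t)
            return [v * frac for v in self.base_values]

    optimizer = torch.optim.SGD(nn.Linear(4, 2).parameters(), lr=0.1)
    scheduler = LinearDecay(optimizer, total_t=10)
    for epoch in range(10):
        # ... train one epoch here, calling scheduler.step_update(num_updates)
        # after each optimizer update when scheduling per-update (t_in_epochs=False) ...
        scheduler.step(epoch)  # epoch-level update when t_in_epochs=True
        print(f'epoch {epoch}: lr={optimizer.param_groups[0]["lr"]:.4f}')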
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/mlp_mixer.py | """ MLP-Mixer, ResMLP, and gMLP in PyTorch
This impl originally based on MLP-Mixer paper.
Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
@article{tolstikhin2021,
title={MLP-Mixer: An all-MLP Architecture for Vision},
author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner,
Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey},
journal={arXiv preprint arXiv:2105.01601},
year={2021}
}
Also supports ResMLP, and a preliminary (not verified) implementation of gMLP
Code: https://github.com/facebookresearch/deit
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
@misc{touvron2021resmlp,
title={ResMLP: Feedforward networks for image classification with data-efficient training},
author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and
Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou},
year={2021},
eprint={2105.03404},
}
Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
@misc{liu2021pay,
title={Pay Attention to MLPs},
author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le},
year={2021},
eprint={2105.08050},
}
A thank you to paper authors for releasing code and weights.
Hacked together by / Copyright 2021 Ross Wightman
"""
import math
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple
from ._builder import build_model_with_cfg
from ._manipulate import named_apply, checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['MixerBlock', 'MlpMixer'] # model_registry will add each entrypoint fn to this
class MixerBlock(nn.Module):
""" Residual Block w/ token mixing and channel MLPs
Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
def __init__(
self,
dim,
seq_len,
mlp_ratio=(0.5, 4.0),
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
drop=0.,
drop_path=0.,
):
super().__init__()
tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)]
self.norm1 = norm_layer(dim)
self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2))
x = x + self.drop_path(self.mlp_channels(self.norm2(x)))
return x
class Affine(nn.Module):
def __init__(self, dim):
super().__init__()
self.alpha = nn.Parameter(torch.ones((1, 1, dim)))
self.beta = nn.Parameter(torch.zeros((1, 1, dim)))
def forward(self, x):
return torch.addcmul(self.beta, self.alpha, x)
class ResBlock(nn.Module):
""" Residual MLP block w/ LayerScale and Affine 'norm'
Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
def __init__(
self,
dim,
seq_len,
mlp_ratio=4,
mlp_layer=Mlp,
norm_layer=Affine,
act_layer=nn.GELU,
init_values=1e-4,
drop=0.,
drop_path=0.,
):
super().__init__()
channel_dim = int(dim * mlp_ratio)
self.norm1 = norm_layer(dim)
self.linear_tokens = nn.Linear(seq_len, seq_len)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop)
self.ls1 = nn.Parameter(init_values * torch.ones(dim))
self.ls2 = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2))
x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x)))
return x
class SpatialGatingUnit(nn.Module):
""" Spatial Gating Unit
Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm):
super().__init__()
gate_dim = dim // 2
self.norm = norm_layer(gate_dim)
self.proj = nn.Linear(seq_len, seq_len)
def init_weights(self):
# special init for the projection gate, called as override by base model init
nn.init.normal_(self.proj.weight, std=1e-6)
nn.init.ones_(self.proj.bias)
def forward(self, x):
u, v = x.chunk(2, dim=-1)
v = self.norm(v)
v = self.proj(v.transpose(-1, -2))
return u * v.transpose(-1, -2)
class SpatialGatingBlock(nn.Module):
""" Residual Block w/ Spatial Gating
Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
def __init__(
self,
dim,
seq_len,
mlp_ratio=4,
mlp_layer=GatedMlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
drop=0.,
drop_path=0.,
):
super().__init__()
channel_dim = int(dim * mlp_ratio)
self.norm = norm_layer(dim)
sgu = partial(SpatialGatingUnit, seq_len=seq_len)
self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path(self.mlp_channels(self.norm(x)))
return x
class MlpMixer(nn.Module):
def __init__(
self,
num_classes=1000,
img_size=224,
in_chans=3,
patch_size=16,
num_blocks=8,
embed_dim=512,
mlp_ratio=(0.5, 4.0),
block_layer=MixerBlock,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
drop_rate=0.,
proj_drop_rate=0.,
drop_path_rate=0.,
nlhb=False,
stem_norm=False,
global_pool='avg',
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.grad_checkpointing = False
self.stem = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer if stem_norm else None,
)
# FIXME drop_path (stochastic depth scaling rule or all the same?)
self.blocks = nn.Sequential(*[
block_layer(
embed_dim,
self.stem.num_patches,
mlp_ratio,
mlp_layer=mlp_layer,
norm_layer=norm_layer,
act_layer=act_layer,
drop=proj_drop_rate,
drop_path=drop_path_rate,
)
for _ in range(num_blocks)])
self.norm = norm_layer(embed_dim)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
self.init_weights(nlhb=nlhb)
@torch.jit.ignore
def init_weights(self, nlhb=False):
head_bias = -math.log(self.num_classes) if nlhb else 0.
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=1)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False):
""" Mixer weight initialization (trying to match Flax defaults)
"""
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
if flax:
# Flax defaults
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# like MLP init in vit (my original init)
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
# NOTE if a parent module contains init_weights method, it can override the init of the
# child modules as this will be called in depth-first order.
module.init_weights()
def checkpoint_filter_fn(state_dict, model):
""" Remap checkpoints if needed """
if 'patch_embed.proj.weight' in state_dict:
# Remap FB ResMlp models -> timm
out_dict = {}
for k, v in state_dict.items():
k = k.replace('patch_embed.', 'stem.')
k = k.replace('attn.', 'linear_tokens.')
k = k.replace('mlp.', 'mlp_channels.')
k = k.replace('gamma_', 'ls')
if k.endswith('.alpha') or k.endswith('.beta'):
v = v.reshape(1, 1, -1)
out_dict[k] = v
return out_dict
return state_dict
def _create_mixer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for MLP-Mixer models.')
model = build_model_with_cfg(
MlpMixer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'stem.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'mixer_s32_224.untrained': _cfg(),
'mixer_s16_224.untrained': _cfg(),
'mixer_b32_224.untrained': _cfg(),
'mixer_b16_224.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth',
),
'mixer_b16_224.goog_in21k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth',
num_classes=21843
),
'mixer_l32_224.untrained': _cfg(),
'mixer_l16_224.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth',
),
'mixer_l16_224.goog_in21k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth',
num_classes=21843
),
# Mixer ImageNet-21K-P pretraining
'mixer_b16_224.miil_in21k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil_in21k-2a558a71.pth',
mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221,
),
'mixer_b16_224.miil_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil-9229a591.pth',
mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear',
),
'gmixer_12_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'gmixer_24_224.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_12_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_24_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth',
#url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_36_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_big_24_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_12_224.fb_distilled_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_24_224.fb_distilled_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_36_224.fb_distilled_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_big_24_224.fb_distilled_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_big_24_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_12_224.fb_dino': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'resmlp_24_224.fb_dino': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'gmlp_ti16_224.untrained': _cfg(),
'gmlp_s16_224.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth',
),
'gmlp_b16_224.untrained': _cfg(),
})
@register_model
def mixer_s32_224(pretrained=False, **kwargs) -> MlpMixer:
""" Mixer-S/32 224x224
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs)
model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_s16_224(pretrained=False, **kwargs) -> MlpMixer:
""" Mixer-S/16 224x224
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs)
model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_b32_224(pretrained=False, **kwargs) -> MlpMixer:
""" Mixer-B/32 224x224
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs)
model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_b16_224(pretrained=False, **kwargs) -> MlpMixer:
""" Mixer-B/16 224x224. ImageNet-1k pretrained weights.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_l32_224(pretrained=False, **kwargs) -> MlpMixer:
""" Mixer-L/32 224x224.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs)
model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args)
return model
@register_model
def mixer_l16_224(pretrained=False, **kwargs) -> MlpMixer:
""" Mixer-L/16 224x224. ImageNet-1k pretrained weights.
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
"""
model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args)
return model
@register_model
def gmixer_12_224(pretrained=False, **kwargs) -> MlpMixer:
""" Glu-Mixer-12 224x224
Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer
"""
model_args = dict(
patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0),
mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs)
model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args)
return model
@register_model
def gmixer_24_224(pretrained=False, **kwargs) -> MlpMixer:
""" Glu-Mixer-24 224x224
Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer
"""
model_args = dict(
patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0),
mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs)
model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_12_224(pretrained=False, **kwargs) -> MlpMixer:
""" ResMLP-12
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_24_224(pretrained=False, **kwargs) -> MlpMixer:
""" ResMLP-24
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_36_224(pretrained=False, **kwargs) -> MlpMixer:
""" ResMLP-36
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args)
return model
@register_model
def resmlp_big_24_224(pretrained=False, **kwargs) -> MlpMixer:
""" ResMLP-B-24
Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
"""
model_args = dict(
patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4,
block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs)
model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args)
return model
@register_model
def gmlp_ti16_224(pretrained=False, **kwargs) -> MlpMixer:
""" gMLP-Tiny
Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
model_args = dict(
patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock,
mlp_layer=GatedMlp, **kwargs)
model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args)
return model
@register_model
def gmlp_s16_224(pretrained=False, **kwargs) -> MlpMixer:
""" gMLP-Small
Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
model_args = dict(
patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock,
mlp_layer=GatedMlp, **kwargs)
model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args)
return model
@register_model
def gmlp_b16_224(pretrained=False, **kwargs) -> MlpMixer:
""" gMLP-Base
Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
"""
model_args = dict(
patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock,
mlp_layer=GatedMlp, **kwargs)
model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args)
return model
register_model_deprecations(__name__, {
'mixer_b16_224_in21k': 'mixer_b16_224.goog_in21k_ft_in1k',
'mixer_l16_224_in21k': 'mixer_l16_224.goog_in21k_ft_in1k',
'mixer_b16_224_miil': 'mixer_b16_224.miil_in21k_ft_in1k',
'mixer_b16_224_miil_in21k': 'mixer_b16_224.miil_in21k',
'resmlp_12_distilled_224': 'resmlp_12_224.fb_distilled_in1k',
'resmlp_24_distilled_224': 'resmlp_24_224.fb_distilled_in1k',
'resmlp_36_distilled_224': 'resmlp_36_224.fb_distilled_in1k',
'resmlp_big_24_distilled_224': 'resmlp_big_24_224.fb_distilled_in1k',
'resmlp_big_24_224_in22ft1k': 'resmlp_big_24_224.fb_in22k_ft_in1k',
'resmlp_12_224_dino': 'resmlp_12_224',
'resmlp_24_224_dino': 'resmlp_24_224',
})
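
if __name__ == '__main__':
    # Minimal sketch (runnable via `python -m timm.models.mlp_mixer`): build a small
    # randomly initialized MlpMixer directly and check the output shape. The sizes
    # below are illustrative assumptions; for pretrained weights the registered
    # entrypoints (e.g. via timm.create_model) would normally be used instead.
    model = MlpMixer(num_classes=10, patch_size=32, num_blocks=4, embed_dim=256)
    model.eval()
    dummy = torch.randn(2, 3, 224, 224)  # (batch, channels, height, width)
    with torch.no_grad():
        logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 10])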
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_registry.py | """ Model Registry
Hacked together by / Copyright 2020 Ross Wightman
"""
import fnmatch
import re
import sys
import warnings
from collections import defaultdict, deque
from copy import deepcopy
from dataclasses import replace
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Sequence, Union, Tuple
from ._pretrained import PretrainedCfg, DefaultCfg
__all__ = [
'split_model_name_tag', 'get_arch_name', 'register_model', 'generate_default_cfgs',
'list_models', 'list_pretrained', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules',
'get_pretrained_cfg_value', 'is_model_pretrained'
]
_module_to_models: Dict[str, Set[str]] = defaultdict(set) # dict of sets to check membership of model in module
_model_to_module: Dict[str, str] = {} # mapping of model names to module names
_model_entrypoints: Dict[str, Callable[..., Any]] = {} # mapping of model names to architecture entrypoint fns
_model_has_pretrained: Set[str] = set() # set of model names that have pretrained weight url present
_model_default_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch -> default cfg objects
_model_pretrained_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch.tag -> pretrained cfgs
_model_with_tags: Dict[str, List[str]] = defaultdict(list) # shortcut to map each model arch to all model + tag names
_module_to_deprecated_models: Dict[str, Dict[str, Optional[str]]] = defaultdict(dict)
_deprecated_models: Dict[str, Optional[str]] = {}
def split_model_name_tag(model_name: str, no_tag: str = '') -> Tuple[str, str]:
model_name, *tag_list = model_name.split('.', 1)
tag = tag_list[0] if tag_list else no_tag
return model_name, tag
def get_arch_name(model_name: str) -> str:
return split_model_name_tag(model_name)[0]
def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]):
out = defaultdict(DefaultCfg)
default_set = set() # no tag and tags ending with * are prioritized as default
for k, v in cfgs.items():
if isinstance(v, dict):
v = PretrainedCfg(**v)
has_weights = v.has_weights
model, tag = split_model_name_tag(k)
is_default_set = model in default_set
priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set)
tag = tag.strip('*')
default_cfg = out[model]
if priority:
default_cfg.tags.appendleft(tag)
default_set.add(model)
elif has_weights and not default_cfg.is_pretrained:
default_cfg.tags.appendleft(tag)
else:
default_cfg.tags.append(tag)
if has_weights:
default_cfg.is_pretrained = True
default_cfg.cfgs[tag] = v
return out
def register_model(fn: Callable[..., Any]) -> Callable[..., Any]:
# lookup containing module
mod = sys.modules[fn.__module__]
module_name_split = fn.__module__.split('.')
module_name = module_name_split[-1] if len(module_name_split) else ''
# add model to __all__ in module
model_name = fn.__name__
if hasattr(mod, '__all__'):
mod.__all__.append(model_name)
else:
mod.__all__ = [model_name] # type: ignore
# add entries to registry dict/sets
if model_name in _model_entrypoints:
warnings.warn(
f'Overwriting {model_name} in registry with {fn.__module__}.{model_name}. This is because the name being '
'registered conflicts with an existing name. Please check if this is not expected.',
stacklevel=2,
)
_model_entrypoints[model_name] = fn
_model_to_module[model_name] = module_name
_module_to_models[module_name].add(model_name)
if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:
# this will catch all models that have entrypoint matching cfg key, but miss any aliasing
# entrypoints or non-matching combos
default_cfg = mod.default_cfgs[model_name]
if not isinstance(default_cfg, DefaultCfg):
            # not the new style DefaultCfg dataclass (multiple pretrained entries per model-arch),
            # so assume an old style cfg dict for a single arch and convert it
            assert isinstance(default_cfg, dict)
pretrained_cfg = PretrainedCfg(**default_cfg)
default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})
for tag_idx, tag in enumerate(default_cfg.tags):
is_default = tag_idx == 0
pretrained_cfg = default_cfg.cfgs[tag]
model_name_tag = '.'.join([model_name, tag]) if tag else model_name
replace_items = dict(architecture=model_name, tag=tag if tag else None)
if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':
# auto-complete hub name w/ architecture.tag
replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag
pretrained_cfg = replace(pretrained_cfg, **replace_items)
if is_default:
_model_pretrained_cfgs[model_name] = pretrained_cfg
if pretrained_cfg.has_weights:
# add tagless entry if it's default and has weights
_model_has_pretrained.add(model_name)
if tag:
_model_pretrained_cfgs[model_name_tag] = pretrained_cfg
if pretrained_cfg.has_weights:
# add model w/ tag if tag is valid
_model_has_pretrained.add(model_name_tag)
_model_with_tags[model_name].append(model_name_tag)
else:
_model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances)
_model_default_cfgs[model_name] = default_cfg
return fn
def _deprecated_model_shim(deprecated_name: str, current_fn: Callable = None, current_tag: str = ''):
def _fn(pretrained=False, **kwargs):
assert current_fn is not None, f'Model {deprecated_name} has been removed with no replacement.'
current_name = '.'.join([current_fn.__name__, current_tag]) if current_tag else current_fn.__name__
warnings.warn(f'Mapping deprecated model name {deprecated_name} to current {current_name}.', stacklevel=2)
pretrained_cfg = kwargs.pop('pretrained_cfg', None)
return current_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg or current_tag, **kwargs)
return _fn
def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]):
mod = sys.modules[module_name]
module_name_split = module_name.split('.')
module_name = module_name_split[-1] if len(module_name_split) else ''
for deprecated, current in deprecation_map.items():
if hasattr(mod, '__all__'):
mod.__all__.append(deprecated)
current_fn = None
current_tag = ''
if current:
current_name, current_tag = split_model_name_tag(current)
current_fn = getattr(mod, current_name)
deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag)
setattr(mod, deprecated, deprecated_entrypoint_fn)
_model_entrypoints[deprecated] = deprecated_entrypoint_fn
_model_to_module[deprecated] = module_name
_module_to_models[module_name].add(deprecated)
_deprecated_models[deprecated] = current
_module_to_deprecated_models[module_name][deprecated] = current
def _natural_key(string_: str) -> List[Union[int, str]]:
"""See https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _expand_filter(filter: str):
""" expand a 'base_filter' to 'base_filter.*' if no tag portion"""
filter_base, filter_tag = split_model_name_tag(filter)
if not filter_tag:
return ['.'.join([filter_base, '*']), filter]
else:
return [filter]
def list_models(
filter: Union[str, List[str]] = '',
module: str = '',
pretrained: bool = False,
exclude_filters: Union[str, List[str]] = '',
name_matches_cfg: bool = False,
include_tags: Optional[bool] = None,
) -> List[str]:
""" Return list of available model names, sorted alphabetically
Args:
filter - Wildcard filter string that works with fnmatch
module - Limit model selection to a specific submodule (ie 'vision_transformer')
pretrained - Include only models with valid pretrained weights if True
exclude_filters - Wildcard filters to exclude models after including them with filter
name_matches_cfg - Include only models w/ model_name matching default_cfg name (excludes some aliases)
include_tags - Include pretrained tags in model names (model.tag). If None, defaults
set to True when pretrained=True else False (default: None)
Returns:
models - The sorted list of models
Example:
        list_models('gluon_resnet*') -- returns all models starting with 'gluon_resnet'
        list_models('*resnext*', 'resnet') -- returns all models with 'resnext' in the 'resnet' module
"""
if filter:
include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
else:
include_filters = []
if include_tags is None:
# FIXME should this be default behaviour? or default to include_tags=True?
include_tags = pretrained
all_models: Set[str] = _module_to_models[module] if module else set(_model_entrypoints.keys())
all_models = all_models - _deprecated_models.keys() # remove deprecated models from listings
if include_tags:
# expand model names to include names w/ pretrained tags
models_with_tags: Set[str] = set()
for m in all_models:
models_with_tags.update(_model_with_tags[m])
all_models = models_with_tags
# expand include and exclude filters to include a '.*' for proper match if no tags in filter
include_filters = [ef for f in include_filters for ef in _expand_filter(f)]
exclude_filters = [ef for f in exclude_filters for ef in _expand_filter(f)]
if include_filters:
models: Set[str] = set()
for f in include_filters:
include_models = fnmatch.filter(all_models, f) # include these models
if len(include_models):
models = models.union(include_models)
else:
models = all_models
if exclude_filters:
if not isinstance(exclude_filters, (tuple, list)):
exclude_filters = [exclude_filters]
for xf in exclude_filters:
exclude_models = fnmatch.filter(models, xf) # exclude these models
if len(exclude_models):
models = models.difference(exclude_models)
if pretrained:
models = _model_has_pretrained.intersection(models)
if name_matches_cfg:
models = set(_model_pretrained_cfgs).intersection(models)
return sorted(models, key=_natural_key)
def list_pretrained(
filter: Union[str, List[str]] = '',
exclude_filters: str = '',
) -> List[str]:
return list_models(
filter=filter,
pretrained=True,
exclude_filters=exclude_filters,
include_tags=True,
)
def get_deprecated_models(module: str = '') -> Dict[str, str]:
all_deprecated = _module_to_deprecated_models[module] if module else _deprecated_models
return deepcopy(all_deprecated)
def is_model(model_name: str) -> bool:
""" Check if a model name exists
"""
arch_name = get_arch_name(model_name)
return arch_name in _model_entrypoints
def model_entrypoint(model_name: str, module_filter: Optional[str] = None) -> Callable[..., Any]:
"""Fetch a model entrypoint for specified model name
"""
arch_name = get_arch_name(model_name)
if module_filter and arch_name not in _module_to_models.get(module_filter, {}):
        raise RuntimeError(f'Model ({model_name}) not found in module {module_filter}.')
return _model_entrypoints[arch_name]
def list_modules() -> List[str]:
""" Return list of module names that contain models / model entrypoints
"""
modules = _module_to_models.keys()
return sorted(modules)
def is_model_in_modules(
model_name: str, module_names: Union[Tuple[str, ...], List[str], Set[str]]
) -> bool:
"""Check if a model exists within a subset of modules
Args:
model_name - name of model to check
module_names - names of modules to search in
"""
arch_name = get_arch_name(model_name)
assert isinstance(module_names, (tuple, list, set))
return any(arch_name in _module_to_models[n] for n in module_names)
def is_model_pretrained(model_name: str) -> bool:
return model_name in _model_has_pretrained
def get_pretrained_cfg(model_name: str, allow_unregistered: bool = True) -> Optional[PretrainedCfg]:
if model_name in _model_pretrained_cfgs:
return deepcopy(_model_pretrained_cfgs[model_name])
arch_name, tag = split_model_name_tag(model_name)
if arch_name in _model_default_cfgs:
# if model arch exists, but the tag is wrong, error out
raise RuntimeError(f'Invalid pretrained tag ({tag}) for {arch_name}.')
if allow_unregistered:
# if model arch doesn't exist, it has no pretrained_cfg registered, allow a default to be created
return None
raise RuntimeError(f'Model architecture ({arch_name}) has no pretrained cfg registered.')
def get_pretrained_cfg_value(model_name: str, cfg_key: str) -> Optional[Any]:
""" Get a specific model default_cfg value by key. None if key doesn't exist.
"""
cfg = get_pretrained_cfg(model_name, allow_unregistered=False)
return getattr(cfg, cfg_key, None)
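
if __name__ == '__main__':
    # Minimal sketch of the registry flow (runnable via `python -m timm.models._registry`).
    # `toy_net` and its cfg are made-up names used only for this demonstration.
    import torch.nn as nn

    default_cfgs = generate_default_cfgs({
        'toy_net.untrained': PretrainedCfg(),  # no weights, just an arch + tag entry
    })

    @register_model
    def toy_net(pretrained=False, **kwargs) -> nn.Module:
        return nn.Linear(8, 8)

    print(is_model('toy_net'))             # True
    print(list_models('toy_net*'))         # ['toy_net']
    print(is_model_pretrained('toy_net'))  # False, the cfg has no weights
    print(model_entrypoint('toy_net')())   # Linear(in_features=8, out_features=8, bias=True)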
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/efficientformer_v2.py | """ EfficientFormer-V2
@article{
li2022rethinking,
title={Rethinking Vision Transformers for MobileNet Size and Speed},
author={Li, Yanyu and Hu, Ju and Wen, Yang and Evangelidis, Georgios and Salahi, Kamyar and Wang, Yanzhi and Tulyakov, Sergey and Ren, Jian},
journal={arXiv preprint arXiv:2212.08059},
year={2022}
}
Significantly refactored and cleaned up for timm from original at: https://github.com/snap-research/EfficientFormer
Original code licensed Apache 2.0, Copyright (c) 2022 Snap Inc.
Modifications and timm support by / Copyright 2023, Ross Wightman
"""
import math
from functools import partial
from typing import Dict
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_conv2d, create_norm_layer, get_act_layer, get_norm_layer, ConvNormAct
from timm.layers import DropPath, trunc_normal_, to_2tuple, to_ntuple
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
EfficientFormer_width = {
'L': (40, 80, 192, 384), # 26m 83.3% 6attn
'S2': (32, 64, 144, 288), # 12m 81.6% 4attn dp0.02
'S1': (32, 48, 120, 224), # 6.1m 79.0
'S0': (32, 48, 96, 176), # 75.0 75.7
}
EfficientFormer_depth = {
'L': (5, 5, 15, 10), # 26m 83.3%
'S2': (4, 4, 12, 8), # 12m
'S1': (3, 3, 9, 6), # 79.0
'S0': (2, 2, 6, 4), # 75.7
}
EfficientFormer_expansion_ratios = {
'L': (4, 4, (4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 4, 3, 3, 3, 3, 4, 4, 4)),
'S2': (4, 4, (4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 3, 3, 3, 3, 4, 4)),
'S1': (4, 4, (4, 4, 3, 3, 3, 3, 4, 4, 4), (4, 4, 3, 3, 4, 4)),
'S0': (4, 4, (4, 3, 3, 3, 4, 4), (4, 3, 3, 4)),
}
class ConvNorm(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding='',
dilation=1,
groups=1,
bias=True,
norm_layer='batchnorm2d',
norm_kwargs=None,
):
norm_kwargs = norm_kwargs or {}
super(ConvNorm, self).__init__()
self.conv = create_conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
self.bn = create_norm_layer(norm_layer, out_channels, **norm_kwargs)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Attention2d(torch.nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim=384,
key_dim=32,
num_heads=8,
attn_ratio=4,
resolution=7,
act_layer=nn.GELU,
stride=None,
):
super().__init__()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
resolution = to_2tuple(resolution)
if stride is not None:
resolution = tuple([math.ceil(r / stride) for r in resolution])
self.stride_conv = ConvNorm(dim, dim, kernel_size=3, stride=stride, groups=dim)
self.upsample = nn.Upsample(scale_factor=stride, mode='bilinear')
else:
self.stride_conv = None
self.upsample = None
self.resolution = resolution
self.N = self.resolution[0] * self.resolution[1]
self.d = int(attn_ratio * key_dim)
self.dh = int(attn_ratio * key_dim) * num_heads
self.attn_ratio = attn_ratio
kh = self.key_dim * self.num_heads
self.q = ConvNorm(dim, kh)
self.k = ConvNorm(dim, kh)
self.v = ConvNorm(dim, self.dh)
self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, groups=self.dh)
self.talking_head1 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1)
self.talking_head2 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1)
self.act = act_layer()
self.proj = ConvNorm(self.dh, dim, 1)
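        # Learned relative position bias, added to the attention logits: the absolute
        # (|dy|, |dx|) offsets between every pair of grid positions are folded into a
        # single index used to gather per-head biases from `attention_biases` below.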
pos = torch.stack(torch.meshgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1)
rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1]
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, self.N))
self.register_buffer('attention_bias_idxs', torch.LongTensor(rel_pos), persistent=False)
self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat)
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
B, C, H, W = x.shape
if self.stride_conv is not None:
x = self.stride_conv(x)
q = self.q(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2)
k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3)
v = self.v(x)
v_local = self.v_local(v)
v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2)
attn = (q @ k) * self.scale
attn = attn + self.get_attention_biases(x.device)
attn = self.talking_head1(attn)
attn = attn.softmax(dim=-1)
attn = self.talking_head2(attn)
x = (attn @ v).transpose(2, 3)
x = x.reshape(B, self.dh, self.resolution[0], self.resolution[1]) + v_local
if self.upsample is not None:
x = self.upsample(x)
x = self.act(x)
x = self.proj(x)
return x
class LocalGlobalQuery(torch.nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.pool = nn.AvgPool2d(1, 2, 0)
self.local = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=2, padding=1, groups=in_dim)
self.proj = ConvNorm(in_dim, out_dim, 1)
def forward(self, x):
local_q = self.local(x)
pool_q = self.pool(x)
q = local_q + pool_q
q = self.proj(q)
return q
class Attention2dDownsample(torch.nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim=384,
key_dim=16,
num_heads=8,
attn_ratio=4,
resolution=7,
out_dim=None,
act_layer=nn.GELU,
):
super().__init__()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.resolution = to_2tuple(resolution)
self.resolution2 = tuple([math.ceil(r / 2) for r in self.resolution])
self.N = self.resolution[0] * self.resolution[1]
self.N2 = self.resolution2[0] * self.resolution2[1]
self.d = int(attn_ratio * key_dim)
self.dh = int(attn_ratio * key_dim) * num_heads
self.attn_ratio = attn_ratio
self.out_dim = out_dim or dim
kh = self.key_dim * self.num_heads
self.q = LocalGlobalQuery(dim, kh)
self.k = ConvNorm(dim, kh, 1)
self.v = ConvNorm(dim, self.dh, 1)
self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, stride=2, groups=self.dh)
self.act = act_layer()
self.proj = ConvNorm(self.dh, self.out_dim, 1)
self.attention_biases = nn.Parameter(torch.zeros(num_heads, self.N))
k_pos = torch.stack(torch.meshgrid(torch.arange(
self.resolution[0]),
torch.arange(self.resolution[1]))).flatten(1)
q_pos = torch.stack(torch.meshgrid(
torch.arange(0, self.resolution[0], step=2),
torch.arange(0, self.resolution[1], step=2))).flatten(1)
rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1]
self.register_buffer('attention_bias_idxs', rel_pos, persistent=False)
self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat)
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
B, C, H, W = x.shape
q = self.q(x).reshape(B, self.num_heads, -1, self.N2).permute(0, 1, 3, 2)
k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3)
v = self.v(x)
v_local = self.v_local(v)
v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2)
attn = (q @ k) * self.scale
attn = attn + self.get_attention_biases(x.device)
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(2, 3)
x = x.reshape(B, self.dh, self.resolution2[0], self.resolution2[1]) + v_local
x = self.act(x)
x = self.proj(x)
return x
class Downsample(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=3,
stride=2,
padding=1,
resolution=7,
use_attn=False,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
):
super().__init__()
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
padding = to_2tuple(padding)
norm_layer = norm_layer or nn.Identity()
self.conv = ConvNorm(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
norm_layer=norm_layer,
)
if use_attn:
self.attn = Attention2dDownsample(
dim=in_chs,
out_dim=out_chs,
resolution=resolution,
act_layer=act_layer,
)
else:
self.attn = None
def forward(self, x):
out = self.conv(x)
if self.attn is not None:
return self.attn(x) + out
return out
class ConvMlpWithNorm(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
drop=0.,
mid_conv=False,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = ConvNormAct(
in_features, hidden_features, 1,
bias=True, norm_layer=norm_layer, act_layer=act_layer)
if mid_conv:
self.mid = ConvNormAct(
hidden_features, hidden_features, 3,
groups=hidden_features, bias=True, norm_layer=norm_layer, act_layer=act_layer)
else:
self.mid = nn.Identity()
self.drop1 = nn.Dropout(drop)
self.fc2 = ConvNorm(hidden_features, out_features, 1, norm_layer=norm_layer)
self.drop2 = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.mid(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class EfficientFormerV2Block(nn.Module):
def __init__(
self,
dim,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
proj_drop=0.,
drop_path=0.,
layer_scale_init_value=1e-5,
resolution=7,
stride=None,
use_attn=True,
):
super().__init__()
if use_attn:
self.token_mixer = Attention2d(
dim,
resolution=resolution,
act_layer=act_layer,
stride=stride,
)
self.ls1 = LayerScale2d(
dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
else:
self.token_mixer = None
self.ls1 = None
self.drop_path1 = None
self.mlp = ConvMlpWithNorm(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
norm_layer=norm_layer,
drop=proj_drop,
mid_conv=True,
)
self.ls2 = LayerScale2d(
dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
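        # residual structure: optional 2d attention token mixer, then conv MLP, each wrapped in
        # layer scale and stochastic depth (drop path)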
if self.token_mixer is not None:
x = x + self.drop_path1(self.ls1(self.token_mixer(x)))
x = x + self.drop_path2(self.ls2(self.mlp(x)))
return x
class Stem4(nn.Sequential):
def __init__(self, in_chs, out_chs, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d):
super().__init__()
self.stride = 4
self.conv1 = ConvNormAct(
in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1, bias=True,
norm_layer=norm_layer, act_layer=act_layer
)
self.conv2 = ConvNormAct(
out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1, bias=True,
norm_layer=norm_layer, act_layer=act_layer
)
class EfficientFormerV2Stage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
resolution=7,
downsample=True,
block_stride=None,
downsample_use_attn=False,
block_use_attn=False,
num_vit=1,
mlp_ratio=4.,
proj_drop=.0,
drop_path=0.,
layer_scale_init_value=1e-5,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
):
super().__init__()
self.grad_checkpointing = False
mlp_ratio = to_ntuple(depth)(mlp_ratio)
resolution = to_2tuple(resolution)
if downsample:
self.downsample = Downsample(
dim,
dim_out,
use_attn=downsample_use_attn,
resolution=resolution,
norm_layer=norm_layer,
act_layer=act_layer,
)
dim = dim_out
resolution = tuple([math.ceil(r / 2) for r in resolution])
else:
assert dim == dim_out
self.downsample = nn.Identity()
blocks = []
for block_idx in range(depth):
remain_idx = depth - num_vit - 1
b = EfficientFormerV2Block(
dim,
resolution=resolution,
stride=block_stride,
mlp_ratio=mlp_ratio[block_idx],
use_attn=block_use_attn and block_idx > remain_idx,
proj_drop=proj_drop,
drop_path=drop_path[block_idx],
layer_scale_init_value=layer_scale_init_value,
act_layer=act_layer,
norm_layer=norm_layer,
)
blocks += [b]
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class EfficientFormerV2(nn.Module):
def __init__(
self,
depths,
in_chans=3,
img_size=224,
global_pool='avg',
embed_dims=None,
downsamples=None,
mlp_ratios=4,
norm_layer='batchnorm2d',
norm_eps=1e-5,
act_layer='gelu',
num_classes=1000,
drop_rate=0.,
proj_drop_rate=0.,
drop_path_rate=0.,
layer_scale_init_value=1e-5,
num_vit=0,
distillation=True,
):
super().__init__()
assert global_pool in ('avg', '')
self.num_classes = num_classes
self.global_pool = global_pool
self.feature_info = []
img_size = to_2tuple(img_size)
norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps)
act_layer = get_act_layer(act_layer)
self.stem = Stem4(in_chans, embed_dims[0], act_layer=act_layer, norm_layer=norm_layer)
prev_dim = embed_dims[0]
stride = 4
num_stages = len(depths)
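        # per-block stochastic depth rates increase linearly from 0 to drop_path_rate, split per stage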
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
downsamples = downsamples or (False,) + (True,) * (len(depths) - 1)
mlp_ratios = to_ntuple(num_stages)(mlp_ratios)
stages = []
for i in range(num_stages):
curr_resolution = tuple([math.ceil(s / stride) for s in img_size])
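            # with the default 4-stage configs, attention token mixers are only enabled in the last two
            # stages and the attention downsample only in the final stage; earlier, higher-resolution
            # stages stay purely convolutional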
stage = EfficientFormerV2Stage(
prev_dim,
embed_dims[i],
depth=depths[i],
resolution=curr_resolution,
downsample=downsamples[i],
block_stride=2 if i == 2 else None,
downsample_use_attn=i >= 3,
block_use_attn=i >= 2,
num_vit=num_vit,
mlp_ratio=mlp_ratios[i],
proj_drop=proj_drop_rate,
drop_path=dpr[i],
layer_scale_init_value=layer_scale_init_value,
act_layer=act_layer,
norm_layer=norm_layer,
)
if downsamples[i]:
stride *= 2
prev_dim = embed_dims[i]
self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{i}')]
stages.append(stage)
self.stages = nn.Sequential(*stages)
# Classifier head
self.num_features = embed_dims[-1]
self.norm = norm_layer(embed_dims[-1])
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
self.dist = distillation
if self.dist:
self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
else:
self.head_dist = None
self.apply(self.init_weights)
self.distilled_training = False
# init for classification
def init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {k for k, _ in self.named_parameters() if 'attention_biases' in k}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=(2, 3))
x = self.head_drop(x)
if pre_logits:
return x
x, x_dist = self.head(x), self.head_dist(x)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
            # during standard train / finetune and at inference time, average the two classifier predictions
return (x + x_dist) / 2
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True,
'crop_pct': .95, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'classifier': ('head', 'head_dist'), 'first_conv': 'stem.conv1.conv',
**kwargs
}
default_cfgs = generate_default_cfgs({
'efficientformerv2_s0.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformerv2_s1.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformerv2_s2.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformerv2_l.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
})
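# Example usage (a minimal sketch, assuming the timm package is installed and these entrypoints are
# registered via the decorators below):
#   import timm, torch
#   model = timm.create_model('efficientformerv2_s0', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)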
def _create_efficientformerv2(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
EfficientFormerV2, variant, pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
@register_model
def efficientformerv2_s0(pretrained=False, **kwargs) -> EfficientFormerV2:
model_args = dict(
depths=EfficientFormer_depth['S0'],
embed_dims=EfficientFormer_width['S0'],
num_vit=2,
drop_path_rate=0.0,
mlp_ratios=EfficientFormer_expansion_ratios['S0'],
)
return _create_efficientformerv2('efficientformerv2_s0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformerv2_s1(pretrained=False, **kwargs) -> EfficientFormerV2:
model_args = dict(
depths=EfficientFormer_depth['S1'],
embed_dims=EfficientFormer_width['S1'],
num_vit=2,
drop_path_rate=0.0,
mlp_ratios=EfficientFormer_expansion_ratios['S1'],
)
return _create_efficientformerv2('efficientformerv2_s1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformerv2_s2(pretrained=False, **kwargs) -> EfficientFormerV2:
model_args = dict(
depths=EfficientFormer_depth['S2'],
embed_dims=EfficientFormer_width['S2'],
num_vit=4,
drop_path_rate=0.02,
mlp_ratios=EfficientFormer_expansion_ratios['S2'],
)
return _create_efficientformerv2('efficientformerv2_s2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformerv2_l(pretrained=False, **kwargs) -> EfficientFormerV2:
model_args = dict(
depths=EfficientFormer_depth['L'],
embed_dims=EfficientFormer_width['L'],
num_vit=6,
drop_path_rate=0.1,
mlp_ratios=EfficientFormer_expansion_ratios['L'],
)
return _create_efficientformerv2('efficientformerv2_l', pretrained=pretrained, **dict(model_args, **kwargs))
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/volo.py | """ Vision OutLOoker (VOLO) implementation
Paper: `VOLO: Vision Outlooker for Visual Recognition` - https://arxiv.org/abs/2106.13112
Code adapted from official impl at https://github.com/sail-sg/volo, original copyright in comment below
Modifications and additions for timm by / Copyright 2022, Ross Wightman
"""
# Copyright 2021 Sea Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, Mlp, to_2tuple, to_ntuple, trunc_normal_
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['VOLO'] # model_registry will add each entrypoint fn to this
class OutlookAttention(nn.Module):
def __init__(
self,
dim,
num_heads,
kernel_size=3,
padding=1,
stride=1,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
head_dim = dim // num_heads
self.num_heads = num_heads
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
self.scale = head_dim ** -0.5
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride)
self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True)
def forward(self, x):
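        # outlook attention: per-location k*k attention weights are predicted directly from pooled
        # features with a linear layer, applied to unfolded local value windows, then folded back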
B, H, W, C = x.shape
v = self.v(x).permute(0, 3, 1, 2) # B, C, H, W
h, w = math.ceil(H / self.stride), math.ceil(W / self.stride)
v = self.unfold(v).reshape(
B, self.num_heads, C // self.num_heads,
self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) # B,H,N,kxk,C/H
attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
attn = self.attn(attn).reshape(
B, h * w, self.num_heads, self.kernel_size * self.kernel_size,
self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4) # B,H,N,kxk,kxk
attn = attn * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.kernel_size * self.kernel_size, h * w)
x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride)
x = self.proj(x.permute(0, 2, 3, 1))
x = self.proj_drop(x)
return x
class Outlooker(nn.Module):
def __init__(
self,
dim,
kernel_size,
padding,
stride=1,
num_heads=1,
mlp_ratio=3.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
qkv_bias=False,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = OutlookAttention(
dim,
num_heads,
kernel_size=kernel_size,
padding=padding,
stride=stride,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, H, W, C = x.shape
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, H, W, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Transformer(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ClassAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
head_dim=None,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
if head_dim is not None:
self.head_dim = head_dim
else:
head_dim = dim // num_heads
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.kv = nn.Linear(dim, self.head_dim * self.num_heads * 2, bias=qkv_bias)
self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(self.head_dim * self.num_heads, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
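        # class attention: only the class token (first token) forms the query, attending over all tokens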
B, N, C = x.shape
kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim)
attn = ((q * self.scale) @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, self.head_dim * self.num_heads)
cls_embed = self.proj(cls_embed)
cls_embed = self.proj_drop(cls_embed)
return cls_embed
class ClassBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
head_dim=None,
mlp_ratio=4.,
qkv_bias=False,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = ClassAttention(
dim,
num_heads=num_heads,
head_dim=head_dim,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=drop,
)
# NOTE: drop path for stochastic depth
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
def forward(self, x):
cls_embed = x[:, :1]
cls_embed = cls_embed + self.drop_path(self.attn(self.norm1(x)))
cls_embed = cls_embed + self.drop_path(self.mlp(self.norm2(cls_embed)))
return torch.cat([cls_embed, x[:, 1:]], dim=1)
def get_block(block_type, **kargs):
if block_type == 'ca':
return ClassBlock(**kargs)
def rand_bbox(size, lam, scale=1):
"""
    get a random bounding box, as used in token labeling (https://github.com/zihangJiang/TokenLabeling)
    return: bounding box coordinates (bbx1, bby1, bbx2, bby2)
"""
W = size[1] // scale
H = size[2] // scale
cut_rat = np.sqrt(1. - lam)
cut_w = (W * cut_rat).astype(int)
cut_h = (H * cut_rat).astype(int)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
class PatchEmbed(nn.Module):
""" Image to Patch Embedding.
    Unlike ViT, which uses a single conv layer, we use a multi-layer conv stem plus a projection conv for patch embedding
"""
def __init__(
self,
img_size=224,
stem_conv=False,
stem_stride=1,
patch_size=8,
in_chans=3,
hidden_dim=64,
embed_dim=384,
):
super().__init__()
assert patch_size in [4, 8, 16]
if stem_conv:
self.conv = nn.Sequential(
nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False), # 112x112
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
)
else:
self.conv = None
self.proj = nn.Conv2d(
hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride)
self.num_patches = (img_size // patch_size) * (img_size // patch_size)
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
x = self.proj(x) # B, C, H, W
return x
class Downsample(nn.Module):
""" Image to Patch Embedding, downsampling between stage1 and stage2
"""
def __init__(self, in_embed_dim, out_embed_dim, patch_size=2):
super().__init__()
self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
x = self.proj(x) # B, C, H, W
x = x.permute(0, 2, 3, 1)
return x
def outlooker_blocks(
block_fn,
index,
dim,
layers,
num_heads=1,
kernel_size=3,
padding=1,
stride=2,
mlp_ratio=3.,
qkv_bias=False,
attn_drop=0,
drop_path_rate=0.,
**kwargs,
):
"""
    generate the outlooker blocks for stage 1
    return: outlooker blocks wrapped in nn.Sequential
"""
blocks = []
for block_idx in range(layers[index]):
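        # drop path rate scales linearly with the global block index across all stages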
block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1)
blocks.append(block_fn(
dim,
kernel_size=kernel_size,
padding=padding,
stride=stride,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
drop_path=block_dpr,
))
blocks = nn.Sequential(*blocks)
return blocks
def transformer_blocks(
block_fn,
index,
dim,
layers,
num_heads,
mlp_ratio=3.,
qkv_bias=False,
attn_drop=0,
drop_path_rate=0.,
**kwargs,
):
"""
    generate the transformer blocks for stage 2
    return: transformer blocks wrapped in nn.Sequential
"""
blocks = []
for block_idx in range(layers[index]):
block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1)
blocks.append(block_fn(
dim,
num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
drop_path=block_dpr,
))
blocks = nn.Sequential(*blocks)
return blocks
class VOLO(nn.Module):
"""
Vision Outlooker, the main class of our model
"""
def __init__(
self,
layers,
img_size=224,
in_chans=3,
num_classes=1000,
global_pool='token',
patch_size=8,
stem_hidden_dim=64,
embed_dims=None,
num_heads=None,
downsamples=(True, False, False, False),
outlook_attention=(True, False, False, False),
mlp_ratio=3.0,
qkv_bias=False,
drop_rate=0.,
pos_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
post_layers=('ca', 'ca'),
use_aux_head=True,
use_mix_token=False,
pooling_scale=2,
):
super().__init__()
num_layers = len(layers)
mlp_ratio = to_ntuple(num_layers)(mlp_ratio)
img_size = to_2tuple(img_size)
self.num_classes = num_classes
self.global_pool = global_pool
self.mix_token = use_mix_token
self.pooling_scale = pooling_scale
self.num_features = embed_dims[-1]
if use_mix_token: # enable token mixing, see token labeling for details.
self.beta = 1.0
            assert global_pool == 'token', "mix_token requires global_pool='token' so all tokens are available for mixing"
self.grad_checkpointing = False
self.patch_embed = PatchEmbed(
stem_conv=True,
stem_stride=2,
patch_size=patch_size,
in_chans=in_chans,
hidden_dim=stem_hidden_dim,
embed_dim=embed_dims[0],
)
        # initial positional encoding, added to the tokens after the outlooker blocks
patch_grid = (img_size[0] // patch_size // pooling_scale, img_size[1] // patch_size // pooling_scale)
self.pos_embed = nn.Parameter(torch.zeros(1, patch_grid[0], patch_grid[1], embed_dims[-1]))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
# set the main block in network
network = []
for i in range(len(layers)):
if outlook_attention[i]:
# stage 1
stage = outlooker_blocks(
Outlooker,
i,
embed_dims[i],
layers,
num_heads[i],
mlp_ratio=mlp_ratio[i],
qkv_bias=qkv_bias,
attn_drop=attn_drop_rate,
norm_layer=norm_layer,
)
network.append(stage)
else:
# stage 2
stage = transformer_blocks(
Transformer,
i,
embed_dims[i],
layers,
num_heads[i],
mlp_ratio=mlp_ratio[i],
qkv_bias=qkv_bias,
drop_path_rate=drop_path_rate,
attn_drop=attn_drop_rate,
norm_layer=norm_layer,
)
network.append(stage)
if downsamples[i]:
# downsampling between two stages
network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2))
self.network = nn.ModuleList(network)
# set post block, for example, class attention layers
self.post_network = None
if post_layers is not None:
self.post_network = nn.ModuleList([
get_block(
post_layers[i],
dim=embed_dims[-1],
num_heads=num_heads[-1],
mlp_ratio=mlp_ratio[-1],
qkv_bias=qkv_bias,
attn_drop=attn_drop_rate,
drop_path=0.,
norm_layer=norm_layer)
for i in range(len(post_layers))
])
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1]))
trunc_normal_(self.cls_token, std=.02)
# set output type
if use_aux_head:
self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
else:
self.aux_head = None
self.norm = norm_layer(self.num_features)
# Classifier head
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[
(r'^network\.(\d+)\.(\d+)', None),
(r'^network\.(\d+)', (0,)),
],
blocks2=[
(r'^cls_token', (0,)),
(r'^post_network\.(\d+)', None),
(r'^norm', (99999,))
],
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
if self.aux_head is not None:
self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_tokens(self, x):
for idx, block in enumerate(self.network):
if idx == 2:
# add positional encoding after outlooker blocks
x = x + self.pos_embed
x = self.pos_drop(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(block, x)
else:
x = block(x)
B, H, W, C = x.shape
x = x.reshape(B, -1, C)
return x
def forward_cls(self, x):
B, N, C = x.shape
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat([cls_tokens, x], dim=1)
for block in self.post_network:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(block, x)
else:
x = block(x)
return x
def forward_train(self, x):
""" A separate forward fn for training with mix_token (if a train script supports).
Combining multiple modes in as single forward with different return types is torchscript hell.
"""
x = self.patch_embed(x)
x = x.permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C
# mix token, see token labeling for details.
if self.mix_token and self.training:
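            # MixToken: CutMix-style swap of a random rectangular token region with the flipped batch;
            # the box is returned so a token-labeling loss can mix targets the same way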
lam = np.random.beta(self.beta, self.beta)
patch_h, patch_w = x.shape[1] // self.pooling_scale, x.shape[2] // self.pooling_scale
bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam, scale=self.pooling_scale)
temp_x = x.clone()
sbbx1, sbby1 = self.pooling_scale * bbx1, self.pooling_scale * bby1
sbbx2, sbby2 = self.pooling_scale * bbx2, self.pooling_scale * bby2
temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :]
x = temp_x
else:
bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0
# step2: tokens learning in the two stages
x = self.forward_tokens(x)
# step3: post network, apply class attention or not
if self.post_network is not None:
x = self.forward_cls(x)
x = self.norm(x)
if self.global_pool == 'avg':
x_cls = x.mean(dim=1)
elif self.global_pool == 'token':
x_cls = x[:, 0]
else:
x_cls = x
if self.aux_head is None:
return x_cls
x_aux = self.aux_head(x[:, 1:]) # generate classes in all feature tokens, see token labeling
if not self.training:
return x_cls + 0.5 * x_aux.max(1)[0]
if self.mix_token and self.training: # reverse "mix token", see token labeling for details.
x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1])
temp_x = x_aux.clone()
temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :]
x_aux = temp_x
x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1])
# return these: 1. class token, 2. classes from all feature tokens, 3. bounding box
return x_cls, x_aux, (bbx1, bby1, bbx2, bby2)
def forward_features(self, x):
x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C
# step2: tokens learning in the two stages
x = self.forward_tokens(x)
# step3: post network, apply class attention or not
if self.post_network is not None:
x = self.forward_cls(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
out = x.mean(dim=1)
elif self.global_pool == 'token':
out = x[:, 0]
else:
out = x
x = self.head_drop(x)
if pre_logits:
return out
out = self.head(out)
if self.aux_head is not None:
# generate classes in all feature tokens, see token labeling
aux = self.aux_head(x[:, 1:])
out = out + 0.5 * aux.max(1)[0]
return out
def forward(self, x):
""" simplified forward (without mix token training) """
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_volo(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
return build_model_with_cfg(
VOLO,
variant,
pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .96, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv.0', 'classifier': ('head', 'aux_head'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'volo_d1_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_224_84.2.pth.tar',
crop_pct=0.96),
'volo_d1_384.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_384_85.2.pth.tar',
crop_pct=1.0, input_size=(3, 384, 384)),
'volo_d2_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_224_85.2.pth.tar',
crop_pct=0.96),
'volo_d2_384.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_384_86.0.pth.tar',
crop_pct=1.0, input_size=(3, 384, 384)),
'volo_d3_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_224_85.4.pth.tar',
crop_pct=0.96),
'volo_d3_448.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_448_86.3.pth.tar',
crop_pct=1.0, input_size=(3, 448, 448)),
'volo_d4_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_224_85.7.pth.tar',
crop_pct=0.96),
'volo_d4_448.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_448_86.79.pth.tar',
crop_pct=1.15, input_size=(3, 448, 448)),
'volo_d5_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_224_86.10.pth.tar',
crop_pct=0.96),
'volo_d5_448.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_448_87.0.pth.tar',
crop_pct=1.15, input_size=(3, 448, 448)),
'volo_d5_512.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_512_87.07.pth.tar',
crop_pct=1.15, input_size=(3, 512, 512)),
})
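# Example usage (a minimal sketch, assuming the timm package is installed and these entrypoints are
# registered via the decorators below):
#   import timm, torch
#   model = timm.create_model('volo_d1_224', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)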
@register_model
def volo_d1_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D1 model, Params: 27M """
model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs)
model = _create_volo('volo_d1_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d1_384(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D1 model, Params: 27M """
model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs)
model = _create_volo('volo_d1_384', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d2_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D2 model, Params: 59M """
model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs)
model = _create_volo('volo_d2_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d2_384(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D2 model, Params: 59M """
model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs)
model = _create_volo('volo_d2_384', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d3_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D3 model, Params: 86M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs)
model = _create_volo('volo_d3_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d3_448(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D3 model, Params: 86M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs)
model = _create_volo('volo_d3_448', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d4_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D4 model, Params: 193M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs)
model = _create_volo('volo_d4_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d4_448(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D4 model, Params: 193M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs)
model = _create_volo('volo_d4_448', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d5_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D5 model, Params: 296M
stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5
"""
model_args = dict(
layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16),
mlp_ratio=4, stem_hidden_dim=128, **kwargs)
model = _create_volo('volo_d5_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d5_448(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D5 model, Params: 296M
stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5
"""
model_args = dict(
layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16),
mlp_ratio=4, stem_hidden_dim=128, **kwargs)
model = _create_volo('volo_d5_448', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d5_512(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D5 model, Params: 296M
stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5
"""
model_args = dict(
layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16),
mlp_ratio=4, stem_hidden_dim=128, **kwargs)
model = _create_volo('volo_d5_512', pretrained=pretrained, **model_args)
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/nfnet.py | """ Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
Paper: `High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets
Status:
* These models are a work in progress, experiments ongoing.
* Pretrained weights for two models so far, more to come.
* Model details updated to closer match official JAX code now that it's released
* NF-ResNet, NF-RegNet-B, and NFNet-F models supported
Hacked together by / copyright Ross Wightman, 2021.
"""
from collections import OrderedDict
from dataclasses import dataclass, replace
from functools import partial
from typing import Callable, Tuple, Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame, \
get_act_layer, get_act_fn, get_attn, make_divisible
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['NormFreeNet', 'NfCfg'] # model_registry will add each entrypoint fn to this
@dataclass
class NfCfg:
depths: Tuple[int, int, int, int]
channels: Tuple[int, int, int, int]
alpha: float = 0.2
stem_type: str = '3x3'
stem_chs: Optional[int] = None
group_size: Optional[int] = None
attn_layer: Optional[str] = None
attn_kwargs: dict = None
attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used
width_factor: float = 1.0
bottle_ratio: float = 0.5
num_features: int = 0 # num out_channels for final conv, no final_conv if 0
ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal
reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle
extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models
gamma_in_act: bool = False
same_padding: bool = False
std_conv_eps: float = 1e-5
skipinit: bool = False # disabled by default, non-trivial performance impact
zero_init_fc: bool = False
act_layer: str = 'silu'
class GammaAct(nn.Module):
def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False):
super().__init__()
self.act_fn = get_act_fn(act_type)
self.gamma = gamma
self.inplace = inplace
def forward(self, x):
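        # gamma rescales the activation output to approximately restore unit variance (see _nonlin_gamma)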
return self.act_fn(x, inplace=self.inplace).mul_(self.gamma)
def act_with_gamma(act_type, gamma: float = 1.):
def _create(inplace=False):
return GammaAct(act_type, gamma=gamma, inplace=inplace)
return _create
class DownsampleAvg(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: int = 1,
first_dilation: Optional[int] = None,
conv_layer: Callable = ScaledStdConv2d,
):
""" AvgPool Downsampling as in 'D' ResNet variants. Support for dilation."""
super(DownsampleAvg, self).__init__()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = conv_layer(in_chs, out_chs, 1, stride=1)
def forward(self, x):
return self.conv(self.pool(x))
@register_notrace_module # reason: mul_ causes FX to drop a relevant node. https://github.com/pytorch/pytorch/issues/68301
class NormFreeBlock(nn.Module):
"""Normalization-Free pre-activation block.
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
stride: int = 1,
dilation: int = 1,
first_dilation: Optional[int] = None,
alpha: float = 1.0,
beta: float = 1.0,
bottle_ratio: float = 0.25,
group_size: Optional[int] = None,
ch_div: int = 1,
reg: bool = True,
extra_conv: bool = False,
skipinit: bool = False,
attn_layer: Optional[Callable] = None,
            attn_gain: float = 2.0,
act_layer: Optional[Callable] = None,
conv_layer: Callable = ScaledStdConv2d,
drop_path_rate: float = 0.,
):
super().__init__()
first_dilation = first_dilation or dilation
out_chs = out_chs or in_chs
# RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet
mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div)
groups = 1 if not group_size else mid_chs // group_size
if group_size and group_size % ch_div == 0:
            mid_chs = group_size * groups  # make mid_chs an exact multiple of group_size when group_size divides by ch_div (otherwise channel mismatch errors)
self.alpha = alpha
self.beta = beta
self.attn_gain = attn_gain
if in_chs != out_chs or stride != 1 or dilation != first_dilation:
self.downsample = DownsampleAvg(
in_chs,
out_chs,
stride=stride,
dilation=dilation,
first_dilation=first_dilation,
conv_layer=conv_layer,
)
else:
self.downsample = None
self.act1 = act_layer()
self.conv1 = conv_layer(in_chs, mid_chs, 1)
self.act2 = act_layer(inplace=True)
self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
if extra_conv:
self.act2b = act_layer(inplace=True)
self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups)
else:
self.act2b = None
self.conv2b = None
if reg and attn_layer is not None:
self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3
else:
self.attn = None
self.act3 = act_layer()
self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.)
if not reg and attn_layer is not None:
self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3
else:
self.attn_last = None
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None
def forward(self, x):
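        # NF residual: pre-activation is scaled by beta (1 / expected input std at this depth) and the
        # branch output by alpha before adding back to the shortcut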
out = self.act1(x) * self.beta
# shortcut branch
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(out)
# residual branch
out = self.conv1(out)
out = self.conv2(self.act2(out))
if self.conv2b is not None:
out = self.conv2b(self.act2b(out))
if self.attn is not None:
out = self.attn_gain * self.attn(out)
out = self.conv3(self.act3(out))
if self.attn_last is not None:
out = self.attn_gain * self.attn_last(out)
out = self.drop_path(out)
if self.skipinit_gain is not None:
out.mul_(self.skipinit_gain)
out = out * self.alpha + shortcut
return out
def create_stem(
in_chs: int,
out_chs: int,
stem_type: str = '',
conv_layer: Optional[Callable] = None,
act_layer: Optional[Callable] = None,
preact_feature: bool = True,
):
stem_stride = 2
stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv')
stem = OrderedDict()
assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool')
if 'deep' in stem_type:
if 'quad' in stem_type:
# 4 deep conv stack as in NFNet-F models
            assert 'pool' not in stem_type
stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs)
strides = (2, 1, 1, 2)
stem_stride = 4
stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3')
else:
if 'tiered' in stem_type:
stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py
else:
stem_chs = (out_chs // 2, out_chs // 2, out_chs) # 'D' ResNets
strides = (2, 1, 1)
stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2')
last_idx = len(stem_chs) - 1
for i, (c, s) in enumerate(zip(stem_chs, strides)):
stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s)
if i != last_idx:
stem[f'act{i + 2}'] = act_layer(inplace=True)
in_chs = c
elif '3x3' in stem_type:
# 3x3 stem conv as in RegNet
stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2)
else:
# 7x7 stem conv as in ResNet
stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2)
if 'pool' in stem_type:
stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1)
stem_stride = 4
return nn.Sequential(stem), stem_stride, stem_feature
# from https://github.com/deepmind/deepmind-research/tree/master/nfnets
_nonlin_gamma = dict(
identity=1.0,
celu=1.270926833152771,
elu=1.2716004848480225,
gelu=1.7015043497085571,
leaky_relu=1.70590341091156,
log_sigmoid=1.9193484783172607,
log_softmax=1.0002083778381348,
relu=1.7139588594436646,
relu6=1.7131484746932983,
selu=1.0008515119552612,
sigmoid=4.803835391998291,
silu=1.7881293296813965,
softsign=2.338853120803833,
softplus=1.9203323125839233,
tanh=1.5939117670059204,
)
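# Each constant approximates 1 / std(act(x)) for x ~ N(0, 1), i.e. the scalar needed to restore unit
# variance after the corresponding non-linearity.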
class NormFreeNet(nn.Module):
""" Normalization-Free Network
As described in :
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
and
`High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171
This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and
the (preact) ResNet models described earlier in the paper.
There are a few differences:
* channels are rounded to be divisible by 8 by default (keep tensor core kernels happy),
this changes channel dim and param counts slightly from the paper models
* activation correcting gamma constants are moved into the ScaledStdConv as it has less performance
impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl.
* a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but
apply it in each activation. This is slightly slower, numerically different, but matches official impl.
* skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput
for what it is/does. Approx 8-10% throughput loss.
"""
def __init__(
self,
cfg: NfCfg,
num_classes: int = 1000,
in_chans: int = 3,
global_pool: str = 'avg',
output_stride: int = 32,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
**kwargs,
):
"""
Args:
cfg: Model architecture configuration.
num_classes: Number of classifier classes.
in_chans: Number of input channels.
global_pool: Global pooling type.
output_stride: Output stride of network, one of (8, 16, 32).
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth drop-path rate.
**kwargs: Extra kwargs overlayed onto cfg.
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
cfg = replace(cfg, **kwargs)
assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})."
conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d
if cfg.gamma_in_act:
act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer])
conv_layer = partial(conv_layer, eps=cfg.std_conv_eps)
else:
act_layer = get_act_layer(cfg.act_layer)
conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps)
attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div)
self.stem, stem_stride, stem_feat = create_stem(
in_chans,
stem_chs,
cfg.stem_type,
conv_layer=conv_layer,
act_layer=act_layer,
)
self.feature_info = [stem_feat]
drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
prev_chs = stem_chs
net_stride = stem_stride
dilation = 1
expected_var = 1.0
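        # expected_var analytically tracks the variance of the residual stream so each block can be fed
        # beta = 1 / sqrt(expected_var)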
stages = []
for stage_idx, stage_depth in enumerate(cfg.depths):
stride = 1 if stage_idx == 0 and stem_stride > 2 else 2
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx in range(cfg.depths[stage_idx]):
first_block = block_idx == 0 and stage_idx == 0
out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div)
blocks += [NormFreeBlock(
in_chs=prev_chs, out_chs=out_chs,
alpha=cfg.alpha,
beta=1. / expected_var ** 0.5,
stride=stride if block_idx == 0 else 1,
dilation=dilation,
first_dilation=first_dilation,
group_size=cfg.group_size,
bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio,
ch_div=cfg.ch_div,
reg=cfg.reg,
extra_conv=cfg.extra_conv,
skipinit=cfg.skipinit,
attn_layer=attn_layer,
attn_gain=cfg.attn_gain,
act_layer=act_layer,
conv_layer=conv_layer,
drop_path_rate=drop_path_rates[stage_idx][block_idx],
)]
if block_idx == 0:
expected_var = 1. # expected var is reset after first block of each stage
expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance
first_dilation = dilation
prev_chs = out_chs
self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')]
stages += [nn.Sequential(*blocks)]
self.stages = nn.Sequential(*stages)
if cfg.num_features:
# The paper NFRegNet models have an EfficientNet-like final head convolution.
self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div)
self.final_conv = conv_layer(prev_chs, self.num_features, 1)
self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv')
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.final_act = act_layer(inplace=cfg.num_features > 0)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
for n, m in self.named_modules():
if 'fc' in n and isinstance(m, nn.Linear):
if cfg.zero_init_fc:
nn.init.zeros_(m.weight)
else:
nn.init.normal_(m.weight, 0., .01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear')
if m.bias is not None:
nn.init.zeros_(m.bias)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=[
(r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None),
(r'^final_conv', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
x = self.final_conv(x)
x = self.final_act(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _nfres_cfg(
depths,
channels=(256, 512, 1024, 2048),
group_size=None,
act_layer='relu',
attn_layer=None,
attn_kwargs=None,
):
attn_kwargs = attn_kwargs or {}
cfg = NfCfg(
depths=depths,
channels=channels,
stem_type='7x7_pool',
stem_chs=64,
bottle_ratio=0.25,
group_size=group_size,
act_layer=act_layer,
attn_layer=attn_layer,
attn_kwargs=attn_kwargs,
)
return cfg
def _nfreg_cfg(depths, channels=(48, 104, 208, 440)):
num_features = 1280 * channels[-1] // 440
attn_kwargs = dict(rd_ratio=0.5)
cfg = NfCfg(
depths=depths,
channels=channels,
stem_type='3x3',
group_size=8,
width_factor=0.75,
bottle_ratio=2.25,
num_features=num_features,
reg=True,
attn_layer='se',
attn_kwargs=attn_kwargs,
)
return cfg
def _nfnet_cfg(
depths,
channels=(256, 512, 1536, 1536),
group_size=128,
bottle_ratio=0.5,
feat_mult=2.,
act_layer='gelu',
attn_layer='se',
attn_kwargs=None,
):
num_features = int(channels[-1] * feat_mult)
attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5)
cfg = NfCfg(
depths=depths,
channels=channels,
stem_type='deep_quad',
stem_chs=128,
group_size=group_size,
bottle_ratio=bottle_ratio,
extra_conv=True,
num_features=num_features,
act_layer=act_layer,
attn_layer=attn_layer,
attn_kwargs=attn_kwargs,
)
return cfg
def _dm_nfnet_cfg(
depths,
channels=(256, 512, 1536, 1536),
act_layer='gelu',
skipinit=True,
):
cfg = NfCfg(
depths=depths,
channels=channels,
stem_type='deep_quad',
stem_chs=128,
group_size=128,
bottle_ratio=0.5,
extra_conv=True,
gamma_in_act=True,
same_padding=True,
skipinit=skipinit,
num_features=int(channels[-1] * 2.0),
act_layer=act_layer,
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.5),
)
return cfg
model_cfgs = dict(
# NFNet-F models w/ GELU compatible with DeepMind weights
dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)),
dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)),
dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)),
dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)),
dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)),
dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)),
dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)),
# NFNet-F models w/ GELU
nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)),
nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)),
nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)),
nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)),
nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)),
nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)),
nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)),
nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)),
# Experimental 'light' versions of NFNet-F that are little leaner, w/ SiLU act
nfnet_l0=_nfnet_cfg(
depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,
attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'),
eca_nfnet_l0=_nfnet_cfg(
depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,
attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
eca_nfnet_l1=_nfnet_cfg(
depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25,
attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
eca_nfnet_l2=_nfnet_cfg(
depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25,
attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
eca_nfnet_l3=_nfnet_cfg(
depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25,
attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
# EffNet influenced RegNet defs.
# NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8.
nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)),
nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)),
nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)),
nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)),
nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)),
nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)),
# ResNet (preact, D style deep stem/avg down) defs
nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)),
nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)),
nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)),
nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)),
nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)),
nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)),
nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()),
nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()),
nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()),
)
def _create_normfreenet(variant, pretrained=False, **kwargs):
model_cfg = model_cfgs[variant]
feature_cfg = dict(flatten_sequential=True)
return build_model_with_cfg(
NormFreeNet,
variant,
pretrained,
model_cfg=model_cfg,
feature_cfg=feature_cfg,
**kwargs,
)
def _dcfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'dm_nfnet_f0.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth',
pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9, crop_mode='squash'),
'dm_nfnet_f1.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth',
pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91, crop_mode='squash'),
'dm_nfnet_f2.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth',
pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92, crop_mode='squash'),
'dm_nfnet_f3.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth',
pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94, crop_mode='squash'),
'dm_nfnet_f4.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth',
pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951, crop_mode='squash'),
'dm_nfnet_f5.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth',
pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954, crop_mode='squash'),
'dm_nfnet_f6.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth',
pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956, crop_mode='squash'),
'nfnet_f0': _dcfg(
url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)),
'nfnet_f1': _dcfg(
url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)),
'nfnet_f2': _dcfg(
url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)),
'nfnet_f3': _dcfg(
url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)),
'nfnet_f4': _dcfg(
url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)),
'nfnet_f5': _dcfg(
url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)),
'nfnet_f6': _dcfg(
url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)),
'nfnet_f7': _dcfg(
url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)),
'nfnet_l0.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth',
pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0),
'eca_nfnet_l0.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth',
pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0),
'eca_nfnet_l1.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth',
pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), test_crop_pct=1.0),
'eca_nfnet_l2.ra3_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth',
pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), test_crop_pct=1.0),
'eca_nfnet_l3': _dcfg(
url='',
pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), test_crop_pct=1.0),
'nf_regnet_b0': _dcfg(
url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'),
'nf_regnet_b1.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth',
pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec
'nf_regnet_b2': _dcfg(
url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'),
'nf_regnet_b3': _dcfg(
url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'),
'nf_regnet_b4': _dcfg(
url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'),
'nf_regnet_b5': _dcfg(
url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'),
'nf_resnet26': _dcfg(url='', first_conv='stem.conv'),
'nf_resnet50.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth',
pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'),
'nf_resnet101': _dcfg(url='', first_conv='stem.conv'),
'nf_seresnet26': _dcfg(url='', first_conv='stem.conv'),
'nf_seresnet50': _dcfg(url='', first_conv='stem.conv'),
'nf_seresnet101': _dcfg(url='', first_conv='stem.conv'),
'nf_ecaresnet26': _dcfg(url='', first_conv='stem.conv'),
'nf_ecaresnet50': _dcfg(url='', first_conv='stem.conv'),
'nf_ecaresnet101': _dcfg(url='', first_conv='stem.conv'),
})
@register_model
def dm_nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F0 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F1 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F2 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F3 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F4 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F5 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F6 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F0
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F1
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F2
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F3
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F4
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F5
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F6
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f7(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F7
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs)
@register_model
def nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-L0b w/ SiLU
My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio
"""
return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs)
@register_model
def eca_nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet:
""" ECA-NFNet-L0 w/ SiLU
My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn
"""
return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs)
@register_model
def eca_nfnet_l1(pretrained=False, **kwargs) -> NormFreeNet:
""" ECA-NFNet-L1 w/ SiLU
My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn
"""
return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs)
@register_model
def eca_nfnet_l2(pretrained=False, **kwargs) -> NormFreeNet:
""" ECA-NFNet-L2 w/ SiLU
My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn
"""
return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs)
@register_model
def eca_nfnet_l3(pretrained=False, **kwargs) -> NormFreeNet:
""" ECA-NFNet-L3 w/ SiLU
My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn
"""
return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b0(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B0
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b1(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B1
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b2(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B2
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b3(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B3
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b4(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B4
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b5(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B5
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs)
@register_model
def nf_resnet26(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ResNet-26
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs)
@register_model
def nf_resnet50(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ResNet-50
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs)
@register_model
def nf_resnet101(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ResNet-101
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs)
@register_model
def nf_seresnet26(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free SE-ResNet26
"""
return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs)
@register_model
def nf_seresnet50(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free SE-ResNet50
"""
return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs)
@register_model
def nf_seresnet101(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free SE-ResNet101
"""
return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs)
@register_model
def nf_ecaresnet26(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ECA-ResNet26
"""
return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs)
@register_model
def nf_ecaresnet50(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ECA-ResNet50
"""
return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs)
@register_model
def nf_ecaresnet101(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ECA-ResNet101
"""
return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs)
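# Illustrative usage sketch (an editorial addition, not part of the original module): build one
# of the registered NFNet entrypoints defined above and run a dummy forward pass at the train
# resolution listed in its default cfg. Assumes enough memory for the chosen variant; no weights
# are downloaded with pretrained=False.
def _example_nfnet_forward():
    import torch  # local import to keep this sketch self-contained
    model = dm_nfnet_f0(pretrained=False)  # default cfg above lists 192x192 train input
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 192, 192))
    return logits.shape  # expected: torch.Size([1, 1000])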
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/registry.py | from ._registry import *
import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/features.py | from ._features import *
import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/nasnet.py | """ NasNet-A (Large)
nasnetalarge implementation grabbed from Cadene's pretrained models
https://github.com/Cadene/pretrained-models.pytorch
"""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['NASNetALarge']
class ActConvBn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''):
super(ActConvBn, self).__init__()
self.act = nn.ReLU()
self.conv = create_conv2d(
in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1)
def forward(self, x):
x = self.act(x)
x = self.conv(x)
x = self.bn(x)
return x
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''):
super(SeparableConv2d, self).__init__()
self.depthwise_conv2d = create_conv2d(
in_channels, in_channels, kernel_size=kernel_size,
stride=stride, padding=padding, groups=in_channels)
self.pointwise_conv2d = create_conv2d(
in_channels, out_channels, kernel_size=1, padding=0)
def forward(self, x):
x = self.depthwise_conv2d(x)
x = self.pointwise_conv2d(x)
return x
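# Illustrative sketch (editorial addition): a depthwise separable conv factors a dense KxK conv
# into a per-channel KxK depthwise conv followed by a 1x1 pointwise conv, so the channel count
# only changes in the pointwise step. The layer shape below is hypothetical, chosen just to show
# the tensor flow through SeparableConv2d.
def _example_separable_conv_shapes():
    m = SeparableConv2d(in_channels=8, out_channels=16, kernel_size=5, stride=1)
    y = m(torch.randn(1, 8, 32, 32))
    return y.shape  # expected: torch.Size([1, 16, 32, 32]) with the default symmetric padding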
class BranchSeparables(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False):
super(BranchSeparables, self).__init__()
middle_channels = out_channels if stem_cell else in_channels
self.act_1 = nn.ReLU()
self.separable_1 = SeparableConv2d(
in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type)
self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1)
self.act_2 = nn.ReLU(inplace=True)
self.separable_2 = SeparableConv2d(
middle_channels, out_channels, kernel_size, stride=1, padding=pad_type)
self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1)
def forward(self, x):
x = self.act_1(x)
x = self.separable_1(x)
x = self.bn_sep_1(x)
x = self.act_2(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
class CellStem0(nn.Module):
def __init__(self, stem_size, num_channels=42, pad_type=''):
super(CellStem0, self).__init__()
self.num_channels = num_channels
self.stem_size = stem_size
self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1)
self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type)
self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True)
self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True)
self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type)
self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)
def forward(self, x):
x1 = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x1)
x_comb_iter_0_right = self.comb_iter_0_right(x)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x1)
x_comb_iter_1_right = self.comb_iter_1_right(x)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x1)
x_comb_iter_2_right = self.comb_iter_2_right(x)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x1)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class CellStem1(nn.Module):
def __init__(self, stem_size, num_channels, pad_type=''):
super(CellStem1, self).__init__()
self.num_channels = num_channels
self.stem_size = stem_size
self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1)
self.act = nn.ReLU()
self.path_1 = nn.Sequential()
self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False))
self.path_2 = nn.Sequential()
self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1)))
self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False))
self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1)
self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type)
self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type)
self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type)
self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)
def forward(self, x_conv0, x_stem_0):
x_left = self.conv_1x1(x_stem_0)
x_relu = self.act(x_conv0)
# path 1
x_path1 = self.path_1(x_relu)
# path 2
x_path2 = self.path_2(x_relu)
# final path
x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
x_comb_iter_0_left = self.comb_iter_0_left(x_left)
x_comb_iter_0_right = self.comb_iter_0_right(x_right)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_right)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_left)
x_comb_iter_2_right = self.comb_iter_2_right(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_left)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class FirstCell(nn.Module):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(FirstCell, self).__init__()
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1)
self.act = nn.ReLU()
self.path_1 = nn.Sequential()
self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False))
self.path_2 = nn.Sequential()
self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1)))
self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False))
self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1)
self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type)
self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type)
self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
def forward(self, x, x_prev):
x_relu = self.act(x_prev)
x_path1 = self.path_1(x_relu)
x_path2 = self.path_2(x_relu)
x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_left
x_comb_iter_3_left = self.comb_iter_3_left(x_left)
x_comb_iter_3_right = self.comb_iter_3_right(x_left)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_right
x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class NormalCell(nn.Module):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(NormalCell, self).__init__()
self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type)
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type)
self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type)
self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type)
self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_left
x_comb_iter_3_left = self.comb_iter_3_left(x_left)
x_comb_iter_3_right = self.comb_iter_3_right(x_left)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_right
x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class ReductionCell0(nn.Module):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(ReductionCell0, self).__init__()
self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type)
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_left)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class ReductionCell1(nn.Module):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(ReductionCell1, self).__init__()
self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type)
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_left)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class NASNetALarge(nn.Module):
"""NASNetALarge (6 @ 4032) """
def __init__(
self,
num_classes=1000,
in_chans=3,
stem_size=96,
channel_multiplier=2,
num_features=4032,
output_stride=32,
drop_rate=0.,
global_pool='avg',
pad_type='same',
):
super(NASNetALarge, self).__init__()
self.num_classes = num_classes
self.stem_size = stem_size
self.num_features = num_features
self.channel_multiplier = channel_multiplier
assert output_stride == 32
channels = self.num_features // 24
# 24 is default value for the architecture
self.conv0 = ConvNormAct(
in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2,
norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False)
self.cell_stem_0 = CellStem0(
self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type)
self.cell_stem_1 = CellStem1(
self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type)
self.cell_0 = FirstCell(
in_chs_left=channels, out_chs_left=channels // 2,
in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_1 = NormalCell(
in_chs_left=2 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_2 = NormalCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_3 = NormalCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_4 = NormalCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_5 = NormalCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.reduction_cell_0 = ReductionCell0(
in_chs_left=6 * channels, out_chs_left=2 * channels,
in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_6 = FirstCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_7 = NormalCell(
in_chs_left=8 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_8 = NormalCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_9 = NormalCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_10 = NormalCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_11 = NormalCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.reduction_cell_1 = ReductionCell1(
in_chs_left=12 * channels, out_chs_left=4 * channels,
in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_12 = FirstCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_13 = NormalCell(
in_chs_left=16 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_14 = NormalCell(
in_chs_left=24 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_15 = NormalCell(
in_chs_left=24 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_16 = NormalCell(
in_chs_left=24 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_17 = NormalCell(
in_chs_left=24 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.act = nn.ReLU(inplace=True)
self.feature_info = [
dict(num_chs=96, reduction=2, module='conv0'),
dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'),
dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'),
dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'),
dict(num_chs=4032, reduction=32, module='act'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv0|cell_stem_[01]',
blocks=[
(r'^cell_(\d+)', None),
(r'^reduction_cell_0', (6,)),
(r'^reduction_cell_1', (12,)),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.last_linear
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x_conv0 = self.conv0(x)
x_stem_0 = self.cell_stem_0(x_conv0)
x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0)
x_cell_0 = self.cell_0(x_stem_1, x_stem_0)
x_cell_1 = self.cell_1(x_cell_0, x_stem_1)
x_cell_2 = self.cell_2(x_cell_1, x_cell_0)
x_cell_3 = self.cell_3(x_cell_2, x_cell_1)
x_cell_4 = self.cell_4(x_cell_3, x_cell_2)
x_cell_5 = self.cell_5(x_cell_4, x_cell_3)
x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4)
x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4)
x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0)
x_cell_8 = self.cell_8(x_cell_7, x_cell_6)
x_cell_9 = self.cell_9(x_cell_8, x_cell_7)
x_cell_10 = self.cell_10(x_cell_9, x_cell_8)
x_cell_11 = self.cell_11(x_cell_10, x_cell_9)
x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10)
x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10)
x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1)
x_cell_14 = self.cell_14(x_cell_13, x_cell_12)
x_cell_15 = self.cell_15(x_cell_14, x_cell_13)
x_cell_16 = self.cell_16(x_cell_15, x_cell_14)
x_cell_17 = self.cell_17(x_cell_16, x_cell_15)
x = self.act(x_cell_17)
return x
def forward_head(self, x):
x = self.global_pool(x)
x = self.head_drop(x)
x = self.last_linear(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_nasnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
NASNetALarge,
variant,
pretrained,
feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model
**kwargs,
)
default_cfgs = generate_default_cfgs({
'nasnetalarge.tf_in1k': {
'hf_hub_id': 'timm/',
'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nasnetalarge-dc4a7b8b.pth',
'input_size': (3, 331, 331),
'pool_size': (11, 11),
'crop_pct': 0.911,
'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5),
'std': (0.5, 0.5, 0.5),
'num_classes': 1000,
'first_conv': 'conv0.conv',
'classifier': 'last_linear',
},
})
@register_model
def nasnetalarge(pretrained=False, **kwargs) -> NASNetALarge:
"""NASNet-A large model architecture.
"""
model_kwargs = dict(pad_type='same', **kwargs)
return _create_nasnet('nasnetalarge', pretrained, **model_kwargs)
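# Illustrative usage sketch (editorial addition): instantiate the registered entrypoint and
# inspect the multi-scale feature taps declared in `feature_info`. pretrained=False skips the
# weight download; a real forward pass would expect 331x331 inputs per the default config.
def _example_nasnetalarge_feature_taps():
    model = nasnetalarge(pretrained=False)
    return [(f['module'], f['num_chs'], f['reduction']) for f in model.feature_info]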
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/res2net.py | """ Res2Net and Res2NeXt
Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/
Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
"""
import math
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .resnet import ResNet
__all__ = []
class Bottle2neck(nn.Module):
""" Res2Net/Res2NeXT Bottleneck
Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py
"""
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
cardinality=1,
base_width=26,
scale=4,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=None,
attn_layer=None,
**_,
):
super(Bottle2neck, self).__init__()
self.scale = scale
self.is_first = stride > 1 or downsample is not None
self.num_scales = max(1, scale - 1)
width = int(math.floor(planes * (base_width / 64.0))) * cardinality
self.width = width
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False)
self.bn1 = norm_layer(width * scale)
convs = []
bns = []
for i in range(self.num_scales):
convs.append(nn.Conv2d(
width, width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, bias=False))
bns.append(norm_layer(width))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
if self.is_first:
# FIXME this should probably have count_include_pad=False, but hurts original weights
self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
else:
self.pool = None
self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False)
self.bn3 = norm_layer(outplanes)
self.se = attn_layer(outplanes) if attn_layer is not None else None
self.relu = act_layer(inplace=True)
self.downsample = downsample
def zero_init_last(self):
if getattr(self.bn3, 'weight', None) is not None:
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
spo = []
sp = spx[0] # redundant, for torchscript
for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
if i == 0 or self.is_first:
sp = spx[i]
else:
sp = sp + spx[i]
sp = conv(sp)
sp = bn(sp)
sp = self.relu(sp)
spo.append(sp)
if self.scale > 1:
if self.pool is not None: # self.is_first == True, None check for torchscript
spo.append(self.pool(spx[-1]))
else:
spo.append(spx[-1])
out = torch.cat(spo, 1)
out = self.conv3(out)
out = self.bn3(out)
if self.se is not None:
out = self.se(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out += shortcut
out = self.relu(out)
return out
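# Illustrative sketch (editorial addition): the Res2Net bottleneck splits the 1x1-reduced
# features into `scale` groups of `width` channels; all but the last group get their own 3x3
# conv, with a running sum feeding each successive group. A minimal standalone block, assuming
# BatchNorm for norm_layer (the ResNet wrapper normally supplies this):
def _example_bottle2neck_forward():
    block = Bottle2neck(inplanes=256, planes=64, base_width=26, scale=4, norm_layer=nn.BatchNorm2d)
    out = block(torch.randn(2, 256, 8, 8))
    return block.width, block.num_scales, out.shape  # expected: (26, 3, torch.Size([2, 256, 8, 8]))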
def _create_res2net(variant, pretrained=False, **kwargs):
return build_model_with_cfg(ResNet, variant, pretrained, **kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'res2net50_26w_4s.in1k': _cfg(hf_hub_id='timm/'),
'res2net50_48w_2s.in1k': _cfg(hf_hub_id='timm/'),
'res2net50_14w_8s.in1k': _cfg(hf_hub_id='timm/'),
'res2net50_26w_6s.in1k': _cfg(hf_hub_id='timm/'),
'res2net50_26w_8s.in1k': _cfg(hf_hub_id='timm/'),
'res2net101_26w_4s.in1k': _cfg(hf_hub_id='timm/'),
'res2next50.in1k': _cfg(hf_hub_id='timm/'),
'res2net50d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'),
'res2net101d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'),
})
@register_model
def res2net50_26w_4s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 26w4s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4))
return _create_res2net('res2net50_26w_4s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net101_26w_4s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-101 26w4s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4))
return _create_res2net('res2net101_26w_4s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50_26w_6s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 26w6s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6))
return _create_res2net('res2net50_26w_6s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50_26w_8s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 26w8s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8))
return _create_res2net('res2net50_26w_8s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50_48w_2s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 48w2s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2))
return _create_res2net('res2net50_48w_2s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50_14w_8s(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Res2Net-50 14w8s model.
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8))
return _create_res2net('res2net50_14w_8s', pretrained, **dict(model_args, **kwargs))
@register_model
def res2next50(pretrained=False, **kwargs) -> ResNet:
"""Construct Res2NeXt-50 4s
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4))
return _create_res2net('res2next50', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net50d(pretrained=False, **kwargs) -> ResNet:
"""Construct Res2Net-50
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, stem_type='deep',
avg_down=True, stem_width=32, block_args=dict(scale=4))
return _create_res2net('res2net50d', pretrained, **dict(model_args, **kwargs))
@register_model
def res2net101d(pretrained=False, **kwargs) -> ResNet:
"""Construct Res2Net-50
"""
model_args = dict(
block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, stem_type='deep',
avg_down=True, stem_width=32, block_args=dict(scale=4))
return _create_res2net('res2net101d', pretrained, **dict(model_args, **kwargs))
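# Illustrative usage sketch (editorial addition): the entrypoints above return a standard timm
# ResNet built with Bottle2neck blocks, so the usual classification interface applies.
def _example_res2net_forward():
    model = res2net50_26w_4s(pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    return logits.shape  # expected: torch.Size([1, 1000])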
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_features.py | """ PyTorch Feature Extraction Helpers
A collection of classes, functions, modules to help extract features from models
and provide a common interface for describing them.
The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter
https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py
Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict, defaultdict
from copy import deepcopy
from functools import partial
from typing import Dict, List, Sequence, Tuple, Union
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from timm.layers import Format
__all__ = ['FeatureInfo', 'FeatureHooks', 'FeatureDictNet', 'FeatureListNet', 'FeatureHookNet']
class FeatureInfo:
def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]):
prev_reduction = 1
for i, fi in enumerate(feature_info):
# sanity check the mandatory fields, there may be additional fields depending on the model
assert 'num_chs' in fi and fi['num_chs'] > 0
assert 'reduction' in fi and fi['reduction'] >= prev_reduction
prev_reduction = fi['reduction']
assert 'module' in fi
fi.setdefault('index', i)
self.out_indices = out_indices
self.info = feature_info
def from_other(self, out_indices: Tuple[int]):
return FeatureInfo(deepcopy(self.info), out_indices)
def get(self, key, idx=None):
""" Get value by key at specified index (indices)
if idx == None, returns value for key at each output index
if idx is an integer, return value for that feature module index (ignoring output indices)
if idx is a list/tupple, return value for each module index (ignoring output indices)
"""
if idx is None:
return [self.info[i][key] for i in self.out_indices]
if isinstance(idx, (tuple, list)):
return [self.info[i][key] for i in idx]
else:
return self.info[idx][key]
def get_dicts(self, keys=None, idx=None):
""" return info dicts for specified keys (or all if None) at specified indices (or out_indices if None)
"""
if idx is None:
if keys is None:
return [self.info[i] for i in self.out_indices]
else:
return [{k: self.info[i][k] for k in keys} for i in self.out_indices]
if isinstance(idx, (tuple, list)):
return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx]
else:
return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys}
def channels(self, idx=None):
""" feature channels accessor
"""
return self.get('num_chs', idx)
def reduction(self, idx=None):
""" feature reduction (output stride) accessor
"""
return self.get('reduction', idx)
def module_name(self, idx=None):
""" feature module name accessor
"""
return self.get('module', idx)
def __getitem__(self, item):
return self.info[item]
def __len__(self):
return len(self.info)
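# Illustrative sketch (editorial addition): FeatureInfo is a thin view over the per-stage dicts
# a model declares, with the accessors filtered through `out_indices`. The stage metadata below
# is hypothetical; real models provide it via their `feature_info` attribute.
def _example_feature_info():
    info = FeatureInfo([
        dict(num_chs=64, reduction=4, module='layer1'),
        dict(num_chs=128, reduction=8, module='layer2'),
        dict(num_chs=256, reduction=16, module='layer3'),
    ], out_indices=(1, 2))
    return info.channels(), info.reduction(), info.module_name()
    # expected: ([128, 256], [8, 16], ['layer2', 'layer3'])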
class FeatureHooks:
""" Feature Hook Helper
This module helps with the setup and extraction of hooks for extracting features from
internal nodes in a model by node name.
FIXME This works well in eager Python but needs redesign for torchscript.
"""
def __init__(
self,
hooks: Sequence[str],
named_modules: dict,
out_map: Sequence[Union[int, str]] = None,
default_hook_type: str = 'forward',
):
# setup feature hooks
self._feature_outputs = defaultdict(OrderedDict)
modules = {k: v for k, v in named_modules}
for i, h in enumerate(hooks):
hook_name = h['module']
m = modules[hook_name]
hook_id = out_map[i] if out_map else hook_name
hook_fn = partial(self._collect_output_hook, hook_id)
hook_type = h.get('hook_type', default_hook_type)
if hook_type == 'forward_pre':
m.register_forward_pre_hook(hook_fn)
elif hook_type == 'forward':
m.register_forward_hook(hook_fn)
else:
assert False, "Unsupported hook type"
def _collect_output_hook(self, hook_id, *args):
x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre
if isinstance(x, tuple):
x = x[0] # unwrap input tuple
self._feature_outputs[x.device][hook_id] = x
def get_output(self, device) -> Dict[str, torch.tensor]:
output = self._feature_outputs[device]
self._feature_outputs[device] = OrderedDict() # clear after reading
return output
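# Illustrative sketch (editorial addition): FeatureHooks registers forward (or forward-pre)
# hooks by module name and caches the hooked tensors per device until read back via get_output.
def _example_feature_hooks():
    net = nn.Sequential(OrderedDict([
        ('conv', nn.Conv2d(3, 8, 3, padding=1)),
        ('act', nn.ReLU()),
    ]))
    hooks = FeatureHooks([dict(module='conv')], net.named_modules())
    x = torch.randn(1, 3, 16, 16)
    net(x)
    out = hooks.get_output(x.device)
    return out['conv'].shape  # expected: torch.Size([1, 8, 16, 16])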
def _module_list(module, flatten_sequential=False):
# a yield/iter would be better for this but wouldn't be compatible with torchscript
ml = []
for name, module in module.named_children():
if flatten_sequential and isinstance(module, nn.Sequential):
# first level of Sequential containers is flattened into containing model
for child_name, child_module in module.named_children():
combined = [name, child_name]
ml.append(('_'.join(combined), '.'.join(combined), child_module))
else:
ml.append((name, name, module))
return ml
def _get_feature_info(net, out_indices):
feature_info = getattr(net, 'feature_info')
if isinstance(feature_info, FeatureInfo):
return feature_info.from_other(out_indices)
elif isinstance(feature_info, (list, tuple)):
return FeatureInfo(net.feature_info, out_indices)
else:
assert False, "Provided feature_info is not valid"
def _get_return_layers(feature_info, out_map):
module_names = feature_info.module_name()
return_layers = {}
for i, name in enumerate(module_names):
return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i]
return return_layers
class FeatureDictNet(nn.ModuleDict):
""" Feature extractor with OrderedDict return
Wrap a model and extract features as specified by the out indices, the network is
partially re-built from contained modules.
There is a strong assumption that the modules have been registered into the model in the same
order as they are used. There should be no reuse of the same nn.Module more than once, including
    trivial modules like `self.relu = nn.ReLU()`.
Only submodules that are directly assigned to the model class (`model.feature1`) or at most
    one Sequential container deep (`model.features.1`, with flatten_sequential=True) can be captured.
All Sequential containers that are directly assigned to the original model will have their
modules assigned to this module with the name `model.features.1` being changed to `model.features_1`
"""
def __init__(
self,
model: nn.Module,
out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4),
out_map: Sequence[Union[int, str]] = None,
output_fmt: str = 'NCHW',
feature_concat: bool = False,
flatten_sequential: bool = False,
):
"""
Args:
model: Model from which to extract features.
out_indices: Output indices of the model features to extract.
out_map: Return id mapping for each output index, otherwise str(index) is used.
feature_concat: Concatenate intermediate features that are lists or tuples instead of selecting
first element e.g. `x[0]`
flatten_sequential: Flatten first two-levels of sequential modules in model (re-writes model modules)
"""
super(FeatureDictNet, self).__init__()
self.feature_info = _get_feature_info(model, out_indices)
self.output_fmt = Format(output_fmt)
self.concat = feature_concat
self.grad_checkpointing = False
self.return_layers = {}
return_layers = _get_return_layers(self.feature_info, out_map)
modules = _module_list(model, flatten_sequential=flatten_sequential)
remaining = set(return_layers.keys())
layers = OrderedDict()
for new_name, old_name, module in modules:
layers[new_name] = module
if old_name in remaining:
# return id has to be consistently str type for torchscript
self.return_layers[new_name] = str(return_layers[old_name])
remaining.remove(old_name)
if not remaining:
break
assert not remaining and len(self.return_layers) == len(return_layers), \
f'Return layers ({remaining}) are not present in model'
self.update(layers)
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
def _collect(self, x) -> (Dict[str, torch.Tensor]):
out = OrderedDict()
for i, (name, module) in enumerate(self.items()):
if self.grad_checkpointing and not torch.jit.is_scripting():
                # Skipping checkpoint of first module because we need a gradient at the input
# Skipping last because networks with in-place ops might fail w/ checkpointing enabled
# NOTE: first_or_last module could be static, but recalc in is_scripting guard to avoid jit issues
first_or_last_module = i == 0 or i == max(len(self) - 1, 0)
x = module(x) if first_or_last_module else checkpoint(module, x)
else:
x = module(x)
if name in self.return_layers:
out_id = self.return_layers[name]
if isinstance(x, (tuple, list)):
# If model tap is a tuple or list, concat or select first element
# FIXME this may need to be more generic / flexible for some nets
out[out_id] = torch.cat(x, 1) if self.concat else x[0]
else:
out[out_id] = x
return out
def forward(self, x) -> Dict[str, torch.Tensor]:
return self._collect(x)
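# Illustrative sketch (editorial addition): wrap a tiny hypothetical backbone whose stages are
# declared in `feature_info`; the wrapper re-registers the child modules and returns a dict
# keyed by the stringified out indices.
class _ToyBackbone(nn.Module):
    def __init__(self):
        super().__init__()
        self.stem = nn.Conv2d(3, 8, 3, stride=2, padding=1)
        self.stage1 = nn.Conv2d(8, 16, 3, stride=2, padding=1)
        self.feature_info = [
            dict(num_chs=8, reduction=2, module='stem'),
            dict(num_chs=16, reduction=4, module='stage1'),
        ]
    def forward(self, x):
        return self.stage1(self.stem(x))
def _example_feature_dict_net():
    features = FeatureDictNet(_ToyBackbone(), out_indices=(0, 1))
    out = features(torch.randn(1, 3, 32, 32))
    return {k: v.shape for k, v in out.items()}
    # expected: {'0': torch.Size([1, 8, 16, 16]), '1': torch.Size([1, 16, 8, 8])}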
class FeatureListNet(FeatureDictNet):
""" Feature extractor with list return
A specialization of FeatureDictNet that always returns features as a list (values() of dict).
"""
def __init__(
self,
model: nn.Module,
out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4),
output_fmt: str = 'NCHW',
feature_concat: bool = False,
flatten_sequential: bool = False,
):
"""
Args:
model: Model from which to extract features.
out_indices: Output indices of the model features to extract.
feature_concat: Concatenate intermediate features that are lists or tuples instead of selecting
first element e.g. `x[0]`
flatten_sequential: Flatten first two-levels of sequential modules in model (re-writes model modules)
"""
super().__init__(
model,
out_indices=out_indices,
output_fmt=output_fmt,
feature_concat=feature_concat,
flatten_sequential=flatten_sequential,
)
def forward(self, x) -> (List[torch.Tensor]):
return list(self._collect(x).values())
class FeatureHookNet(nn.ModuleDict):
""" FeatureHookNet
Wrap a model and extract features specified by the out indices using forward/forward-pre hooks.
If `no_rewrite` is True, features are extracted via hooks without modifying the underlying
network in any way.
    If `no_rewrite` is False, the model is re-written as in the FeatureList/FeatureDict case,
    folding first-level modules (and the second level of Sequential containers) into this one.
FIXME this does not currently work with Torchscript, see FeatureHooks class
"""
def __init__(
self,
model: nn.Module,
out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4),
out_map: Sequence[Union[int, str]] = None,
return_dict: bool = False,
output_fmt: str = 'NCHW',
no_rewrite: bool = False,
flatten_sequential: bool = False,
default_hook_type: str = 'forward',
):
"""
Args:
model: Model from which to extract features.
out_indices: Output indices of the model features to extract.
out_map: Return id mapping for each output index, otherwise str(index) is used.
return_dict: Output features as a dict.
no_rewrite: Enforce that model is not re-written if True, ie no modules are removed / changed.
flatten_sequential arg must also be False if this is set True.
flatten_sequential: Re-write modules by flattening first two levels of nn.Sequential containers.
default_hook_type: The default hook type to use if not specified in model.feature_info.
"""
super().__init__()
assert not torch.jit.is_scripting()
self.feature_info = _get_feature_info(model, out_indices)
self.return_dict = return_dict
self.output_fmt = Format(output_fmt)
self.grad_checkpointing = False
layers = OrderedDict()
hooks = []
if no_rewrite:
assert not flatten_sequential
if hasattr(model, 'reset_classifier'): # make sure classifier is removed?
model.reset_classifier(0)
layers['body'] = model
hooks.extend(self.feature_info.get_dicts())
else:
modules = _module_list(model, flatten_sequential=flatten_sequential)
remaining = {
f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type
for f in self.feature_info.get_dicts()
}
for new_name, old_name, module in modules:
layers[new_name] = module
for fn, fm in module.named_modules(prefix=old_name):
if fn in remaining:
hooks.append(dict(module=fn, hook_type=remaining[fn]))
del remaining[fn]
if not remaining:
break
assert not remaining, f'Return layers ({remaining}) are not present in model'
self.update(layers)
self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
def forward(self, x):
for i, (name, module) in enumerate(self.items()):
if self.grad_checkpointing and not torch.jit.is_scripting():
                # Skipping checkpoint of first module because we need a gradient at the input
# Skipping last because networks with in-place ops might fail w/ checkpointing enabled
# NOTE: first_or_last module could be static, but recalc in is_scripting guard to avoid jit issues
first_or_last_module = i == 0 or i == max(len(self) - 1, 0)
x = module(x) if first_or_last_module else checkpoint(module, x)
else:
x = module(x)
out = self.hooks.get_output(x.device)
return out if self.return_dict else list(out.values())
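# Illustrative sketch (editorial addition): the hook-based wrapper covers models whose feature
# taps cannot be exposed by module re-writing. With no_rewrite=True the toy backbone from the
# sketch above is kept intact and features are captured purely via forward hooks.
def _example_feature_hook_net():
    features = FeatureHookNet(_ToyBackbone(), out_indices=(0, 1), no_rewrite=True)
    out = features(torch.randn(1, 3, 32, 32))
    return [o.shape for o in out]
    # expected: [torch.Size([1, 8, 16, 16]), torch.Size([1, 16, 8, 8])]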
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/crossvit.py | """ CrossViT Model
@inproceedings{
chen2021crossvit,
title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}},
author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda},
booktitle={International Conference on Computer Vision (ICCV)},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.14899
Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py
NOTE: model names have been renamed from originals to represent actual input resolution, all *_224 -> *_240 and *_384 -> *_408
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Modified from timm: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
from functools import partial
from typing import List
from typing import Tuple
import torch
import torch.hub
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, trunc_normal_, _assert
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Block
__all__ = ['CrossVit'] # model_registry will add each entrypoint fn to this
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if multi_conv:
if patch_size[0] == 12:
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1),
)
elif patch_size[0] == 16:
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1),
)
else:
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
_assert(H == self.img_size[0],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
_assert(W == self.img_size[1],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class CrossAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
        # NOTE: the scale factor was wrong in my original version; it can be set manually to be compatible with prev weights
self.scale = head_dim ** -0.5
self.wq = nn.Linear(dim, dim, bias=qkv_bias)
self.wk = nn.Linear(dim, dim, bias=qkv_bias)
self.wv = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
# B1C -> B1H(C/H) -> BH1(C/H)
q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
# BNC -> BNH(C/H) -> BHN(C/H)
k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
# BNC -> BNH(C/H) -> BHN(C/H)
v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C
x = self.proj(x)
x = self.proj_drop(x)
return x
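# Shape sketch for CrossAttention (illustrative, sizes are assumptions): only the first (class)
# token is used as the query, so a (B, N, C) input yields a (B, 1, C) output.
#   >>> import torch
#   >>> attn = CrossAttention(dim=64, num_heads=8)
#   >>> attn(torch.randn(2, 197, 64)).shape   # torch.Size([2, 1, 64])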
class CrossAttentionBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = CrossAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x[:, 0:1, ...] + self.drop_path(self.attn(self.norm1(x)))
return x
class MultiScaleBlock(nn.Module):
def __init__(
self,
dim,
patches,
depth,
num_heads,
mlp_ratio,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
num_branches = len(dim)
self.num_branches = num_branches
        # different branches can have different embedding sizes; the first one is the base
self.blocks = nn.ModuleList()
for d in range(num_branches):
tmp = []
for i in range(depth[d]):
tmp.append(Block(
dim=dim[d],
num_heads=num_heads[d],
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i],
norm_layer=norm_layer,
))
if len(tmp) != 0:
self.blocks.append(nn.Sequential(*tmp))
if len(self.blocks) == 0:
self.blocks = None
self.projs = nn.ModuleList()
for d in range(num_branches):
            if dim[d] == dim[(d + 1) % num_branches] and False:  # NOTE: 'and False' disables the identity path, a projection is always used
tmp = [nn.Identity()]
else:
tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])]
self.projs.append(nn.Sequential(*tmp))
self.fusion = nn.ModuleList()
for d in range(num_branches):
d_ = (d + 1) % num_branches
nh = num_heads[d_]
            if depth[-1] == 0:  # backward compatibility
self.fusion.append(
CrossAttentionBlock(
dim=dim[d_],
num_heads=nh,
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[-1],
norm_layer=norm_layer,
))
else:
tmp = []
for _ in range(depth[-1]):
tmp.append(CrossAttentionBlock(
dim=dim[d_],
num_heads=nh,
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[-1],
norm_layer=norm_layer,
))
self.fusion.append(nn.Sequential(*tmp))
self.revert_projs = nn.ModuleList()
for d in range(num_branches):
if dim[(d + 1) % num_branches] == dim[d] and False:
tmp = [nn.Identity()]
else:
tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(),
nn.Linear(dim[(d + 1) % num_branches], dim[d])]
self.revert_projs.append(nn.Sequential(*tmp))
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
outs_b = []
for i, block in enumerate(self.blocks):
outs_b.append(block(x[i]))
# only take the cls token out
proj_cls_token = torch.jit.annotate(List[torch.Tensor], [])
for i, proj in enumerate(self.projs):
proj_cls_token.append(proj(outs_b[i][:, 0:1, ...]))
# cross attention
outs = []
for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)):
tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1)
tmp = fusion(tmp)
reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...])
tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1)
outs.append(tmp)
return outs
def _compute_num_patches(img_size, patches):
return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)]
@register_notrace_function
def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False): # annotations for torchscript
"""
Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing.
Args:
x (Tensor): input image
ss (tuple[int, int]): height and width to scale to
crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. Defaults to False
Returns:
Tensor: the "scaled" image batch tensor
"""
H, W = x.shape[-2:]
if H != ss[0] or W != ss[1]:
if crop_scale and ss[0] <= H and ss[1] <= W:
cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.))
x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]]
else:
x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False)
return x
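# Example (sketch, values are assumptions): with crop_scale=True and a smaller target size the
# image is center-cropped, otherwise it falls back to bicubic interpolation.
#   >>> import torch
#   >>> x = torch.randn(1, 3, 240, 240)
#   >>> scale_image(x, (224, 224), crop_scale=True).shape   # torch.Size([1, 3, 224, 224]) (cropped)
#   >>> scale_image(x, (256, 256), crop_scale=True).shape   # torch.Size([1, 3, 256, 256]) (interpolated)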
class CrossVit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size=224,
img_scale=(1.0, 1.0),
patch_size=(8, 16),
in_chans=3,
num_classes=1000,
embed_dim=(192, 384),
depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)),
num_heads=(6, 12),
mlp_ratio=(2., 2., 4.),
multi_conv=False,
crop_scale=False,
qkv_bias=True,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
global_pool='token',
):
super().__init__()
assert global_pool in ('token', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.img_size = to_2tuple(img_size)
img_scale = to_2tuple(img_scale)
self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale]
self.crop_scale = crop_scale # crop instead of interpolate for scale
num_patches = _compute_num_patches(self.img_size_scaled, patch_size)
self.num_branches = len(patch_size)
self.embed_dim = embed_dim
self.num_features = sum(embed_dim)
self.patch_embed = nn.ModuleList()
# hard-coded for torch jit script
for i in range(self.num_branches):
setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i])))
setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i])))
for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim):
self.patch_embed.append(
PatchEmbed(
img_size=im_s,
patch_size=p,
in_chans=in_chans,
embed_dim=d,
multi_conv=multi_conv,
))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
total_depth = sum([sum(x[-2:]) for x in depth])
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule
dpr_ptr = 0
self.blocks = nn.ModuleList()
for idx, block_cfg in enumerate(depth):
curr_depth = max(block_cfg[:-1]) + block_cfg[-1]
dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth]
blk = MultiScaleBlock(
embed_dim,
num_patches,
block_cfg,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr_,
norm_layer=norm_layer,
)
dpr_ptr += curr_depth
self.blocks.append(blk)
self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)])
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.ModuleList([
nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity()
for i in range(self.num_branches)])
for i in range(self.num_branches):
trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02)
trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
out = set()
for i in range(self.num_branches):
out.add(f'cls_token_{i}')
pe = getattr(self, f'pos_embed_{i}', None)
if pe is not None and pe.requires_grad:
out.add(f'pos_embed_{i}')
return out
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('token', 'avg')
self.global_pool = global_pool
self.head = nn.ModuleList(
[nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in
range(self.num_branches)])
def forward_features(self, x) -> List[torch.Tensor]:
B = x.shape[0]
xs = []
for i, patch_embed in enumerate(self.patch_embed):
x_ = x
ss = self.img_size_scaled[i]
x_ = scale_image(x_, ss, self.crop_scale)
x_ = patch_embed(x_)
cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script
cls_tokens = cls_tokens.expand(B, -1, -1)
x_ = torch.cat((cls_tokens, x_), dim=1)
pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script
x_ = x_ + pos_embed
x_ = self.pos_drop(x_)
xs.append(x_)
for i, blk in enumerate(self.blocks):
xs = blk(xs)
        # NOTE: this was before the branch-token section; moved here to ensure all branch tokens go through layer norm
xs = [norm(xs[i]) for i, norm in enumerate(self.norm)]
return xs
def forward_head(self, xs: List[torch.Tensor], pre_logits: bool = False) -> torch.Tensor:
xs = [x[:, 1:].mean(dim=1) for x in xs] if self.global_pool == 'avg' else [x[:, 0] for x in xs]
xs = [self.head_drop(x) for x in xs]
if pre_logits or isinstance(self.head[0], nn.Identity):
return torch.cat([x for x in xs], dim=1)
return torch.mean(torch.stack([head(xs[i]) for i, head in enumerate(self.head)], dim=0), dim=0)
def forward(self, x):
xs = self.forward_features(x)
x = self.forward_head(xs)
return x
def _create_crossvit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
def pretrained_filter_fn(state_dict):
new_state_dict = {}
for key in state_dict.keys():
if 'pos_embed' in key or 'cls_token' in key:
new_key = key.replace(".", "_")
else:
new_key = key
new_state_dict[new_key] = state_dict[key]
return new_state_dict
return build_model_with_cfg(
CrossVit,
variant,
pretrained,
pretrained_filter_fn=pretrained_filter_fn,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'),
'classifier': ('head.0', 'head.1'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'crossvit_15_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_15_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_15_dagger_408.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
),
'crossvit_18_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_18_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_18_dagger_408.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
),
'crossvit_9_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_9_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_base_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_small_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_tiny_240.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def crossvit_tiny_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[3, 3], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_small_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[6, 6], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_base_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[12, 12], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_9_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
        num_heads=[7, 7], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_9_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_dagger_408(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_dagger_408(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/focalnet.py | """ FocalNet
As described in `Focal Modulation Networks` - https://arxiv.org/abs/2203.11926
Significant modifications and refactoring from the original impl at https://github.com/microsoft/FocalNet
This impl is/has:
* fully convolutional, NCHW tensor layout throughout, seemed to have minimal performance impact but more flexible
* re-ordered downsample / layer so that striding always at beginning of layer (stage)
* no input size constraints or input resolution/H/W tracking through the model
* torchscript fixed and a number of quirks cleaned up
* feature extraction support via `features_only=True`
"""
# --------------------------------------------------------
# FocalNets -- Focal Modulation Networks
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Jianwei Yang (jianwyan@microsoft.com)
# --------------------------------------------------------
from functools import partial
from typing import Callable, Optional, Tuple
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, DropPath, LayerNorm2d, trunc_normal_, ClassifierHead, NormMlpClassifierHead
from ._builder import build_model_with_cfg
from ._manipulate import named_apply
from ._registry import generate_default_cfgs, register_model
__all__ = ['FocalNet']
class FocalModulation(nn.Module):
def __init__(
self,
dim: int,
focal_window,
focal_level: int,
focal_factor: int = 2,
bias: bool = True,
use_post_norm: bool = False,
normalize_modulator: bool = False,
proj_drop: float = 0.,
norm_layer: Callable = LayerNorm2d,
):
super().__init__()
self.dim = dim
self.focal_window = focal_window
self.focal_level = focal_level
self.focal_factor = focal_factor
self.use_post_norm = use_post_norm
self.normalize_modulator = normalize_modulator
self.input_split = [dim, dim, self.focal_level + 1]
self.f = nn.Conv2d(dim, 2 * dim + (self.focal_level + 1), kernel_size=1, bias=bias)
self.h = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)
self.act = nn.GELU()
self.proj = nn.Conv2d(dim, dim, kernel_size=1)
self.proj_drop = nn.Dropout(proj_drop)
self.focal_layers = nn.ModuleList()
self.kernel_sizes = []
for k in range(self.focal_level):
kernel_size = self.focal_factor * k + self.focal_window
self.focal_layers.append(nn.Sequential(
nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=dim, padding=kernel_size // 2, bias=False),
nn.GELU(),
))
self.kernel_sizes.append(kernel_size)
self.norm = norm_layer(dim) if self.use_post_norm else nn.Identity()
def forward(self, x):
# pre linear projection
x = self.f(x)
q, ctx, gates = torch.split(x, self.input_split, 1)
        # context aggregation
ctx_all = 0
for l, focal_layer in enumerate(self.focal_layers):
ctx = focal_layer(ctx)
ctx_all = ctx_all + ctx * gates[:, l:l + 1]
ctx_global = self.act(ctx.mean((2, 3), keepdim=True))
ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:]
# normalize context
if self.normalize_modulator:
ctx_all = ctx_all / (self.focal_level + 1)
# focal modulation
x_out = q * self.h(ctx_all)
x_out = self.norm(x_out)
# post linear projection
x_out = self.proj(x_out)
x_out = self.proj_drop(x_out)
return x_out
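# Shape sketch (illustrative, sizes are assumptions): focal modulation preserves the NCHW shape.
#   >>> import torch
#   >>> m = FocalModulation(dim=64, focal_window=3, focal_level=2)
#   >>> m(torch.randn(1, 64, 28, 28)).shape   # torch.Size([1, 64, 28, 28])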
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class FocalNetBlock(nn.Module):
""" Focal Modulation Network Block.
"""
def __init__(
self,
dim: int,
mlp_ratio: float = 4.,
focal_level: int = 1,
focal_window: int = 3,
use_post_norm: bool = False,
use_post_norm_in_modulation: bool = False,
normalize_modulator: bool = False,
layerscale_value: float = 1e-4,
proj_drop: float = 0.,
drop_path: float = 0.,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm2d,
):
"""
Args:
dim: Number of input channels.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
focal_level: Number of focal levels.
focal_window: Focal window size at first focal level.
use_post_norm: Whether to use layer norm after modulation.
use_post_norm_in_modulation: Whether to use layer norm in modulation.
layerscale_value: Initial layerscale value.
proj_drop: Dropout rate.
drop_path: Stochastic depth rate.
act_layer: Activation layer.
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.mlp_ratio = mlp_ratio
self.focal_window = focal_window
self.focal_level = focal_level
self.use_post_norm = use_post_norm
self.norm1 = norm_layer(dim) if not use_post_norm else nn.Identity()
self.modulation = FocalModulation(
dim,
focal_window=focal_window,
focal_level=self.focal_level,
use_post_norm=use_post_norm_in_modulation,
normalize_modulator=normalize_modulator,
proj_drop=proj_drop,
norm_layer=norm_layer,
)
self.norm1_post = norm_layer(dim) if use_post_norm else nn.Identity()
self.ls1 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim) if not use_post_norm else nn.Identity()
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
use_conv=True,
)
self.norm2_post = norm_layer(dim) if use_post_norm else nn.Identity()
self.ls2 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
# Focal Modulation
x = self.norm1(x)
x = self.modulation(x)
x = self.norm1_post(x)
x = shortcut + self.drop_path1(self.ls1(x))
# FFN
x = x + self.drop_path2(self.ls2(self.norm2_post(self.mlp(self.norm2(x)))))
return x
class FocalNetStage(nn.Module):
""" A basic Focal Transformer layer for one stage.
"""
def __init__(
self,
dim: int,
out_dim: int,
depth: int,
mlp_ratio: float = 4.,
downsample: bool = True,
focal_level: int = 1,
focal_window: int = 1,
use_overlap_down: bool = False,
use_post_norm: bool = False,
use_post_norm_in_modulation: bool = False,
normalize_modulator: bool = False,
layerscale_value: float = 1e-4,
proj_drop: float = 0.,
drop_path: float = 0.,
norm_layer: Callable = LayerNorm2d,
):
"""
Args:
dim: Number of input channels.
out_dim: Number of output channels.
depth: Number of blocks.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
downsample: Downsample layer at start of the layer.
focal_level: Number of focal levels
focal_window: Focal window size at first focal level
            use_overlap_down: Use overlapping convolution in the downsample layer.
use_post_norm: Whether to use layer norm after modulation.
use_post_norm_in_modulation: Whether to use layer norm in modulation.
layerscale_value: Initial layerscale value
proj_drop: Dropout rate for projections.
drop_path: Stochastic depth rate.
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.depth = depth
self.grad_checkpointing = False
if downsample:
self.downsample = Downsample(
in_chs=dim,
out_chs=out_dim,
stride=2,
overlap=use_overlap_down,
norm_layer=norm_layer,
)
else:
self.downsample = nn.Identity()
# build blocks
self.blocks = nn.ModuleList([
FocalNetBlock(
dim=out_dim,
mlp_ratio=mlp_ratio,
focal_level=focal_level,
focal_window=focal_window,
use_post_norm=use_post_norm,
use_post_norm_in_modulation=use_post_norm_in_modulation,
normalize_modulator=normalize_modulator,
layerscale_value=layerscale_value,
proj_drop=proj_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)])
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x):
x = self.downsample(x)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
return x
class Downsample(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 4,
overlap: bool = False,
norm_layer: Optional[Callable] = None,
):
"""
Args:
in_chs: Number of input image channels.
out_chs: Number of linear projection output channels.
stride: Downsample stride.
overlap: Use overlapping convolutions if True.
norm_layer: Normalization layer.
"""
super().__init__()
self.stride = stride
padding = 0
kernel_size = stride
if overlap:
assert stride in (2, 4)
if stride == 4:
kernel_size, padding = 7, 2
elif stride == 2:
kernel_size, padding = 3, 1
self.proj = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm = norm_layer(out_chs) if norm_layer is not None else nn.Identity()
def forward(self, x):
x = self.proj(x)
x = self.norm(x)
return x
class FocalNet(nn.Module):
"""" Focal Modulation Networks (FocalNets)
"""
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 96,
depths: Tuple[int, ...] = (2, 2, 6, 2),
mlp_ratio: float = 4.,
focal_levels: Tuple[int, ...] = (2, 2, 2, 2),
focal_windows: Tuple[int, ...] = (3, 3, 3, 3),
use_overlap_down: bool = False,
use_post_norm: bool = False,
use_post_norm_in_modulation: bool = False,
normalize_modulator: bool = False,
head_hidden_size: Optional[int] = None,
head_init_scale: float = 1.0,
layerscale_value: Optional[float] = None,
drop_rate: bool = 0.,
proj_drop_rate: bool = 0.,
drop_path_rate: bool = 0.1,
norm_layer: Callable = partial(LayerNorm2d, eps=1e-5),
):
"""
Args:
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
embed_dim: Patch embedding dimension.
depths: Depth of each Focal Transformer layer.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
focal_levels: How many focal levels at all stages. Note that this excludes the finest-grain level.
focal_windows: The focal window size at all stages.
use_overlap_down: Whether to use convolutional embedding.
            use_post_norm: Whether to use layernorm after modulation (it helps stabilize training of large models)
layerscale_value: Value for layer scale.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
norm_layer: Normalization layer.
"""
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.num_features = embed_dim[-1]
self.feature_info = []
self.stem = Downsample(
in_chs=in_chans,
out_chs=embed_dim[0],
overlap=use_overlap_down,
norm_layer=norm_layer,
)
in_dim = embed_dim[0]
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
layers = []
for i_layer in range(self.num_layers):
out_dim = embed_dim[i_layer]
layer = FocalNetStage(
dim=in_dim,
out_dim=out_dim,
depth=depths[i_layer],
mlp_ratio=mlp_ratio,
downsample=i_layer > 0,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_overlap_down=use_overlap_down,
use_post_norm=use_post_norm,
use_post_norm_in_modulation=use_post_norm_in_modulation,
normalize_modulator=normalize_modulator,
layerscale_value=layerscale_value,
proj_drop=proj_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
)
in_dim = out_dim
layers += [layer]
self.feature_info += [dict(num_chs=out_dim, reduction=4 * 2 ** i_layer, module=f'layers.{i_layer}')]
self.layers = nn.Sequential(*layers)
if head_hidden_size:
self.norm = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
hidden_size=head_hidden_size,
pool_type=global_pool,
drop_rate=drop_rate,
norm_layer=norm_layer,
)
else:
self.norm = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate
)
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def no_weight_decay(self):
return {''}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=[
(r'^layers\.(\d+)', None),
(r'^norm', (99999,))
] if coarse else [
(r'^layers\.(\d+).downsample', (0,)),
(r'^layers\.(\d+)\.\w+\.(\d+)', None),
(r'^norm', (99999,)),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for l in self.layers:
l.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.layers(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
if name and 'head.fc' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.proj', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
"focalnet_tiny_srf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_small_srf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_base_srf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_tiny_lrf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_small_lrf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_base_lrf.ms_in1k": _cfg(
hf_hub_id='timm/'),
"focalnet_large_fl3.ms_in22k": _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
"focalnet_large_fl4.ms_in22k": _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
"focalnet_xlarge_fl3.ms_in22k": _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
"focalnet_xlarge_fl4.ms_in22k": _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
"focalnet_huge_fl3.ms_in22k": _cfg(
hf_hub_id='timm/',
num_classes=21842),
"focalnet_huge_fl4.ms_in22k": _cfg(
hf_hub_id='timm/',
num_classes=0),
})
def checkpoint_filter_fn(state_dict, model: FocalNet):
state_dict = state_dict.get('model', state_dict)
if 'stem.proj.weight' in state_dict:
return state_dict
import re
out_dict = {}
dest_dict = model.state_dict()
for k, v in state_dict.items():
k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k)
k = k.replace('patch_embed', 'stem')
k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k)
if 'norm' in k and k not in dest_dict:
k = re.sub(r'norm([0-9])', r'norm\1_post', k)
k = k.replace('ln.', 'norm.')
k = k.replace('head', 'head.fc')
if k in dest_dict and dest_dict[k].numel() == v.numel() and dest_dict[k].shape != v.shape:
v = v.reshape(dest_dict[k].shape)
out_dict[k] = v
return out_dict
def _create_focalnet(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
FocalNet, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
@register_model
def focalnet_tiny_srf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, **kwargs)
return _create_focalnet('focalnet_tiny_srf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_small_srf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, **kwargs)
return _create_focalnet('focalnet_small_srf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_base_srf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, **kwargs)
return _create_focalnet('focalnet_base_srf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_tiny_lrf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs)
return _create_focalnet('focalnet_tiny_lrf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_small_lrf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs)
return _create_focalnet('focalnet_small_lrf', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_base_lrf(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, focal_levels=[3, 3, 3, 3], **kwargs)
return _create_focalnet('focalnet_base_lrf', pretrained=pretrained, **model_kwargs)
# FocalNet large+ models
@register_model
def focalnet_large_fl3(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4,
use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_large_fl3', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_large_fl4(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[4, 4, 4, 4],
use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_large_fl4', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_xlarge_fl3(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4,
use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_xlarge_fl3', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_xlarge_fl4(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[4, 4, 4, 4],
use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_xlarge_fl4', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_huge_fl3(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[3, 3, 3, 3], focal_windows=[3] * 4,
use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_huge_fl3', pretrained=pretrained, **model_kwargs)
@register_model
def focalnet_huge_fl4(pretrained=False, **kwargs) -> FocalNet:
model_kwargs = dict(
depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[4, 4, 4, 4],
use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
return _create_focalnet('focalnet_huge_fl4', pretrained=pretrained, **model_kwargs)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/ghostnet.py | """
An implementation of GhostNet & GhostNetV2 Models as defined in:
GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907
GhostNetV2: Enhance Cheap Operation with Long-Range Attention. https://proceedings.neurips.cc/paper_files/paper/2022/file/40b60852a4abdaa696b5a1a78da34635-Paper-Conference.pdf
The train script & model code are at:
Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch
Original model: https://github.com/huawei-noah/Efficient-AI-Backbones/blob/master/ghostnetv2_pytorch/model/ghostnetv2_torch.py
"""
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, Linear, make_divisible
from ._builder import build_model_with_cfg
from ._efficientnet_blocks import SqueezeExcite, ConvBnAct
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['GhostNet']
_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4))
class GhostModule(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=1,
ratio=2,
dw_size=3,
stride=1,
use_act=True,
act_layer=nn.ReLU,
):
super(GhostModule, self).__init__()
self.out_chs = out_chs
init_chs = math.ceil(out_chs / ratio)
new_chs = init_chs * (ratio - 1)
self.primary_conv = nn.Sequential(
nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
nn.BatchNorm2d(init_chs),
act_layer(inplace=True) if use_act else nn.Identity(),
)
self.cheap_operation = nn.Sequential(
nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False),
nn.BatchNorm2d(new_chs),
act_layer(inplace=True) if use_act else nn.Identity(),
)
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
return out[:, :self.out_chs, :, :]
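# Shape sketch (illustrative, sizes are assumptions): for ratio=2, half of the output channels
# come from the primary 1x1 conv and the rest from the "cheap" depthwise conv, concatenated
# and sliced to out_chs.
#   >>> import torch
#   >>> m = GhostModule(in_chs=16, out_chs=32)
#   >>> m(torch.randn(1, 16, 56, 56)).shape   # torch.Size([1, 32, 56, 56])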
class GhostModuleV2(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=1,
ratio=2,
dw_size=3,
stride=1,
use_act=True,
act_layer=nn.ReLU,
):
super().__init__()
self.gate_fn = nn.Sigmoid()
self.out_chs = out_chs
init_chs = math.ceil(out_chs / ratio)
new_chs = init_chs * (ratio - 1)
self.primary_conv = nn.Sequential(
nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
nn.BatchNorm2d(init_chs),
act_layer(inplace=True) if use_act else nn.Identity(),
)
self.cheap_operation = nn.Sequential(
nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size // 2, groups=init_chs, bias=False),
nn.BatchNorm2d(new_chs),
act_layer(inplace=True) if use_act else nn.Identity(),
)
self.short_conv = nn.Sequential(
nn.Conv2d(in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False),
nn.BatchNorm2d(out_chs),
nn.Conv2d(out_chs, out_chs, kernel_size=(1, 5), stride=1, padding=(0, 2), groups=out_chs, bias=False),
nn.BatchNorm2d(out_chs),
nn.Conv2d(out_chs, out_chs, kernel_size=(5, 1), stride=1, padding=(2, 0), groups=out_chs, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
res = self.short_conv(F.avg_pool2d(x, kernel_size=2, stride=2))
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
return out[:, :self.out_chs, :, :] * F.interpolate(
self.gate_fn(res), size=(out.shape[-2], out.shape[-1]), mode='nearest')
class GhostBottleneck(nn.Module):
""" Ghost bottleneck w/ optional SE"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
act_layer=nn.ReLU,
se_ratio=0.,
mode='original',
):
super(GhostBottleneck, self).__init__()
has_se = se_ratio is not None and se_ratio > 0.
self.stride = stride
# Point-wise expansion
if mode == 'original':
self.ghost1 = GhostModule(in_chs, mid_chs, use_act=True, act_layer=act_layer)
else:
self.ghost1 = GhostModuleV2(in_chs, mid_chs, use_act=True, act_layer=act_layer)
# Depth-wise convolution
if self.stride > 1:
self.conv_dw = nn.Conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False)
self.bn_dw = nn.BatchNorm2d(mid_chs)
else:
self.conv_dw = None
self.bn_dw = None
# Squeeze-and-excitation
self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None
# Point-wise linear projection
self.ghost2 = GhostModule(mid_chs, out_chs, use_act=False)
# shortcut
if in_chs == out_chs and self.stride == 1:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False),
nn.BatchNorm2d(in_chs),
nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
shortcut = x
# 1st ghost bottleneck
x = self.ghost1(x)
# Depth-wise convolution
if self.conv_dw is not None:
x = self.conv_dw(x)
x = self.bn_dw(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# 2nd ghost bottleneck
x = self.ghost2(x)
x += self.shortcut(shortcut)
return x
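# Shape sketch (illustrative values): with stride=2 the bottleneck halves the spatial size and
# the shortcut path downsamples/projects to match the output channels.
#   >>> import torch
#   >>> blk = GhostBottleneck(in_chs=16, mid_chs=48, out_chs=24, stride=2)
#   >>> blk(torch.randn(1, 16, 56, 56)).shape   # torch.Size([1, 24, 28, 28])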
class GhostNet(nn.Module):
def __init__(
self,
cfgs,
num_classes=1000,
width=1.0,
in_chans=3,
output_stride=32,
global_pool='avg',
drop_rate=0.2,
version='v1',
):
super(GhostNet, self).__init__()
# setting of inverted residual blocks
assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported'
self.cfgs = cfgs
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
# building first layer
stem_chs = make_divisible(16 * width, 4)
self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False)
        self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module='conv_stem'))
self.bn1 = nn.BatchNorm2d(stem_chs)
self.act1 = nn.ReLU(inplace=True)
prev_chs = stem_chs
# building inverted residual blocks
stages = nn.ModuleList([])
stage_idx = 0
layer_idx = 0
net_stride = 2
for cfg in self.cfgs:
layers = []
s = 1
for k, exp_size, c, se_ratio, s in cfg:
out_chs = make_divisible(c * width, 4)
mid_chs = make_divisible(exp_size * width, 4)
layer_kwargs = {}
if version == 'v2' and layer_idx > 1:
layer_kwargs['mode'] = 'attn'
layers.append(GhostBottleneck(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, **layer_kwargs))
prev_chs = out_chs
layer_idx += 1
if s > 1:
net_stride *= 2
self.feature_info.append(dict(
num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}'))
stages.append(nn.Sequential(*layers))
stage_idx += 1
out_chs = make_divisible(exp_size * width, 4)
stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1)))
self.pool_dim = prev_chs = out_chs
self.blocks = nn.Sequential(*stages)
# building last several layers
self.num_features = out_chs = 1280
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True)
self.act2 = nn.ReLU(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity()
# FIXME init
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv_stem|bn1',
blocks=[
(r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None),
(r'conv_head', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
# cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x):
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.classifier(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model: nn.Module):
out_dict = {}
for k, v in state_dict.items():
if 'total' in k:
continue
out_dict[k] = v
return out_dict
def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs):
"""
Constructs a GhostNet model
"""
cfgs = [
# k, t, c, SE, s
# stage1
[[3, 16, 16, 0, 1]],
# stage2
[[3, 48, 24, 0, 2]],
[[3, 72, 24, 0, 1]],
# stage3
[[5, 72, 40, 0.25, 2]],
[[5, 120, 40, 0.25, 1]],
# stage4
[[3, 240, 80, 0, 2]],
[[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 0.25, 1],
[3, 672, 112, 0.25, 1]
],
# stage5
[[5, 672, 160, 0.25, 2]],
[[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1]
]
]
model_kwargs = dict(
cfgs=cfgs,
width=width,
**kwargs,
)
return build_model_with_cfg(
GhostNet,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True),
**model_kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'ghostnet_050.untrained': _cfg(),
'ghostnet_100.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'
),
'ghostnet_130.untrained': _cfg(),
'ghostnetv2_100.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_10.pth.tar'
),
'ghostnetv2_130.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_13.pth.tar'
),
'ghostnetv2_160.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_16.pth.tar'
),
})
@register_model
def ghostnet_050(pretrained=False, **kwargs) -> GhostNet:
""" GhostNet-0.5x """
model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def ghostnet_100(pretrained=False, **kwargs) -> GhostNet:
""" GhostNet-1.0x """
model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def ghostnet_130(pretrained=False, **kwargs) -> GhostNet:
""" GhostNet-1.3x """
model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def ghostnetv2_100(pretrained=False, **kwargs) -> GhostNet:
""" GhostNetV2-1.0x """
model = _create_ghostnet('ghostnetv2_100', width=1.0, pretrained=pretrained, version='v2', **kwargs)
return model
@register_model
def ghostnetv2_130(pretrained=False, **kwargs) -> GhostNet:
""" GhostNetV2-1.3x """
model = _create_ghostnet('ghostnetv2_130', width=1.3, pretrained=pretrained, version='v2', **kwargs)
return model
@register_model
def ghostnetv2_160(pretrained=False, **kwargs) -> GhostNet:
""" GhostNetV2-1.6x """
model = _create_ghostnet('ghostnetv2_160', width=1.6, pretrained=pretrained, version='v2', **kwargs)
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/rexnet.py | """ ReXNet
A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` -
https://arxiv.org/abs/2007.00992
Adapted from original impl at https://github.com/clovaai/rexnet
Copyright (c) 2020-present NAVER Corp. MIT license
Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman
Copyright 2020 Ross Wightman
"""
from functools import partial
from math import ceil
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule
from ._builder import build_model_with_cfg
from ._efficientnet_builder import efficientnet_init_weights
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['RexNet'] # model_registry will add each entrypoint fn to this
SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d)
class LinearBottleneck(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride,
dilation=(1, 1),
exp_ratio=1.0,
se_ratio=0.,
ch_div=1,
act_layer='swish',
dw_act_layer='relu6',
drop_path=None,
):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and dilation[0] == dilation[1] and in_chs <= out_chs
self.in_channels = in_chs
self.out_channels = out_chs
if exp_ratio != 1.:
dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div)
self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer)
else:
dw_chs = in_chs
self.conv_exp = None
self.conv_dw = ConvNormAct(
dw_chs,
dw_chs,
kernel_size=3,
stride=stride,
dilation=dilation[0],
groups=dw_chs,
apply_act=False,
)
if se_ratio > 0:
self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div))
else:
self.se = None
self.act_dw = create_act_layer(dw_act_layer)
self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False)
self.drop_path = drop_path
def feat_channels(self, exp=False):
return self.conv_dw.out_channels if exp else self.out_channels
def forward(self, x):
shortcut = x
if self.conv_exp is not None:
x = self.conv_exp(x)
x = self.conv_dw(x)
if self.se is not None:
x = self.se(x)
x = self.act_dw(x)
x = self.conv_pwl(x)
if self.use_shortcut:
if self.drop_path is not None:
x = self.drop_path(x)
x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1)
return x
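# Shape sketch (illustrative, sizes are assumptions): when the shortcut applies (stride 1,
# in_chs <= out_chs), only the first in_chs output channels receive the residual; the remaining
# channels are passed through unchanged.
#   >>> import torch
#   >>> blk = LinearBottleneck(in_chs=16, out_chs=32, stride=1, exp_ratio=6.0)
#   >>> blk(torch.randn(1, 16, 56, 56)).shape   # torch.Size([1, 32, 56, 56])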
def _block_cfg(
width_mult=1.0,
depth_mult=1.0,
initial_chs=16,
final_chs=180,
se_ratio=0.,
ch_div=1,
):
layers = [1, 2, 2, 3, 3, 5]
strides = [1, 2, 2, 2, 1, 2]
layers = [ceil(element * depth_mult) for element in layers]
strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], [])
exp_ratios = [1] * layers[0] + [6] * sum(layers[1:])
depth = sum(layers[:]) * 3
base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs
    # The following channel configuration is a simple scheme that makes each layer an expansion layer.
out_chs_list = []
for i in range(depth // 3):
out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div))
base_chs += final_chs / (depth // 3 * 1.0)
se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:])
return list(zip(out_chs_list, exp_ratios, strides, se_ratios))
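# Example (sketch, assumes the defaults used by rexnet_100): the config is a list of
# (out_chs, exp_ratio, stride, se_ratio) tuples, one per block, with channels growing roughly
# linearly from initial_chs towards initial_chs + final_chs.
#   >>> cfg = _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=1/12., ch_div=1)
#   >>> len(cfg), cfg[0]   # (16, (16, 1, 1, 0.0))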
def _build_blocks(
block_cfg,
prev_chs,
width_mult,
ch_div=1,
output_stride=32,
act_layer='swish',
dw_act_layer='relu6',
drop_path_rate=0.,
):
feat_chs = [prev_chs]
feature_info = []
curr_stride = 2
dilation = 1
features = []
num_blocks = len(block_cfg)
for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg):
next_dilation = dilation
if stride > 1:
fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}'
feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)]
if curr_stride >= output_stride:
next_dilation = dilation * stride
stride = 1
block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule
drop_path = DropPath(block_dpr) if block_dpr > 0. else None
features.append(LinearBottleneck(
in_chs=prev_chs,
out_chs=chs,
exp_ratio=exp_ratio,
stride=stride,
dilation=(dilation, next_dilation),
se_ratio=se_ratio,
ch_div=ch_div,
act_layer=act_layer,
dw_act_layer=dw_act_layer,
drop_path=drop_path,
))
curr_stride *= stride
dilation = next_dilation
prev_chs = chs
feat_chs += [features[-1].feat_channels()]
pen_chs = make_divisible(1280 * width_mult, divisor=ch_div)
feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')]
features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer))
return features, feature_info
class RexNet(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
initial_chs=16,
final_chs=180,
width_mult=1.0,
depth_mult=1.0,
se_ratio=1/12.,
ch_div=1,
act_layer='swish',
dw_act_layer='relu6',
drop_rate=0.2,
drop_path_rate=0.,
):
super(RexNet, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
assert output_stride in (32, 16, 8)
stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32
stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div)
self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer)
block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div)
features, self.feature_info = _build_blocks(
block_cfg,
stem_chs,
width_mult,
ch_div,
output_stride,
act_layer,
dw_act_layer,
drop_path_rate,
)
self.num_features = features[-1].out_channels
self.features = nn.Sequential(*features)
self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate)
efficientnet_init_weights(self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^features\.(\d+)',
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.features, x, flatten=True)
else:
x = self.features(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_rexnet(variant, pretrained, **kwargs):
feature_cfg = dict(flatten_sequential=True)
return build_model_with_cfg(
RexNet,
variant,
pretrained,
feature_cfg=feature_cfg,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
'rexnet_100.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_130.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_150.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_200.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_300.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnetr_100.untrained': _cfg(),
'rexnetr_130.untrained': _cfg(),
'rexnetr_150.untrained': _cfg(),
'rexnetr_200.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_300.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_200.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_300.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
})
@register_model
def rexnet_100(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.0x"""
return _create_rexnet('rexnet_100', pretrained, **kwargs)
@register_model
def rexnet_130(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.3x"""
return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs)
@register_model
def rexnet_150(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.5x"""
return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs)
@register_model
def rexnet_200(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 2.0x"""
return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs)
@register_model
def rexnet_300(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 3.0x"""
return _create_rexnet('rexnet_300', pretrained, width_mult=3.0, **kwargs)
@register_model
def rexnetr_100(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.0x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs)
@register_model
def rexnetr_130(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.3x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs)
@register_model
def rexnetr_150(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.5x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs)
@register_model
def rexnetr_200(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 2.0x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs)
@register_model
def rexnetr_300(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 3.0x w/ rounded (mod 16) channels"""
return _create_rexnet('rexnetr_300', pretrained, width_mult=3.0, ch_div=16, **kwargs)
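# Usage sketch (illustrative addition, not part of the original file): the @register_model
# functions above expose the variants through the timm factory; assumes timm is installed.
if __name__ == '__main__':
    import timm
    _model = timm.create_model('rexnet_100', pretrained=False).eval()
    with torch.no_grad():
        _logits = _model(torch.randn(1, 3, 224, 224))  # (1, 1000) class logits
    print(_logits.shape)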
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/repvit.py | """ RepViT
Paper: `RepViT: Revisiting Mobile CNN From ViT Perspective`
- https://arxiv.org/abs/2307.09283
@misc{wang2023repvit,
title={RepViT: Revisiting Mobile CNN From ViT Perspective},
author={Ao Wang and Hui Chen and Zijia Lin and Hengjun Pu and Guiguang Ding},
year={2023},
eprint={2307.09283},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
Adapted from official impl at https://github.com/jameslahm/RepViT
"""
__all__ = ['RepVit']
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from ._registry import register_model, generate_default_cfgs
from ._builder import build_model_with_cfg
from timm.layers import SqueezeExcite, trunc_normal_, to_ntuple, to_2tuple
from ._manipulate import checkpoint_seq
import torch
class ConvNorm(nn.Sequential):
def __init__(self, in_dim, out_dim, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.add_module('c', nn.Conv2d(in_dim, out_dim, ks, stride, pad, dilation, groups, bias=False))
self.add_module('bn', nn.BatchNorm2d(out_dim))
nn.init.constant_(self.bn.weight, bn_weight_init)
nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self):
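        # Fold the BatchNorm into the conv so inference needs a single Conv2d:
        # w' = w * gamma / sqrt(running_var + eps), b' = beta - running_mean * gamma / sqrt(running_var + eps)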
c, bn = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
m = nn.Conv2d(
w.size(1) * self.c.groups,
w.size(0),
w.shape[2:],
stride=self.c.stride,
padding=self.c.padding,
dilation=self.c.dilation,
groups=self.c.groups,
device=c.weight.device,
)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class NormLinear(nn.Sequential):
def __init__(self, in_dim, out_dim, bias=True, std=0.02):
super().__init__()
self.add_module('bn', nn.BatchNorm1d(in_dim))
self.add_module('l', nn.Linear(in_dim, out_dim, bias=bias))
trunc_normal_(self.l.weight, std=std)
if bias:
nn.init.constant_(self.l.bias, 0)
@torch.no_grad()
def fuse(self):
bn, l = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5
w = l.weight * w[None, :]
if l.bias is None:
b = b @ self.l.weight.T
else:
b = (l.weight @ b[:, None]).view(-1) + self.l.bias
m = nn.Linear(w.size(1), w.size(0), device=l.weight.device)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class RepVggDw(nn.Module):
def __init__(self, ed, kernel_size, legacy=False):
super().__init__()
self.conv = ConvNorm(ed, ed, kernel_size, 1, (kernel_size - 1) // 2, groups=ed)
if legacy:
self.conv1 = ConvNorm(ed, ed, 1, 1, 0, groups=ed)
# Make torchscript happy.
self.bn = nn.Identity()
else:
self.conv1 = nn.Conv2d(ed, ed, 1, 1, 0, groups=ed)
self.bn = nn.BatchNorm2d(ed)
self.dim = ed
self.legacy = legacy
def forward(self, x):
return self.bn(self.conv(x) + self.conv1(x) + x)
@torch.no_grad()
def fuse(self):
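        # Re-parameterize the three parallel depthwise branches (3x3 conv + BN, 1x1 conv, identity)
        # into one 3x3 depthwise conv: pad the 1x1 kernel and an identity kernel to 3x3, sum the
        # weights and biases, then fold the trailing BatchNorm (non-legacy path) into the result.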
conv = self.conv.fuse()
if self.legacy:
conv1 = self.conv1.fuse()
else:
conv1 = self.conv1
conv_w = conv.weight
conv_b = conv.bias
conv1_w = conv1.weight
conv1_b = conv1.bias
conv1_w = nn.functional.pad(conv1_w, [1, 1, 1, 1])
identity = nn.functional.pad(
torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device), [1, 1, 1, 1]
)
final_conv_w = conv_w + conv1_w + identity
final_conv_b = conv_b + conv1_b
conv.weight.data.copy_(final_conv_w)
conv.bias.data.copy_(final_conv_b)
if not self.legacy:
bn = self.bn
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = conv.weight * w[:, None, None, None]
b = bn.bias + (conv.bias - bn.running_mean) * bn.weight / (bn.running_var + bn.eps) ** 0.5
conv.weight.data.copy_(w)
conv.bias.data.copy_(b)
return conv
class RepVitMlp(nn.Module):
def __init__(self, in_dim, hidden_dim, act_layer):
super().__init__()
self.conv1 = ConvNorm(in_dim, hidden_dim, 1, 1, 0)
self.act = act_layer()
self.conv2 = ConvNorm(hidden_dim, in_dim, 1, 1, 0, bn_weight_init=0)
def forward(self, x):
return self.conv2(self.act(self.conv1(x)))
class RepViTBlock(nn.Module):
def __init__(self, in_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy=False):
super(RepViTBlock, self).__init__()
self.token_mixer = RepVggDw(in_dim, kernel_size, legacy)
self.se = SqueezeExcite(in_dim, 0.25) if use_se else nn.Identity()
self.channel_mixer = RepVitMlp(in_dim, in_dim * mlp_ratio, act_layer)
def forward(self, x):
x = self.token_mixer(x)
x = self.se(x)
identity = x
x = self.channel_mixer(x)
return identity + x
class RepVitStem(nn.Module):
def __init__(self, in_chs, out_chs, act_layer):
super().__init__()
self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1)
self.act1 = act_layer()
self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1)
self.stride = 4
def forward(self, x):
return self.conv2(self.act1(self.conv1(x)))
class RepVitDownsample(nn.Module):
def __init__(self, in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy=False):
super().__init__()
self.pre_block = RepViTBlock(in_dim, mlp_ratio, kernel_size, use_se=False, act_layer=act_layer, legacy=legacy)
self.spatial_downsample = ConvNorm(in_dim, in_dim, kernel_size, 2, (kernel_size - 1) // 2, groups=in_dim)
self.channel_downsample = ConvNorm(in_dim, out_dim, 1, 1)
self.ffn = RepVitMlp(out_dim, out_dim * mlp_ratio, act_layer)
def forward(self, x):
x = self.pre_block(x)
x = self.spatial_downsample(x)
x = self.channel_downsample(x)
identity = x
x = self.ffn(x)
return x + identity
class RepVitClassifier(nn.Module):
def __init__(self, dim, num_classes, distillation=False, drop=0.0):
super().__init__()
self.head_drop = nn.Dropout(drop)
self.head = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity()
self.distillation = distillation
self.distilled_training = False
self.num_classes = num_classes
if distillation:
self.head_dist = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x):
x = self.head_drop(x)
if self.distillation:
x1, x2 = self.head(x), self.head_dist(x)
if self.training and self.distilled_training and not torch.jit.is_scripting():
return x1, x2
else:
return (x1 + x2) / 2
else:
x = self.head(x)
return x
@torch.no_grad()
def fuse(self):
        if self.num_classes <= 0:
return nn.Identity()
head = self.head.fuse()
if self.distillation:
head_dist = self.head_dist.fuse()
head.weight += head_dist.weight
head.bias += head_dist.bias
head.weight /= 2
head.bias /= 2
return head
else:
return head
class RepVitStage(nn.Module):
def __init__(self, in_dim, out_dim, depth, mlp_ratio, act_layer, kernel_size=3, downsample=True, legacy=False):
super().__init__()
if downsample:
self.downsample = RepVitDownsample(in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy)
else:
assert in_dim == out_dim
self.downsample = nn.Identity()
blocks = []
use_se = True
for _ in range(depth):
blocks.append(RepViTBlock(out_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy))
use_se = not use_se
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class RepVit(nn.Module):
def __init__(
self,
in_chans=3,
img_size=224,
embed_dim=(48,),
depth=(2,),
mlp_ratio=2,
global_pool='avg',
kernel_size=3,
num_classes=1000,
act_layer=nn.GELU,
distillation=True,
drop_rate=0.0,
legacy=False,
):
super(RepVit, self).__init__()
self.grad_checkpointing = False
self.global_pool = global_pool
self.embed_dim = embed_dim
self.num_classes = num_classes
in_dim = embed_dim[0]
self.stem = RepVitStem(in_chans, in_dim, act_layer)
stride = self.stem.stride
resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))])
num_stages = len(embed_dim)
mlp_ratios = to_ntuple(num_stages)(mlp_ratio)
self.feature_info = []
stages = []
for i in range(num_stages):
            downsample = i != 0
stages.append(
RepVitStage(
in_dim,
embed_dim[i],
depth[i],
mlp_ratio=mlp_ratios[i],
act_layer=act_layer,
kernel_size=kernel_size,
downsample=downsample,
legacy=legacy,
)
)
stage_stride = 2 if downsample else 1
stride *= stage_stride
resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution])
self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')]
in_dim = embed_dim[i]
self.stages = nn.Sequential(*stages)
self.num_features = embed_dim[-1]
self.head_drop = nn.Dropout(drop_rate)
self.head = RepVitClassifier(embed_dim[-1], num_classes, distillation)
@torch.jit.ignore
def group_matcher(self, coarse=False):
        matcher = dict(stem=r'^stem', blocks=r'^stages\.(\d+)')  # group params by stem and stage index
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None, distillation=False):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = (
RepVitClassifier(self.embed_dim[-1], num_classes, distillation) if num_classes > 0 else nn.Identity()
)
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.head.distilled_training = enable
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean((2, 3), keepdim=False)
        x = self.head_drop(x)
        return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
@torch.no_grad()
def fuse(self):
def fuse_children(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
fused = child.fuse()
setattr(net, child_name, fused)
fuse_children(fused)
else:
fuse_children(child)
fuse_children(self)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': (7, 7),
'crop_pct': 0.95,
'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1.c',
'classifier': ('head.head.l', 'head.head_dist.l'),
**kwargs,
}
default_cfgs = generate_default_cfgs(
{
'repvit_m1.dist_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m2.dist_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m3.dist_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m0_9.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m0_9.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_0.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_0.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_1.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_1.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_5.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_5.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m2_3.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m2_3.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
}
)
def _create_repvit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
RepVit,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
@register_model
def repvit_m1(pretrained=False, **kwargs):
"""
Constructs a RepViT-M1 model
"""
model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2), legacy=True)
return _create_repvit('repvit_m1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m2(pretrained=False, **kwargs):
"""
Constructs a RepViT-M2 model
"""
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2), legacy=True)
return _create_repvit('repvit_m2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m3(pretrained=False, **kwargs):
"""
Constructs a RepViT-M3 model
"""
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 18, 2), legacy=True)
return _create_repvit('repvit_m3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m0_9(pretrained=False, **kwargs):
"""
Constructs a RepViT-M0.9 model
"""
model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2))
return _create_repvit('repvit_m0_9', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m1_0(pretrained=False, **kwargs):
"""
Constructs a RepViT-M1.0 model
"""
model_args = dict(embed_dim=(56, 112, 224, 448), depth=(2, 2, 14, 2))
return _create_repvit('repvit_m1_0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m1_1(pretrained=False, **kwargs):
"""
Constructs a RepViT-M1.1 model
"""
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2))
return _create_repvit('repvit_m1_1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m1_5(pretrained=False, **kwargs):
"""
Constructs a RepViT-M1.5 model
"""
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 24, 4))
return _create_repvit('repvit_m1_5', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m2_3(pretrained=False, **kwargs):
"""
Constructs a RepViT-M2.3 model
"""
model_args = dict(embed_dim=(80, 160, 320, 640), depth=(6, 6, 34, 2))
return _create_repvit('repvit_m2_3', pretrained=pretrained, **dict(model_args, **kwargs))
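# Deployment sketch (illustrative addition, not part of the original file): RepVit.fuse() folds the
# parallel RepVggDw branches and the ConvNorm / NormLinear BatchNorms in place for faster inference.
if __name__ == '__main__':
    _model = repvit_m0_9(pretrained=False).eval()
    _model.fuse()  # re-parameterize for deployment
    _out = _model(torch.randn(1, 3, 224, 224))
    print(_out.shape)  # torch.Size([1, 1000])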
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/mobilevit.py | """ MobileViT
Paper:
V1: `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer` - https://arxiv.org/abs/2110.02178
V2: `Separable Self-attention for Mobile Vision Transformers` - https://arxiv.org/abs/2206.02680
MobileVitBlock and checkpoints adapted from https://github.com/apple/ml-cvnets (original copyright below)
License: https://github.com/apple/ml-cvnets/blob/main/LICENSE (Apple open source)
Rest of code, ByobNet, and Transformer block hacked together by / Copyright 2022, Ross Wightman
"""
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
import math
from typing import Callable, Tuple, Optional
import torch
import torch.nn.functional as F
from torch import nn
from timm.layers import to_2tuple, make_divisible, GroupNorm1, ConvMlp, DropPath, is_exportable
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
from .byobnet import register_block, ByoBlockCfg, ByoModelCfg, ByobNet, LayerFn, num_groups
from .vision_transformer import Block as TransformerBlock
__all__ = []
def _inverted_residual_block(d, c, s, br=4.0):
# inverted residual is a bottleneck block with bottle_ratio > 1 applied to in_chs, linear output, gs=1 (depthwise)
return ByoBlockCfg(
type='bottle', d=d, c=c, s=s, gs=1, br=br,
block_kwargs=dict(bottle_in=True, linear_out=True))
def _mobilevit_block(d, c, s, transformer_dim, transformer_depth, patch_size=4, br=4.0):
# inverted residual + mobilevit blocks as per MobileViT network
return (
_inverted_residual_block(d=d, c=c, s=s, br=br),
ByoBlockCfg(
type='mobilevit', d=1, c=c, s=1,
block_kwargs=dict(
transformer_dim=transformer_dim,
transformer_depth=transformer_depth,
patch_size=patch_size)
)
)
def _mobilevitv2_block(d, c, s, transformer_depth, patch_size=2, br=2.0, transformer_br=0.5):
# inverted residual + mobilevit blocks as per MobileViT network
return (
_inverted_residual_block(d=d, c=c, s=s, br=br),
ByoBlockCfg(
type='mobilevit2', d=1, c=c, s=1, br=transformer_br, gs=1,
block_kwargs=dict(
transformer_depth=transformer_depth,
patch_size=patch_size)
)
)
def _mobilevitv2_cfg(multiplier=1.0):
chs = (64, 128, 256, 384, 512)
if multiplier != 1.0:
chs = tuple([int(c * multiplier) for c in chs])
cfg = ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=chs[0], s=1, br=2.0),
_inverted_residual_block(d=2, c=chs[1], s=2, br=2.0),
_mobilevitv2_block(d=1, c=chs[2], s=2, transformer_depth=2),
_mobilevitv2_block(d=1, c=chs[3], s=2, transformer_depth=4),
_mobilevitv2_block(d=1, c=chs[4], s=2, transformer_depth=3),
),
stem_chs=int(32 * multiplier),
stem_type='3x3',
stem_pool='',
downsample='',
act_layer='silu',
)
return cfg
model_cfgs = dict(
mobilevit_xxs=ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=16, s=1, br=2.0),
_inverted_residual_block(d=3, c=24, s=2, br=2.0),
_mobilevit_block(d=1, c=48, s=2, transformer_dim=64, transformer_depth=2, patch_size=2, br=2.0),
_mobilevit_block(d=1, c=64, s=2, transformer_dim=80, transformer_depth=4, patch_size=2, br=2.0),
_mobilevit_block(d=1, c=80, s=2, transformer_dim=96, transformer_depth=3, patch_size=2, br=2.0),
),
stem_chs=16,
stem_type='3x3',
stem_pool='',
downsample='',
act_layer='silu',
num_features=320,
),
mobilevit_xs=ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=32, s=1),
_inverted_residual_block(d=3, c=48, s=2),
_mobilevit_block(d=1, c=64, s=2, transformer_dim=96, transformer_depth=2, patch_size=2),
_mobilevit_block(d=1, c=80, s=2, transformer_dim=120, transformer_depth=4, patch_size=2),
_mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=3, patch_size=2),
),
stem_chs=16,
stem_type='3x3',
stem_pool='',
downsample='',
act_layer='silu',
num_features=384,
),
mobilevit_s=ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=32, s=1),
_inverted_residual_block(d=3, c=64, s=2),
_mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2),
_mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2),
_mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2),
),
stem_chs=16,
stem_type='3x3',
stem_pool='',
downsample='',
act_layer='silu',
num_features=640,
),
semobilevit_s=ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=32, s=1),
_inverted_residual_block(d=3, c=64, s=2),
_mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2),
_mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2),
_mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2),
),
stem_chs=16,
stem_type='3x3',
stem_pool='',
downsample='',
attn_layer='se',
attn_kwargs=dict(rd_ratio=1/8),
num_features=640,
),
mobilevitv2_050=_mobilevitv2_cfg(.50),
mobilevitv2_075=_mobilevitv2_cfg(.75),
mobilevitv2_125=_mobilevitv2_cfg(1.25),
mobilevitv2_100=_mobilevitv2_cfg(1.0),
mobilevitv2_150=_mobilevitv2_cfg(1.5),
mobilevitv2_175=_mobilevitv2_cfg(1.75),
mobilevitv2_200=_mobilevitv2_cfg(2.0),
)
@register_notrace_module
class MobileVitBlock(nn.Module):
""" MobileViT block
Paper: https://arxiv.org/abs/2110.02178?context=cs.LG
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 3,
stride: int = 1,
bottle_ratio: float = 1.0,
group_size: Optional[int] = None,
dilation: Tuple[int, int] = (1, 1),
mlp_ratio: float = 2.0,
transformer_dim: Optional[int] = None,
transformer_depth: int = 2,
patch_size: int = 8,
num_heads: int = 4,
attn_drop: float = 0.,
            drop: float = 0.,
no_fusion: bool = False,
drop_path_rate: float = 0.,
layers: LayerFn = None,
transformer_norm_layer: Callable = nn.LayerNorm,
**kwargs, # eat unused args
):
super(MobileVitBlock, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
out_chs = out_chs or in_chs
transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs)
self.conv_kxk = layers.conv_norm_act(
in_chs, in_chs, kernel_size=kernel_size,
stride=stride, groups=groups, dilation=dilation[0])
self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False)
self.transformer = nn.Sequential(*[
TransformerBlock(
transformer_dim,
mlp_ratio=mlp_ratio,
num_heads=num_heads,
qkv_bias=True,
attn_drop=attn_drop,
proj_drop=drop,
drop_path=drop_path_rate,
act_layer=layers.act,
norm_layer=transformer_norm_layer,
)
for _ in range(transformer_depth)
])
self.norm = transformer_norm_layer(transformer_dim)
self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1)
if no_fusion:
self.conv_fusion = None
else:
self.conv_fusion = layers.conv_norm_act(in_chs + out_chs, out_chs, kernel_size=kernel_size, stride=1)
self.patch_size = to_2tuple(patch_size)
self.patch_area = self.patch_size[0] * self.patch_size[1]
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
# Local representation
x = self.conv_kxk(x)
x = self.conv_1x1(x)
# Unfold (feature map -> patches)
patch_h, patch_w = self.patch_size
B, C, H, W = x.shape
new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w
num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w
num_patches = num_patch_h * num_patch_w # N
interpolate = False
if new_h != H or new_w != W:
# Note: Padding can be done, but then it needs to be handled in attention function.
x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False)
interpolate = True
# [B, C, H, W] --> [B * C * n_h, n_w, p_h, p_w]
x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, patch_w).transpose(1, 2)
# [B * C * n_h, n_w, p_h, p_w] --> [BP, N, C] where P = p_h * p_w and N = n_h * n_w
x = x.reshape(B, C, num_patches, self.patch_area).transpose(1, 3).reshape(B * self.patch_area, num_patches, -1)
# Global representations
x = self.transformer(x)
x = self.norm(x)
# Fold (patch -> feature map)
# [B, P, N, C] --> [B*C*n_h, n_w, p_h, p_w]
x = x.contiguous().view(B, self.patch_area, num_patches, -1)
x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, patch_h, patch_w)
# [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W]
x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w)
if interpolate:
x = F.interpolate(x, size=(H, W), mode="bilinear", align_corners=False)
x = self.conv_proj(x)
if self.conv_fusion is not None:
x = self.conv_fusion(torch.cat((shortcut, x), dim=1))
return x
class LinearSelfAttention(nn.Module):
"""
This layer applies a self-attention with linear complexity, as described in `https://arxiv.org/abs/2206.02680`
This layer can be used for self- as well as cross-attention.
Args:
embed_dim (int): :math:`C` from an expected input of size :math:`(N, C, H, W)`
attn_drop (float): Dropout value for context scores. Default: 0.0
bias (bool): Use bias in learnable layers. Default: True
Shape:
        - Input: :math:`(B, C, P, N)` where :math:`B` is the batch size, :math:`C` is the input channels,
        :math:`P` is the number of pixels in a patch, and :math:`N` is the number of patches
- Output: same as the input
.. note::
For MobileViTv2, we unfold the feature map [B, C, H, W] into [B, C, P, N] where P is the number of pixels
in a patch and N is the number of patches. Because channel is the first dimension in this unfolded tensor,
we use point-wise convolution (instead of a linear layer). This avoids a transpose operation (which may be
expensive on resource-constrained devices) that may be required to convert the unfolded tensor from
channel-first to channel-last format in case of a linear layer.
"""
def __init__(
self,
embed_dim: int,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
bias: bool = True,
) -> None:
super().__init__()
self.embed_dim = embed_dim
self.qkv_proj = nn.Conv2d(
in_channels=embed_dim,
out_channels=1 + (2 * embed_dim),
bias=bias,
kernel_size=1,
)
self.attn_drop = nn.Dropout(attn_drop)
self.out_proj = nn.Conv2d(
in_channels=embed_dim,
out_channels=embed_dim,
bias=bias,
kernel_size=1,
)
self.out_drop = nn.Dropout(proj_drop)
def _forward_self_attn(self, x: torch.Tensor) -> torch.Tensor:
        # [B, C, P, N] --> [B, 1 + 2d, P, N]
qkv = self.qkv_proj(x)
# Project x into query, key and value
# Query --> [B, 1, P, N]
# value, key --> [B, d, P, N]
query, key, value = qkv.split([1, self.embed_dim, self.embed_dim], dim=1)
# apply softmax along N dimension
context_scores = F.softmax(query, dim=-1)
context_scores = self.attn_drop(context_scores)
# Compute context vector
# [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] --> [B, d, P, 1]
context_vector = (key * context_scores).sum(dim=-1, keepdim=True)
# combine context vector with values
# [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N]
out = F.relu(value) * context_vector.expand_as(value)
out = self.out_proj(out)
out = self.out_drop(out)
return out
@torch.jit.ignore()
def _forward_cross_attn(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor:
# x --> [B, C, P, N]
# x_prev = [B, C, P, M]
batch_size, in_dim, kv_patch_area, kv_num_patches = x.shape
        q_patch_area, q_num_patches = x_prev.shape[-2:]
assert (
kv_patch_area == q_patch_area
), "The number of pixels in a patch for query and key_value should be the same"
# compute query, key, and value
# [B, C, P, M] --> [B, 1 + d, P, M]
qk = F.conv2d(
x_prev,
weight=self.qkv_proj.weight[:self.embed_dim + 1],
bias=self.qkv_proj.bias[:self.embed_dim + 1],
)
# [B, 1 + d, P, M] --> [B, 1, P, M], [B, d, P, M]
query, key = qk.split([1, self.embed_dim], dim=1)
# [B, C, P, N] --> [B, d, P, N]
value = F.conv2d(
x,
            weight=self.qkv_proj.weight[self.embed_dim + 1:],
            bias=self.qkv_proj.bias[self.embed_dim + 1:] if self.qkv_proj.bias is not None else None,
)
# apply softmax along M dimension
context_scores = F.softmax(query, dim=-1)
context_scores = self.attn_drop(context_scores)
# compute context vector
# [B, d, P, M] * [B, 1, P, M] -> [B, d, P, M] --> [B, d, P, 1]
context_vector = (key * context_scores).sum(dim=-1, keepdim=True)
# combine context vector with values
# [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N]
out = F.relu(value) * context_vector.expand_as(value)
out = self.out_proj(out)
out = self.out_drop(out)
return out
def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor:
if x_prev is None:
return self._forward_self_attn(x)
else:
return self._forward_cross_attn(x, x_prev=x_prev)
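# Shape sketch for LinearSelfAttention (illustrative addition): for an input [B, d, P, N] with
# d=embed_dim, P pixels per patch and N patches, the 1x1 qkv projection yields 1 query channel plus
# d key and d value channels; softmax over N turns the query into context scores, the context vector
# is a [B, d, P, 1] score-weighted sum of keys, and it gates ReLU(value) back to [B, d, P, N].
# Cost grows linearly with N, unlike the N x N attention matrix of standard self-attention, e.g.:
#   attn = LinearSelfAttention(embed_dim=64)
#   y = attn(torch.randn(2, 64, 4, 196))  # -> same shape [2, 64, 4, 196]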
class LinearTransformerBlock(nn.Module):
"""
    This class defines the pre-norm transformer encoder with linear self-attention described in the `MobileViTv2 paper <https://arxiv.org/abs/2206.02680>`_
Args:
embed_dim (int): :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, P, N)`
mlp_ratio (float): Inner dimension ratio of the FFN relative to embed_dim
drop (float): Dropout rate. Default: 0.0
attn_drop (float): Dropout rate for attention in multi-head attention. Default: 0.0
drop_path (float): Stochastic depth rate Default: 0.0
norm_layer (Callable): Normalization layer. Default: layer_norm_2d
Shape:
- Input: :math:`(B, C_{in}, P, N)` where :math:`B` is batch size, :math:`C_{in}` is input embedding dim,
:math:`P` is number of pixels in a patch, and :math:`N` is number of patches,
- Output: same shape as the input
"""
def __init__(
self,
embed_dim: int,
mlp_ratio: float = 2.0,
drop: float = 0.0,
attn_drop: float = 0.0,
drop_path: float = 0.0,
act_layer=None,
norm_layer=None,
) -> None:
super().__init__()
act_layer = act_layer or nn.SiLU
norm_layer = norm_layer or GroupNorm1
self.norm1 = norm_layer(embed_dim)
self.attn = LinearSelfAttention(embed_dim=embed_dim, attn_drop=attn_drop, proj_drop=drop)
self.drop_path1 = DropPath(drop_path)
self.norm2 = norm_layer(embed_dim)
self.mlp = ConvMlp(
in_features=embed_dim,
hidden_features=int(embed_dim * mlp_ratio),
act_layer=act_layer,
drop=drop)
self.drop_path2 = DropPath(drop_path)
def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor:
if x_prev is None:
# self-attention
x = x + self.drop_path1(self.attn(self.norm1(x)))
else:
# cross-attention
res = x
x = self.norm1(x) # norm
x = self.attn(x, x_prev) # attn
x = self.drop_path1(x) + res # residual
# Feed forward network
x = x + self.drop_path2(self.mlp(self.norm2(x)))
return x
@register_notrace_module
class MobileVitV2Block(nn.Module):
"""
    This class defines the `MobileViTv2 block <https://arxiv.org/abs/2206.02680>`_
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 3,
bottle_ratio: float = 1.0,
group_size: Optional[int] = 1,
dilation: Tuple[int, int] = (1, 1),
mlp_ratio: float = 2.0,
transformer_dim: Optional[int] = None,
transformer_depth: int = 2,
patch_size: int = 8,
attn_drop: float = 0.,
            drop: float = 0.,
drop_path_rate: float = 0.,
layers: LayerFn = None,
transformer_norm_layer: Callable = GroupNorm1,
**kwargs, # eat unused args
):
super(MobileVitV2Block, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
out_chs = out_chs or in_chs
transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs)
self.conv_kxk = layers.conv_norm_act(
in_chs, in_chs, kernel_size=kernel_size,
stride=1, groups=groups, dilation=dilation[0])
self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False)
self.transformer = nn.Sequential(*[
LinearTransformerBlock(
transformer_dim,
mlp_ratio=mlp_ratio,
attn_drop=attn_drop,
drop=drop,
drop_path=drop_path_rate,
act_layer=layers.act,
norm_layer=transformer_norm_layer
)
for _ in range(transformer_depth)
])
self.norm = transformer_norm_layer(transformer_dim)
self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1, apply_act=False)
self.patch_size = to_2tuple(patch_size)
self.patch_area = self.patch_size[0] * self.patch_size[1]
self.coreml_exportable = is_exportable()
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
patch_h, patch_w = self.patch_size
new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w
num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w
num_patches = num_patch_h * num_patch_w # N
if new_h != H or new_w != W:
x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=True)
# Local representation
x = self.conv_kxk(x)
x = self.conv_1x1(x)
# Unfold (feature map -> patches), [B, C, H, W] -> [B, C, P, N]
C = x.shape[1]
if self.coreml_exportable:
x = F.unfold(x, kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w))
else:
x = x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4)
x = x.reshape(B, C, -1, num_patches)
# Global representations
x = self.transformer(x)
x = self.norm(x)
# Fold (patches -> feature map), [B, C, P, N] --> [B, C, H, W]
if self.coreml_exportable:
# adopted from https://github.com/apple/ml-cvnets/blob/main/cvnets/modules/mobilevit_block.py#L609-L624
x = x.reshape(B, C * patch_h * patch_w, num_patch_h, num_patch_w)
x = F.pixel_shuffle(x, upscale_factor=patch_h)
else:
x = x.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3)
x = x.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w)
x = self.conv_proj(x)
return x
register_block('mobilevit', MobileVitBlock)
register_block('mobilevit2', MobileVitV2Block)
def _create_mobilevit(variant, cfg_variant=None, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
def _create_mobilevit2(variant, cfg_variant=None, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': (0., 0., 0.), 'std': (1., 1., 1.),
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'fixed_input_size': False,
**kwargs
}
default_cfgs = generate_default_cfgs({
'mobilevit_xxs.cvnets_in1k': _cfg(hf_hub_id='timm/'),
'mobilevit_xs.cvnets_in1k': _cfg(hf_hub_id='timm/'),
'mobilevit_s.cvnets_in1k': _cfg(hf_hub_id='timm/'),
'mobilevitv2_050.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_075.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_100.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_125.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_150.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_175.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_200.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_150.cvnets_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_175.cvnets_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_200.cvnets_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_150.cvnets_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'mobilevitv2_175.cvnets_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'mobilevitv2_200.cvnets_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
})
@register_model
def mobilevit_xxs(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs)
@register_model
def mobilevit_xs(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevit_xs', pretrained=pretrained, **kwargs)
@register_model
def mobilevit_s(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevit_s', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_050(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_050', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_075(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_075', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_100(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_100', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_125(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_125', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_150(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_150', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_175(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_175', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_200(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_200', pretrained=pretrained, **kwargs)
register_model_deprecations(__name__, {
'mobilevitv2_150_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k',
'mobilevitv2_175_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k',
'mobilevitv2_200_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k',
'mobilevitv2_150_384_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k_384',
'mobilevitv2_175_384_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k_384',
'mobilevitv2_200_384_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k_384',
}) | 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/gcvit.py | """ Global Context ViT
From scratch implementation of GCViT in the style of timm swin_transformer_v2_cr.py
Global Context Vision Transformers -https://arxiv.org/abs/2206.09959
@article{hatamizadeh2022global,
title={Global Context Vision Transformers},
author={Hatamizadeh, Ali and Yin, Hongxu and Kautz, Jan and Molchanov, Pavlo},
journal={arXiv preprint arXiv:2206.09959},
year={2022}
}
Free of any code related to NVIDIA GCVit impl at https://github.com/NVlabs/GCVit.
The license for this code release is Apache 2.0 with no commercial restrictions.
However, weight files adapted from NVIDIA GCVit impl ARE under a non-commercial share-alike license
(https://creativecommons.org/licenses/by-nc-sa/4.0/) until I have a chance to train new ones...
Hacked together by / Copyright 2022, Ross Wightman
"""
import math
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, to_ntuple, Mlp, ClassifierHead, LayerNorm2d, \
get_attn, get_act_layer, get_norm_layer, RelPosBias, _assert
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import named_apply
from ._registry import register_model, generate_default_cfgs
__all__ = ['GlobalContextVit']
class MbConvBlock(nn.Module):
""" A depthwise separable / fused mbconv style residual block with SE, `no norm.
"""
def __init__(
self,
in_chs,
out_chs=None,
expand_ratio=1.0,
attn_layer='se',
bias=False,
act_layer=nn.GELU,
):
super().__init__()
attn_kwargs = dict(act_layer=act_layer)
        if isinstance(attn_layer, str) and attn_layer in ('se', 'eca'):
attn_kwargs['rd_ratio'] = 0.25
attn_kwargs['bias'] = False
attn_layer = get_attn(attn_layer)
out_chs = out_chs or in_chs
mid_chs = int(expand_ratio * in_chs)
self.conv_dw = nn.Conv2d(in_chs, mid_chs, 3, 1, 1, groups=in_chs, bias=bias)
self.act = act_layer()
self.se = attn_layer(mid_chs, **attn_kwargs)
self.conv_pw = nn.Conv2d(mid_chs, out_chs, 1, 1, 0, bias=bias)
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
x = self.act(x)
x = self.se(x)
x = self.conv_pw(x)
x = x + shortcut
return x
class Downsample2d(nn.Module):
def __init__(
self,
dim,
dim_out=None,
reduction='conv',
act_layer=nn.GELU,
norm_layer=LayerNorm2d, # NOTE in NCHW
):
super().__init__()
dim_out = dim_out or dim
self.norm1 = norm_layer(dim) if norm_layer is not None else nn.Identity()
self.conv_block = MbConvBlock(dim, act_layer=act_layer)
assert reduction in ('conv', 'max', 'avg')
if reduction == 'conv':
self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False)
elif reduction == 'max':
assert dim == dim_out
self.reduction = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
else:
assert dim == dim_out
self.reduction = nn.AvgPool2d(kernel_size=2)
self.norm2 = norm_layer(dim_out) if norm_layer is not None else nn.Identity()
def forward(self, x):
x = self.norm1(x)
x = self.conv_block(x)
x = self.reduction(x)
x = self.norm2(x)
return x
class FeatureBlock(nn.Module):
def __init__(
self,
dim,
levels=0,
reduction='max',
act_layer=nn.GELU,
):
super().__init__()
reductions = levels
levels = max(1, levels)
if reduction == 'avg':
pool_fn = partial(nn.AvgPool2d, kernel_size=2)
else:
pool_fn = partial(nn.MaxPool2d, kernel_size=3, stride=2, padding=1)
self.blocks = nn.Sequential()
for i in range(levels):
self.blocks.add_module(f'conv{i+1}', MbConvBlock(dim, act_layer=act_layer))
if reductions:
self.blocks.add_module(f'pool{i+1}', pool_fn())
reductions -= 1
def forward(self, x):
return self.blocks(x)
class Stem(nn.Module):
def __init__(
self,
in_chs: int = 3,
out_chs: int = 96,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm2d, # NOTE stem in NCHW
):
super().__init__()
self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=3, stride=2, padding=1)
self.down = Downsample2d(out_chs, act_layer=act_layer, norm_layer=norm_layer)
def forward(self, x):
x = self.conv1(x)
x = self.down(x)
return x
class WindowAttentionGlobal(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
window_size: Tuple[int, int],
use_global: bool = True,
qkv_bias: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
):
super().__init__()
window_size = to_2tuple(window_size)
self.window_size = window_size
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.use_global = use_global
self.rel_pos = RelPosBias(window_size=window_size, num_heads=num_heads)
if self.use_global:
self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias)
else:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, q_global: Optional[torch.Tensor] = None):
B, N, C = x.shape
if self.use_global and q_global is not None:
_assert(x.shape[-1] == q_global.shape[-1], 'x and q_global seq lengths should be equal')
kv = self.qkv(x)
kv = kv.reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
q = q_global.repeat(B // q_global.shape[0], 1, 1, 1)
q = q.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
else:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q = q * self.scale
attn = q @ k.transpose(-2, -1).contiguous() # NOTE contiguous() fixes an odd jit bug in PyTorch 2.0
attn = self.rel_pos(attn)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def window_partition(x, window_size: Tuple[int, int]):
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
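# Shape example for the window helpers above (illustrative addition): with x of shape
# [B, H, W, C] = [2, 56, 56, 64] and window_size=(7, 7), window_partition returns
# [2 * 8 * 8, 7, 7, 64] (one row per 7x7 window) and window_reverse maps that back to
# [2, 56, 56, 64]; GlobalContextVitBlock flattens each window to 49 tokens before attention.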
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class GlobalContextVitBlock(nn.Module):
def __init__(
self,
dim: int,
feat_size: Tuple[int, int],
num_heads: int,
window_size: int = 7,
mlp_ratio: float = 4.,
use_global: bool = True,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
attn_layer: Callable = WindowAttentionGlobal,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
):
super().__init__()
feat_size = to_2tuple(feat_size)
window_size = to_2tuple(window_size)
self.window_size = window_size
self.num_windows = int((feat_size[0] // window_size[0]) * (feat_size[1] // window_size[1]))
self.norm1 = norm_layer(dim)
self.attn = attn_layer(
dim,
num_heads=num_heads,
window_size=window_size,
use_global=use_global,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.ls1 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)
self.ls2 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _window_attn(self, x, q_global: Optional[torch.Tensor] = None):
B, H, W, C = x.shape
x_win = window_partition(x, self.window_size)
x_win = x_win.view(-1, self.window_size[0] * self.window_size[1], C)
attn_win = self.attn(x_win, q_global)
x = window_reverse(attn_win, self.window_size, (H, W))
return x
def forward(self, x, q_global: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class GlobalContextVitStage(nn.Module):
def __init__(
self,
dim,
depth: int,
num_heads: int,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
downsample: bool = True,
global_norm: bool = False,
stage_norm: bool = False,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: Union[List[float], float] = 0.0,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
norm_layer_cl: Callable = LayerNorm2d,
):
super().__init__()
if downsample:
self.downsample = Downsample2d(
dim=dim,
dim_out=dim * 2,
norm_layer=norm_layer,
)
dim = dim * 2
feat_size = (feat_size[0] // 2, feat_size[1] // 2)
else:
self.downsample = nn.Identity()
self.feat_size = feat_size
window_size = to_2tuple(window_size)
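        # Number of MbConv + pool levels needed to shrink the stage feature map down to the window
        # grid; the FeatureBlock output is reused as the shared global query for this stage's blocks.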
feat_levels = int(math.log2(min(feat_size) / min(window_size)))
self.global_block = FeatureBlock(dim, feat_levels)
self.global_norm = norm_layer_cl(dim) if global_norm else nn.Identity()
self.blocks = nn.ModuleList([
GlobalContextVitBlock(
dim=dim,
num_heads=num_heads,
feat_size=feat_size,
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
use_global=(i % 2 != 0),
layer_scale=layer_scale,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
act_layer=act_layer,
norm_layer=norm_layer_cl,
)
for i in range(depth)
])
self.norm = norm_layer_cl(dim) if stage_norm else nn.Identity()
self.dim = dim
self.feat_size = feat_size
self.grad_checkpointing = False
def forward(self, x):
# input NCHW, downsample & global block are 2d conv + pooling
x = self.downsample(x)
global_query = self.global_block(x)
# reshape NCHW --> NHWC for transformer blocks
x = x.permute(0, 2, 3, 1)
global_query = self.global_norm(global_query.permute(0, 2, 3, 1))
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint.checkpoint(blk, x, global_query)
else:
x = blk(x, global_query)
x = self.norm(x)
x = x.permute(0, 3, 1, 2).contiguous() # back to NCHW
return x
class GlobalContextVit(nn.Module):
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
img_size: Tuple[int, int] = 224,
window_ratio: Tuple[int, ...] = (32, 32, 16, 32),
            window_size: Optional[Tuple[int, ...]] = None,
embed_dim: int = 64,
depths: Tuple[int, ...] = (3, 4, 19, 5),
num_heads: Tuple[int, ...] = (2, 4, 8, 16),
mlp_ratio: float = 3.0,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init='',
act_layer: str = 'gelu',
norm_layer: str = 'layernorm2d',
norm_layer_cl: str = 'layernorm',
norm_eps: float = 1e-5,
):
super().__init__()
act_layer = get_act_layer(act_layer)
norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps)
norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps)
img_size = to_2tuple(img_size)
feat_size = tuple(d // 4 for d in img_size) # stem reduction by 4
self.global_pool = global_pool
self.num_classes = num_classes
self.drop_rate = drop_rate
num_stages = len(depths)
self.num_features = int(embed_dim * 2 ** (num_stages - 1))
if window_size is not None:
window_size = to_ntuple(num_stages)(window_size)
else:
assert window_ratio is not None
window_size = tuple([(img_size[0] // r, img_size[1] // r) for r in to_ntuple(num_stages)(window_ratio)])
self.stem = Stem(
in_chs=in_chans,
out_chs=embed_dim,
act_layer=act_layer,
norm_layer=norm_layer
)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
for i in range(num_stages):
last_stage = i == num_stages - 1
stage_scale = 2 ** max(i - 1, 0)
stages.append(GlobalContextVitStage(
dim=embed_dim * stage_scale,
depth=depths[i],
num_heads=num_heads[i],
feat_size=(feat_size[0] // stage_scale, feat_size[1] // stage_scale),
window_size=window_size[i],
downsample=i != 0,
stage_norm=last_stage,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
layer_scale=layer_scale,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
act_layer=act_layer,
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
))
self.stages = nn.Sequential(*stages)
# Classifier head
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
if weight_init:
named_apply(partial(self._init_weights, scheme=weight_init), self)
def _init_weights(self, module, name, scheme='vit'):
# note Conv2d left as default init
if scheme == 'vit':
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
else:
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)'
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is None:
global_pool = self.head.global_pool.pool_type
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_gcvit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(GlobalContextVit, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
default_cfgs = generate_default_cfgs({
'gcvit_xxtiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'),
'gcvit_xtiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'),
'gcvit_tiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'),
'gcvit_small.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'),
'gcvit_base.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'),
})
@register_model
def gcvit_xxtiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(2, 2, 6, 2),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_xxtiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_xtiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 6, 5),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_xtiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_tiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_tiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_small(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(3, 6, 12, 24),
embed_dim=96,
mlp_ratio=2,
layer_scale=1e-5,
**kwargs)
return _create_gcvit('gcvit_small', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_base(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(4, 8, 16, 32),
embed_dim=128,
mlp_ratio=2,
layer_scale=1e-5,
**kwargs)
return _create_gcvit('gcvit_base', pretrained=pretrained, **model_kwargs)
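# Usage sketch (illustrative addition, not part of the original file): the configs above are marked
# fixed_input_size=True, so feed the 224x224 resolution the relative position tables were built for.
if __name__ == '__main__':
    import timm
    _model = timm.create_model('gcvit_tiny', pretrained=False).eval()
    with torch.no_grad():
        _logits = _model(torch.randn(1, 3, 224, 224))  # (1, 1000)
    print(_logits.shape)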
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/xception.py | """
Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch)
@author: tstandley
Adapted by cadene
Creates an Xception Model as defined in:
Francois Chollet
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/pdf/1610.02357.pdf
These weights were ported from the Keras implementation and achieve the following performance on the validation set:
Loss: 0.9173, Prec@1: 78.892, Prec@5: 94.292
REMEMBER to set your image size to 3x299x299 for both test and validation
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
"""
import torch.jit
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['Xception']
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False)
self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True):
super(Block, self).__init__()
if out_channels != in_channels or strides != 1:
self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False)
self.skipbn = nn.BatchNorm2d(out_channels)
else:
self.skip = None
rep = []
for i in range(reps):
if grow_first:
inc = in_channels if i == 0 else out_channels
outc = out_channels
else:
inc = in_channels
outc = in_channels if i < (reps - 1) else out_channels
rep.append(nn.ReLU(inplace=True))
rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1))
rep.append(nn.BatchNorm2d(outc))
if not start_with_relu:
rep = rep[1:]
else:
rep[0] = nn.ReLU(inplace=False)
if strides != 1:
rep.append(nn.MaxPool2d(3, strides, 1))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x += skip
return x
class Xception(nn.Module):
"""
Xception optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1610.02357.pdf
"""
def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'):
""" Constructor
Args:
num_classes: number of classes
"""
super(Xception, self).__init__()
self.drop_rate = drop_rate
self.global_pool = global_pool
self.num_classes = num_classes
self.num_features = 2048
self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.act2 = nn.ReLU(inplace=True)
self.block1 = Block(64, 128, 2, 2, start_with_relu=False)
self.block2 = Block(128, 256, 2, 2)
self.block3 = Block(256, 728, 2, 2)
self.block4 = Block(728, 728, 3, 1)
self.block5 = Block(728, 728, 3, 1)
self.block6 = Block(728, 728, 3, 1)
self.block7 = Block(728, 728, 3, 1)
self.block8 = Block(728, 728, 3, 1)
self.block9 = Block(728, 728, 3, 1)
self.block10 = Block(728, 728, 3, 1)
self.block11 = Block(728, 728, 3, 1)
self.block12 = Block(728, 1024, 2, 2, grow_first=False)
self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
self.bn3 = nn.BatchNorm2d(1536)
self.act3 = nn.ReLU(inplace=True)
self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1)
self.bn4 = nn.BatchNorm2d(self.num_features)
self.act4 = nn.ReLU(inplace=True)
self.feature_info = [
dict(num_chs=64, reduction=2, module='act2'),
dict(num_chs=128, reduction=4, module='block2.rep.0'),
dict(num_chs=256, reduction=8, module='block3.rep.0'),
dict(num_chs=728, reduction=16, module='block12.rep.0'),
dict(num_chs=2048, reduction=32, module='act4'),
]
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
# #------- init weights --------
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^conv[12]|bn[12]',
blocks=[
(r'^block(\d+)', None),
(r'^conv[34]|bn[34]', (99,)),
],
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, "gradient checkpointing not supported"
@torch.jit.ignore
def get_classifier(self):
return self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.act3(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.act4(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
        if self.drop_rate:
            # NOTE: assign the result, F.dropout is not in-place by default
            x = F.dropout(x, p=self.drop_rate, training=self.training)
return x if pre_logits else self.fc(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _xception(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
Xception, variant, pretrained,
feature_cfg=dict(feature_cls='hook'),
**kwargs)
default_cfgs = generate_default_cfgs({
'legacy_xception.tf_in1k': {
'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth',
'input_size': (3, 299, 299),
'pool_size': (10, 10),
'crop_pct': 0.8975,
'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5),
'std': (0.5, 0.5, 0.5),
'num_classes': 1000,
'first_conv': 'conv1',
'classifier': 'fc'
# The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
}
})
@register_model
def legacy_xception(pretrained=False, **kwargs) -> Xception:
return _xception('legacy_xception', pretrained=pretrained, **kwargs)
register_model_deprecations(__name__, {
'xception': 'legacy_xception',
})
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/inception_v4.py | """ Pytorch Inception-V4 implementation
Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
"""
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_classifier, ConvNormAct
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['InceptionV4']
class Mixed3a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = conv_block(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed4a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed4a, self).__init__()
self.branch0 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(64, 96, kernel_size=(3, 3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed5a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed5a, self).__init__()
self.conv = conv_block(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class InceptionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionA, self).__init__()
self.branch0 = conv_block(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1),
conv_block(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionA, self).__init__()
self.branch0 = conv_block(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
conv_block(384, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=3, stride=1, padding=1),
conv_block(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionB, self).__init__()
self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0))
)
self.branch2 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3))
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionB, self).__init__()
self.branch0 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
conv_block(1024, 256, kernel_size=1, stride=1),
conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionC(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionC, self).__init__()
self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(
self,
num_classes=1000,
in_chans=3,
output_stride=32,
drop_rate=0.,
global_pool='avg',
norm_layer='batchnorm2d',
norm_eps=1e-3,
act_layer='relu',
):
super(InceptionV4, self).__init__()
assert output_stride == 32
self.num_classes = num_classes
self.num_features = 1536
conv_block = partial(
ConvNormAct,
padding=0,
norm_layer=norm_layer,
act_layer=act_layer,
norm_kwargs=dict(eps=norm_eps),
act_kwargs=dict(inplace=True),
)
features = [
conv_block(in_chans, 32, kernel_size=3, stride=2),
conv_block(32, 32, kernel_size=3, stride=1),
conv_block(32, 64, kernel_size=3, stride=1, padding=1),
Mixed3a(conv_block),
Mixed4a(conv_block),
Mixed5a(conv_block),
]
features += [InceptionA(conv_block) for _ in range(4)]
features += [ReductionA(conv_block)] # Mixed6a
features += [InceptionB(conv_block) for _ in range(7)]
features += [ReductionB(conv_block)] # Mixed7a
features += [InceptionC(conv_block) for _ in range(3)]
self.features = nn.Sequential(*features)
self.feature_info = [
dict(num_chs=64, reduction=2, module='features.2'),
dict(num_chs=160, reduction=4, module='features.3'),
dict(num_chs=384, reduction=8, module='features.9'),
dict(num_chs=1024, reduction=16, module='features.17'),
dict(num_chs=1536, reduction=32, module='features.21'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^features\.[012]\.',
blocks=r'^features\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.last_linear
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
return self.features(x)
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4:
return build_model_with_cfg(
InceptionV4,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
default_cfgs = generate_default_cfgs({
'inception_v4.tf_in1k': {
'hf_hub_id': 'timm/',
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'features.0.conv', 'classifier': 'last_linear',
}
})
@register_model
def inception_v4(pretrained=False, **kwargs):
return _create_inception_v4('inception_v4', pretrained, **kwargs)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/inception_resnet_v2.py | """ Pytorch Inception-Resnet-V2 implementation
Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
"""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_classifier, ConvNormAct
from ._builder import build_model_with_cfg
from ._manipulate import flatten_modules
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['InceptionResnetV2']
class Mixed_5b(nn.Module):
def __init__(self, conv_block=None):
super(Mixed_5b, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch0 = conv_block(192, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(192, 48, kernel_size=1, stride=1),
conv_block(48, 64, kernel_size=5, stride=1, padding=2)
)
self.branch2 = nn.Sequential(
conv_block(192, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1),
conv_block(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(192, 64, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block35(nn.Module):
def __init__(self, scale=1.0, conv_block=None):
super(Block35, self).__init__()
self.scale = scale
conv_block = conv_block or ConvNormAct
self.branch0 = conv_block(320, 32, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(320, 32, kernel_size=1, stride=1),
conv_block(32, 32, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
conv_block(320, 32, kernel_size=1, stride=1),
conv_block(32, 48, kernel_size=3, stride=1, padding=1),
conv_block(48, 64, kernel_size=3, stride=1, padding=1)
)
self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
self.act = nn.ReLU()
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.act(out)
return out
class Mixed_6a(nn.Module):
def __init__(self, conv_block=None):
super(Mixed_6a, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch0 = conv_block(320, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
conv_block(320, 256, kernel_size=1, stride=1),
conv_block(256, 256, kernel_size=3, stride=1, padding=1),
conv_block(256, 384, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Block17(nn.Module):
def __init__(self, scale=1.0, conv_block=None):
super(Block17, self).__init__()
self.scale = scale
conv_block = conv_block or ConvNormAct
self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(1088, 128, kernel_size=1, stride=1),
conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0))
)
self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)
self.act = nn.ReLU()
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.act(out)
return out
class Mixed_7a(nn.Module):
def __init__(self, conv_block=None):
super(Mixed_7a, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch0 = nn.Sequential(
conv_block(1088, 256, kernel_size=1, stride=1),
conv_block(256, 384, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
conv_block(1088, 256, kernel_size=1, stride=1),
conv_block(256, 288, kernel_size=3, stride=2)
)
self.branch2 = nn.Sequential(
conv_block(1088, 256, kernel_size=1, stride=1),
conv_block(256, 288, kernel_size=3, stride=1, padding=1),
conv_block(288, 320, kernel_size=3, stride=2)
)
self.branch3 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block8(nn.Module):
def __init__(self, scale=1.0, no_relu=False, conv_block=None):
super(Block8, self).__init__()
self.scale = scale
conv_block = conv_block or ConvNormAct
self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(2080, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)),
conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
)
self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)
self.relu = None if no_relu else nn.ReLU()
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
if self.relu is not None:
out = self.relu(out)
return out
class InceptionResnetV2(nn.Module):
def __init__(
self,
num_classes=1000,
in_chans=3,
drop_rate=0.,
output_stride=32,
global_pool='avg',
norm_layer='batchnorm2d',
norm_eps=1e-3,
act_layer='relu',
):
super(InceptionResnetV2, self).__init__()
self.num_classes = num_classes
self.num_features = 1536
assert output_stride == 32
conv_block = partial(
ConvNormAct,
padding=0,
norm_layer=norm_layer,
act_layer=act_layer,
norm_kwargs=dict(eps=norm_eps),
act_kwargs=dict(inplace=True),
)
self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2)
self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1)
self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1)
self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')]
self.maxpool_3a = nn.MaxPool2d(3, stride=2)
self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1)
self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1)
self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')]
self.maxpool_5a = nn.MaxPool2d(3, stride=2)
self.mixed_5b = Mixed_5b(conv_block=conv_block)
self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)])
self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')]
self.mixed_6a = Mixed_6a(conv_block=conv_block)
self.repeat_1 = nn.Sequential(*[Block17(scale=0.10, conv_block=conv_block) for _ in range(20)])
self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')]
self.mixed_7a = Mixed_7a(conv_block=conv_block)
self.repeat_2 = nn.Sequential(*[Block8(scale=0.20, conv_block=conv_block) for _ in range(9)])
self.block8 = Block8(no_relu=True, conv_block=conv_block)
self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1)
self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')]
self.global_pool, self.head_drop, self.classif = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))}
module_map.pop(('classif',))
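        # Group parameters for layer-wise LR decay: stem convs -> group 0, conv2d_3/4 -> group 1,
        # intermediate modules -> their flattened order, final block8/conv2d_7b -> last group.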
def _matcher(name):
if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]):
return 0
elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]):
return 1
elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]):
return len(module_map) + 1
else:
for k in module_map.keys():
if k == tuple(name.split('.')[:len(k)]):
return module_map[k]
return float('inf')
return _matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, "checkpointing not supported"
@torch.jit.ignore
def get_classifier(self):
return self.classif
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.conv2d_1a(x)
x = self.conv2d_2a(x)
x = self.conv2d_2b(x)
x = self.maxpool_3a(x)
x = self.conv2d_3b(x)
x = self.conv2d_4a(x)
x = self.maxpool_5a(x)
x = self.mixed_5b(x)
x = self.repeat(x)
x = self.mixed_6a(x)
x = self.repeat_1(x)
x = self.mixed_7a(x)
x = self.repeat_2(x)
x = self.block8(x)
x = self.conv2d_7b(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.classif(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_inception_resnet_v2(variant, pretrained=False, **kwargs):
return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs)
default_cfgs = generate_default_cfgs({
# ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz
'inception_resnet_v2.tf_in1k': {
'hf_hub_id': 'timm/',
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.8975, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'conv2d_1a.conv', 'classifier': 'classif',
},
# As per https://arxiv.org/abs/1705.07204 and
# ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz
'inception_resnet_v2.tf_ens_adv_in1k': {
'hf_hub_id': 'timm/',
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.8975, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'conv2d_1a.conv', 'classifier': 'classif',
}
})
@register_model
def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2:
return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs)
register_model_deprecations(__name__, {
'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k',
}) | 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/regnet.py | """RegNet X, Y, Z, and more
Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678
Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py
Paper: `Fast and Accurate Model Scaling` - https://arxiv.org/abs/2103.06877
Original Impl: None
Based on the original PyTorch impl linked above, but rewritten to use my own blocks (adapted from ResNet here)
and cleaned up with more descriptive variable names.
Weights from original pycls impl have been modified:
* first layer from BGR -> RGB as most PyTorch models are
* removed training specific dict entries from checkpoints and keep model state_dict only
* remap names to match the ones here
Supports weight loading from torchvision and classy-vision (incl VISSL SEER)
A number of custom timm model definitions additions including:
* stochastic depth, gradient checkpointing, layer-decay, configurable dilation
* a pre-activation 'V' variant
* only known RegNet-Z model definitions with pretrained weights
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from dataclasses import dataclass, replace
from functools import partial
from typing import Optional, Union, Callable
import numpy as np
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, AvgPool2dSame, ConvNormAct, SEModule, DropPath, GroupNormAct
from timm.layers import get_act_layer, get_norm_act_layer, create_conv2d, make_divisible
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq, named_apply
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['RegNet', 'RegNetCfg'] # model_registry will add each entrypoint fn to this
@dataclass
class RegNetCfg:
depth: int = 21
w0: int = 80
wa: float = 42.63
wm: float = 2.66
group_size: int = 24
bottle_ratio: float = 1.
se_ratio: float = 0.
group_min_ratio: float = 0.
stem_width: int = 32
downsample: Optional[str] = 'conv1x1'
linear_out: bool = False
preact: bool = False
num_features: int = 0
act_layer: Union[str, Callable] = 'relu'
norm_layer: Union[str, Callable] = 'batchnorm'
def quantize_float(f, q):
"""Converts a float to the closest non-zero int divisible by q."""
return int(round(f / q) * q)
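# Worked example (for illustration): quantize_float(80, 24) -> 72, quantize_float(568, 24) -> 576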
def adjust_widths_groups_comp(widths, bottle_ratios, groups, min_ratio=0.):
"""Adjusts the compatibility of widths and groups."""
bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)]
groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)]
if min_ratio:
        # torchvision uses a different rounding scheme to ensure bottleneck widths are divisible by group widths
bottleneck_widths = [make_divisible(w_bot, g, min_ratio) for w_bot, g in zip(bottleneck_widths, groups)]
else:
bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)]
widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)]
return widths, groups
def generate_regnet(width_slope, width_initial, width_mult, depth, group_size, quant=8):
"""Generates per block widths from RegNet parameters."""
assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % quant == 0
# TODO dWr scaling?
# depth = int(depth * (scale ** 0.1))
# width_scale = scale ** 0.4 # dWr scale, exp 0.8 / 2, applied to both group and layer widths
widths_cont = np.arange(depth) * width_slope + width_initial
width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult))
widths = np.round(np.divide(width_initial * np.power(width_mult, width_exps), quant)) * quant
num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1
groups = np.array([group_size for _ in range(num_stages)])
return widths.astype(int).tolist(), num_stages, groups.astype(int).tolist()
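# Worked example (illustrative, values computed by hand from the formula above) with regnety_032-style
# parameters: generate_regnet(width_slope=42.63, width_initial=80, width_mult=2.66, depth=21, group_size=24)
# yields per-block widths quantized to multiples of 8 that fall into 4 stages with (2, 5, 13, 1) blocks;
# adjust_widths_groups_comp then rounds the stage widths to (72, 216, 576, 1512) for group_size 24.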
def downsample_conv(
in_chs,
out_chs,
kernel_size=1,
stride=1,
dilation=1,
norm_layer=None,
preact=False,
):
norm_layer = norm_layer or nn.BatchNorm2d
kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
dilation = dilation if kernel_size > 1 else 1
if preact:
return create_conv2d(
in_chs,
out_chs,
kernel_size,
stride=stride,
dilation=dilation,
)
else:
return ConvNormAct(
in_chs,
out_chs,
kernel_size,
stride=stride,
dilation=dilation,
norm_layer=norm_layer,
apply_act=False,
)
def downsample_avg(
in_chs,
out_chs,
kernel_size=1,
stride=1,
dilation=1,
norm_layer=None,
preact=False,
):
""" AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment."""
norm_layer = norm_layer or nn.BatchNorm2d
avg_stride = stride if dilation == 1 else 1
pool = nn.Identity()
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
if preact:
conv = create_conv2d(in_chs, out_chs, 1, stride=1)
else:
conv = ConvNormAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, apply_act=False)
return nn.Sequential(*[pool, conv])
def create_shortcut(
downsample_type,
in_chs,
out_chs,
kernel_size,
stride,
dilation=(1, 1),
norm_layer=None,
preact=False,
):
assert downsample_type in ('avg', 'conv1x1', '', None)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
dargs = dict(stride=stride, dilation=dilation[0], norm_layer=norm_layer, preact=preact)
if not downsample_type:
return None # no shortcut, no downsample
elif downsample_type == 'avg':
return downsample_avg(in_chs, out_chs, **dargs)
else:
return downsample_conv(in_chs, out_chs, kernel_size=kernel_size, **dargs)
else:
return nn.Identity() # identity shortcut (no downsample)
class Bottleneck(nn.Module):
""" RegNet Bottleneck
    This is almost exactly the same as a ResNet Bottleneck. The main difference is the SE block is moved from
after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels.
"""
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=(1, 1),
bottle_ratio=1,
group_size=1,
se_ratio=0.25,
downsample='conv1x1',
linear_out=False,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
drop_block=None,
drop_path_rate=0.,
):
super(Bottleneck, self).__init__()
act_layer = get_act_layer(act_layer)
bottleneck_chs = int(round(out_chs * bottle_ratio))
groups = bottleneck_chs // group_size
cargs = dict(act_layer=act_layer, norm_layer=norm_layer)
self.conv1 = ConvNormAct(in_chs, bottleneck_chs, kernel_size=1, **cargs)
self.conv2 = ConvNormAct(
bottleneck_chs,
bottleneck_chs,
kernel_size=3,
stride=stride,
dilation=dilation[0],
groups=groups,
drop_layer=drop_block,
**cargs,
)
if se_ratio:
se_channels = int(round(in_chs * se_ratio))
self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer)
else:
self.se = nn.Identity()
self.conv3 = ConvNormAct(bottleneck_chs, out_chs, kernel_size=1, apply_act=False, **cargs)
self.act3 = nn.Identity() if linear_out else act_layer()
self.downsample = create_shortcut(
downsample,
in_chs,
out_chs,
kernel_size=1,
stride=stride,
dilation=dilation,
norm_layer=norm_layer,
)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def zero_init_last(self):
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
x = self.se(x)
x = self.conv3(x)
if self.downsample is not None:
# NOTE stuck with downsample as the attr name due to weight compatibility
# now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity()
x = self.drop_path(x) + self.downsample(shortcut)
x = self.act3(x)
return x
class PreBottleneck(nn.Module):
""" RegNet Bottleneck
    This is almost exactly the same as a ResNet Bottleneck. The main difference is the SE block is moved from
after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels.
"""
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=(1, 1),
bottle_ratio=1,
group_size=1,
se_ratio=0.25,
downsample='conv1x1',
linear_out=False,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
drop_block=None,
drop_path_rate=0.,
):
super(PreBottleneck, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
bottleneck_chs = int(round(out_chs * bottle_ratio))
groups = bottleneck_chs // group_size
self.norm1 = norm_act_layer(in_chs)
self.conv1 = create_conv2d(in_chs, bottleneck_chs, kernel_size=1)
self.norm2 = norm_act_layer(bottleneck_chs)
self.conv2 = create_conv2d(
bottleneck_chs,
bottleneck_chs,
kernel_size=3,
stride=stride,
dilation=dilation[0],
groups=groups,
)
if se_ratio:
se_channels = int(round(in_chs * se_ratio))
self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer)
else:
self.se = nn.Identity()
self.norm3 = norm_act_layer(bottleneck_chs)
self.conv3 = create_conv2d(bottleneck_chs, out_chs, kernel_size=1)
self.downsample = create_shortcut(
downsample,
in_chs,
out_chs,
kernel_size=1,
stride=stride,
dilation=dilation,
preact=True,
)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def zero_init_last(self):
pass
def forward(self, x):
x = self.norm1(x)
shortcut = x
x = self.conv1(x)
x = self.norm2(x)
x = self.conv2(x)
x = self.se(x)
x = self.norm3(x)
x = self.conv3(x)
if self.downsample is not None:
# NOTE stuck with downsample as the attr name due to weight compatibility
# now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity()
x = self.drop_path(x) + self.downsample(shortcut)
return x
class RegStage(nn.Module):
"""Stage (sequence of blocks w/ the same output shape)."""
def __init__(
self,
depth,
in_chs,
out_chs,
stride,
dilation,
drop_path_rates=None,
block_fn=Bottleneck,
**block_kwargs,
):
super(RegStage, self).__init__()
self.grad_checkpointing = False
first_dilation = 1 if dilation in (1, 2) else 2
for i in range(depth):
block_stride = stride if i == 0 else 1
block_in_chs = in_chs if i == 0 else out_chs
block_dilation = (first_dilation, dilation)
dpr = drop_path_rates[i] if drop_path_rates is not None else 0.
name = "b{}".format(i + 1)
self.add_module(
name,
block_fn(
block_in_chs,
out_chs,
stride=block_stride,
dilation=block_dilation,
drop_path_rate=dpr,
**block_kwargs,
)
)
first_dilation = dilation
def forward(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.children(), x)
else:
for block in self.children():
x = block(x)
return x
class RegNet(nn.Module):
"""RegNet-X, Y, and Z Models
Paper: https://arxiv.org/abs/2003.13678
Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py
"""
def __init__(
self,
cfg: RegNetCfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.,
drop_path_rate=0.,
zero_init_last=True,
**kwargs,
):
"""
Args:
cfg (RegNetCfg): Model architecture configuration
in_chans (int): Number of input channels (default: 3)
num_classes (int): Number of classifier classes (default: 1000)
output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32)
global_pool (str): Global pooling type (default: 'avg')
drop_rate (float): Dropout rate (default: 0.)
drop_path_rate (float): Stochastic depth drop-path rate (default: 0.)
zero_init_last (bool): Zero-init last weight of residual path
            kwargs (dict): Extra kwargs overlaid onto cfg
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
assert output_stride in (8, 16, 32)
cfg = replace(cfg, **kwargs) # update cfg with extra passed kwargs
# Construct the stem
stem_width = cfg.stem_width
na_args = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer)
if cfg.preact:
self.stem = create_conv2d(in_chans, stem_width, 3, stride=2)
else:
self.stem = ConvNormAct(in_chans, stem_width, 3, stride=2, **na_args)
self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')]
# Construct the stages
prev_width = stem_width
curr_stride = 2
per_stage_args, common_args = self._get_stage_args(
cfg,
output_stride=output_stride,
drop_path_rate=drop_path_rate,
)
assert len(per_stage_args) == 4
block_fn = PreBottleneck if cfg.preact else Bottleneck
for i, stage_args in enumerate(per_stage_args):
stage_name = "s{}".format(i + 1)
self.add_module(
stage_name,
RegStage(
in_chs=prev_width,
block_fn=block_fn,
**stage_args,
**common_args,
)
)
prev_width = stage_args['out_chs']
curr_stride *= stage_args['stride']
self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)]
# Construct the head
if cfg.num_features:
self.final_conv = ConvNormAct(prev_width, cfg.num_features, kernel_size=1, **na_args)
self.num_features = cfg.num_features
else:
final_act = cfg.linear_out or cfg.preact
self.final_conv = get_act_layer(cfg.act_layer)() if final_act else nn.Identity()
self.num_features = prev_width
self.head = ClassifierHead(
in_features=self.num_features,
num_classes=num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
def _get_stage_args(self, cfg: RegNetCfg, default_stride=2, output_stride=32, drop_path_rate=0.):
# Generate RegNet ws per block
widths, num_stages, stage_gs = generate_regnet(cfg.wa, cfg.w0, cfg.wm, cfg.depth, cfg.group_size)
# Convert to per stage format
stage_widths, stage_depths = np.unique(widths, return_counts=True)
stage_br = [cfg.bottle_ratio for _ in range(num_stages)]
stage_strides = []
stage_dilations = []
net_stride = 2
dilation = 1
for _ in range(num_stages):
if net_stride >= output_stride:
dilation *= default_stride
stride = 1
else:
stride = default_stride
net_stride *= stride
stage_strides.append(stride)
stage_dilations.append(dilation)
stage_dpr = np.split(np.linspace(0, drop_path_rate, sum(stage_depths)), np.cumsum(stage_depths[:-1]))
# Adjust the compatibility of ws and gws
stage_widths, stage_gs = adjust_widths_groups_comp(
stage_widths, stage_br, stage_gs, min_ratio=cfg.group_min_ratio)
arg_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_size', 'drop_path_rates']
per_stage_args = [
dict(zip(arg_names, params)) for params in
zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_br, stage_gs, stage_dpr)
]
common_args = dict(
downsample=cfg.downsample,
se_ratio=cfg.se_ratio,
linear_out=cfg.linear_out,
act_layer=cfg.act_layer,
norm_layer=cfg.norm_layer,
)
return per_stage_args, common_args
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^s(\d+)' if coarse else r'^s(\d+)\.b(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in list(self.children())[1:-1]:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.s1(x)
x = self.s2(x)
x = self.s3(x)
x = self.s4(x)
x = self.final_conv(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif zero_init_last and hasattr(module, 'zero_init_last'):
module.zero_init_last()
def _filter_fn(state_dict):
state_dict = state_dict.get('model', state_dict)
replaces = [
('f.a.0', 'conv1.conv'),
('f.a.1', 'conv1.bn'),
('f.b.0', 'conv2.conv'),
('f.b.1', 'conv2.bn'),
('f.final_bn', 'conv3.bn'),
('f.se.excitation.0', 'se.fc1'),
('f.se.excitation.2', 'se.fc2'),
('f.se', 'se'),
('f.c.0', 'conv3.conv'),
('f.c.1', 'conv3.bn'),
('f.c', 'conv3.conv'),
('proj.0', 'downsample.conv'),
('proj.1', 'downsample.bn'),
('proj', 'downsample.conv'),
]
if 'classy_state_dict' in state_dict:
# classy-vision & vissl (SEER) weights
import re
state_dict = state_dict['classy_state_dict']['base_model']['model']
out = {}
for k, v in state_dict['trunk'].items():
k = k.replace('_feature_blocks.conv1.stem.0', 'stem.conv')
k = k.replace('_feature_blocks.conv1.stem.1', 'stem.bn')
k = re.sub(
r'^_feature_blocks.res\d.block(\d)-(\d+)',
lambda x: f's{int(x.group(1))}.b{int(x.group(2)) + 1}', k)
k = re.sub(r's(\d)\.b(\d+)\.bn', r's\1.b\2.downsample.bn', k)
for s, r in replaces:
k = k.replace(s, r)
out[k] = v
for k, v in state_dict['heads'].items():
if 'projection_head' in k or 'prototypes' in k:
continue
k = k.replace('0.clf.0', 'head.fc')
out[k] = v
return out
if 'stem.0.weight' in state_dict:
# torchvision weights
import re
out = {}
for k, v in state_dict.items():
k = k.replace('stem.0', 'stem.conv')
k = k.replace('stem.1', 'stem.bn')
k = re.sub(
r'trunk_output.block(\d)\.block(\d+)\-(\d+)',
lambda x: f's{int(x.group(1))}.b{int(x.group(3)) + 1}', k)
for s, r in replaces:
k = k.replace(s, r)
k = k.replace('fc.', 'head.fc.')
out[k] = v
return out
return state_dict
# Model FLOPS = three trailing digits * 10^8
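# e.g. regnetx_002 ~ 0.2 GF (200 MF), regnety_032 ~ 3.2 GF, regnety_320 ~ 32 GF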
model_cfgs = dict(
# RegNet-X
regnetx_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13),
regnetx_004=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22),
regnetx_004_tv=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22, group_min_ratio=0.9),
regnetx_006=RegNetCfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16),
regnetx_008=RegNetCfg(w0=56, wa=35.73, wm=2.28, group_size=16, depth=16),
regnetx_016=RegNetCfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18),
regnetx_032=RegNetCfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25),
regnetx_040=RegNetCfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23),
regnetx_064=RegNetCfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17),
regnetx_080=RegNetCfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23),
regnetx_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19),
regnetx_160=RegNetCfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22),
regnetx_320=RegNetCfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23),
# RegNet-Y
regnety_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25),
regnety_004=RegNetCfg(w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25),
regnety_006=RegNetCfg(w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25),
regnety_008=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25),
regnety_008_tv=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25, group_min_ratio=0.9),
regnety_016=RegNetCfg(w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25),
regnety_032=RegNetCfg(w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25),
regnety_040=RegNetCfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25),
regnety_064=RegNetCfg(w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25),
regnety_080=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25),
regnety_080_tv=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25, group_min_ratio=0.9),
regnety_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25),
regnety_160=RegNetCfg(w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25),
regnety_320=RegNetCfg(w0=232, wa=115.89, wm=2.53, group_size=232, depth=20, se_ratio=0.25),
regnety_640=RegNetCfg(w0=352, wa=147.48, wm=2.4, group_size=328, depth=20, se_ratio=0.25),
regnety_1280=RegNetCfg(w0=456, wa=160.83, wm=2.52, group_size=264, depth=27, se_ratio=0.25),
regnety_2560=RegNetCfg(w0=640, wa=230.83, wm=2.53, group_size=373, depth=27, se_ratio=0.25),
#regnety_2560=RegNetCfg(w0=640, wa=124.47, wm=2.04, group_size=848, depth=27, se_ratio=0.25),
# Experimental
regnety_040_sgn=RegNetCfg(
w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25,
act_layer='silu', norm_layer=partial(GroupNormAct, group_size=16)),
# regnetv = 'preact regnet y'
regnetv_040=RegNetCfg(
depth=22, w0=96, wa=31.41, wm=2.24, group_size=64, se_ratio=0.25, preact=True, act_layer='silu'),
regnetv_064=RegNetCfg(
depth=25, w0=112, wa=33.22, wm=2.27, group_size=72, se_ratio=0.25, preact=True, act_layer='silu',
downsample='avg'),
# RegNet-Z (unverified)
regnetz_005=RegNetCfg(
depth=21, w0=16, wa=10.7, wm=2.51, group_size=4, bottle_ratio=4.0, se_ratio=0.25,
downsample=None, linear_out=True, num_features=1024, act_layer='silu',
),
regnetz_040=RegNetCfg(
depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25,
downsample=None, linear_out=True, num_features=0, act_layer='silu',
),
regnetz_040_h=RegNetCfg(
depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25,
downsample=None, linear_out=True, num_features=1536, act_layer='silu',
),
)
def _create_regnet(variant, pretrained, **kwargs):
return build_model_with_cfg(
RegNet, variant, pretrained,
model_cfg=model_cfgs[variant],
pretrained_filter_fn=_filter_fn,
**kwargs)
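# NOTE: extra kwargs passed through the factory are overlaid onto the RegNetCfg via dataclasses.replace,
# e.g. (illustrative) timm.create_model('regnety_032', se_ratio=0.) would build the config with SE disabled.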
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'test_input_size': (3, 288, 288), 'crop_pct': 0.95, 'test_crop_pct': 1.0,
'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
def _cfgpyc(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'license': 'mit', 'origin_url': 'https://github.com/facebookresearch/pycls', **kwargs
}
def _cfgtv2(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.965, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'license': 'bsd-3-clause', 'origin_url': 'https://github.com/pytorch/vision', **kwargs
}
default_cfgs = generate_default_cfgs({
# timm trained models
'regnety_032.ra_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth'),
'regnety_040.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_040_ra3-670e1166.pth'),
'regnety_064.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_064_ra3-aa26dc7d.pth'),
'regnety_080.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_080_ra3-1fdc4344.pth'),
'regnety_120.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),
'regnety_160.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),
'regnety_160.lion_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),
# timm in12k pretrain
'regnety_120.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
'regnety_160.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
# timm custom arch (v and z guess) + trained models
'regnety_040_sgn.untrained': _cfg(url=''),
'regnetv_040.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_040_ra3-c248f51f.pth',
first_conv='stem'),
'regnetv_064.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_064_ra3-530616c2.pth',
first_conv='stem'),
'regnetz_005.untrained': _cfg(url=''),
'regnetz_040.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040_ra3-9007edf5.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)),
'regnetz_040_h.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040h_ra3-f594343b.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)),
# used in DeiT for distillation (from Facebook DeiT GitHub repository)
'regnety_160.deit_in1k': _cfg(
hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth'),
'regnetx_004_tv.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth'),
'regnetx_008.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth'),
'regnetx_016.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth'),
'regnetx_032.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth'),
'regnetx_080.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth'),
'regnetx_160.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth'),
'regnetx_320.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth'),
'regnety_004.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth'),
'regnety_008_tv.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth'),
'regnety_016.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth'),
'regnety_032.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth'),
'regnety_080_tv.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth'),
'regnety_160.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth'),
'regnety_320.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth'),
'regnety_160.swag_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth', license='cc-by-nc-4.0',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_320.swag_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth', license='cc-by-nc-4.0',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_1280.swag_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth', license='cc-by-nc-4.0',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_160.swag_lc_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth', license='cc-by-nc-4.0'),
'regnety_320.swag_lc_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth', license='cc-by-nc-4.0'),
'regnety_1280.swag_lc_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth', license='cc-by-nc-4.0'),
'regnety_320.seer_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
license='other', origin_url='https://github.com/facebookresearch/vissl',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_640.seer_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
license='other', origin_url='https://github.com/facebookresearch/vissl',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_1280.seer_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
license='other', origin_url='https://github.com/facebookresearch/vissl',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_2560.seer_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
license='other', origin_url='https://github.com/facebookresearch/vissl',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet256_finetuned_in1k_model_final_checkpoint_phase38.torch',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_320.seer': _cfgtv2(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch',
num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),
'regnety_640.seer': _cfgtv2(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch',
num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),
'regnety_1280.seer': _cfgtv2(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch',
num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),
# FIXME invalid weight <-> model match, mistake on their end
#'regnety_2560.seer': _cfgtv2(
# url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_cosine_rg256gf_noBNhead_wd1e5_fairstore_bs16_node64_sinkhorn10_proto16k_apex_syncBN64_warmup8k/model_final_checkpoint_phase0.torch',
# num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),
'regnetx_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
})
@register_model
def regnetx_002(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-200MF"""
return _create_regnet('regnetx_002', pretrained, **kwargs)
@register_model
def regnetx_004(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-400MF"""
return _create_regnet('regnetx_004', pretrained, **kwargs)
@register_model
def regnetx_004_tv(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-400MF w/ torchvision group rounding"""
return _create_regnet('regnetx_004_tv', pretrained, **kwargs)
@register_model
def regnetx_006(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-600MF"""
return _create_regnet('regnetx_006', pretrained, **kwargs)
@register_model
def regnetx_008(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-800MF"""
return _create_regnet('regnetx_008', pretrained, **kwargs)
@register_model
def regnetx_016(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-1.6GF"""
return _create_regnet('regnetx_016', pretrained, **kwargs)
@register_model
def regnetx_032(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-3.2GF"""
return _create_regnet('regnetx_032', pretrained, **kwargs)
@register_model
def regnetx_040(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-4.0GF"""
return _create_regnet('regnetx_040', pretrained, **kwargs)
@register_model
def regnetx_064(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-6.4GF"""
return _create_regnet('regnetx_064', pretrained, **kwargs)
@register_model
def regnetx_080(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-8.0GF"""
return _create_regnet('regnetx_080', pretrained, **kwargs)
@register_model
def regnetx_120(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-12GF"""
return _create_regnet('regnetx_120', pretrained, **kwargs)
@register_model
def regnetx_160(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-16GF"""
return _create_regnet('regnetx_160', pretrained, **kwargs)
@register_model
def regnetx_320(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-32GF"""
return _create_regnet('regnetx_320', pretrained, **kwargs)
@register_model
def regnety_002(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-200MF"""
return _create_regnet('regnety_002', pretrained, **kwargs)
@register_model
def regnety_004(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-400MF"""
return _create_regnet('regnety_004', pretrained, **kwargs)
@register_model
def regnety_006(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-600MF"""
return _create_regnet('regnety_006', pretrained, **kwargs)
@register_model
def regnety_008(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-800MF"""
return _create_regnet('regnety_008', pretrained, **kwargs)
@register_model
def regnety_008_tv(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-800MF w/ torchvision group rounding"""
return _create_regnet('regnety_008_tv', pretrained, **kwargs)
@register_model
def regnety_016(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-1.6GF"""
return _create_regnet('regnety_016', pretrained, **kwargs)
@register_model
def regnety_032(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-3.2GF"""
return _create_regnet('regnety_032', pretrained, **kwargs)
@register_model
def regnety_040(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-4.0GF"""
return _create_regnet('regnety_040', pretrained, **kwargs)
@register_model
def regnety_064(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-6.4GF"""
return _create_regnet('regnety_064', pretrained, **kwargs)
@register_model
def regnety_080(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-8.0GF"""
return _create_regnet('regnety_080', pretrained, **kwargs)
@register_model
def regnety_080_tv(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-8.0GF w/ torchvision group rounding"""
return _create_regnet('regnety_080_tv', pretrained, **kwargs)
@register_model
def regnety_120(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-12GF"""
return _create_regnet('regnety_120', pretrained, **kwargs)
@register_model
def regnety_160(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-16GF"""
return _create_regnet('regnety_160', pretrained, **kwargs)
@register_model
def regnety_320(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-32GF"""
return _create_regnet('regnety_320', pretrained, **kwargs)
@register_model
def regnety_640(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-64GF"""
return _create_regnet('regnety_640', pretrained, **kwargs)
@register_model
def regnety_1280(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-128GF"""
return _create_regnet('regnety_1280', pretrained, **kwargs)
@register_model
def regnety_2560(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-256GF"""
return _create_regnet('regnety_2560', pretrained, **kwargs)
@register_model
def regnety_040_sgn(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-4.0GF w/ GroupNorm """
return _create_regnet('regnety_040_sgn', pretrained, **kwargs)
@register_model
def regnetv_040(pretrained=False, **kwargs) -> RegNet:
"""RegNetV-4.0GF (pre-activation)"""
return _create_regnet('regnetv_040', pretrained, **kwargs)
@register_model
def regnetv_064(pretrained=False, **kwargs) -> RegNet:
"""RegNetV-6.4GF (pre-activation)"""
return _create_regnet('regnetv_064', pretrained, **kwargs)
@register_model
def regnetz_005(pretrained=False, **kwargs) -> RegNet:
"""RegNetZ-500MF
NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py
    but it's not clear it is equivalent to the paper model, as it is not detailed in the paper.
"""
return _create_regnet('regnetz_005', pretrained, zero_init_last=False, **kwargs)
@register_model
def regnetz_040(pretrained=False, **kwargs) -> RegNet:
"""RegNetZ-4.0GF
NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py
    but it's not clear it is equivalent to the paper model, as it is not detailed in the paper.
"""
return _create_regnet('regnetz_040', pretrained, zero_init_last=False, **kwargs)
@register_model
def regnetz_040_h(pretrained=False, **kwargs) -> RegNet:
"""RegNetZ-4.0GF
NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py
    but it's not clear it is equivalent to the paper model, as it is not detailed in the paper.
"""
return _create_regnet('regnetz_040_h', pretrained, zero_init_last=False, **kwargs)
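# Map deprecated model names to their current equivalents; creating a model via an old
# name (e.g. timm.create_model('regnetz_040h')) should still resolve to the renamed
# variant while emitting a deprecation warning.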
register_model_deprecations(__name__, {
'regnetz_040h': 'regnetz_040_h',
})
# hf_public_repos/pytorch-image-models/timm/models/sknet.py
""" Selective Kernel Networks (ResNet base)
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268)
and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer
to the original paper with some modifications of my own to better balance param count vs accuracy.
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from torch import nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectiveKernel, ConvNormAct, create_attn
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .resnet import ResNet
class SelectiveKernelBasic(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
cardinality=1,
base_width=64,
sk_kwargs=None,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None,
):
super(SelectiveKernelBasic, self).__init__()
sk_kwargs = sk_kwargs or {}
conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
        assert base_width == 64, 'BasicBlock does not support changing base width'
first_planes = planes // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = SelectiveKernel(
inplanes, first_planes, stride=stride, dilation=first_dilation,
aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs)
self.conv2 = ConvNormAct(
first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs)
self.se = create_attn(attn_layer, outplanes)
self.act = act_layer(inplace=True)
self.downsample = downsample
self.drop_path = drop_path
def zero_init_last(self):
if getattr(self.conv2.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv2.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act(x)
return x
class SelectiveKernelBottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
cardinality=1,
base_width=64,
sk_kwargs=None,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None,
):
super(SelectiveKernelBottleneck, self).__init__()
sk_kwargs = sk_kwargs or {}
conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
width = int(math.floor(planes * (base_width / 64)) * cardinality)
first_planes = width // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs)
self.conv2 = SelectiveKernel(
first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality,
aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs)
self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs)
self.se = create_attn(attn_layer, outplanes)
self.act = act_layer(inplace=True)
self.downsample = downsample
self.drop_path = drop_path
def zero_init_last(self):
if getattr(self.conv3.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act(x)
return x
def _create_skresnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ResNet,
variant,
pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'skresnet18.ra_in1k': _cfg(hf_hub_id='timm/'),
'skresnet34.ra_in1k': _cfg(hf_hub_id='timm/'),
'skresnet50.untrained': _cfg(),
'skresnet50d.untrained': _cfg(
first_conv='conv1.0'),
'skresnext50_32x4d.ra_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def skresnet18(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Selective Kernel ResNet-18 model.
    Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...", this
    variation splits the input channels to the selective convolutions to keep the param count down.
"""
sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
model_args = dict(
block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last=False, **kwargs)
return _create_skresnet('skresnet18', pretrained, **model_args)
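# Minimal usage sketch (assumes the containing timm package is installed; pretrained weights,
# where a hub config exists above, are pulled automatically with pretrained=True):
#
#   import timm, torch
#   model = timm.create_model('skresnet18', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # (1, 1000) per the default _cfg above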
@register_model
def skresnet34(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Selective Kernel ResNet-34 model.
    Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...", this
    variation splits the input channels to the selective convolutions to keep the param count down.
"""
sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
model_args = dict(
block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last=False, **kwargs)
return _create_skresnet('skresnet34', pretrained, **model_args)
@register_model
def skresnet50(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Select Kernel ResNet-50 model.
    Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...", this
    variation splits the input channels to the selective convolutions to keep the param count down.
"""
sk_kwargs = dict(split_input=True)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last=False, **kwargs)
return _create_skresnet('skresnet50', pretrained, **model_args)
@register_model
def skresnet50d(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Select Kernel ResNet-50-D model.
    Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...", this
    variation splits the input channels to the selective convolutions to keep the param count down.
"""
sk_kwargs = dict(split_input=True)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs)
return _create_skresnet('skresnet50d', pretrained, **model_args)
@register_model
def skresnext50_32x4d(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to
the SKNet-50 model in the Select Kernel Paper
"""
sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs)
return _create_skresnet('skresnext50_32x4d', pretrained, **model_args)
# hf_public_repos/pytorch-image-models/timm/models/efficientvit_mit.py
""" EfficientViT (by MIT Song Han's Lab)
Paper: `Efficientvit: Enhanced linear attention for high-resolution low-computation visual recognition`
- https://arxiv.org/abs/2205.14756
Adapted from official impl at https://github.com/mit-han-lab/efficientvit
"""
__all__ = ['EfficientVit']
from typing import Any, Optional, Tuple, Union
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, create_conv2d, GELUTanh
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
def val2list(x: Union[list, tuple, Any], repeat_time: int = 1) -> list:
if isinstance(x, (list, tuple)):
return list(x)
return [x for _ in range(repeat_time)]
def val2tuple(x: Union[list, tuple, Any], min_len: int = 1, idx_repeat: int = -1) -> tuple:
# repeat elements if necessary
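    # e.g. val2tuple(0.5, 2) -> (0.5, 0.5); val2tuple([1, 2], 3) -> (1, 2, 2)
    # (the element at idx_repeat is duplicated until min_len entries exist)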
x = val2list(x)
if len(x) > 0:
x[idx_repeat:idx_repeat] = [x[idx_repeat] for _ in range(min_len - len(x))]
return tuple(x)
def get_same_padding(kernel_size: Union[int, Tuple[int, ...]]) -> Union[int, Tuple[int, ...]]:
if isinstance(kernel_size, tuple):
return tuple([get_same_padding(ks) for ks in kernel_size])
else:
        assert kernel_size % 2 > 0, "kernel size should be an odd number"
return kernel_size // 2
class ConvNormAct(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
dilation=1,
groups=1,
bias=False,
dropout=0.,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU,
):
super(ConvNormAct, self).__init__()
self.dropout = nn.Dropout(dropout, inplace=False)
self.conv = create_conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=groups,
bias=bias,
)
self.norm = norm_layer(num_features=out_channels) if norm_layer else nn.Identity()
self.act = act_layer(inplace=True) if act_layer is not None else nn.Identity()
def forward(self, x):
x = self.dropout(x)
x = self.conv(x)
x = self.norm(x)
x = self.act(x)
return x
class DSConv(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
use_bias=False,
norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d),
act_layer=(nn.ReLU6, None),
):
super(DSConv, self).__init__()
use_bias = val2tuple(use_bias, 2)
norm_layer = val2tuple(norm_layer, 2)
act_layer = val2tuple(act_layer, 2)
self.depth_conv = ConvNormAct(
in_channels,
in_channels,
kernel_size,
stride,
groups=in_channels,
norm_layer=norm_layer[0],
act_layer=act_layer[0],
bias=use_bias[0],
)
self.point_conv = ConvNormAct(
in_channels,
out_channels,
1,
norm_layer=norm_layer[1],
act_layer=act_layer[1],
bias=use_bias[1],
)
def forward(self, x):
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class ConvBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
mid_channels=None,
expand_ratio=1,
use_bias=False,
norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d),
act_layer=(nn.ReLU6, None),
):
super(ConvBlock, self).__init__()
use_bias = val2tuple(use_bias, 2)
norm_layer = val2tuple(norm_layer, 2)
act_layer = val2tuple(act_layer, 2)
mid_channels = mid_channels or round(in_channels * expand_ratio)
self.conv1 = ConvNormAct(
in_channels,
mid_channels,
kernel_size,
stride,
norm_layer=norm_layer[0],
act_layer=act_layer[0],
bias=use_bias[0],
)
self.conv2 = ConvNormAct(
mid_channels,
out_channels,
kernel_size,
1,
norm_layer=norm_layer[1],
act_layer=act_layer[1],
bias=use_bias[1],
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class MBConv(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
mid_channels=None,
expand_ratio=6,
use_bias=False,
norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d, nn.BatchNorm2d),
act_layer=(nn.ReLU6, nn.ReLU6, None),
):
super(MBConv, self).__init__()
use_bias = val2tuple(use_bias, 3)
norm_layer = val2tuple(norm_layer, 3)
act_layer = val2tuple(act_layer, 3)
mid_channels = mid_channels or round(in_channels * expand_ratio)
self.inverted_conv = ConvNormAct(
in_channels,
mid_channels,
1,
stride=1,
norm_layer=norm_layer[0],
act_layer=act_layer[0],
bias=use_bias[0],
)
self.depth_conv = ConvNormAct(
mid_channels,
mid_channels,
kernel_size,
stride=stride,
groups=mid_channels,
norm_layer=norm_layer[1],
act_layer=act_layer[1],
bias=use_bias[1],
)
self.point_conv = ConvNormAct(
mid_channels,
out_channels,
1,
norm_layer=norm_layer[2],
act_layer=act_layer[2],
bias=use_bias[2],
)
def forward(self, x):
x = self.inverted_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class FusedMBConv(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
mid_channels=None,
expand_ratio=6,
groups=1,
use_bias=False,
norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d),
act_layer=(nn.ReLU6, None),
):
super(FusedMBConv, self).__init__()
use_bias = val2tuple(use_bias, 2)
norm_layer = val2tuple(norm_layer, 2)
act_layer = val2tuple(act_layer, 2)
mid_channels = mid_channels or round(in_channels * expand_ratio)
self.spatial_conv = ConvNormAct(
in_channels,
mid_channels,
kernel_size,
stride=stride,
groups=groups,
norm_layer=norm_layer[0],
act_layer=act_layer[0],
bias=use_bias[0],
)
self.point_conv = ConvNormAct(
mid_channels,
out_channels,
1,
norm_layer=norm_layer[1],
act_layer=act_layer[1],
bias=use_bias[1],
)
def forward(self, x):
x = self.spatial_conv(x)
x = self.point_conv(x)
return x
class LiteMLA(nn.Module):
"""Lightweight multi-scale linear attention"""
def __init__(
self,
in_channels: int,
out_channels: int,
            heads: Optional[int] = None,
heads_ratio: float = 1.0,
dim=8,
use_bias=False,
norm_layer=(None, nn.BatchNorm2d),
act_layer=(None, None),
kernel_func=nn.ReLU,
scales=(5,),
eps=1e-5,
):
super(LiteMLA, self).__init__()
self.eps = eps
heads = heads or int(in_channels // dim * heads_ratio)
total_dim = heads * dim
use_bias = val2tuple(use_bias, 2)
norm_layer = val2tuple(norm_layer, 2)
act_layer = val2tuple(act_layer, 2)
self.dim = dim
self.qkv = ConvNormAct(
in_channels,
3 * total_dim,
1,
bias=use_bias[0],
norm_layer=norm_layer[0],
act_layer=act_layer[0],
)
self.aggreg = nn.ModuleList([
nn.Sequential(
nn.Conv2d(
3 * total_dim,
3 * total_dim,
scale,
padding=get_same_padding(scale),
groups=3 * total_dim,
bias=use_bias[0],
),
nn.Conv2d(3 * total_dim, 3 * total_dim, 1, groups=3 * heads, bias=use_bias[0]),
)
for scale in scales
])
self.kernel_func = kernel_func(inplace=False)
self.proj = ConvNormAct(
total_dim * (1 + len(scales)),
out_channels,
1,
bias=use_bias[1],
norm_layer=norm_layer[1],
act_layer=act_layer[1],
)
def _attn(self, q, k, v):
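        # Linear attention: compute (k^T v) once and then q @ (k^T v), which scales linearly
        # with the number of spatial tokens rather than quadratically as softmax attention does.
        # The caller pads v with an all-ones channel, so the final channel of `out` accumulates
        # q @ (k^T 1) and is used below as the normalizer in place of a softmax.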
dtype = v.dtype
q, k, v = q.float(), k.float(), v.float()
kv = k.transpose(-1, -2) @ v
out = q @ kv
out = out[..., :-1] / (out[..., -1:] + self.eps)
return out.to(dtype)
def forward(self, x):
B, _, H, W = x.shape
# generate multi-scale q, k, v
qkv = self.qkv(x)
multi_scale_qkv = [qkv]
for op in self.aggreg:
multi_scale_qkv.append(op(qkv))
multi_scale_qkv = torch.cat(multi_scale_qkv, dim=1)
multi_scale_qkv = multi_scale_qkv.reshape(B, -1, 3 * self.dim, H * W).transpose(-1, -2)
q, k, v = multi_scale_qkv.chunk(3, dim=-1)
# lightweight global attention
q = self.kernel_func(q)
k = self.kernel_func(k)
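        # append an all-ones channel to v; it accumulates the normalization term consumed in _attn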
v = F.pad(v, (0, 1), mode="constant", value=1.)
if not torch.jit.is_scripting():
with torch.autocast(device_type=v.device.type, enabled=False):
out = self._attn(q, k, v)
else:
out = self._attn(q, k, v)
# final projection
out = out.transpose(-1, -2).reshape(B, -1, H, W)
out = self.proj(out)
return out
register_notrace_module(LiteMLA)
class EfficientVitBlock(nn.Module):
def __init__(
self,
in_channels,
heads_ratio=1.0,
head_dim=32,
expand_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
):
super(EfficientVitBlock, self).__init__()
self.context_module = ResidualBlock(
LiteMLA(
in_channels=in_channels,
out_channels=in_channels,
heads_ratio=heads_ratio,
dim=head_dim,
norm_layer=(None, norm_layer),
),
nn.Identity(),
)
self.local_module = ResidualBlock(
MBConv(
in_channels=in_channels,
out_channels=in_channels,
expand_ratio=expand_ratio,
use_bias=(True, True, False),
norm_layer=(None, None, norm_layer),
act_layer=(act_layer, act_layer, None),
),
nn.Identity(),
)
def forward(self, x):
x = self.context_module(x)
x = self.local_module(x)
return x
class ResidualBlock(nn.Module):
def __init__(
self,
main: Optional[nn.Module],
shortcut: Optional[nn.Module] = None,
pre_norm: Optional[nn.Module] = None,
):
super(ResidualBlock, self).__init__()
self.pre_norm = pre_norm if pre_norm is not None else nn.Identity()
self.main = main
self.shortcut = shortcut
def forward(self, x):
res = self.main(self.pre_norm(x))
if self.shortcut is not None:
res = res + self.shortcut(x)
return res
def build_local_block(
in_channels: int,
out_channels: int,
stride: int,
expand_ratio: float,
norm_layer: str,
act_layer: str,
fewer_norm: bool = False,
block_type: str = "default",
):
assert block_type in ["default", "large", "fused"]
if expand_ratio == 1:
if block_type == "default":
block = DSConv(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
use_bias=(True, False) if fewer_norm else False,
norm_layer=(None, norm_layer) if fewer_norm else norm_layer,
act_layer=(act_layer, None),
)
else:
block = ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
use_bias=(True, False) if fewer_norm else False,
norm_layer=(None, norm_layer) if fewer_norm else norm_layer,
act_layer=(act_layer, None),
)
else:
if block_type == "default":
block = MBConv(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
expand_ratio=expand_ratio,
use_bias=(True, True, False) if fewer_norm else False,
norm_layer=(None, None, norm_layer) if fewer_norm else norm_layer,
act_layer=(act_layer, act_layer, None),
)
else:
block = FusedMBConv(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
expand_ratio=expand_ratio,
use_bias=(True, False) if fewer_norm else False,
norm_layer=(None, norm_layer) if fewer_norm else norm_layer,
act_layer=(act_layer, None),
)
return block
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, depth, norm_layer, act_layer, block_type='default'):
super().__init__()
self.stride = 2
self.add_module(
'in_conv',
ConvNormAct(
in_chs, out_chs,
kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer,
)
)
stem_block = 0
for _ in range(depth):
self.add_module(f'res{stem_block}', ResidualBlock(
build_local_block(
in_channels=out_chs,
out_channels=out_chs,
stride=1,
expand_ratio=1,
norm_layer=norm_layer,
act_layer=act_layer,
block_type=block_type,
),
nn.Identity(),
))
stem_block += 1
class EfficientVitStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth,
norm_layer,
act_layer,
expand_ratio,
head_dim,
vit_stage=False,
):
super(EfficientVitStage, self).__init__()
blocks = [ResidualBlock(
build_local_block(
in_channels=in_chs,
out_channels=out_chs,
stride=2,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer,
fewer_norm=vit_stage,
),
None,
)]
in_chs = out_chs
if vit_stage:
# for stage 3, 4
for _ in range(depth):
blocks.append(
EfficientVitBlock(
in_channels=in_chs,
head_dim=head_dim,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer,
)
)
else:
# for stage 1, 2
for i in range(1, depth):
blocks.append(ResidualBlock(
build_local_block(
in_channels=in_chs,
out_channels=out_chs,
stride=1,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer
),
nn.Identity(),
))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
return self.blocks(x)
class EfficientVitLargeStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth,
norm_layer,
act_layer,
head_dim,
vit_stage=False,
fewer_norm=False,
):
super(EfficientVitLargeStage, self).__init__()
blocks = [ResidualBlock(
build_local_block(
in_channels=in_chs,
out_channels=out_chs,
stride=2,
expand_ratio=24 if vit_stage else 16,
norm_layer=norm_layer,
act_layer=act_layer,
fewer_norm=vit_stage or fewer_norm,
block_type='default' if fewer_norm else 'fused',
),
None,
)]
in_chs = out_chs
if vit_stage:
# for stage 4
for _ in range(depth):
blocks.append(
EfficientVitBlock(
in_channels=in_chs,
head_dim=head_dim,
expand_ratio=6,
norm_layer=norm_layer,
act_layer=act_layer,
)
)
else:
# for stage 1, 2, 3
for i in range(depth):
blocks.append(ResidualBlock(
build_local_block(
in_channels=in_chs,
out_channels=out_chs,
stride=1,
expand_ratio=4,
norm_layer=norm_layer,
act_layer=act_layer,
fewer_norm=fewer_norm,
block_type='default' if fewer_norm else 'fused',
),
nn.Identity(),
))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
return self.blocks(x)
class ClassifierHead(nn.Module):
def __init__(
self,
in_channels,
widths,
n_classes=1000,
dropout=0.,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
global_pool='avg',
norm_eps=1e-5,
):
super(ClassifierHead, self).__init__()
self.in_conv = ConvNormAct(in_channels, widths[0], 1, norm_layer=norm_layer, act_layer=act_layer)
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True, input_fmt='NCHW')
self.classifier = nn.Sequential(
nn.Linear(widths[0], widths[1], bias=False),
nn.LayerNorm(widths[1], eps=norm_eps),
act_layer(inplace=True) if act_layer is not None else nn.Identity(),
nn.Dropout(dropout, inplace=False),
nn.Linear(widths[1], n_classes, bias=True),
)
def forward(self, x, pre_logits: bool = False):
x = self.in_conv(x)
x = self.global_pool(x)
if pre_logits:
return x
x = self.classifier(x)
return x
class EfficientVit(nn.Module):
def __init__(
self,
in_chans=3,
widths=(),
depths=(),
head_dim=32,
expand_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
global_pool='avg',
head_widths=(),
drop_rate=0.0,
num_classes=1000,
):
super(EfficientVit, self).__init__()
self.grad_checkpointing = False
self.global_pool = global_pool
self.num_classes = num_classes
# input stem
self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer)
stride = self.stem.stride
# stages
self.feature_info = []
self.stages = nn.Sequential()
in_channels = widths[0]
for i, (w, d) in enumerate(zip(widths[1:], depths[1:])):
self.stages.append(EfficientVitStage(
in_channels,
w,
depth=d,
norm_layer=norm_layer,
act_layer=act_layer,
expand_ratio=expand_ratio,
head_dim=head_dim,
vit_stage=i >= 2,
))
stride *= 2
in_channels = w
self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')]
self.num_features = in_channels
self.head_widths = head_widths
self.head_dropout = drop_rate
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
self.head = nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.classifier[-1]
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True)
else:
self.head = nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
class EfficientVitLarge(nn.Module):
def __init__(
self,
in_chans=3,
widths=(),
depths=(),
head_dim=32,
norm_layer=nn.BatchNorm2d,
act_layer=GELUTanh,
global_pool='avg',
head_widths=(),
drop_rate=0.0,
num_classes=1000,
norm_eps=1e-7,
):
super(EfficientVitLarge, self).__init__()
self.grad_checkpointing = False
self.global_pool = global_pool
self.num_classes = num_classes
self.norm_eps = norm_eps
norm_layer = partial(norm_layer, eps=self.norm_eps)
# input stem
self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer, block_type='large')
stride = self.stem.stride
# stages
self.feature_info = []
self.stages = nn.Sequential()
in_channels = widths[0]
for i, (w, d) in enumerate(zip(widths[1:], depths[1:])):
self.stages.append(EfficientVitLargeStage(
in_channels,
w,
depth=d,
norm_layer=norm_layer,
act_layer=act_layer,
head_dim=head_dim,
vit_stage=i >= 3,
fewer_norm=i >= 2,
))
stride *= 2
in_channels = w
self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')]
self.num_features = in_channels
self.head_widths = head_widths
self.head_dropout = drop_rate
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
act_layer=act_layer,
norm_eps=self.norm_eps,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
self.head = nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.classifier[-1]
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
norm_eps=self.norm_eps
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True)
else:
self.head = nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.in_conv.conv',
'classifier': 'head.classifier.4',
'crop_pct': 0.95,
'input_size': (3, 224, 224),
'pool_size': (7, 7),
**kwargs,
}
default_cfgs = generate_default_cfgs({
'efficientvit_b0.r224_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientvit_b1.r224_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientvit_b1.r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0,
),
'efficientvit_b1.r288_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0,
),
'efficientvit_b2.r224_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientvit_b2.r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0,
),
'efficientvit_b2.r288_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0,
),
'efficientvit_b3.r224_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientvit_b3.r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0,
),
'efficientvit_b3.r288_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0,
),
'efficientvit_l1.r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=1.0,
),
'efficientvit_l2.r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=1.0,
),
'efficientvit_l2.r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0,
),
'efficientvit_l2.r288_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0,
),
'efficientvit_l2.r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'efficientvit_l3.r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=1.0,
),
'efficientvit_l3.r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0,
),
'efficientvit_l3.r320_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0,
),
'efficientvit_l3.r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
# 'efficientvit_l0_sam.sam': _cfg(
# # hf_hub_id='timm/',
# input_size=(3, 512, 512), crop_pct=1.0,
# num_classes=0,
# ),
# 'efficientvit_l1_sam.sam': _cfg(
# # hf_hub_id='timm/',
# input_size=(3, 512, 512), crop_pct=1.0,
# num_classes=0,
# ),
# 'efficientvit_l2_sam.sam': _cfg(
    #     # hf_hub_id='timm/',
# input_size=(3, 512, 512), crop_pct=1.0,
# num_classes=0,
# ),
})
def _create_efficientvit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
EfficientVit,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
def _create_efficientvit_large(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
EfficientVitLarge,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
@register_model
def efficientvit_b0(pretrained=False, **kwargs):
model_args = dict(
widths=(8, 16, 32, 64, 128), depths=(1, 2, 2, 2, 2), head_dim=16, head_widths=(1024, 1280))
return _create_efficientvit('efficientvit_b0', pretrained=pretrained, **dict(model_args, **kwargs))
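# Minimal usage sketch (assumes the containing timm package is installed):
#
#   import timm, torch
#   model = timm.create_model('efficientvit_b0', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # (1, 1000)
#
#   # as a feature backbone; with the default out_indices=(0, 1, 2, 3) this should return
#   # four feature maps at strides 4/8/16/32 (16, 32, 64, 128 channels for the b0 widths)
#   backbone = timm.create_model('efficientvit_b0', features_only=True)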
@register_model
def efficientvit_b1(pretrained=False, **kwargs):
model_args = dict(
widths=(16, 32, 64, 128, 256), depths=(1, 2, 3, 3, 4), head_dim=16, head_widths=(1536, 1600))
return _create_efficientvit('efficientvit_b1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_b2(pretrained=False, **kwargs):
model_args = dict(
widths=(24, 48, 96, 192, 384), depths=(1, 3, 4, 4, 6), head_dim=32, head_widths=(2304, 2560))
return _create_efficientvit('efficientvit_b2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_b3(pretrained=False, **kwargs):
model_args = dict(
widths=(32, 64, 128, 256, 512), depths=(1, 4, 6, 6, 9), head_dim=32, head_widths=(2304, 2560))
return _create_efficientvit('efficientvit_b3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_l1(pretrained=False, **kwargs):
model_args = dict(
widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 6, 6), head_dim=32, head_widths=(3072, 3200))
return _create_efficientvit_large('efficientvit_l1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_l2(pretrained=False, **kwargs):
model_args = dict(
widths=(32, 64, 128, 256, 512), depths=(1, 2, 2, 8, 8), head_dim=32, head_widths=(3072, 3200))
return _create_efficientvit_large('efficientvit_l2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_l3(pretrained=False, **kwargs):
model_args = dict(
widths=(64, 128, 256, 512, 1024), depths=(1, 2, 2, 8, 8), head_dim=32, head_widths=(6144, 6400))
return _create_efficientvit_large('efficientvit_l3', pretrained=pretrained, **dict(model_args, **kwargs))
# FIXME will wait for v2 SAM models which are pending
# @register_model
# def efficientvit_l0_sam(pretrained=False, **kwargs):
# # only backbone for segment-anything-model weights
# model_args = dict(
# widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 4, 4), head_dim=32, num_classes=0, norm_eps=1e-6)
# return _create_efficientvit_large('efficientvit_l0_sam', pretrained=pretrained, **dict(model_args, **kwargs))
#
#
# @register_model
# def efficientvit_l1_sam(pretrained=False, **kwargs):
# # only backbone for segment-anything-model weights
# model_args = dict(
# widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 6, 6), head_dim=32, num_classes=0, norm_eps=1e-6)
# return _create_efficientvit_large('efficientvit_l1_sam', pretrained=pretrained, **dict(model_args, **kwargs))
#
#
# @register_model
# def efficientvit_l2_sam(pretrained=False, **kwargs):
# # only backbone for segment-anything-model weights
# model_args = dict(
# widths=(32, 64, 128, 256, 512), depths=(1, 2, 2, 8, 8), head_dim=32, num_classes=0, norm_eps=1e-6)
# return _create_efficientvit_large('efficientvit_l2_sam', pretrained=pretrained, **dict(model_args, **kwargs))
# hf_public_repos/pytorch-image-models/timm/models/_manipulate.py
import collections.abc
import math
import re
from collections import defaultdict
from itertools import chain
from typing import Any, Callable, Dict, Iterator, Tuple, Type, Union
import torch
from torch import nn as nn
from torch.utils.checkpoint import checkpoint
__all__ = ['model_parameters', 'named_apply', 'named_modules', 'named_modules_with_params', 'adapt_input_conv',
'group_with_matcher', 'group_modules', 'group_parameters', 'flatten_modules', 'checkpoint_seq']
def model_parameters(model: nn.Module, exclude_head: bool = False):
if exclude_head:
        # FIXME this is a bit of a quick and dirty hack to skip classifier head params based on ordering
return [p for p in model.parameters()][:-2]
else:
return model.parameters()
def named_apply(
fn: Callable,
module: nn.Module, name='',
depth_first: bool = True,
include_root: bool = False,
) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
fn(module=module, name=name)
return module
def named_modules(
module: nn.Module,
name: str = '',
depth_first: bool = True,
include_root: bool = False,
):
if not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
yield name, module
def named_modules_with_params(
module: nn.Module,
name: str = '',
depth_first: bool = True,
include_root: bool = False,
):
if module._parameters and not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules_with_params(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if module._parameters and depth_first and include_root:
yield name, module
MATCH_PREV_GROUP = (99999,)
def group_with_matcher(
named_objects: Iterator[Tuple[str, Any]],
group_matcher: Union[Dict, Callable],
return_values: bool = False,
reverse: bool = False
):
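    # Worked example: with group_matcher = dict(stem=r'^stem', blocks=r'^stages\.(\d+)')
    # and names ['stem.conv.weight', 'stages.0.attn.w', 'stages.1.attn.w', 'head.fc.weight'],
    # the names map to ordinals (0.,), (1., 0.), (1., 1.) and (inf,) respectively, which are
    # then re-numbered to contiguous group/layer ids 0, 1, 2, 3 (unmatched names land last).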
if isinstance(group_matcher, dict):
# dictionary matcher contains a dict of raw-string regex expr that must be compiled
compiled = []
for group_ordinal, (group_name, mspec) in enumerate(group_matcher.items()):
if mspec is None:
continue
# map all matching specifications into 3-tuple (compiled re, prefix, suffix)
if isinstance(mspec, (tuple, list)):
# multi-entry match specifications require each sub-spec to be a 2-tuple (re, suffix)
for sspec in mspec:
compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])]
else:
compiled += [(re.compile(mspec), (group_ordinal,), None)]
group_matcher = compiled
def _get_grouping(name):
if isinstance(group_matcher, (list, tuple)):
for match_fn, prefix, suffix in group_matcher:
r = match_fn.match(name)
if r:
parts = (prefix, r.groups(), suffix)
# map all tuple elem to int for numeric sort, filter out None entries
return tuple(map(float, chain.from_iterable(filter(None, parts))))
return float('inf'), # un-matched layers (neck, head) mapped to largest ordinal
else:
ord = group_matcher(name)
if not isinstance(ord, collections.abc.Iterable):
return ord,
return tuple(ord)
# map layers into groups via ordinals (ints or tuples of ints) from matcher
grouping = defaultdict(list)
for k, v in named_objects:
grouping[_get_grouping(k)].append(v if return_values else k)
# remap to integers
layer_id_to_param = defaultdict(list)
lid = -1
for k in sorted(filter(lambda x: x is not None, grouping.keys())):
if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]:
lid += 1
layer_id_to_param[lid].extend(grouping[k])
if reverse:
assert not return_values, "reverse mapping only sensible for name output"
# output reverse mapping
param_to_layer_id = {}
for lid, lm in layer_id_to_param.items():
for n in lm:
param_to_layer_id[n] = lid
return param_to_layer_id
return layer_id_to_param
def group_parameters(
module: nn.Module,
group_matcher,
return_values: bool = False,
reverse: bool = False,
):
return group_with_matcher(
module.named_parameters(), group_matcher, return_values=return_values, reverse=reverse)
def group_modules(
module: nn.Module,
group_matcher,
return_values: bool = False,
reverse: bool = False,
):
return group_with_matcher(
named_modules_with_params(module), group_matcher, return_values=return_values, reverse=reverse)
def flatten_modules(
named_modules: Iterator[Tuple[str, nn.Module]],
depth: int = 1,
prefix: Union[str, Tuple[str, ...]] = '',
module_types: Union[str, Tuple[Type[nn.Module]]] = 'sequential',
):
prefix_is_tuple = isinstance(prefix, tuple)
if isinstance(module_types, str):
if module_types == 'container':
module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict)
else:
module_types = (nn.Sequential,)
for name, module in named_modules:
if depth and isinstance(module, module_types):
yield from flatten_modules(
module.named_children(),
depth - 1,
prefix=(name,) if prefix_is_tuple else name,
module_types=module_types,
)
else:
if prefix_is_tuple:
name = prefix + (name,)
yield name, module
else:
if prefix:
name = '.'.join([prefix, name])
yield name, module
def checkpoint_seq(
functions,
x,
every=1,
flatten=False,
skip_last=False,
preserve_rng_state=True
):
r"""A helper function for checkpointing sequential models.
Sequential models execute a list of modules/functions in order
(sequentially). Therefore, we can divide such a sequence into segments
    and checkpoint each segment. All checkpointed segments run in :func:`torch.no_grad`
manner, i.e., not storing the intermediate activations. The inputs of each
checkpointed segment will be saved for re-running the segment in the backward pass.
See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
.. warning::
Checkpointing currently only supports :func:`torch.autograd.backward`
and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`
is not supported.
.. warning:
At least one of the inputs needs to have :code:`requires_grad=True` if
grads are needed for model inputs, otherwise the checkpointed part of the
model won't have gradients.
Args:
functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.
x: A Tensor that is input to :attr:`functions`
every: checkpoint every-n functions (default: 1)
flatten (bool): flatten nn.Sequential of nn.Sequentials
skip_last (bool): skip checkpointing the last function in the sequence if True
        preserve_rng_state (bool, optional, default=True): Stash and restore the RNG state
            during each checkpoint so that recomputation in the backward pass reproduces the
            same random behavior (e.g. dropout) as the original forward pass.
Returns:
Output of running :attr:`functions` sequentially on :attr:`*inputs`
Example:
>>> model = nn.Sequential(...)
>>> input_var = checkpoint_seq(model, input_var, every=2)
"""
def run_function(start, end, functions):
def forward(_x):
for j in range(start, end + 1):
_x = functions[j](_x)
return _x
return forward
if isinstance(functions, torch.nn.Sequential):
functions = functions.children()
if flatten:
functions = chain.from_iterable(functions)
if not isinstance(functions, (tuple, list)):
functions = tuple(functions)
num_checkpointed = len(functions)
if skip_last:
num_checkpointed -= 1
end = -1
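    # e.g. 7 functions with every=2, skip_last=True -> checkpointed segments (0, 1), (2, 3), (4, 5);
    # the final function then runs normally (activations stored) via the skip_last branch below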
for start in range(0, num_checkpointed, every):
end = min(start + every - 1, num_checkpointed - 1)
x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)
if skip_last:
return run_function(end + 1, len(functions) - 1, functions)(x)
return x
def adapt_input_conv(in_chans, conv_weight):
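    # e.g. a (64, 3, 7, 7) RGB stem weight adapted to in_chans=1 becomes (64, 1, 7, 7) by summing
    # over the RGB dim; for in_chans=6 the RGB weights are tiled to (64, 6, 7, 7) and scaled by
    # 3/6 so activation magnitudes stay roughly comparable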
conv_type = conv_weight.dtype
conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU
O, I, J, K = conv_weight.shape
if in_chans == 1:
if I > 3:
assert conv_weight.shape[1] % 3 == 0
# For models with space2depth stems
conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
conv_weight = conv_weight.sum(dim=2, keepdim=False)
else:
conv_weight = conv_weight.sum(dim=1, keepdim=True)
elif in_chans != 3:
if I != 3:
raise NotImplementedError('Weight format not supported by conversion.')
else:
# NOTE this strategy should be better than random init, but there could be other combinations of
# the original RGB input layer weights that'd work better for specific cases.
repeat = int(math.ceil(in_chans / 3))
conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv_weight *= (3 / float(in_chans))
conv_weight = conv_weight.to(conv_type)
return conv_weight
# hf_public_repos/pytorch-image-models/timm/models/helpers.py
from ._builder import *
from ._helpers import *
from ._manipulate import *
from ._prune import *
import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
# hf_public_repos/pytorch-image-models/timm/models/maxxvit.py
""" MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch
This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch.
99% of the implementation was done from the papers; however, some last-minute adjustments were made
based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit
There are multiple sets of models defined for both architectures. Typically, names with a
`_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit.
These configs work well and appear to be a bit faster / lower resource than the paper.
The models without an extra prefix / suffix (coatnet_0_224, maxvit_tiny_224, etc) are intended to
match the paper, BUT without any official pretrained weights it's difficult to confirm a 100% match.
Papers:
MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697
@article{tu2022maxvit,
title={MaxViT: Multi-Axis Vision Transformer},
author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao},
journal={ECCV},
year={2022},
}
CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803
@article{DBLP:journals/corr/abs-2106-04803,
author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan},
title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes},
journal = {CoRR},
volume = {abs/2106.04803},
year = {2021}
}
Hacked together by / Copyright 2022, Ross Wightman
"""
import math
from collections import OrderedDict
from dataclasses import dataclass, replace, field
from functools import partial
from typing import Callable, Optional, Union, Tuple, List
import torch
from torch import nn
from torch.jit import Final
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, ConvMlp, DropPath, LayerNorm, ClassifierHead, NormMlpClassifierHead
from timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, create_pool2d
from timm.layers import trunc_normal_tf_, to_2tuple, extend_tuple, make_divisible, _assert
from timm.layers import RelPosMlp, RelPosBias, RelPosBiasTf, use_fused_attn, resize_rel_pos_bias_table
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import named_apply, checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit']
@dataclass
class MaxxVitTransformerCfg:
dim_head: int = 32
head_first: bool = True # head ordering in qkv channel dim
expand_ratio: float = 4.0
expand_first: bool = True
shortcut_bias: bool = True
attn_bias: bool = True
attn_drop: float = 0.
proj_drop: float = 0.
pool_type: str = 'avg2'
rel_pos_type: str = 'bias'
rel_pos_dim: int = 512 # for relative position types w/ MLP
partition_ratio: int = 32
window_size: Optional[Tuple[int, int]] = None
grid_size: Optional[Tuple[int, int]] = None
no_block_attn: bool = False # disable window block attention for maxvit (ie only grid)
use_nchw_attn: bool = False # for MaxViT variants (not used for CoAt), keep tensors in NCHW order
init_values: Optional[float] = None
act_layer: str = 'gelu'
norm_layer: str = 'layernorm2d'
norm_layer_cl: str = 'layernorm'
norm_eps: float = 1e-6
def __post_init__(self):
if self.grid_size is not None:
self.grid_size = to_2tuple(self.grid_size)
if self.window_size is not None:
self.window_size = to_2tuple(self.window_size)
if self.grid_size is None:
self.grid_size = self.window_size
@dataclass
class MaxxVitConvCfg:
block_type: str = 'mbconv'
expand_ratio: float = 4.0
expand_output: bool = True # calculate expansion channels from output (vs input chs)
kernel_size: int = 3
group_size: int = 1 # 1 == depthwise
pre_norm_act: bool = False # activation after pre-norm
output_bias: bool = True # bias for shortcut + final 1x1 projection conv
stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw'
pool_type: str = 'avg2'
downsample_pool_type: str = 'avg2'
padding: str = ''
attn_early: bool = False # apply attn between conv2 and norm2, instead of after norm2
attn_layer: str = 'se'
attn_act_layer: str = 'silu'
attn_ratio: float = 0.25
init_values: Optional[float] = 1e-6 # for ConvNeXt block, ignored by MBConv
act_layer: str = 'gelu'
norm_layer: str = ''
norm_layer_cl: str = ''
norm_eps: Optional[float] = None
def __post_init__(self):
# mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args
assert self.block_type in ('mbconv', 'convnext')
use_mbconv = self.block_type == 'mbconv'
if not self.norm_layer:
self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d'
if not self.norm_layer_cl and not use_mbconv:
self.norm_layer_cl = 'layernorm'
if self.norm_eps is None:
self.norm_eps = 1e-5 if use_mbconv else 1e-6
self.downsample_pool_type = self.downsample_pool_type or self.pool_type
@dataclass
class MaxxVitCfg:
embed_dim: Tuple[int, ...] = (96, 192, 384, 768)
depths: Tuple[int, ...] = (2, 3, 5, 2)
block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T')
stem_width: Union[int, Tuple[int, int]] = 64
stem_bias: bool = False
conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg)
transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg)
    head_hidden_size: Optional[int] = None
weight_init: str = 'vit_eff'
class Attention2d(nn.Module):
    """ multi-head attention for 2D NCHW tensors"""
    fused_attn: Final[bool]
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
dim_head: int = 32,
bias: bool = True,
expand_first: bool = True,
head_first: bool = True,
rel_pos_cls: Callable = None,
attn_drop: float = 0.,
proj_drop: float = 0.
):
super().__init__()
dim_out = dim_out or dim
dim_attn = dim_out if expand_first else dim
self.num_heads = dim_attn // dim_head
self.dim_head = dim_head
self.head_first = head_first
self.scale = dim_head ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias)
self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B, C, H, W = x.shape
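        # qkv is produced with a 1x1 conv and kept in NCHW; attention mixes the H*W flattened
        # spatial positions per head (tensors are reshaped to (B, heads, dim_head, H*W) below)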
if self.head_first:
q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2)
else:
q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1)
if self.fused_attn:
attn_bias = None
if self.rel_pos is not None:
attn_bias = self.rel_pos.get_bias()
elif shared_rel_pos is not None:
attn_bias = shared_rel_pos
x = torch.nn.functional.scaled_dot_product_attention(
q.transpose(-1, -2).contiguous(),
k.transpose(-1, -2).contiguous(),
v.transpose(-1, -2).contiguous(),
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
).transpose(-1, -2).reshape(B, -1, H, W)
else:
q = q * self.scale
attn = q.transpose(-2, -1) @ k
if self.rel_pos is not None:
attn = self.rel_pos(attn)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
x = self.proj(x)
x = self.proj_drop(x)
return x
class AttentionCl(nn.Module):
""" Channels-last multi-head attention (B, ..., C) """
fused_attn: Final[bool]
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
dim_head: int = 32,
bias: bool = True,
expand_first: bool = True,
head_first: bool = True,
rel_pos_cls: Callable = None,
attn_drop: float = 0.,
proj_drop: float = 0.
):
super().__init__()
dim_out = dim_out or dim
dim_attn = dim_out if expand_first and dim_out > dim else dim
assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim'
self.num_heads = dim_attn // dim_head
self.dim_head = dim_head
self.head_first = head_first
self.scale = dim_head ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias)
self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim_attn, dim_out, bias=bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B = x.shape[0]
restore_shape = x.shape[:-1]
if self.head_first:
q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3)
else:
q, k, v = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.dim_head).transpose(1, 3).unbind(2)
if self.fused_attn:
attn_bias = None
if self.rel_pos is not None:
attn_bias = self.rel_pos.get_bias()
elif shared_rel_pos is not None:
attn_bias = shared_rel_pos
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if self.rel_pos is not None:
attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(restore_shape + (-1,))
x = self.proj(x)
x = self.proj_drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma
return x.mul_(gamma) if self.inplace else x * gamma
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class Downsample2d(nn.Module):
""" A downsample pooling module supporting several maxpool and avgpool modes
* 'max' - MaxPool2d w/ kernel_size 3, stride 2, padding 1
* 'max2' - MaxPool2d w/ kernel_size = stride = 2
* 'avg' - AvgPool2d w/ kernel_size 3, stride 2, padding 1
* 'avg2' - AvgPool2d w/ kernel_size = stride = 2
"""
def __init__(
self,
dim: int,
dim_out: int,
pool_type: str = 'avg2',
padding: str = '',
bias: bool = True,
):
super().__init__()
assert pool_type in ('max', 'max2', 'avg', 'avg2')
if pool_type == 'max':
self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=padding or 1)
elif pool_type == 'max2':
self.pool = create_pool2d('max', 2, padding=padding or 0) # kernel_size == stride == 2
elif pool_type == 'avg':
self.pool = create_pool2d(
'avg', kernel_size=3, stride=2, count_include_pad=False, padding=padding or 1)
else:
self.pool = create_pool2d('avg', 2, padding=padding or 0)
if dim != dim_out:
self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias)
else:
self.expand = nn.Identity()
def forward(self, x):
x = self.pool(x) # spatial downsample
x = self.expand(x) # expand chs
return x
def _init_transformer(module, name, scheme=''):
if isinstance(module, (nn.Conv2d, nn.Linear)):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# vit like
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
class TransformerBlock2d(nn.Module):
""" Transformer block with 2D downsampling
'2D' NCHW tensor layout
Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW
    for spatial pooling, the benefit is minimal, so I ended up using just this variant for CoAt configs.
This impl was faster on TPU w/ PT XLA than the 1D experiment.
"""
def __init__(
self,
dim: int,
dim_out: int,
stride: int = 1,
rel_pos_cls: Callable = None,
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
act_layer = get_act_layer(cfg.act_layer)
if stride == 2:
self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias)
self.norm1 = nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)),
]))
else:
assert dim == dim_out
self.shortcut = nn.Identity()
self.norm1 = norm_layer(dim)
self.attn = Attention2d(
dim,
dim_out,
dim_head=cfg.dim_head,
expand_first=cfg.expand_first,
bias=cfg.attn_bias,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop
)
self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim_out)
self.mlp = ConvMlp(
in_features=dim_out,
hidden_features=int(dim_out * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def init_weights(self, scheme=''):
named_apply(partial(_init_transformer, scheme=scheme), self)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
def _init_conv(module, name, scheme=''):
if isinstance(module, nn.Conv2d):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# efficientnet like
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
nn.init.zeros_(module.bias)
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
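# Sketch of how group_size maps to conv groups (illustrative, channel counts hypothetical):
#   num_groups(None, 64)  -> 1    # regular conv, single group
#   num_groups(1, 64)     -> 64   # depthwise conv, one channel per group
#   num_groups(16, 64)    -> 4    # grouped conv w/ 16 channels per group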
class MbConvBlock(nn.Module):
""" Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand)
"""
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
drop_path: float = 0.
):
super(MbConvBlock, self).__init__()
norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps)
mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio)
groups = num_groups(cfg.group_size, mid_chs)
if stride == 2:
self.shortcut = Downsample2d(
in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias, padding=cfg.padding)
else:
self.shortcut = nn.Identity()
assert cfg.stride_mode in ('pool', '1x1', 'dw')
stride_pool, stride_1, stride_2 = 1, 1, 1
if cfg.stride_mode == 'pool':
            # NOTE not described in the paper; an experiment to find a faster option that avoids striding in the 1x1
stride_pool, dilation_2 = stride, dilation[1]
# FIXME handle dilation of avg pool
elif cfg.stride_mode == '1x1':
# NOTE I don't like this option described in paper, 1x1 w/ stride throws info away
stride_1, dilation_2 = stride, dilation[1]
else:
stride_2, dilation_2 = stride, dilation[0]
self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act)
if stride_pool > 1:
self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type, padding=cfg.padding)
else:
self.down = nn.Identity()
self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1)
self.norm1 = norm_act_layer(mid_chs)
self.conv2_kxk = create_conv2d(
mid_chs, mid_chs, cfg.kernel_size,
stride=stride_2, dilation=dilation_2, groups=groups, padding=cfg.padding)
attn_kwargs = {}
if isinstance(cfg.attn_layer, str):
if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca':
attn_kwargs['act_layer'] = cfg.attn_act_layer
attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs))
# two different orderings for SE and norm2 (due to some weights and trials using SE before norm2)
if cfg.attn_early:
self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
self.norm2 = norm_act_layer(mid_chs)
self.se = None
else:
self.se_early = None
self.norm2 = norm_act_layer(mid_chs)
self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def init_weights(self, scheme=''):
named_apply(partial(_init_conv, scheme=scheme), self)
def forward(self, x):
shortcut = self.shortcut(x)
x = self.pre_norm(x)
x = self.down(x)
# 1x1 expansion conv & norm-act
x = self.conv1_1x1(x)
x = self.norm1(x)
# depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act
x = self.conv2_kxk(x)
if self.se_early is not None:
x = self.se_early(x)
x = self.norm2(x)
if self.se is not None:
x = self.se(x)
# 1x1 linear projection to output width
x = self.conv3_1x1(x)
x = self.drop_path(x) + shortcut
return x
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 7,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
conv_mlp: bool = True,
drop_path: float = 0.
):
super().__init__()
out_chs = out_chs or in_chs
act_layer = get_act_layer(cfg.act_layer)
if conv_mlp:
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
mlp_layer = ConvMlp
else:
assert 'layernorm' in cfg.norm_layer
norm_layer = LayerNorm
mlp_layer = Mlp
self.use_conv_mlp = conv_mlp
if stride == 2:
self.shortcut = Downsample2d(in_chs, out_chs)
elif in_chs != out_chs:
self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias)
else:
self.shortcut = nn.Identity()
assert cfg.stride_mode in ('pool', 'dw')
stride_pool, stride_dw = 1, 1
# FIXME handle dilation?
if cfg.stride_mode == 'pool':
stride_pool = stride
else:
stride_dw = stride
if stride_pool == 2:
self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
else:
self.down = nn.Identity()
self.conv_dw = create_conv2d(
in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1],
depthwise=True, bias=cfg.output_bias)
self.norm = norm_layer(out_chs)
self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer)
if conv_mlp:
self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
else:
self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.down(x)
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
x = self.ls(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = self.ls(x)
x = x.permute(0, 3, 1, 2)
x = self.drop_path(x) + shortcut
return x
def window_partition(x, window_size: List[int]):
B, H, W, C = x.shape
_assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
    _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
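# Round-trip sketch for block (window) partitioning in channels-last layout. The shapes are
# illustrative; any input whose H/W are divisible by the window size behaves the same way.
#   x = torch.randn(2, 64, 64, 96)                      # B, H, W, C
#   w = window_partition(x, [8, 8])                     # (2 * 8 * 8, 8, 8, 96) local windows
#   y = window_reverse(w, [8, 8], [64, 64])             # back to (2, 64, 64, 96)
#   torch.equal(x, y)                                   # True, partition/reverse are exact inverses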
def grid_partition(x, grid_size: List[int]):
B, H, W, C = x.shape
_assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
    _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C)
windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def grid_reverse(windows, grid_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C)
x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C)
return x
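# Grid partitioning is the 'dilated' counterpart of window partitioning: each (8, 8) group gathers
# tokens spaced H//8 apart, giving global token mixing. Illustrative round trip (values hypothetical):
#   x = torch.randn(2, 64, 64, 96)                      # B, H, W, C
#   g = grid_partition(x, [8, 8])                       # (2 * 8 * 8, 8, 8, 96), strided token groups
#   torch.equal(grid_reverse(g, [8, 8], [64, 64]), x)   # True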
def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size):
rel_pos_cls = None
if cfg.rel_pos_type == 'mlp':
rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim)
elif cfg.rel_pos_type == 'bias':
rel_pos_cls = partial(RelPosBias, window_size=window_size)
elif cfg.rel_pos_type == 'bias_tf':
rel_pos_cls = partial(RelPosBiasTf, window_size=window_size)
return rel_pos_cls
class PartitionAttentionCl(nn.Module):
""" Grid or Block partition + Attn + FFN.
NxC 'channels last' tensor layout.
"""
def __init__(
self,
dim: int,
partition_type: str = 'block',
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last
act_layer = get_act_layer(cfg.act_layer)
self.partition_block = partition_type == 'block'
self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn = AttentionCl(
dim,
dim,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
head_first=cfg.head_first,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[1:3]
if self.partition_block:
partitioned = window_partition(x, self.partition_size)
else:
partitioned = grid_partition(x, self.partition_size)
partitioned = self.attn(partitioned)
if self.partition_block:
x = window_reverse(partitioned, self.partition_size, img_size)
else:
x = grid_reverse(partitioned, self.partition_size, img_size)
return x
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class ParallelPartitionAttention(nn.Module):
""" Experimental. Grid and Block partition + single FFN
    NxC 'channels last' tensor layout.
"""
def __init__(
self,
dim: int,
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
assert dim % 2 == 0
norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last
act_layer = get_act_layer(cfg.act_layer)
assert cfg.window_size == cfg.grid_size
self.partition_size = to_2tuple(cfg.window_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn_block = AttentionCl(
dim,
dim // 2,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
head_first=cfg.head_first,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.attn_grid = AttentionCl(
dim,
dim // 2,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
head_first=cfg.head_first,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
out_features=dim,
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[1:3]
partitioned_block = window_partition(x, self.partition_size)
partitioned_block = self.attn_block(partitioned_block)
x_window = window_reverse(partitioned_block, self.partition_size, img_size)
partitioned_grid = grid_partition(x, self.partition_size)
partitioned_grid = self.attn_grid(partitioned_grid)
x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size)
return torch.cat([x_window, x_grid], dim=-1)
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
def window_partition_nchw(x, window_size: List[int]):
B, C, H, W = x.shape
_assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
    _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1])
windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1])
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[1]
x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1])
x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W)
return x
def grid_partition_nchw(x, grid_size: List[int]):
B, C, H, W = x.shape
_assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
    _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1])
windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1])
return windows
@register_notrace_function # reason: int argument is a Proxy
def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[1]
x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1])
x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W)
return x
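# The *_nchw partition helpers mirror the channels-last versions above but keep the NCHW layout
# used by the pure 2D attention path. Illustrative shapes (values hypothetical):
#   x = torch.randn(2, 96, 64, 64)                          # B, C, H, W
#   window_partition_nchw(x, [8, 8]).shape                  # (128, 96, 8, 8)
#   grid_partition_nchw(x, [8, 8]).shape                    # (128, 96, 8, 8)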
class PartitionAttention2d(nn.Module):
""" Grid or Block partition + Attn + FFN
'2D' NCHW tensor layout.
"""
def __init__(
self,
dim: int,
partition_type: str = 'block',
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)  # NOTE this block is NCHW, uses the 2D norm layer
act_layer = get_act_layer(cfg.act_layer)
self.partition_block = partition_type == 'block'
self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn = Attention2d(
dim,
dim,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
head_first=cfg.head_first,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = ConvMlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[-2:]
if self.partition_block:
partitioned = window_partition_nchw(x, self.partition_size)
else:
partitioned = grid_partition_nchw(x, self.partition_size)
partitioned = self.attn(partitioned)
if self.partition_block:
x = window_reverse_nchw(partitioned, self.partition_size, img_size)
else:
x = grid_reverse_nchw(partitioned, self.partition_size, img_size)
return x
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class MaxxVitBlock(nn.Module):
""" MaxVit conv, window partition + FFN , grid partition + FFN
"""
def __init__(
self,
dim: int,
dim_out: int,
stride: int = 1,
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
self.nchw_attn = transformer_cfg.use_nchw_attn
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)
partition_layer = PartitionAttention2d if self.nchw_attn else PartitionAttentionCl
self.attn_block = None if transformer_cfg.no_block_attn else partition_layer(**attn_kwargs)
self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs)
def init_weights(self, scheme=''):
if self.attn_block is not None:
named_apply(partial(_init_transformer, scheme=scheme), self.attn_block)
named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid)
named_apply(partial(_init_conv, scheme=scheme), self.conv)
def forward(self, x):
# NCHW format
x = self.conv(x)
if not self.nchw_attn:
x = x.permute(0, 2, 3, 1) # to NHWC (channels-last)
if self.attn_block is not None:
x = self.attn_block(x)
x = self.attn_grid(x)
if not self.nchw_attn:
x = x.permute(0, 3, 1, 2) # back to NCHW
return x
class ParallelMaxxVitBlock(nn.Module):
""" MaxVit block with parallel cat(window + grid), one FF
Experimental timm block.
"""
def __init__(
self,
dim,
dim_out,
stride=1,
num_conv=2,
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path=0.,
):
super().__init__()
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
if num_conv > 1:
convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)]
convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1)
self.conv = nn.Sequential(*convs)
else:
self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)
def init_weights(self, scheme=''):
named_apply(partial(_init_transformer, scheme=scheme), self.attn)
named_apply(partial(_init_conv, scheme=scheme), self.conv)
def forward(self, x):
x = self.conv(x)
x = x.permute(0, 2, 3, 1)
x = self.attn(x)
x = x.permute(0, 3, 1, 2)
return x
class MaxxVitStage(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 2,
depth: int = 4,
feat_size: Tuple[int, int] = (14, 14),
block_types: Union[str, Tuple[str]] = 'C',
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
drop_path: Union[float, List[float]] = 0.,
):
super().__init__()
self.grad_checkpointing = False
block_types = extend_tuple(block_types, depth)
blocks = []
for i, t in enumerate(block_types):
block_stride = stride if i == 0 else 1
assert t in ('C', 'T', 'M', 'PM')
if t == 'C':
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
blocks += [conv_cls(
in_chs,
out_chs,
stride=block_stride,
cfg=conv_cfg,
drop_path=drop_path[i],
)]
elif t == 'T':
rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size)
blocks += [TransformerBlock2d(
in_chs,
out_chs,
stride=block_stride,
rel_pos_cls=rel_pos_cls,
cfg=transformer_cfg,
drop_path=drop_path[i],
)]
elif t == 'M':
blocks += [MaxxVitBlock(
in_chs,
out_chs,
stride=block_stride,
conv_cfg=conv_cfg,
transformer_cfg=transformer_cfg,
drop_path=drop_path[i],
)]
elif t == 'PM':
blocks += [ParallelMaxxVitBlock(
in_chs,
out_chs,
stride=block_stride,
conv_cfg=conv_cfg,
transformer_cfg=transformer_cfg,
drop_path=drop_path[i],
)]
in_chs = out_chs
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class Stem(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
padding: str = '',
bias: bool = False,
act_layer: str = 'gelu',
norm_layer: str = 'batchnorm2d',
norm_eps: float = 1e-5,
):
super().__init__()
if not isinstance(out_chs, (list, tuple)):
out_chs = to_2tuple(out_chs)
norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps)
self.out_chs = out_chs[-1]
self.stride = 2
self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2, padding=padding, bias=bias)
self.norm1 = norm_act_layer(out_chs[0])
self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1, padding=padding, bias=bias)
def init_weights(self, scheme=''):
named_apply(partial(_init_conv, scheme=scheme), self)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.conv2(x)
return x
def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]):
if cfg.window_size is not None:
assert cfg.grid_size
return cfg
partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio
cfg = replace(cfg, window_size=partition_size, grid_size=partition_size)
return cfg
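# Sketch of the window/grid size derivation (illustrative, assumes the default partition_ratio of 32
# in MaxxVitTransformerCfg): when a cfg leaves window_size unset, both window and grid size are
# derived from the train image size divided by partition_ratio, e.g.
#   img_size (224, 224) -> window_size == grid_size == (7, 7)
#   img_size (256, 256) -> window_size == grid_size == (8, 8)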
def _overlay_kwargs(cfg: MaxxVitCfg, **kwargs):
transformer_kwargs = {}
conv_kwargs = {}
base_kwargs = {}
for k, v in kwargs.items():
if k.startswith('transformer_'):
transformer_kwargs[k.replace('transformer_', '')] = v
elif k.startswith('conv_'):
conv_kwargs[k.replace('conv_', '')] = v
else:
base_kwargs[k] = v
cfg = replace(
cfg,
transformer_cfg=replace(cfg.transformer_cfg, **transformer_kwargs),
conv_cfg=replace(cfg.conv_cfg, **conv_kwargs),
**base_kwargs
)
return cfg
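# Sketch of kwarg routing (illustrative, argument values hypothetical): extra kwargs passed through
# to MaxxVit(...) are split by prefix, e.g.
#   _overlay_kwargs(cfg, transformer_window_size=(8, 8), conv_act_layer='gelu', depths=(2, 2, 5, 2))
# replaces transformer_cfg.window_size, conv_cfg.act_layer, and the top-level depths field, leaving
# all other config values untouched.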
class MaxxVit(nn.Module):
""" CoaTNet + MaxVit base model.
Highly configurable for different block compositions, tensor layouts, pooling types.
"""
def __init__(
self,
cfg: MaxxVitCfg,
img_size: Union[int, Tuple[int, int]] = 224,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
drop_rate: float = 0.,
drop_path_rate: float = 0.,
**kwargs,
):
super().__init__()
img_size = to_2tuple(img_size)
if kwargs:
cfg = _overlay_kwargs(cfg, **kwargs)
transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size)
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = cfg.embed_dim[-1]
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
self.stem = Stem(
in_chs=in_chans,
out_chs=cfg.stem_width,
padding=cfg.conv_cfg.padding,
bias=cfg.stem_bias,
act_layer=cfg.conv_cfg.act_layer,
norm_layer=cfg.conv_cfg.norm_layer,
norm_eps=cfg.conv_cfg.norm_eps,
)
stride = self.stem.stride
self.feature_info += [dict(num_chs=self.stem.out_chs, reduction=2, module='stem')]
feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))])
num_stages = len(cfg.embed_dim)
assert len(cfg.depths) == num_stages
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
in_chs = self.stem.out_chs
stages = []
for i in range(num_stages):
stage_stride = 2
out_chs = cfg.embed_dim[i]
feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size])
stages += [MaxxVitStage(
in_chs,
out_chs,
depth=cfg.depths[i],
block_types=cfg.block_type[i],
conv_cfg=cfg.conv_cfg,
transformer_cfg=transformer_cfg,
feat_size=feat_size,
drop_path=dpr[i],
)]
stride *= stage_stride
in_chs = out_chs
self.feature_info += [dict(num_chs=out_chs, reduction=stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
final_norm_layer = partial(get_norm_layer(cfg.transformer_cfg.norm_layer), eps=cfg.transformer_cfg.norm_eps)
self.head_hidden_size = cfg.head_hidden_size
if self.head_hidden_size:
self.norm = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
hidden_size=self.head_hidden_size,
pool_type=global_pool,
drop_rate=drop_rate,
norm_layer=final_norm_layer,
)
else:
# standard classifier head w/ norm, pooling, fc classifier
self.norm = final_norm_layer(self.num_features)
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
# Weight init (default PyTorch init works well for AdamW if scheme not set)
assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff')
if cfg.weight_init:
named_apply(partial(self._init_weights, scheme=cfg.weight_init), self)
def _init_weights(self, module, name, scheme=''):
if hasattr(module, 'init_weights'):
try:
module.init_weights(scheme=scheme)
except TypeError:
module.init_weights()
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _rw_coat_cfg(
stride_mode='pool',
pool_type='avg2',
conv_output_bias=False,
conv_attn_early=False,
conv_attn_act_layer='relu',
conv_norm_layer='',
transformer_shortcut_bias=True,
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
init_values=None,
rel_pos_type='bias',
rel_pos_dim=512,
):
# 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit
# Common differences for initial timm models:
    # - pre-norm layer in MbConv included an activation after norm
# - mbconv expansion calculated from input instead of output chs
# - mbconv shortcut and final 1x1 conv did not have a bias
# - SE act layer was relu, not silu
# - mbconv uses silu in timm, not gelu
# - expansion in attention block done via output proj, not input proj
# Variable differences (evolved over training initial models):
# - avg pool with kernel_size=2 favoured downsampling (instead of maxpool for coat)
# - SE attention was between conv2 and norm/act
# - default to avg pool for mbconv downsample instead of 1x1 or dw conv
# - transformer block shortcut has no bias
return dict(
conv_cfg=MaxxVitConvCfg(
stride_mode=stride_mode,
pool_type=pool_type,
pre_norm_act=True,
expand_output=False,
output_bias=conv_output_bias,
attn_early=conv_attn_early,
attn_act_layer=conv_attn_act_layer,
act_layer='silu',
norm_layer=conv_norm_layer,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
shortcut_bias=transformer_shortcut_bias,
pool_type=pool_type,
init_values=init_values,
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
def _rw_max_cfg(
stride_mode='dw',
pool_type='avg2',
conv_output_bias=False,
conv_attn_ratio=1 / 16,
conv_norm_layer='',
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
window_size=None,
dim_head=32,
init_values=None,
rel_pos_type='bias',
rel_pos_dim=512,
):
# 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit
# Differences of initial timm models:
# - mbconv expansion calculated from input instead of output chs
# - mbconv shortcut and final 1x1 conv did not have a bias
# - mbconv uses silu in timm, not gelu
# - expansion in attention block done via output proj, not input proj
return dict(
conv_cfg=MaxxVitConvCfg(
stride_mode=stride_mode,
pool_type=pool_type,
expand_output=False,
output_bias=conv_output_bias,
attn_ratio=conv_attn_ratio,
act_layer='silu',
norm_layer=conv_norm_layer,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
pool_type=pool_type,
dim_head=dim_head,
window_size=window_size,
init_values=init_values,
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
def _next_cfg(
stride_mode='dw',
pool_type='avg2',
conv_norm_layer='layernorm2d',
conv_norm_layer_cl='layernorm',
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
window_size=None,
no_block_attn=False,
init_values=1e-6,
rel_pos_type='mlp', # MLP by default for maxxvit
rel_pos_dim=512,
):
# For experimental models with convnext instead of mbconv
init_values = to_2tuple(init_values)
return dict(
conv_cfg=MaxxVitConvCfg(
block_type='convnext',
stride_mode=stride_mode,
pool_type=pool_type,
expand_output=False,
init_values=init_values[0],
norm_layer=conv_norm_layer,
norm_layer_cl=conv_norm_layer_cl,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
pool_type=pool_type,
window_size=window_size,
no_block_attn=no_block_attn, # enabled for MaxxViT-V2
init_values=init_values[1],
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
def _tf_cfg():
return dict(
conv_cfg=MaxxVitConvCfg(
norm_eps=1e-3,
act_layer='gelu_tanh',
padding='same',
),
transformer_cfg=MaxxVitTransformerCfg(
norm_eps=1e-5,
act_layer='gelu_tanh',
            head_first=False,  # heads are interleaved (q_nh, q_hdim, k_nh, k_hdim, ...)
rel_pos_type='bias_tf',
),
)
model_cfgs = dict(
# timm specific CoAtNet configs
coatnet_pico_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 3, 5, 2),
stem_width=(32, 64),
**_rw_max_cfg( # using newer max defaults here
conv_output_bias=True,
conv_attn_ratio=0.25,
),
),
coatnet_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
**_rw_max_cfg( # using newer max defaults here
stride_mode='pool',
conv_output_bias=True,
conv_attn_ratio=0.25,
),
),
coatnet_0_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
conv_attn_early=True,
transformer_shortcut_bias=False,
),
),
coatnet_1_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_early=True,
transformer_shortcut_bias=False,
)
),
coatnet_2_rw=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=(64, 128),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
#init_values=1e-6,
),
),
coatnet_3_rw=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=(96, 192),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
),
),
# Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos)
coatnet_bn_0_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_early=True,
transformer_shortcut_bias=False,
transformer_norm_layer='batchnorm2d',
)
),
coatnet_rmlp_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
**_rw_max_cfg(
conv_output_bias=True,
conv_attn_ratio=0.25,
rel_pos_type='mlp',
rel_pos_dim=384,
),
),
coatnet_rmlp_0_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
rel_pos_type='mlp',
),
),
coatnet_rmlp_1_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=(32, 64),
**_rw_coat_cfg(
pool_type='max',
conv_attn_early=True,
transformer_shortcut_bias=False,
rel_pos_type='mlp',
rel_pos_dim=384, # was supposed to be 512, woops
),
),
coatnet_rmlp_1_rw2=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
rel_pos_type='mlp',
            rel_pos_dim=512,  # 512 here, as originally intended (the rmlp_1_rw config above used 384 by mistake)
),
),
coatnet_rmlp_2_rw=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=(64, 128),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
rel_pos_type='mlp'
),
),
coatnet_rmlp_3_rw=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=(96, 192),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
rel_pos_type='mlp'
),
),
coatnet_nano_cc=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
block_type=('C', 'C', ('C', 'T'), ('C', 'T')),
**_rw_coat_cfg(),
),
coatnext_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
weight_init='normal',
**_next_cfg(
rel_pos_type='bias',
init_values=(1e-5, None)
),
),
# Trying to be like the CoAtNet paper configs
coatnet_0=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 5, 2),
stem_width=64,
head_hidden_size=768,
),
coatnet_1=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=64,
head_hidden_size=768,
),
coatnet_2=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=128,
head_hidden_size=1024,
),
coatnet_3=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=192,
head_hidden_size=1536,
),
coatnet_4=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 12, 28, 2),
stem_width=192,
head_hidden_size=1536,
),
coatnet_5=MaxxVitCfg(
embed_dim=(256, 512, 1280, 2048),
depths=(2, 12, 28, 2),
stem_width=192,
head_hidden_size=2048,
),
# Experimental MaxVit configs
maxvit_pico_rw=MaxxVitCfg(
embed_dim=(32, 64, 128, 256),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(24, 32),
**_rw_max_cfg(),
),
maxvit_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_tiny_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_tiny_pm=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('PM',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_rmlp_pico_rw=MaxxVitCfg(
embed_dim=(32, 64, 128, 256),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(24, 32),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_tiny_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_small_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(
rel_pos_type='mlp',
init_values=1e-6,
),
),
maxvit_rmlp_base_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
head_hidden_size=768,
**_rw_max_cfg(
rel_pos_type='mlp',
),
),
maxxvit_rmlp_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
weight_init='normal',
**_next_cfg(),
),
maxxvit_rmlp_tiny_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_next_cfg(),
),
maxxvit_rmlp_small_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(48, 96),
**_next_cfg(),
),
maxxvitv2_nano_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(48, 96),
weight_init='normal',
**_next_cfg(
no_block_attn=True,
rel_pos_type='bias',
),
),
maxxvitv2_rmlp_base_rw=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 12, 2),
block_type=('M',) * 4,
stem_width=(64, 128),
**_next_cfg(
no_block_attn=True,
),
),
maxxvitv2_rmlp_large_rw=MaxxVitCfg(
embed_dim=(160, 320, 640, 1280),
depths=(2, 6, 16, 2),
block_type=('M',) * 4,
stem_width=(80, 160),
head_hidden_size=1280,
**_next_cfg(
no_block_attn=True,
),
),
# Trying to be like the MaxViT paper configs
maxvit_tiny_tf=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=64,
stem_bias=True,
head_hidden_size=512,
**_tf_cfg(),
),
maxvit_small_tf=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=64,
stem_bias=True,
head_hidden_size=768,
**_tf_cfg(),
),
maxvit_base_tf=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=64,
stem_bias=True,
head_hidden_size=768,
**_tf_cfg(),
),
maxvit_large_tf=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=128,
stem_bias=True,
head_hidden_size=1024,
**_tf_cfg(),
),
maxvit_xlarge_tf=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=192,
stem_bias=True,
head_hidden_size=1536,
**_tf_cfg(),
),
)
def checkpoint_filter_fn(state_dict, model: nn.Module):
model_state_dict = model.state_dict()
out_dict = {}
for k, v in state_dict.items():
if k.endswith('relative_position_bias_table'):
m = model.get_submodule(k[:-29])
if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
v = resize_rel_pos_bias_table(
v,
new_window_size=m.window_size,
new_bias_shape=m.relative_position_bias_table.shape,
)
if k in model_state_dict and v.ndim != model_state_dict[k].ndim and v.numel() == model_state_dict[k].numel():
# adapt between conv2d / linear layers
assert v.ndim in (2, 4)
v = v.reshape(model_state_dict[k].shape)
out_dict[k] = v
return out_dict
def _create_maxxvit(variant, cfg_variant=None, pretrained=False, **kwargs):
if cfg_variant is None:
if variant in model_cfgs:
cfg_variant = variant
else:
cfg_variant = '_'.join(variant.split('_')[:-1])
return build_model_with_cfg(
MaxxVit, variant, pretrained,
model_cfg=model_cfgs[cfg_variant],
feature_cfg=dict(flatten_sequential=True),
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
default_cfgs = generate_default_cfgs({
# timm specific CoAtNet configs, ImageNet-1k pretrain, fixed rel-pos
'coatnet_pico_rw_224.untrained': _cfg(url=''),
'coatnet_nano_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth',
crop_pct=0.9),
'coatnet_0_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'),
'coatnet_1_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth'
),
# timm specific CoAtNet configs, ImageNet-12k pretrain w/ 1k fine-tune, fixed rel-pos
'coatnet_2_rw_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/'),
#'coatnet_3_rw_224.untrained': _cfg(url=''),
# Experimental CoAtNet configs w/ ImageNet-12k pretrain -> 1k fine-tune (different norm layers, MLP rel-pos)
'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/'),
'coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/'),
'coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
# Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos)
'coatnet_bn_0_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
crop_pct=0.95),
'coatnet_rmlp_nano_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth',
crop_pct=0.9),
'coatnet_rmlp_0_rw_224.untrained': _cfg(url=''),
'coatnet_rmlp_1_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'),
'coatnet_rmlp_2_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'),
'coatnet_rmlp_3_rw_224.untrained': _cfg(url=''),
'coatnet_nano_cc_224.untrained': _cfg(url=''),
'coatnext_nano_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth',
crop_pct=0.9),
    # ImageNet-12k pretrain CoAtNet
'coatnet_2_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
'coatnet_3_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
'coatnet_rmlp_1_rw2_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
'coatnet_rmlp_2_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
# Trying to be like the CoAtNet paper configs (will adapt if 'tf' weights are ever released)
'coatnet_0_224.untrained': _cfg(url=''),
'coatnet_1_224.untrained': _cfg(url=''),
'coatnet_2_224.untrained': _cfg(url=''),
'coatnet_3_224.untrained': _cfg(url=''),
'coatnet_4_224.untrained': _cfg(url=''),
'coatnet_5_224.untrained': _cfg(url=''),
# timm specific MaxVit configs, ImageNet-1k pretrain or untrained
'maxvit_pico_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_nano_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_tiny_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'),
'maxvit_tiny_rw_256.untrained': _cfg(
url='',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_tiny_pm_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
# timm specific MaxVit w/ MLP rel-pos, ImageNet-1k pretrain
'maxvit_rmlp_pico_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_nano_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_tiny_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_small_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth',
crop_pct=0.9,
),
'maxvit_rmlp_small_rw_256.untrained': _cfg(
url='',
input_size=(3, 256, 256), pool_size=(8, 8)),
# timm specific MaxVit w/ ImageNet-12k pretrain and 1k fine-tune
'maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
),
'maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
# timm specific MaxVit w/ ImageNet-12k pretrain
'maxvit_rmlp_base_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
),
# timm MaxxViT configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks)
'maxxvit_rmlp_nano_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvit_rmlp_tiny_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvit_rmlp_small_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
# timm MaxxViT-V2 configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks, more width, no block attn)
'maxxvitv2_nano_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/'),
'maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxxvitv2_rmlp_large_rw_224.untrained': _cfg(url=''),
'maxxvitv2_rmlp_base_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
# MaxViT models ported from official Tensorflow impl
'maxvit_tiny_tf_224.in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'maxvit_tiny_tf_384.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_tiny_tf_512.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_small_tf_224.in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'maxvit_small_tf_384.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_small_tf_512.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_base_tf_224.in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'maxvit_base_tf_384.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_base_tf_512.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_large_tf_224.in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'maxvit_large_tf_384.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_large_tf_512.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_base_tf_224.in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843),
'maxvit_base_tf_384.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_base_tf_512.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_large_tf_224.in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843),
'maxvit_large_tf_384.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_large_tf_512.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
        input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_xlarge_tf_224.in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843),
'maxvit_xlarge_tf_384.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_xlarge_tf_512.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
})
@register_model
def coatnet_pico_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_pico_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_nano_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_0_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_0_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_1_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_1_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_2_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_2_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_3_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_3_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_bn_0_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_bn_0_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_nano_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_0_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_0_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_1_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_1_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_1_rw2_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_1_rw2_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_2_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_2_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_2_rw_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_2_rw_384', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_3_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_3_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_nano_cc_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_nano_cc_224', pretrained=pretrained, **kwargs)
@register_model
def coatnext_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnext_nano_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_0_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_0_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_1_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_1_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_2_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_2_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_3_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_3_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_4_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_4_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_5_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_5_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_pico_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_nano_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_pico_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_small_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_base_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_base_rw_384', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_pm_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_pm_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvitv2_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvitv2_nano_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvitv2_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvitv2_rmlp_base_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxxvitv2_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvitv2_rmlp_base_rw_384', pretrained=pretrained, **kwargs)
@register_model
def maxxvitv2_rmlp_large_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvitv2_rmlp_large_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_tf_224', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_tf_384', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_tf_512', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_small_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_small_tf_224', 'maxvit_small_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_small_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_small_tf_384', 'maxvit_small_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_small_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_small_tf_512', 'maxvit_small_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_base_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_base_tf_224', 'maxvit_base_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_base_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_base_tf_384', 'maxvit_base_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_base_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_base_tf_512', 'maxvit_base_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_large_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_large_tf_224', 'maxvit_large_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_large_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_large_tf_384', 'maxvit_large_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_large_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_large_tf_512', 'maxvit_large_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_xlarge_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_xlarge_tf_224', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_xlarge_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_xlarge_tf_384', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_xlarge_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_xlarge_tf_512', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_efficientnet_builder.py | """ EfficientNet, MobileNetV3, etc Builder
Assembles EfficientNet and related network feature blocks from string definitions.
Handles stride and dilation calculations, and selects feature extraction points.
Hacked together by / Copyright 2019, Ross Wightman
"""
import logging
import math
import re
from copy import deepcopy
from functools import partial
from typing import Any, Dict, List
import torch.nn as nn
from ._efficientnet_blocks import *
from timm.layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible
__all__ = ["EfficientNetBuilder", "decode_arch_def", "efficientnet_init_weights",
'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT']
_logger = logging.getLogger(__name__)
_DEBUG_BUILDER = False
# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per
# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
# NOTE: momentum varies btw .99 and .9997 depending on source
# .99 in official TF TPU impl
# .9997 (/w .999 in search space) for paper
BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
BN_EPS_TF_DEFAULT = 1e-3
_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)
BlockArgs = List[List[Dict[str, Any]]]
def get_bn_args_tf():
return _BN_ARGS_TF.copy()
def resolve_bn_args(kwargs):
bn_args = {}
bn_momentum = kwargs.pop('bn_momentum', None)
if bn_momentum is not None:
bn_args['momentum'] = bn_momentum
bn_eps = kwargs.pop('bn_eps', None)
if bn_eps is not None:
bn_args['eps'] = bn_eps
return bn_args
def resolve_act_layer(kwargs, default='relu'):
return get_act_layer(kwargs.pop('act_layer', default))
def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9):
"""Round number of filters based on depth multiplier."""
if not multiplier:
return channels
return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit)
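# For orientation, a hedged example of the rounding behaviour (values illustrative):
#   round_channels(32, multiplier=1.2) -> 40 (32 * 1.2 = 38.4, rounded to a multiple of 8)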
def _log_info_if(msg, condition):
if condition:
_logger.info(msg)
def _parse_ksize(ss):
if ss.isdigit():
return int(ss)
else:
return [int(k) for k in ss.split('.')]
def _decode_block_str(block_str):
""" Decode block definition string
    Gets a block args dict and a repeat count from a string notation of arguments.
E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
All args can exist in any order with the exception of the leading string which
is assumed to indicate the block type.
leading string - block type (
      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
r - number of repeat blocks,
k - kernel size,
s - strides (1-9),
e - expansion ratio,
c - output channels,
se - squeeze/excitation ratio
    n - activation fn ('re', 'r6', 'hs', 'sw', or 'mi')
Args:
block_str: a string representation of block arguments.
    Returns:
        A block args dict and the number of times the block should be repeated
    Raises:
        ValueError: if the string def is not properly specified (TODO)
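    Example:
        A hedged decoding sketch (the block string and values here are illustrative only):
            ba, rep = _decode_block_str('ir_r2_k3_s2_e6_c64_se0.25')
            # rep == 2 and ba is roughly:
            # {'block_type': 'ir', 'out_chs': 64, 'stride': 2, 'act_layer': None,
            #  'dw_kernel_size': 3, 'exp_kernel_size': 1, 'pw_kernel_size': 1,
            #  'exp_ratio': 6.0, 'se_ratio': 0.25, 'noskip': False}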
"""
assert isinstance(block_str, str)
ops = block_str.split('_')
block_type = ops[0] # take the block type off the front
ops = ops[1:]
options = {}
skip = None
for op in ops:
        # string options are checked on an individual basis, combine into a lookup if they grow
if op == 'noskip':
skip = False # force no skip connection
elif op == 'skip':
skip = True # force a skip connection
elif op.startswith('n'):
# activation fn
key = op[0]
v = op[1:]
if v == 're':
value = get_act_layer('relu')
elif v == 'r6':
value = get_act_layer('relu6')
elif v == 'hs':
value = get_act_layer('hard_swish')
elif v == 'sw':
value = get_act_layer('swish') # aka SiLU
elif v == 'mi':
value = get_act_layer('mish')
else:
continue
options[key] = value
else:
# all numeric options
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# if act_layer is None, the model default (passed to model init) will be used
act_layer = options['n'] if 'n' in options else None
exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def
num_repeat = int(options['r'])
# each type of block has different valid arguments, fill accordingly
block_args = dict(
block_type=block_type,
out_chs=int(options['c']),
stride=int(options['s']),
act_layer=act_layer,
)
if block_type == 'ir':
block_args.update(dict(
dw_kernel_size=_parse_ksize(options['k']),
exp_kernel_size=exp_kernel_size,
pw_kernel_size=pw_kernel_size,
exp_ratio=float(options['e']),
se_ratio=float(options['se']) if 'se' in options else 0.,
noskip=skip is False,
))
if 'cc' in options:
block_args['num_experts'] = int(options['cc'])
elif block_type == 'ds' or block_type == 'dsa':
block_args.update(dict(
dw_kernel_size=_parse_ksize(options['k']),
pw_kernel_size=pw_kernel_size,
se_ratio=float(options['se']) if 'se' in options else 0.,
pw_act=block_type == 'dsa',
noskip=block_type == 'dsa' or skip is False,
))
elif block_type == 'er':
block_args.update(dict(
exp_kernel_size=_parse_ksize(options['k']),
pw_kernel_size=pw_kernel_size,
exp_ratio=float(options['e']),
force_in_chs=force_in_chs,
se_ratio=float(options['se']) if 'se' in options else 0.,
noskip=skip is False,
))
elif block_type == 'cn':
block_args.update(dict(
kernel_size=int(options['k']),
skip=skip is True,
))
else:
assert False, 'Unknown block type (%s)' % block_type
if 'gs' in options:
block_args['group_size'] = options['gs']
return block_args, num_repeat
def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):
""" Per-stage depth scaling
Scales the block repeats in each stage. This depth scaling impl maintains
compatibility with the EfficientNet scaling method, while allowing sensible
scaling for other models that may have multiple block arg definitions in each stage.
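    For intuition, a worked sketch (numbers illustrative): with repeats=(1, 2, 3),
    depth_multiplier=1.2 and depth_trunc='ceil', the stage total of 6 scales to ceil(7.2) = 8,
    distributed (allocating in reverse) as (1, 3, 4) repeats across the three block definitions.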
"""
# We scale the total repeat count for each stage, there may be multiple
# block arg defs per stage so we need to sum.
num_repeat = sum(repeats)
if depth_trunc == 'round':
# Truncating to int by rounding allows stages with few repeats to remain
# proportionally smaller for longer. This is a good choice when stage definitions
# include single repeat stages that we'd prefer to keep that way as long as possible
num_repeat_scaled = max(1, round(num_repeat * depth_multiplier))
else:
# The default for EfficientNet truncates repeats to int via 'ceil'.
# Any multiplier > 1.0 will result in an increased depth for every stage.
num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))
# Proportionally distribute repeat count scaling to each block definition in the stage.
# Allocation is done in reverse as it results in the first block being less likely to be scaled.
# The first block makes less sense to repeat in most of the arch definitions.
repeats_scaled = []
for r in repeats[::-1]:
rs = max(1, round((r / num_repeat * num_repeat_scaled)))
repeats_scaled.append(rs)
num_repeat -= r
num_repeat_scaled -= rs
repeats_scaled = repeats_scaled[::-1]
# Apply the calculated scaling to each block arg in the stage
sa_scaled = []
for ba, rep in zip(stack_args, repeats_scaled):
sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
return sa_scaled
def decode_arch_def(
arch_def,
depth_multiplier=1.0,
depth_trunc='ceil',
experts_multiplier=1,
fix_first_last=False,
group_size=None,
):
""" Decode block architecture definition strings -> block kwargs
Args:
arch_def: architecture definition strings, list of list of strings
depth_multiplier: network depth multiplier
        depth_trunc: network depth truncation mode when applying multiplier
experts_multiplier: CondConv experts multiplier
fix_first_last: fix first and last block depths when multiplier is applied
group_size: group size override for all blocks that weren't explicitly set in arch string
Returns:
list of list of block kwargs
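    Example:
        A minimal hedged sketch (the strings below are illustrative, not a real model definition):
            arch_args = decode_arch_def([['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24']])
            # arch_args[0] holds 1 depthwise-separable block dict,
            # arch_args[1] holds 2 inverted-residual block dicts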
"""
arch_args = []
if isinstance(depth_multiplier, tuple):
assert len(depth_multiplier) == len(arch_def)
else:
depth_multiplier = (depth_multiplier,) * len(arch_def)
for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)):
assert isinstance(block_strings, list)
stack_args = []
repeats = []
for block_str in block_strings:
assert isinstance(block_str, str)
ba, rep = _decode_block_str(block_str)
if ba.get('num_experts', 0) > 0 and experts_multiplier > 1:
ba['num_experts'] *= experts_multiplier
if group_size is not None:
ba.setdefault('group_size', group_size)
stack_args.append(ba)
repeats.append(rep)
if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1):
arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc))
else:
arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc))
return arch_args
class EfficientNetBuilder:
""" Build Trunk Blocks
This ended up being somewhat of a cross between
https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
and
https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py
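    A hedged usage sketch (argument values are illustrative, not a specific model config):
        builder = EfficientNetBuilder(output_stride=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d)
        block_args = decode_arch_def([['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24']])
        stages = builder(32, block_args)  # in_chs=32 as if from a stem conv -> list of nn.Sequential
        feature_info = builder.features   # per-stage feature metadata collected during the build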
"""
def __init__(self, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=False,
act_layer=None, norm_layer=None, se_layer=None, drop_path_rate=0., feature_location=''):
self.output_stride = output_stride
self.pad_type = pad_type
self.round_chs_fn = round_chs_fn
self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs
self.act_layer = act_layer
self.norm_layer = norm_layer
self.se_layer = get_attn(se_layer)
try:
self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg
self.se_has_ratio = True
except TypeError:
self.se_has_ratio = False
self.drop_path_rate = drop_path_rate
if feature_location == 'depthwise':
# old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense
_logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'")
feature_location = 'expansion'
self.feature_location = feature_location
assert feature_location in ('bottleneck', 'expansion', '')
self.verbose = _DEBUG_BUILDER
# state updated during build, consumed by model
self.in_chs = None
self.features = []
def _make_block(self, ba, block_idx, block_count):
drop_path_rate = self.drop_path_rate * block_idx / block_count
bt = ba.pop('block_type')
ba['in_chs'] = self.in_chs
ba['out_chs'] = self.round_chs_fn(ba['out_chs'])
if 'force_in_chs' in ba and ba['force_in_chs']:
# NOTE this is a hack to work around mismatch in TF EdgeEffNet impl
ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs'])
ba['pad_type'] = self.pad_type
# block act fn overrides the model default
ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
assert ba['act_layer'] is not None
ba['norm_layer'] = self.norm_layer
ba['drop_path_rate'] = drop_path_rate
if bt != 'cn':
se_ratio = ba.pop('se_ratio')
if se_ratio and self.se_layer is not None:
if not self.se_from_exp:
# adjust se_ratio by expansion ratio if calculating se channels from block input
se_ratio /= ba.get('exp_ratio', 1.0)
if self.se_has_ratio:
ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio)
else:
ba['se_layer'] = self.se_layer
if bt == 'ir':
_log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba)
elif bt == 'ds' or bt == 'dsa':
_log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
block = DepthwiseSeparableConv(**ba)
elif bt == 'er':
_log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
block = EdgeResidual(**ba)
elif bt == 'cn':
_log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
block = ConvBnAct(**ba)
else:
            assert False, 'Unknown block type (%s) while building model.' % bt
self.in_chs = ba['out_chs'] # update in_chs for arg of next block
return block
def __call__(self, in_chs, model_block_args):
""" Build the blocks
Args:
in_chs: Number of input-channels passed to first block
model_block_args: A list of lists, outer list defines stages, inner
list contains strings defining block configuration(s)
        Returns:
List of block stacks (each stack wrapped in nn.Sequential)
"""
_log_info_if('Building model trunk with %d stages...' % len(model_block_args), self.verbose)
self.in_chs = in_chs
total_block_count = sum([len(x) for x in model_block_args])
total_block_idx = 0
current_stride = 2
current_dilation = 1
stages = []
if model_block_args[0][0]['stride'] > 1:
# if the first block starts with a stride, we need to extract first level feat from stem
feature_info = dict(module='bn1', num_chs=in_chs, stage=0, reduction=current_stride)
self.features.append(feature_info)
# outer list of block_args defines the stacks
for stack_idx, stack_args in enumerate(model_block_args):
last_stack = stack_idx + 1 == len(model_block_args)
_log_info_if('Stack: {}'.format(stack_idx), self.verbose)
assert isinstance(stack_args, list)
blocks = []
# each stack (stage of blocks) contains a list of block arguments
for block_idx, block_args in enumerate(stack_args):
last_block = block_idx + 1 == len(stack_args)
_log_info_if(' Block: {}'.format(block_idx), self.verbose)
assert block_args['stride'] in (1, 2)
if block_idx >= 1: # only the first block in any stack can have a stride > 1
block_args['stride'] = 1
extract_features = False
if last_block:
next_stack_idx = stack_idx + 1
extract_features = next_stack_idx >= len(model_block_args) or \
model_block_args[next_stack_idx][0]['stride'] > 1
next_dilation = current_dilation
if block_args['stride'] > 1:
next_output_stride = current_stride * block_args['stride']
if next_output_stride > self.output_stride:
next_dilation = current_dilation * block_args['stride']
block_args['stride'] = 1
_log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format(
self.output_stride), self.verbose)
else:
current_stride = next_output_stride
block_args['dilation'] = current_dilation
if next_dilation != current_dilation:
current_dilation = next_dilation
# create the block
block = self._make_block(block_args, total_block_idx, total_block_count)
blocks.append(block)
# stash feature module name and channel info for model feature extraction
if extract_features:
feature_info = dict(
stage=stack_idx + 1,
reduction=current_stride,
**block.feature_info(self.feature_location),
)
leaf_name = feature_info.get('module', '')
if leaf_name:
feature_info['module'] = '.'.join([f'blocks.{stack_idx}.{block_idx}', leaf_name])
else:
assert last_block
feature_info['module'] = f'blocks.{stack_idx}'
self.features.append(feature_info)
total_block_idx += 1 # incr global block idx (across all stacks)
stages.append(nn.Sequential(*blocks))
return stages
def _init_weight_goog(m, n='', fix_group_fanout=True):
""" Weight initialization as per Tensorflow official implementations.
Args:
m (nn.Module): module to init
n (str): module name
fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs
Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc:
* https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
* https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
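    A hedged usage sketch (the tiny module below is illustrative only):
        model = nn.Sequential(nn.Conv2d(3, 32, 3), nn.BatchNorm2d(32))
        efficientnet_init_weights(model)  # applies this init fn to every named module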
"""
if isinstance(m, CondConv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
if fix_group_fanout:
fan_out //= m.groups
init_weight_fn = get_condconv_initializer(
lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape)
init_weight_fn(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
if fix_group_fanout:
fan_out //= m.groups
nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
fan_out = m.weight.size(0) # fan-out
fan_in = 0
if 'routing_fn' in n:
fan_in = m.weight.size(1)
init_range = 1.0 / math.sqrt(fan_in + fan_out)
nn.init.uniform_(m.weight, -init_range, init_range)
nn.init.zeros_(m.bias)
def efficientnet_init_weights(model: nn.Module, init_fn=None):
init_fn = init_fn or _init_weight_goog
for n, m in model.named_modules():
init_fn(m, n)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_factory.py | import os
from typing import Any, Dict, Optional, Union
from urllib.parse import urlsplit
from timm.layers import set_layer_config
from ._helpers import load_checkpoint
from ._hub import load_model_config_from_hf
from ._pretrained import PretrainedCfg
from ._registry import is_model, model_entrypoint, split_model_name_tag
__all__ = ['parse_model_name', 'safe_model_name', 'create_model']
def parse_model_name(model_name: str):
if model_name.startswith('hf_hub'):
# NOTE for backwards compat, deprecate hf_hub use
model_name = model_name.replace('hf_hub', 'hf-hub')
parsed = urlsplit(model_name)
assert parsed.scheme in ('', 'timm', 'hf-hub')
if parsed.scheme == 'hf-hub':
# FIXME may use fragment as revision, currently `@` in URI path
return parsed.scheme, parsed.path
else:
model_name = os.path.split(parsed.path)[-1]
return 'timm', model_name
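# For orientation, a hedged sketch of parse_model_name outputs (model names are illustrative):
#   parse_model_name('hf-hub:timm/resnet50.a1_in1k') -> ('hf-hub', 'timm/resnet50.a1_in1k')
#   parse_model_name('resnet50') -> ('timm', 'resnet50')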
def safe_model_name(model_name: str, remove_source: bool = True):
# return a filename / path safe model name
def make_safe(name):
return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_')
if remove_source:
model_name = parse_model_name(model_name)[-1]
return make_safe(model_name)
def create_model(
model_name: str,
pretrained: bool = False,
pretrained_cfg: Optional[Union[str, Dict[str, Any], PretrainedCfg]] = None,
pretrained_cfg_overlay: Optional[Dict[str, Any]] = None,
checkpoint_path: str = '',
scriptable: Optional[bool] = None,
exportable: Optional[bool] = None,
no_jit: Optional[bool] = None,
**kwargs,
):
"""Create a model.
Lookup model's entrypoint function and pass relevant args to create a new model.
<Tip>
**kwargs will be passed through entrypoint fn to ``timm.models.build_model_with_cfg()``
and then the model class __init__(). kwargs values set to None are pruned before passing.
</Tip>
Args:
model_name: Name of model to instantiate.
pretrained: If set to `True`, load pretrained ImageNet-1k weights.
pretrained_cfg: Pass in an external pretrained_cfg for model.
pretrained_cfg_overlay: Replace key-values in base pretrained_cfg with these.
checkpoint_path: Path of checkpoint to load _after_ the model is initialized.
scriptable: Set layer config so that model is jit scriptable (not working for all models yet).
exportable: Set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet).
no_jit: Set layer config so that model doesn't utilize jit scripted layers (so far activations only).
Keyword Args:
drop_rate (float): Classifier dropout rate for training.
drop_path_rate (float): Stochastic depth drop rate for training.
global_pool (str): Classifier global pooling type.
Example:
```py
>>> from timm import create_model
>>> # Create a MobileNetV3-Large model with no pretrained weights.
>>> model = create_model('mobilenetv3_large_100')
>>> # Create a MobileNetV3-Large model with pretrained weights.
>>> model = create_model('mobilenetv3_large_100', pretrained=True)
>>> model.num_classes
1000
>>> # Create a MobileNetV3-Large model with pretrained weights and a new head with 10 classes.
>>> model = create_model('mobilenetv3_large_100', pretrained=True, num_classes=10)
>>> model.num_classes
10
```
"""
# Parameters that aren't supported by all models or are intended to only override model defaults if set
# should default to None in command line args/cfg. Remove them if they are present and not set so that
# non-supporting models don't break and default args remain in effect.
kwargs = {k: v for k, v in kwargs.items() if v is not None}
model_source, model_name = parse_model_name(model_name)
if model_source == 'hf-hub':
assert not pretrained_cfg, 'pretrained_cfg should not be set when sourcing model from Hugging Face Hub.'
# For model names specified in the form `hf-hub:path/architecture_name@revision`,
# load model weights + pretrained_cfg from Hugging Face hub.
pretrained_cfg, model_name, model_args = load_model_config_from_hf(model_name)
if model_args:
for k, v in model_args.items():
kwargs.setdefault(k, v)
else:
model_name, pretrained_tag = split_model_name_tag(model_name)
if pretrained_tag and not pretrained_cfg:
# a valid pretrained_cfg argument takes priority over tag in model name
pretrained_cfg = pretrained_tag
if not is_model(model_name):
raise RuntimeError('Unknown model (%s)' % model_name)
create_fn = model_entrypoint(model_name)
with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit):
model = create_fn(
pretrained=pretrained,
pretrained_cfg=pretrained_cfg,
pretrained_cfg_overlay=pretrained_cfg_overlay,
**kwargs,
)
if checkpoint_path:
load_checkpoint(model, checkpoint_path)
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/sequencer.py | """ Sequencer
Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf
"""
# Copyright (c) 2022. Yuki Tatsunami
# Licensed under the Apache License, Version 2.0 (the "License");
import math
from functools import partial
from itertools import accumulate
from typing import Tuple
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead
from ._builder import build_model_with_cfg
from ._manipulate import named_apply
from ._registry import register_model, generate_default_cfgs
__all__ = ['Sequencer2d'] # model_registry will add each entrypoint fn to this
def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False):
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
if flax:
# Flax defaults
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)):
stdv = 1.0 / math.sqrt(module.hidden_size)
for weight in module.parameters():
nn.init.uniform_(weight, -stdv, stdv)
elif hasattr(module, 'init_weights'):
module.init_weights()
class RNNIdentity(nn.Module):
def __init__(self, *args, **kwargs):
super(RNNIdentity, self).__init__()
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]:
return x, None
class RNN2dBase(nn.Module):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
bidirectional: bool = True,
union="cat",
with_fc=True,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = 2 * hidden_size if bidirectional else hidden_size
self.union = union
self.with_vertical = True
self.with_horizontal = True
self.with_fc = with_fc
self.fc = None
if with_fc:
if union == "cat":
self.fc = nn.Linear(2 * self.output_size, input_size)
elif union == "add":
self.fc = nn.Linear(self.output_size, input_size)
elif union == "vertical":
self.fc = nn.Linear(self.output_size, input_size)
self.with_horizontal = False
elif union == "horizontal":
self.fc = nn.Linear(self.output_size, input_size)
self.with_vertical = False
else:
raise ValueError("Unrecognized union: " + union)
elif union == "cat":
if 2 * self.output_size != input_size:
raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.")
elif union == "add":
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
elif union == "vertical":
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
self.with_horizontal = False
elif union == "horizontal":
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
self.with_vertical = False
else:
raise ValueError("Unrecognized union: " + union)
self.rnn_v = RNNIdentity()
self.rnn_h = RNNIdentity()
def forward(self, x):
B, H, W, C = x.shape
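        # Run a (bi)directional RNN along each spatial axis: the vertical pass treats each column
        # (length H) as a sequence, the horizontal pass treats each row (length W) as a sequence,
        # and the two outputs are merged per `union` ('cat' or 'add') before the optional fc projection.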
if self.with_vertical:
v = x.permute(0, 2, 1, 3)
v = v.reshape(-1, H, C)
v, _ = self.rnn_v(v)
v = v.reshape(B, W, H, -1)
v = v.permute(0, 2, 1, 3)
else:
v = None
if self.with_horizontal:
h = x.reshape(-1, W, C)
h, _ = self.rnn_h(h)
h = h.reshape(B, H, W, -1)
else:
h = None
if v is not None and h is not None:
if self.union == "cat":
x = torch.cat([v, h], dim=-1)
else:
x = v + h
elif v is not None:
x = v
elif h is not None:
x = h
if self.fc is not None:
x = self.fc(x)
return x
class LSTM2d(RNN2dBase):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
bidirectional: bool = True,
union="cat",
with_fc=True,
):
super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc)
if self.with_vertical:
self.rnn_v = nn.LSTM(
input_size,
hidden_size,
num_layers,
batch_first=True,
bias=bias,
bidirectional=bidirectional,
)
if self.with_horizontal:
self.rnn_h = nn.LSTM(
input_size,
hidden_size,
num_layers,
batch_first=True,
bias=bias,
bidirectional=bidirectional,
)
class Sequencer2dBlock(nn.Module):
def __init__(
self,
dim,
hidden_size,
mlp_ratio=3.0,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop=0.,
drop_path=0.,
):
super().__init__()
channels_dim = int(mlp_ratio * dim)
self.norm1 = norm_layer(dim)
self.rnn_tokens = rnn_layer(
dim,
hidden_size,
num_layers=num_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.rnn_tokens(self.norm1(x)))
x = x + self.drop_path(self.mlp_channels(self.norm2(x)))
return x
class Shuffle(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
if self.training:
B, H, W, C = x.shape
r = torch.randperm(H * W)
x = x.reshape(B, -1, C)
x = x[:, r, :].reshape(B, H, W, -1)
return x
class Downsample2d(nn.Module):
def __init__(self, input_dim, output_dim, patch_size):
super().__init__()
self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
x = self.down(x)
x = x.permute(0, 2, 3, 1)
return x
class Sequencer2dStage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
patch_size,
hidden_size,
mlp_ratio,
downsample=False,
block_layer=Sequencer2dBlock,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop=0.,
drop_path=0.,
):
super().__init__()
if downsample:
self.downsample = Downsample2d(dim, dim_out, patch_size)
else:
assert dim == dim_out
self.downsample = nn.Identity()
blocks = []
for block_idx in range(depth):
blocks.append(block_layer(
dim_out,
hidden_size,
mlp_ratio=mlp_ratio,
rnn_layer=rnn_layer,
mlp_layer=mlp_layer,
norm_layer=norm_layer,
act_layer=act_layer,
num_layers=num_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
drop=drop,
drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path,
))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class Sequencer2d(nn.Module):
def __init__(
self,
num_classes=1000,
img_size=224,
in_chans=3,
global_pool='avg',
layers=(4, 3, 8, 3),
patch_sizes=(7, 2, 2, 1),
embed_dims=(192, 384, 384, 384),
hidden_sizes=(48, 96, 96, 96),
mlp_ratios=(3.0, 3.0, 3.0, 3.0),
block_layer=Sequencer2dBlock,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_rnn_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop_rate=0.,
drop_path_rate=0.,
nlhb=False,
stem_norm=False,
):
super().__init__()
assert global_pool in ('', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = embed_dims[-1] # num_features for consistency with other models
self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC)
self.output_fmt = 'NHWC'
self.feature_info = []
self.stem = PatchEmbed(
img_size=None,
patch_size=patch_sizes[0],
in_chans=in_chans,
embed_dim=embed_dims[0],
norm_layer=norm_layer if stem_norm else None,
flatten=False,
output_fmt='NHWC',
)
assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios)
reductions = list(accumulate(patch_sizes, lambda x, y: x * y))
stages = []
prev_dim = embed_dims[0]
for i, _ in enumerate(embed_dims):
stages += [Sequencer2dStage(
prev_dim,
embed_dims[i],
depth=layers[i],
downsample=i > 0,
patch_size=patch_sizes[i],
hidden_size=hidden_sizes[i],
mlp_ratio=mlp_ratios[i],
block_layer=block_layer,
rnn_layer=rnn_layer,
mlp_layer=mlp_layer,
norm_layer=norm_layer,
act_layer=act_layer,
num_layers=num_rnn_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
drop=drop_rate,
drop_path=drop_path_rate,
)]
prev_dim = embed_dims[i]
self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.norm = norm_layer(embed_dims[-1])
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
input_fmt=self.output_fmt,
)
self.init_weights(nlhb=nlhb)
def init_weights(self, nlhb=False):
head_bias = -math.log(self.num_classes) if nlhb else 0.
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=[
(r'^stages\.(\d+)', None),
(r'^norm', (99999,))
] if coarse else [
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^stages\.(\d+)\.downsample', (0,)),
(r'^norm', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stages.0.blocks.0.norm1.weight' in state_dict:
return state_dict # already translated checkpoint
if 'model' in state_dict:
state_dict = state_dict['model']
import re
out_dict = {}
for k, v in state_dict.items():
k = re.sub(r'blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k)
k = re.sub(r'blocks.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
def _create_sequencer2d(variant, pretrained=False, **kwargs):
default_out_indices = tuple(range(3))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
Sequencer2d,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.proj', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'),
'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'),
'sequencer2d_l.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[4, 3, 8, 3],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
)
model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[4, 3, 14, 3],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
    )
model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[8, 8, 16, 4],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
    )
model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/convmixer.py | """ ConvMixer
"""
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d
from ._registry import register_model, generate_default_cfgs
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
__all__ = ['ConvMixer']
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class ConvMixer(nn.Module):
def __init__(
self,
dim,
depth,
kernel_size=9,
patch_size=7,
in_chans=3,
num_classes=1000,
global_pool='avg',
drop_rate=0.,
act_layer=nn.GELU,
**kwargs,
):
super().__init__()
self.num_classes = num_classes
self.num_features = dim
self.grad_checkpointing = False
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size),
act_layer(),
nn.BatchNorm2d(dim)
)
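        # Each ConvMixer block mixes spatial information with a residual depthwise conv and channel
        # information with a pointwise (1x1) conv, each followed by activation + BatchNorm.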
self.blocks = nn.Sequential(
*[nn.Sequential(
Residual(nn.Sequential(
nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"),
act_layer(),
nn.BatchNorm2d(dim)
)),
nn.Conv2d(dim, dim, kernel_size=1),
act_layer(),
nn.BatchNorm2d(dim)
) for i in range(depth)]
)
self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem=r'^stem', blocks=r'^blocks\.(\d+)')
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.pooling(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_convmixer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for ConvMixer models.')
return build_model_with_cfg(ConvMixer, variant, pretrained, **kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .96, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head',
'first_conv': 'stem.0',
**kwargs
}
default_cfgs = generate_default_cfgs({
'convmixer_1536_20.in1k': _cfg(hf_hub_id='timm/'),
'convmixer_768_32.in1k': _cfg(hf_hub_id='timm/'),
'convmixer_1024_20_ks9_p14.in1k': _cfg(hf_hub_id='timm/')
})
@register_model
def convmixer_1536_20(pretrained=False, **kwargs) -> ConvMixer:
model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs)
return _create_convmixer('convmixer_1536_20', pretrained, **model_args)
@register_model
def convmixer_768_32(pretrained=False, **kwargs) -> ConvMixer:
model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, act_layer=nn.ReLU, **kwargs)
return _create_convmixer('convmixer_768_32', pretrained, **model_args)
@register_model
def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs) -> ConvMixer:
model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs)
return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args) | 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/convit.py | """ ConViT Model
@article{d2021convit,
title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases},
author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent},
journal={arXiv preprint arXiv:2103.10697},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.10697
Original code: https://github.com/facebookresearch/convit, original copyright below
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
'''These modules are adapted from those of timm, see
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
'''
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, PatchEmbed, Mlp, LayerNorm
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs
from .vision_transformer_hybrid import HybridEmbed
__all__ = ['ConVit']
@register_notrace_module # reason: FX can't symbolically trace control flow in forward method
class GPSA(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
locality_strength=1.,
):
super().__init__()
self.num_heads = num_heads
self.dim = dim
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.locality_strength = locality_strength
self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.pos_proj = nn.Linear(3, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
self.gating_param = nn.Parameter(torch.ones(self.num_heads))
self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None
def forward(self, x):
B, N, C = x.shape
if self.rel_indices is None or self.rel_indices.shape[1] != N:
self.rel_indices = self.get_rel_indices(N)
attn = self.get_attention(x)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def get_attention(self, x):
B, N, C = x.shape
qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k = qk[0], qk[1]
pos_score = self.rel_indices.expand(B, -1, -1, -1)
pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2)
patch_score = (q @ k.transpose(-2, -1)) * self.scale
patch_score = patch_score.softmax(dim=-1)
pos_score = pos_score.softmax(dim=-1)
gating = self.gating_param.view(1, -1, 1, 1)
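        # Learned per-head gate: sigmoid(gating) near 1 favours the positional attention map,
        # near 0 favours the content-based patch attention; the blend is re-normalized below.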
attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
attn /= attn.sum(dim=-1).unsqueeze(-1)
attn = self.attn_drop(attn)
return attn
def get_attention_map(self, x, return_map=False):
attn_map = self.get_attention(x).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:, :, -1] ** .5
dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def local_init(self):
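        # Initialize GPSA to behave like a convolution early in training: the value projection is set
        # to identity and each head's positional projection points at one offset of a
        # sqrt(num_heads) x sqrt(num_heads) neighbourhood, scaled by locality_strength.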
self.v.weight.data.copy_(torch.eye(self.dim))
locality_distance = 1 # max(1,1/locality_strength**.5)
kernel_size = int(self.num_heads ** .5)
center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2
for h1 in range(kernel_size):
for h2 in range(kernel_size):
position = h1 + kernel_size * h2
self.pos_proj.weight.data[position, 2] = -1
self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance
self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance
self.pos_proj.weight.data *= self.locality_strength
def get_rel_indices(self, num_patches: int) -> torch.Tensor:
img_size = int(num_patches ** .5)
rel_indices = torch.zeros(1, num_patches, num_patches, 3)
ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size, img_size)
indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
indd = indx ** 2 + indy ** 2
rel_indices[:, :, :, 2] = indd.unsqueeze(0)
rel_indices[:, :, :, 1] = indy.unsqueeze(0)
rel_indices[:, :, :, 0] = indx.unsqueeze(0)
device = self.qk.weight.device
return rel_indices.to(device)
class MHSA(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def get_attention_map(self, x, return_map=False):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn_map = (q @ k.transpose(-2, -1)) * self.scale
attn_map = attn_map.softmax(dim=-1).mean(0)
img_size = int(N ** .5)
ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size, img_size)
indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
indd = indx ** 2 + indy ** 2
distances = indd ** .5
distances = distances.to(x.device)
dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N
if return_map:
return dist, attn_map
else:
return dist
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=LayerNorm,
use_gpsa=True,
locality_strength=1.,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.use_gpsa = use_gpsa
if self.use_gpsa:
self.attn = GPSA(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
locality_strength=locality_strength,
)
else:
self.attn = MHSA(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ConVit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
hybrid_backbone=None,
norm_layer=LayerNorm,
local_up_to_layer=3,
locality_strength=1.,
use_pos_embed=True,
):
super().__init__()
assert global_pool in ('', 'avg', 'token')
embed_dim *= num_heads
self.num_classes = num_classes
self.global_pool = global_pool
self.local_up_to_layer = local_up_to_layer
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.locality_strength = locality_strength
self.use_pos_embed = use_pos_embed
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.num_patches = num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if self.use_pos_embed:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.pos_embed, std=.02)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
use_gpsa=i < local_up_to_layer,
locality_strength=locality_strength,
) for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
for n, m in self.named_modules():
if hasattr(m, 'local_init'):
m.local_init()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'token', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.use_pos_embed:
x = x + self.pos_embed
x = self.pos_drop(x)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
for u, blk in enumerate(self.blocks):
if u == self.local_up_to_layer:
x = torch.cat((cls_tokens, x), dim=1)
x = blk(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_convit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
return build_model_with_cfg(ConVit, variant, pretrained, **kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# ConViT
'convit_tiny.fb_in1k': _cfg(hf_hub_id='timm/'),
'convit_small.fb_in1k': _cfg(hf_hub_id='timm/'),
'convit_base.fb_in1k': _cfg(hf_hub_id='timm/')
})
@register_model
def convit_tiny(pretrained=False, **kwargs) -> ConVit:
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=4)
model = _create_convit(variant='convit_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convit_small(pretrained=False, **kwargs) -> ConVit:
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=9)
model = _create_convit(variant='convit_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convit_base(pretrained=False, **kwargs) -> ConVit:
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=16)
model = _create_convit(variant='convit_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/tresnet.py | """
TResNet: High Performance GPU-Dedicated Architecture
https://arxiv.org/pdf/2003.13630.pdf
Original model: https://github.com/mrT23/TResNet
"""
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
from timm.layers import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule,\
ConvNormActAa, ConvNormAct, DropPath
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['TResNet'] # model_registry will add each entrypoint fn to this
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
use_se=True,
aa_layer=None,
drop_path_rate=0.
):
super(BasicBlock, self).__init__()
self.downsample = downsample
self.stride = stride
act_layer = partial(nn.LeakyReLU, negative_slope=1e-3)
if stride == 1:
self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=1, act_layer=act_layer)
else:
self.conv1 = ConvNormActAa(
inplanes, planes, kernel_size=3, stride=2, act_layer=act_layer, aa_layer=aa_layer)
self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False, act_layer=None)
self.act = nn.ReLU(inplace=True)
rd_chs = max(planes * self.expansion // 4, 64)
self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def forward(self, x):
if self.downsample is not None:
shortcut = self.downsample(x)
else:
shortcut = x
out = self.conv1(x)
out = self.conv2(out)
if self.se is not None:
out = self.se(out)
out = self.drop_path(out) + shortcut
out = self.act(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
use_se=True,
act_layer=None,
aa_layer=None,
drop_path_rate=0.,
):
super(Bottleneck, self).__init__()
self.downsample = downsample
self.stride = stride
act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=1e-3)
self.conv1 = ConvNormAct(
inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer)
if stride == 1:
self.conv2 = ConvNormAct(
planes, planes, kernel_size=3, stride=1, act_layer=act_layer)
else:
self.conv2 = ConvNormActAa(
planes, planes, kernel_size=3, stride=2, act_layer=act_layer, aa_layer=aa_layer)
reduction_chs = max(planes * self.expansion // 8, 64)
self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None
self.conv3 = ConvNormAct(
planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False, act_layer=None)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.act = nn.ReLU(inplace=True)
def forward(self, x):
if self.downsample is not None:
shortcut = self.downsample(x)
else:
shortcut = x
out = self.conv1(x)
out = self.conv2(out)
if self.se is not None:
out = self.se(out)
out = self.conv3(out)
out = self.drop_path(out) + shortcut
out = self.act(out)
return out
class TResNet(nn.Module):
def __init__(
self,
layers,
in_chans=3,
num_classes=1000,
width_factor=1.0,
v2=False,
global_pool='fast',
drop_rate=0.,
drop_path_rate=0.,
):
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
super(TResNet, self).__init__()
aa_layer = BlurPool2d
act_layer = nn.LeakyReLU
# TResnet stages
self.inplanes = int(64 * width_factor)
self.planes = int(64 * width_factor)
if v2:
self.inplanes = self.inplanes // 8 * 8
self.planes = self.planes // 8 * 8
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
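        # stem: SpaceToDepth folds each 4x4 spatial patch into channels (3 -> 48 chs for RGB input),
        # so conv1 operates on in_chans * 16 channels at 1/4 the input resolution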
conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer)
layer1 = self._make_layer(
Bottleneck if v2 else BasicBlock,
self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0])
layer2 = self._make_layer(
Bottleneck if v2 else BasicBlock,
self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1])
layer3 = self._make_layer(
Bottleneck,
self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2])
layer4 = self._make_layer(
Bottleneck,
self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3])
# body
self.body = nn.Sequential(OrderedDict([
('s2d', SpaceToDepth()),
('conv1', conv1),
('layer1', layer1),
('layer2', layer2),
('layer3', layer3),
('layer4', layer4),
]))
self.feature_info = [
dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D?
dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'),
dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'),
dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'),
dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'),
]
# head
self.num_features = (self.planes * 8) * Bottleneck.expansion
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
# model initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
# residual connections special initialization
for m in self.modules():
if isinstance(m, BasicBlock):
nn.init.zeros_(m.conv2.bn.weight)
if isinstance(m, Bottleneck):
nn.init.zeros_(m.conv3.bn.weight)
def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
layers = []
if stride == 2:
# avg pooling before 1x1 conv
layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
layers += [ConvNormAct(
self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False, act_layer=None)]
downsample = nn.Sequential(*layers)
layers = []
for i in range(blocks):
layers.append(block(
self.inplanes,
planes,
stride=stride if i == 0 else 1,
downsample=downsample if i == 0 else None,
use_se=use_se,
aa_layer=aa_layer,
drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate,
))
self.inplanes = planes * block.expansion
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)')
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = self.body.s2d(x)
x = self.body.conv1(x)
x = checkpoint_seq([
self.body.layer1,
self.body.layer2,
self.body.layer3,
self.body.layer4],
x, flatten=True)
else:
x = self.body(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
if 'body.conv1.conv.weight' in state_dict:
return state_dict
import re
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
out_dict = {}
for k, v in state_dict.items():
k = re.sub(r'conv(\d+)\.0.0', lambda x: f'conv{int(x.group(1))}.conv', k)
k = re.sub(r'conv(\d+)\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k)
k = re.sub(r'conv(\d+)\.0', lambda x: f'conv{int(x.group(1))}.conv', k)
k = re.sub(r'conv(\d+)\.1', lambda x: f'conv{int(x.group(1))}.bn', k)
k = re.sub(r'downsample\.(\d+)\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k)
k = re.sub(r'downsample\.(\d+)\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k)
if k.endswith('bn.weight'):
# convert weight from inplace_abn to batchnorm
v = v.abs().add(1e-5)
out_dict[k] = v
return out_dict
def _create_tresnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
TResNet,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': (0., 0., 0.), 'std': (1., 1., 1.),
'first_conv': 'body.conv1.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221),
'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_m.miil_in1k_448': _cfg(
input_size=(3, 448, 448), pool_size=(14, 14),
hf_hub_id='timm/'),
'tresnet_l.miil_in1k_448': _cfg(
input_size=(3, 448, 448), pool_size=(14, 14),
hf_hub_id='timm/'),
'tresnet_xl.miil_in1k_448': _cfg(
input_size=(3, 448, 448), pool_size=(14, 14),
hf_hub_id='timm/'),
'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221),
})
@register_model
def tresnet_m(pretrained=False, **kwargs) -> TResNet:
model_args = dict(layers=[3, 4, 11, 3])
return _create_tresnet('tresnet_m', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def tresnet_l(pretrained=False, **kwargs) -> TResNet:
model_args = dict(layers=[4, 5, 18, 3], width_factor=1.2)
return _create_tresnet('tresnet_l', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def tresnet_xl(pretrained=False, **kwargs) -> TResNet:
model_args = dict(layers=[4, 5, 24, 3], width_factor=1.3)
return _create_tresnet('tresnet_xl', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet:
model_args = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True)
return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **dict(model_args, **kwargs))
register_model_deprecations(__name__, {
'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k',
'tresnet_m_448': 'tresnet_m.miil_in1k_448',
'tresnet_l_448': 'tresnet_l.miil_in1k_448',
'tresnet_xl_448': 'tresnet_xl.miil_in1k_448',
})
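# Illustrative usage sketch: a minimal, hedged example of creating one of the TResNet
# variants registered above via the timm factory. Assumes timm is installed; weights
# are left random (pretrained=False) so nothing is downloaded.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('tresnet_m', pretrained=False, num_classes=10)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 10])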
hf_public_repos/pytorch-image-models/timm/models/hrnet.py
""" HRNet
Copied from https://github.com/HRNet/HRNet-Image-Classification
Original header:
Copyright (c) Microsoft
Licensed under the MIT License.
Written by Bin Xiao (Bin.Xiao@microsoft.com)
Modified by Ke Sun (sunk@mail.ustc.edu.cn)
"""
import logging
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._features import FeatureInfo
from ._registry import register_model, generate_default_cfgs
from .resnet import BasicBlock, Bottleneck # leveraging ResNet block_types w/ additional features like SE
__all__ = ['HighResolutionNet', 'HighResolutionNetFeatures'] # model_registry will add each entrypoint fn to this
_BN_MOMENTUM = 0.1
_logger = logging.getLogger(__name__)
cfg_cls = dict(
hrnet_w18_small=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(1,),
num_channels=(32,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(2, 2),
num_channels=(16, 32),
fuse_method='SUM'
),
stage3=dict(
num_modules=1,
num_branches=3,
block_type='BASIC',
num_blocks=(2, 2, 2),
num_channels=(16, 32, 64),
fuse_method='SUM'
),
stage4=dict(
num_modules=1,
num_branches=4,
block_type='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(16, 32, 64, 128),
fuse_method='SUM',
),
),
hrnet_w18_small_v2=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(2,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(2, 2),
num_channels=(18, 36),
fuse_method='SUM'
),
stage3=dict(
num_modules=3,
num_branches=3,
block_type='BASIC',
num_blocks=(2, 2, 2),
num_channels=(18, 36, 72),
fuse_method='SUM'
),
stage4=dict(
num_modules=2,
num_branches=4,
block_type='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(18, 36, 72, 144),
fuse_method='SUM',
),
),
hrnet_w18=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(18, 36),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(18, 36, 72),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(18, 36, 72, 144),
fuse_method='SUM',
),
),
hrnet_w30=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(30, 60),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(30, 60, 120),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(30, 60, 120, 240),
fuse_method='SUM',
),
),
hrnet_w32=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256),
fuse_method='SUM',
),
),
hrnet_w40=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(40, 80),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(40, 80, 160),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(40, 80, 160, 320),
fuse_method='SUM',
),
),
hrnet_w44=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(44, 88),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(44, 88, 176),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(44, 88, 176, 352),
fuse_method='SUM',
),
),
hrnet_w48=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384),
fuse_method='SUM',
),
),
hrnet_w64=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(64, 128),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(64, 128, 256),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(64, 128, 256, 512),
fuse_method='SUM',
),
)
)
class HighResolutionModule(nn.Module):
def __init__(
self,
num_branches,
block_types,
num_blocks,
num_in_chs,
num_channels,
fuse_method,
multi_scale_output=True,
):
super(HighResolutionModule, self).__init__()
self._check_branches(
num_branches,
block_types,
num_blocks,
num_in_chs,
num_channels,
)
self.num_in_chs = num_in_chs
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches,
block_types,
num_blocks,
num_channels,
)
self.fuse_layers = self._make_fuse_layers()
self.fuse_act = nn.ReLU(False)
def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels):
error_msg = ''
if num_branches != len(num_blocks):
error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks))
elif num_branches != len(num_channels):
error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels))
elif num_branches != len(num_in_chs):
error_msg = 'num_branches({}) <> num_in_chs({})'.format(num_branches, len(num_in_chs))
if error_msg:
_logger.error(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1):
downsample = None
if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM),
)
layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)]
self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index]))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block_type, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return nn.Identity()
num_branches = self.num_branches
num_in_chs = self.num_in_chs
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False),
nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM),
nn.Upsample(scale_factor=2 ** (j - i), mode='nearest')))
elif j == i:
fuse_layer.append(nn.Identity())
else:
conv3x3s = []
for k in range(i - j):
if k == i - j - 1:
num_out_chs_conv3x3 = num_in_chs[i]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False),
nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM)
))
else:
num_out_chs_conv3x3 = num_in_chs[j]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False),
nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM),
nn.ReLU(False)
))
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_in_chs(self):
return self.num_in_chs
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i, branch in enumerate(self.branches):
x[i] = branch(x[i])
x_fuse = []
for i, fuse_outer in enumerate(self.fuse_layers):
y = None
for j, f in enumerate(fuse_outer):
if y is None:
y = f(x[j])
else:
y = y + f(x[j])
x_fuse.append(self.fuse_act(y))
return x_fuse
class SequentialList(nn.Sequential):
def __init__(self, *args):
super(SequentialList, self).__init__(*args)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (List[torch.Tensor])
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (List[torch.Tensor])
pass
def forward(self, x) -> List[torch.Tensor]:
for module in self:
x = module(x)
return x
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:  # `input` has the same name as in nn.Sequential's forward
pass
block_types_dict = {
'BASIC': BasicBlock,
'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.0,
head='classification',
**kwargs,
):
super(HighResolutionNet, self).__init__()
self.num_classes = num_classes
assert output_stride == 32 # FIXME support dilation
cfg.update(**kwargs)
stem_width = cfg['stem_width']
self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM)
self.act2 = nn.ReLU(inplace=True)
self.stage1_cfg = cfg['stage1']
num_channels = self.stage1_cfg['num_channels'][0]
block_type = block_types_dict[self.stage1_cfg['block_type']]
num_blocks = self.stage1_cfg['num_blocks'][0]
self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks)
stage1_out_channel = block_type.expansion * num_channels
self.stage2_cfg = cfg['stage2']
num_channels = self.stage2_cfg['num_channels']
block_type = block_types_dict[self.stage2_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)
self.stage3_cfg = cfg['stage3']
num_channels = self.stage3_cfg['num_channels']
block_type = block_types_dict[self.stage3_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)
self.stage4_cfg = cfg['stage4']
num_channels = self.stage4_cfg['num_channels']
block_type = block_types_dict[self.stage4_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
self.head = head
self.head_channels = None # set if _make_head called
head_conv_bias = cfg.pop('head_conv_bias', True)
if head == 'classification':
# Classification Head
self.num_features = 2048
self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(
pre_stage_channels,
conv_bias=head_conv_bias,
)
self.global_pool, self.head_drop, self.classifier = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
else:
if head == 'incre':
self.num_features = 2048
self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True)
else:
self.num_features = 256
self.incre_modules = None
self.global_pool = nn.Identity()
self.head_drop = nn.Identity()
self.classifier = nn.Identity()
curr_stride = 2
# module names aren't actually valid here, hook or FeatureNet based extraction would not work
self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')]
for i, c in enumerate(self.head_channels if self.head_channels else num_channels):
curr_stride *= 2
c = c * 4 if self.head_channels else c # head block_type expansion factor of 4
self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')]
self.init_weights()
def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True):
head_block_type = Bottleneck
self.head_channels = [32, 64, 128, 256]
# Increasing the #channels on each resolution
# from C, 2C, 4C, 8C to 128, 256, 512, 1024
incre_modules = []
for i, channels in enumerate(pre_stage_channels):
incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1))
incre_modules = nn.ModuleList(incre_modules)
if incre_only:
return incre_modules, None, None
# downsampling modules
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = self.head_channels[i] * head_block_type.expansion
out_channels = self.head_channels[i + 1] * head_block_type.expansion
downsamp_module = nn.Sequential(
nn.Conv2d(
in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=2, padding=1, bias=conv_bias),
nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)
)
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(
nn.Conv2d(
in_channels=self.head_channels[3] * head_block_type.expansion, out_channels=self.num_features,
kernel_size=1, stride=1, padding=0, bias=conv_bias),
nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)
)
return incre_modules, downsamp_modules, final_layer
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False),
nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)))
else:
transition_layers.append(nn.Identity())
else:
conv3x3s = []
for j in range(i + 1 - num_branches_pre):
_in_chs = num_channels_pre_layer[-1]
_out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs
conv3x3s.append(nn.Sequential(
nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False),
nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block_type, inplanes, planes, block_types, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block_type.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM),
)
layers = [block_type(inplanes, planes, stride, downsample)]
inplanes = planes * block_type.expansion
for i in range(1, block_types):
layers.append(block_type(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block_type = block_types_dict[layer_config['block_type']]
fuse_method = layer_config['fuse_method']
modules = []
for i in range(num_modules):
            # multi_scale_output is only used by the last module
reset_multi_scale_output = multi_scale_output or i < num_modules - 1
modules.append(HighResolutionModule(
num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output)
)
num_in_chs = modules[-1].get_num_in_chs()
return SequentialList(*modules), num_in_chs
@torch.jit.ignore
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv[12]|bn[12]',
block_types=r'^(?:layer|stage|transition)(\d+)' if coarse else [
(r'^layer(\d+)\.(\d+)', None),
(r'^stage(\d+)\.(\d+)', None),
(r'^transition(\d+)', (99999,)),
],
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, "gradient checkpointing not supported"
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def stages(self, x) -> List[torch.Tensor]:
x = self.layer1(x)
xl = [t(x) for i, t in enumerate(self.transition1)]
yl = self.stage2(xl)
xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)]
yl = self.stage3(xl)
xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)]
yl = self.stage4(xl)
return yl
def forward_features(self, x):
# Stem
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
# Stages
yl = self.stages(x)
if self.incre_modules is None or self.downsamp_modules is None:
return yl
y = None
for i, incre in enumerate(self.incre_modules):
if y is None:
y = incre(yl[i])
else:
down: ModuleInterface = self.downsamp_modules[i - 1] # needed for torchscript module indexing
y = incre(yl[i]) + down.forward(y)
y = self.final_layer(y)
return y
def forward_head(self, x, pre_logits: bool = False):
# Classification Head
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.classifier(x)
def forward(self, x):
y = self.forward_features(x)
x = self.forward_head(y)
return x
class HighResolutionNetFeatures(HighResolutionNet):
"""HighResolutionNet feature extraction
    The design of HRNet makes it easy to grab feature maps; this class provides a simple wrapper to do so.
It would be more complicated to use the FeatureNet helpers.
The `feature_location=incre` allows grabbing increased channel count features using part of the
classification head. If `feature_location=''` the default HRNet features are returned. First stem
conv is used for stride 2 features.
"""
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.0,
feature_location='incre',
out_indices=(0, 1, 2, 3, 4),
**kwargs,
):
assert feature_location in ('incre', '')
super(HighResolutionNetFeatures, self).__init__(
cfg,
in_chans=in_chans,
num_classes=num_classes,
output_stride=output_stride,
global_pool=global_pool,
drop_rate=drop_rate,
head=feature_location,
**kwargs,
)
self.feature_info = FeatureInfo(self.feature_info, out_indices)
self._out_idx = {f['index'] for f in self.feature_info.get_dicts()}
def forward_features(self, x):
assert False, 'Not supported'
    def forward(self, x) -> List[torch.Tensor]:
out = []
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
if 0 in self._out_idx:
out.append(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
x = self.stages(x)
if self.incre_modules is not None:
x = [incre(f) for f, incre in zip(x, self.incre_modules)]
for i, f in enumerate(x):
if i + 1 in self._out_idx:
out.append(f)
return out
def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs):
model_cls = HighResolutionNet
features_only = False
kwargs_filter = None
if model_kwargs.pop('features_only', False):
model_cls = HighResolutionNetFeatures
kwargs_filter = ('num_classes', 'global_pool')
features_only = True
cfg_variant = cfg_variant or variant
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
model_cfg=cfg_cls[cfg_variant],
pretrained_strict=not features_only,
kwargs_filter=kwargs_filter,
**model_kwargs,
)
if features_only:
model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg)
model.default_cfg = model.pretrained_cfg # backwards compat
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'hrnet_w18_small.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'),
'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18_small_v2.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'),
'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18.ms_aug_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95,
),
'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18_ssld.paddle_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288)
),
'hrnet_w48_ssld.paddle_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288)
),
})
@register_model
def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18_small', pretrained, **kwargs)
@register_model
def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs)
@register_model
def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18', pretrained, **kwargs)
@register_model
def hrnet_w30(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w30', pretrained, **kwargs)
@register_model
def hrnet_w32(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w32', pretrained, **kwargs)
@register_model
def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w40', pretrained, **kwargs)
@register_model
def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w44', pretrained, **kwargs)
@register_model
def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w48', pretrained, **kwargs)
@register_model
def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w64', pretrained, **kwargs)
@register_model
def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet:
kwargs.setdefault('head_conv_bias', False)
return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs)
@register_model
def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet:
kwargs.setdefault('head_conv_bias', False)
return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs)
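# Illustrative usage sketch: a hedged example of multi-scale feature extraction via the
# features_only path, which routes through HighResolutionNetFeatures above. Assumes timm
# is installed; weights are left random to keep the sketch self-contained.
if __name__ == '__main__':
    import timm

    feat_model = timm.create_model('hrnet_w18_small', pretrained=False, features_only=True)
    feats = feat_model(torch.randn(1, 3, 224, 224))
    for f, info in zip(feats, feat_model.feature_info.get_dicts()):
        print(info['module'], tuple(f.shape))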
hf_public_repos/pytorch-image-models/timm/models/_features_fx.py
""" PyTorch FX Based Feature Extraction Helpers
Using https://pytorch.org/vision/stable/feature_extraction.html
"""
from typing import Callable, List, Dict, Union, Type
import torch
from torch import nn
from ._features import _get_feature_info, _get_return_layers
try:
from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor
has_fx_feature_extraction = True
except ImportError:
has_fx_feature_extraction = False
# Layers we want to treat as leaf modules
from timm.layers import Conv2dSame, ScaledStdConv2dSame, CondConv2d, StdConv2dSame
from timm.layers.non_local_attn import BilinearAttnTransform
from timm.layers.pool2d_same import MaxPool2dSame, AvgPool2dSame
from timm.layers.norm_act import (
BatchNormAct2d,
SyncBatchNormAct,
FrozenBatchNormAct2d,
GroupNormAct,
GroupNorm1Act,
LayerNormAct,
LayerNormAct2d
)
__all__ = ['register_notrace_module', 'is_notrace_module', 'get_notrace_modules',
'register_notrace_function', 'is_notrace_function', 'get_notrace_functions',
'create_feature_extractor', 'FeatureGraphNet', 'GraphExtractNet']
# NOTE: By default, any modules from timm.models.layers that we want to treat as leaf modules go here
# BUT modules from timm.models should use the registration mechanism below
_leaf_modules = {
BilinearAttnTransform, # reason: flow control t <= 1
# Reason: get_same_padding has a max which raises a control flow error
Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame,
CondConv2d, # reason: TypeError: F.conv2d received Proxy in groups=self.groups * B (because B = x.shape[0]),
BatchNormAct2d,
SyncBatchNormAct,
FrozenBatchNormAct2d,
GroupNormAct,
GroupNorm1Act,
LayerNormAct,
LayerNormAct2d,
}
try:
from timm.layers import InplaceAbn
_leaf_modules.add(InplaceAbn)
except ImportError:
pass
def register_notrace_module(module: Type[nn.Module]):
"""
Any module not under timm.models.layers should get this decorator if we don't want to trace through it.
"""
_leaf_modules.add(module)
return module
def is_notrace_module(module: Type[nn.Module]):
return module in _leaf_modules
def get_notrace_modules():
return list(_leaf_modules)
# Functions we want to autowrap (treat them as leaves)
_autowrap_functions = set()
def register_notrace_function(func: Callable):
"""
Decorator for functions which ought not to be traced through
"""
_autowrap_functions.add(func)
return func
def is_notrace_function(func: Callable):
return func in _autowrap_functions
def get_notrace_functions():
return list(_autowrap_functions)
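# Illustrative sketch of the registration mechanism above: mark a custom module as a
# leaf so FX tracing does not descend into its data-dependent forward.
# 'PadToMultiple' is a hypothetical module used purely for illustration.
if __name__ == '__main__':
    import torch.nn.functional as F

    @register_notrace_module
    class PadToMultiple(nn.Module):
        def forward(self, x):
            pad = (4 - x.shape[-1] % 4) % 4  # data-dependent value FX cannot trace through
            return F.pad(x, (0, pad))

    print(is_notrace_module(PadToMultiple))  # True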
def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]):
assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
return _create_feature_extractor(
model, return_nodes,
tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}
)
class FeatureGraphNet(nn.Module):
""" A FX Graph based feature extractor that works with the model feature_info metadata
"""
def __init__(self, model, out_indices, out_map=None):
super().__init__()
assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
self.feature_info = _get_feature_info(model, out_indices)
if out_map is not None:
assert len(out_map) == len(out_indices)
return_nodes = _get_return_layers(self.feature_info, out_map)
self.graph_module = create_feature_extractor(model, return_nodes)
def forward(self, x):
return list(self.graph_module(x).values())
class GraphExtractNet(nn.Module):
""" A standalone feature extraction wrapper that maps dict -> list or single tensor
NOTE:
* one can use feature_extractor directly if dictionary output is desired
* unlike FeatureGraphNet, this is intended to be used standalone and not with model feature_info
metadata for builtin feature extraction mode
* create_feature_extractor can be used directly if dictionary output is acceptable
Args:
model: model to extract features from
return_nodes: node names to return features from (dict or list)
squeeze_out: if only one output, and output in list format, flatten to single tensor
"""
def __init__(self, model, return_nodes: Union[Dict[str, str], List[str]], squeeze_out: bool = True):
super().__init__()
self.squeeze_out = squeeze_out
self.graph_module = create_feature_extractor(model, return_nodes)
def forward(self, x) -> Union[List[torch.Tensor], torch.Tensor]:
out = list(self.graph_module(x).values())
if self.squeeze_out and len(out) == 1:
return out[0]
return out
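# Illustrative usage sketch of the FX helpers above, assuming timm and a recent
# torchvision are installed. The node names ('layer3', 'layer4') are assumptions that
# happen to match timm ResNets; in general, discover valid names with torchvision's
# get_graph_node_names before choosing return_nodes.
if __name__ == '__main__':
    import timm

    model = timm.create_model('resnet18', pretrained=False)
    extractor = GraphExtractNet(model, return_nodes=['layer3', 'layer4'], squeeze_out=False)
    outs = extractor(torch.randn(2, 3, 224, 224))
    print([tuple(o.shape) for o in outs])  # [(2, 256, 14, 14), (2, 512, 7, 7)]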
hf_public_repos/pytorch-image-models/timm/models/fx_features.py
from ._features_fx import *
import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
hf_public_repos/pytorch-image-models/timm/models/_builder.py
import dataclasses
import logging
import os
from copy import deepcopy
from typing import Optional, Dict, Callable, Any, Tuple
from torch import nn as nn
from torch.hub import load_state_dict_from_url
from timm.models._features import FeatureListNet, FeatureHookNet
from timm.models._features_fx import FeatureGraphNet
from timm.models._helpers import load_state_dict
from timm.models._hub import has_hf_hub, download_cached_file, check_cached_file, load_state_dict_from_hf
from timm.models._manipulate import adapt_input_conv
from timm.models._pretrained import PretrainedCfg
from timm.models._prune import adapt_model_from_file
from timm.models._registry import get_pretrained_cfg
_logger = logging.getLogger(__name__)
# Global variables for rarely used pretrained checkpoint download progress and hash check.
# Use set_pretrained_download_progress / set_pretrained_check_hash functions to toggle.
_DOWNLOAD_PROGRESS = False
_CHECK_HASH = False
_USE_OLD_CACHE = int(os.environ.get('TIMM_USE_OLD_CACHE', 0)) > 0
__all__ = ['set_pretrained_download_progress', 'set_pretrained_check_hash', 'load_custom_pretrained', 'load_pretrained',
'pretrained_cfg_for_features', 'resolve_pretrained_cfg', 'build_model_with_cfg']
def _resolve_pretrained_source(pretrained_cfg):
cfg_source = pretrained_cfg.get('source', '')
pretrained_url = pretrained_cfg.get('url', None)
pretrained_file = pretrained_cfg.get('file', None)
pretrained_sd = pretrained_cfg.get('state_dict', None)
hf_hub_id = pretrained_cfg.get('hf_hub_id', None)
# resolve where to load pretrained weights from
load_from = ''
pretrained_loc = ''
if cfg_source == 'hf-hub' and has_hf_hub(necessary=True):
# hf-hub specified as source via model identifier
load_from = 'hf-hub'
assert hf_hub_id
pretrained_loc = hf_hub_id
else:
# default source == timm or unspecified
if pretrained_sd:
# direct state_dict pass through is the highest priority
load_from = 'state_dict'
pretrained_loc = pretrained_sd
assert isinstance(pretrained_loc, dict)
elif pretrained_file:
# file load override is the second-highest priority if set
load_from = 'file'
pretrained_loc = pretrained_file
else:
old_cache_valid = False
if _USE_OLD_CACHE:
# prioritized old cached weights if exists and env var enabled
old_cache_valid = check_cached_file(pretrained_url) if pretrained_url else False
if not old_cache_valid and hf_hub_id and has_hf_hub(necessary=True):
# hf-hub available as alternate weight source in default_cfg
load_from = 'hf-hub'
pretrained_loc = hf_hub_id
elif pretrained_url:
load_from = 'url'
pretrained_loc = pretrained_url
if load_from == 'hf-hub' and pretrained_cfg.get('hf_hub_filename', None):
# if a filename override is set, return tuple for location w/ (hub_id, filename)
pretrained_loc = pretrained_loc, pretrained_cfg['hf_hub_filename']
return load_from, pretrained_loc
def set_pretrained_download_progress(enable=True):
""" Set download progress for pretrained weights on/off (globally). """
global _DOWNLOAD_PROGRESS
_DOWNLOAD_PROGRESS = enable
def set_pretrained_check_hash(enable=True):
""" Set hash checking for pretrained weights on/off (globally). """
global _CHECK_HASH
_CHECK_HASH = enable
def load_custom_pretrained(
model: nn.Module,
pretrained_cfg: Optional[Dict] = None,
load_fn: Optional[Callable] = None,
):
r"""Loads a custom (read non .pth) weight file
Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
    a passed-in custom load fn, or the `load_pretrained` model member fn.
If the object is already present in `model_dir`, it's deserialized and returned.
The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
`hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.
Args:
model: The instantiated model to load weights into
pretrained_cfg (dict): Default pretrained model cfg
load_fn: An external standalone fn that loads weights into provided model, otherwise a fn named
            'load_pretrained' on the model will be called if it exists
"""
pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
if not pretrained_cfg:
_logger.warning("Invalid pretrained config, cannot load weights.")
return
load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
if not load_from:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return
if load_from == 'hf-hub':
_logger.warning("Hugging Face hub not currently supported for custom load pretrained models.")
elif load_from == 'url':
pretrained_loc = download_cached_file(
pretrained_loc,
check_hash=_CHECK_HASH,
progress=_DOWNLOAD_PROGRESS,
)
if load_fn is not None:
load_fn(model, pretrained_loc)
elif hasattr(model, 'load_pretrained'):
model.load_pretrained(pretrained_loc)
else:
_logger.warning("Valid function to load pretrained weights is not available, using random initialization.")
def load_pretrained(
model: nn.Module,
pretrained_cfg: Optional[Dict] = None,
num_classes: int = 1000,
in_chans: int = 3,
filter_fn: Optional[Callable] = None,
strict: bool = True,
):
""" Load pretrained checkpoint
Args:
model (nn.Module) : PyTorch model module
pretrained_cfg (Optional[Dict]): configuration for pretrained weights / target dataset
num_classes (int): num_classes for target model
in_chans (int): in_chans for target model
filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args)
strict (bool): strict load of checkpoint
"""
pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
if not pretrained_cfg:
raise RuntimeError("Invalid pretrained config, cannot load weights. Use `pretrained=False` for random init.")
load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
if load_from == 'state_dict':
_logger.info(f'Loading pretrained weights from state dict')
state_dict = pretrained_loc # pretrained_loc is the actual state dict for this override
elif load_from == 'file':
_logger.info(f'Loading pretrained weights from file ({pretrained_loc})')
if pretrained_cfg.get('custom_load', False):
model.load_pretrained(pretrained_loc)
return
else:
state_dict = load_state_dict(pretrained_loc)
elif load_from == 'url':
_logger.info(f'Loading pretrained weights from url ({pretrained_loc})')
if pretrained_cfg.get('custom_load', False):
pretrained_loc = download_cached_file(
pretrained_loc,
progress=_DOWNLOAD_PROGRESS,
check_hash=_CHECK_HASH,
)
model.load_pretrained(pretrained_loc)
return
else:
state_dict = load_state_dict_from_url(
pretrained_loc,
map_location='cpu',
progress=_DOWNLOAD_PROGRESS,
check_hash=_CHECK_HASH,
)
elif load_from == 'hf-hub':
_logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})')
if isinstance(pretrained_loc, (list, tuple)):
state_dict = load_state_dict_from_hf(*pretrained_loc)
else:
state_dict = load_state_dict_from_hf(pretrained_loc)
else:
model_name = pretrained_cfg.get('architecture', 'this model')
raise RuntimeError(f"No pretrained weights exist for {model_name}. Use `pretrained=False` for random init.")
if filter_fn is not None:
try:
state_dict = filter_fn(state_dict, model)
except TypeError as e:
            # for backwards compat with filter fns that take a single arg
state_dict = filter_fn(state_dict)
input_convs = pretrained_cfg.get('first_conv', None)
if input_convs is not None and in_chans != 3:
if isinstance(input_convs, str):
input_convs = (input_convs,)
for input_conv_name in input_convs:
weight_name = input_conv_name + '.weight'
try:
state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
_logger.info(
f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
except NotImplementedError as e:
del state_dict[weight_name]
strict = False
_logger.warning(
f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
classifiers = pretrained_cfg.get('classifier', None)
label_offset = pretrained_cfg.get('label_offset', 0)
if classifiers is not None:
if isinstance(classifiers, str):
classifiers = (classifiers,)
if num_classes != pretrained_cfg['num_classes']:
for classifier_name in classifiers:
# completely discard fully connected if model num_classes doesn't match pretrained weights
state_dict.pop(classifier_name + '.weight', None)
state_dict.pop(classifier_name + '.bias', None)
strict = False
elif label_offset > 0:
for classifier_name in classifiers:
                # special case for pretrained weights with an extra background class
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
load_result = model.load_state_dict(state_dict, strict=strict)
if load_result.missing_keys:
_logger.info(
f'Missing keys ({", ".join(load_result.missing_keys)}) discovered while loading pretrained weights.'
f' This is expected if model is being adapted.')
if load_result.unexpected_keys:
_logger.warning(
f'Unexpected keys ({", ".join(load_result.unexpected_keys)}) found while loading pretrained weights.'
f' This may be expected if model is being adapted.')
def pretrained_cfg_for_features(pretrained_cfg):
pretrained_cfg = deepcopy(pretrained_cfg)
# remove default pretrained cfg fields that don't have much relevance for feature backbone
to_remove = ('num_classes', 'classifier', 'global_pool') # add default final pool size?
for tr in to_remove:
pretrained_cfg.pop(tr, None)
return pretrained_cfg
def _filter_kwargs(kwargs, names):
if not kwargs or not names:
return
for n in names:
kwargs.pop(n, None)
def _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter):
""" Update the default_cfg and kwargs before passing to model
Args:
pretrained_cfg: input pretrained cfg (updated in-place)
kwargs: keyword args passed to model build fn (updated in-place)
kwargs_filter: keyword arg keys that must be removed before model __init__
"""
# Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
if pretrained_cfg.get('fixed_input_size', False):
# if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
default_kwarg_names += ('img_size',)
for n in default_kwarg_names:
        # for legacy reasons, model __init__ args use img_size + in_chans as separate args while
        # pretrained_cfg has one input_size=(C, H, W) entry
if n == 'img_size':
input_size = pretrained_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[-2:])
elif n == 'in_chans':
input_size = pretrained_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[0])
elif n == 'num_classes':
default_val = pretrained_cfg.get(n, None)
# if default is < 0, don't pass through to model
if default_val is not None and default_val >= 0:
kwargs.setdefault(n, pretrained_cfg[n])
else:
default_val = pretrained_cfg.get(n, None)
if default_val is not None:
kwargs.setdefault(n, pretrained_cfg[n])
# Filter keyword args for task specific model variants (some 'features only' models, etc.)
_filter_kwargs(kwargs, names=kwargs_filter)
def resolve_pretrained_cfg(
variant: str,
pretrained_cfg=None,
pretrained_cfg_overlay=None,
) -> PretrainedCfg:
model_with_tag = variant
pretrained_tag = None
if pretrained_cfg:
if isinstance(pretrained_cfg, dict):
# pretrained_cfg dict passed as arg, validate by converting to PretrainedCfg
pretrained_cfg = PretrainedCfg(**pretrained_cfg)
elif isinstance(pretrained_cfg, str):
pretrained_tag = pretrained_cfg
pretrained_cfg = None
# fallback to looking up pretrained cfg in model registry by variant identifier
if not pretrained_cfg:
if pretrained_tag:
model_with_tag = '.'.join([variant, pretrained_tag])
pretrained_cfg = get_pretrained_cfg(model_with_tag)
if not pretrained_cfg:
_logger.warning(
f"No pretrained configuration specified for {model_with_tag} model. Using a default."
f" Please add a config to the model pretrained_cfg registry or pass explicitly.")
pretrained_cfg = PretrainedCfg() # instance with defaults
pretrained_cfg_overlay = pretrained_cfg_overlay or {}
if not pretrained_cfg.architecture:
pretrained_cfg_overlay.setdefault('architecture', variant)
pretrained_cfg = dataclasses.replace(pretrained_cfg, **pretrained_cfg_overlay)
return pretrained_cfg
def build_model_with_cfg(
model_cls: Callable,
variant: str,
pretrained: bool,
pretrained_cfg: Optional[Dict] = None,
pretrained_cfg_overlay: Optional[Dict] = None,
model_cfg: Optional[Any] = None,
feature_cfg: Optional[Dict] = None,
pretrained_strict: bool = True,
pretrained_filter_fn: Optional[Callable] = None,
kwargs_filter: Optional[Tuple[str]] = None,
**kwargs,
):
""" Build model with specified default_cfg and optional model_cfg
This helper fn aids in the construction of a model including:
* handling default_cfg and associated pretrained weight loading
* passing through optional model_cfg for models with config based arch spec
* features_only model adaptation
* pruning config / model adaptation
Args:
model_cls (nn.Module): model class
variant (str): model variant name
pretrained (bool): load pretrained weights
pretrained_cfg (dict): model's pretrained weight/task config
model_cfg (Optional[Dict]): model's architecture config
        feature_cfg (Optional[Dict]): feature extraction adapter config
pretrained_strict (bool): load pretrained weights strictly
pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights
kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model
**kwargs: model args passed through to model __init__
"""
pruned = kwargs.pop('pruned', False)
features = False
feature_cfg = feature_cfg or {}
# resolve and update model pretrained config and model kwargs
pretrained_cfg = resolve_pretrained_cfg(
variant,
pretrained_cfg=pretrained_cfg,
pretrained_cfg_overlay=pretrained_cfg_overlay
)
# FIXME converting back to dict, PretrainedCfg use should be propagated further, but not into model
pretrained_cfg = pretrained_cfg.to_dict()
_update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter)
# Setup for feature extraction wrapper done at end of this fn
if kwargs.pop('features_only', False):
features = True
feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
if 'out_indices' in kwargs:
feature_cfg['out_indices'] = kwargs.pop('out_indices')
# Instantiate the model
if model_cfg is None:
model = model_cls(**kwargs)
else:
model = model_cls(cfg=model_cfg, **kwargs)
model.pretrained_cfg = pretrained_cfg
model.default_cfg = model.pretrained_cfg # alias for backwards compat
if pruned:
model = adapt_model_from_file(model, variant)
# For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
if pretrained:
load_pretrained(
model,
pretrained_cfg=pretrained_cfg,
num_classes=num_classes_pretrained,
in_chans=kwargs.get('in_chans', 3),
filter_fn=pretrained_filter_fn,
strict=pretrained_strict,
)
# Wrap the model in a feature extraction module if enabled
if features:
feature_cls = FeatureListNet
output_fmt = getattr(model, 'output_fmt', None)
if output_fmt is not None:
feature_cfg.setdefault('output_fmt', output_fmt)
if 'feature_cls' in feature_cfg:
feature_cls = feature_cfg.pop('feature_cls')
if isinstance(feature_cls, str):
feature_cls = feature_cls.lower()
if 'hook' in feature_cls:
feature_cls = FeatureHookNet
elif feature_cls == 'fx':
feature_cls = FeatureGraphNet
else:
assert False, f'Unknown feature class {feature_cls}'
model = feature_cls(model, **feature_cfg)
model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg
model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg)
return model
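# Illustrative usage sketch of build_model_with_cfg with a hypothetical 'ToyNet'
# architecture. No pretrained cfg is registered for the 'toynet' variant, so the resolve
# step above falls back to a default cfg (with a warning); pretrained=False keeps the
# sketch offline.
if __name__ == '__main__':
    import torch
    from torch import nn

    class ToyNet(nn.Module):
        def __init__(self, num_classes=1000, in_chans=3, **kwargs):
            super().__init__()
            self.num_classes = num_classes
            self.stem = nn.Conv2d(in_chans, 8, 3, stride=2, padding=1)
            self.head = nn.Linear(8, num_classes)

        def forward(self, x):
            x = self.stem(x).mean(dim=(2, 3))  # global average pool over H, W
            return self.head(x)

    model = build_model_with_cfg(ToyNet, 'toynet', pretrained=False, num_classes=10)
    out = model(torch.randn(2, 3, 32, 32))
    print(model.pretrained_cfg.get('architecture'), tuple(out.shape))  # toynet (2, 10)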
hf_public_repos/pytorch-image-models/timm/models/vision_transformer.py
""" Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in:
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- https://arxiv.org/abs/2106.10270
`FlexiViT: One Model for All Patch Sizes`
- https://arxiv.org/abs/2212.08013
The official jax code is released and available at
* https://github.com/google-research/vision_transformer
* https://github.com/google-research/big_vision
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020, Ross Wightman
"""
import logging
import math
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, Optional, Sequence, Set, Tuple, Type, Union, List
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.jit import Final
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD, \
OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from timm.layers import PatchEmbed, Mlp, DropPath, AttentionPoolLatent, RmsNorm, PatchDropout, SwiGLUPacked, \
trunc_normal_, lecun_normal_, resample_patch_embed, resample_abs_pos_embed, use_fused_attn, \
get_act_layer, get_norm_layer, LayerType
from ._builder import build_model_with_cfg
from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['VisionTransformer'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
class Attention(nn.Module):
fused_attn: Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
qk_norm: bool = False,
attn_drop: float = 0.,
proj_drop: float = 0.,
norm_layer: nn.Module = nn.LayerNorm,
) -> None:
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q, k = self.q_norm(q), self.k_norm(k)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
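# Illustrative shape check for the Attention module above: a (batch, tokens, dim) token
# sequence maps to an output of the same shape. The dims here are arbitrary example
# values, not defaults of any particular ViT variant.
if __name__ == '__main__':
    attn = Attention(dim=64, num_heads=8, qkv_bias=True)
    tokens = torch.randn(2, 197, 64)  # e.g. 14x14 patches + 1 class token
    print(attn(tokens).shape)  # torch.Size([2, 197, 64])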
class LayerScale(nn.Module):
def __init__(
self,
dim: int,
init_values: float = 1e-5,
inplace: bool = False,
) -> None:
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.,
qkv_bias: bool = False,
qk_norm: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
init_values: Optional[float] = None,
drop_path: float = 0.,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.LayerNorm,
mlp_layer: nn.Module = Mlp,
) -> None:
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = mlp_layer(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class ResPostBlock(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.,
qkv_bias: bool = False,
qk_norm: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
init_values: Optional[float] = None,
drop_path: float = 0.,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.LayerNorm,
mlp_layer: nn.Module = Mlp,
) -> None:
super().__init__()
self.init_values = init_values
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = mlp_layer(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.init_weights()
def init_weights(self) -> None:
        # NOTE this init overrides the base model init with specific changes for the block type
if self.init_values is not None:
nn.init.constant_(self.norm1.weight, self.init_values)
nn.init.constant_(self.norm2.weight, self.init_values)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + self.drop_path1(self.norm1(self.attn(x)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
return x
class ParallelScalingBlock(nn.Module):
""" Parallel ViT block (MLP & Attention in parallel)
Based on:
'Scaling Vision Transformers to 22 Billion Parameters` - https://arxiv.org/abs/2302.05442
"""
fused_attn: Final[bool]
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.,
qkv_bias: bool = False,
qk_norm: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
init_values: Optional[float] = None,
drop_path: float = 0.,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.LayerNorm,
mlp_layer: Optional[nn.Module] = None,
) -> None:
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
mlp_hidden_dim = int(mlp_ratio * dim)
in_proj_out_dim = mlp_hidden_dim + 3 * dim
self.in_norm = norm_layer(dim)
self.in_proj = nn.Linear(dim, in_proj_out_dim, bias=qkv_bias)
self.in_split = [mlp_hidden_dim] + [dim] * 3
if qkv_bias:
self.register_buffer('qkv_bias', None)
self.register_parameter('mlp_bias', None)
else:
self.register_buffer('qkv_bias', torch.zeros(3 * dim), persistent=False)
self.mlp_bias = nn.Parameter(torch.zeros(mlp_hidden_dim))
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.attn_out_proj = nn.Linear(dim, dim)
self.mlp_drop = nn.Dropout(proj_drop)
self.mlp_act = act_layer()
self.mlp_out_proj = nn.Linear(mlp_hidden_dim, dim)
self.ls = LayerScale(dim, init_values=init_values) if init_values is not None else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, N, C = x.shape
# Combined MLP fc1 & qkv projections
y = self.in_norm(x)
if self.mlp_bias is not None:
# Concat constant zero-bias for qkv w/ trainable mlp_bias.
# Appears faster than adding to x_mlp separately
y = F.linear(y, self.in_proj.weight, torch.cat((self.qkv_bias, self.mlp_bias)))
else:
y = self.in_proj(y)
x_mlp, q, k, v = torch.split(y, self.in_split, dim=-1)
# Dot product attention w/ qk norm
q = self.q_norm(q.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2)
k = self.k_norm(k.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2)
v = v.view(B, N, self.num_heads, self.head_dim).transpose(1, 2)
if self.fused_attn:
x_attn = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x_attn = attn @ v
x_attn = x_attn.transpose(1, 2).reshape(B, N, C)
x_attn = self.attn_out_proj(x_attn)
# MLP activation, dropout, fc2
x_mlp = self.mlp_act(x_mlp)
x_mlp = self.mlp_drop(x_mlp)
x_mlp = self.mlp_out_proj(x_mlp)
# Add residual w/ drop path & layer scale applied
y = self.drop_path(self.ls(x_attn + x_mlp))
x = x + y
return x
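# Illustrative sketch (ours): unlike the sequential Block, ParallelScalingBlock computes
# attention and MLP from the *same* normalized input via one fused in_proj and adds their
# sum as a single residual, roughly x + LS(Attn(LN(x)) + MLP(LN(x))).
def _example_parallel_scaling_block():
    blk = ParallelScalingBlock(dim=64, num_heads=4, qkv_bias=True)
    x = torch.randn(2, 16, 64)
    out = blk(x)  # one fused projection -> split into (mlp_hidden, q, k, v)
    assert out.shape == x.shape
    return out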
class ParallelThingsBlock(nn.Module):
""" Parallel ViT block (N parallel attention followed by N parallel MLP)
Based on:
`Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795
"""
def __init__(
self,
dim: int,
num_heads: int,
num_parallel: int = 2,
mlp_ratio: float = 4.,
qkv_bias: bool = False,
qk_norm: bool = False,
init_values: Optional[float] = None,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.LayerNorm,
mlp_layer: nn.Module = Mlp,
) -> None:
super().__init__()
self.num_parallel = num_parallel
self.attns = nn.ModuleList()
self.ffns = nn.ModuleList()
for _ in range(num_parallel):
self.attns.append(nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('attn', Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
)),
('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()),
('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity())
])))
self.ffns.append(nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('mlp', mlp_layer(
dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)),
('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()),
('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity())
])))
def _forward_jit(self, x: torch.Tensor) -> torch.Tensor:
x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0)
x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0)
return x
@torch.jit.ignore
def _forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + sum(attn(x) for attn in self.attns)
x = x + sum(ffn(x) for ffn in self.ffns)
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
if torch.jit.is_scripting() or torch.jit.is_tracing():
return self._forward_jit(x)
else:
return self._forward(x)
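# Illustrative sketch (ours): ParallelThingsBlock keeps num_parallel attention branches and
# num_parallel MLP branches, summing each group into the residual stream, i.e.
# x + sum_i attn_i(x) followed by x + sum_i ffn_i(x).
def _example_parallel_things_block():
    blk = ParallelThingsBlock(dim=48, num_heads=4, num_parallel=2)
    assert len(blk.attns) == len(blk.ffns) == 2
    out = blk(torch.randn(2, 10, 48))
    assert out.shape == (2, 10, 48)
    return out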
class VisionTransformer(nn.Module):
""" Vision Transformer
    A PyTorch impl of: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
"""
dynamic_img_size: Final[bool]
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: Literal['', 'avg', 'token', 'map'] = 'token',
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
qk_norm: bool = False,
init_values: Optional[float] = None,
class_token: bool = True,
no_embed_class: bool = False,
reg_tokens: int = 0,
pre_norm: bool = False,
fc_norm: Optional[bool] = None,
dynamic_img_size: bool = False,
dynamic_img_pad: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
patch_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init: Literal['skip', 'jax', 'jax_nlhb', 'moco', ''] = '',
embed_layer: Callable = PatchEmbed,
norm_layer: Optional[LayerType] = None,
act_layer: Optional[LayerType] = None,
block_fn: Type[nn.Module] = Block,
mlp_layer: Type[nn.Module] = Mlp,
) -> None:
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of image input channels.
            num_classes: Number of classes for classification head.
global_pool: Type of global pooling for final sequence (default: 'token').
embed_dim: Transformer embedding dimension.
depth: Depth of transformer.
num_heads: Number of attention heads.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
            qkv_bias: Enable bias for qkv projections if True.
            qk_norm: Enable normalization of query and key projections (per head) if True.
            init_values: Layer-scale init values (layer-scale enabled if not None).
            class_token: Use class token.
            no_embed_class: Don't include position embeddings for class (or reg) tokens.
            reg_tokens: Number of register tokens.
            pre_norm: Apply a norm layer before the transformer blocks (and disable patch embed bias), as in CLIP ViT.
            fc_norm: Pre head norm after pool (instead of before), if None, enabled when global_pool == 'avg'.
            dynamic_img_size: Allow variable input image sizes by resampling the position embedding at runtime.
            dynamic_img_pad: Pad input images so spatial dims are divisible by the patch size.
            drop_rate: Head dropout rate.
            pos_drop_rate: Position embedding dropout rate.
            patch_drop_rate: Patch (token) dropout rate.
            proj_drop_rate: Dropout rate for attention projection and MLP layers.
            attn_drop_rate: Attention dropout rate.
            drop_path_rate: Stochastic depth rate.
            weight_init: Weight initialization scheme.
            embed_layer: Patch embedding layer.
            norm_layer: Normalization layer.
            act_layer: MLP activation layer.
            block_fn: Transformer block layer.
            mlp_layer: MLP block layer.
"""
super().__init__()
assert global_pool in ('', 'avg', 'token', 'map')
assert class_token or global_pool != 'token'
use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm
norm_layer = get_norm_layer(norm_layer) or partial(nn.LayerNorm, eps=1e-6)
act_layer = get_act_layer(act_layer) or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.num_prefix_tokens += reg_tokens
self.num_reg_tokens = reg_tokens
self.has_class_token = class_token
self.no_embed_class = no_embed_class # don't embed prefix positions (includes reg)
self.dynamic_img_size = dynamic_img_size
self.grad_checkpointing = False
embed_args = {}
if dynamic_img_size:
# flatten deferred until after pos embed
embed_args.update(dict(strict_img_size=False, output_fmt='NHWC'))
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=not pre_norm, # disable bias if pre-norm is used (e.g. CLIP)
dynamic_img_pad=dynamic_img_pad,
**embed_args,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
self.reg_token = nn.Parameter(torch.zeros(1, reg_tokens, embed_dim)) if reg_tokens else None
embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens
self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02)
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if patch_drop_rate > 0:
self.patch_drop = PatchDropout(
patch_drop_rate,
num_prefix_tokens=self.num_prefix_tokens,
)
else:
self.patch_drop = nn.Identity()
self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
init_values=init_values,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
mlp_layer=mlp_layer,
)
for i in range(depth)])
self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity()
# Classifier Head
if global_pool == 'map':
self.attn_pool = AttentionPoolLatent(
self.embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
norm_layer=norm_layer,
)
else:
self.attn_pool = None
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if weight_init != 'skip':
self.init_weights(weight_init)
def init_weights(self, mode: Literal['jax', 'jax_nlhb', 'moco', ''] = '') -> None:
assert mode in ('jax', 'jax_nlhb', 'moco', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
trunc_normal_(self.pos_embed, std=.02)
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
named_apply(get_init_weights_vit(mode, head_bias), self)
def _init_weights(self, m: nn.Module) -> None:
# this fn left here for compat with downstream users
init_weights_vit_timm(m)
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path: str, prefix: str = '') -> None:
_load_weights(self, checkpoint_path, prefix)
@torch.jit.ignore
def no_weight_decay(self) -> Set:
return {'pos_embed', 'cls_token', 'dist_token'}
@torch.jit.ignore
def group_matcher(self, coarse: bool = False) -> Dict:
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True) -> None:
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token', 'map')
if global_pool == 'map' and self.attn_pool is None:
assert False, "Cannot currently add attention pooling in reset_classifier()."
            elif global_pool != 'map' and self.attn_pool is not None:
self.attn_pool = None # remove attention pooling
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def _pos_embed(self, x: torch.Tensor) -> torch.Tensor:
if self.dynamic_img_size:
B, H, W, C = x.shape
pos_embed = resample_abs_pos_embed(
self.pos_embed,
(H, W),
num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens,
)
x = x.view(B, -1, C)
else:
pos_embed = self.pos_embed
to_cat = []
if self.cls_token is not None:
to_cat.append(self.cls_token.expand(x.shape[0], -1, -1))
if self.reg_token is not None:
to_cat.append(self.reg_token.expand(x.shape[0], -1, -1))
if self.no_embed_class:
# deit-3, updated JAX (big vision)
# position embedding does not overlap with class token, add then concat
x = x + pos_embed
if to_cat:
x = torch.cat(to_cat + [x], dim=1)
else:
# original timm, JAX, and deit vit impl
# pos_embed has entry for class token, concat then add
if to_cat:
x = torch.cat(to_cat + [x], dim=1)
x = x + pos_embed
return self.pos_drop(x)
def _intermediate_layers(
self,
x: torch.Tensor,
n: Union[int, Sequence] = 1,
) -> List[torch.Tensor]:
outputs, num_blocks = [], len(self.blocks)
take_indices = set(range(num_blocks - n, num_blocks) if isinstance(n, int) else n)
# forward pass
x = self.patch_embed(x)
x = self._pos_embed(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
for i, blk in enumerate(self.blocks):
x = blk(x)
if i in take_indices:
outputs.append(x)
return outputs
def get_intermediate_layers(
self,
x: torch.Tensor,
n: Union[int, Sequence] = 1,
reshape: bool = False,
return_prefix_tokens: bool = False,
norm: bool = False,
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
""" Intermediate layer accessor (NOTE: This is a WIP experiment).
Inspired by DINO / DINOv2 interface
"""
        # take last n blocks if n is an int, if n is a sequence, select by matching indices
outputs = self._intermediate_layers(x, n)
if norm:
outputs = [self.norm(out) for out in outputs]
prefix_tokens = [out[:, 0:self.num_prefix_tokens] for out in outputs]
outputs = [out[:, self.num_prefix_tokens:] for out in outputs]
if reshape:
grid_size = self.patch_embed.grid_size
outputs = [
out.reshape(x.shape[0], grid_size[0], grid_size[1], -1).permute(0, 3, 1, 2).contiguous()
for out in outputs
]
if return_prefix_tokens:
return tuple(zip(outputs, prefix_tokens))
return tuple(outputs)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
x = self._pos_embed(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.norm(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
if self.attn_pool is not None:
x = self.attn_pool(x)
elif self.global_pool == 'avg':
x = x[:, self.num_prefix_tokens:].mean(dim=1)
elif self.global_pool:
x = x[:, 0] # class token
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
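# Illustrative sketch (ours, not the library's public API): constructing a small
# VisionTransformer directly and inspecting its outputs. In normal use these models are
# created via timm.create_model(); the tiny config below is arbitrary.
def _example_vision_transformer_forward():
    model = VisionTransformer(
        img_size=64, patch_size=16, embed_dim=96, depth=2, num_heads=3,
        num_classes=10, global_pool='token', weight_init='skip',
    )
    model.eval()
    x = torch.randn(1, 3, 64, 64)
    with torch.no_grad():
        feats = model.forward_features(x)  # (1, 1 + 4*4 patches, 96), incl. cls token
        logits = model(x)                  # (1, 10)
    assert feats.shape == (1, 17, 96) and logits.shape == (1, 10)
    return logits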
def init_weights_vit_timm(module: nn.Module, name: str = '') -> None:
""" ViT weight initialization, original timm impl (for reproducibility) """
if isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.0) -> None:
""" ViT weight initialization, matching JAX (Flax) impl """
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def init_weights_vit_moco(module: nn.Module, name: str = '') -> None:
""" ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """
if isinstance(module, nn.Linear):
if 'qkv' in name:
# treat the weights of Q, K, V separately
val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
nn.init.uniform_(module.weight, -val, val)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def get_init_weights_vit(mode: str = 'jax', head_bias: float = 0.0) -> Callable:
if 'jax' in mode:
return partial(init_weights_vit_jax, head_bias=head_bias)
elif 'moco' in mode:
return init_weights_vit_moco
else:
return init_weights_vit_timm
def resize_pos_embed(
posemb: torch.Tensor,
posemb_new: torch.Tensor,
num_prefix_tokens: int = 1,
gs_new: Tuple[int, int] = (),
interpolation: str = 'bicubic',
antialias: bool = False,
) -> torch.Tensor:
""" Rescale the grid of position embeddings when loading from state_dict.
*DEPRECATED* This function is being deprecated in favour of resample_abs_pos_embed
Adapted from:
https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
"""
ntok_new = posemb_new.shape[1]
if num_prefix_tokens:
posemb_prefix, posemb_grid = posemb[:, :num_prefix_tokens], posemb[0, num_prefix_tokens:]
ntok_new -= num_prefix_tokens
else:
posemb_prefix, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
_logger.info(f'Resized position embedding: {posemb.shape} ({[gs_old, gs_old]}) to {posemb_new.shape} ({gs_new}).')
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode=interpolation, antialias=antialias, align_corners=False)
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_prefix, posemb_grid], dim=1)
return posemb
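# Illustrative sketch (ours): resizing a (1, 1 + 14*14, C) position embedding to a
# (1, 1 + 24*24, C) target grid with the deprecated helper above. New code should prefer
# resample_abs_pos_embed(), as noted in the docstring.
def _example_resize_pos_embed():
    old_posemb = torch.randn(1, 1 + 14 * 14, 192)    # cls token + 14x14 grid
    new_template = torch.zeros(1, 1 + 24 * 24, 192)  # target layout, values unused
    resized = resize_pos_embed(old_posemb, new_template, num_prefix_tokens=1)
    assert resized.shape == new_template.shape
    return resized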
@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = '') -> None:
""" Load weights from .npz checkpoints for official Google Brain Flax implementation
"""
import numpy as np
def _n2p(w, t=True):
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
w = w.flatten()
if t:
if w.ndim == 4:
w = w.transpose([3, 2, 0, 1])
elif w.ndim == 3:
w = w.transpose([2, 0, 1])
elif w.ndim == 2:
w = w.transpose([1, 0])
return torch.from_numpy(w)
w = np.load(checkpoint_path)
interpolation = 'bilinear'
antialias = False
big_vision = False
if not prefix:
if 'opt/target/embedding/kernel' in w:
prefix = 'opt/target/'
elif 'params/embedding/kernel' in w:
prefix = 'params/'
big_vision = True
elif 'params/img/embedding/kernel' in w:
prefix = 'params/img/'
big_vision = True
if hasattr(model.patch_embed, 'backbone'):
# hybrid
backbone = model.patch_embed.backbone
stem_only = not hasattr(backbone, 'stem')
stem = backbone if stem_only else backbone.stem
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
if not stem_only:
for i, stage in enumerate(backbone.stages):
for j, block in enumerate(stage.blocks):
bp = f'{prefix}block{i + 1}/unit{j + 1}/'
for r in range(3):
getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
if block.downsample is not None:
block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
else:
embed_conv_w = adapt_input_conv(
model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
if embed_conv_w.shape[-2:] != model.patch_embed.proj.weight.shape[-2:]:
embed_conv_w = resample_patch_embed(
embed_conv_w,
model.patch_embed.proj.weight.shape[-2:],
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
model.patch_embed.proj.weight.copy_(embed_conv_w)
model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
if model.cls_token is not None:
model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
if big_vision:
pos_embed_w = _n2p(w[f'{prefix}pos_embedding'], t=False)
else:
pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
if pos_embed_w.shape != model.pos_embed.shape:
old_shape = pos_embed_w.shape
num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1)
pos_embed_w = resample_abs_pos_embed( # resize pos embedding when different size from pretrained weights
pos_embed_w,
new_size=model.patch_embed.grid_size,
num_prefix_tokens=num_prefix_tokens,
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
model.pos_embed.copy_(pos_embed_w)
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
if (isinstance(model.head, nn.Linear) and
f'{prefix}head/bias' in w and
model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]):
model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
# NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights
# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
if model.attn_pool is not None:
block_prefix = f'{prefix}MAPHead_0/'
mha_prefix = block_prefix + f'MultiHeadDotProductAttention_0/'
model.attn_pool.latent.copy_(_n2p(w[f'{block_prefix}probe'], t=False))
model.attn_pool.kv.weight.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('key', 'value')]))
model.attn_pool.kv.bias.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('key', 'value')]))
model.attn_pool.q.weight.copy_(_n2p(w[f'{mha_prefix}query/kernel'], t=False).flatten(1).T)
model.attn_pool.q.bias.copy_(_n2p(w[f'{mha_prefix}query/bias'], t=False).reshape(-1))
model.attn_pool.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
model.attn_pool.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
model.attn_pool.norm.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
model.attn_pool.norm.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
for r in range(2):
getattr(model.attn_pool.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/kernel']))
getattr(model.attn_pool.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/bias']))
mha_sub, b_sub, ln1_sub = (0, 0, 1) if big_vision else (1, 3, 2)
for i, block in enumerate(model.blocks.children()):
block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
mha_prefix = block_prefix + f'MultiHeadDotProductAttention_{mha_sub}/'
block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
block.attn.qkv.weight.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
block.attn.qkv.bias.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/scale']))
block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/bias']))
for r in range(2):
getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/kernel']))
getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/bias']))
def _convert_openai_clip(
state_dict: Dict[str, torch.Tensor],
model: VisionTransformer,
prefix: str = 'visual.',
) -> Dict[str, torch.Tensor]:
out_dict = {}
swaps = [
('conv1', 'patch_embed.proj'),
('positional_embedding', 'pos_embed'),
('transformer.resblocks.', 'blocks.'),
('ln_pre', 'norm_pre'),
('ln_post', 'norm'),
('ln_', 'norm'),
('in_proj_', 'qkv.'),
('out_proj', 'proj'),
('mlp.c_fc', 'mlp.fc1'),
('mlp.c_proj', 'mlp.fc2'),
]
for k, v in state_dict.items():
if not k.startswith(prefix):
continue
k = k.replace(prefix, '')
for sp in swaps:
k = k.replace(sp[0], sp[1])
if k == 'proj':
k = 'head.weight'
v = v.transpose(0, 1)
out_dict['head.bias'] = torch.zeros(v.shape[0])
elif k == 'class_embedding':
k = 'cls_token'
v = v.unsqueeze(0).unsqueeze(1)
elif k == 'pos_embed':
v = v.unsqueeze(0)
if v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(
v,
model.pos_embed,
                    0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1),
model.patch_embed.grid_size
)
out_dict[k] = v
return out_dict
def _convert_dinov2(
state_dict: Dict[str, torch.Tensor],
model: VisionTransformer,
) -> Dict[str, torch.Tensor]:
import re
out_dict = {}
state_dict.pop("mask_token", None)
if 'register_tokens' in state_dict:
        # convert dinov2 w/ registers to no_embed_class timm model (neither cls nor reg tokens overlap pos embed)
out_dict['reg_token'] = state_dict.pop('register_tokens')
out_dict['cls_token'] = state_dict.pop('cls_token') + state_dict['pos_embed'][:, 0]
out_dict['pos_embed'] = state_dict.pop('pos_embed')[:, 1:]
for k, v in state_dict.items():
if re.match(r"blocks\.(\d+)\.mlp\.w12\.(?:weight|bias)", k):
out_dict[k.replace("w12", "fc1")] = v
continue
elif re.match(r"blocks\.(\d+)\.mlp\.w3\.(?:weight|bias)", k):
out_dict[k.replace("w3", "fc2")] = v
continue
out_dict[k] = v
return out_dict
def checkpoint_filter_fn(
state_dict: Dict[str, torch.Tensor],
model: VisionTransformer,
adapt_layer_scale: bool = False,
interpolation: str = 'bicubic',
antialias: bool = True,
) -> Dict[str, torch.Tensor]:
""" convert patch embedding weight from manual patchify + linear proj to conv"""
import re
out_dict = {}
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
prefix = ''
if 'visual.class_embedding' in state_dict:
return _convert_openai_clip(state_dict, model)
elif 'module.visual.class_embedding' in state_dict:
return _convert_openai_clip(state_dict, model, prefix='module.visual.')
if "mask_token" in state_dict:
state_dict = _convert_dinov2(state_dict, model)
if "encoder" in state_dict:
state_dict = state_dict['encoder']
prefix = 'module.'
if 'visual.trunk.pos_embed' in state_dict:
# convert an OpenCLIP model with timm vision encoder
# FIXME remap final nn.Linear if it exists outside of the timm .trunk (ie in visual.head.proj)
prefix = 'visual.trunk.'
if prefix:
# filter on & remove prefix string from keys
state_dict = {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
O, I, H, W = model.patch_embed.proj.weight.shape
if len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
v = v.reshape(O, -1, H, W)
if v.shape[-1] != W or v.shape[-2] != H:
v = resample_patch_embed(
v,
(H, W),
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1)
v = resample_abs_pos_embed(
v,
new_size=model.patch_embed.grid_size,
num_prefix_tokens=num_prefix_tokens,
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif adapt_layer_scale and 'gamma_' in k:
# remap layer-scale gamma into sub-module (deit3 models)
k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k)
elif 'pre_logits' in k:
# NOTE representation layer removed as not used in latest 21k/1k pretrained weights
continue
out_dict[k] = v
return out_dict
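# Illustrative sketch (ours): checkpoint_filter_fn above is normally invoked by timm's
# pretrained-weight loading machinery, but it can also be applied manually to remap an
# external checkpoint before load_state_dict. The checkpoint path below is a placeholder.
def _example_filter_checkpoint(model: VisionTransformer, checkpoint_path: str):
    state_dict = torch.load(checkpoint_path, map_location='cpu')
    state_dict = checkpoint_filter_fn(state_dict, model)  # remap keys, resample pos/patch embeds
    missing, unexpected = model.load_state_dict(state_dict, strict=False)
    return missing, unexpected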
def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': None,
'crop_pct': 0.9,
'interpolation': 'bicubic',
'fixed_input_size': True,
'mean': IMAGENET_INCEPTION_MEAN,
'std': IMAGENET_INCEPTION_STD,
'first_conv': 'patch_embed.proj',
'classifier': 'head',
**kwargs,
}
default_cfgs = {
# re-finetuned augreg 21k FT on in1k weights
'vit_base_patch16_224.augreg2_in21k_ft_in1k': _cfg(
hf_hub_id='timm/'),
'vit_base_patch16_384.augreg2_in21k_ft_in1k': _cfg(),
'vit_base_patch8_224.augreg2_in21k_ft_in1k': _cfg(
hf_hub_id='timm/'),
# How to train your ViT (augreg) weights, pretrained on 21k FT on in1k
'vit_tiny_patch16_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_tiny_patch16_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
'vit_small_patch32_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_small_patch32_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
'vit_small_patch16_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_small_patch16_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
'vit_base_patch32_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_base_patch32_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
'vit_base_patch16_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_base_patch16_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
'vit_base_patch8_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_large_patch16_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_large_patch16_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
# patch models (weights from official Google JAX impl) pretrained on in21k FT on in1k
'vit_base_patch16_224.orig_in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
hf_hub_id='timm/'),
'vit_base_patch16_384.orig_in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_large_patch32_384.orig_in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0),
# How to train your ViT (augreg) weights trained on in1k only
'vit_small_patch16_224.augreg_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_small_patch16_384.augreg_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
'vit_base_patch32_224.augreg_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_base_patch32_384.augreg_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
'vit_base_patch16_224.augreg_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz',
hf_hub_id='timm/',
custom_load=True),
'vit_base_patch16_384.augreg_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz',
hf_hub_id='timm/',
custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
'vit_large_patch14_224.untrained': _cfg(url=''),
'vit_huge_patch14_224.untrained': _cfg(url=''),
'vit_giant_patch14_224.untrained': _cfg(url=''),
'vit_gigantic_patch14_224.untrained': _cfg(url=''),
# patch models, imagenet21k (weights from official Google JAX impl), classifier not valid
'vit_base_patch32_224.orig_in21k': _cfg(
#url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth',
hf_hub_id='timm/',
num_classes=0),
'vit_base_patch16_224.orig_in21k': _cfg(
#url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth',
hf_hub_id='timm/',
num_classes=0),
'vit_large_patch32_224.orig_in21k': _cfg(
#url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth',
hf_hub_id='timm/',
num_classes=0),
'vit_large_patch16_224.orig_in21k': _cfg(
#url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth',
hf_hub_id='timm/',
num_classes=0),
'vit_huge_patch14_224.orig_in21k': _cfg(
hf_hub_id='timm/',
num_classes=0),
# How to train your ViT (augreg) weights, pretrained on in21k
'vit_tiny_patch16_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
custom_load=True, num_classes=21843),
'vit_small_patch32_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
custom_load=True, num_classes=21843),
'vit_small_patch16_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
custom_load=True, num_classes=21843),
'vit_base_patch32_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
custom_load=True, num_classes=21843),
'vit_base_patch16_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
custom_load=True, num_classes=21843),
'vit_base_patch8_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
custom_load=True, num_classes=21843),
'vit_large_patch16_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz',
hf_hub_id='timm/',
custom_load=True, num_classes=21843),
# SAM trained models (https://arxiv.org/abs/2106.01548)
'vit_base_patch32_224.sam_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz', custom_load=True,
hf_hub_id='timm/'),
'vit_base_patch16_224.sam_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz', custom_load=True,
hf_hub_id='timm/'),
# DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only)
'vit_small_patch16_224.dino': _cfg(
url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_small_patch8_224.dino': _cfg(
url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_base_patch16_224.dino': _cfg(
url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_base_patch8_224.dino': _cfg(
url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
# DINOv2 pretrained - https://arxiv.org/abs/2304.07193 (no classifier head, for fine-tune/features only)
'vit_small_patch14_dinov2.lvd142m': _cfg(
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
'vit_base_patch14_dinov2.lvd142m': _cfg(
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
'vit_large_patch14_dinov2.lvd142m': _cfg(
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
'vit_giant_patch14_dinov2.lvd142m': _cfg(
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
# DINOv2 pretrained w/ registers - https://arxiv.org/abs/2309.16588 (no classifier head, for fine-tune/features only)
'vit_small_patch14_reg4_dinov2.lvd142m': _cfg(
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_pretrain.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
'vit_base_patch14_reg4_dinov2.lvd142m': _cfg(
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_pretrain.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
'vit_large_patch14_reg4_dinov2.lvd142m': _cfg(
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_pretrain.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
'vit_giant_patch14_reg4_dinov2.lvd142m': _cfg(
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_pretrain.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 518, 518), crop_pct=1.0),
    # ViT ImageNet-21K-P pretraining by MIIL
'vit_base_patch16_224_miil.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth',
hf_hub_id='timm/',
mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221),
'vit_base_patch16_224_miil.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth',
hf_hub_id='timm/',
mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear'),
# Custom timm variants
'vit_base_patch16_rpn_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth',
hf_hub_id='timm/'),
'vit_medium_patch16_gap_240.sw_in12k': _cfg(
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95, num_classes=11821),
'vit_medium_patch16_gap_256.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), crop_pct=0.95),
'vit_medium_patch16_gap_384.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=0.95, crop_mode='squash'),
'vit_base_patch16_gap_224': _cfg(),
# CLIP pretrained image tower and related fine-tuned weights
'vit_base_patch32_clip_224.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD),
'vit_base_patch32_clip_384.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)),
'vit_base_patch32_clip_448.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 448, 448)),
'vit_base_patch16_clip_224.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95),
'vit_base_patch16_clip_384.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'),
'vit_large_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0),
'vit_large_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'),
'vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0),
'vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'),
'vit_base_patch32_clip_224.openai_ft_in12k_in1k': _cfg(
# hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k_in1k', # FIXME weight exists, need to push
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD),
'vit_base_patch32_clip_384.openai_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'),
'vit_base_patch16_clip_224.openai_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95),
'vit_base_patch16_clip_384.openai_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'),
'vit_large_patch14_clip_224.openai_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0),
'vit_large_patch14_clip_336.openai_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'),
'vit_base_patch32_clip_224.laion2b_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD),
'vit_base_patch16_clip_224.laion2b_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0),
'vit_base_patch16_clip_384.laion2b_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'),
'vit_large_patch14_clip_224.laion2b_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0),
'vit_large_patch14_clip_336.laion2b_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'),
'vit_huge_patch14_clip_224.laion2b_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0),
'vit_huge_patch14_clip_336.laion2b_ft_in1k': _cfg(
hf_hub_id='',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'),
'vit_base_patch32_clip_224.openai_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD),
'vit_base_patch16_clip_224.openai_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD),
'vit_base_patch16_clip_384.openai_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'),
'vit_large_patch14_clip_224.openai_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0),
'vit_base_patch32_clip_224.laion2b_ft_in12k': _cfg(
#hf_hub_id='timm/vit_base_patch32_clip_224.laion2b_ft_in12k', # FIXME weight exists, need to push
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821),
'vit_base_patch16_clip_224.laion2b_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821),
'vit_large_patch14_clip_224.laion2b_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=11821),
'vit_huge_patch14_clip_224.laion2b_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821),
'vit_base_patch32_clip_224.openai_ft_in12k': _cfg(
# hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k', # FIXME weight exists, need to push
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821),
'vit_base_patch16_clip_224.openai_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821),
'vit_large_patch14_clip_224.openai_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821),
'vit_base_patch32_clip_224.laion2b': _cfg(
hf_hub_id='laion/CLIP-ViT-B-32-laion2B-s34B-b79K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512),
'vit_base_patch16_clip_224.laion2b': _cfg(
hf_hub_id='laion/CLIP-ViT-B-16-laion2B-s34B-b88K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
'vit_large_patch14_clip_224.laion2b': _cfg(
hf_hub_id='laion/CLIP-ViT-L-14-laion2B-s32B-b82K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=768),
'vit_huge_patch14_clip_224.laion2b': _cfg(
hf_hub_id='laion/CLIP-ViT-H-14-laion2B-s32B-b79K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024),
'vit_giant_patch14_clip_224.laion2b': _cfg(
hf_hub_id='laion/CLIP-ViT-g-14-laion2B-s12B-b42K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024),
'vit_gigantic_patch14_clip_224.laion2b': _cfg(
hf_hub_id='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280),
'vit_base_patch32_clip_224.datacompxl': _cfg(
hf_hub_id='laion/CLIP-ViT-B-32-DataComp.XL-s13B-b90K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
'vit_base_patch32_clip_256.datacompxl': _cfg(
hf_hub_id='laion/CLIP-ViT-B-32-256x256-DataComp-s34B-b86K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=1.0, input_size=(3, 256, 256), num_classes=512),
'vit_base_patch16_clip_224.datacompxl': _cfg(
hf_hub_id='laion/CLIP-ViT-B-16-DataComp.XL-s13B-b90K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
'vit_large_patch14_clip_224.datacompxl': _cfg(
hf_hub_id='laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768),
'vit_base_patch16_clip_224.dfn2b': _cfg(
hf_hub_id='apple/DFN2B-CLIP-ViT-B-16',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
'vit_large_patch14_clip_224.dfn2b': _cfg(
hf_hub_id='apple/DFN2B-CLIP-ViT-L-14',
hf_hub_filename='open_clip_pytorch_model.bin',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768),
'vit_huge_patch14_clip_224.dfn5b': _cfg(
hf_hub_id='apple/DFN5B-CLIP-ViT-H-14',
hf_hub_filename='open_clip_pytorch_model.bin',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024),
'vit_huge_patch14_clip_378.dfn5b': _cfg(
hf_hub_id='apple/DFN5B-CLIP-ViT-H-14-378',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
notes=('natively QuickGELU, use quickgelu model variant for original results',),
crop_pct=1.0, input_size=(3, 378, 378), num_classes=1024),
'vit_base_patch32_clip_224.metaclip_2pt5b': _cfg(
hf_hub_id='facebook/metaclip-b32-fullcc2.5b',
hf_hub_filename='metaclip_b32_fullcc2.5b.bin',
license='cc-by-nc-4.0',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
'vit_base_patch16_clip_224.metaclip_2pt5b': _cfg(
hf_hub_id='facebook/metaclip-b16-fullcc2.5b',
hf_hub_filename='metaclip_b16_fullcc2.5b.bin',
license='cc-by-nc-4.0',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
'vit_large_patch14_clip_224.metaclip_2pt5b': _cfg(
hf_hub_id='facebook/metaclip-l14-fullcc2.5b',
hf_hub_filename='metaclip_l14_fullcc2.5b.bin',
license='cc-by-nc-4.0',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768),
'vit_huge_patch14_clip_224.metaclip_2pt5b': _cfg(
hf_hub_id='facebook/metaclip-h14-fullcc2.5b',
hf_hub_filename='metaclip_h14_fullcc2.5b.bin',
license='cc-by-nc-4.0',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024),
'vit_base_patch32_clip_224.openai': _cfg(
hf_hub_id='timm/vit_base_patch32_clip_224.openai',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512),
'vit_base_patch16_clip_224.openai': _cfg(
hf_hub_id='timm/vit_base_patch16_clip_224.openai',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512),
'vit_large_patch14_clip_224.openai': _cfg(
hf_hub_id='timm/vit_large_patch14_clip_224.openai',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768),
'vit_large_patch14_clip_336.openai': _cfg(
hf_hub_id='timm/vit_large_patch14_clip_336.openai', hf_hub_filename='open_clip_pytorch_model.bin',
notes=('natively QuickGELU, use quickgelu model variant for original results',),
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
crop_pct=1.0, input_size=(3, 336, 336), num_classes=768),
# experimental (may be removed)
'vit_base_patch32_plus_256.untrained': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95),
'vit_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95),
'vit_small_patch16_36x1_224.untrained': _cfg(url=''),
'vit_small_patch16_18x2_224.untrained': _cfg(url=''),
'vit_base_patch16_18x2_224.untrained': _cfg(url=''),
# EVA fine-tuned weights from MAE style MIM - EVA-CLIP target pretrain
# https://github.com/baaivision/EVA/blob/7ecf2c0a370d97967e86d047d7af9188f78d2df3/eva/README.md#eva-l-learning-better-mim-representations-from-eva-clip
'eva_large_patch14_196.in22k_ft_in22k_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_21k_to_1k_ft_88p6.pt',
hf_hub_id='timm/', license='mit',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 196, 196), crop_pct=1.0),
'eva_large_patch14_336.in22k_ft_in22k_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_21k_to_1k_ft_89p2.pt',
hf_hub_id='timm/', license='mit',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'),
'eva_large_patch14_196.in22k_ft_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_1k_ft_88p0.pt',
hf_hub_id='timm/', license='mit',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 196, 196), crop_pct=1.0),
'eva_large_patch14_336.in22k_ft_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_1k_ft_88p65.pt',
hf_hub_id='timm/', license='mit',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'),
'flexivit_small.1200ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_small.600ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_600ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_small.300ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_300ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_base.1200ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_base.600ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_600ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_base.300ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_300ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_base.1000ep_in21k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_1000ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843),
'flexivit_base.300ep_in21k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_300ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843),
'flexivit_large.1200ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_large.600ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_600ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_large.300ep_in1k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_300ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95),
'flexivit_base.patch16_in21k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/vit_b16_i21k_300ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843),
'flexivit_base.patch30_in21k': _cfg(
url='https://storage.googleapis.com/big_vision/flexivit/vit_b30_i21k_300ep.npz', custom_load=True,
hf_hub_id='timm/',
input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843),
'vit_base_patch16_xp_224.untrained': _cfg(url=''),
'vit_large_patch14_xp_224.untrained': _cfg(url=''),
'vit_huge_patch14_xp_224.untrained': _cfg(url=''),
'vit_base_patch16_224.mae': _cfg(
url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth',
hf_hub_id='timm/',
license='cc-by-nc-4.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_large_patch16_224.mae': _cfg(
url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth',
hf_hub_id='timm/',
license='cc-by-nc-4.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_huge_patch14_224.mae': _cfg(
url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth',
hf_hub_id='timm/',
license='cc-by-nc-4.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_huge_patch14_gap_224.in1k_ijepa': _cfg(
url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar',
# hf_hub_id='timm/',
license='cc-by-nc-4.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_huge_patch14_gap_224.in22k_ijepa': _cfg(
url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.h.14-900e.pth.tar',
# hf_hub_id='timm/',
license='cc-by-nc-4.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_huge_patch16_gap_448.in1k_ijepa': _cfg(
url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar',
# hf_hub_id='timm/',
license='cc-by-nc-4.0',
input_size=(3, 448, 448), crop_pct=1.0,
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_giant_patch16_gap_224.in22k_ijepa': _cfg(
url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar',
# hf_hub_id='timm/',
license='cc-by-nc-4.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
'vit_base_patch16_siglip_224.webli': _cfg(
hf_hub_id='timm/ViT-B-16-SigLIP',
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=0),
'vit_base_patch16_siglip_256.webli': _cfg(
hf_hub_id='timm/ViT-B-16-SigLIP-256',
hf_hub_filename='open_clip_pytorch_model.bin',
input_size=(3, 256, 256),
num_classes=0),
'vit_base_patch16_siglip_384.webli': _cfg(
hf_hub_id='timm/ViT-B-16-SigLIP-384',
hf_hub_filename='open_clip_pytorch_model.bin',
input_size=(3, 384, 384),
num_classes=0),
'vit_base_patch16_siglip_512.webli': _cfg(
hf_hub_id='timm/ViT-B-16-SigLIP-512',
hf_hub_filename='open_clip_pytorch_model.bin',
input_size=(3, 512, 512),
num_classes=0),
'vit_large_patch16_siglip_256.webli': _cfg(
hf_hub_id='timm/ViT-L-16-SigLIP-256',
hf_hub_filename='open_clip_pytorch_model.bin',
input_size=(3, 256, 256),
num_classes=0),
'vit_large_patch16_siglip_384.webli': _cfg(
hf_hub_id='timm/ViT-L-16-SigLIP-384',
hf_hub_filename='open_clip_pytorch_model.bin',
input_size=(3, 384, 384),
num_classes=0),
'vit_so400m_patch14_siglip_224.webli': _cfg(
hf_hub_id='timm/ViT-SO400M-14-SigLIP',
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=0),
'vit_so400m_patch14_siglip_384.webli': _cfg(
hf_hub_id='timm/ViT-SO400M-14-SigLIP-384',
hf_hub_filename='open_clip_pytorch_model.bin',
input_size=(3, 384, 384),
num_classes=0),
'vit_medium_patch16_reg4_256': _cfg(
input_size=(3, 256, 256)),
'vit_medium_patch16_reg4_gap_256': _cfg(
input_size=(3, 256, 256)),
'vit_base_patch16_reg8_gap_256': _cfg(input_size=(3, 256, 256)),
}
_quick_gelu_cfgs = [
'vit_large_patch14_clip_224.dfn2b',
'vit_huge_patch14_clip_224.dfn5b',
'vit_huge_patch14_clip_378.dfn5b',
'vit_base_patch32_clip_224.metaclip_2pt5b',
'vit_base_patch16_clip_224.metaclip_2pt5b',
'vit_large_patch14_clip_224.metaclip_2pt5b',
'vit_huge_patch14_clip_224.metaclip_2pt5b',
'vit_base_patch32_clip_224.openai',
'vit_base_patch16_clip_224.openai',
'vit_large_patch14_clip_224.openai',
'vit_large_patch14_clip_336.openai',
]
default_cfgs.update({
n.replace('_clip_', '_clip_quickgelu_'): default_cfgs[n] for n in _quick_gelu_cfgs
})
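# NOTE: the update above registers a '_clip_quickgelu_' twin for every cfg listed in
# _quick_gelu_cfgs, sharing the same pretrained cfg / weights, e.g.
# 'vit_base_patch16_clip_224.openai' -> 'vit_base_patch16_clip_quickgelu_224.openai'.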
default_cfgs = generate_default_cfgs(default_cfgs)
def _create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> VisionTransformer:
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
if 'flexi' in variant:
# FIXME Google FlexiViT pretrained models have a strong preference for bilinear patch / embed
# interpolation, other pretrained models resize better w/ anti-aliased bicubic interpolation.
_filter_fn = partial(checkpoint_filter_fn, interpolation='bilinear', antialias=False)
else:
_filter_fn = checkpoint_filter_fn
# FIXME attn pool (currently only in siglip) params removed if pool disabled, is there a better soln?
strict = True
if 'siglip' in variant and kwargs.get('global_pool', None) != 'map':
strict = False
return build_model_with_cfg(
VisionTransformer,
variant,
pretrained,
pretrained_filter_fn=_filter_fn,
pretrained_strict=strict,
**kwargs,
)
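# NOTE: the @register_model entrypoints below pass their fixed model_args first and merge caller
# kwargs on top via dict(model_args, **kwargs), so user-supplied kwargs (e.g. img_size,
# drop_path_rate) override the per-variant defaults.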
@register_model
def vit_tiny_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Tiny (Vit-Ti/16)
"""
model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_tiny_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Tiny (Vit-Ti/16) @ 384x384.
"""
model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Small (ViT-S/32)
"""
model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Small (ViT-S/32) at 384x384.
"""
model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Small (ViT-S/16)
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Small (ViT-S/16)
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch8_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Small (ViT-S/8)
"""
model_args = dict(patch_size=8, embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer('vit_small_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer.
"""
model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch8_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_args = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/14)
"""
model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
"""
model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16)
model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_giant_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
"""
model_args = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16)
model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_gigantic_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Gigantic (big-G) model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
"""
model_args = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16)
model = _create_vision_transformer(
'vit_gigantic_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_224_miil(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False)
model = _create_vision_transformer(
'vit_base_patch16_224_miil', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_medium_patch16_gap_240(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 240x240
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False,
global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False)
model = _create_vision_transformer(
'vit_medium_patch16_gap_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_medium_patch16_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 256x256
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False,
global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False)
model = _create_vision_transformer(
'vit_medium_patch16_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_medium_patch16_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 384x384
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False,
global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False)
model = _create_vision_transformer(
'vit_medium_patch16_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base (ViT-B/16) w/o class token, w/ avg-pool @ 224x224
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=16, class_token=False, global_pool='avg', fc_norm=False)
model = _create_vision_transformer(
'vit_base_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch14_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/14) w/ no class token, avg pool
"""
model_args = dict(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', fc_norm=False)
model = _create_vision_transformer(
'vit_huge_patch14_gap_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch16_gap_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/16) w/ no class token, avg pool @ 448x448
"""
model_args = dict(
patch_size=16, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', fc_norm=False)
model = _create_vision_transformer(
'vit_huge_patch16_gap_448', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_giant_patch16_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Giant (little-gg) model (ViT-g/16) w/ no class token, avg pool
"""
model_args = dict(
patch_size=16, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=48/11,
class_token=False, global_pool='avg', fc_norm=False)
model = _create_vision_transformer(
'vit_giant_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/32 CLIP image tower @ 224x224
"""
model_args = dict(
patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_base_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch32_clip_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/32 CLIP image tower @ 256x256
"""
model_args = dict(
patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_base_patch32_clip_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch32_clip_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/32 CLIP image tower @ 384x384
"""
model_args = dict(
patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_base_patch32_clip_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch32_clip_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/32 CLIP image tower @ 448x448
"""
model_args = dict(
patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_base_patch32_clip_448', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/16 CLIP image tower
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_clip_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/16 CLIP image tower @ 384x384
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_base_patch16_clip_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/14) CLIP image tower
"""
model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch14_clip_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/14) CLIP image tower @ 336x336
"""
model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/14) CLIP image tower.
"""
model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_huge_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch14_clip_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/14) CLIP image tower @ 336x336
"""
model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_huge_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch14_clip_378(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/14) CLIP image tower @ 378x378
"""
model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_huge_patch14_clip_378', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_giant_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
Pretrained weights from CLIP image tower.
"""
model_args = dict(
patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_gigantic_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-bigG model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
Pretrained weights from CLIP image tower.
"""
model_args = dict(
patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm)
model = _create_vision_transformer(
'vit_gigantic_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch32_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/32 CLIP image tower @ 224x224
"""
model_args = dict(
patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True,
norm_layer=nn.LayerNorm, act_layer='quick_gelu')
model = _create_vision_transformer(
'vit_base_patch32_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/16 CLIP image tower w/ QuickGELU act
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True,
norm_layer=nn.LayerNorm, act_layer='quick_gelu')
model = _create_vision_transformer(
'vit_base_patch16_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/14) CLIP image tower w/ QuickGELU act
"""
model_args = dict(
patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True,
norm_layer=nn.LayerNorm, act_layer='quick_gelu')
model = _create_vision_transformer(
'vit_large_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch14_clip_quickgelu_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/14) CLIP image tower @ 336x336 w/ QuickGELU act
"""
model_args = dict(
patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True,
norm_layer=nn.LayerNorm, act_layer='quick_gelu')
model = _create_vision_transformer(
'vit_large_patch14_clip_quickgelu_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/14) CLIP image tower w/ QuickGELU act.
"""
model_args = dict(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True,
norm_layer=nn.LayerNorm, act_layer='quick_gelu')
model = _create_vision_transformer(
'vit_huge_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch14_clip_quickgelu_378(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/14) CLIP image tower @ 378x378 w/ QuickGELU act
"""
model_args = dict(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True,
norm_layer=nn.LayerNorm, act_layer='quick_gelu')
model = _create_vision_transformer(
'vit_huge_patch14_clip_quickgelu_378', pretrained=pretrained, **dict(model_args, **kwargs))
return model
# Experimental models below
@register_model
def vit_base_patch32_plus_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base (ViT-B/32+)
"""
model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, init_values=1e-5)
model = _create_vision_transformer(
'vit_base_patch32_plus_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_plus_240(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base (ViT-B/16+)
"""
model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14, init_values=1e-5)
model = _create_vision_transformer(
'vit_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_rpn_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base (ViT-B/16) w/ residual post-norm
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, init_values=1e-5,
class_token=False, block_fn=ResPostBlock, global_pool='avg')
model = _create_vision_transformer(
'vit_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch16_36x1_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base w/ LayerScale + 36 x 1 (36 block serial) config. Experimental, may remove.
Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795
Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=6, init_values=1e-5)
model = _create_vision_transformer(
'vit_small_patch16_36x1_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch16_18x2_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Small w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove.
Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795
Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow.
"""
model_args = dict(
patch_size=16, embed_dim=384, depth=18, num_heads=6, init_values=1e-5, block_fn=ParallelThingsBlock)
model = _create_vision_transformer(
'vit_small_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_18x2_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Base w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove.
Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=18, num_heads=12, init_values=1e-5, block_fn=ParallelThingsBlock)
model = _create_vision_transformer(
'vit_base_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva_large_patch14_196(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" EVA-large model https://arxiv.org/abs/2211.07636 /via MAE MIM pretrain"""
model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg')
model = _create_vision_transformer(
'eva_large_patch14_196', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva_large_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" EVA-large model https://arxiv.org/abs/2211.07636 via MAE MIM pretrain"""
model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg')
model = _create_vision_transformer('eva_large_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def flexivit_small(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" FlexiViT-Small
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True)
model = _create_vision_transformer('flexivit_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def flexivit_base(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" FlexiViT-Base
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True)
model = _create_vision_transformer('flexivit_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def flexivit_large(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" FlexiViT-Large
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True)
model = _create_vision_transformer('flexivit_large', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/14) w/ parallel blocks and qk norm enabled.
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, no_embed_class=True,
norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True,
)
model = _create_vision_transformer(
'vit_base_patch16_xp_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch14_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Large model (ViT-L/14) w/ parallel blocks and qk norm enabled.
"""
model_args = dict(
patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, no_embed_class=True,
norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True,
)
model = _create_vision_transformer(
'vit_large_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_huge_patch14_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-Huge model (ViT-H/14) w/ parallel blocks and qk norm enabled.
"""
model_args = dict(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, no_embed_class=True,
norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True,
)
model = _create_vision_transformer(
'vit_huge_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-S/14 for DINOv2
"""
model_args = dict(patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5, img_size=518)
model = _create_vision_transformer(
'vit_small_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/14 for DINOv2
"""
model_args = dict(patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5, img_size=518)
model = _create_vision_transformer(
'vit_base_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-L/14 for DINOv2
"""
model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5, img_size=518)
model = _create_vision_transformer(
'vit_large_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_giant_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-G/14 for DINOv2
"""
# The hidden_features of SwiGLU is calculated by:
# hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
# When embed_dim=1536, hidden_features=4096
# With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192
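    # Worked example: int(1536 * 2.66667 * 2) = 8192, which SwiGLUPacked chunks into two
    # 4096-dim halves, matching DINOv2's (int(1536 * 4 * 2 / 3) + 7) // 8 * 8 = 4096.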
model_args = dict(
patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5,
mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, img_size=518, act_layer=nn.SiLU
)
model = _create_vision_transformer(
'vit_giant_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-S/14 for DINOv2 w/ 4 registers
"""
model_args = dict(
patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5,
reg_tokens=4, no_embed_class=True,
)
model = _create_vision_transformer(
'vit_small_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-B/14 for DINOv2 w/ 4 registers
"""
model_args = dict(
patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5,
reg_tokens=4, no_embed_class=True,
)
model = _create_vision_transformer(
'vit_base_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-L/14 for DINOv2 w/ 4 registers
"""
model_args = dict(
patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5,
reg_tokens=4, no_embed_class=True,
)
model = _create_vision_transformer(
'vit_large_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_giant_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer:
""" ViT-G/14 for DINOv2
"""
# The hidden_features of SwiGLU is calculated by:
# hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
# When embed_dim=1536, hidden_features=4096
# With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192
model_args = dict(
patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, mlp_ratio=2.66667 * 2,
mlp_layer=SwiGLUPacked, act_layer=nn.SiLU, reg_tokens=4, no_embed_class=True,
)
model = _create_vision_transformer(
'vit_giant_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_siglip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map',
)
model = _create_vision_transformer(
'vit_base_patch16_siglip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map',
)
model = _create_vision_transformer(
'vit_base_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map',
)
model = _create_vision_transformer(
'vit_base_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_siglip_512(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map',
)
model = _create_vision_transformer(
'vit_base_patch16_siglip_512', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map',
)
model = _create_vision_transformer(
'vit_large_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_patch16_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map',
)
model = _create_vision_transformer(
'vit_large_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_so400m_patch14_siglip_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map',
)
model = _create_vision_transformer(
'vit_so400m_patch14_siglip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_so400m_patch14_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map',
)
model = _create_vision_transformer(
'vit_so400m_patch14_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_medium_patch16_reg4_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=True,
no_embed_class=True, reg_tokens=4,
)
model = _create_vision_transformer(
'vit_medium_patch16_reg4_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_medium_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8,
class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg',
)
model = _create_vision_transformer(
'vit_medium_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_patch16_reg8_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False,
no_embed_class=True, global_pool='avg', reg_tokens=8,
)
model = _create_vision_transformer(
'vit_base_patch16_reg8_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
'vit_tiny_patch16_224_in21k': 'vit_tiny_patch16_224.augreg_in21k',
'vit_small_patch32_224_in21k': 'vit_small_patch32_224.augreg_in21k',
'vit_small_patch16_224_in21k': 'vit_small_patch16_224.augreg_in21k',
'vit_base_patch32_224_in21k': 'vit_base_patch32_224.augreg_in21k',
'vit_base_patch16_224_in21k': 'vit_base_patch16_224.augreg_in21k',
'vit_base_patch8_224_in21k': 'vit_base_patch8_224.augreg_in21k',
'vit_large_patch32_224_in21k': 'vit_large_patch32_224.orig_in21k',
'vit_large_patch16_224_in21k': 'vit_large_patch16_224.augreg_in21k',
'vit_huge_patch14_224_in21k': 'vit_huge_patch14_224.orig_in21k',
'vit_base_patch32_224_sam': 'vit_base_patch32_224.sam',
'vit_base_patch16_224_sam': 'vit_base_patch16_224.sam',
'vit_small_patch16_224_dino': 'vit_small_patch16_224.dino',
'vit_small_patch8_224_dino': 'vit_small_patch8_224.dino',
'vit_base_patch16_224_dino': 'vit_base_patch16_224.dino',
'vit_base_patch8_224_dino': 'vit_base_patch8_224.dino',
'vit_base_patch16_224_miil_in21k': 'vit_base_patch16_224_miil.in21k',
'vit_base_patch32_224_clip_laion2b': 'vit_base_patch32_clip_224.laion2b',
'vit_large_patch14_224_clip_laion2b': 'vit_large_patch14_clip_224.laion2b',
'vit_huge_patch14_224_clip_laion2b': 'vit_huge_patch14_clip_224.laion2b',
'vit_giant_patch14_224_clip_laion2b': 'vit_giant_patch14_clip_224.laion2b',
})
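# Minimal usage sketch (illustrative only, not part of the original module): the variants
# registered above are normally built via the public timm.create_model entrypoint, which
# resolves '<model>.<tag>' names against default_cfgs.
if __name__ == '__main__':
    import torch
    import timm
    # pretrained=False avoids a weight download; num_classes swaps in a fresh classifier head
    model = timm.create_model('vit_base_patch16_224', pretrained=False, num_classes=10)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 10])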
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/levit.py | """ LeViT
Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference`
- https://arxiv.org/abs/2104.01136
@article{graham2021levit,
title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference},
author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze},
  journal={arXiv preprint arXiv:2104.01136},
year={2021}
}
Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright below.
This version combines both conv/linear models and fixes torchscript compatibility.
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
# Modified from
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# Copyright 2020 Ross Wightman, Apache-2.0 License
from collections import OrderedDict
from functools import partial
from typing import Dict
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN
from timm.layers import to_ntuple, to_2tuple, get_act_layer, DropPath, trunc_normal_
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['Levit']
class ConvNorm(nn.Module):
def __init__(
self, in_chs, out_chs, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.linear = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, dilation, groups, bias=False)
self.bn = nn.BatchNorm2d(out_chs)
nn.init.constant_(self.bn.weight, bn_weight_init)
@torch.no_grad()
def fuse(self):
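        # Fold the trailing BatchNorm into the (bias-free) conv:
        #   bn(conv(x)) = (gamma / sqrt(var + eps)) * conv(x) + (beta - gamma * mean / sqrt(var + eps))
        # so the fused conv scales each output-channel filter by gamma / sqrt(var + eps)
        # and gains a bias of beta - gamma * mean / sqrt(var + eps).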
c, bn = self.linear, self.bn
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
m = nn.Conv2d(
w.size(1), w.size(0), w.shape[2:], stride=self.linear.stride,
padding=self.linear.padding, dilation=self.linear.dilation, groups=self.linear.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
def forward(self, x):
return self.bn(self.linear(x))
class LinearNorm(nn.Module):
def __init__(self, in_features, out_features, bn_weight_init=1):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias=False)
self.bn = nn.BatchNorm1d(out_features)
nn.init.constant_(self.bn.weight, bn_weight_init)
@torch.no_grad()
def fuse(self):
l, bn = self.linear, self.bn
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = l.weight * w[:, None]
b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
m = nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
def forward(self, x):
x = self.linear(x)
return self.bn(x.flatten(0, 1)).reshape_as(x)
class NormLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.):
super().__init__()
self.bn = nn.BatchNorm1d(in_features)
self.drop = nn.Dropout(drop)
self.linear = nn.Linear(in_features, out_features, bias=bias)
trunc_normal_(self.linear.weight, std=std)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
@torch.no_grad()
def fuse(self):
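        # Here the BatchNorm precedes the linear, so it folds into the linear's input side:
        #   linear(bn(x)) = (W * diag(gamma / sqrt(var + eps))) @ x + W @ (beta - gamma * mean / sqrt(var + eps)) + b
        # i.e. scale the columns of W and push the BN shift through W into the bias.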
bn, l = self.bn, self.linear
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5
w = l.weight * w[None, :]
if l.bias is None:
b = b @ self.linear.weight.T
else:
b = (l.weight @ b[:, None]).view(-1) + self.linear.bias
m = nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
def forward(self, x):
return self.linear(self.drop(self.bn(x)))
class Stem8(nn.Sequential):
def __init__(self, in_chs, out_chs, act_layer):
super().__init__()
self.stride = 8
self.add_module('conv1', ConvNorm(in_chs, out_chs // 4, 3, stride=2, padding=1))
self.add_module('act1', act_layer())
self.add_module('conv2', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1))
self.add_module('act2', act_layer())
self.add_module('conv3', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1))
class Stem16(nn.Sequential):
def __init__(self, in_chs, out_chs, act_layer):
super().__init__()
self.stride = 16
self.add_module('conv1', ConvNorm(in_chs, out_chs // 8, 3, stride=2, padding=1))
self.add_module('act1', act_layer())
self.add_module('conv2', ConvNorm(out_chs // 8, out_chs // 4, 3, stride=2, padding=1))
self.add_module('act2', act_layer())
self.add_module('conv3', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1))
self.add_module('act3', act_layer())
self.add_module('conv4', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1))
class Downsample(nn.Module):
def __init__(self, stride, resolution, use_pool=False):
super().__init__()
self.stride = stride
self.resolution = to_2tuple(resolution)
self.pool = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False) if use_pool else None
def forward(self, x):
B, N, C = x.shape
x = x.view(B, self.resolution[0], self.resolution[1], C)
if self.pool is not None:
x = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
else:
x = x[:, ::self.stride, ::self.stride]
return x.reshape(B, -1, C)
class Attention(nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4.,
resolution=14,
use_conv=False,
act_layer=nn.SiLU,
):
super().__init__()
ln_layer = ConvNorm if use_conv else LinearNorm
resolution = to_2tuple(resolution)
self.use_conv = use_conv
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.key_attn_dim = key_dim * num_heads
self.val_dim = int(attn_ratio * key_dim)
self.val_attn_dim = int(attn_ratio * key_dim) * num_heads
self.qkv = ln_layer(dim, self.val_attn_dim + self.key_attn_dim * 2)
self.proj = nn.Sequential(OrderedDict([
('act', act_layer()),
('ln', ln_layer(self.val_attn_dim, dim, bn_weight_init=0))
]))
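        # Learned relative-position attention bias: one scalar per head for every possible
        # (|dy|, |dx|) offset on the token grid (index = |dy| * W + |dx|). attention_bias_idxs
        # maps each (query, key) pair to its offset index so an (N, N) bias table per head
        # can be gathered on the fly.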
self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1]))
pos = torch.stack(torch.meshgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1)
rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]
self.register_buffer('attention_bias_idxs', rel_pos, persistent=False)
self.attention_bias_cache = {}
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x): # x (B,C,H,W)
if self.use_conv:
B, C, H, W = x.shape
q, k, v = self.qkv(x).view(
B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.val_dim], dim=2)
attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device)
attn = attn.softmax(dim=-1)
x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
else:
B, N, C = x.shape
q, k, v = self.qkv(x).view(
B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3)
q = q.permute(0, 2, 1, 3)
k = k.permute(0, 2, 3, 1)
v = v.permute(0, 2, 1, 3)
attn = q @ k * self.scale + self.get_attention_biases(x.device)
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim)
x = self.proj(x)
return x
class AttentionDownsample(nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
in_dim,
out_dim,
key_dim,
num_heads=8,
attn_ratio=2.0,
stride=2,
resolution=14,
use_conv=False,
use_pool=False,
act_layer=nn.SiLU,
):
super().__init__()
resolution = to_2tuple(resolution)
self.stride = stride
self.resolution = resolution
self.num_heads = num_heads
self.key_dim = key_dim
self.key_attn_dim = key_dim * num_heads
self.val_dim = int(attn_ratio * key_dim)
self.val_attn_dim = self.val_dim * self.num_heads
self.scale = key_dim ** -0.5
self.use_conv = use_conv
if self.use_conv:
ln_layer = ConvNorm
sub_layer = partial(
nn.AvgPool2d,
kernel_size=3 if use_pool else 1, padding=1 if use_pool else 0, count_include_pad=False)
else:
ln_layer = LinearNorm
sub_layer = partial(Downsample, resolution=resolution, use_pool=use_pool)
self.kv = ln_layer(in_dim, self.val_attn_dim + self.key_attn_dim)
self.q = nn.Sequential(OrderedDict([
('down', sub_layer(stride=stride)),
('ln', ln_layer(in_dim, self.key_attn_dim))
]))
self.proj = nn.Sequential(OrderedDict([
('act', act_layer()),
('ln', ln_layer(self.val_attn_dim, out_dim))
]))
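        # Same relative-position bias scheme as Attention, except queries live on the strided
        # (downsampled) grid while keys/values keep full resolution, so the gathered bias
        # table has shape (num_heads, num_q_tokens, num_kv_tokens).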
self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1]))
k_pos = torch.stack(torch.meshgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1)
q_pos = torch.stack(torch.meshgrid(
torch.arange(0, resolution[0], step=stride),
torch.arange(0, resolution[1], step=stride))).flatten(1)
rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]
self.register_buffer('attention_bias_idxs', rel_pos, persistent=False)
self.attention_bias_cache = {} # per-device attention_biases cache
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
if self.use_conv:
B, C, H, W = x.shape
HH, WW = (H - 1) // self.stride + 1, (W - 1) // self.stride + 1
k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.val_dim], dim=2)
q = self.q(x).view(B, self.num_heads, self.key_dim, -1)
attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device)
attn = attn.softmax(dim=-1)
x = (v @ attn.transpose(-2, -1)).reshape(B, self.val_attn_dim, HH, WW)
else:
B, N, C = x.shape
k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.val_dim], dim=3)
k = k.permute(0, 2, 3, 1) # BHCN
v = v.permute(0, 2, 1, 3) # BHNC
q = self.q(x).view(B, -1, self.num_heads, self.key_dim).permute(0, 2, 1, 3)
attn = q @ k * self.scale + self.get_attention_biases(x.device)
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, -1, self.val_attn_dim)
x = self.proj(x)
return x
class LevitMlp(nn.Module):
""" MLP for Levit w/ normalization + ability to switch btw conv and linear
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
use_conv=False,
act_layer=nn.SiLU,
drop=0.
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
ln_layer = ConvNorm if use_conv else LinearNorm
self.ln1 = ln_layer(in_features, hidden_features)
self.act = act_layer()
self.drop = nn.Dropout(drop)
self.ln2 = ln_layer(hidden_features, out_features, bn_weight_init=0)
def forward(self, x):
x = self.ln1(x)
x = self.act(x)
x = self.drop(x)
x = self.ln2(x)
return x
class LevitDownsample(nn.Module):
def __init__(
self,
in_dim,
out_dim,
key_dim,
num_heads=8,
attn_ratio=4.,
mlp_ratio=2.,
act_layer=nn.SiLU,
attn_act_layer=None,
resolution=14,
use_conv=False,
use_pool=False,
drop_path=0.,
):
super().__init__()
attn_act_layer = attn_act_layer or act_layer
self.attn_downsample = AttentionDownsample(
in_dim=in_dim,
out_dim=out_dim,
key_dim=key_dim,
num_heads=num_heads,
attn_ratio=attn_ratio,
act_layer=attn_act_layer,
resolution=resolution,
use_conv=use_conv,
use_pool=use_pool,
)
self.mlp = LevitMlp(
out_dim,
int(out_dim * mlp_ratio),
use_conv=use_conv,
act_layer=act_layer
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = self.attn_downsample(x)
x = x + self.drop_path(self.mlp(x))
return x
class LevitBlock(nn.Module):
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4.,
mlp_ratio=2.,
resolution=14,
use_conv=False,
act_layer=nn.SiLU,
attn_act_layer=None,
drop_path=0.,
):
super().__init__()
attn_act_layer = attn_act_layer or act_layer
self.attn = Attention(
dim=dim,
key_dim=key_dim,
num_heads=num_heads,
attn_ratio=attn_ratio,
resolution=resolution,
use_conv=use_conv,
act_layer=attn_act_layer,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = LevitMlp(
dim,
int(dim * mlp_ratio),
use_conv=use_conv,
act_layer=act_layer
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.attn(x))
x = x + self.drop_path2(self.mlp(x))
return x
class LevitStage(nn.Module):
def __init__(
self,
in_dim,
out_dim,
key_dim,
depth=4,
num_heads=8,
attn_ratio=4.0,
mlp_ratio=4.0,
act_layer=nn.SiLU,
attn_act_layer=None,
resolution=14,
downsample='',
use_conv=False,
drop_path=0.,
):
super().__init__()
resolution = to_2tuple(resolution)
if downsample:
self.downsample = LevitDownsample(
in_dim,
out_dim,
key_dim=key_dim,
num_heads=in_dim // key_dim,
attn_ratio=4.,
mlp_ratio=2.,
act_layer=act_layer,
attn_act_layer=attn_act_layer,
resolution=resolution,
use_conv=use_conv,
drop_path=drop_path,
)
resolution = [(r - 1) // 2 + 1 for r in resolution]
else:
assert in_dim == out_dim
self.downsample = nn.Identity()
blocks = []
for _ in range(depth):
blocks += [LevitBlock(
out_dim,
key_dim,
num_heads=num_heads,
attn_ratio=attn_ratio,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
attn_act_layer=attn_act_layer,
resolution=resolution,
use_conv=use_conv,
drop_path=drop_path,
)]
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class Levit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
    NOTE: distillation is defaulted to True since pretrained weights use it; this will cause
    problems w/ train scripts that don't take tuple outputs.
    """
"""
def __init__(
self,
img_size=224,
in_chans=3,
num_classes=1000,
embed_dim=(192,),
key_dim=64,
depth=(12,),
num_heads=(3,),
attn_ratio=2.,
mlp_ratio=2.,
stem_backbone=None,
stem_stride=None,
stem_type='s16',
down_op='subsample',
act_layer='hard_swish',
attn_act_layer=None,
use_conv=False,
global_pool='avg',
drop_rate=0.,
drop_path_rate=0.):
super().__init__()
act_layer = get_act_layer(act_layer)
attn_act_layer = get_act_layer(attn_act_layer or act_layer)
self.use_conv = use_conv
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = embed_dim[-1]
self.embed_dim = embed_dim
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
num_stages = len(embed_dim)
assert len(depth) == num_stages
num_heads = to_ntuple(num_stages)(num_heads)
attn_ratio = to_ntuple(num_stages)(attn_ratio)
mlp_ratio = to_ntuple(num_stages)(mlp_ratio)
if stem_backbone is not None:
assert stem_stride >= 2
self.stem = stem_backbone
stride = stem_stride
else:
assert stem_type in ('s16', 's8')
if stem_type == 's16':
self.stem = Stem16(in_chans, embed_dim[0], act_layer=act_layer)
else:
self.stem = Stem8(in_chans, embed_dim[0], act_layer=act_layer)
stride = self.stem.stride
resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))])
in_dim = embed_dim[0]
stages = []
for i in range(num_stages):
stage_stride = 2 if i > 0 else 1
stages += [LevitStage(
in_dim,
embed_dim[i],
key_dim,
depth=depth[i],
num_heads=num_heads[i],
attn_ratio=attn_ratio[i],
mlp_ratio=mlp_ratio[i],
act_layer=act_layer,
attn_act_layer=attn_act_layer,
resolution=resolution,
use_conv=use_conv,
downsample=down_op if stage_stride == 2 else '',
drop_path=drop_path_rate
)]
stride *= stage_stride
resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution])
self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')]
in_dim = embed_dim[i]
self.stages = nn.Sequential(*stages)
# Classifier head
self.head = NormLinear(embed_dim[-1], num_classes, drop=drop_rate) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'attention_biases' in x}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None, distillation=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = NormLinear(
self.embed_dim[-1], num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if not self.use_conv:
x = x.flatten(2).transpose(1, 2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
class LevitDistilled(Levit):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.head_dist = NormLinear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
@torch.jit.ignore
def get_classifier(self):
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=None, distillation=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = NormLinear(
self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity()
self.head_dist = NormLinear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1)
if pre_logits:
return x
x, x_dist = self.head(x), self.head_dist(x)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
# during standard train/finetune, inference average the classifier predictions
return (x + x_dist) / 2
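# Behaviour sketch for LevitDistilled (illustrative, not from the original source): with
# set_distilled_training(True) and model.train(), forward() returns a (class_logits, distill_logits)
# tuple; otherwise the two head outputs are averaged into a single tensor, which is what most
# downstream eval/inference scripts expect.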
def checkpoint_filter_fn(state_dict, model):
if 'model' in state_dict:
state_dict = state_dict['model']
# filter out attn biases, should not have been persistent
state_dict = {k: v for k, v in state_dict.items() if 'attention_bias_idxs' not in k}
D = model.state_dict()
out_dict = {}
for ka, kb, va, vb in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()):
if va.ndim == 4 and vb.ndim == 2:
vb = vb[:, :, None, None]
if va.shape != vb.shape:
# head or first-conv shapes may change for fine-tune
assert 'head' in ka or 'stem.conv1.linear' in ka
out_dict[ka] = vb
return out_dict
model_cfgs = dict(
levit_128s=dict(
embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)),
levit_128=dict(
embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)),
levit_192=dict(
embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)),
levit_256=dict(
embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)),
levit_384=dict(
embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)),
# stride-8 stem experiments
levit_384_s8=dict(
embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4),
act_layer='silu', stem_type='s8'),
levit_512_s8=dict(
embed_dim=(512, 640, 896), key_dim=64, num_heads=(8, 10, 14), depth=(4, 4, 4),
act_layer='silu', stem_type='s8'),
# wider experiments
levit_512=dict(
embed_dim=(512, 768, 1024), key_dim=64, num_heads=(8, 12, 16), depth=(4, 4, 4), act_layer='silu'),
# deeper experiments
levit_256d=dict(
embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 8, 6), act_layer='silu'),
levit_512d=dict(
embed_dim=(512, 640, 768), key_dim=64, num_heads=(8, 10, 12), depth=(4, 8, 6), act_layer='silu'),
)
def create_levit(variant, cfg_variant=None, pretrained=False, distilled=True, **kwargs):
is_conv = '_conv' in variant
out_indices = kwargs.pop('out_indices', (0, 1, 2))
if kwargs.get('features_only', None):
if not is_conv:
raise RuntimeError('features_only not implemented for LeVit in non-convolutional mode.')
if cfg_variant is None:
if variant in model_cfgs:
cfg_variant = variant
elif is_conv:
cfg_variant = variant.replace('_conv', '')
model_cfg = dict(model_cfgs[cfg_variant], **kwargs)
model = build_model_with_cfg(
LevitDistilled if distilled else Levit,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**model_cfg,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1.linear', 'classifier': ('head.linear', 'head_dist.linear'),
**kwargs
}
default_cfgs = generate_default_cfgs({
# weights in nn.Linear mode
'levit_128s.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'levit_128.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'levit_192.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'levit_256.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'levit_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
),
# weights in nn.Conv2d mode
'levit_conv_128s.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
pool_size=(4, 4),
),
'levit_conv_128.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
pool_size=(4, 4),
),
'levit_conv_192.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
pool_size=(4, 4),
),
'levit_conv_256.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
pool_size=(4, 4),
),
'levit_conv_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
pool_size=(4, 4),
),
'levit_384_s8.untrained': _cfg(classifier='head.linear'),
'levit_512_s8.untrained': _cfg(classifier='head.linear'),
'levit_512.untrained': _cfg(classifier='head.linear'),
'levit_256d.untrained': _cfg(classifier='head.linear'),
'levit_512d.untrained': _cfg(classifier='head.linear'),
'levit_conv_384_s8.untrained': _cfg(classifier='head.linear'),
'levit_conv_512_s8.untrained': _cfg(classifier='head.linear'),
'levit_conv_512.untrained': _cfg(classifier='head.linear'),
'levit_conv_256d.untrained': _cfg(classifier='head.linear'),
'levit_conv_512d.untrained': _cfg(classifier='head.linear'),
})
@register_model
def levit_128s(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_128s', pretrained=pretrained, **kwargs)
@register_model
def levit_128(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_128', pretrained=pretrained, **kwargs)
@register_model
def levit_192(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_192', pretrained=pretrained, **kwargs)
@register_model
def levit_256(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_256', pretrained=pretrained, **kwargs)
@register_model
def levit_384(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_384', pretrained=pretrained, **kwargs)
@register_model
def levit_384_s8(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_384_s8', pretrained=pretrained, **kwargs)
@register_model
def levit_512_s8(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_512_s8', pretrained=pretrained, distilled=False, **kwargs)
@register_model
def levit_512(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_512', pretrained=pretrained, distilled=False, **kwargs)
@register_model
def levit_256d(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_256d', pretrained=pretrained, distilled=False, **kwargs)
@register_model
def levit_512d(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_512d', pretrained=pretrained, distilled=False, **kwargs)
@register_model
def levit_conv_128s(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_128s', pretrained=pretrained, use_conv=True, **kwargs)
@register_model
def levit_conv_128(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_128', pretrained=pretrained, use_conv=True, **kwargs)
@register_model
def levit_conv_192(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_192', pretrained=pretrained, use_conv=True, **kwargs)
@register_model
def levit_conv_256(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_256', pretrained=pretrained, use_conv=True, **kwargs)
@register_model
def levit_conv_384(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_384', pretrained=pretrained, use_conv=True, **kwargs)
@register_model
def levit_conv_384_s8(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_384_s8', pretrained=pretrained, use_conv=True, **kwargs)
@register_model
def levit_conv_512_s8(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_512_s8', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
@register_model
def levit_conv_512(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_512', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
@register_model
def levit_conv_256d(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_256d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
@register_model
def levit_conv_512d(pretrained=False, **kwargs) -> Levit:
return create_levit('levit_conv_512d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
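# Usage sketch (illustrative, assumes the timm factory API and `import torch, timm`):
#   m = timm.create_model('levit_128s', pretrained=False).eval()
#   logits = m(torch.randn(1, 3, 224, 224))   # averaged head output, shape (1, 1000)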
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/selecsls.py | """PyTorch SelecSLS Net example for ImageNet Classification
License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode)
Author: Dushyant Mehta (@mehtadushy)
SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D
Human Pose Estimation with a Single RGB Camera, Mehta et al."
https://arxiv.org/abs/1907.00837
Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models
and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch
"""
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['SelecSls'] # model_registry will add each entrypoint fn to this
class SequentialList(nn.Sequential):
def __init__(self, *args):
super(SequentialList, self).__init__(*args)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (List[torch.Tensor])
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (List[torch.Tensor])
pass
def forward(self, x) -> List[torch.Tensor]:
for module in self:
x = module(x)
return x
class SelectSeq(nn.Module):
def __init__(self, mode='index', index=0):
super(SelectSeq, self).__init__()
self.mode = mode
self.index = index
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (Tuple[torch.Tensor]) -> (torch.Tensor)
pass
def forward(self, x) -> torch.Tensor:
if self.mode == 'index':
return x[self.index]
else:
return torch.cat(x, dim=1)
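# Behaviour note for SelectSeq (illustrative): with the default mode='index', index=0 it simply
# picks the first tensor from the [out, skip] list produced by the feature blocks; mode='cat'
# concatenates the list along the channel dim instead.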
def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1):
if padding is None:
padding = ((stride - 1) + dilation * (k - 1)) // 2
return nn.Sequential(
nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_chs),
nn.ReLU(inplace=True)
)
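# Padding note (illustrative): the default keeps 'same'-style behaviour for odd kernels, e.g.
# k=3, stride=1, dilation=1 -> padding=1, and k=3, stride=2 -> padding=1 (even inputs are halved).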
class SelecSlsBlock(nn.Module):
def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1):
super(SelecSlsBlock, self).__init__()
self.stride = stride
self.is_first = is_first
assert stride in [1, 2]
        # Stacked 3x3/1x1 conv blocks produce intermediate features d1, d2, d3 that are concatenated
        # (together with the skip input for non-first blocks) and fused by a final 1x1 conv
self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation)
self.conv2 = conv_bn(mid_chs, mid_chs, 1)
self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3)
self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1)
self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3)
self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1)
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
if not isinstance(x, list):
x = [x]
assert len(x) in [1, 2]
d1 = self.conv1(x[0])
d2 = self.conv3(self.conv2(d1))
d3 = self.conv5(self.conv4(d2))
if self.is_first:
out = self.conv6(torch.cat([d1, d2, d3], 1))
return [out, out]
else:
return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]]
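# Flow sketch (illustrative, assumes `import torch`): a 'first' block returns [out, out] so the
# skip branch is seeded; later stride-1 blocks pass the skip tensor x[1] through unchanged.
# For the selecsls42 stage entry (32, 0, 64, 64, True, 2):
#   blk = SelecSlsBlock(32, 0, 64, 64, is_first=True, stride=2)
#   out = blk([torch.randn(1, 32, 112, 112)])   # -> [(1, 64, 56, 56), (1, 64, 56, 56)]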
class SelecSls(nn.Module):
"""SelecSls42 / SelecSls60 / SelecSls84
Parameters
----------
cfg : network config dictionary specifying block type, feature, and head args
num_classes : int, default 1000
Number of classification classes.
in_chans : int, default 3
Number of input (color) channels.
drop_rate : float, default 0.
Dropout probability before classifier, for training
global_pool : str, default 'avg'
Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax'
"""
def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
        super(SelecSls, self).__init__()
        self.num_classes = num_classes
self.stem = conv_bn(in_chans, 32, stride=2)
self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']])
self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way
self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']])
self.num_features = cfg['num_features']
self.feature_info = cfg['feature_info']
self.global_pool, self.head_drop, self.fc = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^features\.(\d+)',
blocks_head=r'^head'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.features(x)
x = self.head(self.from_seq(x))
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.fc(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_selecsls(variant, pretrained, **kwargs):
cfg = {}
feature_info = [dict(num_chs=32, reduction=2, module='stem.2')]
if variant.startswith('selecsls42'):
cfg['block'] = SelecSlsBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 128, False, 1),
(128, 0, 144, 144, True, 2),
(144, 144, 144, 288, False, 1),
(288, 0, 304, 304, True, 2),
(304, 304, 304, 480, False, 1),
]
feature_info.extend([
dict(num_chs=128, reduction=4, module='features.1'),
dict(num_chs=288, reduction=8, module='features.3'),
dict(num_chs=480, reduction=16, module='features.5'),
])
# Head can be replaced with alternative configurations depending on the problem
feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
if variant == 'selecsls42b':
cfg['head'] = [
(480, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1280, 3, 2),
(1280, 1024, 1, 1),
]
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
cfg['num_features'] = 1024
else:
cfg['head'] = [
(480, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 1, 1),
]
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
cfg['num_features'] = 1280
elif variant.startswith('selecsls60'):
cfg['block'] = SelecSlsBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 128, False, 1),
(128, 0, 128, 128, True, 2),
(128, 128, 128, 128, False, 1),
(128, 128, 128, 288, False, 1),
(288, 0, 288, 288, True, 2),
(288, 288, 288, 288, False, 1),
(288, 288, 288, 288, False, 1),
(288, 288, 288, 416, False, 1),
]
feature_info.extend([
dict(num_chs=128, reduction=4, module='features.1'),
dict(num_chs=288, reduction=8, module='features.4'),
dict(num_chs=416, reduction=16, module='features.8'),
])
# Head can be replaced with alternative configurations depending on the problem
feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
if variant == 'selecsls60b':
cfg['head'] = [
(416, 756, 3, 2),
(756, 1024, 3, 1),
(1024, 1280, 3, 2),
(1280, 1024, 1, 1),
]
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
cfg['num_features'] = 1024
else:
cfg['head'] = [
(416, 756, 3, 2),
(756, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 1, 1),
]
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
cfg['num_features'] = 1280
elif variant == 'selecsls84':
cfg['block'] = SelecSlsBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 144, False, 1),
(144, 0, 144, 144, True, 2),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 304, False, 1),
(304, 0, 304, 304, True, 2),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 512, False, 1),
]
feature_info.extend([
dict(num_chs=144, reduction=4, module='features.1'),
dict(num_chs=304, reduction=8, module='features.6'),
dict(num_chs=512, reduction=16, module='features.12'),
])
# Head can be replaced with alternative configurations depending on the problem
cfg['head'] = [
(512, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 3, 1),
]
cfg['num_features'] = 1280
feature_info.extend([
dict(num_chs=1024, reduction=32, module='head.1'),
dict(num_chs=1280, reduction=64, module='head.3')
])
else:
raise ValueError('Invalid net configuration ' + variant + ' !!!')
cfg['feature_info'] = feature_info
    # NOTE: this model can output 6 feature levels by default, unlike most others; restricted to 0-4 here to avoid surprises
return build_model_with_cfg(
SelecSls,
variant,
pretrained,
model_cfg=cfg,
feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'selecsls42.untrained': _cfg(
interpolation='bicubic'),
'selecsls42b.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'selecsls60.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'selecsls60b.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'selecsls84.untrained': _cfg(
interpolation='bicubic'),
})
@register_model
def selecsls42(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls42 model.
"""
return _create_selecsls('selecsls42', pretrained, **kwargs)
@register_model
def selecsls42b(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls42_B model.
"""
return _create_selecsls('selecsls42b', pretrained, **kwargs)
@register_model
def selecsls60(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls60 model.
"""
return _create_selecsls('selecsls60', pretrained, **kwargs)
@register_model
def selecsls60b(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls60_B model.
"""
return _create_selecsls('selecsls60b', pretrained, **kwargs)
@register_model
def selecsls84(pretrained=False, **kwargs) -> SelecSls:
"""Constructs a SelecSls84 model.
"""
return _create_selecsls('selecsls84', pretrained, **kwargs)
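# Usage sketch (illustrative, assumes the timm factory API):
#   m = timm.create_model('selecsls42b', pretrained=True)        # classification model
#   f = timm.create_model('selecsls60', features_only=True)      # feature extractor, strides 2..32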
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/swin_transformer.py | """ Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
S3 (AutoFormerV2, https://arxiv.org/abs/2111.14725) Swin weights from
- https://github.com/microsoft/Cream/tree/main/AutoFormerV2
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import logging
import math
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, ClassifierHead, to_2tuple, to_ntuple, trunc_normal_, \
_assert, use_fused_attn, resize_rel_pos_bias_table, resample_patch_embed
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq, named_apply
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
from .vision_transformer import get_init_weights_vit
__all__ = ['SwinTransformer'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
_int_or_tuple_2_t = Union[int, Tuple[int, int]]
def window_partition(
x: torch.Tensor,
window_size: Tuple[int, int],
) -> torch.Tensor:
"""
    Partition into non-overlapping windows. H and W must already be multiples of the window size
    (padding for non-divisible inputs is handled by the caller, e.g. SwinTransformerBlock._attn).
    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (Tuple[int, int]): window size.
    Returns:
        windows: windows after partition with [B * num_windows, window_size[0], window_size[1], C].
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: Tuple[int, int], H: int, W: int):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
        window_size (Tuple[int, int]): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
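# Illustrative round trip (not part of the original implementation, assumes `import torch`):
# window_partition and window_reverse are inverses once H and W are multiples of the window size,
# e.g. for B=2, H=W=14, C=96 with a 7x7 window:
#   x = torch.randn(2, 14, 14, 96)                     # B, H, W, C
#   windows = window_partition(x, (7, 7))              # (2 * 2 * 2, 7, 7, 96)
#   assert torch.equal(window_reverse(windows, (7, 7), 14, 14), x)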
def get_relative_position_index(win_h: int, win_w: int):
# get pair-wise relative position index for each token inside the window
coords = torch.stack(torch.meshgrid([torch.arange(win_h), torch.arange(win_w)])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += win_h - 1 # shift to start from 0
relative_coords[:, :, 1] += win_w - 1
relative_coords[:, :, 0] *= 2 * win_w - 1
return relative_coords.sum(-1) # Wh*Ww, Wh*Ww
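# Worked example (illustrative): for a 2x2 window the per-axis offsets lie in {-1, 0, 1}, so after
# shifting to be non-negative and flattening there are (2*2 - 1) * (2*2 - 1) = 9 distinct bias
# indices for the 4x4 token pairs, matching the relative_position_bias_table size used below:
#   idx = get_relative_position_index(2, 2)   # shape (4, 4), values in [0, 8]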
class WindowAttention(nn.Module):
""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports shifted and non-shifted windows.
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int,
head_dim: Optional[int] = None,
window_size: _int_or_tuple_2_t = 7,
qkv_bias: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
):
"""
Args:
dim: Number of input channels.
num_heads: Number of attention heads.
head_dim: Number of channels per head (dim // num_heads if not set)
window_size: The height and width of the window.
qkv_bias: If True, add a learnable bias to query, key, value.
attn_drop: Dropout ratio of attention weight.
proj_drop: Dropout ratio of output.
"""
super().__init__()
self.dim = dim
self.window_size = to_2tuple(window_size) # Wh, Ww
win_h, win_w = self.window_size
self.window_area = win_h * win_w
self.num_heads = num_heads
head_dim = head_dim or dim // num_heads
attn_dim = head_dim * num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn(experimental=True) # NOTE not tested for prime-time yet
# define a parameter table of relative position bias, shape: 2*Wh-1 * 2*Ww-1, nH
self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * win_h - 1) * (2 * win_w - 1), num_heads))
# get pair-wise relative position index for each token inside the window
self.register_buffer("relative_position_index", get_relative_position_index(win_h, win_w), persistent=False)
self.qkv = nn.Linear(dim, attn_dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(attn_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def _get_rel_pos_bias(self) -> torch.Tensor:
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(self.window_area, self.window_area, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
return relative_position_bias.unsqueeze(0)
def forward(self, x, mask: Optional[torch.Tensor] = None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.fused_attn:
attn_mask = self._get_rel_pos_bias()
if mask is not None:
num_win = mask.shape[0]
mask = mask.view(1, num_win, 1, N, N).expand(B_ // num_win, -1, self.num_heads, -1, -1)
attn_mask = attn_mask + mask.reshape(-1, self.num_heads, N, N)
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_mask,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn + self._get_rel_pos_bias()
if mask is not None:
num_win = mask.shape[0]
attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B_, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock(nn.Module):
""" Swin Transformer Block.
"""
def __init__(
self,
dim: int,
input_resolution: _int_or_tuple_2_t,
num_heads: int = 4,
head_dim: Optional[int] = None,
window_size: _int_or_tuple_2_t = 7,
shift_size: int = 0,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
):
"""
Args:
dim: Number of input channels.
input_resolution: Input resolution.
window_size: Window size.
num_heads: Number of attention heads.
head_dim: Enforce the number of channels per head
shift_size: Shift size for SW-MSA.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
proj_drop: Dropout rate.
attn_drop: Attention dropout rate.
drop_path: Stochastic depth rate.
act_layer: Activation layer.
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
ws, ss = self._calc_window_shift(window_size, shift_size)
self.window_size: Tuple[int, int] = ws
self.shift_size: Tuple[int, int] = ss
self.window_area = self.window_size[0] * self.window_size[1]
self.mlp_ratio = mlp_ratio
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
num_heads=num_heads,
head_dim=head_dim,
window_size=to_2tuple(self.window_size),
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if any(self.shift_size):
# calculate attention mask for SW-MSA
H, W = self.input_resolution
H = math.ceil(H / self.window_size[0]) * self.window_size[0]
W = math.ceil(W / self.window_size[1]) * self.window_size[1]
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
cnt = 0
for h in (
slice(0, -self.window_size[0]),
slice(-self.window_size[0], -self.shift_size[0]),
slice(-self.shift_size[0], None)):
for w in (
slice(0, -self.window_size[1]),
slice(-self.window_size[1], -self.shift_size[1]),
slice(-self.shift_size[1], None)):
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_area)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask, persistent=False)
def _calc_window_shift(self, target_window_size, target_shift_size) -> Tuple[Tuple[int, int], Tuple[int, int]]:
target_window_size = to_2tuple(target_window_size)
target_shift_size = to_2tuple(target_shift_size)
window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)]
shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
return tuple(window_size), tuple(shift_size)
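    # Example (illustrative): with input_resolution=(6, 14), target window 7 and shift 3, the
    # window is clamped to (6, 7) and shifting is disabled along the clamped axis -> shift (0, 3).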
def _attn(self, x):
B, H, W, C = x.shape
# cyclic shift
has_shift = any(self.shift_size)
if has_shift:
shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2))
else:
shifted_x = x
# pad for resolution not divisible by window size
pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
shifted_x = shifted_x[:, :H, :W, :].contiguous()
# reverse cyclic shift
if has_shift:
x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2))
else:
x = shifted_x
return x
def forward(self, x):
B, H, W, C = x.shape
x = x + self.drop_path1(self._attn(self.norm1(x)))
x = x.reshape(B, -1, C)
x = x + self.drop_path2(self.mlp(self.norm2(x)))
x = x.reshape(B, H, W, C)
return x
class PatchMerging(nn.Module):
""" Patch Merging Layer.
"""
def __init__(
self,
dim: int,
out_dim: Optional[int] = None,
norm_layer: Callable = nn.LayerNorm,
):
"""
Args:
dim: Number of input channels.
out_dim: Number of output channels (or 2 * dim if None)
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.out_dim = out_dim or 2 * dim
self.norm = norm_layer(4 * dim)
self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False)
def forward(self, x):
B, H, W, C = x.shape
_assert(H % 2 == 0, f"x height ({H}) is not even.")
_assert(W % 2 == 0, f"x width ({W}) is not even.")
x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)
x = self.norm(x)
x = self.reduction(x)
return x
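# Shape sketch for PatchMerging (illustrative, assumes `import torch`): each 2x2 neighborhood is
# concatenated along channels before the linear reduction, e.g. with dim=96 and out_dim defaulting
# to 192:
#   y = PatchMerging(dim=96)(torch.randn(1, 56, 56, 96))   # -> (1, 28, 28, 192)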
class SwinTransformerStage(nn.Module):
""" A basic Swin Transformer layer for one stage.
"""
def __init__(
self,
dim: int,
out_dim: int,
input_resolution: Tuple[int, int],
depth: int,
downsample: bool = True,
num_heads: int = 4,
head_dim: Optional[int] = None,
window_size: _int_or_tuple_2_t = 7,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: Union[List[float], float] = 0.,
norm_layer: Callable = nn.LayerNorm,
):
"""
Args:
dim: Number of input channels.
out_dim: Number of output channels.
input_resolution: Input resolution.
depth: Number of blocks.
downsample: Downsample layer at the end of the layer.
num_heads: Number of attention heads.
head_dim: Channels per head (dim // num_heads if not set)
window_size: Local window size.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
proj_drop: Projection dropout rate.
attn_drop: Attention dropout rate.
drop_path: Stochastic depth rate.
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution
self.depth = depth
self.grad_checkpointing = False
window_size = to_2tuple(window_size)
shift_size = tuple([w // 2 for w in window_size])
# patch merging layer
if downsample:
self.downsample = PatchMerging(
dim=dim,
out_dim=out_dim,
norm_layer=norm_layer,
)
else:
assert dim == out_dim
self.downsample = nn.Identity()
# build blocks
self.blocks = nn.Sequential(*[
SwinTransformerBlock(
dim=out_dim,
input_resolution=self.output_resolution,
num_heads=num_heads,
head_dim=head_dim,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else shift_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)])
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class SwinTransformer(nn.Module):
""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
"""
def __init__(
self,
img_size: _int_or_tuple_2_t = 224,
patch_size: int = 4,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 96,
depths: Tuple[int, ...] = (2, 2, 6, 2),
num_heads: Tuple[int, ...] = (3, 6, 12, 24),
head_dim: Optional[int] = None,
window_size: _int_or_tuple_2_t = 7,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.1,
embed_layer: Callable = PatchEmbed,
norm_layer: Union[str, Callable] = nn.LayerNorm,
weight_init: str = '',
**kwargs,
):
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
embed_dim: Patch embedding dimension.
depths: Depth of each Swin Transformer layer.
num_heads: Number of attention heads in different layers.
head_dim: Dimension of self-attention heads.
window_size: Window size.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
            drop_rate: Dropout rate.
            proj_drop_rate: Projection dropout rate.
            attn_drop_rate: Attention dropout rate.
            drop_path_rate: Stochastic depth rate.
            embed_layer: Patch embedding layer.
            norm_layer: Normalization layer.
"""
super().__init__()
assert global_pool in ('', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.output_fmt = 'NHWC'
self.num_layers = len(depths)
self.embed_dim = embed_dim
        self.feature_info = []
        if not isinstance(embed_dim, (tuple, list)):
            embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
        self.num_features = embed_dim[-1]
# split image into non-overlapping patches
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
norm_layer=norm_layer,
output_fmt='NHWC',
)
self.patch_grid = self.patch_embed.grid_size
# build layers
head_dim = to_ntuple(self.num_layers)(head_dim)
if not isinstance(window_size, (list, tuple)):
window_size = to_ntuple(self.num_layers)(window_size)
elif len(window_size) == 2:
window_size = (window_size,) * self.num_layers
assert len(window_size) == self.num_layers
mlp_ratio = to_ntuple(self.num_layers)(mlp_ratio)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
layers = []
in_dim = embed_dim[0]
scale = 1
for i in range(self.num_layers):
out_dim = embed_dim[i]
layers += [SwinTransformerStage(
dim=in_dim,
out_dim=out_dim,
input_resolution=(
self.patch_grid[0] // scale,
self.patch_grid[1] // scale
),
depth=depths[i],
downsample=i > 0,
num_heads=num_heads[i],
head_dim=head_dim[i],
window_size=window_size[i],
mlp_ratio=mlp_ratio[i],
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)]
in_dim = out_dim
if i > 0:
scale *= 2
self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')]
self.layers = nn.Sequential(*layers)
self.norm = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
input_fmt=self.output_fmt,
)
if weight_init != 'skip':
self.init_weights(weight_init)
@torch.jit.ignore
def init_weights(self, mode=''):
assert mode in ('jax', 'jax_nlhb', 'moco', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
named_apply(get_init_weights_vit(mode, head_bias=head_bias), self)
@torch.jit.ignore
def no_weight_decay(self):
nwd = set()
for n, _ in self.named_parameters():
if 'relative_position_bias_table' in n:
nwd.add(n)
return nwd
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^patch_embed', # stem and embed
blocks=r'^layers\.(\d+)' if coarse else [
(r'^layers\.(\d+).downsample', (0,)),
(r'^layers\.(\d+)\.\w+\.(\d+)', None),
(r'^norm', (99999,)),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for l in self.layers:
l.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.patch_embed(x)
x = self.layers(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
old_weights = True
if 'head.fc.weight' in state_dict:
old_weights = False
import re
out_dict = {}
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
for k, v in state_dict.items():
if any([n in k for n in ('relative_position_index', 'attn_mask')]):
continue # skip buffers that should not be persistent
if 'patch_embed.proj.weight' in k:
_, _, H, W = model.patch_embed.proj.weight.shape
if v.shape[-2] != H or v.shape[-1] != W:
v = resample_patch_embed(
v,
(H, W),
interpolation='bicubic',
antialias=True,
verbose=True,
)
if k.endswith('relative_position_bias_table'):
m = model.get_submodule(k[:-29])
if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
v = resize_rel_pos_bias_table(
v,
new_window_size=m.window_size,
new_bias_shape=m.relative_position_bias_table.shape,
)
if old_weights:
            k = re.sub(r'layers\.(\d+)\.downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k)
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
def _create_swin_transformer(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
SwinTransformer, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
'swin_small_patch4_window7_224.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22kto1k_finetune.pth', ),
'swin_base_patch4_window7_224.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',),
'swin_base_patch4_window12_384.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'swin_large_patch4_window7_224.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',),
'swin_large_patch4_window12_384.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'swin_tiny_patch4_window7_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',),
'swin_small_patch4_window7_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',),
'swin_base_patch4_window7_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth',),
'swin_base_patch4_window12_384.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
# tiny 22k pretrain is worse than 1k, so moved after (untagged priority is based on order)
'swin_tiny_patch4_window7_224.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22kto1k_finetune.pth',),
'swin_tiny_patch4_window7_224.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_small_patch4_window7_224.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_base_patch4_window7_224.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_base_patch4_window12_384.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841),
'swin_large_patch4_window7_224.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_large_patch4_window12_384.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841),
'swin_s3_tiny_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_t-1d53f6a8.pth'),
'swin_s3_small_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_s-3bb4c69d.pth'),
'swin_s3_base_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_b-a1e95db4.pth'),
})
@register_model
def swin_tiny_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-T @ 224x224, trained ImageNet-1k
"""
model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer(
'swin_tiny_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_small_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-S @ 224x224
"""
model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer(
'swin_small_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_base_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-B @ 224x224
"""
model_args = dict(patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer(
'swin_base_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_base_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-B @ 384x384
"""
model_args = dict(patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer(
'swin_base_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_large_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-L @ 224x224
"""
model_args = dict(patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48))
return _create_swin_transformer(
'swin_large_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_large_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-L @ 384x384
"""
model_args = dict(patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48))
return _create_swin_transformer(
'swin_large_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_s3_tiny_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-S3-T @ 224x224, https://arxiv.org/abs/2111.14725
"""
model_args = dict(
patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer('swin_s3_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_s3_small_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-S3-S @ 224x224, https://arxiv.org/abs/2111.14725
"""
model_args = dict(
patch_size=4, window_size=(14, 14, 14, 7), embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer('swin_s3_small_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_s3_base_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-S3-B @ 224x224, https://arxiv.org/abs/2111.14725
"""
model_args = dict(
patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 30, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer('swin_s3_base_224', pretrained=pretrained, **dict(model_args, **kwargs))
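# Usage sketch (illustrative, assumes the timm factory API and `import torch, timm`):
#   m = timm.create_model('swin_tiny_patch4_window7_224', pretrained=False).eval()
#   feats = m.forward_features(torch.randn(1, 3, 224, 224))   # NHWC features, (1, 7, 7, 768)
#   logits = m(torch.randn(1, 3, 224, 224))                   # (1, 1000)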
register_model_deprecations(__name__, {
'swin_base_patch4_window7_224_in22k': 'swin_base_patch4_window7_224.ms_in22k',
'swin_base_patch4_window12_384_in22k': 'swin_base_patch4_window12_384.ms_in22k',
'swin_large_patch4_window7_224_in22k': 'swin_large_patch4_window7_224.ms_in22k',
'swin_large_patch4_window12_384_in22k': 'swin_large_patch4_window12_384.ms_in22k',
})
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_efficientnet_blocks.py | """ EfficientNet, MobileNetV3, etc Blocks
Hacked together by / Copyright 2019, Ross Wightman
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from timm.layers import create_conv2d, DropPath, make_divisible, create_act_layer, get_norm_act_layer
__all__ = [
'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual']
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
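# Examples (illustrative): group_size is channels-per-group, not the number of groups:
#   num_groups(0, 64)  -> 1   (standard conv)
#   num_groups(1, 64)  -> 64  (depthwise conv, one channel per group)
#   num_groups(16, 64) -> 4   (grouped conv, 16 channels per group)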
class SqueezeExcite(nn.Module):
""" Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family
Args:
in_chs (int): input channels to layer
rd_ratio (float): ratio of squeeze reduction
act_layer (nn.Module): activation layer of containing block
gate_layer (Callable): attention gate function
force_act_layer (nn.Module): override block's activation fn if this is set/bound
rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs
"""
def __init__(
self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU,
gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None):
super(SqueezeExcite, self).__init__()
if rd_channels is None:
rd_round_fn = rd_round_fn or round
rd_channels = rd_round_fn(in_chs * rd_ratio)
act_layer = force_act_layer or act_layer
self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)
self.act1 = create_act_layer(act_layer, inplace=True)
self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
x_se = x.mean((2, 3), keepdim=True)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
return x * self.gate(x_se)
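# Usage sketch for SqueezeExcite (illustrative, assumes `import torch`): with the defaults
# rd_channels = round(in_chs * rd_ratio), so SqueezeExcite(64) squeezes 64 -> 16 -> 64 and returns
# a per-channel gated copy of the input:
#   y = SqueezeExcite(64)(torch.randn(2, 64, 7, 7))   # same shape as the input, (2, 64, 7, 7)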
class ConvBnAct(nn.Module):
""" Conv + Norm Layer + Activation w/ optional skip connection
"""
def __init__(
self, in_chs, out_chs, kernel_size, stride=1, dilation=1, group_size=0, pad_type='',
skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.):
super(ConvBnAct, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
groups = num_groups(group_size, in_chs)
self.has_skip = skip and stride == 1 and in_chs == out_chs
self.conv = create_conv2d(
in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, groups=groups, padding=pad_type)
self.bn1 = norm_act_layer(out_chs, inplace=True)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
        if location == 'expansion':  # output of conv after act, same as block output
return dict(module='bn1', hook_type='forward', num_chs=self.conv.out_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv.out_channels)
def forward(self, x):
shortcut = x
x = self.conv(x)
x = self.bn1(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class DepthwiseSeparableConv(nn.Module):
""" DepthwiseSeparable block
Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
    (factor of 1.0). This is an alternative to having an IR with an optional first pw conv.
"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
se_layer=None, drop_path_rate=0.):
super(DepthwiseSeparableConv, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
groups = num_groups(group_size, in_chs)
self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip
self.has_pw_act = pw_act # activation after point-wise conv
self.conv_dw = create_conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, groups=groups)
self.bn1 = norm_act_layer(in_chs, inplace=True)
# Squeeze-and-excitation
self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity()
self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_act_layer(out_chs, inplace=True, apply_act=self.has_pw_act)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PW
return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pw.out_channels)
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
x = self.bn1(x)
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class InvertedResidual(nn.Module):
""" Inverted residual block w/ optional SE
Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
    referred to as 'MBConv' (Mobile inverted Bottleneck Conv) and is also used in
* MNasNet - https://arxiv.org/abs/1807.11626
* EfficientNet - https://arxiv.org/abs/1905.11946
* MobileNet-V3 - https://arxiv.org/abs/1905.02244
"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.):
super(InvertedResidual, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
conv_kwargs = conv_kwargs or {}
mid_chs = make_divisible(in_chs * exp_ratio)
groups = num_groups(group_size, mid_chs)
self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
# Point-wise expansion
self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
self.bn1 = norm_act_layer(mid_chs, inplace=True)
# Depth-wise convolution
self.conv_dw = create_conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
groups=groups, padding=pad_type, **conv_kwargs)
self.bn2 = norm_act_layer(mid_chs, inplace=True)
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
self.bn3 = norm_act_layer(out_chs, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PWL
return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pwl.out_channels)
def forward(self, x):
shortcut = x
x = self.conv_pw(x)
x = self.bn1(x)
x = self.conv_dw(x)
x = self.bn2(x)
x = self.se(x)
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
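# Shape sketch for InvertedResidual (illustrative, assumes `import torch`): an MBConv with
# expansion 6 and stride 2 has no residual connection (stride != 1), e.g.
#   blk = InvertedResidual(32, 64, dw_kernel_size=3, stride=2, exp_ratio=6.0)
#   y = blk(torch.randn(1, 32, 56, 56))   # -> (1, 64, 28, 28)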
class CondConvResidual(InvertedResidual):
""" Inverted residual block w/ CondConv routing"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.):
self.num_experts = num_experts
conv_kwargs = dict(num_experts=self.num_experts)
super(CondConvResidual, self).__init__(
in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, group_size=group_size,
pad_type=pad_type, act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size,
pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs,
drop_path_rate=drop_path_rate)
self.routing_fn = nn.Linear(in_chs, self.num_experts)
def forward(self, x):
shortcut = x
pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) # CondConv routing
routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
x = self.conv_pw(x, routing_weights)
x = self.bn1(x)
x = self.conv_dw(x, routing_weights)
x = self.bn2(x)
x = self.se(x)
x = self.conv_pwl(x, routing_weights)
x = self.bn3(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
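# Routing note: with num_experts = E, routing_fn maps the globally pooled input of shape
# (B, in_chs) to per-sample expert weights of shape (B, E); the sigmoid-activated weights
# are passed to the CondConv-enabled conv_pw / conv_dw / conv_pwl layers (created via
# conv_kwargs=dict(num_experts=E)), which mix their E kernel banks per sample.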
class EdgeResidual(nn.Module):
""" Residual block with expansion convolution followed by pointwise-linear w/ stride
Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML`
- https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers
* MobileDet - https://arxiv.org/abs/2004.14525
* EfficientNet-X - https://arxiv.org/abs/2102.05610
* EfficientNet-V2 - https://arxiv.org/abs/2104.00298
"""
def __init__(
self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, group_size=0, pad_type='',
force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.):
super(EdgeResidual, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
if force_in_chs > 0:
mid_chs = make_divisible(force_in_chs * exp_ratio)
else:
mid_chs = make_divisible(in_chs * exp_ratio)
groups = num_groups(group_size, in_chs)
self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
# Expansion convolution
self.conv_exp = create_conv2d(
in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, groups=groups, padding=pad_type)
self.bn1 = norm_act_layer(mid_chs, inplace=True)
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_act_layer(out_chs, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, before PWL
return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pwl.out_channels)
def forward(self, x):
shortcut = x
x = self.conv_exp(x)
x = self.bn1(x)
x = self.se(x)
x = self.conv_pwl(x)
x = self.bn2(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
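# Shape note: EdgeResidual fuses the expansion and depthwise steps of an MBConv into the
# single (optionally grouped) conv_exp. For example, a stride-1 block with
# in_chs == out_chs == 24 and exp_ratio=4.0 runs 24 -> 96 -> 24 with the residual skip
# applied, matching the Fused-MBConv blocks used in the early stages of EfficientNet-V2.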
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/resnet.py | """PyTorch ResNet
This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with
additional dropout and dynamic global avg/max pool.
ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman
Copyright 2019, Ross Wightman
"""
import math
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, GroupNorm, LayerType, create_attn, \
get_attn, get_act_layer, get_norm_layer, create_classifier
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this
def get_padding(kernel_size: int, stride: int, dilation: int = 1) -> int:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
def create_aa(aa_layer: Type[nn.Module], channels: int, stride: int = 2, enable: bool = True) -> nn.Module:
if not aa_layer or not enable:
return nn.Identity()
if issubclass(aa_layer, nn.AvgPool2d):
return aa_layer(stride)
else:
return aa_layer(channels=channels, stride=stride)
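# Example: create_aa is used to split a strided conv into 'stride-1 conv + anti-aliased
# downsample' (see BasicBlock / Bottleneck below). Roughly:
#
#   create_aa(BlurPool2d, channels=64, stride=2)    # -> BlurPool2d(channels=64, stride=2)
#   create_aa(nn.AvgPool2d, channels=64, stride=2)  # -> nn.AvgPool2d(2)
#   create_aa(None, channels=64, stride=2)          # -> nn.Identity()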
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
cardinality: int = 1,
base_width: int = 64,
reduce_first: int = 1,
dilation: int = 1,
first_dilation: Optional[int] = None,
act_layer: Type[nn.Module] = nn.ReLU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
attn_layer: Optional[Type[nn.Module]] = None,
aa_layer: Optional[Type[nn.Module]] = None,
drop_block: Optional[Type[nn.Module]] = None,
drop_path: Optional[nn.Module] = None,
):
"""
Args:
inplanes: Input channel dimensionality.
planes: Used to determine output channel dimensionalities.
stride: Stride used in convolution layers.
downsample: Optional downsample layer for residual path.
cardinality: Number of convolution groups.
base_width: Base width used to determine output channel dimensionality.
reduce_first: Reduction factor for first convolution output width of residual blocks.
dilation: Dilation rate for convolution layers.
first_dilation: Dilation rate for first convolution layer.
act_layer: Activation layer.
norm_layer: Normalization layer.
attn_layer: Attention layer.
aa_layer: Anti-aliasing layer.
drop_block: Class for DropBlock layer.
drop_path: Optional DropPath layer.
"""
super(BasicBlock, self).__init__()
assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
assert base_width == 64, 'BasicBlock does not support changing base width'
first_planes = planes // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
self.conv1 = nn.Conv2d(
inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation,
dilation=first_dilation, bias=False)
self.bn1 = norm_layer(first_planes)
self.drop_block = drop_block() if drop_block is not None else nn.Identity()
self.act1 = act_layer(inplace=True)
self.aa = create_aa(aa_layer, channels=first_planes, stride=stride, enable=use_aa)
self.conv2 = nn.Conv2d(
first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False)
self.bn2 = norm_layer(outplanes)
self.se = create_attn(attn_layer, outplanes)
self.act2 = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_path = drop_path
def zero_init_last(self):
if getattr(self.bn2, 'weight', None) is not None:
nn.init.zeros_(self.bn2.weight)
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.conv1(x)
x = self.bn1(x)
x = self.drop_block(x)
x = self.act1(x)
x = self.aa(x)
x = self.conv2(x)
x = self.bn2(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act2(x)
return x
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
cardinality: int = 1,
base_width: int = 64,
reduce_first: int = 1,
dilation: int = 1,
first_dilation: Optional[int] = None,
act_layer: Type[nn.Module] = nn.ReLU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
attn_layer: Optional[Type[nn.Module]] = None,
aa_layer: Optional[Type[nn.Module]] = None,
drop_block: Optional[Type[nn.Module]] = None,
drop_path: Optional[nn.Module] = None,
):
"""
Args:
inplanes: Input channel dimensionality.
planes: Used to determine output channel dimensionalities.
stride: Stride used in convolution layers.
downsample: Optional downsample layer for residual path.
cardinality: Number of convolution groups.
base_width: Base width used to determine output channel dimensionality.
reduce_first: Reduction factor for first convolution output width of residual blocks.
dilation: Dilation rate for convolution layers.
first_dilation: Dilation rate for first convolution layer.
act_layer: Activation layer.
norm_layer: Normalization layer.
attn_layer: Attention layer.
aa_layer: Anti-aliasing layer.
drop_block: Class for DropBlock layer.
drop_path: Optional DropPath layer.
"""
super(Bottleneck, self).__init__()
width = int(math.floor(planes * (base_width / 64)) * cardinality)
first_planes = width // reduce_first
outplanes = planes * self.expansion
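        # e.g. ResNeXt-50 32x4d first stage: planes=64, base_width=4, cardinality=32
        # -> width = floor(64 * 4 / 64) * 32 = 128 grouped 3x3 channels, outplanes = 64 * 4 = 256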
first_dilation = first_dilation or dilation
use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False)
self.bn1 = norm_layer(first_planes)
self.act1 = act_layer(inplace=True)
self.conv2 = nn.Conv2d(
first_planes, width, kernel_size=3, stride=1 if use_aa else stride,
padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)
self.bn2 = norm_layer(width)
self.drop_block = drop_block() if drop_block is not None else nn.Identity()
self.act2 = act_layer(inplace=True)
self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa)
self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
self.bn3 = norm_layer(outplanes)
self.se = create_attn(attn_layer, outplanes)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_path = drop_path
def zero_init_last(self):
if getattr(self.bn3, 'weight', None) is not None:
nn.init.zeros_(self.bn3.weight)
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.drop_block(x)
x = self.act2(x)
x = self.aa(x)
x = self.conv3(x)
x = self.bn3(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act3(x)
return x
def downsample_conv(
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
first_dilation: Optional[int] = None,
norm_layer: Optional[Type[nn.Module]] = None,
) -> nn.Module:
norm_layer = norm_layer or nn.BatchNorm2d
kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1
p = get_padding(kernel_size, stride, first_dilation)
return nn.Sequential(*[
nn.Conv2d(
in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False),
norm_layer(out_channels)
])
def downsample_avg(
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
first_dilation: Optional[int] = None,
norm_layer: Optional[Type[nn.Module]] = None,
) -> nn.Module:
norm_layer = norm_layer or nn.BatchNorm2d
avg_stride = stride if dilation == 1 else 1
if stride == 1 and dilation == 1:
pool = nn.Identity()
else:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
return nn.Sequential(*[
pool,
nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False),
norm_layer(out_channels)
])
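# Note: downsample_conv is the classic strided 1x1 projection shortcut; downsample_avg is
# the ResNet-D style variant ('Bag of Tricks', https://arxiv.org/abs/1812.01187) that
# average-pools first and then applies a stride-1 1x1 conv, so no input positions are skipped.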
def drop_blocks(drop_prob: float = 0.):
return [
None, None,
partial(DropBlock2d, drop_prob=drop_prob, block_size=5, gamma_scale=0.25) if drop_prob else None,
partial(DropBlock2d, drop_prob=drop_prob, block_size=3, gamma_scale=1.00) if drop_prob else None]
def make_blocks(
block_fn: Union[BasicBlock, Bottleneck],
channels: List[int],
block_repeats: List[int],
inplanes: int,
reduce_first: int = 1,
output_stride: int = 32,
down_kernel_size: int = 1,
avg_down: bool = False,
drop_block_rate: float = 0.,
drop_path_rate: float = 0.,
**kwargs,
) -> Tuple[List[Tuple[str, nn.Module]], List[Dict[str, Any]]]:
stages = []
feature_info = []
net_num_blocks = sum(block_repeats)
net_block_idx = 0
net_stride = 4
dilation = prev_dilation = 1
for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))):
stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it
stride = 1 if stage_idx == 0 else 2
if net_stride >= output_stride:
dilation *= stride
stride = 1
else:
net_stride *= stride
downsample = None
if stride != 1 or inplanes != planes * block_fn.expansion:
down_kwargs = dict(
in_channels=inplanes,
out_channels=planes * block_fn.expansion,
kernel_size=down_kernel_size,
stride=stride,
dilation=dilation,
first_dilation=prev_dilation,
norm_layer=kwargs.get('norm_layer'),
)
downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs)
block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs)
blocks = []
for block_idx in range(num_blocks):
downsample = downsample if block_idx == 0 else None
stride = stride if block_idx == 0 else 1
block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule
blocks.append(block_fn(
inplanes,
planes,
stride,
downsample,
first_dilation=prev_dilation,
drop_path=DropPath(block_dpr) if block_dpr > 0. else None,
**block_kwargs,
))
prev_dilation = dilation
inplanes = planes * block_fn.expansion
net_block_idx += 1
stages.append((stage_name, nn.Sequential(*blocks)))
feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name))
return stages, feature_info
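# Stochastic depth: block_dpr decays linearly over all blocks in the network, e.g. for
# ResNet-50 (net_num_blocks = 3 + 4 + 6 + 3 = 16) with drop_path_rate=0.1 the per-block
# rates run 0.0, 0.1 * 1/15, ..., 0.1 from the first block of layer1 to the last of layer4.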
class ResNet(nn.Module):
"""ResNet / ResNeXt / SE-ResNeXt / SE-Net
This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that
* have > 1 stride in the 3x3 conv layer of bottleneck
* have conv-bn-act ordering
This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s
variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the
'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default.
ResNet variants (the same modifications can be used in SE/ResNeXt models as well):
* normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b
* c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64)
* d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample
* e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample
* s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128)
* t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample
* tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample
ResNeXt
* normal - 7x7 stem, stem_width = 64, standard cardinality and base widths
* same c,d, e, s variants as ResNet can be enabled
SE-ResNeXt
* normal - 7x7 stem, stem_width = 64
* same c, d, e, s variants as ResNet can be enabled
SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64,
reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block
"""
def __init__(
self,
block: Union[BasicBlock, Bottleneck],
layers: List[int],
num_classes: int = 1000,
in_chans: int = 3,
output_stride: int = 32,
global_pool: str = 'avg',
cardinality: int = 1,
base_width: int = 64,
stem_width: int = 64,
stem_type: str = '',
replace_stem_pool: bool = False,
block_reduce_first: int = 1,
down_kernel_size: int = 1,
avg_down: bool = False,
act_layer: LayerType = nn.ReLU,
norm_layer: LayerType = nn.BatchNorm2d,
aa_layer: Optional[Type[nn.Module]] = None,
drop_rate: float = 0.0,
drop_path_rate: float = 0.,
drop_block_rate: float = 0.,
zero_init_last: bool = True,
block_args: Optional[Dict[str, Any]] = None,
):
"""
Args:
block (nn.Module): class for the residual block. Options are BasicBlock, Bottleneck.
layers (List[int]) : number of layers in each block
num_classes (int): number of classification classes (default 1000)
in_chans (int): number of input (color) channels. (default 3)
output_stride (int): output stride of the network, 32, 16, or 8. (default 32)
global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg')
cardinality (int): number of convolution groups for 3x3 conv in Bottleneck. (default 1)
base_width (int): bottleneck channels factor. `planes * base_width / 64 * cardinality` (default 64)
stem_width (int): number of channels in stem convolutions (default 64)
stem_type (str): The type of stem (default ''):
* '', default - a single 7x7 conv with a width of stem_width
* 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2
* 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2
replace_stem_pool (bool): replace stem max-pooling layer with a 3x3 stride-2 convolution
block_reduce_first (int): Reduction factor for first convolution output width of residual blocks,
                1 for all archs except SENets, where it is 2 (default 1)
            down_kernel_size (int): kernel size of residual block downsample path,
                1x1 for most, 3x3 for SENets (default: 1)
avg_down (bool): use avg pooling for projection skip connection between stages/downsample (default False)
act_layer (str, nn.Module): activation layer
norm_layer (str, nn.Module): normalization layer
aa_layer (nn.Module): anti-aliasing layer
drop_rate (float): Dropout probability before classifier, for training (default 0.)
drop_path_rate (float): Stochastic depth drop-path rate (default 0.)
drop_block_rate (float): Drop block rate (default 0.)
zero_init_last (bool): zero-init the last weight in residual path (usually last BN affine weight)
block_args (dict): Extra kwargs to pass through to block module
"""
super(ResNet, self).__init__()
block_args = block_args or dict()
assert output_stride in (8, 16, 32)
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
act_layer = get_act_layer(act_layer)
norm_layer = get_norm_layer(norm_layer)
# Stem
deep_stem = 'deep' in stem_type
inplanes = stem_width * 2 if deep_stem else 64
if deep_stem:
stem_chs = (stem_width, stem_width)
if 'tiered' in stem_type:
stem_chs = (3 * (stem_width // 4), stem_width)
self.conv1 = nn.Sequential(*[
nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False),
norm_layer(stem_chs[0]),
act_layer(inplace=True),
nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False),
norm_layer(stem_chs[1]),
act_layer(inplace=True),
nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)])
else:
self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(inplanes)
self.act1 = act_layer(inplace=True)
self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')]
# Stem pooling. The name 'maxpool' remains for weight compatibility.
if replace_stem_pool:
self.maxpool = nn.Sequential(*filter(None, [
nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False),
create_aa(aa_layer, channels=inplanes, stride=2) if aa_layer is not None else None,
norm_layer(inplanes),
act_layer(inplace=True),
]))
else:
if aa_layer is not None:
if issubclass(aa_layer, nn.AvgPool2d):
self.maxpool = aa_layer(2)
else:
self.maxpool = nn.Sequential(*[
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
aa_layer(channels=inplanes, stride=2)])
else:
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# Feature Blocks
channels = [64, 128, 256, 512]
stage_modules, stage_feature_info = make_blocks(
block,
channels,
layers,
inplanes,
cardinality=cardinality,
base_width=base_width,
output_stride=output_stride,
reduce_first=block_reduce_first,
avg_down=avg_down,
down_kernel_size=down_kernel_size,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
drop_block_rate=drop_block_rate,
drop_path_rate=drop_path_rate,
**block_args,
)
for stage in stage_modules:
self.add_module(*stage) # layer1, layer2, etc
self.feature_info.extend(stage_feature_info)
# Head (Pooling and Classifier)
self.num_features = 512 * block.expansion
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
self.init_weights(zero_init_last=zero_init_last)
@torch.jit.ignore
def init_weights(self, zero_init_last: bool = True):
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if zero_init_last:
for m in self.modules():
if hasattr(m, 'zero_init_last'):
m.zero_init_last()
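        # Zeroing the last BN gamma makes each residual block start as an identity mapping,
        # the 'zero-gamma' trick from the 'Bag of Tricks' paper cited in the class docstring.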
@torch.jit.ignore
def group_matcher(self, coarse: bool = False):
matcher = dict(stem=r'^conv1|bn1|maxpool', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)')
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self, name_only: bool = False):
return 'fc' if name_only else self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.maxpool(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq([self.layer1, self.layer2, self.layer3, self.layer4], x, flatten=True)
else:
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
x = self.global_pool(x)
if self.drop_rate:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
return x if pre_logits else self.fc(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
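# Usage sketch (illustrative): this class is normally built via the registered entrypoints
# below (or timm.create_model), but can be constructed directly, e.g.
#
#   model = ResNet(block=Bottleneck, layers=[3, 4, 6, 3])           # ResNet-50
#   logits = model(torch.randn(1, 3, 224, 224))                     # -> (1, 1000)
#   feats = model.forward_features(torch.randn(1, 3, 224, 224))     # -> (1, 2048, 7, 7)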
def _create_resnet(variant, pretrained: bool = False, **kwargs) -> ResNet:
return build_model_with_cfg(ResNet, variant, pretrained, **kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
def _tcfg(url='', **kwargs):
return _cfg(url=url, **dict({'interpolation': 'bicubic'}, **kwargs))
def _ttcfg(url='', **kwargs):
return _cfg(url=url, **dict({
'interpolation': 'bicubic', 'test_input_size': (3, 288, 288), 'test_crop_pct': 0.95,
'origin_url': 'https://github.com/huggingface/pytorch-image-models',
}, **kwargs))
def _rcfg(url='', **kwargs):
return _cfg(url=url, **dict({
'interpolation': 'bicubic', 'crop_pct': 0.95, 'test_input_size': (3, 288, 288), 'test_crop_pct': 1.0,
'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476'
}, **kwargs))
def _r3cfg(url='', **kwargs):
return _cfg(url=url, **dict({
'interpolation': 'bicubic', 'input_size': (3, 160, 160), 'pool_size': (5, 5),
'crop_pct': 0.95, 'test_input_size': (3, 224, 224), 'test_crop_pct': 0.95,
'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476',
}, **kwargs))
def _gcfg(url='', **kwargs):
return _cfg(url=url, **dict({
'interpolation': 'bicubic',
'origin_url': 'https://cv.gluon.ai/model_zoo/classification.html',
}, **kwargs))
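# The _cfg helpers above only differ in their defaults: _tcfg switches to bicubic
# interpolation, _ttcfg adds 288px test-time settings, _rcfg carries the RSB recipe
# defaults (288px test at crop 1.0, arXiv:2110.00476), _r3cfg the 160px-train / 224px-test
# defaults used for the A3 recipes, and _gcfg points origin_url at the Gluon model zoo.
# Individual entries below override these defaults via keyword args.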
default_cfgs = generate_default_cfgs({
# ResNet and Wide ResNet trained w/ timm (RSB paper and others)
'resnet10t.c3_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet10t_176_c3-f3215ab1.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224),
first_conv='conv1.0'),
'resnet14t.c3_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet14t_176_c3-c4ed2c37.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224),
first_conv='conv1.0'),
'resnet18.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a1_0-d63eafa0.pth'),
'resnet18.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a2_0-b61bd467.pth'),
'resnet18.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a3_0-40c531c8.pth'),
'resnet18d.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth',
first_conv='conv1.0'),
'resnet34.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a1_0-46f8f793.pth'),
'resnet34.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a2_0-82d47d71.pth'),
'resnet34.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a3_0-a20cabb6.pth',
crop_pct=0.95),
'resnet34.bt_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'),
'resnet34d.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth',
first_conv='conv1.0'),
'resnet26.bt_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth'),
'resnet26d.bt_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth',
first_conv='conv1.0'),
'resnet26t.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth',
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'resnet50.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth'),
'resnet50.a1h_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth',
input_size=(3, 176, 176), pool_size=(6, 6), crop_pct=0.9, test_input_size=(3, 224, 224), test_crop_pct=1.0),
'resnet50.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a2_0-a2746f79.pth'),
'resnet50.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a3_0-59cae1ef.pth'),
'resnet50.b1k_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b1k-532a802a.pth'),
'resnet50.b2k_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b2k-1ba180c1.pth'),
'resnet50.c1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c1-5ba5e060.pth'),
'resnet50.c2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c2-d01e05b2.pth'),
'resnet50.d_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_d-f39db8af.pth'),
'resnet50.ram_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth'),
'resnet50.am_in1k': _tcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_am-6c502b37.pth'),
'resnet50.ra_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ra-85ebb6e5.pth'),
'resnet50.bt_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/rw_resnet50-86acaeed.pth'),
'resnet50d.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth',
first_conv='conv1.0'),
'resnet50d.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a1_0-e20cff14.pth',
first_conv='conv1.0'),
'resnet50d.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a2_0-a3adc64d.pth',
first_conv='conv1.0'),
'resnet50d.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a3_0-403fdfad.pth',
first_conv='conv1.0'),
'resnet50t.untrained': _ttcfg(first_conv='conv1.0'),
'resnet101.a1h_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth'),
'resnet101.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1_0-cdcb52a9.pth'),
'resnet101.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a2_0-6edb36c7.pth'),
'resnet101.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a3_0-1db14157.pth'),
'resnet101d.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth',
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95,
test_crop_pct=1.0, test_input_size=(3, 320, 320)),
'resnet152.a1h_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth'),
'resnet152.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1_0-2eee8a7a.pth'),
'resnet152.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a2_0-b4c6978f.pth'),
'resnet152.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a3_0-134d4688.pth'),
'resnet152d.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth',
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95,
test_crop_pct=1.0, test_input_size=(3, 320, 320)),
'resnet200.untrained': _ttcfg(),
'resnet200d.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth',
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95,
test_crop_pct=1.0, test_input_size=(3, 320, 320)),
'wide_resnet50_2.racm_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth'),
# torchvision resnet weights
'resnet18.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnet18-5c106cde.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnet34.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnet34-333f7ec4.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnet50.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnet50-19c8e357.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnet50.tv2_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnet50-11ad3fa6.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965,
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnet101.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnet101.tv2_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnet101-cd907fc2.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965,
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnet152.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnet152-b121ed2d.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnet152.tv2_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnet152-f82ba261.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965,
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'wide_resnet50_2.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'wide_resnet50_2.tv2_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965,
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'wide_resnet101_2.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'wide_resnet101_2.tv2_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965,
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
# ResNets w/ alternative norm layers
'resnet50_gn.a1h_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth',
crop_pct=0.94),
# ResNeXt trained in timm (RSB paper and others)
'resnext50_32x4d.a1h_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth'),
'resnext50_32x4d.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1_0-b5a91a1d.pth'),
'resnext50_32x4d.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a2_0-efc76add.pth'),
'resnext50_32x4d.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a3_0-3e450271.pth'),
'resnext50_32x4d.ra_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth'),
'resnext50d_32x4d.bt_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth',
first_conv='conv1.0'),
'resnext101_32x4d.untrained': _ttcfg(),
'resnext101_64x4d.c1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnext101_64x4d_c-0d0e0cc0.pth'),
# torchvision ResNeXt weights
'resnext50_32x4d.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnext101_32x8d.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnext101_64x4d.tv_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth',
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnext50_32x4d.tv2_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965,
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
'resnext101_32x8d.tv2_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth',
input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965,
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'),
# ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags
# from https://github.com/facebookresearch/WSL-Images
# Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
'resnext101_32x8d.fb_wsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'),
'resnext101_32x16d.fb_wsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'),
'resnext101_32x32d.fb_wsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'),
'resnext101_32x48d.fb_wsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'),
# Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models
    # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
'resnet18.fb_ssl_yfcc100m_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnet50.fb_ssl_yfcc100m_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
# Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models
    # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
'resnet18.fb_swsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnet50.fb_swsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnext50_32x4d.fb_swsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnext101_32x4d.fb_swsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnext101_32x8d.fb_swsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
'resnext101_32x16d.fb_swsl_ig1b_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth',
license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'),
# Efficient Channel Attention ResNets
'ecaresnet26t.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth',
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
test_crop_pct=0.95, test_input_size=(3, 320, 320)),
'ecaresnetlight.miil_in1k': _tcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnetlight-75a9c627.pth',
test_crop_pct=0.95, test_input_size=(3, 288, 288)),
'ecaresnet50d.miil_in1k': _tcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d-93c81e3b.pth',
first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
'ecaresnet50d_pruned.miil_in1k': _tcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d_p-e4fa23c2.pth',
first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
'ecaresnet50t.ra2_in1k': _tcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth',
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
test_crop_pct=0.95, test_input_size=(3, 320, 320)),
'ecaresnet50t.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a1_0-99bd76a8.pth',
first_conv='conv1.0'),
'ecaresnet50t.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a2_0-b1c7b745.pth',
first_conv='conv1.0'),
'ecaresnet50t.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a3_0-8cc311f1.pth',
first_conv='conv1.0'),
'ecaresnet101d.miil_in1k': _tcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d-153dad65.pth',
first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
'ecaresnet101d_pruned.miil_in1k': _tcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d_p-9e74cb91.pth',
first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)),
'ecaresnet200d.untrained': _ttcfg(
first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8)),
'ecaresnet269d.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth',
first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.95,
test_crop_pct=1.0, test_input_size=(3, 352, 352)),
# Efficient Channel Attention ResNeXts
'ecaresnext26t_32x4d.untrained': _tcfg(first_conv='conv1.0'),
'ecaresnext50t_32x4d.untrained': _tcfg(first_conv='conv1.0'),
# Squeeze-Excitation ResNets, to eventually replace the models in senet.py
'seresnet18.untrained': _ttcfg(),
'seresnet34.untrained': _ttcfg(),
'seresnet50.a1_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a1_0-ffa00869.pth',
crop_pct=0.95),
'seresnet50.a2_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a2_0-850de0d9.pth',
crop_pct=0.95),
'seresnet50.a3_in1k': _r3cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a3_0-317ecd56.pth',
crop_pct=0.95),
'seresnet50.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth'),
'seresnet50t.untrained': _ttcfg(
first_conv='conv1.0'),
'seresnet101.untrained': _ttcfg(),
'seresnet152.untrained': _ttcfg(),
'seresnet152d.ra2_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth',
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95,
test_crop_pct=1.0, test_input_size=(3, 320, 320)
),
'seresnet200d.untrained': _ttcfg(
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)),
'seresnet269d.untrained': _ttcfg(
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)),
# Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py
'seresnext26d_32x4d.bt_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth',
first_conv='conv1.0'),
'seresnext26t_32x4d.bt_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth',
first_conv='conv1.0'),
'seresnext50_32x4d.racm_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth'),
'seresnext101_32x4d.untrained': _ttcfg(),
'seresnext101_32x8d.ah_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101_32x8d_ah-e6bc4c0a.pth'),
'seresnext101d_32x8d.ah_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101d_32x8d_ah-191d7b94.pth',
first_conv='conv1.0'),
# ResNets with anti-aliasing / blur pool
'resnetaa50d.sw_in12k_ft_in1k': _ttcfg(
hf_hub_id='timm/',
first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
'resnetaa101d.sw_in12k_ft_in1k': _ttcfg(
hf_hub_id='timm/',
first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
'seresnextaa101d_32x8d.sw_in12k_ft_in1k_288': _ttcfg(
hf_hub_id='timm/',
crop_pct=0.95, input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0,
first_conv='conv1.0'),
'seresnextaa101d_32x8d.sw_in12k_ft_in1k': _ttcfg(
hf_hub_id='timm/',
first_conv='conv1.0', test_crop_pct=1.0),
'seresnextaa201d_32x8d.sw_in12k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', first_conv='conv1.0', pool_size=(12, 12), input_size=(3, 384, 384), crop_pct=1.0),
'seresnextaa201d_32x8d.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821, interpolation='bicubic', first_conv='conv1.0',
crop_pct=0.95, input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0),
'resnetaa50d.sw_in12k': _ttcfg(
hf_hub_id='timm/',
num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
'resnetaa50d.d_in12k': _ttcfg(
hf_hub_id='timm/',
num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
'resnetaa101d.sw_in12k': _ttcfg(
hf_hub_id='timm/',
num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
'seresnextaa101d_32x8d.sw_in12k': _ttcfg(
hf_hub_id='timm/',
num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0),
'resnetblur18.untrained': _ttcfg(),
'resnetblur50.bt_in1k': _ttcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth'),
'resnetblur50d.untrained': _ttcfg(first_conv='conv1.0'),
'resnetblur101d.untrained': _ttcfg(first_conv='conv1.0'),
'resnetaa34d.untrained': _ttcfg(first_conv='conv1.0'),
'resnetaa50.a1h_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetaa50_a1h-4cf422b3.pth'),
'seresnetaa50d.untrained': _ttcfg(first_conv='conv1.0'),
'seresnextaa101d_32x8d.ah_in1k': _rcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnextaa101d_32x8d_ah-83c8ae12.pth',
first_conv='conv1.0'),
# ResNet-RS models
'resnetrs50.tf_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth',
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs101.tf_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth',
input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs152.tf_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs200.tf_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetrs200_c-6b698b88.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs270.tf_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs350.tf_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth',
input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs420.tf_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416),
interpolation='bicubic', first_conv='conv1.0'),
# gluon resnet weights
'resnet18.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'),
'resnet34.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'),
'resnet50.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'),
'resnet101.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'),
'resnet152.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'),
'resnet50c.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth',
first_conv='conv1.0'),
'resnet101c.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth',
first_conv='conv1.0'),
'resnet152c.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth',
first_conv='conv1.0'),
'resnet50d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth',
first_conv='conv1.0'),
'resnet101d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth',
first_conv='conv1.0'),
'resnet152d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth',
first_conv='conv1.0'),
'resnet50s.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth',
first_conv='conv1.0'),
'resnet101s.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth',
first_conv='conv1.0'),
'resnet152s.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth',
first_conv='conv1.0'),
'resnext50_32x4d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'),
'resnext101_32x4d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'),
'resnext101_64x4d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'),
'seresnext50_32x4d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'),
'seresnext101_32x4d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'),
'seresnext101_64x4d.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'),
'senet154.gluon_in1k': _gcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth',
first_conv='conv1.0'),
})
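# Weight selection: keys in default_cfgs are '<architecture>.<tag>'. Passing the full key,
# e.g. timm.create_model('resnet50.a1_in1k', pretrained=True), selects that specific weight
# set, while the bare architecture name resolves to its default (first listed) tag.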
@register_model
def resnet10t(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-10-T model.
"""
model_args = dict(block=BasicBlock, layers=[1, 1, 1, 1], stem_width=32, stem_type='deep_tiered', avg_down=True)
return _create_resnet('resnet10t', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet14t(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-14-T model.
"""
model_args = dict(block=Bottleneck, layers=[1, 1, 1, 1], stem_width=32, stem_type='deep_tiered', avg_down=True)
return _create_resnet('resnet14t', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet18(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-18 model.
"""
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2])
return _create_resnet('resnet18', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet18d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-18-D model.
"""
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnet18d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet34(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-34 model.
"""
model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3])
return _create_resnet('resnet34', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet34d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-34-D model.
"""
model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnet34d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet26(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-26 model.
"""
model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2])
return _create_resnet('resnet26', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet26t(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-26-T model.
"""
model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True)
return _create_resnet('resnet26t', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet26d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-26-D model.
"""
model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnet26d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet50(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3])
return _create_resnet('resnet50', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet50c(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-C model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep')
return _create_resnet('resnet50c', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet50d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-D model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnet50d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet50s(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-S model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=64, stem_type='deep')
return _create_resnet('resnet50s', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet50t(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-T model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True)
return _create_resnet('resnet50t', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet101(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-101 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3])
return _create_resnet('resnet101', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet101c(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-101-C model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep')
return _create_resnet('resnet101c', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet101d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-101-D model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnet101d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet101s(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-101-S model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=64, stem_type='deep')
return _create_resnet('resnet101s', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet152(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-152 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3])
return _create_resnet('resnet152', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet152c(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-152-C model.
"""
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep')
return _create_resnet('resnet152c', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet152d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-152-D model.
"""
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnet152d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet152s(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-152-S model.
"""
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=64, stem_type='deep')
return _create_resnet('resnet152s', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet200(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-200 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3])
return _create_resnet('resnet200', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet200d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-200-D model.
"""
model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnet200d', pretrained, **dict(model_args, **kwargs))
@register_model
def wide_resnet50_2(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a Wide ResNet-50-2 model.
    The model is the same as ResNet except that the bottleneck number of channels
    is twice as large in every block. The number of channels in the outer 1x1
    convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels while in Wide ResNet-50-2 it has 2048-1024-2048.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128)
return _create_resnet('wide_resnet50_2', pretrained, **dict(model_args, **kwargs))
@register_model
def wide_resnet101_2(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a Wide ResNet-101-2 model.
    The model is the same as ResNet except that the bottleneck number of channels
    is twice as large in every block. The number of channels in the outer 1x1
    convolutions is the same.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128)
return _create_resnet('wide_resnet101_2', pretrained, **dict(model_args, **kwargs))
@register_model
def resnet50_gn(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50 model w/ GroupNorm
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], norm_layer='groupnorm')
return _create_resnet('resnet50_gn', pretrained, **dict(model_args, **kwargs))
@register_model
def resnext50_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNeXt50-32x4d model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4)
return _create_resnet('resnext50_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnext50d_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnext50d_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnext101_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNeXt-101 32x4d model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4)
return _create_resnet('resnext101_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnext101_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNeXt-101 32x8d model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8)
return _create_resnet('resnext101_32x8d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnext101_32x16d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNeXt-101 32x16d model
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16)
return _create_resnet('resnext101_32x16d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnext101_32x32d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNeXt-101 32x32d model
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32)
return _create_resnet('resnext101_32x32d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnext101_64x4d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNeXt101-64x4d model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4)
return _create_resnet('resnext101_64x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnet26t(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs an ECA-ResNeXt-26-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem and ECA attn.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnet26t', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnet50d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-D model with eca.
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnet50d', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnet50d_pruned(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-D model pruned with eca.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs))
@register_model
def ecaresnet50t(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs an ECA-ResNet-50-T model.
Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn.
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnet50t', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnetlight(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-D light model with eca.
"""
model_args = dict(
block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True,
block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnetlight', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnet101d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-101-D model with eca.
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnet101d', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnet101d_pruned(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-101-D model pruned with eca.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs))
@register_model
def ecaresnet200d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-200-D model with ECA.
"""
model_args = dict(
block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnet200d', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnet269d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-269-D model with ECA.
"""
model_args = dict(
block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnet269d', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnext26t_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs an ECA-ResNeXt-26-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
    in the deep stem. This model replaces the SE module with the ECA module.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnext26t_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def ecaresnext50t_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs an ECA-ResNeXt-50-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
    in the deep stem. This model replaces the SE module with the ECA module.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'))
return _create_resnet('ecaresnext50t_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet18(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'))
return _create_resnet('seresnet18', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet34(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'))
return _create_resnet('seresnet34', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet50(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'))
return _create_resnet('seresnet50', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet50t(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered',
avg_down=True, block_args=dict(attn_layer='se'))
return _create_resnet('seresnet50t', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet101(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'))
return _create_resnet('seresnet101', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet152(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'))
return _create_resnet('seresnet152', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet152d(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(
block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep',
avg_down=True, block_args=dict(attn_layer='se'))
return _create_resnet('seresnet152d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet200d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-200-D model with SE attn.
"""
model_args = dict(
block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep',
avg_down=True, block_args=dict(attn_layer='se'))
return _create_resnet('seresnet200d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnet269d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-269-D model with SE attn.
"""
model_args = dict(
block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep',
avg_down=True, block_args=dict(attn_layer='se'))
return _create_resnet('seresnet269d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnext26d_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a SE-ResNeXt-26-D model.`
    This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for
    the combination of a deep stem and avg_pool downsampling.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
return _create_resnet('seresnext26d_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnext26t_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a SE-ResNet-26-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'))
return _create_resnet('seresnext26t_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnext50_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
block_args=dict(attn_layer='se'))
return _create_resnet('seresnext50_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnext101_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4,
block_args=dict(attn_layer='se'))
return _create_resnet('seresnext101_32x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnext101_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8,
block_args=dict(attn_layer='se'))
return _create_resnet('seresnext101_32x8d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnext101d_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8,
stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='se'))
return _create_resnet('seresnext101d_32x8d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnext101_64x4d(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4,
block_args=dict(attn_layer='se'))
return _create_resnet('seresnext101_64x4d', pretrained, **dict(model_args, **kwargs))
@register_model
def senet154(pretrained: bool = False, **kwargs) -> ResNet:
model_args = dict(
block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep',
down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'))
return _create_resnet('senet154', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetblur18(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-18 model with blur anti-aliasing
"""
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d)
return _create_resnet('resnetblur18', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetblur50(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50 model with blur anti-aliasing
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d)
return _create_resnet('resnetblur50', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetblur50d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-D model with blur anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d,
stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnetblur50d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetblur101d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-101-D model with blur anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=BlurPool2d,
stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnetblur101d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetaa34d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-34-D model w/ avgpool anti-aliasing
"""
model_args = dict(
block=BasicBlock, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnetaa34d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetaa50(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50 model with avgpool anti-aliasing
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d)
return _create_resnet('resnetaa50', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetaa50d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-50-D model with avgpool anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d,
stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnetaa50d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetaa101d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-101-D model with avgpool anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=nn.AvgPool2d,
stem_width=32, stem_type='deep', avg_down=True)
return _create_resnet('resnetaa101d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnetaa50d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a SE=ResNet-50-D model with avgpool anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d,
stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
return _create_resnet('seresnetaa50d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnextaa101d_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a SE=ResNeXt-101-D 32x8d model with avgpool anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8,
stem_width=32, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d,
block_args=dict(attn_layer='se'))
return _create_resnet('seresnextaa101d_32x8d', pretrained, **dict(model_args, **kwargs))
@register_model
def seresnextaa201d_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
    """Constructs a SE-ResNeXt-201-D 32x8d model with avgpool anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 24, 36, 4], cardinality=32, base_width=8,
stem_width=64, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d,
block_args=dict(attn_layer='se'))
return _create_resnet('seresnextaa201d_32x8d', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetrs50(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-RS-50 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer))
return _create_resnet('resnetrs50', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetrs101(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-RS-101 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer))
return _create_resnet('resnetrs101', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetrs152(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-RS-152 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer))
return _create_resnet('resnetrs152', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetrs200(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-RS-200 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer))
return _create_resnet('resnetrs200', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetrs270(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-RS-270 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer))
return _create_resnet('resnetrs270', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetrs350(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-RS-350 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer))
return _create_resnet('resnetrs350', pretrained, **dict(model_args, **kwargs))
@register_model
def resnetrs420(pretrained: bool = False, **kwargs) -> ResNet:
"""Constructs a ResNet-RS-420 model
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer))
return _create_resnet('resnetrs420', pretrained, **dict(model_args, **kwargs))
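# Map deprecated model names to their replacement entrypoint + pretrained tag so old names keep resolving.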
register_model_deprecations(__name__, {
'tv_resnet34': 'resnet34.tv_in1k',
'tv_resnet50': 'resnet50.tv_in1k',
'tv_resnet101': 'resnet101.tv_in1k',
'tv_resnet152': 'resnet152.tv_in1k',
'tv_resnext50_32x4d' : 'resnext50_32x4d.tv_in1k',
'ig_resnext101_32x8d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k',
    'ig_resnext101_32x16d': 'resnext101_32x16d.fb_wsl_ig1b_ft_in1k',
    'ig_resnext101_32x32d': 'resnext101_32x32d.fb_wsl_ig1b_ft_in1k',
'ig_resnext101_32x48d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k',
'ssl_resnet18': 'resnet18.fb_ssl_yfcc100m_ft_in1k',
'ssl_resnet50': 'resnet50.fb_ssl_yfcc100m_ft_in1k',
'ssl_resnext50_32x4d': 'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k',
'ssl_resnext101_32x4d': 'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k',
'ssl_resnext101_32x8d': 'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k',
'ssl_resnext101_32x16d': 'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k',
'swsl_resnet18': 'resnet18.fb_swsl_ig1b_ft_in1k',
'swsl_resnet50': 'resnet50.fb_swsl_ig1b_ft_in1k',
'swsl_resnext50_32x4d': 'resnext50_32x4d.fb_swsl_ig1b_ft_in1k',
'swsl_resnext101_32x4d': 'resnext101_32x4d.fb_swsl_ig1b_ft_in1k',
'swsl_resnext101_32x8d': 'resnext101_32x8d.fb_swsl_ig1b_ft_in1k',
'swsl_resnext101_32x16d': 'resnext101_32x16d.fb_swsl_ig1b_ft_in1k',
'gluon_resnet18_v1b': 'resnet18.gluon_in1k',
'gluon_resnet34_v1b': 'resnet34.gluon_in1k',
'gluon_resnet50_v1b': 'resnet50.gluon_in1k',
'gluon_resnet101_v1b': 'resnet101.gluon_in1k',
'gluon_resnet152_v1b': 'resnet152.gluon_in1k',
'gluon_resnet50_v1c': 'resnet50c.gluon_in1k',
'gluon_resnet101_v1c': 'resnet101c.gluon_in1k',
'gluon_resnet152_v1c': 'resnet152c.gluon_in1k',
'gluon_resnet50_v1d': 'resnet50d.gluon_in1k',
'gluon_resnet101_v1d': 'resnet101d.gluon_in1k',
'gluon_resnet152_v1d': 'resnet152d.gluon_in1k',
'gluon_resnet50_v1s': 'resnet50s.gluon_in1k',
'gluon_resnet101_v1s': 'resnet101s.gluon_in1k',
'gluon_resnet152_v1s': 'resnet152s.gluon_in1k',
'gluon_resnext50_32x4d': 'resnext50_32x4d.gluon_in1k',
'gluon_resnext101_32x4d': 'resnext101_32x4d.gluon_in1k',
'gluon_resnext101_64x4d': 'resnext101_64x4d.gluon_in1k',
'gluon_seresnext50_32x4d': 'seresnext50_32x4d.gluon_in1k',
'gluon_seresnext101_32x4d': 'seresnext101_32x4d.gluon_in1k',
'gluon_seresnext101_64x4d': 'seresnext101_64x4d.gluon_in1k',
'gluon_senet154': 'senet154.gluon_in1k',
'seresnext26tn_32x4d': 'seresnext26t_32x4d',
})
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/resnetv2.py | """Pre-Activation ResNet v2 with GroupNorm and Weight Standardization.
A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfer (BiT) source code
at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have
been included here as pretrained models from their original .NPZ checkpoints.
Additionally, supports a non pre-activation bottleneck for use as a backbone for Vision Transformers (ViT) and
extra padding support to allow porting of official Hybrid ResNet pretrained weights from
https://github.com/google-research/vision_transformer
Thanks to the Google team for the above two repositories and associated papers:
* Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370
* An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929
* Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020.
"""
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict # pylint: disable=g-importing-member
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import GroupNormAct, BatchNormAct2d, EvoNorm2dS0, FilterResponseNormTlu2d, ClassifierHead, \
DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d, get_act_layer, get_norm_act_layer, make_divisible
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq, named_apply, adapt_input_conv
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['ResNetV2'] # model_registry will add each entrypoint fn to this
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(
self,
in_chs,
out_chs=None,
bottle_ratio=0.25,
stride=1,
dilation=1,
first_dilation=None,
groups=1,
act_layer=None,
conv_layer=None,
norm_layer=None,
proj_layer=None,
drop_path_rate=0.,
):
super().__init__()
first_dilation = first_dilation or dilation
conv_layer = conv_layer or StdConv2d
norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
out_chs = out_chs or in_chs
mid_chs = make_divisible(out_chs * bottle_ratio)
if proj_layer is not None:
self.downsample = proj_layer(
in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True,
conv_layer=conv_layer, norm_layer=norm_layer)
else:
self.downsample = None
self.norm1 = norm_layer(in_chs)
self.conv1 = conv_layer(in_chs, mid_chs, 1)
self.norm2 = norm_layer(mid_chs)
self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
self.norm3 = norm_layer(mid_chs)
self.conv3 = conv_layer(mid_chs, out_chs, 1)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def zero_init_last(self):
nn.init.zeros_(self.conv3.weight)
def forward(self, x):
x_preact = self.norm1(x)
# shortcut branch
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(x_preact)
# residual branch
x = self.conv1(x_preact)
x = self.conv2(self.norm2(x))
x = self.conv3(self.norm3(x))
x = self.drop_path(x)
return x + shortcut
class Bottleneck(nn.Module):
"""Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT.
"""
def __init__(
self,
in_chs,
out_chs=None,
bottle_ratio=0.25,
stride=1,
dilation=1,
first_dilation=None,
groups=1,
act_layer=None,
conv_layer=None,
norm_layer=None,
proj_layer=None,
drop_path_rate=0.,
):
super().__init__()
first_dilation = first_dilation or dilation
act_layer = act_layer or nn.ReLU
conv_layer = conv_layer or StdConv2d
norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
out_chs = out_chs or in_chs
mid_chs = make_divisible(out_chs * bottle_ratio)
if proj_layer is not None:
self.downsample = proj_layer(
in_chs, out_chs, stride=stride, dilation=dilation, preact=False,
conv_layer=conv_layer, norm_layer=norm_layer)
else:
self.downsample = None
self.conv1 = conv_layer(in_chs, mid_chs, 1)
self.norm1 = norm_layer(mid_chs)
self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
self.norm2 = norm_layer(mid_chs)
self.conv3 = conv_layer(mid_chs, out_chs, 1)
self.norm3 = norm_layer(out_chs, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.act3 = act_layer(inplace=True)
def zero_init_last(self):
if getattr(self.norm3, 'weight', None) is not None:
nn.init.zeros_(self.norm3.weight)
def forward(self, x):
# shortcut branch
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(x)
# residual
x = self.conv1(x)
x = self.norm1(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.drop_path(x)
x = self.act3(x + shortcut)
return x
class DownsampleConv(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=1,
first_dilation=None,
preact=True,
conv_layer=None,
norm_layer=None,
):
super(DownsampleConv, self).__init__()
self.conv = conv_layer(in_chs, out_chs, 1, stride=stride)
self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False)
def forward(self, x):
return self.norm(self.conv(x))
class DownsampleAvg(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=1,
first_dilation=None,
preact=True,
conv_layer=None,
norm_layer=None,
):
""" AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment."""
super(DownsampleAvg, self).__init__()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = conv_layer(in_chs, out_chs, 1, stride=1)
self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False)
def forward(self, x):
return self.norm(self.conv(self.pool(x)))
class ResNetStage(nn.Module):
"""ResNet Stage."""
def __init__(
self,
in_chs,
out_chs,
stride,
dilation,
depth,
bottle_ratio=0.25,
groups=1,
avg_down=False,
block_dpr=None,
block_fn=PreActBottleneck,
act_layer=None,
conv_layer=None,
norm_layer=None,
**block_kwargs,
):
super(ResNetStage, self).__init__()
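        # The first block of a stage carries the stride/projection and transitions dilation: its 3x3 conv
        # uses dilation 1 (or 2 when the stage dilation exceeds 2); later blocks use the full stage dilation.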
first_dilation = 1 if dilation in (1, 2) else 2
layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer)
proj_layer = DownsampleAvg if avg_down else DownsampleConv
prev_chs = in_chs
self.blocks = nn.Sequential()
for block_idx in range(depth):
drop_path_rate = block_dpr[block_idx] if block_dpr else 0.
stride = stride if block_idx == 0 else 1
self.blocks.add_module(str(block_idx), block_fn(
prev_chs,
out_chs,
stride=stride,
dilation=dilation,
bottle_ratio=bottle_ratio,
groups=groups,
first_dilation=first_dilation,
proj_layer=proj_layer,
drop_path_rate=drop_path_rate,
**layer_kwargs,
**block_kwargs,
))
prev_chs = out_chs
first_dilation = dilation
proj_layer = None
def forward(self, x):
x = self.blocks(x)
return x
def is_stem_deep(stem_type):
return any([s in stem_type for s in ('deep', 'tiered')])
def create_resnetv2_stem(
in_chs,
out_chs=64,
stem_type='',
preact=True,
conv_layer=StdConv2d,
norm_layer=partial(GroupNormAct, num_groups=32),
):
stem = OrderedDict()
assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered')
# NOTE conv padding mode can be changed by overriding the conv_layer def
if is_stem_deep(stem_type):
# A 3 deep 3x3 conv stack as in ResNet V1D models
if 'tiered' in stem_type:
stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py
else:
stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets
stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2)
stem['norm1'] = norm_layer(stem_chs[0])
stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1)
stem['norm2'] = norm_layer(stem_chs[1])
stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1)
if not preact:
stem['norm3'] = norm_layer(out_chs)
else:
# The usual 7x7 stem conv
stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2)
if not preact:
stem['norm'] = norm_layer(out_chs)
if 'fixed' in stem_type:
# 'fixed' SAME padding approximation that is used in BiT models
stem['pad'] = nn.ConstantPad2d(1, 0.)
stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
elif 'same' in stem_type:
# full, input size based 'SAME' padding, used in ViT Hybrid model
stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same')
else:
# the usual PyTorch symmetric padding
stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
return nn.Sequential(stem)
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode.
"""
def __init__(
self,
layers,
channels=(256, 512, 1024, 2048),
num_classes=1000,
in_chans=3,
global_pool='avg',
output_stride=32,
width_factor=1,
stem_chs=64,
stem_type='',
avg_down=False,
preact=True,
act_layer=nn.ReLU,
norm_layer=partial(GroupNormAct, num_groups=32),
conv_layer=StdConv2d,
drop_rate=0.,
drop_path_rate=0.,
zero_init_last=False,
):
"""
Args:
            layers (List[int]): number of layers in each block
            channels (List[int]): number of channels in each block
num_classes (int): number of classification classes (default 1000)
in_chans (int): number of input (color) channels. (default 3)
global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg')
output_stride (int): output stride of the network, 32, 16, or 8. (default 32)
width_factor (int): channel (width) multiplication factor
stem_chs (int): stem width (default: 64)
stem_type (str): stem type (default: '' == 7x7)
avg_down (bool): average pooling in residual downsampling (default: False)
            preact (bool): pre-activation (default: True)
act_layer (Union[str, nn.Module]): activation layer
norm_layer (Union[str, nn.Module]): normalization layer
conv_layer (nn.Module): convolution module
drop_rate: classifier dropout rate (default: 0.)
drop_path_rate: stochastic depth rate (default: 0.)
zero_init_last: zero-init last weight in residual path (default: False)
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
wf = width_factor
norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
act_layer = get_act_layer(act_layer)
self.feature_info = []
stem_chs = make_divisible(stem_chs * wf)
self.stem = create_resnetv2_stem(
in_chans,
stem_chs,
stem_type,
preact,
conv_layer=conv_layer,
norm_layer=norm_layer,
)
stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm'
self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat))
prev_chs = stem_chs
curr_stride = 4
dilation = 1
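        # stochastic depth: drop path rate ramps linearly from 0 over all blocks, then is split per stage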
block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
block_fn = PreActBottleneck if preact else Bottleneck
self.stages = nn.Sequential()
for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)):
out_chs = make_divisible(c * wf)
stride = 1 if stage_idx == 0 else 2
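            # once the requested output_stride is reached, convert any further striding into dilation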
if curr_stride >= output_stride:
dilation *= stride
stride = 1
stage = ResNetStage(
prev_chs,
out_chs,
stride=stride,
dilation=dilation,
depth=d,
avg_down=avg_down,
act_layer=act_layer,
conv_layer=conv_layer,
norm_layer=norm_layer,
block_dpr=bdpr,
block_fn=block_fn,
)
prev_chs = out_chs
curr_stride *= stride
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')]
self.stages.add_module(str(stage_idx), stage)
self.num_features = prev_chs
self.norm = norm_layer(self.num_features) if preact else nn.Identity()
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
use_conv=True,
)
self.init_weights(zero_init_last=zero_init_last)
self.grad_checkpointing = False
@torch.jit.ignore
def init_weights(self, zero_init_last=True):
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path, prefix='resnet/'):
_load_weights(self, checkpoint_path, prefix)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x, flatten=True)
else:
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module: nn.Module, name: str = '', zero_init_last=True):
if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif zero_init_last and hasattr(module, 'zero_init_last'):
module.zero_init_last()
@torch.no_grad()
def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'):
import numpy as np
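    # Loads the original BiT .npz checkpoints; conv kernels are stored in TF HWIO layout and transposed to OIHW.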
def t2p(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
weights = np.load(checkpoint_path)
stem_conv_w = adapt_input_conv(
model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel']))
model.stem.conv.weight.copy_(stem_conv_w)
model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma']))
model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta']))
if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \
model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]:
model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel']))
model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias']))
for i, (sname, stage) in enumerate(model.stages.named_children()):
for j, (bname, block) in enumerate(stage.blocks.named_children()):
cname = 'standardized_conv2d'
block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/'
block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel']))
block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel']))
block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel']))
block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma']))
block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma']))
block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma']))
block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta']))
block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta']))
block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta']))
if block.downsample is not None:
w = weights[f'{block_prefix}a/proj/{cname}/kernel']
block.downsample.conv.weight.copy_(t2p(w))
def _create_resnetv2(variant, pretrained=False, **kwargs):
feature_cfg = dict(flatten_sequential=True)
return build_model_with_cfg(
ResNetV2, variant, pretrained,
feature_cfg=feature_cfg,
**kwargs,
)
def _create_resnetv2_bit(variant, pretrained=False, **kwargs):
return _create_resnetv2(
variant,
pretrained=pretrained,
stem_type='fixed',
conv_layer=partial(StdConv2d, eps=1e-8),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
'resnetv2_50x1_bit.goog_distilled_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', custom_load=True),
'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', custom_load=True),
'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic', custom_load=True),
# pretrained on imagenet21k, finetuned on imagenet1k
'resnetv2_50x1_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_50x3_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_101x1_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_101x3_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_152x2_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_152x4_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0, custom_load=True), # only one at 480x480?
# trained on imagenet-21k
'resnetv2_50x1_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_50x3_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_101x1_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_101x3_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_152x2_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_152x4_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_50.a1h_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnetv2_50d.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
'resnetv2_50t.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
'resnetv2_101.a1h_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnetv2_101d.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
'resnetv2_152.untrained': _cfg(
interpolation='bicubic'),
'resnetv2_152d.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
'resnetv2_50d_gn.ah_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', first_conv='stem.conv1',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnetv2_50d_evos.ah_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', first_conv='stem.conv1',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnetv2_50d_frn.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
})
@register_model
def resnetv2_50x1_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_50x1_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs)
@register_model
def resnetv2_50x3_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_50x3_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs)
@register_model
def resnetv2_101x1_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_101x1_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs)
@register_model
def resnetv2_101x3_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_101x3_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs)
@register_model
def resnetv2_152x2_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_152x2_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs)
@register_model
def resnetv2_152x4_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_152x4_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs)
@register_model
def resnetv2_50(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
return _create_resnetv2('resnetv2_50', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_50d(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_50d', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_50t(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
stem_type='tiered', avg_down=True)
return _create_resnetv2('resnetv2_50t', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_101(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
return _create_resnetv2('resnetv2_101', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_101d(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_101d', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_152(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
return _create_resnetv2('resnetv2_152', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_152d(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_152d', pretrained=pretrained, **dict(model_args, **kwargs))
# Experimental configs (may change / be removed)
@register_model
def resnetv2_50d_gn(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_50d_gn', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_50d_evos(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dS0,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_50d_evos', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_50d_frn(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=FilterResponseNormTlu2d,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_50d_frn', pretrained=pretrained, **dict(model_args, **kwargs))
register_model_deprecations(__name__, {
'resnetv2_50x1_bitm': 'resnetv2_50x1_bit.goog_in21k_ft_in1k',
'resnetv2_50x3_bitm': 'resnetv2_50x3_bit.goog_in21k_ft_in1k',
'resnetv2_101x1_bitm': 'resnetv2_101x1_bit.goog_in21k_ft_in1k',
'resnetv2_101x3_bitm': 'resnetv2_101x3_bit.goog_in21k_ft_in1k',
'resnetv2_152x2_bitm': 'resnetv2_152x2_bit.goog_in21k_ft_in1k',
'resnetv2_152x4_bitm': 'resnetv2_152x4_bit.goog_in21k_ft_in1k',
'resnetv2_50x1_bitm_in21k': 'resnetv2_50x1_bit.goog_in21k',
'resnetv2_50x3_bitm_in21k': 'resnetv2_50x3_bit.goog_in21k',
'resnetv2_101x1_bitm_in21k': 'resnetv2_101x1_bit.goog_in21k',
'resnetv2_101x3_bitm_in21k': 'resnetv2_101x3_bit.goog_in21k',
'resnetv2_152x2_bitm_in21k': 'resnetv2_152x2_bit.goog_in21k',
'resnetv2_152x4_bitm_in21k': 'resnetv2_152x4_bit.goog_in21k',
'resnetv2_50x1_bit_distilled': 'resnetv2_50x1_bit.goog_distilled_in1k',
'resnetv2_152x2_bit_teacher': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k',
'resnetv2_152x2_bit_teacher_384': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384',
})
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/repghost.py | """
An implementation of the RepGhostNet model as defined in:
RepGhost: A Hardware-Efficient Ghost Module via Re-parameterization. https://arxiv.org/abs/2211.06088
Original implementation: https://github.com/ChengpengChen/RepGhost
"""
import copy
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, Linear, make_divisible
from ._builder import build_model_with_cfg
from ._efficientnet_blocks import SqueezeExcite, ConvBnAct
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['RepGhostNet']
_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4))
class RepGhostModule(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=1,
dw_size=3,
stride=1,
relu=True,
reparam=True,
):
super(RepGhostModule, self).__init__()
self.out_chs = out_chs
init_chs = out_chs
new_chs = out_chs
self.primary_conv = nn.Sequential(
nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
nn.BatchNorm2d(init_chs),
nn.ReLU(inplace=True) if relu else nn.Identity(),
)
fusion_conv = []
fusion_bn = []
if reparam:
fusion_conv.append(nn.Identity())
fusion_bn.append(nn.BatchNorm2d(init_chs))
self.fusion_conv = nn.Sequential(*fusion_conv)
self.fusion_bn = nn.Sequential(*fusion_bn)
self.cheap_operation = nn.Sequential(
nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False),
nn.BatchNorm2d(new_chs),
# nn.ReLU(inplace=True) if relu else nn.Identity(),
)
self.relu = nn.ReLU(inplace=False) if relu else nn.Identity()
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
for conv, bn in zip(self.fusion_conv, self.fusion_bn):
x2 = x2 + bn(conv(x1))
return self.relu(x2)
def get_equivalent_kernel_bias(self):
kernel3x3, bias3x3 = self._fuse_bn_tensor(self.cheap_operation[0], self.cheap_operation[1])
for conv, bn in zip(self.fusion_conv, self.fusion_bn):
kernel, bias = self._fuse_bn_tensor(conv, bn, kernel3x3.shape[0], kernel3x3.device)
kernel3x3 += self._pad_1x1_to_3x3_tensor(kernel)
bias3x3 += bias
return kernel3x3, bias3x3
@staticmethod
def _pad_1x1_to_3x3_tensor(kernel1x1):
if kernel1x1 is None:
return 0
else:
return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])
@staticmethod
def _fuse_bn_tensor(conv, bn, in_channels=None, device=None):
in_channels = in_channels if in_channels else bn.running_mean.shape[0]
device = device if device else bn.weight.device
if isinstance(conv, nn.Conv2d):
kernel = conv.weight
assert conv.bias is None
else:
assert isinstance(conv, nn.Identity)
kernel = torch.ones(in_channels, 1, 1, 1, device=device)
if isinstance(bn, nn.BatchNorm2d):
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
assert isinstance(bn, nn.Identity)
return kernel, torch.zeros(in_channels).to(kernel.device)
def switch_to_deploy(self):
if len(self.fusion_conv) == 0 and len(self.fusion_bn) == 0:
return
kernel, bias = self.get_equivalent_kernel_bias()
self.cheap_operation = nn.Conv2d(
in_channels=self.cheap_operation[0].in_channels,
out_channels=self.cheap_operation[0].out_channels,
kernel_size=self.cheap_operation[0].kernel_size,
padding=self.cheap_operation[0].padding,
dilation=self.cheap_operation[0].dilation,
groups=self.cheap_operation[0].groups,
bias=True)
self.cheap_operation.weight.data = kernel
self.cheap_operation.bias.data = bias
self.__delattr__('fusion_conv')
self.__delattr__('fusion_bn')
self.fusion_conv = []
self.fusion_bn = []
def reparameterize(self):
self.switch_to_deploy()
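# NOTE (added sketch, not from the original repo): in eval mode the identity+BN fusion branch and
# the depthwise conv+BN in `cheap_operation` are both linear, so `get_equivalent_kernel_bias()`
# folds them into a single depthwise 3x3 conv. A minimal local equivalence check could look like:
#
#   m = RepGhostModule(16, 16).eval()
#   x = torch.randn(1, 16, 8, 8)
#   y_ref = m(x)
#   m.switch_to_deploy()
#   torch.testing.assert_close(m(x), y_ref, rtol=1e-4, atol=1e-5)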
class RepGhostBottleneck(nn.Module):
""" RepGhost bottleneck w/ optional SE"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
act_layer=nn.ReLU,
se_ratio=0.,
reparam=True,
):
super(RepGhostBottleneck, self).__init__()
has_se = se_ratio is not None and se_ratio > 0.
self.stride = stride
# Point-wise expansion
self.ghost1 = RepGhostModule(in_chs, mid_chs, relu=True, reparam=reparam)
# Depth-wise convolution
if self.stride > 1:
self.conv_dw = nn.Conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False)
self.bn_dw = nn.BatchNorm2d(mid_chs)
else:
self.conv_dw = None
self.bn_dw = None
# Squeeze-and-excitation
self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None
# Point-wise linear projection
self.ghost2 = RepGhostModule(mid_chs, out_chs, relu=False, reparam=reparam)
# shortcut
if in_chs == out_chs and self.stride == 1:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False),
nn.BatchNorm2d(in_chs),
nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
shortcut = x
# 1st ghost bottleneck
x = self.ghost1(x)
# Depth-wise convolution
if self.conv_dw is not None:
x = self.conv_dw(x)
x = self.bn_dw(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# 2nd ghost bottleneck
x = self.ghost2(x)
x += self.shortcut(shortcut)
return x
class RepGhostNet(nn.Module):
def __init__(
self,
cfgs,
num_classes=1000,
width=1.0,
in_chans=3,
output_stride=32,
global_pool='avg',
drop_rate=0.2,
reparam=True,
):
super(RepGhostNet, self).__init__()
# setting of inverted residual blocks
assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported'
self.cfgs = cfgs
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
# building first layer
stem_chs = make_divisible(16 * width, 4)
self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False)
        self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module='conv_stem'))
self.bn1 = nn.BatchNorm2d(stem_chs)
self.act1 = nn.ReLU(inplace=True)
prev_chs = stem_chs
# building inverted residual blocks
stages = nn.ModuleList([])
block = RepGhostBottleneck
stage_idx = 0
net_stride = 2
for cfg in self.cfgs:
layers = []
s = 1
for k, exp_size, c, se_ratio, s in cfg:
out_chs = make_divisible(c * width, 4)
mid_chs = make_divisible(exp_size * width, 4)
layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, reparam=reparam))
prev_chs = out_chs
if s > 1:
net_stride *= 2
self.feature_info.append(dict(
num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}'))
stages.append(nn.Sequential(*layers))
stage_idx += 1
out_chs = make_divisible(exp_size * width * 2, 4)
stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1)))
self.pool_dim = prev_chs = out_chs
self.blocks = nn.Sequential(*stages)
# building last several layers
self.num_features = out_chs = 1280
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True)
self.act2 = nn.ReLU(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv_stem|bn1',
blocks=[
(r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None),
(r'conv_head', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
        # NOTE: conv_head is kept as-is; only the pooling, flatten and classifier layers are rebuilt
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x):
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.classifier(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def convert_to_deploy(self):
repghost_model_convert(self, do_copy=False)
def repghost_model_convert(model: torch.nn.Module, save_path=None, do_copy=True):
"""
    taken from https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py
"""
if do_copy:
model = copy.deepcopy(model)
for module in model.modules():
if hasattr(module, 'switch_to_deploy'):
module.switch_to_deploy()
if save_path is not None:
torch.save(model.state_dict(), save_path)
return model
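# Usage sketch (illustrative, not from the original source): models built by the factory functions
# below can be fused for inference either via this helper or the model method, e.g.
#
#   model = repghostnet_100(pretrained=False).eval()
#   deploy_model = repghost_model_convert(model)   # fused deep copy
#   model.convert_to_deploy()                      # or fuse the original in place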
def _create_repghostnet(variant, width=1.0, pretrained=False, **kwargs):
"""
Constructs a RepGhostNet model
"""
cfgs = [
# k, t, c, SE, s
# stage1
[[3, 8, 16, 0, 1]],
# stage2
[[3, 24, 24, 0, 2]],
[[3, 36, 24, 0, 1]],
# stage3
[[5, 36, 40, 0.25, 2]],
[[5, 60, 40, 0.25, 1]],
# stage4
[[3, 120, 80, 0, 2]],
[[3, 100, 80, 0, 1],
[3, 120, 80, 0, 1],
[3, 120, 80, 0, 1],
[3, 240, 112, 0.25, 1],
[3, 336, 112, 0.25, 1]
],
# stage5
[[5, 336, 160, 0.25, 2]],
[[5, 480, 160, 0, 1],
[5, 480, 160, 0.25, 1],
[5, 480, 160, 0, 1],
[5, 480, 160, 0.25, 1]
]
]
model_kwargs = dict(
cfgs=cfgs,
width=width,
**kwargs,
)
return build_model_with_cfg(
RepGhostNet,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
**model_kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'repghostnet_050.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_5x_43M_66.95.pth.tar'
),
'repghostnet_058.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_58x_60M_68.94.pth.tar'
),
'repghostnet_080.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_8x_96M_72.24.pth.tar'
),
'repghostnet_100.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_0x_142M_74.22.pth.tar'
),
'repghostnet_111.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_11x_170M_75.07.pth.tar'
),
'repghostnet_130.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_3x_231M_76.37.pth.tar'
),
'repghostnet_150.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_5x_301M_77.45.pth.tar'
),
'repghostnet_200.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_2_0x_516M_78.81.pth.tar'
),
})
@register_model
def repghostnet_050(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-0.5x """
model = _create_repghostnet('repghostnet_050', width=0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_058(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-0.58x """
model = _create_repghostnet('repghostnet_058', width=0.58, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_080(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-0.8x """
model = _create_repghostnet('repghostnet_080', width=0.8, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_100(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-1.0x """
model = _create_repghostnet('repghostnet_100', width=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_111(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-1.11x """
model = _create_repghostnet('repghostnet_111', width=1.11, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_130(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-1.3x """
model = _create_repghostnet('repghostnet_130', width=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_150(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-1.5x """
model = _create_repghostnet('repghostnet_150', width=1.5, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_200(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-2.0x """
model = _create_repghostnet('repghostnet_200', width=2.0, pretrained=pretrained, **kwargs)
return model
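# Minimal smoke test (added sketch, not part of the original file): build the smallest variant
# without pretrained weights and check the classifier output shape.
if __name__ == '__main__':
    _model = repghostnet_050(pretrained=False).eval()
    with torch.no_grad():
        _out = _model(torch.randn(2, 3, 224, 224))
    print('repghostnet_050 output shape:', tuple(_out.shape))  # expected (2, 1000)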
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_pretrained.py | import copy
from collections import deque, defaultdict
from dataclasses import dataclass, field, replace, asdict
from typing import Any, Deque, Dict, Tuple, Optional, Union
__all__ = ['PretrainedCfg', 'filter_pretrained_cfg', 'DefaultCfg']
@dataclass
class PretrainedCfg:
"""
"""
# weight source locations
url: Optional[Union[str, Tuple[str, str]]] = None # remote URL
file: Optional[str] = None # local / shared filesystem path
state_dict: Optional[Dict[str, Any]] = None # in-memory state dict
hf_hub_id: Optional[str] = None # Hugging Face Hub model id ('organization/model')
hf_hub_filename: Optional[str] = None # Hugging Face Hub filename (overrides default)
source: Optional[str] = None # source of cfg / weight location used (url, file, hf-hub)
architecture: Optional[str] = None # architecture variant can be set when not implicit
tag: Optional[str] = None # pretrained tag of source
custom_load: bool = False # use custom model specific model.load_pretrained() (ie for npz files)
# input / data config
input_size: Tuple[int, int, int] = (3, 224, 224)
test_input_size: Optional[Tuple[int, int, int]] = None
min_input_size: Optional[Tuple[int, int, int]] = None
fixed_input_size: bool = False
interpolation: str = 'bicubic'
crop_pct: float = 0.875
test_crop_pct: Optional[float] = None
crop_mode: str = 'center'
mean: Tuple[float, ...] = (0.485, 0.456, 0.406)
std: Tuple[float, ...] = (0.229, 0.224, 0.225)
# head / classifier config and meta-data
num_classes: int = 1000
label_offset: Optional[int] = None
label_names: Optional[Tuple[str]] = None
label_descriptions: Optional[Dict[str, str]] = None
# model attributes that vary with above or required for pretrained adaptation
pool_size: Optional[Tuple[int, ...]] = None
test_pool_size: Optional[Tuple[int, ...]] = None
first_conv: Optional[str] = None
classifier: Optional[str] = None
license: Optional[str] = None
description: Optional[str] = None
origin_url: Optional[str] = None
paper_name: Optional[str] = None
paper_ids: Optional[Union[str, Tuple[str]]] = None
notes: Optional[Tuple[str]] = None
@property
def has_weights(self):
return self.url or self.file or self.hf_hub_id
def to_dict(self, remove_source=False, remove_null=True):
return filter_pretrained_cfg(
asdict(self),
remove_source=remove_source,
remove_null=remove_null
)
def filter_pretrained_cfg(cfg, remove_source=False, remove_null=True):
filtered_cfg = {}
keep_null = {'pool_size', 'first_conv', 'classifier'} # always keep these keys, even if none
for k, v in cfg.items():
        if remove_source and k in {'url', 'file', 'hf_hub_id', 'hf_hub_filename', 'source'}:
continue
if remove_null and v is None and k not in keep_null:
continue
filtered_cfg[k] = v
return filtered_cfg
@dataclass
class DefaultCfg:
tags: Deque[str] = field(default_factory=deque) # priority queue of tags (first is default)
cfgs: Dict[str, PretrainedCfg] = field(default_factory=dict) # pretrained cfgs by tag
is_pretrained: bool = False # at least one of the configs has a pretrained source set
@property
def default(self):
return self.cfgs[self.tags[0]]
@property
def default_with_tag(self):
tag = self.tags[0]
return tag, self.cfgs[tag]
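# Illustrative usage (added sketch, not from the original file; 'org/model' is a placeholder id):
#
#   cfg = PretrainedCfg(hf_hub_id='org/model', input_size=(3, 256, 256), num_classes=1000)
#   assert cfg.has_weights
#   cfg.to_dict(remove_source=True)   # drops url / file / hf_hub_* / source, keeps non-null values
#   deft = DefaultCfg(tags=deque(['tag_a']), cfgs={'tag_a': cfg})
#   deft.default_with_tag             # -> ('tag_a', cfg)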
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/byoanet.py | """ Bring-Your-Own-Attention Network
A flexible network w/ dataclass based config for stacking NN blocks including
self-attention (or similar) layers.
Currently used to implement experimental variants of:
* Bottleneck Transformers
* Lambda ResNets
* HaloNets
Consider all of the models definitions here as experimental WIP and likely to change.
Hacked together by / copyright Ross Wightman, 2021.
"""
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks
__all__ = []
model_cfgs = dict(
botnet26t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25),
ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
fixed_input_size=True,
self_attn_layer='bottleneck',
self_attn_kwargs=dict()
),
sebotnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25),
ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
act_layer='silu',
num_features=1280,
attn_layer='se',
self_attn_layer='bottleneck',
self_attn_kwargs=dict()
),
botnet50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
fixed_input_size=True,
self_attn_layer='bottleneck',
self_attn_kwargs=dict()
),
eca_botnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25),
ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
fixed_input_size=True,
act_layer='silu',
attn_layer='eca',
self_attn_layer='bottleneck',
self_attn_kwargs=dict(dim_head=16)
),
halonet_h1=ByoModelCfg(
blocks=(
ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0),
ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0),
ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0),
ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0),
),
stem_chs=64,
stem_type='7x7',
stem_pool='maxpool',
self_attn_layer='halo',
self_attn_kwargs=dict(block_size=8, halo_size=3),
),
halonet26t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25),
ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
self_attn_layer='halo',
self_attn_kwargs=dict(block_size=8, halo_size=2)
),
sehalonet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25),
ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
act_layer='silu',
num_features=1280,
attn_layer='se',
self_attn_layer='halo',
self_attn_kwargs=dict(block_size=8, halo_size=3)
),
halonet50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25),
interleave_blocks(
types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25,
self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)),
interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
self_attn_layer='halo',
self_attn_kwargs=dict(block_size=8, halo_size=3)
),
eca_halonext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25),
ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='eca',
self_attn_layer='halo',
self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16)
),
lambda_resnet26t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25),
ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
self_attn_layer='lambda',
self_attn_kwargs=dict(r=9)
),
lambda_resnet50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
self_attn_layer='lambda',
self_attn_kwargs=dict(r=9)
),
lambda_resnet26rpt_256=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25),
interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25),
ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
self_attn_layer='lambda',
self_attn_kwargs=dict(r=None)
),
# experimental
haloregnetz_b=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3),
interleave_blocks(types=('bottle', 'self_attn'), every=3, d=12, c=192, s=2, gs=16, br=3),
ByoBlockCfg('self_attn', d=2, c=288, s=2, gs=16, br=3),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
self_attn_layer='halo',
self_attn_kwargs=dict(block_size=7, halo_size=2, qk_ratio=0.33)
),
# experimental
lamhalobotnet50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25),
interleave_blocks(
types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25,
self_attn_layer='lambda', self_attn_kwargs=dict(r=13)),
interleave_blocks(
types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25,
self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)),
interleave_blocks(
types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25,
self_attn_layer='bottleneck', self_attn_kwargs=dict()),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
act_layer='silu',
),
halo2botnet50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25),
interleave_blocks(
types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25,
self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)),
interleave_blocks(
types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25,
self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)),
interleave_blocks(
types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25,
self_attn_layer='bottleneck', self_attn_kwargs=dict()),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
act_layer='silu',
),
)
def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
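# NOTE (added comment): `variant` is the registered / checkpoint name while `cfg_variant` selects
# the architecture definition in `model_cfgs`; e.g. the fixed-size registrations below call
# _create_byoanet('botnet26t_256', 'botnet26t', ...) so the 256px name maps onto the
# resolution-agnostic block config.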
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc',
'fixed_input_size': False, 'min_input_size': (3, 224, 224),
**kwargs
}
default_cfgs = generate_default_cfgs({
    # BotNet / HaloNet / LambdaNet self-attention model weights
'botnet26t_256.c1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet26t_c1_256-167a0e9f.pth',
hf_hub_id='timm/',
fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
'sebotnet33ts_256.a1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sebotnet33ts_a1h2_256-957e3c3e.pth',
hf_hub_id='timm/',
fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94),
'botnet50ts_256.untrained': _cfg(
fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
'eca_botnext26ts_256.c1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_botnext26ts_c_256-95a898f6.pth',
hf_hub_id='timm/',
fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
'halonet_h1.untrained': _cfg(input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
'halonet26t.a1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_a1h_256-3083328c.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)),
'sehalonet33ts.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94),
'halonet50ts.a1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet50ts_a1h2_256-f3a3daee.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94),
'eca_halonext26ts.c1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_c_256-06906299.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94),
'lambda_resnet26t.c1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_c_256-e5a5c857.pth',
hf_hub_id='timm/',
min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94),
'lambda_resnet50ts.a1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet50ts_a1h_256-b87370f7.pth',
hf_hub_id='timm/',
min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)),
'lambda_resnet26rpt_256.c1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26rpt_c_256-ab00292d.pth',
hf_hub_id='timm/',
fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94),
'haloregnetz_b.ra3_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/haloregnetz_c_raa_256-c8ad7616.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
first_conv='stem.conv', input_size=(3, 224, 224), pool_size=(7, 7), min_input_size=(3, 224, 224), crop_pct=0.94),
'lamhalobotnet50ts_256.a1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lamhalobotnet50ts_a1h2_256-fe3d9445.pth',
hf_hub_id='timm/',
fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
'halo2botnet50ts_256.a1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halo2botnet50ts_a1h2_256-fd9c11a3.pth',
hf_hub_id='timm/',
fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)),
})
@register_model
def botnet26t_256(pretrained=False, **kwargs) -> ByobNet:
""" Bottleneck Transformer w/ ResNet26-T backbone.
"""
kwargs.setdefault('img_size', 256)
return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs)
@register_model
def sebotnet33ts_256(pretrained=False, **kwargs) -> ByobNet:
""" Bottleneck Transformer w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU,
"""
return _create_byoanet('sebotnet33ts_256', 'sebotnet33ts', pretrained=pretrained, **kwargs)
@register_model
def botnet50ts_256(pretrained=False, **kwargs) -> ByobNet:
""" Bottleneck Transformer w/ ResNet50-T backbone, silu act.
"""
kwargs.setdefault('img_size', 256)
return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs)
@register_model
def eca_botnext26ts_256(pretrained=False, **kwargs) -> ByobNet:
""" Bottleneck Transformer w/ ResNet26-T backbone, silu act.
"""
kwargs.setdefault('img_size', 256)
return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs)
@register_model
def halonet_h1(pretrained=False, **kwargs) -> ByobNet:
""" HaloNet-H1. Halo attention in all stages as per the paper.
NOTE: This runs very slowly!
"""
return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs)
@register_model
def halonet26t(pretrained=False, **kwargs) -> ByobNet:
""" HaloNet w/ a ResNet26-t backbone. Halo attention in final two stages
"""
return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs)
@register_model
def sehalonet33ts(pretrained=False, **kwargs) -> ByobNet:
""" HaloNet w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, 1-2 Halo in stage 2,3,4.
"""
return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs)
@register_model
def halonet50ts(pretrained=False, **kwargs) -> ByobNet:
""" HaloNet w/ a ResNet50-t backbone, silu act. Halo attention in final two stages
"""
return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs)
@register_model
def eca_halonext26ts(pretrained=False, **kwargs) -> ByobNet:
""" HaloNet w/ a ResNet26-t backbone, silu act. Halo attention in final two stages
"""
return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs)
@register_model
def lambda_resnet26t(pretrained=False, **kwargs) -> ByobNet:
""" Lambda-ResNet-26-T. Lambda layers w/ conv pos in last two stages.
"""
return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs)
@register_model
def lambda_resnet50ts(pretrained=False, **kwargs) -> ByobNet:
""" Lambda-ResNet-50-TS. SiLU act. Lambda layers w/ conv pos in last two stages.
"""
return _create_byoanet('lambda_resnet50ts', pretrained=pretrained, **kwargs)
@register_model
def lambda_resnet26rpt_256(pretrained=False, **kwargs) -> ByobNet:
""" Lambda-ResNet-26-R-T. Lambda layers w/ rel pos embed in last two stages.
"""
kwargs.setdefault('img_size', 256)
return _create_byoanet('lambda_resnet26rpt_256', pretrained=pretrained, **kwargs)
@register_model
def haloregnetz_b(pretrained=False, **kwargs) -> ByobNet:
""" Halo + RegNetZ
"""
return _create_byoanet('haloregnetz_b', pretrained=pretrained, **kwargs)
@register_model
def lamhalobotnet50ts_256(pretrained=False, **kwargs) -> ByobNet:
""" Combo Attention (Lambda + Halo + Bot) Network
"""
return _create_byoanet('lamhalobotnet50ts_256', 'lamhalobotnet50ts', pretrained=pretrained, **kwargs)
@register_model
def halo2botnet50ts_256(pretrained=False, **kwargs) -> ByobNet:
""" Combo Attention (Halo + Halo + Bot) Network
"""
return _create_byoanet('halo2botnet50ts_256', 'halo2botnet50ts', pretrained=pretrained, **kwargs)
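# Illustrative usage (added sketch, not from the original file; assumes `import torch`):
#
#   model = halonet26t(pretrained=False).eval()
#   with torch.no_grad():
#       logits = model(torch.randn(1, 3, 256, 256))   # -> shape (1, 1000)
#
# The same registered names also resolve through the timm factory, e.g. timm.create_model('halonet26t').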
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/pnasnet.py | """
pnasnet5large implementation grabbed from Cadene's pretrained models
Additional credit to https://github.com/creafz
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py
"""
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['PNASNet5Large']
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''):
super(SeparableConv2d, self).__init__()
self.depthwise_conv2d = create_conv2d(
in_channels, in_channels, kernel_size=kernel_size,
stride=stride, padding=padding, groups=in_channels)
self.pointwise_conv2d = create_conv2d(
in_channels, out_channels, kernel_size=1, padding=padding)
def forward(self, x):
x = self.depthwise_conv2d(x)
x = self.pointwise_conv2d(x)
return x
class BranchSeparables(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''):
super(BranchSeparables, self).__init__()
middle_channels = out_channels if stem_cell else in_channels
self.act_1 = nn.ReLU()
self.separable_1 = SeparableConv2d(
in_channels, middle_channels, kernel_size, stride=stride, padding=padding)
self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001)
self.act_2 = nn.ReLU()
self.separable_2 = SeparableConv2d(
middle_channels, out_channels, kernel_size, stride=1, padding=padding)
self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.act_1(x)
x = self.separable_1(x)
x = self.bn_sep_1(x)
x = self.act_2(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
class ActConvBn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''):
super(ActConvBn, self).__init__()
self.act = nn.ReLU()
self.conv = create_conv2d(
in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.act(x)
x = self.conv(x)
x = self.bn(x)
return x
class FactorizedReduction(nn.Module):
def __init__(self, in_channels, out_channels, padding=''):
super(FactorizedReduction, self).__init__()
self.act = nn.ReLU()
self.path_1 = nn.Sequential(OrderedDict([
('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),
]))
self.path_2 = nn.Sequential(OrderedDict([
('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift
('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),
]))
self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.act(x)
x_path1 = self.path_1(x)
x_path2 = self.path_2(x)
out = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
return out
class CellBase(nn.Module):
def cell_forward(self, x_left, x_right):
x_comb_iter_0_left = self.comb_iter_0_left(x_left)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_right)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2)
x_comb_iter_3_right = self.comb_iter_3_right(x_right)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_left)
if self.comb_iter_4_right is not None:
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
else:
x_comb_iter_4_right = x_right
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
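# NOTE (added comment): each cell concatenates its five branch outputs on the channel dim; with
# out_chs_left == out_chs_right (as used throughout PNASNet5Large) a cell therefore emits 5x that
# width, e.g. CellStem0 below is built with out_chs_right=54 and feeds 270 channels into cell_stem_1.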
class CellStem0(CellBase):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(CellStem0, self).__init__()
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(
in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type)
self.comb_iter_0_right = nn.Sequential(OrderedDict([
('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)),
('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)),
('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)),
]))
self.comb_iter_1_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type)
self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type)
self.comb_iter_2_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type)
self.comb_iter_3_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=3, padding=pad_type)
self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(
in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type)
self.comb_iter_4_right = ActConvBn(
out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type)
def forward(self, x_left):
x_right = self.conv_1x1(x_left)
x_out = self.cell_forward(x_left, x_right)
return x_out
class Cell(CellBase):
def __init__(
self,
in_chs_left,
out_chs_left,
in_chs_right,
out_chs_right,
pad_type='',
is_reduction=False,
match_prev_layer_dims=False,
):
super(Cell, self).__init__()
# If `is_reduction` is set to `True` stride 2 is used for
# convolution and pooling layers to reduce the spatial size of
# the output of a cell approximately by a factor of 2.
stride = 2 if is_reduction else 1
        # If `match_prev_layer_dims` is set to `True`
# `FactorizedReduction` is used to reduce the spatial size
# of the left input of a cell approximately by a factor of 2.
self.match_prev_layer_dimensions = match_prev_layer_dims
if match_prev_layer_dims:
self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type)
else:
self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(
out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type)
self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
self.comb_iter_1_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type)
self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
self.comb_iter_2_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type)
self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3)
self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(
out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type)
if is_reduction:
self.comb_iter_4_right = ActConvBn(
out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type)
else:
self.comb_iter_4_right = None
def forward(self, x_left, x_right):
x_left = self.conv_prev_1x1(x_left)
x_right = self.conv_1x1(x_right)
x_out = self.cell_forward(x_left, x_right)
return x_out
class PNASNet5Large(nn.Module):
def __init__(
self,
num_classes=1000,
in_chans=3,
output_stride=32,
drop_rate=0.,
global_pool='avg',
pad_type='',
):
super(PNASNet5Large, self).__init__()
self.num_classes = num_classes
self.num_features = 4320
assert output_stride == 32
self.conv_0 = ConvNormAct(
in_chans, 96, kernel_size=3, stride=2, padding=0,
norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False)
self.cell_stem_0 = CellStem0(
in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type)
self.cell_stem_1 = Cell(
in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type,
match_prev_layer_dims=True, is_reduction=True)
self.cell_0 = Cell(
in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type,
match_prev_layer_dims=True)
self.cell_1 = Cell(
in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
self.cell_2 = Cell(
in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
self.cell_3 = Cell(
in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
self.cell_4 = Cell(
in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type,
is_reduction=True)
self.cell_5 = Cell(
in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type,
match_prev_layer_dims=True)
self.cell_6 = Cell(
in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type)
self.cell_7 = Cell(
in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type)
self.cell_8 = Cell(
in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type,
is_reduction=True)
self.cell_9 = Cell(
in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type,
match_prev_layer_dims=True)
self.cell_10 = Cell(
in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type)
self.cell_11 = Cell(
in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type)
self.act = nn.ReLU()
self.feature_info = [
dict(num_chs=96, reduction=2, module='conv_0'),
dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'),
dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'),
dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'),
dict(num_chs=4320, reduction=32, module='act'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(stem=r'^conv_0|cell_stem_[01]', blocks=r'^cell_(\d+)')
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.last_linear
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x_conv_0 = self.conv_0(x)
x_stem_0 = self.cell_stem_0(x_conv_0)
x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0)
x_cell_0 = self.cell_0(x_stem_0, x_stem_1)
x_cell_1 = self.cell_1(x_stem_1, x_cell_0)
x_cell_2 = self.cell_2(x_cell_0, x_cell_1)
x_cell_3 = self.cell_3(x_cell_1, x_cell_2)
x_cell_4 = self.cell_4(x_cell_2, x_cell_3)
x_cell_5 = self.cell_5(x_cell_3, x_cell_4)
x_cell_6 = self.cell_6(x_cell_4, x_cell_5)
x_cell_7 = self.cell_7(x_cell_5, x_cell_6)
x_cell_8 = self.cell_8(x_cell_6, x_cell_7)
x_cell_9 = self.cell_9(x_cell_7, x_cell_8)
x_cell_10 = self.cell_10(x_cell_8, x_cell_9)
x_cell_11 = self.cell_11(x_cell_9, x_cell_10)
x = self.act(x_cell_11)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_pnasnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
PNASNet5Large,
variant,
pretrained,
feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model
**kwargs,
)
default_cfgs = generate_default_cfgs({
'pnasnet5large.tf_in1k': {
'hf_hub_id': 'timm/',
'input_size': (3, 331, 331),
'pool_size': (11, 11),
'crop_pct': 0.911,
'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5),
'std': (0.5, 0.5, 0.5),
'num_classes': 1000,
'first_conv': 'conv_0.conv',
'classifier': 'last_linear',
},
})
@register_model
def pnasnet5large(pretrained=False, **kwargs) -> PNASNet5Large:
r"""PNASNet-5 model architecture from the
`"Progressive Neural Architecture Search"
<https://arxiv.org/abs/1712.00559>`_ paper.
"""
model_kwargs = dict(pad_type='same', **kwargs)
return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs)
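# Smoke-test sketch (added, not part of the original file): the default cfg expects 331x331 inputs.
if __name__ == '__main__':
    _model = pnasnet5large(pretrained=False).eval()
    with torch.no_grad():
        _logits = _model(torch.randn(1, 3, 331, 331))
    print('pnasnet5large logits shape:', tuple(_logits.shape))  # expected (1, 1000)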
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/coat.py | """
CoaT architecture.
Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399
Official CoaT code at: https://github.com/mlpc-ucsd/CoaT
Modified from timm/models/vision_transformer.py
"""
from functools import partial
from typing import Tuple, List, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, LayerNorm
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['CoaT']
class ConvRelPosEnc(nn.Module):
""" Convolutional relative position encoding. """
def __init__(self, head_chs, num_heads, window):
"""
Initialization.
Ch: Channels per head.
h: Number of heads.
window: Window size(s) in convolutional relative positional encoding. It can have two forms:
1. An integer of window size, which assigns all attention heads with the same window s
size in ConvRelPosEnc.
2. A dict mapping window size to #attention head splits (
e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2})
It will apply different window size to the attention head splits.
"""
super().__init__()
if isinstance(window, int):
# Set the same window size for all attention heads.
window = {window: num_heads}
self.window = window
elif isinstance(window, dict):
self.window = window
else:
raise ValueError()
self.conv_list = nn.ModuleList()
self.head_splits = []
for cur_window, cur_head_split in window.items():
dilation = 1
# Determine padding size.
# Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338
padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2
cur_conv = nn.Conv2d(
cur_head_split * head_chs,
cur_head_split * head_chs,
kernel_size=(cur_window, cur_window),
padding=(padding_size, padding_size),
dilation=(dilation, dilation),
groups=cur_head_split * head_chs,
)
self.conv_list.append(cur_conv)
self.head_splits.append(cur_head_split)
self.channel_splits = [x * head_chs for x in self.head_splits]
def forward(self, q, v, size: Tuple[int, int]):
B, num_heads, N, C = q.shape
H, W = size
_assert(N == 1 + H * W, '')
# Convolutional relative position encoding.
q_img = q[:, :, 1:, :] # [B, h, H*W, Ch]
v_img = v[:, :, 1:, :] # [B, h, H*W, Ch]
v_img = v_img.transpose(-1, -2).reshape(B, num_heads * C, H, W)
v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels
conv_v_img_list = []
for i, conv in enumerate(self.conv_list):
conv_v_img_list.append(conv(v_img_list[i]))
conv_v_img = torch.cat(conv_v_img_list, dim=1)
conv_v_img = conv_v_img.reshape(B, num_heads, C, H * W).transpose(-1, -2)
EV_hat = q_img * conv_v_img
EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch].
return EV_hat
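# NOTE (added comment): with the CoaT defaults of num_heads=8 and crpe_window={3: 2, 5: 3, 7: 3},
# the heads are split 2/3/3 and each split gets its own depthwise conv of kernel size 3, 5 or 7;
# `channel_splits` holds the per-split channel counts (head_chs * split).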
class FactorAttnConvRelPosEnc(nn.Module):
""" Factorized attention with convolutional relative position encoding class. """
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
shared_crpe=None,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used.
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
# Shared convolutional relative position encoding.
self.crpe = shared_crpe
def forward(self, x, size: Tuple[int, int]):
B, N, C = x.shape
# Generate Q, K, V.
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # [B, h, N, Ch]
# Factorized attention.
k_softmax = k.softmax(dim=2)
factor_att = k_softmax.transpose(-1, -2) @ v
factor_att = q @ factor_att
# Convolutional relative position encoding.
crpe = self.crpe(q, v, size=size) # [B, h, N, Ch]
# Merge and reshape.
x = self.scale * factor_att + crpe
x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C]
# Output projection.
x = self.proj(x)
x = self.proj_drop(x)
return x
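# NOTE (added comment): the factorized attention above first reduces K^T @ V to a [B, h, Ch, Ch]
# matrix (after a softmax over the token dim of K) and only then multiplies by Q, so its cost grows
# linearly with the token count N rather than quadratically as in standard softmax attention.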
class ConvPosEnc(nn.Module):
""" Convolutional Position Encoding.
Note: This module is similar to the conditional position encoding in CPVT.
"""
def __init__(self, dim, k=3):
super(ConvPosEnc, self).__init__()
self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim)
def forward(self, x, size: Tuple[int, int]):
B, N, C = x.shape
H, W = size
_assert(N == 1 + H * W, '')
# Extract CLS token and image tokens.
cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C]
# Depthwise convolution.
feat = img_tokens.transpose(1, 2).view(B, C, H, W)
x = self.proj(feat) + feat
x = x.flatten(2).transpose(1, 2)
# Combine with CLS token.
x = torch.cat((cls_token, x), dim=1)
return x
class SerialBlock(nn.Module):
""" Serial block class.
Note: In this implementation, each serial block only contains a conv-attention and a FFN (MLP) module. """
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
shared_cpe=None,
shared_crpe=None,
):
super().__init__()
# Conv-Attention.
self.cpe = shared_cpe
self.norm1 = norm_layer(dim)
self.factoratt_crpe = FactorAttnConvRelPosEnc(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpe,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# MLP.
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, x, size: Tuple[int, int]):
# Conv-Attention.
x = self.cpe(x, size)
cur = self.norm1(x)
cur = self.factoratt_crpe(cur, size)
x = x + self.drop_path(cur)
# MLP.
cur = self.norm2(x)
cur = self.mlp(cur)
x = x + self.drop_path(cur)
return x
class ParallelBlock(nn.Module):
""" Parallel block class. """
def __init__(
self,
dims,
num_heads,
mlp_ratios=[],
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
shared_crpes=None,
):
super().__init__()
# Conv-Attention.
self.norm12 = norm_layer(dims[1])
self.norm13 = norm_layer(dims[2])
self.norm14 = norm_layer(dims[3])
self.factoratt_crpe2 = FactorAttnConvRelPosEnc(
dims[1],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[1],
)
self.factoratt_crpe3 = FactorAttnConvRelPosEnc(
dims[2],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[2],
)
self.factoratt_crpe4 = FactorAttnConvRelPosEnc(
dims[3],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[3],
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# MLP.
self.norm22 = norm_layer(dims[1])
self.norm23 = norm_layer(dims[2])
self.norm24 = norm_layer(dims[3])
# In parallel block, we assume dimensions are the same and share the linear transformation.
assert dims[1] == dims[2] == dims[3]
assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3]
mlp_hidden_dim = int(dims[1] * mlp_ratios[1])
self.mlp2 = self.mlp3 = self.mlp4 = Mlp(
in_features=dims[1],
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def upsample(self, x, factor: float, size: Tuple[int, int]):
""" Feature map up-sampling. """
return self.interpolate(x, scale_factor=factor, size=size)
def downsample(self, x, factor: float, size: Tuple[int, int]):
""" Feature map down-sampling. """
return self.interpolate(x, scale_factor=1.0/factor, size=size)
def interpolate(self, x, scale_factor: float, size: Tuple[int, int]):
""" Feature map interpolation. """
B, N, C = x.shape
H, W = size
_assert(N == 1 + H * W, '')
cls_token = x[:, :1, :]
img_tokens = x[:, 1:, :]
img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W)
img_tokens = F.interpolate(
img_tokens,
scale_factor=scale_factor,
recompute_scale_factor=False,
mode='bilinear',
align_corners=False,
)
img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2)
out = torch.cat((cls_token, img_tokens), dim=1)
return out
def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]):
_, S2, S3, S4 = sizes
cur2 = self.norm12(x2)
cur3 = self.norm13(x3)
cur4 = self.norm14(x4)
cur2 = self.factoratt_crpe2(cur2, size=S2)
cur3 = self.factoratt_crpe3(cur3, size=S3)
cur4 = self.factoratt_crpe4(cur4, size=S4)
upsample3_2 = self.upsample(cur3, factor=2., size=S3)
upsample4_3 = self.upsample(cur4, factor=2., size=S4)
upsample4_2 = self.upsample(cur4, factor=4., size=S4)
downsample2_3 = self.downsample(cur2, factor=2., size=S2)
downsample3_4 = self.downsample(cur3, factor=2., size=S3)
downsample2_4 = self.downsample(cur2, factor=4., size=S2)
cur2 = cur2 + upsample3_2 + upsample4_2
cur3 = cur3 + upsample4_3 + downsample2_3
cur4 = cur4 + downsample3_4 + downsample2_4
x2 = x2 + self.drop_path(cur2)
x3 = x3 + self.drop_path(cur3)
x4 = x4 + self.drop_path(cur4)
# MLP.
cur2 = self.norm22(x2)
cur3 = self.norm23(x3)
cur4 = self.norm24(x4)
cur2 = self.mlp2(cur2)
cur3 = self.mlp3(cur3)
cur4 = self.mlp4(cur4)
x2 = x2 + self.drop_path(cur2)
x3 = x3 + self.drop_path(cur3)
x4 = x4 + self.drop_path(cur4)
return x1, x2, x3, x4
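# NOTE (added comment): ParallelBlock exchanges information across the three coarser scales each
# step: x2/x3/x4 are attended per scale, cross-scale terms are formed by up-/down-sampling the
# spatial grid by factors of 2 and 4 (the CLS token is carried through unchanged), summed, and then
# passed through a shared-weight MLP; x1 is returned untouched.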
class CoaT(nn.Module):
""" CoaT class. """
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dims=(64, 128, 320, 512),
serial_depths=(3, 4, 6, 3),
parallel_depth=0,
num_heads=8,
mlp_ratios=(4, 4, 4, 4),
qkv_bias=True,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm,
return_interm_layers=False,
out_features=None,
crpe_window=None,
global_pool='token',
):
super().__init__()
assert global_pool in ('token', 'avg')
crpe_window = crpe_window or {3: 2, 5: 3, 7: 3}
self.return_interm_layers = return_interm_layers
self.out_features = out_features
self.embed_dims = embed_dims
self.num_features = embed_dims[-1]
self.num_classes = num_classes
self.global_pool = global_pool
# Patch embeddings.
img_size = to_2tuple(img_size)
self.patch_embed1 = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans,
embed_dim=embed_dims[0], norm_layer=nn.LayerNorm)
self.patch_embed2 = PatchEmbed(
img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1], norm_layer=nn.LayerNorm)
self.patch_embed3 = PatchEmbed(
img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2], norm_layer=nn.LayerNorm)
self.patch_embed4 = PatchEmbed(
img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3], norm_layer=nn.LayerNorm)
# Class tokens.
self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0]))
self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1]))
self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2]))
self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3]))
# Convolutional position encodings.
self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3)
self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3)
self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3)
self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3)
# Convolutional relative position encodings.
self.crpe1 = ConvRelPosEnc(head_chs=embed_dims[0] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe2 = ConvRelPosEnc(head_chs=embed_dims[1] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe3 = ConvRelPosEnc(head_chs=embed_dims[2] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe4 = ConvRelPosEnc(head_chs=embed_dims[3] // num_heads, num_heads=num_heads, window=crpe_window)
# Disable stochastic depth.
dpr = drop_path_rate
assert dpr == 0.0
skwargs = dict(
num_heads=num_heads,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr,
norm_layer=norm_layer,
)
# Serial blocks 1.
self.serial_blocks1 = nn.ModuleList([
SerialBlock(
dim=embed_dims[0],
mlp_ratio=mlp_ratios[0],
shared_cpe=self.cpe1,
shared_crpe=self.crpe1,
**skwargs,
)
for _ in range(serial_depths[0])]
)
# Serial blocks 2.
self.serial_blocks2 = nn.ModuleList([
SerialBlock(
dim=embed_dims[1],
mlp_ratio=mlp_ratios[1],
shared_cpe=self.cpe2,
shared_crpe=self.crpe2,
**skwargs,
)
for _ in range(serial_depths[1])]
)
# Serial blocks 3.
self.serial_blocks3 = nn.ModuleList([
SerialBlock(
dim=embed_dims[2],
mlp_ratio=mlp_ratios[2],
shared_cpe=self.cpe3,
shared_crpe=self.crpe3,
**skwargs,
)
for _ in range(serial_depths[2])]
)
# Serial blocks 4.
self.serial_blocks4 = nn.ModuleList([
SerialBlock(
dim=embed_dims[3],
mlp_ratio=mlp_ratios[3],
shared_cpe=self.cpe4,
shared_crpe=self.crpe4,
**skwargs,
)
for _ in range(serial_depths[3])]
)
# Parallel blocks.
self.parallel_depth = parallel_depth
if self.parallel_depth > 0:
self.parallel_blocks = nn.ModuleList([
ParallelBlock(
dims=embed_dims,
mlp_ratios=mlp_ratios,
shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4),
**skwargs,
)
for _ in range(parallel_depth)]
)
else:
self.parallel_blocks = None
# Classification head(s).
if not self.return_interm_layers:
if self.parallel_blocks is not None:
self.norm2 = norm_layer(embed_dims[1])
self.norm3 = norm_layer(embed_dims[2])
else:
self.norm2 = self.norm3 = None
self.norm4 = norm_layer(embed_dims[3])
if self.parallel_depth > 0:
# CoaT series: Aggregate features of last three scales for classification.
assert embed_dims[1] == embed_dims[2] == embed_dims[3]
self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
else:
# CoaT-Lite series: Use feature of last scale for classification.
self.aggregate = None
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# Initialize weights.
trunc_normal_(self.cls_token1, std=.02)
trunc_normal_(self.cls_token2, std=.02)
trunc_normal_(self.cls_token3, std=.02)
trunc_normal_(self.cls_token4, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'}
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem1=r'^cls_token1|patch_embed1|crpe1|cpe1',
serial_blocks1=r'^serial_blocks1\.(\d+)',
stem2=r'^cls_token2|patch_embed2|crpe2|cpe2',
serial_blocks2=r'^serial_blocks2\.(\d+)',
stem3=r'^cls_token3|patch_embed3|crpe3|cpe3',
serial_blocks3=r'^serial_blocks3\.(\d+)',
stem4=r'^cls_token4|patch_embed4|crpe4|cpe4',
serial_blocks4=r'^serial_blocks4\.(\d+)',
parallel_blocks=[ # FIXME (partially?) overlap parallel w/ serial blocks??
(r'^parallel_blocks\.(\d+)', None),
(r'^norm|aggregate', (99999,)),
]
)
return matcher
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('token', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x0):
B = x0.shape[0]
# Serial blocks 1.
x1 = self.patch_embed1(x0)
H1, W1 = self.patch_embed1.grid_size
x1 = insert_cls(x1, self.cls_token1)
for blk in self.serial_blocks1:
x1 = blk(x1, size=(H1, W1))
x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 2.
x2 = self.patch_embed2(x1_nocls)
H2, W2 = self.patch_embed2.grid_size
x2 = insert_cls(x2, self.cls_token2)
for blk in self.serial_blocks2:
x2 = blk(x2, size=(H2, W2))
x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 3.
x3 = self.patch_embed3(x2_nocls)
H3, W3 = self.patch_embed3.grid_size
x3 = insert_cls(x3, self.cls_token3)
for blk in self.serial_blocks3:
x3 = blk(x3, size=(H3, W3))
x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 4.
x4 = self.patch_embed4(x3_nocls)
H4, W4 = self.patch_embed4.grid_size
x4 = insert_cls(x4, self.cls_token4)
for blk in self.serial_blocks4:
x4 = blk(x4, size=(H4, W4))
x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous()
# Only serial blocks: Early return.
if self.parallel_blocks is None:
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2).
feat_out = {}
if 'x1_nocls' in self.out_features:
feat_out['x1_nocls'] = x1_nocls
if 'x2_nocls' in self.out_features:
feat_out['x2_nocls'] = x2_nocls
if 'x3_nocls' in self.out_features:
feat_out['x3_nocls'] = x3_nocls
if 'x4_nocls' in self.out_features:
feat_out['x4_nocls'] = x4_nocls
return feat_out
else:
# Return features for classification.
x4 = self.norm4(x4)
return x4
# Parallel blocks.
for blk in self.parallel_blocks:
x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4))
x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)])
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2).
feat_out = {}
if 'x1_nocls' in self.out_features:
x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x1_nocls'] = x1_nocls
if 'x2_nocls' in self.out_features:
x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x2_nocls'] = x2_nocls
if 'x3_nocls' in self.out_features:
x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x3_nocls'] = x3_nocls
if 'x4_nocls' in self.out_features:
x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x4_nocls'] = x4_nocls
return feat_out
else:
x2 = self.norm2(x2)
x3 = self.norm3(x3)
x4 = self.norm4(x4)
return [x2, x3, x4]
def forward_head(self, x_feat: Union[torch.Tensor, List[torch.Tensor]], pre_logits: bool = False):
if isinstance(x_feat, list):
assert self.aggregate is not None
if self.global_pool == 'avg':
x = torch.cat([xl[:, 1:].mean(dim=1, keepdim=True) for xl in x_feat], dim=1) # [B, 3, C]
else:
x = torch.stack([xl[:, 0] for xl in x_feat], dim=1) # [B, 3, C]
x = self.aggregate(x).squeeze(dim=1) # Shape: [B, C]
else:
x = x_feat[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x_feat[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x) -> torch.Tensor:
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features (for down-stream tasks).
return self.forward_features(x)
else:
# Return features for classification.
x_feat = self.forward_features(x)
x = self.forward_head(x_feat)
return x
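# NOTE: illustrative usage sketch, not part of the upstream CoaT implementation. The helper
# name is ours; the config mirrors the coat_lite_tiny variant registered later in this file
# (patch_size=4) so the four patch embeds produce consistent grid sizes.
def _coat_lite_usage_example():
    """Show how the classification path flows through forward_features/forward_head."""
    model = CoaT(
        img_size=224, patch_size=4, embed_dims=(64, 128, 256, 320),
        serial_depths=(2, 2, 2, 2), mlp_ratios=(8, 8, 4, 4), num_classes=10)
    model.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        tokens = model.forward_features(x)   # (1, 1 + 7 * 7, 320), class token included
        logits = model.forward_head(tokens)  # (1, 10), pools the class token by default
    assert logits.shape == (1, 10)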
def insert_cls(x, cls_token):
""" Insert CLS token. """
cls_tokens = cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
return x
def remove_cls(x):
""" Remove CLS token. """
return x[:, 1:, :]
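# NOTE: tiny illustrative sketch (ours, not upstream) of the two helpers above: the class
# token is prepended along the sequence dimension and stripped again before tokens are
# reshaped back into a 2D feature map.
def _cls_token_helpers_example():
    tokens = torch.zeros(2, 49, 64)      # B=2, 7x7 patch tokens, C=64
    cls_token = torch.zeros(1, 1, 64)    # shared class token, broadcast over the batch
    with_cls = insert_cls(tokens, cls_token)
    assert with_cls.shape == (2, 50, 64)
    assert remove_cls(with_cls).shape == (2, 49, 64)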
def checkpoint_filter_fn(state_dict, model):
out_dict = {}
state_dict = state_dict.get('model', state_dict)
for k, v in state_dict.items():
# original model had unused norm layers, removing them requires filtering pretrained checkpoints
if k.startswith('norm1') or \
(k.startswith('norm2') and getattr(model, 'norm2', None) is None) or \
(k.startswith('norm3') and getattr(model, 'norm3', None) is None) or \
(k.startswith('norm4') and getattr(model, 'norm4', None) is None) or \
(k.startswith('aggregate') and getattr(model, 'aggregate', None) is None) or \
(k.startswith('head') and getattr(model, 'head', None) is None):
continue
out_dict[k] = v
return out_dict
def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
CoaT,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
return model
def _cfg_coat(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed1.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'coat_tiny.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_mini.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_small.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_tiny.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_mini.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_small.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_medium.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_medium_384.in1k': _cfg_coat(
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0, crop_mode='squash',
),
})
@register_model
def coat_tiny(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_mini(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_mini', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_small(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
        patch_size=4, embed_dims=[152, 320, 320, 320], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_tiny(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_mini(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_mini', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_small(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_medium(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8])
model = _create_coat('coat_lite_medium', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_medium_384(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
img_size=384, patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8])
model = _create_coat('coat_lite_medium_384', pretrained=pretrained, **dict(model_cfg, **kwargs))
    return model
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/fastvit.py | # FastViT for PyTorch
#
# Original implementation and weights from https://github.com/apple/ml-fastvit
#
# For licensing see accompanying LICENSE file at https://github.com/apple/ml-fastvit/tree/main
# Original work is copyright (C) 2023 Apple Inc. All Rights Reserved.
#
import os
from functools import partial
from typing import Tuple, Optional, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, create_conv2d, ConvNormAct, SqueezeExcite, use_fused_attn, \
ClassifierHead
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
class MobileOneBlock(nn.Module):
"""MobileOne building block.
This block has a multi-branched architecture at train-time
    and a plain-CNN style architecture at inference time.
    For more details, please refer to our paper:
    `MobileOne: An Improved One millisecond Mobile Backbone` -
https://arxiv.org/pdf/2206.04040.pdf
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
group_size: int = 0,
inference_mode: bool = False,
use_se: bool = False,
use_act: bool = True,
use_scale_branch: bool = True,
num_conv_branches: int = 1,
act_layer: nn.Module = nn.GELU,
) -> None:
"""Construct a MobileOneBlock module.
Args:
in_chs: Number of channels in the input.
out_chs: Number of channels produced by the block.
kernel_size: Size of the convolution kernel.
stride: Stride size.
dilation: Kernel dilation factor.
group_size: Convolution group size.
inference_mode: If True, instantiates model in inference mode.
use_se: Whether to use SE-ReLU activations.
use_act: Whether to use activation. Default: ``True``
use_scale_branch: Whether to use scale branch. Default: ``True``
num_conv_branches: Number of linear conv branches.
"""
super(MobileOneBlock, self).__init__()
self.inference_mode = inference_mode
self.groups = num_groups(group_size, in_chs)
self.stride = stride
self.dilation = dilation
self.kernel_size = kernel_size
self.in_chs = in_chs
self.out_chs = out_chs
self.num_conv_branches = num_conv_branches
# Check if SE-ReLU is requested
self.se = SqueezeExcite(out_chs, rd_divisor=1) if use_se else nn.Identity()
if inference_mode:
self.reparam_conv = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=self.groups,
bias=True,
)
else:
# Re-parameterizable skip connection
self.reparam_conv = None
self.identity = (
nn.BatchNorm2d(num_features=in_chs)
if out_chs == in_chs and stride == 1
else None
)
# Re-parameterizable conv branches
if num_conv_branches > 0:
self.conv_kxk = nn.ModuleList([
ConvNormAct(
self.in_chs,
self.out_chs,
kernel_size=kernel_size,
stride=self.stride,
groups=self.groups,
apply_act=False,
) for _ in range(self.num_conv_branches)
])
else:
self.conv_kxk = None
# Re-parameterizable scale branch
self.conv_scale = None
if kernel_size > 1 and use_scale_branch:
self.conv_scale = ConvNormAct(
self.in_chs,
self.out_chs,
kernel_size=1,
stride=self.stride,
groups=self.groups,
apply_act=False
)
self.act = act_layer() if use_act else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Apply forward pass."""
# Inference mode forward pass.
if self.reparam_conv is not None:
return self.act(self.se(self.reparam_conv(x)))
# Multi-branched train-time forward pass.
# Identity branch output
identity_out = 0
if self.identity is not None:
identity_out = self.identity(x)
# Scale branch output
scale_out = 0
if self.conv_scale is not None:
scale_out = self.conv_scale(x)
# Other kxk conv branches
out = scale_out + identity_out
if self.conv_kxk is not None:
for rc in self.conv_kxk:
out += rc(x)
return self.act(self.se(out))
def reparameterize(self):
"""Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
if self.reparam_conv is not None:
return
kernel, bias = self._get_kernel_bias()
self.reparam_conv = create_conv2d(
in_channels=self.in_chs,
out_channels=self.out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = kernel
self.reparam_conv.bias.data = bias
# Delete un-used branches
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("conv_kxk")
self.__delattr__("conv_scale")
if hasattr(self, "identity"):
self.__delattr__("identity")
self.inference_mode = True
def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83
Returns:
Tuple of (kernel, bias) after fusing branches.
"""
# get weights and bias of scale branch
kernel_scale = 0
bias_scale = 0
if self.conv_scale is not None:
kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale)
# Pad scale branch kernel to match conv branch kernel size.
pad = self.kernel_size // 2
kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad])
# get weights and bias of skip branch
kernel_identity = 0
bias_identity = 0
if self.identity is not None:
kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
# get weights and bias of conv branches
kernel_conv = 0
bias_conv = 0
if self.conv_kxk is not None:
for ix in range(self.num_conv_branches):
_kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix])
kernel_conv += _kernel
bias_conv += _bias
kernel_final = kernel_conv + kernel_scale + kernel_identity
bias_final = bias_conv + bias_scale + bias_identity
return kernel_final, bias_final
def _fuse_bn_tensor(
self, branch: Union[nn.Sequential, nn.BatchNorm2d]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to fuse batchnorm layer with preceeding conv layer.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95
Args:
branch: Sequence of ops to be fused.
Returns:
Tuple of (kernel, bias) after fusing batchnorm.
"""
if isinstance(branch, ConvNormAct):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, "id_tensor"):
input_dim = self.in_chs // self.groups
kernel_value = torch.zeros(
(self.in_chs, input_dim, self.kernel_size, self.kernel_size),
dtype=branch.weight.dtype,
device=branch.weight.device,
)
for i in range(self.in_chs):
kernel_value[
i, i % input_dim, self.kernel_size // 2, self.kernel_size // 2
] = 1
self.id_tensor = kernel_value
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
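# NOTE: illustrative sanity-check sketch, not part of the upstream implementation. It
# demonstrates the claim in the class docstring: after reparameterize() the multi-branch
# block collapses into a single conv and, in eval mode, produces the same output.
def _mobileone_reparam_example():
    block = MobileOneBlock(in_chs=32, out_chs=32, kernel_size=3, group_size=1, num_conv_branches=2)
    block.eval()  # BN branches must use running stats for an exact match
    x = torch.randn(1, 32, 16, 16)
    with torch.no_grad():
        y_multi_branch = block(x)
        block.reparameterize()
        y_fused = block(x)
    assert torch.allclose(y_multi_branch, y_fused, atol=1e-5)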
class ReparamLargeKernelConv(nn.Module):
"""Building Block of RepLKNet
This class defines overparameterized large kernel conv block
introduced in `RepLKNet <https://arxiv.org/abs/2203.06717>`_
Reference: https://github.com/DingXiaoH/RepLKNet-pytorch
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int,
group_size: int,
small_kernel: Optional[int] = None,
inference_mode: bool = False,
act_layer: Optional[nn.Module] = None,
) -> None:
"""Construct a ReparamLargeKernelConv module.
Args:
in_chs: Number of input channels.
out_chs: Number of output channels.
kernel_size: Kernel size of the large kernel conv branch.
stride: Stride size. Default: 1
group_size: Group size. Default: 1
small_kernel: Kernel size of small kernel conv branch.
inference_mode: If True, instantiates model in inference mode. Default: ``False``
            act_layer: Activation module. Default: ``None`` (no extra activation)
"""
super(ReparamLargeKernelConv, self).__init__()
self.stride = stride
self.groups = num_groups(group_size, in_chs)
self.in_chs = in_chs
self.out_chs = out_chs
self.kernel_size = kernel_size
self.small_kernel = small_kernel
if inference_mode:
self.reparam_conv = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=1,
groups=self.groups,
bias=True,
)
else:
self.reparam_conv = None
self.large_conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=self.stride,
groups=self.groups,
apply_act=False,
)
if small_kernel is not None:
assert (
small_kernel <= kernel_size
), "The kernel size for re-param cannot be larger than the large kernel!"
self.small_conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=small_kernel,
stride=self.stride,
groups=self.groups,
apply_act=False,
)
# FIXME output of this act was not used in original impl, likely due to bug
self.act = act_layer() if act_layer is not None else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
out = self.reparam_conv(x)
else:
out = self.large_conv(x)
if self.small_conv is not None:
out = out + self.small_conv(x)
out = self.act(out)
return out
def get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepLKNet-pytorch
Returns:
Tuple of (kernel, bias) after fusing branches.
"""
eq_k, eq_b = self._fuse_bn(self.large_conv.conv, self.large_conv.bn)
if hasattr(self, "small_conv"):
small_k, small_b = self._fuse_bn(self.small_conv.conv, self.small_conv.bn)
eq_b += small_b
eq_k += nn.functional.pad(
small_k, [(self.kernel_size - self.small_kernel) // 2] * 4
)
return eq_k, eq_b
def reparameterize(self) -> None:
"""
Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
eq_k, eq_b = self.get_kernel_bias()
self.reparam_conv = create_conv2d(
self.in_chs,
self.out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = eq_k
self.reparam_conv.bias.data = eq_b
self.__delattr__("large_conv")
if hasattr(self, "small_conv"):
self.__delattr__("small_conv")
@staticmethod
def _fuse_bn(
conv: torch.Tensor, bn: nn.BatchNorm2d
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to fuse batchnorm layer with conv layer.
Args:
conv: Convolutional kernel weights.
bn: Batchnorm 2d layer.
Returns:
Tuple of (kernel, bias) after fusing batchnorm.
"""
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
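# NOTE: illustrative sketch (ours) of the BN-fusion identity used by _fuse_bn above:
#   w_fused = w * gamma / sqrt(var + eps),  b_fused = beta - mean * gamma / sqrt(var + eps)
# so that conv -> BN equals a single conv with the fused weight and bias (eval mode).
def _fuse_bn_example():
    conv = nn.Conv2d(8, 8, kernel_size=3, padding=1, bias=False)
    bn = nn.BatchNorm2d(8)
    # give BN non-trivial frozen statistics so the check is meaningful
    bn.running_mean.uniform_(-1., 1.)
    bn.running_var.uniform_(0.5, 2.0)
    bn.weight.data.uniform_(0.5, 1.5)
    bn.bias.data.uniform_(-1., 1.)
    conv.eval()
    bn.eval()
    w_fused, b_fused = ReparamLargeKernelConv._fuse_bn(conv, bn)
    fused = nn.Conv2d(8, 8, kernel_size=3, padding=1, bias=True)
    fused.weight.data = w_fused
    fused.bias.data = b_fused
    x = torch.randn(2, 8, 16, 16)
    with torch.no_grad():
        assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)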
def convolutional_stem(
in_chs: int,
out_chs: int,
act_layer: nn.Module = nn.GELU,
inference_mode: bool = False
) -> nn.Sequential:
"""Build convolutional stem with MobileOne blocks.
Args:
in_chs: Number of input channels.
out_chs: Number of output channels.
inference_mode: Flag to instantiate model in inference mode. Default: ``False``
Returns:
nn.Sequential object with stem elements.
"""
return nn.Sequential(
MobileOneBlock(
in_chs=in_chs,
out_chs=out_chs,
kernel_size=3,
stride=2,
act_layer=act_layer,
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=out_chs,
out_chs=out_chs,
kernel_size=3,
stride=2,
group_size=1,
act_layer=act_layer,
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=out_chs,
out_chs=out_chs,
kernel_size=1,
stride=1,
act_layer=act_layer,
inference_mode=inference_mode,
),
)
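# NOTE: illustrative shape sketch (ours): the two stride-2 MobileOne blocks give the stem
# an overall stride of 4; the final 1x1 block only mixes channels.
def _convolutional_stem_example():
    stem = convolutional_stem(3, 48)
    with torch.no_grad():
        out = stem(torch.randn(1, 3, 256, 256))
    assert out.shape == (1, 48, 64, 64)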
class Attention(nn.Module):
"""Multi-headed Self Attention module.
Source modified from:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
head_dim: int = 32,
qkv_bias: bool = False,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
"""Build MHSA module that can handle 3D or 4D input tensors.
Args:
dim: Number of embedding dimensions.
head_dim: Number of hidden dimensions per head. Default: ``32``
qkv_bias: Use bias or not. Default: ``False``
attn_drop: Dropout rate for attention tensor.
proj_drop: Dropout rate for projection tensor.
"""
super().__init__()
assert dim % head_dim == 0, "dim should be divisible by head_dim"
self.head_dim = head_dim
self.num_heads = dim // head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
N = H * W
x = x.flatten(2).transpose(-2, -1) # (B, N, C)
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, self.head_dim)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
x = x.transpose(-2, -1).reshape(B, C, H, W)
return x
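# NOTE: illustrative sketch (ours): unlike a typical ViT attention layer, this module
# consumes and returns NCHW feature maps, flattening to tokens only internally.
def _attention_shape_example():
    attn = Attention(dim=64, head_dim=32)  # 64 / 32 = 2 heads
    x = torch.randn(2, 64, 8, 8)
    with torch.no_grad():
        y = attn(x)
    assert y.shape == x.shape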
class PatchEmbed(nn.Module):
"""Convolutional patch embedding layer."""
def __init__(
self,
patch_size: int,
stride: int,
in_chs: int,
embed_dim: int,
act_layer: nn.Module = nn.GELU,
lkc_use_act: bool = False,
inference_mode: bool = False,
) -> None:
"""Build patch embedding layer.
Args:
patch_size: Patch size for embedding computation.
stride: Stride for convolutional embedding layer.
in_chs: Number of channels of input tensor.
embed_dim: Number of embedding dimensions.
inference_mode: Flag to instantiate model in inference mode. Default: ``False``
"""
super().__init__()
self.proj = nn.Sequential(
ReparamLargeKernelConv(
in_chs=in_chs,
out_chs=embed_dim,
kernel_size=patch_size,
stride=stride,
group_size=1,
small_kernel=3,
inference_mode=inference_mode,
act_layer=act_layer if lkc_use_act else None, # NOTE original weights didn't use this act
),
MobileOneBlock(
in_chs=embed_dim,
out_chs=embed_dim,
kernel_size=1,
stride=1,
act_layer=act_layer,
inference_mode=inference_mode,
)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.proj(x)
return x
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim, 1, 1))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class RepMixer(nn.Module):
"""Reparameterizable token mixer.
For more details, please refer to our paper:
`FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization <https://arxiv.org/pdf/2303.14189.pdf>`_
"""
def __init__(
self,
dim,
kernel_size=3,
layer_scale_init_value=1e-5,
inference_mode: bool = False,
):
"""Build RepMixer Module.
Args:
dim: Input feature map dimension. :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, H, W)`.
kernel_size: Kernel size for spatial mixing. Default: 3
layer_scale_init_value: Initial value for layer scale. Default: 1e-5
inference_mode: If True, instantiates model in inference mode. Default: ``False``
"""
super().__init__()
self.dim = dim
self.kernel_size = kernel_size
self.inference_mode = inference_mode
if inference_mode:
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim,
kernel_size=self.kernel_size,
stride=1,
padding=self.kernel_size // 2,
groups=self.dim,
bias=True,
)
else:
self.reparam_conv = None
self.norm = MobileOneBlock(
dim,
dim,
kernel_size,
group_size=1,
use_act=False,
use_scale_branch=False,
num_conv_branches=0,
)
self.mixer = MobileOneBlock(
dim,
dim,
kernel_size,
group_size=1,
use_act=False,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(dim, layer_scale_init_value)
else:
            self.layer_scale = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
x = self.reparam_conv(x)
else:
x = x + self.layer_scale(self.mixer(x) - self.norm(x))
return x
def reparameterize(self) -> None:
"""Reparameterize mixer and norm into a single
convolutional layer for efficient inference.
"""
if self.inference_mode:
return
self.mixer.reparameterize()
self.norm.reparameterize()
if isinstance(self.layer_scale, LayerScale2d):
w = self.mixer.id_tensor + self.layer_scale.gamma.unsqueeze(-1) * (
self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight
)
b = torch.squeeze(self.layer_scale.gamma) * (
self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias
)
else:
w = (
self.mixer.id_tensor
+ self.mixer.reparam_conv.weight
- self.norm.reparam_conv.weight
)
b = self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias
self.reparam_conv = create_conv2d(
self.dim,
self.dim,
kernel_size=self.kernel_size,
stride=1,
groups=self.dim,
bias=True,
)
self.reparam_conv.weight.data = w
self.reparam_conv.bias.data = b
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("mixer")
self.__delattr__("norm")
self.__delattr__("layer_scale")
class ConvMlp(nn.Module):
"""Convolutional FFN Module."""
def __init__(
self,
in_chs: int,
hidden_channels: Optional[int] = None,
out_chs: Optional[int] = None,
act_layer: nn.Module = nn.GELU,
drop: float = 0.0,
) -> None:
"""Build convolutional FFN module.
Args:
in_chs: Number of input channels.
hidden_channels: Number of channels after expansion. Default: None
out_chs: Number of output channels. Default: None
act_layer: Activation layer. Default: ``GELU``
drop: Dropout rate. Default: ``0.0``.
"""
super().__init__()
out_chs = out_chs or in_chs
hidden_channels = hidden_channels or in_chs
self.conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=7,
groups=in_chs,
apply_act=False,
)
self.fc1 = nn.Conv2d(in_chs, hidden_channels, kernel_size=1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_channels, out_chs, kernel_size=1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepConditionalPosEnc(nn.Module):
"""Implementation of conditional positional encoding.
For more details refer to paper:
`Conditional Positional Encodings for Vision Transformers <https://arxiv.org/pdf/2102.10882.pdf>`_
In our implementation, we can reparameterize this module to eliminate a skip connection.
"""
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
spatial_shape: Union[int, Tuple[int, int]] = (7, 7),
inference_mode=False,
) -> None:
"""Build reparameterizable conditional positional encoding
Args:
dim: Number of input channels.
            dim_out: Number of output embedding dimensions. Default: None (same as ``dim``)
spatial_shape: Spatial shape of kernel for positional encoding. Default: (7, 7)
inference_mode: Flag to instantiate block in inference mode. Default: ``False``
"""
super(RepConditionalPosEnc, self).__init__()
if isinstance(spatial_shape, int):
spatial_shape = tuple([spatial_shape] * 2)
assert isinstance(spatial_shape, Tuple), (
f'"spatial_shape" must by a sequence or int, '
f"get {type(spatial_shape)} instead."
)
assert len(spatial_shape) == 2, (
f'Length of "spatial_shape" should be 2, '
f"got {len(spatial_shape)} instead."
)
self.spatial_shape = spatial_shape
self.dim = dim
self.dim_out = dim_out or dim
self.groups = dim
if inference_mode:
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim_out,
kernel_size=self.spatial_shape,
stride=1,
padding=spatial_shape[0] // 2,
groups=self.groups,
bias=True,
)
else:
self.reparam_conv = None
self.pos_enc = nn.Conv2d(
self.dim,
self.dim_out,
spatial_shape,
1,
int(spatial_shape[0] // 2),
groups=self.groups,
bias=True,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
x = self.reparam_conv(x)
else:
x = self.pos_enc(x) + x
return x
def reparameterize(self) -> None:
# Build equivalent Id tensor
input_dim = self.dim // self.groups
kernel_value = torch.zeros(
(
self.dim,
input_dim,
self.spatial_shape[0],
self.spatial_shape[1],
),
dtype=self.pos_enc.weight.dtype,
device=self.pos_enc.weight.device,
)
for i in range(self.dim):
kernel_value[
i,
i % input_dim,
self.spatial_shape[0] // 2,
self.spatial_shape[1] // 2,
] = 1
id_tensor = kernel_value
# Reparameterize Id tensor and conv
w_final = id_tensor + self.pos_enc.weight
b_final = self.pos_enc.bias
# Introduce reparam conv
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim_out,
kernel_size=self.spatial_shape,
stride=1,
padding=int(self.spatial_shape[0] // 2),
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = w_final
self.reparam_conv.bias.data = b_final
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("pos_enc")
class RepMixerBlock(nn.Module):
"""Implementation of Metaformer block with RepMixer as token mixer.
For more details on Metaformer structure, please refer to:
`MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_
"""
def __init__(
self,
dim: int,
kernel_size: int = 3,
mlp_ratio: float = 4.0,
act_layer: nn.Module = nn.GELU,
proj_drop: float = 0.0,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-5,
inference_mode: bool = False,
):
"""Build RepMixer Block.
Args:
dim: Number of embedding dimensions.
kernel_size: Kernel size for repmixer. Default: 3
mlp_ratio: MLP expansion ratio. Default: 4.0
act_layer: Activation layer. Default: ``nn.GELU``
proj_drop: Dropout rate. Default: 0.0
drop_path: Drop path rate. Default: 0.0
layer_scale_init_value: Layer scale value at initialization. Default: 1e-5
inference_mode: Flag to instantiate block in inference mode. Default: ``False``
"""
super().__init__()
self.token_mixer = RepMixer(
dim,
kernel_size=kernel_size,
layer_scale_init_value=layer_scale_init_value,
inference_mode=inference_mode,
)
self.mlp = ConvMlp(
in_chs=dim,
hidden_channels=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = self.token_mixer(x)
x = x + self.drop_path(self.layer_scale(self.mlp(x)))
return x
class AttentionBlock(nn.Module):
"""Implementation of metaformer block with MHSA as token mixer.
For more details on Metaformer structure, please refer to:
`MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_
"""
def __init__(
self,
dim: int,
mlp_ratio: float = 4.0,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.BatchNorm2d,
proj_drop: float = 0.0,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-5,
):
"""Build Attention Block.
Args:
dim: Number of embedding dimensions.
mlp_ratio: MLP expansion ratio. Default: 4.0
act_layer: Activation layer. Default: ``nn.GELU``
norm_layer: Normalization layer. Default: ``nn.BatchNorm2d``
proj_drop: Dropout rate. Default: 0.0
drop_path: Drop path rate. Default: 0.0
layer_scale_init_value: Layer scale value at initialization. Default: 1e-5
"""
super().__init__()
self.norm = norm_layer(dim)
self.token_mixer = Attention(dim=dim)
if layer_scale_init_value is not None:
self.layer_scale_1 = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale_1 = nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.mlp = ConvMlp(
in_chs=dim,
hidden_channels=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
if layer_scale_init_value is not None:
self.layer_scale_2 = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale_2 = nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.layer_scale_1(self.token_mixer(self.norm(x))))
x = x + self.drop_path2(self.layer_scale_2(self.mlp(x)))
return x
class FastVitStage(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
depth: int,
token_mixer_type: str,
downsample: bool = True,
down_patch_size: int = 7,
down_stride: int = 2,
pos_emb_layer: Optional[nn.Module] = None,
kernel_size: int = 3,
mlp_ratio: float = 4.0,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.BatchNorm2d,
proj_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
layer_scale_init_value: Optional[float] = 1e-5,
lkc_use_act=False,
inference_mode=False,
):
"""FastViT stage.
Args:
dim: Number of embedding dimensions.
depth: Number of blocks in stage
token_mixer_type: Token mixer type.
kernel_size: Kernel size for repmixer.
mlp_ratio: MLP expansion ratio.
act_layer: Activation layer.
norm_layer: Normalization layer.
proj_drop_rate: Dropout rate.
drop_path_rate: Drop path rate.
layer_scale_init_value: Layer scale value at initialization.
inference_mode: Flag to instantiate block in inference mode.
"""
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = PatchEmbed(
patch_size=down_patch_size,
stride=down_stride,
in_chs=dim,
embed_dim=dim_out,
act_layer=act_layer,
lkc_use_act=lkc_use_act,
inference_mode=inference_mode,
)
else:
assert dim == dim_out
self.downsample = nn.Identity()
if pos_emb_layer is not None:
self.pos_emb = pos_emb_layer(dim_out, inference_mode=inference_mode)
else:
self.pos_emb = nn.Identity()
blocks = []
for block_idx in range(depth):
if token_mixer_type == "repmixer":
blocks.append(RepMixerBlock(
dim_out,
kernel_size=kernel_size,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
proj_drop=proj_drop_rate,
drop_path=drop_path_rate[block_idx],
layer_scale_init_value=layer_scale_init_value,
inference_mode=inference_mode,
))
elif token_mixer_type == "attention":
blocks.append(AttentionBlock(
dim_out,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop=proj_drop_rate,
drop_path=drop_path_rate[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
else:
raise ValueError(
"Token mixer type: {} not supported".format(token_mixer_type)
)
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.pos_emb(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class FastVit(nn.Module):
    """
    This class implements `FastViT architecture <https://arxiv.org/pdf/2303.14189.pdf>`_
    """
    fork_feat: torch.jit.Final[bool]
def __init__(
self,
in_chans: int = 3,
layers: Tuple[int, ...] = (2, 2, 6, 2),
token_mixers: Tuple[str, ...] = ("repmixer", "repmixer", "repmixer", "repmixer"),
embed_dims: Tuple[int, ...] = (64, 128, 256, 512),
mlp_ratios: Tuple[float, ...] = (4,) * 4,
downsamples: Tuple[bool, ...] = (False, True, True, True),
repmixer_kernel_size: int = 3,
num_classes: int = 1000,
pos_embs: Tuple[Optional[nn.Module], ...] = (None,) * 4,
down_patch_size: int = 7,
down_stride: int = 2,
drop_rate: float = 0.0,
proj_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
layer_scale_init_value: float = 1e-5,
fork_feat: bool = False,
cls_ratio: float = 2.0,
global_pool: str = 'avg',
norm_layer: nn.Module = nn.BatchNorm2d,
act_layer: nn.Module = nn.GELU,
lkc_use_act: bool = False,
inference_mode: bool = False,
) -> None:
super().__init__()
self.num_classes = 0 if fork_feat else num_classes
self.fork_feat = fork_feat
self.global_pool = global_pool
self.feature_info = []
# Convolutional stem
self.stem = convolutional_stem(
in_chans,
embed_dims[0],
act_layer,
inference_mode,
)
# Build the main stages of the network architecture
prev_dim = embed_dims[0]
scale = 1
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
stages = []
for i in range(len(layers)):
downsample = downsamples[i] or prev_dim != embed_dims[i]
stage = FastVitStage(
dim=prev_dim,
dim_out=embed_dims[i],
depth=layers[i],
downsample=downsample,
down_patch_size=down_patch_size,
down_stride=down_stride,
pos_emb_layer=pos_embs[i],
token_mixer_type=token_mixers[i],
kernel_size=repmixer_kernel_size,
mlp_ratio=mlp_ratios[i],
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop_rate=proj_drop_rate,
drop_path_rate=dpr[i],
layer_scale_init_value=layer_scale_init_value,
lkc_use_act=lkc_use_act,
inference_mode=inference_mode,
)
stages.append(stage)
prev_dim = embed_dims[i]
if downsample:
scale *= 2
self.feature_info += [dict(num_chs=prev_dim, reduction=4 * scale, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = prev_dim
# For segmentation and detection, extract intermediate output
if self.fork_feat:
# Add a norm layer for each output. self.stages is slightly different than self.network
# in the original code, the PatchEmbed layer is part of self.stages in this code where
# it was part of self.network in the original code. So we do not need to skip out indices.
self.out_indices = [0, 1, 2, 3]
for i_emb, i_layer in enumerate(self.out_indices):
if i_emb == 0 and os.environ.get("FORK_LAST3", None):
"""For RetinaNet, `start_level=1`. The first norm layer will not used.
cmd: `FORK_LAST3=1 python -m torch.distributed.launch ...`
"""
layer = nn.Identity()
else:
layer = norm_layer(embed_dims[i_emb])
layer_name = f"norm{i_layer}"
self.add_module(layer_name, layer)
else:
# Classifier head
self.num_features = final_features = int(embed_dims[-1] * cls_ratio)
self.final_conv = MobileOneBlock(
in_chs=embed_dims[-1],
out_chs=final_features,
kernel_size=3,
stride=1,
group_size=1,
inference_mode=inference_mode,
use_se=True,
act_layer=act_layer,
num_conv_branches=1,
)
self.head = ClassifierHead(
final_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
"""Init. for classification"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return set()
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+).pos_emb', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
# input embedding
x = self.stem(x)
outs = []
for idx, block in enumerate(self.stages):
x = block(x)
if self.fork_feat:
if idx in self.out_indices:
norm_layer = getattr(self, f"norm{idx}")
x_out = norm_layer(x)
outs.append(x_out)
if self.fork_feat:
# output the features of four stages for dense prediction
return outs
x = self.final_conv(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
if self.fork_feat:
return x
x = self.forward_head(x)
return x
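# NOTE: illustrative usage sketch (ours, with reduced depths/widths to keep it cheap): the
# same class serves as a classifier or, with fork_feat=True, as a 4-scale dense-feature
# backbone that returns one normalized feature map per stage.
def _fastvit_usage_example():
    x = torch.randn(1, 3, 256, 256)
    classifier = FastVit(layers=(1, 1, 1, 1), embed_dims=(32, 64, 128, 256), num_classes=10)
    classifier.eval()
    with torch.no_grad():
        logits = classifier(x)
    assert logits.shape == (1, 10)
    backbone = FastVit(layers=(1, 1, 1, 1), embed_dims=(32, 64, 128, 256), fork_feat=True)
    backbone.eval()
    with torch.no_grad():
        feats = backbone(x)
    assert len(feats) == 4 and feats[0].shape == (1, 32, 64, 64)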
def _cfg(url="", **kwargs):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, 256, 256),
"pool_size": (8, 8),
"crop_pct": 0.9,
"interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
'first_conv': ('stem.0.conv_kxk.0.conv', 'stem.0.conv_scale.conv'),
"classifier": "head.fc",
**kwargs,
}
default_cfgs = generate_default_cfgs({
"fastvit_t8.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_t12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_s12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa24.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa36.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_ma36.apple_in1k": _cfg(
hf_hub_id='timm/',
crop_pct=0.95
),
"fastvit_t8.apple_dist_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_t12.apple_dist_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_s12.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa12.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa24.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa36.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_ma36.apple_dist_in1k": _cfg(
hf_hub_id='timm/',
crop_pct=0.95
),
})
def _create_fastvit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
FastVit,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
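# NOTE: illustrative sketch (ours): the entrypoints registered below (e.g. fastvit_t8) are
# normally built through the timm registry; features_only relies on the feature_cfg passed
# above. Exact create_model behaviour depends on the surrounding timm package version.
def _registry_usage_example():
    import timm  # assumes this module lives inside the installed timm package
    model = timm.create_model('fastvit_t8', pretrained=False)
    backbone = timm.create_model('fastvit_t8', pretrained=False, features_only=True)
    return model, backbone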
@register_model
def fastvit_t8(pretrained=False, **kwargs):
"""Instantiate FastViT-T8 model variant."""
model_args = dict(
layers=(2, 2, 4, 2),
embed_dims=(48, 96, 192, 384),
mlp_ratios=(3, 3, 3, 3),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer")
)
return _create_fastvit('fastvit_t8', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_t12(pretrained=False, **kwargs):
"""Instantiate FastViT-T12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"),
)
return _create_fastvit('fastvit_t12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_s12(pretrained=False, **kwargs):
"""Instantiate FastViT-S12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"),
)
return _create_fastvit('fastvit_s12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa12(pretrained=False, **kwargs):
"""Instantiate FastViT-SA12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa24(pretrained=False, **kwargs):
"""Instantiate FastViT-SA24 model variant."""
model_args = dict(
layers=(4, 4, 12, 4),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa24', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa36(pretrained=False, **kwargs):
"""Instantiate FastViT-SA36 model variant."""
model_args = dict(
layers=(6, 6, 18, 6),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa36', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_ma36(pretrained=False, **kwargs):
"""Instantiate FastViT-MA36 model variant."""
model_args = dict(
layers=(6, 6, 18, 6),
embed_dims=(76, 152, 304, 608),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention")
)
return _create_fastvit('fastvit_ma36', pretrained=pretrained, **dict(model_args, **kwargs))
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/senet.py | """
SEResNet implementation from Cadene's pretrained models
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
Additional credit to https://github.com/creafz
Original model: https://github.com/hujie-frank/SENet
ResNet code gently borrowed from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate
support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here.
"""
import math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['SENet']
def _weight_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.)
nn.init.constant_(m.bias, 0.)
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = x.mean((2, 3), keepdim=True)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
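# NOTE: illustrative sketch (ours): the SE module squeezes spatial dims down to one gate
# per channel and rescales the input, so the output shape is unchanged.
def _se_module_example():
    se = SEModule(channels=64, reduction=16)
    x = torch.randn(2, 64, 7, 7)
    with torch.no_grad():
        y = se(x)
    assert y.shape == x.shape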
class Bottleneck(nn.Module):
"""
Base class for bottlenecks that implements `forward()` method.
"""
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out = self.se_module(out) + shortcut
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes * 2)
self.conv2 = nn.Conv2d(
planes * 2, planes * 4, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes * 4)
self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBottleneck(Bottleneck):
"""
ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
implementation and uses `stride=stride` in `conv1` and not in `conv2`
(the latter is used in the torchvision implementation of ResNet).
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
"""
ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = math.floor(planes * (base_width / 64)) * groups
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
super(SEResNetBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes, reduction=reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out = self.se_module(out) + shortcut
out = self.relu(out)
return out
class SENet(nn.Module):
def __init__(
self, block, layers, groups, reduction, drop_rate=0.2,
in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1,
downsample_padding=0, num_classes=1000, global_pool='avg'):
"""
Parameters
----------
block (nn.Module): Bottleneck class.
- For SENet154: SEBottleneck
- For SE-ResNet models: SEResNetBottleneck
- For SE-ResNeXt models: SEResNeXtBottleneck
layers (list of ints): Number of residual blocks for 4 layers of the
network (layer1...layer4).
groups (int): Number of groups for the 3x3 convolution in each
bottleneck block.
- For SENet154: 64
- For SE-ResNet models: 1
- For SE-ResNeXt models: 32
reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
- For all models: 16
        drop_rate (float): Drop probability for the Dropout layer.
            If `0.` the Dropout layer is not used.
            - For SENet154: 0.2
            - For SE-ResNet models: 0.
            - For SE-ResNeXt models: 0.
inplanes (int): Number of input channels for layer1.
- For SENet154: 128
- For SE-ResNet models: 64
- For SE-ResNeXt models: 64
input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
a single 7x7 convolution in layer0.
- For SENet154: True
- For SE-ResNet models: False
- For SE-ResNeXt models: False
downsample_kernel_size (int): Kernel size for downsampling convolutions
in layer2, layer3 and layer4.
- For SENet154: 3
- For SE-ResNet models: 1
- For SE-ResNeXt models: 1
downsample_padding (int): Padding for downsampling convolutions in
layer2, layer3 and layer4.
- For SENet154: 1
- For SE-ResNet models: 0
- For SE-ResNeXt models: 0
num_classes (int): Number of outputs in `last_linear` layer.
- For all models: 1000
"""
super(SENet, self).__init__()
self.inplanes = inplanes
self.num_classes = num_classes
self.drop_rate = drop_rate
if input_3x3:
layer0_modules = [
('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)),
('bn1', nn.BatchNorm2d(64)),
('relu1', nn.ReLU(inplace=True)),
('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),
('bn2', nn.BatchNorm2d(64)),
('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)),
('bn3', nn.BatchNorm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
('conv1', nn.Conv2d(
in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)),
('bn1', nn.BatchNorm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
# To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`.
self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')]
self.layer1 = self._make_layer(
block,
planes=64,
blocks=layers[0],
groups=groups,
reduction=reduction,
downsample_kernel_size=1,
downsample_padding=0
)
self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')]
self.layer2 = self._make_layer(
block,
planes=128,
blocks=layers[1],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')]
self.layer3 = self._make_layer(
block,
planes=256,
blocks=layers[2],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')]
self.layer4 = self._make_layer(
block,
planes=512,
blocks=layers[3],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')]
self.num_features = 512 * block.expansion
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
for m in self.modules():
_weight_init(m)
def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
downsample_kernel_size=1, downsample_padding=0):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size,
stride=stride, padding=downsample_padding, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups, reduction))
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem=r'^layer0', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)')
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.last_linear
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.layer0(x)
x = self.pool0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_senet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(SENet, variant, pretrained, **kwargs)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'layer0.conv1', 'classifier': 'last_linear',
**kwargs
}
default_cfgs = generate_default_cfgs({
'legacy_senet154.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_senet154-e9eb9fe6.pth'),
'legacy_seresnet18.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth',
interpolation='bicubic'),
'legacy_seresnet34.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'),
'legacy_seresnet50.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'),
'legacy_seresnet101.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'),
'legacy_seresnet152.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'),
'legacy_seresnext26_32x4d.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth',
interpolation='bicubic'),
'legacy_seresnext50_32x4d.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext50_32x4d-f3651bad.pth'),
'legacy_seresnext101_32x4d.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext101_32x4d-37725eac.pth'),
})
@register_model
def legacy_seresnet18(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet18', pretrained, **model_args)
@register_model
def legacy_seresnet34(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet34', pretrained, **model_args)
@register_model
def legacy_seresnet50(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet50', pretrained, **model_args)
@register_model
def legacy_seresnet101(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet101', pretrained, **model_args)
@register_model
def legacy_seresnet152(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet152', pretrained, **model_args)
@register_model
def legacy_senet154(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16,
downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs)
return _create_senet('legacy_senet154', pretrained, **model_args)
@register_model
def legacy_seresnext26_32x4d(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args)
@register_model
def legacy_seresnext50_32x4d(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args)
@register_model
def legacy_seresnext101_32x4d(pretrained=False, **kwargs) -> SENet:
model_args = dict(
block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args)
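# NOTE: minimal usage sketch (commented out, not part of the original impl) showing how the
# registered legacy SE variants above are typically instantiated via the timm factory:
#
#   import timm, torch
#   model = timm.create_model('legacy_seresnet50', pretrained=False, num_classes=10)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 10)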
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/visformer.py | """ Visformer
Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533
From original at https://github.com/danczs/Visformer
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['Visformer']
class SpatialMlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.,
group=8,
spatial_conv=False,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = to_2tuple(drop)
self.in_features = in_features
self.out_features = out_features
self.spatial_conv = spatial_conv
if self.spatial_conv:
if group < 2: # net setting
hidden_features = in_features * 5 // 6
else:
hidden_features = in_features * 2
self.hidden_features = hidden_features
self.group = group
self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False)
self.act1 = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
if self.spatial_conv:
self.conv2 = nn.Conv2d(
hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False)
self.act2 = act_layer()
else:
self.conv2 = None
self.act2 = None
self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False)
self.drop3 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.drop1(x)
if self.conv2 is not None:
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.drop3(x)
return x
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.num_heads = num_heads
head_dim = round(dim // num_heads * head_dim_ratio)
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn(experimental=True)
self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, C, H, W = x.shape
x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3)
q, k, v = x.unbind(0)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q.contiguous(), k.contiguous(), v.contiguous(),
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W)
x = self.proj(x)
x = self.proj_drop(x)
return x
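# NOTE: shape walk-through for the conv-based attention above: x (B, C, H, W) passes through the
# 1x1 qkv conv, is reshaped so q, k, v are each (B, num_heads, H*W, head_dim), attention is applied
# over the H*W positions, and the result is folded back to (B, num_heads*head_dim, H, W) before the
# 1x1 output projection.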
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
head_dim_ratio=1.,
mlp_ratio=4.,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=LayerNorm2d,
group=8,
attn_disabled=False,
spatial_conv=False,
):
super().__init__()
self.spatial_conv = spatial_conv
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if attn_disabled:
self.norm1 = None
self.attn = None
else:
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
head_dim_ratio=head_dim_ratio,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.mlp = SpatialMlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
group=group,
spatial_conv=spatial_conv,
)
def forward(self, x):
if self.attn is not None:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Visformer(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
init_channels=32,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4.,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm2d,
attn_stage='111',
use_pos_embed=True,
spatial_conv='111',
vit_stem=False,
group=8,
global_pool='avg',
conv_init=False,
embed_norm=None,
):
super().__init__()
img_size = to_2tuple(img_size)
self.num_classes = num_classes
self.embed_dim = embed_dim
self.init_channels = init_channels
self.img_size = img_size
self.vit_stem = vit_stem
self.conv_init = conv_init
if isinstance(depth, (list, tuple)):
self.stage_num1, self.stage_num2, self.stage_num3 = depth
depth = sum(depth)
else:
self.stage_num1 = self.stage_num3 = depth // 3
self.stage_num2 = depth - self.stage_num1 - self.stage_num3
self.use_pos_embed = use_pos_embed
self.grad_checkpointing = False
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# stage 1
if self.vit_stem:
self.stem = None
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // patch_size for x in img_size]
else:
if self.init_channels is None:
self.stem = None
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 2,
in_chans=in_chans,
embed_dim=embed_dim // 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 2) for x in img_size]
else:
self.stem = nn.Sequential(
nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(self.init_channels),
nn.ReLU(inplace=True)
)
img_size = [x // 2 for x in img_size]
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 4,
in_chans=self.init_channels,
embed_dim=embed_dim // 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 4) for x in img_size]
if self.use_pos_embed:
if self.vit_stem:
self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
else:
self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
else:
self.pos_embed1 = None
self.stage1 = nn.Sequential(*[
Block(
dim=embed_dim//2,
num_heads=num_heads,
head_dim_ratio=0.5,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[0] == '0'),
spatial_conv=(spatial_conv[0] == '1'),
)
for i in range(self.stage_num1)
])
# stage2
if not self.vit_stem:
self.patch_embed2 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 8,
in_chans=embed_dim // 2,
embed_dim=embed_dim,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 8) for x in img_size]
if self.use_pos_embed:
self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
else:
self.pos_embed2 = None
else:
self.patch_embed2 = None
self.stage2 = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=num_heads,
head_dim_ratio=1.0,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[1] == '0'),
spatial_conv=(spatial_conv[1] == '1'),
)
for i in range(self.stage_num1, self.stage_num1+self.stage_num2)
])
# stage 3
if not self.vit_stem:
self.patch_embed3 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 8,
in_chans=embed_dim,
embed_dim=embed_dim * 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 8) for x in img_size]
if self.use_pos_embed:
self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size))
else:
self.pos_embed3 = None
else:
self.patch_embed3 = None
self.stage3 = nn.Sequential(*[
Block(
dim=embed_dim * 2,
num_heads=num_heads,
head_dim_ratio=1.0,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[2] == '0'),
spatial_conv=(spatial_conv[2] == '1'),
)
for i in range(self.stage_num1+self.stage_num2, depth)
])
self.num_features = embed_dim if self.vit_stem else embed_dim * 2
self.norm = norm_layer(self.num_features)
# head
global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
self.global_pool = global_pool
self.head_drop = nn.Dropout(drop_rate)
self.head = head
# weights init
if self.use_pos_embed:
trunc_normal_(self.pos_embed1, std=0.02)
if not self.vit_stem:
trunc_normal_(self.pos_embed2, std=0.02)
trunc_normal_(self.pos_embed3, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
if self.conv_init:
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
else:
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^patch_embed1|pos_embed1|stem', # stem and embed
blocks=[
(r'^stage(\d+)\.(\d+)' if coarse else r'^stage(\d+)\.(\d+)', None),
(r'^(?:patch_embed|pos_embed)(\d+)', (0,)),
(r'^norm', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
if self.stem is not None:
x = self.stem(x)
# stage 1
x = self.patch_embed1(x)
if self.pos_embed1 is not None:
x = self.pos_drop(x + self.pos_embed1)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage1, x)
else:
x = self.stage1(x)
# stage 2
if self.patch_embed2 is not None:
x = self.patch_embed2(x)
if self.pos_embed2 is not None:
x = self.pos_drop(x + self.pos_embed2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage2, x)
else:
x = self.stage2(x)
# stage3
if self.patch_embed3 is not None:
x = self.patch_embed3(x)
if self.pos_embed3 is not None:
x = self.pos_drop(x + self.pos_embed3)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage3, x)
else:
x = self.stage3(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'),
'visformer_small.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def visformer_tiny(pretrained=False, **kwargs) -> Visformer:
model_cfg = dict(
init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8,
attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
embed_norm=nn.BatchNorm2d)
model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def visformer_small(pretrained=False, **kwargs) -> Visformer:
model_cfg = dict(
init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8,
attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
embed_norm=nn.BatchNorm2d)
model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
# @register_model
# def visformer_net1(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net2(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net3(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net4(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net5(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
# spatial_conv='111', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net6(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net7(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000',
# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
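# NOTE: minimal usage sketch (commented out, not part of the original impl) for the registered
# variants above; the default cfgs use fixed_input_size=True, so 224x224 inputs are expected:
#
#   import timm, torch
#   model = timm.create_model('visformer_tiny', pretrained=False)
#   out = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000)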
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/mvitv2.py | """ Multi-Scale Vision Transformer v2
@inproceedings{li2021improved,
title={MViTv2: Improved multiscale vision transformers for classification and detection},
author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph},
booktitle={CVPR},
year={2022}
}
Code adapted from original Apache 2.0 licensed impl at https://github.com/facebookresearch/mvit
Original copyright below.
Modifications and timm support by / Copyright 2022, Ross Wightman
"""
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
import operator
from collections import OrderedDict
from dataclasses import dataclass
from functools import partial, reduce
from typing import Union, List, Tuple, Optional
import torch
import torch.utils.checkpoint as checkpoint
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, DropPath, trunc_normal_tf_, get_norm_layer, to_2tuple
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._registry import register_model, register_model_deprecations, generate_default_cfgs
__all__ = ['MultiScaleVit', 'MultiScaleVitCfg'] # model_registry will add each entrypoint fn to this
@dataclass
class MultiScaleVitCfg:
depths: Tuple[int, ...] = (2, 3, 16, 3)
embed_dim: Union[int, Tuple[int, ...]] = 96
num_heads: Union[int, Tuple[int, ...]] = 1
mlp_ratio: float = 4.
pool_first: bool = False
expand_attn: bool = True
qkv_bias: bool = True
use_cls_token: bool = False
use_abs_pos: bool = False
residual_pooling: bool = True
mode: str = 'conv'
kernel_qkv: Tuple[int, int] = (3, 3)
stride_q: Optional[Tuple[Tuple[int, int]]] = ((1, 1), (2, 2), (2, 2), (2, 2))
stride_kv: Optional[Tuple[Tuple[int, int]]] = None
stride_kv_adaptive: Optional[Tuple[int, int]] = (4, 4)
patch_kernel: Tuple[int, int] = (7, 7)
patch_stride: Tuple[int, int] = (4, 4)
patch_padding: Tuple[int, int] = (3, 3)
pool_type: str = 'max'
rel_pos_type: str = 'spatial'
act_layer: Union[str, Tuple[str, str]] = 'gelu'
norm_layer: Union[str, Tuple[str, str]] = 'layernorm'
norm_eps: float = 1e-6
def __post_init__(self):
num_stages = len(self.depths)
if not isinstance(self.embed_dim, (tuple, list)):
self.embed_dim = tuple(self.embed_dim * 2 ** i for i in range(num_stages))
assert len(self.embed_dim) == num_stages
if not isinstance(self.num_heads, (tuple, list)):
self.num_heads = tuple(self.num_heads * 2 ** i for i in range(num_stages))
assert len(self.num_heads) == num_stages
if self.stride_kv_adaptive is not None and self.stride_kv is None:
_stride_kv = self.stride_kv_adaptive
pool_kv_stride = []
for i in range(num_stages):
if min(self.stride_q[i]) > 1:
_stride_kv = [
max(_stride_kv[d] // self.stride_q[i][d], 1)
for d in range(len(_stride_kv))
]
pool_kv_stride.append(tuple(_stride_kv))
self.stride_kv = tuple(pool_kv_stride)
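# NOTE: worked example for the adaptive kv-stride logic above. With the defaults
# stride_q=((1, 1), (2, 2), (2, 2), (2, 2)) and stride_kv_adaptive=(4, 4), the kv stride is halved
# each time q is strided, giving stride_kv=((4, 4), (2, 2), (1, 1), (1, 1)), so key/value pooling
# lands on a similar spatial resolution across stages.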
def prod(iterable):
return reduce(operator.mul, iterable, 1)
class PatchEmbed(nn.Module):
"""
PatchEmbed.
"""
def __init__(
self,
dim_in=3,
dim_out=768,
kernel=(7, 7),
stride=(4, 4),
padding=(3, 3),
):
super().__init__()
self.proj = nn.Conv2d(
dim_in,
dim_out,
kernel_size=kernel,
stride=stride,
padding=padding,
)
def forward(self, x) -> Tuple[torch.Tensor, List[int]]:
x = self.proj(x)
# B C H W -> B HW C
return x.flatten(2).transpose(1, 2), x.shape[-2:]
@register_notrace_function
def reshape_pre_pool(
x,
feat_size: List[int],
has_cls_token: bool = True
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
H, W = feat_size
if has_cls_token:
cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :]
else:
cls_tok = None
x = x.reshape(-1, H, W, x.shape[-1]).permute(0, 3, 1, 2).contiguous()
return x, cls_tok
@register_notrace_function
def reshape_post_pool(
x,
num_heads: int,
cls_tok: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, List[int]]:
feat_size = [x.shape[2], x.shape[3]]
L_pooled = x.shape[2] * x.shape[3]
x = x.reshape(-1, num_heads, x.shape[1], L_pooled).transpose(2, 3)
if cls_tok is not None:
x = torch.cat((cls_tok, x), dim=2)
return x, feat_size
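# NOTE: shape round-trip for the two helpers above: attention tensors of shape
# (B, num_heads, N, head_dim) are folded to (B * num_heads, head_dim, H, W) so a per-head
# Conv2d/pool can be applied, then unfolded back to (B, num_heads, H' * W', head_dim) with the
# class token (if present) re-attached at the front of the sequence.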
@register_notrace_function
def cal_rel_pos_type(
attn: torch.Tensor,
q: torch.Tensor,
has_cls_token: bool,
q_size: List[int],
k_size: List[int],
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
):
"""
Spatial Relative Positional Embeddings.
"""
sp_idx = 1 if has_cls_token else 0
q_h, q_w = q_size
k_h, k_w = k_size
# Scale up rel pos if shapes for q and k are different.
q_h_ratio = max(k_h / q_h, 1.0)
k_h_ratio = max(q_h / k_h, 1.0)
dist_h = (
torch.arange(q_h, device=q.device).unsqueeze(-1) * q_h_ratio -
torch.arange(k_h, device=q.device).unsqueeze(0) * k_h_ratio
)
dist_h += (k_h - 1) * k_h_ratio
q_w_ratio = max(k_w / q_w, 1.0)
k_w_ratio = max(q_w / k_w, 1.0)
dist_w = (
torch.arange(q_w, device=q.device).unsqueeze(-1) * q_w_ratio -
torch.arange(k_w, device=q.device).unsqueeze(0) * k_w_ratio
)
dist_w += (k_w - 1) * k_w_ratio
rel_h = rel_pos_h[dist_h.long()]
rel_w = rel_pos_w[dist_w.long()]
B, n_head, q_N, dim = q.shape
r_q = q[:, :, sp_idx:].reshape(B, n_head, q_h, q_w, dim)
rel_h = torch.einsum("byhwc,hkc->byhwk", r_q, rel_h)
rel_w = torch.einsum("byhwc,wkc->byhwk", r_q, rel_w)
attn[:, :, sp_idx:, sp_idx:] = (
attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w)
+ rel_h.unsqueeze(-1)
+ rel_w.unsqueeze(-2)
).view(B, -1, q_h * q_w, k_h * k_w)
return attn
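# NOTE: indexing example for the relative position lookup above. When q and k sizes match, the
# ratios are 1.0 and dist_h[i, j] = i - j + (k_h - 1), mapping every offset in [-(k_h-1), k_h-1]
# to a valid row of rel_pos_h (which has 2 * max(q_size, kv_size) - 1 rows). E.g. q_h = k_h = 3
# gives dist_h = [[2, 1, 0], [3, 2, 1], [4, 3, 2]], indexing 5 learned relative-height embeddings.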
class MultiScaleAttentionPoolFirst(nn.Module):
def __init__(
self,
dim,
dim_out,
feat_size,
num_heads=8,
qkv_bias=True,
mode="conv",
kernel_q=(1, 1),
kernel_kv=(1, 1),
stride_q=(1, 1),
stride_kv=(1, 1),
has_cls_token=True,
rel_pos_type='spatial',
residual_pooling=True,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.num_heads = num_heads
self.dim_out = dim_out
self.head_dim = dim_out // num_heads
self.scale = self.head_dim ** -0.5
self.has_cls_token = has_cls_token
padding_q = tuple([int(q // 2) for q in kernel_q])
padding_kv = tuple([int(kv // 2) for kv in kernel_kv])
self.q = nn.Linear(dim, dim_out, bias=qkv_bias)
self.k = nn.Linear(dim, dim_out, bias=qkv_bias)
self.v = nn.Linear(dim, dim_out, bias=qkv_bias)
self.proj = nn.Linear(dim_out, dim_out)
# Skip pooling with kernel and stride size of (1, 1, 1).
if prod(kernel_q) == 1 and prod(stride_q) == 1:
kernel_q = None
if prod(kernel_kv) == 1 and prod(stride_kv) == 1:
kernel_kv = None
self.mode = mode
self.unshared = mode == 'conv_unshared'
self.pool_q, self.pool_k, self.pool_v = None, None, None
self.norm_q, self.norm_k, self.norm_v = None, None, None
if mode in ("avg", "max"):
pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d
if kernel_q:
self.pool_q = pool_op(kernel_q, stride_q, padding_q)
if kernel_kv:
self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv)
self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv)
elif mode == "conv" or mode == "conv_unshared":
dim_conv = dim // num_heads if mode == "conv" else dim
if kernel_q:
self.pool_q = nn.Conv2d(
dim_conv,
dim_conv,
kernel_q,
stride=stride_q,
padding=padding_q,
groups=dim_conv,
bias=False,
)
self.norm_q = norm_layer(dim_conv)
if kernel_kv:
self.pool_k = nn.Conv2d(
dim_conv,
dim_conv,
kernel_kv,
stride=stride_kv,
padding=padding_kv,
groups=dim_conv,
bias=False,
)
self.norm_k = norm_layer(dim_conv)
self.pool_v = nn.Conv2d(
dim_conv,
dim_conv,
kernel_kv,
stride=stride_kv,
padding=padding_kv,
groups=dim_conv,
bias=False,
)
self.norm_v = norm_layer(dim_conv)
else:
            raise NotImplementedError(f"Unsupported mode {mode}")
# relative pos embedding
self.rel_pos_type = rel_pos_type
if self.rel_pos_type == 'spatial':
assert feat_size[0] == feat_size[1]
size = feat_size[0]
q_size = size // stride_q[1] if len(stride_q) > 0 else size
kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size
rel_sp_dim = 2 * max(q_size, kv_size) - 1
self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim))
trunc_normal_tf_(self.rel_pos_h, std=0.02)
trunc_normal_tf_(self.rel_pos_w, std=0.02)
self.residual_pooling = residual_pooling
def forward(self, x, feat_size: List[int]):
B, N, _ = x.shape
fold_dim = 1 if self.unshared else self.num_heads
x = x.reshape(B, N, fold_dim, -1).permute(0, 2, 1, 3)
q = k = v = x
if self.pool_q is not None:
q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token)
q = self.pool_q(q)
q, q_size = reshape_post_pool(q, self.num_heads, q_tok)
else:
q_size = feat_size
if self.norm_q is not None:
q = self.norm_q(q)
if self.pool_k is not None:
k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token)
k = self.pool_k(k)
k, k_size = reshape_post_pool(k, self.num_heads, k_tok)
else:
k_size = feat_size
if self.norm_k is not None:
k = self.norm_k(k)
if self.pool_v is not None:
v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token)
v = self.pool_v(v)
v, v_size = reshape_post_pool(v, self.num_heads, v_tok)
else:
v_size = feat_size
if self.norm_v is not None:
v = self.norm_v(v)
q_N = q_size[0] * q_size[1] + int(self.has_cls_token)
q = q.transpose(1, 2).reshape(B, q_N, -1)
q = self.q(q).reshape(B, q_N, self.num_heads, -1).transpose(1, 2)
k_N = k_size[0] * k_size[1] + int(self.has_cls_token)
k = k.transpose(1, 2).reshape(B, k_N, -1)
        k = self.k(k).reshape(B, k_N, self.num_heads, -1).permute(0, 2, 3, 1)  # -> (B, heads, head_dim, k_N) so the matmul with q below conforms
v_N = v_size[0] * v_size[1] + int(self.has_cls_token)
v = v.transpose(1, 2).reshape(B, v_N, -1)
v = self.v(v).reshape(B, v_N, self.num_heads, -1).transpose(1, 2)
attn = (q * self.scale) @ k
if self.rel_pos_type == 'spatial':
attn = cal_rel_pos_type(
attn,
q,
self.has_cls_token,
q_size,
k_size,
self.rel_pos_h,
self.rel_pos_w,
)
attn = attn.softmax(dim=-1)
x = attn @ v
if self.residual_pooling:
x = x + q
x = x.transpose(1, 2).reshape(B, -1, self.dim_out)
x = self.proj(x)
return x, q_size
class MultiScaleAttention(nn.Module):
def __init__(
self,
dim,
dim_out,
feat_size,
num_heads=8,
qkv_bias=True,
mode="conv",
kernel_q=(1, 1),
kernel_kv=(1, 1),
stride_q=(1, 1),
stride_kv=(1, 1),
has_cls_token=True,
rel_pos_type='spatial',
residual_pooling=True,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.num_heads = num_heads
self.dim_out = dim_out
self.head_dim = dim_out // num_heads
self.scale = self.head_dim ** -0.5
self.has_cls_token = has_cls_token
padding_q = tuple([int(q // 2) for q in kernel_q])
padding_kv = tuple([int(kv // 2) for kv in kernel_kv])
self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias)
self.proj = nn.Linear(dim_out, dim_out)
# Skip pooling with kernel and stride size of (1, 1, 1).
if prod(kernel_q) == 1 and prod(stride_q) == 1:
kernel_q = None
if prod(kernel_kv) == 1 and prod(stride_kv) == 1:
kernel_kv = None
self.mode = mode
self.unshared = mode == 'conv_unshared'
self.norm_q, self.norm_k, self.norm_v = None, None, None
self.pool_q, self.pool_k, self.pool_v = None, None, None
if mode in ("avg", "max"):
pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d
if kernel_q:
self.pool_q = pool_op(kernel_q, stride_q, padding_q)
if kernel_kv:
self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv)
self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv)
elif mode == "conv" or mode == "conv_unshared":
dim_conv = dim_out // num_heads if mode == "conv" else dim_out
if kernel_q:
self.pool_q = nn.Conv2d(
dim_conv,
dim_conv,
kernel_q,
stride=stride_q,
padding=padding_q,
groups=dim_conv,
bias=False,
)
self.norm_q = norm_layer(dim_conv)
if kernel_kv:
self.pool_k = nn.Conv2d(
dim_conv,
dim_conv,
kernel_kv,
stride=stride_kv,
padding=padding_kv,
groups=dim_conv,
bias=False,
)
self.norm_k = norm_layer(dim_conv)
self.pool_v = nn.Conv2d(
dim_conv,
dim_conv,
kernel_kv,
stride=stride_kv,
padding=padding_kv,
groups=dim_conv,
bias=False,
)
self.norm_v = norm_layer(dim_conv)
else:
            raise NotImplementedError(f"Unsupported mode {mode}")
# relative pos embedding
self.rel_pos_type = rel_pos_type
if self.rel_pos_type == 'spatial':
assert feat_size[0] == feat_size[1]
size = feat_size[0]
q_size = size // stride_q[1] if len(stride_q) > 0 else size
kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size
rel_sp_dim = 2 * max(q_size, kv_size) - 1
self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim))
trunc_normal_tf_(self.rel_pos_h, std=0.02)
trunc_normal_tf_(self.rel_pos_w, std=0.02)
self.residual_pooling = residual_pooling
def forward(self, x, feat_size: List[int]):
B, N, _ = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(dim=0)
if self.pool_q is not None:
q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token)
q = self.pool_q(q)
q, q_size = reshape_post_pool(q, self.num_heads, q_tok)
else:
q_size = feat_size
if self.norm_q is not None:
q = self.norm_q(q)
if self.pool_k is not None:
k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token)
k = self.pool_k(k)
k, k_size = reshape_post_pool(k, self.num_heads, k_tok)
else:
k_size = feat_size
if self.norm_k is not None:
k = self.norm_k(k)
if self.pool_v is not None:
v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token)
v = self.pool_v(v)
v, _ = reshape_post_pool(v, self.num_heads, v_tok)
if self.norm_v is not None:
v = self.norm_v(v)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.rel_pos_type == 'spatial':
attn = cal_rel_pos_type(
attn,
q,
self.has_cls_token,
q_size,
k_size,
self.rel_pos_h,
self.rel_pos_w,
)
attn = attn.softmax(dim=-1)
x = attn @ v
if self.residual_pooling:
x = x + q
x = x.transpose(1, 2).reshape(B, -1, self.dim_out)
x = self.proj(x)
return x, q_size
class MultiScaleBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
num_heads,
feat_size,
mlp_ratio=4.0,
qkv_bias=True,
drop_path=0.0,
norm_layer=nn.LayerNorm,
kernel_q=(1, 1),
kernel_kv=(1, 1),
stride_q=(1, 1),
stride_kv=(1, 1),
mode="conv",
has_cls_token=True,
expand_attn=False,
pool_first=False,
rel_pos_type='spatial',
residual_pooling=True,
):
super().__init__()
proj_needed = dim != dim_out
self.dim = dim
self.dim_out = dim_out
self.has_cls_token = has_cls_token
self.norm1 = norm_layer(dim)
self.shortcut_proj_attn = nn.Linear(dim, dim_out) if proj_needed and expand_attn else None
if stride_q and prod(stride_q) > 1:
kernel_skip = [s + 1 if s > 1 else s for s in stride_q]
stride_skip = stride_q
padding_skip = [int(skip // 2) for skip in kernel_skip]
self.shortcut_pool_attn = nn.MaxPool2d(kernel_skip, stride_skip, padding_skip)
else:
self.shortcut_pool_attn = None
att_dim = dim_out if expand_attn else dim
attn_layer = MultiScaleAttentionPoolFirst if pool_first else MultiScaleAttention
self.attn = attn_layer(
dim,
att_dim,
num_heads=num_heads,
feat_size=feat_size,
qkv_bias=qkv_bias,
kernel_q=kernel_q,
kernel_kv=kernel_kv,
stride_q=stride_q,
stride_kv=stride_kv,
norm_layer=norm_layer,
has_cls_token=has_cls_token,
mode=mode,
rel_pos_type=rel_pos_type,
residual_pooling=residual_pooling,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(att_dim)
mlp_dim_out = dim_out
self.shortcut_proj_mlp = nn.Linear(dim, dim_out) if proj_needed and not expand_attn else None
self.mlp = Mlp(
in_features=att_dim,
hidden_features=int(att_dim * mlp_ratio),
out_features=mlp_dim_out,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def _shortcut_pool(self, x, feat_size: List[int]):
if self.shortcut_pool_attn is None:
return x
if self.has_cls_token:
cls_tok, x = x[:, :1, :], x[:, 1:, :]
else:
cls_tok = None
B, L, C = x.shape
H, W = feat_size
x = x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous()
x = self.shortcut_pool_attn(x)
x = x.reshape(B, C, -1).transpose(1, 2)
if cls_tok is not None:
x = torch.cat((cls_tok, x), dim=1)
return x
def forward(self, x, feat_size: List[int]):
x_norm = self.norm1(x)
# NOTE as per the original impl, this seems odd, but shortcut uses un-normalized input if no proj
x_shortcut = x if self.shortcut_proj_attn is None else self.shortcut_proj_attn(x_norm)
x_shortcut = self._shortcut_pool(x_shortcut, feat_size)
x, feat_size_new = self.attn(x_norm, feat_size)
x = x_shortcut + self.drop_path1(x)
x_norm = self.norm2(x)
x_shortcut = x if self.shortcut_proj_mlp is None else self.shortcut_proj_mlp(x_norm)
x = x_shortcut + self.drop_path2(self.mlp(x_norm))
return x, feat_size_new
class MultiScaleVitStage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
num_heads,
feat_size,
mlp_ratio=4.0,
qkv_bias=True,
mode="conv",
kernel_q=(1, 1),
kernel_kv=(1, 1),
stride_q=(1, 1),
stride_kv=(1, 1),
has_cls_token=True,
expand_attn=False,
pool_first=False,
rel_pos_type='spatial',
residual_pooling=True,
norm_layer=nn.LayerNorm,
drop_path=0.0,
):
super().__init__()
self.grad_checkpointing = False
self.blocks = nn.ModuleList()
if expand_attn:
out_dims = (dim_out,) * depth
else:
out_dims = (dim,) * (depth - 1) + (dim_out,)
for i in range(depth):
attention_block = MultiScaleBlock(
dim=dim,
dim_out=out_dims[i],
num_heads=num_heads,
feat_size=feat_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
kernel_q=kernel_q,
kernel_kv=kernel_kv,
stride_q=stride_q if i == 0 else (1, 1),
stride_kv=stride_kv,
mode=mode,
has_cls_token=has_cls_token,
pool_first=pool_first,
rel_pos_type=rel_pos_type,
residual_pooling=residual_pooling,
expand_attn=expand_attn,
norm_layer=norm_layer,
drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path,
)
dim = out_dims[i]
self.blocks.append(attention_block)
if i == 0:
feat_size = tuple([size // stride for size, stride in zip(feat_size, stride_q)])
self.feat_size = feat_size
def forward(self, x, feat_size: List[int]):
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x, feat_size = checkpoint.checkpoint(blk, x, feat_size)
else:
x, feat_size = blk(x, feat_size)
return x, feat_size
class MultiScaleVit(nn.Module):
"""
Improved Multiscale Vision Transformers for Classification and Detection
Yanghao Li*, Chao-Yuan Wu*, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik,
Christoph Feichtenhofer*
https://arxiv.org/abs/2112.01526
Multiscale Vision Transformers
Haoqi Fan*, Bo Xiong*, Karttikeya Mangalam*, Yanghao Li*, Zhicheng Yan, Jitendra Malik,
Christoph Feichtenhofer*
https://arxiv.org/abs/2104.11227
"""
def __init__(
self,
cfg: MultiScaleVitCfg,
img_size: Tuple[int, int] = (224, 224),
in_chans: int = 3,
global_pool: Optional[str] = None,
num_classes: int = 1000,
drop_path_rate: float = 0.,
drop_rate: float = 0.,
):
super().__init__()
img_size = to_2tuple(img_size)
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
self.num_classes = num_classes
self.drop_rate = drop_rate
if global_pool is None:
global_pool = 'token' if cfg.use_cls_token else 'avg'
self.global_pool = global_pool
self.depths = tuple(cfg.depths)
self.expand_attn = cfg.expand_attn
embed_dim = cfg.embed_dim[0]
self.patch_embed = PatchEmbed(
dim_in=in_chans,
dim_out=embed_dim,
kernel=cfg.patch_kernel,
stride=cfg.patch_stride,
padding=cfg.patch_padding,
)
patch_dims = (img_size[0] // cfg.patch_stride[0], img_size[1] // cfg.patch_stride[1])
num_patches = prod(patch_dims)
if cfg.use_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.num_prefix_tokens = 1
pos_embed_dim = num_patches + 1
else:
self.num_prefix_tokens = 0
self.cls_token = None
pos_embed_dim = num_patches
if cfg.use_abs_pos:
self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_dim, embed_dim))
else:
self.pos_embed = None
num_stages = len(cfg.embed_dim)
feat_size = patch_dims
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
self.stages = nn.ModuleList()
for i in range(num_stages):
if cfg.expand_attn:
dim_out = cfg.embed_dim[i]
else:
dim_out = cfg.embed_dim[min(i + 1, num_stages - 1)]
stage = MultiScaleVitStage(
dim=embed_dim,
dim_out=dim_out,
depth=cfg.depths[i],
num_heads=cfg.num_heads[i],
feat_size=feat_size,
mlp_ratio=cfg.mlp_ratio,
qkv_bias=cfg.qkv_bias,
mode=cfg.mode,
pool_first=cfg.pool_first,
expand_attn=cfg.expand_attn,
kernel_q=cfg.kernel_qkv,
kernel_kv=cfg.kernel_qkv,
stride_q=cfg.stride_q[i],
stride_kv=cfg.stride_kv[i],
has_cls_token=cfg.use_cls_token,
rel_pos_type=cfg.rel_pos_type,
residual_pooling=cfg.residual_pooling,
norm_layer=norm_layer,
drop_path=dpr[i],
)
embed_dim = dim_out
feat_size = stage.feat_size
self.stages.append(stage)
self.num_features = embed_dim
self.norm = norm_layer(embed_dim)
self.head = nn.Sequential(OrderedDict([
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
if self.pos_embed is not None:
trunc_normal_tf_(self.pos_embed, std=0.02)
if self.cls_token is not None:
trunc_normal_tf_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_tf_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
@torch.jit.ignore
def no_weight_decay(self):
return {k for k, _ in self.named_parameters()
if any(n in k for n in ["pos_embed", "rel_pos_h", "rel_pos_w", "cls_token"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Sequential(OrderedDict([
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
def forward_features(self, x):
x, feat_size = self.patch_embed(x)
B, N, C = x.shape
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
for stage in self.stages:
x, feat_size = stage(x, feat_size)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
if self.global_pool == 'avg':
x = x[:, self.num_prefix_tokens:].mean(1)
else:
x = x[:, 0]
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
if 'stages.0.blocks.0.norm1.weight' in state_dict:
return state_dict
import re
if 'model_state' in state_dict:
state_dict = state_dict['model_state']
depths = getattr(model, 'depths', None)
expand_attn = getattr(model, 'expand_attn', True)
assert depths is not None, 'model requires depth attribute to remap checkpoints'
depth_map = {}
block_idx = 0
for stage_idx, d in enumerate(depths):
depth_map.update({i: (stage_idx, i - block_idx) for i in range(block_idx, block_idx + d)})
block_idx += d
out_dict = {}
for k, v in state_dict.items():
k = re.sub(
r'blocks\.(\d+)',
lambda x: f'stages.{depth_map[int(x.group(1))][0]}.blocks.{depth_map[int(x.group(1))][1]}',
k)
        if expand_attn:
            k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', r'stages.\1.blocks.\2.shortcut_proj_attn', k)
        else:
            k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', r'stages.\1.blocks.\2.shortcut_proj_mlp', k)
if 'head' in k:
k = k.replace('head.projection', 'head.fc')
out_dict[k] = v
# for k, v in state_dict.items():
# if model.pos_embed is not None and k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# # To resize pos embedding when using model at different size from pretrained weights
# v = resize_pos_embed(
# v,
# model.pos_embed,
# 0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1),
# model.patch_embed.grid_size
# )
return out_dict
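# NOTE: worked example for the block remapping above. With depths=(1, 2, 5, 2), depth_map becomes
# {0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (2, 0), ..., 7: (2, 4), 8: (3, 0), 9: (3, 1)}, so an
# original checkpoint key 'blocks.3.attn.qkv.weight' is renamed to 'stages.2.blocks.0.attn.qkv.weight'.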
model_cfgs = dict(
mvitv2_tiny=MultiScaleVitCfg(
depths=(1, 2, 5, 2),
),
mvitv2_small=MultiScaleVitCfg(
depths=(1, 2, 11, 2),
),
mvitv2_base=MultiScaleVitCfg(
depths=(2, 3, 16, 3),
),
mvitv2_large=MultiScaleVitCfg(
depths=(2, 6, 36, 4),
embed_dim=144,
num_heads=2,
expand_attn=False,
),
mvitv2_small_cls=MultiScaleVitCfg(
depths=(1, 2, 11, 2),
use_cls_token=True,
),
mvitv2_base_cls=MultiScaleVitCfg(
depths=(2, 3, 16, 3),
use_cls_token=True,
),
mvitv2_large_cls=MultiScaleVitCfg(
depths=(2, 6, 36, 4),
embed_dim=144,
num_heads=2,
use_cls_token=True,
expand_attn=True,
),
mvitv2_huge_cls=MultiScaleVitCfg(
depths=(4, 8, 60, 8),
embed_dim=192,
num_heads=3,
use_cls_token=True,
expand_attn=True,
),
)
def _create_mvitv2(variant, cfg_variant=None, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Multiscale Vision Transformer models.')
return build_model_with_cfg(
MultiScaleVit,
variant,
pretrained,
model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
default_cfgs = generate_default_cfgs({
'mvitv2_tiny.fb_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth',
hf_hub_id='timm/'),
'mvitv2_small.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth',
hf_hub_id='timm/'),
'mvitv2_base.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth',
hf_hub_id='timm/'),
'mvitv2_large.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth',
hf_hub_id='timm/'),
'mvitv2_small_cls': _cfg(url=''),
'mvitv2_base_cls.fb_inw21k': _cfg(
url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in21k.pyth',
hf_hub_id='timm/',
num_classes=19168),
'mvitv2_large_cls.fb_inw21k': _cfg(
url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in21k.pyth',
hf_hub_id='timm/',
num_classes=19168),
'mvitv2_huge_cls.fb_inw21k': _cfg(
url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_H_in21k.pyth',
hf_hub_id='timm/',
num_classes=19168),
})
@register_model
def mvitv2_tiny(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_tiny', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_small(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_small', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_base(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_base', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_large(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_large', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_small_cls(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_small_cls', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_base_cls(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_base_cls', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_large_cls(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_large_cls', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_huge_cls(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_huge_cls', pretrained=pretrained, **kwargs)
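# NOTE: minimal usage sketch (commented out, not part of the original impl); the default cfgs use
# fixed_input_size=True, so pretrained weights expect 224x224 inputs:
#
#   import timm, torch
#   model = timm.create_model('mvitv2_tiny', pretrained=False)
#   out = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000)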
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/efficientnet.py | """ The EfficientNet Family in PyTorch
An implementation of EfficientNet that covers a variety of related models with efficient architectures:
* EfficientNet-V2
- `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports)
- EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946
- CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971
- Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665
- Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252
* MixNet (Small, Medium, and Large)
- MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595
* MNasNet B1, A1 (SE), Small
- MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626
* FBNet-C
- FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443
* Single-Path NAS Pixel1
- Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877
* TinyNet
- Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819
- Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch
* And likely more...
The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available
by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing
the models and weights open source!
Hacked together by / Copyright 2019, Ross Wightman
"""
from functools import partial
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_conv2d, create_classifier, get_norm_act_layer, GroupNormAct
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \
round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT
from ._features import FeatureInfo, FeatureHooks
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['EfficientNet', 'EfficientNetFeatures']
class EfficientNet(nn.Module):
""" EfficientNet
A flexible and performant PyTorch implementation of efficient network architectures, including:
* EfficientNet-V2 Small, Medium, Large, XL & B0-B3
* EfficientNet B0-B8, L2
* EfficientNet-EdgeTPU
* EfficientNet-CondConv
* MixNet S, M, L, XL
* MnasNet A1, B1, and small
* MobileNet-V2
* FBNet C
* Single-Path NAS Pixel1
* TinyNet
"""
def __init__(
self,
block_args,
num_classes=1000,
num_features=1280,
in_chans=3,
stem_size=32,
fix_stem=False,
output_stride=32,
pad_type='',
round_chs_fn=round_channels,
act_layer=None,
norm_layer=None,
se_layer=None,
drop_rate=0.,
drop_path_rate=0.,
global_pool='avg'
):
super(EfficientNet, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.num_classes = num_classes
self.num_features = num_features
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_act_layer(stem_size, inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
act_layer=act_layer,
norm_layer=norm_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = builder.features
head_chs = builder.in_chs
# Head + Pooling
self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type)
self.bn2 = norm_act_layer(self.num_features, inplace=True)
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
efficientnet_init_weights(self)
def as_sequential(self):
layers = [self.conv_stem, self.bn1]
layers.extend(self.blocks)
layers.extend([self.conv_head, self.bn2, self.global_pool])
layers.extend([nn.Dropout(self.drop_rate), self.classifier])
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^conv_stem|bn1',
blocks=[
(r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None),
(r'conv_head|bn2', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
x = self.conv_head(x)
x = self.bn2(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return x if pre_logits else self.classifier(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
class EfficientNetFeatures(nn.Module):
""" EfficientNet Feature Extractor
A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation
and object detection models.
"""
def __init__(
self,
block_args,
out_indices=(0, 1, 2, 3, 4),
feature_location='bottleneck',
in_chans=3,
stem_size=32,
fix_stem=False,
output_stride=32,
pad_type='',
round_chs_fn=round_channels,
act_layer=None,
norm_layer=None,
se_layer=None,
drop_rate=0.,
drop_path_rate=0.
):
super(EfficientNetFeatures, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_act_layer(stem_size, inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
act_layer=act_layer,
norm_layer=norm_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
feature_location=feature_location,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = FeatureInfo(builder.features, out_indices)
self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()}
efficientnet_init_weights(self)
# Register feature extraction hooks with FeatureHooks helper
self.feature_hooks = None
if feature_location != 'bottleneck':
hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
self.feature_hooks = FeatureHooks(hooks, self.named_modules())
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x) -> List[torch.Tensor]:
x = self.conv_stem(x)
x = self.bn1(x)
if self.feature_hooks is None:
features = []
if 0 in self._stage_out_idx:
features.append(x) # add stem out
for i, b in enumerate(self.blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(b, x)
else:
x = b(x)
if i + 1 in self._stage_out_idx:
features.append(x)
return features
else:
self.blocks(x)
out = self.feature_hooks.get_output(x.device)
return list(out.values())
def _create_effnet(variant, pretrained=False, **kwargs):
features_mode = ''
model_cls = EfficientNet
kwargs_filter = None
if kwargs.pop('features_only', False):
if 'feature_cfg' in kwargs:
features_mode = 'cfg'
else:
kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool')
model_cls = EfficientNetFeatures
features_mode = 'cls'
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
features_only=features_mode == 'cfg',
pretrained_strict=features_mode != 'cls',
kwargs_filter=kwargs_filter,
**kwargs,
)
if features_mode == 'cls':
model.pretrained_cfg = model.default_cfg = pretrained_cfg_for_features(model.pretrained_cfg)
return model
def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a mnasnet-a1 model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
Paper: https://arxiv.org/pdf/1807.11626.pdf.
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_noskip'],
# stage 1, 112x112 in
['ir_r2_k3_s2_e6_c24'],
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25'],
# stage 3, 28x28 in
['ir_r4_k3_s2_e6_c80'],
        # stage 4, 14x14 in
['ir_r2_k3_s1_e6_c112_se0.25'],
        # stage 5, 14x14 in
['ir_r3_k5_s2_e6_c160_se0.25'],
# stage 6, 7x7 in
['ir_r1_k3_s1_e6_c320'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
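# A brief gloss of the arch_def block strings decoded above (follows decode_arch_def's string grammar;
# an illustrative reading only, see the decoder for the authoritative parse):
#
#   'ir_r2_k3_s2_e6_c24'  ->  InvertedResidual block, repeated 2x, 3x3 depthwise kernel,
#                             stride 2 on the first repeat, expansion ratio 6, 24 output channels
#   suffixes: '_se0.25' adds squeeze-excite at 0.25 ratio, '_noskip' disables the residual,
#   and prefixes 'ds' / 'er' / 'cn' select DepthwiseSeparable / EdgeResidual / ConvBnAct blocks.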
def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a mnasnet-b1 model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
Paper: https://arxiv.org/pdf/1807.11626.pdf.
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_c16_noskip'],
# stage 1, 112x112 in
['ir_r3_k3_s2_e3_c24'],
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40'],
# stage 3, 28x28 in
['ir_r3_k5_s2_e6_c80'],
        # stage 4, 14x14 in
['ir_r2_k3_s1_e6_c96'],
        # stage 5, 14x14 in
['ir_r4_k5_s2_e6_c192'],
# stage 6, 7x7 in
['ir_r1_k3_s1_e6_c320_noskip']
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a mnasnet-b1 model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
Paper: https://arxiv.org/pdf/1807.11626.pdf.
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
['ds_r1_k3_s1_c8'],
['ir_r1_k3_s2_e3_c16'],
['ir_r2_k3_s2_e6_c16'],
['ir_r4_k5_s2_e6_c32_se0.25'],
['ir_r3_k3_s1_e6_c32_se0.25'],
['ir_r3_k5_s2_e6_c88_se0.25'],
['ir_r1_k3_s1_e6_c144']
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=8,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mobilenet_v2(
variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs):
""" Generate MobileNet-V2 network
Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py
Paper: https://arxiv.org/abs/1801.04381
"""
arch_def = [
['ds_r1_k3_s1_c16'],
['ir_r2_k3_s2_e6_c24'],
['ir_r3_k3_s2_e6_c32'],
['ir_r4_k3_s2_e6_c64'],
['ir_r3_k3_s1_e6_c96'],
['ir_r3_k3_s2_e6_c160'],
['ir_r1_k3_s1_e6_c320'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head),
num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)),
stem_size=32,
fix_stem=fix_stem_head,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'relu6'),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
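# Scaling note (illustrative arithmetic only): with fix_stem_head=False the head also widens, e.g.
# a 1.4 channel multiplier gives num_features = max(1280, round_channels(1280, 1.4)) = 1792, while
# the 110d / 120d variants pass fix_stem_head=True so the stem stays at 32 and the head at 1280.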
def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
""" FBNet-C
Paper: https://arxiv.org/abs/1812.03443
Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py
    NOTE: the ref impl above does not cover the 'C' variant here; the 'C' variant was derived from
    the paper, and the ref impl was only used to confirm some building block details
"""
arch_def = [
['ir_r1_k3_s1_e1_c16'],
['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'],
['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'],
['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'],
['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'],
['ir_r4_k5_s2_e6_c184'],
['ir_r1_k3_s1_e6_c352'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=16,
num_features=1984, # paper suggests this, but is not 100% clear
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates the Single-Path NAS model from search targeted for Pixel1 phone.
Paper: https://arxiv.org/abs/1904.02877
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_c16_noskip'],
# stage 1, 112x112 in
['ir_r3_k3_s2_e3_c24'],
# stage 2, 56x56 in
['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],
# stage 3, 28x28 in
['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],
        # stage 4, 14x14 in
['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],
        # stage 5, 14x14 in
['ir_r4_k5_s2_e6_c192'],
# stage 6, 7x7 in
['ir_r1_k3_s1_e6_c320_noskip']
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnet(
variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8,
group_size=None, pretrained=False, **kwargs):
"""Creates an EfficientNet model.
Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
Paper: https://arxiv.org/abs/1905.11946
EfficientNet params
name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
'efficientnet-b8': (2.2, 3.6, 672, 0.5),
'efficientnet-l2': (4.3, 5.3, 800, 0.5),
Args:
channel_multiplier: multiplier to number of channels per layer
depth_multiplier: multiplier to number of repeats per stage
"""
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25'],
['ir_r2_k3_s2_e6_c24_se0.25'],
['ir_r2_k5_s2_e6_c40_se0.25'],
['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25'],
['ir_r4_k5_s2_e6_c192_se0.25'],
['ir_r1_k3_s1_e6_c320_se0.25'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
act_layer=resolve_act_layer(kwargs, 'swish'),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
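# Worked example of the compound scaling table above (arithmetic only, no new behavior): for
# 'efficientnet_b4', channel_multiplier=1.4 and depth_multiplier=1.8, so the final stage's 320
# channels become round_channels(320, 1.4, 8) = 448, num_features = round_channels(1280, 1.4, 8) = 1792,
# and each stage's repeat count is scaled by 1.8 (ceil rounding by default in decode_arch_def).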
def _gen_efficientnet_edge(
variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs):
""" Creates an EfficientNet-EdgeTPU model
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu
"""
arch_def = [
        # NOTE `fc` is present to override the first block's in chs, resolving a mismatch with the
        # stem channels that is not present in other models
['er_r1_k3_s1_e4_c24_fc24_noskip'],
['er_r2_k3_s2_e8_c32'],
['er_r4_k3_s2_e8_c48'],
['ir_r5_k5_s2_e8_c96'],
['ir_r4_k5_s1_e8_c144'],
['ir_r2_k5_s2_e8_c192'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'relu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnet_condconv(
variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs):
"""Creates an EfficientNet-CondConv model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv
"""
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25'],
['ir_r2_k3_s2_e6_c24_se0.25'],
['ir_r2_k5_s2_e6_c40_se0.25'],
['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25_cc4'],
['ir_r4_k5_s2_e6_c192_se0.25_cc4'],
['ir_r1_k3_s1_e6_c320_se0.25_cc4'],
]
    # NOTE unlike the official impl, this one uses a `cc<x>` option where x is the base number of experts for each
    # stage and experts_multiplier scales that on a per-model basis, as with the depth/channel multipliers
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'swish'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
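# Expert count note (illustrative): the '_cc4' suffix above sets a base of 4 experts per CondConv
# stage; the *_8e model variants below pass experts_multiplier=2, giving 8 experts per conditional conv.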
def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
"""Creates an EfficientNet-Lite model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite
Paper: https://arxiv.org/abs/1905.11946
EfficientNet params
name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
'efficientnet-lite0': (1.0, 1.0, 224, 0.2),
'efficientnet-lite1': (1.0, 1.1, 240, 0.2),
'efficientnet-lite2': (1.1, 1.2, 260, 0.3),
'efficientnet-lite3': (1.2, 1.4, 280, 0.3),
'efficientnet-lite4': (1.4, 1.8, 300, 0.3),
Args:
channel_multiplier: multiplier to number of channels per layer
depth_multiplier: multiplier to number of repeats per stage
"""
arch_def = [
['ds_r1_k3_s1_e1_c16'],
['ir_r2_k3_s2_e6_c24'],
['ir_r2_k5_s2_e6_c40'],
['ir_r3_k3_s2_e6_c80'],
['ir_r3_k5_s1_e6_c112'],
['ir_r4_k5_s2_e6_c192'],
['ir_r1_k3_s1_e6_c320'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True),
num_features=1280,
stem_size=32,
fix_stem=True,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
act_layer=resolve_act_layer(kwargs, 'relu6'),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_base(
variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
""" Creates an EfficientNet-V2 base model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
"""
arch_def = [
['cn_r1_k3_s1_e1_c16_skip'],
['er_r2_k3_s2_e4_c32'],
['er_r2_k3_s2_e4_c48'],
['ir_r3_k3_s2_e4_c96_se0.25'],
['ir_r5_k3_s1_e6_c112_se0.25'],
['ir_r8_k3_s2_e6_c192_se0.25'],
]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier),
num_features=round_chs_fn(1280),
stem_size=32,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_s(
variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs):
""" Creates an EfficientNet-V2 Small model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
    NOTE: the `rw` flag sets up the 'small' variant to behave like my initial v2 small model,
    created before the ref impl was released.
"""
arch_def = [
['cn_r2_k3_s1_e1_c24_skip'],
['er_r4_k3_s2_e4_c48'],
['er_r4_k3_s2_e4_c64'],
['ir_r6_k3_s2_e4_c128_se0.25'],
['ir_r9_k3_s1_e6_c160_se0.25'],
['ir_r15_k3_s2_e6_c256_se0.25'],
]
num_features = 1280
if rw:
        # my original variant, based on the paper figure, differs from the official release
arch_def[0] = ['er_r2_k3_s1_e1_c24']
arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25']
num_features = 1792
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
num_features=round_chs_fn(num_features),
stem_size=24,
round_chs_fn=round_chs_fn,
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
""" Creates an EfficientNet-V2 Medium model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
"""
arch_def = [
['cn_r3_k3_s1_e1_c24_skip'],
['er_r5_k3_s2_e4_c48'],
['er_r5_k3_s2_e4_c80'],
['ir_r7_k3_s2_e4_c160_se0.25'],
['ir_r14_k3_s1_e6_c176_se0.25'],
['ir_r18_k3_s2_e6_c304_se0.25'],
['ir_r5_k3_s1_e6_c512_se0.25'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier),
num_features=1280,
stem_size=24,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
""" Creates an EfficientNet-V2 Large model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
"""
arch_def = [
['cn_r4_k3_s1_e1_c32_skip'],
['er_r7_k3_s2_e4_c64'],
['er_r7_k3_s2_e4_c96'],
['ir_r10_k3_s2_e4_c192_se0.25'],
['ir_r19_k3_s1_e6_c224_se0.25'],
['ir_r25_k3_s2_e6_c384_se0.25'],
['ir_r7_k3_s1_e6_c640_se0.25'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier),
num_features=1280,
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
""" Creates an EfficientNet-V2 Xtra-Large model
Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
"""
arch_def = [
['cn_r4_k3_s1_e1_c32_skip'],
['er_r8_k3_s2_e4_c64'],
['er_r8_k3_s2_e4_c96'],
['ir_r16_k3_s2_e4_c192_se0.25'],
['ir_r24_k3_s1_e6_c256_se0.25'],
['ir_r32_k3_s2_e6_c512_se0.25'],
['ir_r8_k3_s1_e6_c640_se0.25'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier),
num_features=1280,
stem_size=32,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'silu'),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a MixNet Small model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
Paper: https://arxiv.org/abs/1907.09595
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16'], # relu
# stage 1, 112x112 in
['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu
# stage 2, 56x56 in
['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish
# stage 3, 28x28 in
['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish
        # stage 4, 14x14 in
['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish
        # stage 5, 14x14 in
['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish
# 7x7
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=1536,
stem_size=16,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
"""Creates a MixNet Medium-Large model.
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
Paper: https://arxiv.org/abs/1907.09595
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c24'], # relu
# stage 1, 112x112 in
['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu
# stage 2, 56x56 in
['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish
# stage 3, 28x28 in
['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish
        # stage 4, 14x14 in
['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish
        # stage 5, 14x14 in
['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish
# 7x7
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
num_features=1536,
stem_size=24,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _gen_tinynet(
variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs
):
"""Creates a TinyNet model.
"""
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'],
['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'],
['ir_r1_k3_s1_e6_c320_se0.25'],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
num_features=max(1280, round_channels(1280, model_width, 8, None)),
stem_size=32,
fix_stem=True,
round_chs_fn=partial(round_channels, multiplier=model_width),
act_layer=resolve_act_layer(kwargs, 'swish'),
norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
**kwargs,
)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
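# Illustrative use of the _cfg helper (only the keys passed are overridden, the rest keep the
# shared defaults above):
#
#   _cfg(url='', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904)
#   # -> same mean/std/interpolation/first_conv/classifier as the defaults, new resolution fields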
default_cfgs = generate_default_cfgs({
'mnasnet_050.untrained': _cfg(),
'mnasnet_075.untrained': _cfg(),
'mnasnet_100.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth',
hf_hub_id='timm/'),
'mnasnet_140.untrained': _cfg(),
'semnasnet_050.untrained': _cfg(),
'semnasnet_075.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth',
hf_hub_id='timm/'),
'semnasnet_100.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth',
hf_hub_id='timm/'),
'semnasnet_140.untrained': _cfg(),
'mnasnet_small.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth',
hf_hub_id='timm/'),
'mobilenetv2_035.untrained': _cfg(),
'mobilenetv2_050.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
'mobilenetv2_075.untrained': _cfg(),
'mobilenetv2_100.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth',
hf_hub_id='timm/'),
'mobilenetv2_110d.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth',
hf_hub_id='timm/'),
'mobilenetv2_120d.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth',
hf_hub_id='timm/'),
'mobilenetv2_140.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth',
hf_hub_id='timm/'),
'fbnetc_100.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth',
hf_hub_id='timm/',
interpolation='bilinear'),
'spnasnet_100.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth',
hf_hub_id='timm/',
interpolation='bilinear'),
# NOTE experimenting with alternate attention
'efficientnet_b0.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth',
hf_hub_id='timm/'),
'efficientnet_b1.ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth',
hf_hub_id='timm/',
test_input_size=(3, 256, 256), crop_pct=1.0),
'efficientnet_b2.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), crop_pct=1.0),
'efficientnet_b3.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth',
hf_hub_id='timm/',
input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0),
'efficientnet_b4.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth',
hf_hub_id='timm/',
input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), crop_pct=1.0),
'efficientnet_b5.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, crop_mode='squash'),
'efficientnet_b5.sw_in12k': _cfg(
hf_hub_id='timm/',
input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.95, num_classes=11821),
'efficientnet_b6.untrained': _cfg(
url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
'efficientnet_b7.untrained': _cfg(
url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'efficientnet_b8.untrained': _cfg(
url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),
'efficientnet_l2.untrained': _cfg(
url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961),
# FIXME experimental
'efficientnet_b0_gn.untrained': _cfg(),
'efficientnet_b0_g8_gn.untrained': _cfg(),
'efficientnet_b0_g16_evos.untrained': _cfg(),
'efficientnet_b3_gn.untrained': _cfg(
input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0),
'efficientnet_b3_g8_gn.untrained': _cfg(
input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0),
'efficientnet_es.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth',
hf_hub_id='timm/'),
'efficientnet_em.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'efficientnet_el.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'efficientnet_es_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth',
hf_hub_id='timm/'),
'efficientnet_el_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'efficientnet_cc_b0_4e.untrained': _cfg(),
'efficientnet_cc_b0_8e.untrained': _cfg(),
'efficientnet_cc_b1_8e.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'efficientnet_lite0.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth',
hf_hub_id='timm/'),
'efficientnet_lite1.untrained': _cfg(
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'efficientnet_lite2.untrained': _cfg(
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'efficientnet_lite3.untrained': _cfg(
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'efficientnet_lite4.untrained': _cfg(
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'efficientnet_b1_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8),
crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'efficientnet_b2_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth',
hf_hub_id='timm/',
input_size=(3, 260, 260), pool_size=(9, 9),
crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'efficientnet_b3_pruned.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10),
crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'efficientnetv2_rw_t.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0),
'gc_efficientnetv2_rw_t.agc_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0),
'efficientnetv2_rw_s.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth',
hf_hub_id='timm/',
input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0),
'efficientnetv2_rw_m.agc_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth',
hf_hub_id='timm/',
input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0),
'efficientnetv2_s.untrained': _cfg(
input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0),
'efficientnetv2_m.untrained': _cfg(
input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0),
'efficientnetv2_l.untrained': _cfg(
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
'efficientnetv2_xl.untrained': _cfg(
input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0),
'tf_efficientnet_b0.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224)),
'tf_efficientnet_b1.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_b2.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth',
hf_hub_id='timm/',
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'tf_efficientnet_b3.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_b4.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth',
hf_hub_id='timm/',
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'tf_efficientnet_b5.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth',
hf_hub_id='timm/',
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_b6.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth',
hf_hub_id='timm/',
input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
'tf_efficientnet_b7.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth',
hf_hub_id='timm/',
input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'tf_efficientnet_l2.ns_jft_in1k_475': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth',
hf_hub_id='timm/',
input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936),
'tf_efficientnet_l2.ns_jft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth',
hf_hub_id='timm/',
input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96),
'tf_efficientnet_b0.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)),
'tf_efficientnet_b1.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_b2.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'tf_efficientnet_b3.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_b4.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'tf_efficientnet_b5.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_b6.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
'tf_efficientnet_b7.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'tf_efficientnet_b8.ap_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),
'tf_efficientnet_b5.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth',
hf_hub_id='timm/',
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_b7.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth',
hf_hub_id='timm/',
input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'tf_efficientnet_b8.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth',
hf_hub_id='timm/',
input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),
'tf_efficientnet_b0.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224)),
'tf_efficientnet_b1.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_b2.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth',
hf_hub_id='timm/',
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'tf_efficientnet_b3.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_b4.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth',
hf_hub_id='timm/',
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'tf_efficientnet_b5.aa_in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_aa-99018a74.pth',
hf_hub_id='timm/',
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_b6.aa_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth',
hf_hub_id='timm/',
input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
'tf_efficientnet_b7.aa_in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_aa-076e3472.pth',
hf_hub_id='timm/',
input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
'tf_efficientnet_b0.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0-0af12548.pth',
hf_hub_id='timm/',
input_size=(3, 224, 224)),
'tf_efficientnet_b1.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1-5c1377c4.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_b2.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth',
hf_hub_id='timm/',
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
'tf_efficientnet_b3.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth',
hf_hub_id='timm/',
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_b4.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth',
hf_hub_id='timm/',
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
'tf_efficientnet_b5.in1k': _cfg(
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth',
hf_hub_id='timm/',
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
'tf_efficientnet_es.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 224, 224), ),
'tf_efficientnet_em.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_el.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
'tf_efficientnet_cc_b0_4e.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_efficientnet_cc_b0_8e.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_efficientnet_cc_b1_8e.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
'tf_efficientnet_lite0.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        interpolation='bicubic', # should be bilinear, but bicubic is a better match for TF bilinear at low res
),
'tf_efficientnet_lite1.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882,
        interpolation='bicubic', # should be bilinear, but bicubic is a better match for TF bilinear at low res
),
'tf_efficientnet_lite2.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890,
        interpolation='bicubic', # should be bilinear, but bicubic is a better match for TF bilinear at low res
),
'tf_efficientnet_lite3.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'),
'tf_efficientnet_lite4.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'),
'tf_efficientnetv2_s.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
'tf_efficientnetv2_m.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_l.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_xl.in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_s.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
'tf_efficientnetv2_m.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_l.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_s.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
'tf_efficientnetv2_m.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_l.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_xl.in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth',
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843,
input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'tf_efficientnetv2_b0.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth',
hf_hub_id='timm/',
input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)),
'tf_efficientnetv2_b1.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth',
hf_hub_id='timm/',
input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882),
'tf_efficientnetv2_b2.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth',
hf_hub_id='timm/',
input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890),
'tf_efficientnetv2_b3.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.9, crop_mode='squash'),
'tf_efficientnetv2_b3.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904),
'tf_efficientnetv2_b3.in21k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=21843,
input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904),
'mixnet_s.ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth',
hf_hub_id='timm/'),
'mixnet_m.ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth',
hf_hub_id='timm/'),
'mixnet_l.ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth',
hf_hub_id='timm/'),
'mixnet_xl.ra_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth',
hf_hub_id='timm/'),
'mixnet_xxl.untrained': _cfg(),
'tf_mixnet_s.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth',
hf_hub_id='timm/'),
'tf_mixnet_m.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth',
hf_hub_id='timm/'),
'tf_mixnet_l.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth',
hf_hub_id='timm/'),
"tinynet_a.in1k": _cfg(
input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth',
hf_hub_id='timm/'),
"tinynet_b.in1k": _cfg(
input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth',
hf_hub_id='timm/'),
"tinynet_c.in1k": _cfg(
input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth',
hf_hub_id='timm/'),
"tinynet_d.in1k": _cfg(
input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth',
hf_hub_id='timm/'),
"tinynet_e.in1k": _cfg(
input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475)
url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth',
hf_hub_id='timm/'),
})
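# A hedged sketch of consuming the pretrained tags above through the public API (assumes a
# standard timm install; weights download on first use):
#
#   import timm
#   model = timm.create_model('efficientnet_b3.ra2_in1k', pretrained=True)
#   embedder = timm.create_model('tf_efficientnetv2_s.in21k', pretrained=True, num_classes=0)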
@register_model
def mnasnet_050(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet B1, depth multiplier of 0.5. """
model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def mnasnet_075(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet B1, depth multiplier of 0.75. """
model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet B1, depth multiplier of 1.0. """
model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mnasnet_140(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet B1, depth multiplier of 1.4 """
model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def semnasnet_050(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet A1 (w/ SE), depth multiplier of 0.5 """
model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def semnasnet_075(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet A1 (w/ SE), depth multiplier of 0.75. """
model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def semnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet A1 (w/ SE), depth multiplier of 1.0. """
model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def semnasnet_140(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet A1 (w/ SE), depth multiplier of 1.4. """
model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet:
""" MNASNet Small, depth multiplier of 1.0. """
model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 0.35 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_050(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 0.5 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_075(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 0.75 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_100(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 1.0 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_140(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 1.4 channel multiplier """
model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_110d(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers"""
model = _gen_mobilenet_v2(
'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv2_120d(pretrained=False, **kwargs) -> EfficientNet:
""" MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """
model = _gen_mobilenet_v2(
'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetc_100(pretrained=False, **kwargs) -> EfficientNet:
""" FBNet-C """
if pretrained:
# pretrained model trained with non-default BN epsilon
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def spnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
""" Single-Path NAS Pixel1"""
model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
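# Illustrative training-time regularization per the NOTE above (kwargs are forwarded to the model
# constructor by create_model):
#
#   import timm
#   model = timm.create_model('efficientnet_b0', drop_rate=0.2, drop_path_rate=0.2)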
@register_model
def efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B1 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B2 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B4 """
# NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B5 """
# NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B6 """
# NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B7 """
# NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B8 """
# NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-L2."""
# NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
return model
# FIXME experimental group conv / GroupNorm / EvoNorm experiments
@register_model
def efficientnet_b0_gn(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 + GroupNorm"""
model = _gen_efficientnet(
'efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b0_g8_gn(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 w/ group conv + GroupNorm"""
model = _gen_efficientnet(
'efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8),
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b0_g16_evos(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0 w/ group 16 conv + EvoNorm"""
model = _gen_efficientnet(
'efficientnet_b0_g16_evos', group_size=16, channel_divisor=16,
pretrained=pretrained, **kwargs) #norm_layer=partial(EvoNorm2dS0, group_size=16),
return model
@register_model
def efficientnet_b3_gn(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 w/ GroupNorm """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16,
norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b3_g8_gn(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 w/ grouped conv + BN"""
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet(
'efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8, channel_divisor=16,
norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_es(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge Small. """
model = _gen_efficientnet_edge(
'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_es_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
model = _gen_efficientnet_edge(
'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_em(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Medium. """
model = _gen_efficientnet_edge(
'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_el(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Large. """
model = _gen_efficientnet_edge(
'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_el_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
model = _gen_efficientnet_edge(
'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B0 w/ 8 Experts """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_condconv(
'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B0 w/ 8 Experts """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_condconv(
'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B1 w/ 8 Experts """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_condconv(
'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite0 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite1 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite2 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite3 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite4 """
# NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
model = _gen_efficientnet_lite(
'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b1_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B1 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
variant = 'efficientnet_b1_pruned'
model = _gen_efficientnet(
variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b2_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnet_b3_pruned(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """
model = _gen_efficientnetv2_s(
'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs)
return model
@register_model
def gc_efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """
model = _gen_efficientnetv2_s(
'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9,
rw=False, se_layer='gc', pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_rw_s(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Small (RW variant).
    NOTE: This is my initial impl (pre official code release) w/ some differences.
See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding
"""
model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_rw_m(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Medium (RW variant).
"""
model = _gen_efficientnetv2_s(
'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True,
pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Small. """
model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Medium. """
model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Large. """
model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs)
return model
@register_model
def efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Xtra-Large. """
model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B0. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B1. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B2. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B3. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B4. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B5. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B6. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.5
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B7. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.5
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-B8. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.5
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-L2 NoisyStudent. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.5
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet(
'tf_efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_es(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge Small. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_edge(
'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_em(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Medium. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_edge(
'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_el(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Edge-Large. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_edge(
'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_condconv(
'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_condconv(
'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,
pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-CondConv-B1 w/ 8 Experts. Tensorflow compatible variant """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_condconv(
'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,
pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite0 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite1 """
# NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite2 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite3 """
# NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-Lite4 """
# NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnet_lite(
'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Small. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Medium. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Large. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2 Xtra-Large. Tensorflow compatible variant
"""
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_b0(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2-B0. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_b1(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2-B1. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_base(
'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_b2(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2-B2. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_base(
'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_efficientnetv2_b3(pretrained=False, **kwargs) -> EfficientNet:
""" EfficientNet-V2-B3. Tensorflow compatible variant """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_efficientnetv2_base(
'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_s(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Small model.
"""
model = _gen_mixnet_s(
'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_m(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Medium model.
"""
model = _gen_mixnet_m(
'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_l(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Large model.
"""
model = _gen_mixnet_m(
'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_xl(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Extra-Large model.
Not a paper spec, experimental def by RW w/ depth scaling.
"""
model = _gen_mixnet_m(
'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def mixnet_xxl(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Double Extra Large model.
Not a paper spec, experimental def by RW w/ depth scaling.
"""
model = _gen_mixnet_m(
'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mixnet_s(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Small model. Tensorflow compatible variant
"""
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mixnet_s(
'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mixnet_m(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Medium model. Tensorflow compatible variant
"""
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mixnet_m(
'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mixnet_l(pretrained=False, **kwargs) -> EfficientNet:
"""Creates a MixNet Large model. Tensorflow compatible variant
"""
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mixnet_m(
'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_a(pretrained=False, **kwargs) -> EfficientNet:
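    """ TinyNet-A """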
model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_b(pretrained=False, **kwargs) -> EfficientNet:
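    """ TinyNet-B """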
model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_c(pretrained=False, **kwargs) -> EfficientNet:
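    """ TinyNet-C """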
model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_d(pretrained=False, **kwargs) -> EfficientNet:
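    """ TinyNet-D """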
model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs)
return model
@register_model
def tinynet_e(pretrained=False, **kwargs) -> EfficientNet:
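    """ TinyNet-E """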
model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs)
return model
register_model_deprecations(__name__, {
'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k',
'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k',
'tf_efficientnet_b2_ap': 'tf_efficientnet_b2.ap_in1k',
'tf_efficientnet_b3_ap': 'tf_efficientnet_b3.ap_in1k',
'tf_efficientnet_b4_ap': 'tf_efficientnet_b4.ap_in1k',
'tf_efficientnet_b5_ap': 'tf_efficientnet_b5.ap_in1k',
'tf_efficientnet_b6_ap': 'tf_efficientnet_b6.ap_in1k',
'tf_efficientnet_b7_ap': 'tf_efficientnet_b7.ap_in1k',
'tf_efficientnet_b8_ap': 'tf_efficientnet_b8.ap_in1k',
'tf_efficientnet_b0_ns': 'tf_efficientnet_b0.ns_jft_in1k',
'tf_efficientnet_b1_ns': 'tf_efficientnet_b1.ns_jft_in1k',
'tf_efficientnet_b2_ns': 'tf_efficientnet_b2.ns_jft_in1k',
'tf_efficientnet_b3_ns': 'tf_efficientnet_b3.ns_jft_in1k',
'tf_efficientnet_b4_ns': 'tf_efficientnet_b4.ns_jft_in1k',
'tf_efficientnet_b5_ns': 'tf_efficientnet_b5.ns_jft_in1k',
'tf_efficientnet_b6_ns': 'tf_efficientnet_b6.ns_jft_in1k',
'tf_efficientnet_b7_ns': 'tf_efficientnet_b7.ns_jft_in1k',
'tf_efficientnet_l2_ns_475': 'tf_efficientnet_l2.ns_jft_in1k_475',
'tf_efficientnet_l2_ns': 'tf_efficientnet_l2.ns_jft_in1k',
'tf_efficientnetv2_s_in21ft1k': 'tf_efficientnetv2_s.in21k_ft_in1k',
'tf_efficientnetv2_m_in21ft1k': 'tf_efficientnetv2_m.in21k_ft_in1k',
'tf_efficientnetv2_l_in21ft1k': 'tf_efficientnetv2_l.in21k_ft_in1k',
'tf_efficientnetv2_xl_in21ft1k': 'tf_efficientnetv2_xl.in21k_ft_in1k',
'tf_efficientnetv2_s_in21k': 'tf_efficientnetv2_s.in21k',
'tf_efficientnetv2_m_in21k': 'tf_efficientnetv2_m.in21k',
'tf_efficientnetv2_l_in21k': 'tf_efficientnetv2_l.in21k',
'tf_efficientnetv2_xl_in21k': 'tf_efficientnetv2_xl.in21k',
'efficientnet_b2a': 'efficientnet_b2',
'efficientnet_b3a': 'efficientnet_b3',
'mnasnet_a1': 'semnasnet_100',
'mnasnet_b1': 'mnasnet_100',
})
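# Illustrative usage sketch (assumes timm and torch are installed): any of the variants
# registered in this file can be built through the timm factory, and the deprecated names
# registered just above resolve to their new '<arch>.<tag>' pretrained names:
#
#   import torch, timm
#   model = timm.create_model('efficientnet_lite0', pretrained=False, num_classes=10)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> torch.Size([1, 10])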
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/__init__.py | from .beit import *
from .byoanet import *
from .byobnet import *
from .cait import *
from .coat import *
from .convit import *
from .convmixer import *
from .convnext import *
from .crossvit import *
from .cspnet import *
from .davit import *
from .deit import *
from .densenet import *
from .dla import *
from .dpn import *
from .edgenext import *
from .efficientformer import *
from .efficientformer_v2 import *
from .efficientnet import *
from .efficientvit_mit import *
from .efficientvit_msra import *
from .eva import *
from .fastvit import *
from .focalnet import *
from .gcvit import *
from .ghostnet import *
from .hardcorenas import *
from .hrnet import *
from .inception_next import *
from .inception_resnet_v2 import *
from .inception_v3 import *
from .inception_v4 import *
from .levit import *
from .maxxvit import *
from .metaformer import *
from .mlp_mixer import *
from .mobilenetv3 import *
from .mobilevit import *
from .mvitv2 import *
from .nasnet import *
from .nest import *
from .nfnet import *
from .pit import *
from .pnasnet import *
from .pvt_v2 import *
from .regnet import *
from .repghost import *
from .repvit import *
from .res2net import *
from .resnest import *
from .resnet import *
from .resnetv2 import *
from .rexnet import *
from .selecsls import *
from .senet import *
from .sequencer import *
from .sknet import *
from .swin_transformer import *
from .swin_transformer_v2 import *
from .swin_transformer_v2_cr import *
from .tiny_vit import *
from .tnt import *
from .tresnet import *
from .twins import *
from .vgg import *
from .visformer import *
from .vision_transformer import *
from .vision_transformer_hybrid import *
from .vision_transformer_relpos import *
from .vision_transformer_sam import *
from .volo import *
from .vovnet import *
from .xception import *
from .xception_aligned import *
from .xcit import *
from ._builder import build_model_with_cfg, load_pretrained, load_custom_pretrained, resolve_pretrained_cfg, \
set_pretrained_download_progress, set_pretrained_check_hash
from ._factory import create_model, parse_model_name, safe_model_name
from ._features import FeatureInfo, FeatureHooks, FeatureHookNet, FeatureListNet, FeatureDictNet
from ._features_fx import FeatureGraphNet, GraphExtractNet, create_feature_extractor, \
register_notrace_module, is_notrace_module, get_notrace_modules, \
register_notrace_function, is_notrace_function, get_notrace_functions
from ._helpers import clean_state_dict, load_state_dict, load_checkpoint, remap_state_dict, resume_checkpoint
from ._hub import load_model_config_from_hf, load_state_dict_from_hf, push_to_hf_hub
from ._manipulate import model_parameters, named_apply, named_modules, named_modules_with_params, \
group_modules, group_parameters, checkpoint_seq, adapt_input_conv
from ._pretrained import PretrainedCfg, DefaultCfg, filter_pretrained_cfg
from ._prune import adapt_model_from_string
from ._registry import split_model_name_tag, get_arch_name, generate_default_cfgs, register_model, \
register_model_deprecations, model_entrypoint, list_models, list_pretrained, get_deprecated_models, \
is_model, list_modules, is_model_in_modules, is_model_pretrained, get_pretrained_cfg, get_pretrained_cfg_value
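# Illustrative pointers to the most common entry points re-exported from this package:
#   timm.models.list_models('efficientnet*')  # registered architecture names matching a filter
#   timm.models.create_model('efficientnet_b2', pretrained=False)  # build a model by name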
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/factory.py | from ._factory import *
import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/eva.py | """ EVA
EVA from https://github.com/baaivision/EVA , paper: https://arxiv.org/abs/2211.07636
@article{EVA,
title={EVA: Exploring the Limits of Masked Visual Representation Learning at Scale},
author={Fang, Yuxin and Wang, Wen and Xie, Binhui and Sun, Quan and Wu, Ledell and Wang, Xinggang and Huang,
Tiejun and Wang, Xinlong and Cao, Yue},
journal={arXiv preprint arXiv:2211.07636},
year={2022}
}
EVA-02: A Visual Representation for Neon Genesis - https://arxiv.org/abs/2303.11331
@article{EVA02,
title={EVA-02: A Visual Representation for Neon Genesis},
author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue},
journal={arXiv preprint arXiv:2303.11331},
year={2023}
}
This file contains EVA & EVA02 model implementations evolved from BEiT, additional models in vision_transformer.py.
Modifications by / Copyright 2023 Ross Wightman, original copyrights below
"""
# EVA models Copyright (c) 2022 BAAI-Vision
# EVA02 models Copyright (c) 2023 BAAI-Vision
import math
from typing import Callable, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from timm.layers import PatchEmbed, Mlp, GluMlp, SwiGLU, LayerNorm, DropPath, PatchDropout, RotaryEmbeddingCat, \
apply_rot_embed_cat, apply_keep_indices_nlc, trunc_normal_, resample_patch_embed, resample_abs_pos_embed, \
to_2tuple, use_fused_attn
from ._builder import build_model_with_cfg
from ._registry import generate_default_cfgs, register_model
__all__ = ['Eva']
class EvaAttention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = True,
qkv_fused: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
attn_head_dim: Optional[int] = None,
norm_layer: Optional[Callable] = None,
):
"""
Args:
            dim: Input embedding dimension.
            num_heads: Number of attention heads.
            qkv_bias: Enable learnable bias for the query and value projections (keys stay bias-free).
            qkv_fused: Use a single fused qkv projection instead of separate q/k/v projections.
            attn_drop: Attention dropout rate.
            proj_drop: Output projection dropout rate.
            attn_head_dim: Optional override of the per-head dimension (defaults to dim // num_heads).
            norm_layer: Optional norm layer applied to the attention output before the final projection.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
if qkv_fused:
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
self.q_proj = self.k_proj = self.v_proj = None
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
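                # k_bias is a fixed all-zero, non-persistent buffer so the fused qkv bias can be
                # concatenated below; keys carry no learned bias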
self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False)
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = self.k_bias = self.v_bias = None
else:
self.q_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias)
self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
self.v_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias)
self.qkv = None
self.q_bias = self.k_bias = self.v_bias = None
self.attn_drop = nn.Dropout(attn_drop)
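        # optional norm on the attention output before the final projection (EVA-CLIP 'inner attn ln')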
self.norm = norm_layer(all_head_dim) if norm_layer is not None else nn.Identity()
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(
self,
x,
rope: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
):
B, N, C = x.shape
if self.qkv is not None:
qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim
else:
q = self.q_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) # B, num_heads, N, C
k = self.k_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2)
v = self.v_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2)
if rope is not None:
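            # rotate q/k for all tokens except the leading class token, then cast back to v's dtype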
q = torch.cat([q[:, :, :1, :], apply_rot_embed_cat(q[:, :, 1:, :], rope)], 2).type_as(v)
k = torch.cat([k[:, :, :1, :], apply_rot_embed_cat(k[:, :, 1:, :], rope)], 2).type_as(v)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_mask,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
if attn_mask is not None:
attn_mask = attn_mask.to(torch.bool)
attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.norm(x)
x = self.proj(x)
x = self.proj_drop(x)
return x
class EvaBlock(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
qkv_bias: bool = True,
qkv_fused: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
scale_attn_inner: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
init_values: Optional[float] = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
attn_head_dim: Optional[int] = None,
):
"""
Args:
            dim: Input embedding dimension.
            num_heads: Number of attention heads.
            qkv_bias: Enable bias for the query and value projections.
            qkv_fused: Use a single fused qkv projection instead of separate q/k/v projections.
            mlp_ratio: Ratio of MLP hidden dim to embedding dim.
            swiglu_mlp: Use a SwiGLU (gated) MLP instead of the standard GELU MLP.
            scale_mlp: Apply a norm layer inside the MLP (normformer style).
            scale_attn_inner: Apply a norm layer to the attention output before its projection.
            proj_drop: Projection dropout rate.
            attn_drop: Attention dropout rate.
            drop_path: Stochastic depth (drop path) rate.
            init_values: Initial layer-scale (gamma) value; layer-scale is disabled when None.
            act_layer: Activation layer for the standard MLP path.
            norm_layer: Normalization layer.
            attn_head_dim: Optional override of the per-head attention dimension.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = EvaAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qkv_fused=qkv_fused,
attn_drop=attn_drop,
proj_drop=proj_drop,
attn_head_dim=attn_head_dim,
norm_layer=norm_layer if scale_attn_inner else None,
)
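        # optional layer-scale parameters; enabled when init_values is not None (otherwise plain residuals)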
self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
hidden_features = int(dim * mlp_ratio)
if swiglu_mlp:
if scale_mlp:
                # when a norm layer is used inside SwiGLU, an impl with separate fc layers for gate & x is used
self.mlp = SwiGLU(
in_features=dim,
hidden_features=hidden_features,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
else:
# w/o any extra norm, an impl with packed weights is used, matches existing GluMLP
self.mlp = GluMlp(
in_features=dim,
hidden_features=hidden_features * 2,
norm_layer=norm_layer if scale_mlp else None,
act_layer=nn.SiLU,
gate_last=False,
drop=proj_drop,
)
else:
self.mlp = Mlp(
in_features=dim,
hidden_features=hidden_features,
act_layer=act_layer,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None):
if self.gamma_1 is None:
x = x + self.drop_path1(self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask))
x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class EvaBlockPostNorm(nn.Module):
""" EVA block w/ post-norm and support for swiglu, MLP norm scale, ROPE. """
def __init__(
self,
dim: int,
num_heads: int,
qkv_bias: bool = True,
qkv_fused: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
scale_attn_inner: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
init_values: Optional[float] = None, # ignore for post-norm
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
attn_head_dim: Optional[int] = None,
):
"""
Args:
            dim: Input embedding dimension.
            num_heads: Number of attention heads.
            qkv_bias: Enable bias for the query and value projections.
            qkv_fused: Use a single fused qkv projection instead of separate q/k/v projections.
            mlp_ratio: Ratio of MLP hidden dim to embedding dim.
            swiglu_mlp: Use a SwiGLU (gated) MLP instead of the standard GELU MLP.
            scale_mlp: Apply a norm layer inside the MLP (normformer style).
            scale_attn_inner: Apply a norm layer to the attention output before its projection.
            proj_drop: Projection dropout rate.
            attn_drop: Attention dropout rate.
            drop_path: Stochastic depth (drop path) rate.
            init_values: Ignored for the post-norm block (kept for interface compatibility).
            act_layer: Activation layer for the standard MLP path.
            norm_layer: Normalization layer.
            attn_head_dim: Optional override of the per-head attention dimension.
"""
super().__init__()
self.attn = EvaAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qkv_fused=qkv_fused,
attn_drop=attn_drop,
proj_drop=proj_drop,
attn_head_dim=attn_head_dim,
norm_layer=norm_layer if scale_attn_inner else None,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
hidden_features = int(dim * mlp_ratio)
if swiglu_mlp:
if scale_mlp:
                # when a norm layer is used inside SwiGLU, an impl with separate fc layers for gate & x is used
self.mlp = SwiGLU(
in_features=dim,
hidden_features=hidden_features,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
else:
# w/o any extra norm, an impl with packed fc1 weights is used, matches existing GluMLP
self.mlp = GluMlp(
in_features=dim,
hidden_features=hidden_features * 2,
norm_layer=norm_layer if scale_mlp else None,
act_layer=nn.SiLU,
gate_last=False,
drop=proj_drop,
)
else:
self.mlp = Mlp(
in_features=dim,
hidden_features=hidden_features,
act_layer=act_layer,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.norm1(self.attn(x, rope=rope, attn_mask=attn_mask)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
return x
class Eva(nn.Module):
""" Eva Vision Transformer w/ Abs & Rotary Pos Embed
This class implements the EVA and EVA02 models that were based on the BEiT ViT variant
* EVA - abs pos embed, global avg pool
* EVA02 - abs + rope pos embed, global avg pool, SwiGLU, scale Norm in MLP (ala normformer)
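
    A minimal usage sketch (illustrative; assumes timm and torch are importable):

        model = timm.create_model('eva02_tiny_patch14_336', pretrained=False)
        logits = model(torch.randn(1, 3, 336, 336))  # -> (1, 1000) with the default classifier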
"""
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
qkv_bias: bool = True,
qkv_fused: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
scale_attn_inner: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
patch_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
norm_layer: Callable = LayerNorm,
init_values: Optional[float] = None,
class_token: bool = True,
use_abs_pos_emb: bool = True,
use_rot_pos_emb: bool = False,
use_post_norm: bool = False,
dynamic_img_size: bool = False,
dynamic_img_pad: bool = False,
ref_feat_shape: Optional[Union[Tuple[int, int], int]] = None,
head_init_scale: float = 0.001,
):
"""
Args:
            img_size: Input image size.
            patch_size: Patch size.
            in_chans: Number of input image channels.
            num_classes: Number of classifier classes.
            global_pool: Global pooling type for the classifier head ('avg' or 'token').
            embed_dim: Transformer embedding dimension.
            depth: Number of transformer blocks.
            num_heads: Number of attention heads.
            qkv_bias: Enable bias for the query and value projections.
            qkv_fused: Use a single fused qkv projection per block.
            mlp_ratio: Ratio of MLP hidden dim to embedding dim.
            swiglu_mlp: Use SwiGLU (gated) MLPs.
            scale_mlp: Apply a norm layer inside the MLP (normformer style).
            scale_attn_inner: Apply a norm layer to the attention output before its projection.
            drop_rate: Head (pre-classifier) dropout rate.
            pos_drop_rate: Position embedding dropout rate.
            patch_drop_rate: Patch (token) dropout rate.
            proj_drop_rate: Projection dropout rate.
            attn_drop_rate: Attention dropout rate.
            drop_path_rate: Stochastic depth rate.
            norm_layer: Normalization layer.
            init_values: Initial layer-scale (gamma) value; layer-scale is disabled when None.
            class_token: Prepend a class token.
            use_abs_pos_emb: Use a learned absolute position embedding.
            use_rot_pos_emb: Use rotary position embeddings (ROPE).
            use_post_norm: Use post-norm transformer blocks instead of pre-norm.
            dynamic_img_size: Support variable input sizes by resampling position embeddings at runtime.
            dynamic_img_pad: Pad inputs so spatial dims are divisible by the patch size.
            ref_feat_shape: Reference feature grid shape used to rescale the rotary embeddings.
            head_init_scale: Scale applied to the classifier head weights at init.
"""
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.dynamic_img_size = dynamic_img_size
self.grad_checkpointing = False
embed_args = {}
if dynamic_img_size:
# flatten deferred until after pos embed
embed_args.update(dict(strict_img_size=False, output_fmt='NHWC'))
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
dynamic_img_pad=dynamic_img_pad,
**embed_args,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + self.num_prefix_tokens, embed_dim)) if use_abs_pos_emb else None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if patch_drop_rate > 0:
self.patch_drop = PatchDropout(
patch_drop_rate,
num_prefix_tokens=self.num_prefix_tokens,
return_indices=True,
)
else:
self.patch_drop = None
if use_rot_pos_emb:
ref_feat_shape = to_2tuple(ref_feat_shape) if ref_feat_shape is not None else None
self.rope = RotaryEmbeddingCat(
embed_dim // num_heads,
in_pixels=False,
feat_shape=None if dynamic_img_size else self.patch_embed.grid_size,
ref_feat_shape=ref_feat_shape,
)
else:
self.rope = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
block_fn = EvaBlockPostNorm if use_post_norm else EvaBlock
self.blocks = nn.ModuleList([
block_fn(
dim=embed_dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qkv_fused=qkv_fused,
mlp_ratio=mlp_ratio,
swiglu_mlp=swiglu_mlp,
scale_mlp=scale_mlp,
scale_attn_inner=scale_attn_inner,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values,
)
for i in range(depth)])
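        # with 'avg' pooling the final norm is applied after pooling (fc_norm); otherwise it is applied to the token sequence (norm)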
use_fc_norm = self.global_pool == 'avg'
self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
if self.cls_token is not None:
trunc_normal_(self.cls_token, std=.02)
self.fix_init_weight()
if isinstance(self.head, nn.Linear):
trunc_normal_(self.head.weight, std=.02)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def fix_init_weight(self):
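        # rescale the residual-branch output projections (attn proj & mlp fc2) by 1/sqrt(2 * layer_id)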
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
@torch.jit.ignore
def no_weight_decay(self):
nwd = {'pos_embed', 'cls_token'}
return nwd
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))],
)
return matcher
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def _pos_embed(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
if self.dynamic_img_size:
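            # patch tokens arrive as an NHWC grid here; resample abs pos embed (and rope) to this grid, then flatten to NLC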
B, H, W, C = x.shape
if self.pos_embed is not None:
pos_embed = resample_abs_pos_embed(
self.pos_embed,
(H, W),
num_prefix_tokens=self.num_prefix_tokens,
)
else:
pos_embed = None
x = x.view(B, -1, C)
rot_pos_embed = self.rope.get_embed(shape=(H, W)) if self.rope is not None else None
else:
pos_embed = self.pos_embed
rot_pos_embed = self.rope.get_embed() if self.rope is not None else None
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
if pos_embed is not None:
x = x + pos_embed
x = self.pos_drop(x)
        # apply patch dropout and, if needed, subset the shared rotary position embedding to the kept tokens
if self.patch_drop is not None:
x, keep_indices = self.patch_drop(x)
if rot_pos_embed is not None and keep_indices is not None:
rot_pos_embed = apply_keep_indices_nlc(x, rot_pos_embed, keep_indices)
return x, rot_pos_embed
def forward_features(self, x):
x = self.patch_embed(x)
x, rot_pos_embed = self._pos_embed(x)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, rope=rot_pos_embed)
else:
x = blk(x, rope=rot_pos_embed)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(
state_dict,
model,
interpolation='bicubic',
antialias=True,
):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
state_dict = state_dict.get('model_ema', state_dict)
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('module', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
# prefix for loading OpenCLIP compatible weights
if 'visual.trunk.pos_embed' in state_dict:
prefix = 'visual.trunk.'
elif 'visual.pos_embed' in state_dict:
prefix = 'visual.'
else:
prefix = ''
mim_weights = prefix + 'mask_token' in state_dict
no_qkv = prefix + 'blocks.0.attn.q_proj.weight' in state_dict
len_prefix = len(prefix)
for k, v in state_dict.items():
if prefix:
if k.startswith(prefix):
k = k[len_prefix:]
else:
continue
if 'rope' in k:
# fixed embedding no need to load buffer from checkpoint
continue
if 'patch_embed.proj.weight' in k:
_, _, H, W = model.patch_embed.proj.weight.shape
if v.shape[-1] != W or v.shape[-2] != H:
v = resample_patch_embed(
v,
(H, W),
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1)
v = resample_abs_pos_embed(
v,
new_size=model.patch_embed.grid_size,
num_prefix_tokens=num_prefix_tokens,
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
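        # remap original EVA02 module names (ffn_ln, inner_attn_ln, w12/w1/w2/w3) to timm's Mlp/GluMlp/SwiGLU naming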
k = k.replace('mlp.ffn_ln', 'mlp.norm')
k = k.replace('attn.inner_attn_ln', 'attn.norm')
k = k.replace('mlp.w12', 'mlp.fc1')
k = k.replace('mlp.w1', 'mlp.fc1_g')
k = k.replace('mlp.w2', 'mlp.fc1_x')
k = k.replace('mlp.w3', 'mlp.fc2')
if no_qkv:
k = k.replace('q_bias', 'q_proj.bias')
k = k.replace('v_bias', 'v_proj.bias')
if mim_weights and k in ('mask_token', 'lm_head.weight', 'lm_head.bias', 'norm.weight', 'norm.bias'):
if k == 'norm.weight' or k == 'norm.bias':
# try moving norm -> fc norm on fine-tune, probably a better starting point than new init
k = k.replace('norm', 'fc_norm')
else:
# skip pretrain mask token & head weights
continue
out_dict[k] = v
return out_dict
def _create_eva(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Eva models.')
model = build_model_with_cfg(
Eva, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
# EVA 01 CLIP fine-tuned on imagenet-1k
'eva_giant_patch14_224.clip_ft_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz224_ftcls_89p1.pt',
hf_hub_id='timm/',
),
'eva_giant_patch14_336.clip_ft_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz336_ftcls_89p4.pt',
hf_hub_id='timm/',
input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'),
# MIM EVA 01 pretrain, ft on in22k -> in1k
'eva_giant_patch14_336.m30m_ft_in22k_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_336px_psz14_ema_89p6.pt',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'),
'eva_giant_patch14_560.m30m_ft_in22k_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_560px_psz14_ema_89p7.pt',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
input_size=(3, 560, 560), crop_pct=1.0, crop_mode='squash'),
# in22k or m38m MIM pretrain w/ intermediate in22k fine-tune and final in1k fine-tune
'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k': _cfg(
# hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_B_pt_in21k_medft_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash',
),
'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k': _cfg(
# hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_in21k_medft_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash',
),
'eva02_large_patch14_448.mim_m38m_ft_in22k_in1k': _cfg(
hf_hub_id='timm/',
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_m38m_medft_in21k_ft_in1k_p14.pt',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash',
),
# in22k or m3m MIM pretrain w/ in1k fine-tune
'eva02_tiny_patch14_336.mim_in22k_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_Ti_pt_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 336, 336), crop_pct=1.0,
),
'eva02_small_patch14_336.mim_in22k_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_S_pt_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 336, 336), crop_pct=1.0,
),
'eva02_base_patch14_448.mim_in22k_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_B_pt_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0,
),
'eva02_large_patch14_448.mim_in22k_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0,
),
'eva02_large_patch14_448.mim_m38m_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_m38m_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0,
),
# in22k or m3m MIM pretrain w/ in22k fine-tune
'eva02_base_patch14_448.mim_in22k_ft_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_B_pt_in21k_medft_in21k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841,
),
'eva02_large_patch14_448.mim_in22k_ft_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_in21k_medft_in21k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841,
),
'eva02_large_patch14_448.mim_m38m_ft_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_m38m_medft_in21k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841,
),
# in22k or m38m MIM pretrain
'eva02_tiny_patch14_224.mim_in22k': _cfg(
# hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_Ti_pt_in21k_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
'eva02_small_patch14_224.mim_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_S_pt_in21k_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
'eva02_base_patch14_224.mim_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_B_pt_in21k_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
'eva02_large_patch14_224.mim_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_in21k_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
'eva02_large_patch14_224.mim_m38m': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_m38m_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
# EVA01 and EVA02 CLIP image towers
'eva_giant_patch14_clip_224.laion400m': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt',
hf_hub_id='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=1024,
),
'eva_giant_patch14_clip_224.merged2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt',
hf_hub_id='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=1024,
),
'eva02_base_patch16_clip_224.merged2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt',
hf_hub_id='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=512,
),
'eva02_large_patch14_clip_224.merged2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt',
hf_hub_id='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=768,
),
'eva02_large_patch14_clip_336.merged2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt',
hf_hub_id='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
input_size=(3, 336, 336), crop_pct=1.0,
num_classes=768,
),
'eva02_enormous_patch14_clip_224.laion2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt',
hf_hub_id='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=1024,
),
'eva02_enormous_patch14_clip_224.laion2b_plus': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt',
hf_hub_id='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k', # bfloat16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=1024,
),
'eva02_enormous_patch14_clip_224.pretrain': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_E_psz14.pt',
num_classes=0,
),
})
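# Illustrative note: a specific pretrained weight set above is selected with the '<arch>.<tag>'
# syntax, e.g. timm.create_model('eva02_base_patch14_448.mim_in22k_ft_in22k_in1k', pretrained=True).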
@register_model
def eva_giant_patch14_224(pretrained=False, **kwargs) -> Eva:
""" EVA-g model https://arxiv.org/abs/2211.07636 """
model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408)
model = _create_eva('eva_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva_giant_patch14_336(pretrained=False, **kwargs) -> Eva:
""" EVA-g model https://arxiv.org/abs/2211.07636 """
model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408)
model = _create_eva('eva_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva_giant_patch14_560(pretrained=False, **kwargs) -> Eva:
""" EVA-g model https://arxiv.org/abs/2211.07636 """
model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408)
model = _create_eva('eva_giant_patch14_560', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_tiny_patch14_224(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=192,
depth=12,
num_heads=3,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_small_patch14_224(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_small_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_base_patch14_224(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=768,
depth=12,
num_heads=12,
qkv_fused=False,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
scale_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_large_patch14_224(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4 * 2 / 3,
qkv_fused=False,
swiglu_mlp=True,
scale_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_tiny_patch14_336(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=336,
patch_size=14,
embed_dim=192,
depth=12,
num_heads=3,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_tiny_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_small_patch14_336(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=336,
patch_size=14,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_small_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_base_patch14_448(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=448,
patch_size=14,
embed_dim=768,
depth=12,
num_heads=12,
qkv_fused=False,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
scale_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_base_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_large_patch14_448(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=448,
patch_size=14,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4 * 2 / 3,
qkv_fused=False,
swiglu_mlp=True,
scale_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva_giant_patch14_clip_224(pretrained=False, **kwargs) -> Eva:
""" EVA-g CLIP model (only difference from non-CLIP is the pooling) """
model_args = dict(
patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408,
global_pool=kwargs.pop('global_pool', 'token'))
model = _create_eva('eva_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_base_patch16_clip_224(pretrained=False, **kwargs) -> Eva:
""" A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_base """
model_args = dict(
img_size=224,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
qkv_fused=False,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
scale_mlp=True,
scale_attn_inner=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
global_pool=kwargs.pop('global_pool', 'token'),
)
model = _create_eva('eva02_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_large_patch14_clip_224(pretrained=False, **kwargs) -> Eva:
""" A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4 * 2 / 3,
qkv_fused=False,
swiglu_mlp=True,
scale_mlp=True,
scale_attn_inner=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
global_pool=kwargs.pop('global_pool', 'token'),
)
model = _create_eva('eva02_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_large_patch14_clip_336(pretrained=False, **kwargs) -> Eva:
""" A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """
model_args = dict(
img_size=336,
patch_size=14,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4 * 2 / 3,
qkv_fused=False,
swiglu_mlp=True,
scale_mlp=True,
scale_attn_inner=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
global_pool=kwargs.pop('global_pool', 'token'),
)
model = _create_eva('eva02_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_enormous_patch14_clip_224(pretrained=False, **kwargs) -> Eva:
""" A EVA-CLIP specific variant that uses residual post-norm in blocks """
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=1792,
depth=64,
num_heads=16,
mlp_ratio=15360 / 1792,
use_post_norm=True,
global_pool=kwargs.pop('global_pool', 'token'),
)
model = _create_eva('eva02_enormous_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
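# Usage sketch (illustrative, not part of the original file): any of the entrypoints above can
# be created through the timm factory; num_classes is passed explicitly here so the noted
# output shape holds regardless of which pretrained config tag is selected.
#
#   import timm, torch
#   model = timm.create_model('eva02_tiny_patch14_224', pretrained=False, num_classes=1000)
#   logits = model(torch.randn(1, 3, 224, 224))   # -> (1, 1000)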
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/tiny_vit.py | """ TinyViT
Paper: `TinyViT: Fast Pretraining Distillation for Small Vision Transformers`
- https://arxiv.org/abs/2207.10666
Adapted from official impl at https://github.com/microsoft/Cream/tree/main/TinyViT
"""
__all__ = ['TinyVit']
import math
import itertools
from functools import partial
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import LayerNorm2d, NormMlpClassifierHead, DropPath,\
trunc_normal_, resize_rel_pos_bias_table_levit, use_fused_attn
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class ConvNorm(torch.nn.Sequential):
def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False)
self.bn = nn.BatchNorm2d(out_chs)
torch.nn.init.constant_(self.bn.weight, bn_weight_init)
torch.nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self):
c, bn = self.conv, self.bn
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps) ** 0.5
m = torch.nn.Conv2d(
w.size(1) * self.conv.groups, w.size(0), w.shape[2:],
stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class PatchEmbed(nn.Module):
def __init__(self, in_chs, out_chs, act_layer):
super().__init__()
self.stride = 4
self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1)
self.act = act_layer()
self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1)
def forward(self, x):
x = self.conv1(x)
x = self.act(x)
x = self.conv2(x)
return x
class MBConv(nn.Module):
def __init__(self, in_chs, out_chs, expand_ratio, act_layer, drop_path):
super().__init__()
mid_chs = int(in_chs * expand_ratio)
self.conv1 = ConvNorm(in_chs, mid_chs, ks=1)
self.act1 = act_layer()
self.conv2 = ConvNorm(mid_chs, mid_chs, ks=3, stride=1, pad=1, groups=mid_chs)
self.act2 = act_layer()
self.conv3 = ConvNorm(mid_chs, out_chs, ks=1, bn_weight_init=0.0)
self.act3 = act_layer()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.drop_path(x)
x += shortcut
x = self.act3(x)
return x
class PatchMerging(nn.Module):
def __init__(self, dim, out_dim, act_layer):
super().__init__()
self.conv1 = ConvNorm(dim, out_dim, 1, 1, 0)
self.act1 = act_layer()
self.conv2 = ConvNorm(out_dim, out_dim, 3, 2, 1, groups=out_dim)
self.act2 = act_layer()
self.conv3 = ConvNorm(out_dim, out_dim, 1, 1, 0)
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
return x
class ConvLayer(nn.Module):
def __init__(
self,
dim,
depth,
act_layer,
drop_path=0.,
conv_expand_ratio=4.,
):
super().__init__()
self.dim = dim
self.depth = depth
self.blocks = nn.Sequential(*[
MBConv(
dim, dim, conv_expand_ratio, act_layer,
drop_path[i] if isinstance(drop_path, list) else drop_path,
)
for i in range(depth)
])
def forward(self, x):
x = self.blocks(x)
return x
class NormMlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
norm_layer=nn.LayerNorm,
act_layer=nn.GELU,
drop=0.,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.norm = norm_layer(in_features)
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop)
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop)
def forward(self, x):
x = self.norm(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class Attention(torch.nn.Module):
fused_attn: torch.jit.Final[bool]
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=(14, 14),
):
super().__init__()
assert isinstance(resolution, tuple) and len(resolution) == 2
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.val_dim = int(attn_ratio * key_dim)
self.out_dim = self.val_dim * num_heads
self.attn_ratio = attn_ratio
self.resolution = resolution
self.fused_attn = use_fused_attn()
self.norm = nn.LayerNorm(dim)
self.qkv = nn.Linear(dim, num_heads * (self.val_dim + 2 * key_dim))
self.proj = nn.Linear(self.out_dim, dim)
points = list(itertools.product(range(resolution[0]), range(resolution[1])))
N = len(points)
attention_offsets = {}
idxs = []
for p1 in points:
for p2 in points:
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
idxs.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False)
self.attention_bias_cache = {}
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
attn_bias = self.get_attention_biases(x.device)
B, N, _ = x.shape
# Normalization
x = self.norm(x)
qkv = self.qkv(x)
# (B, N, num_heads, d)
q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3)
# (B, num_heads, N, d)
q = q.permute(0, 2, 1, 3)
k = k.permute(0, 2, 1, 3)
v = v.permute(0, 2, 1, 3)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn + attn_bias
attn = attn.softmax(dim=-1)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, self.out_dim)
x = self.proj(x)
return x
class TinyVitBlock(nn.Module):
""" TinyViT Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
local_conv_size (int): the kernel size of the convolution between
Attention and MLP. Default: 3
act_layer: the activation function. Default: nn.GELU
"""
def __init__(
self,
dim,
num_heads,
window_size=7,
mlp_ratio=4.,
drop=0.,
drop_path=0.,
local_conv_size=3,
act_layer=nn.GELU
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
assert window_size > 0, 'window_size must be greater than 0'
self.window_size = window_size
self.mlp_ratio = mlp_ratio
assert dim % num_heads == 0, 'dim must be divisible by num_heads'
head_dim = dim // num_heads
window_resolution = (window_size, window_size)
self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = NormMlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
pad = local_conv_size // 2
self.local_conv = ConvNorm(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
def forward(self, x):
B, H, W, C = x.shape
L = H * W
shortcut = x
if H == self.window_size and W == self.window_size:
x = x.reshape(B, L, C)
x = self.attn(x)
x = x.view(B, H, W, C)
else:
pad_b = (self.window_size - H % self.window_size) % self.window_size
pad_r = (self.window_size - W % self.window_size) % self.window_size
padding = pad_b > 0 or pad_r > 0
if padding:
x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
# window partition
pH, pW = H + pad_b, W + pad_r
nH = pH // self.window_size
nW = pW // self.window_size
x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(
B * nH * nW, self.window_size * self.window_size, C
)
x = self.attn(x)
# window reverse
x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C)
if padding:
x = x[:, :H, :W].contiguous()
x = shortcut + self.drop_path1(x)
x = x.permute(0, 3, 1, 2)
x = self.local_conv(x)
x = x.reshape(B, C, L).transpose(1, 2)
x = x + self.drop_path2(self.mlp(x))
return x.view(B, H, W, C)
def extra_repr(self) -> str:
return f"dim={self.dim}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
register_notrace_module(TinyVitBlock)
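# Example (illustrative sketch, not part of the original file): TinyVitBlock operates on NHWC
# feature maps and preserves spatial resolution; inputs whose H/W differ from the window size
# are padded for windowed attention and cropped back afterwards.
#
#   import torch
#   blk = TinyVitBlock(dim=96, num_heads=3, window_size=7)
#   x = torch.randn(2, 14, 14, 96)                     # (B, H, W, C)
#   assert blk(x).shape == (2, 14, 14, 96)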
class TinyVitStage(nn.Module):
""" A basic TinyViT layer for one stage.
Args:
dim (int): Number of input channels.
out_dim: the output dimension of the layer
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3
act_layer: the activation function. Default: nn.GELU
"""
def __init__(
self,
dim,
out_dim,
depth,
num_heads,
window_size,
mlp_ratio=4.,
drop=0.,
drop_path=0.,
downsample=None,
local_conv_size=3,
act_layer=nn.GELU,
):
super().__init__()
self.depth = depth
self.out_dim = out_dim
# patch merging layer
if downsample is not None:
self.downsample = downsample(
dim=dim,
out_dim=out_dim,
act_layer=act_layer,
)
else:
self.downsample = nn.Identity()
assert dim == out_dim
# build blocks
self.blocks = nn.Sequential(*[
TinyVitBlock(
dim=out_dim,
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
drop=drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
local_conv_size=local_conv_size,
act_layer=act_layer,
)
for i in range(depth)])
def forward(self, x):
x = self.downsample(x)
x = x.permute(0, 2, 3, 1) # BCHW -> BHWC
x = self.blocks(x)
x = x.permute(0, 3, 1, 2) # BHWC -> BCHW
return x
def extra_repr(self) -> str:
return f"dim={self.out_dim}, depth={self.depth}"
class TinyVit(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
embed_dims=(96, 192, 384, 768),
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
window_sizes=(7, 7, 14, 7),
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
act_layer=nn.GELU,
):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.num_stages = len(depths)
self.mlp_ratio = mlp_ratio
self.grad_checkpointing = use_checkpoint
self.patch_embed = PatchEmbed(
in_chs=in_chans,
out_chs=embed_dims[0],
act_layer=act_layer,
)
# stochastic depth rate rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# build stages
self.stages = nn.Sequential()
stride = self.patch_embed.stride
prev_dim = embed_dims[0]
self.feature_info = []
for stage_idx in range(self.num_stages):
if stage_idx == 0:
stage = ConvLayer(
dim=prev_dim,
depth=depths[stage_idx],
act_layer=act_layer,
drop_path=dpr[:depths[stage_idx]],
conv_expand_ratio=mbconv_expand_ratio,
)
else:
out_dim = embed_dims[stage_idx]
drop_path_rate = dpr[sum(depths[:stage_idx]):sum(depths[:stage_idx + 1])]
stage = TinyVitStage(
dim=embed_dims[stage_idx - 1],
out_dim=out_dim,
depth=depths[stage_idx],
num_heads=num_heads[stage_idx],
window_size=window_sizes[stage_idx],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
local_conv_size=local_conv_size,
drop_path=drop_path_rate,
downsample=PatchMerging,
act_layer=act_layer,
)
prev_dim = out_dim
stride *= 2
self.stages.append(stage)
self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{stage_idx}')]
# Classifier head
self.num_features = embed_dims[-1]
norm_layer_cf = partial(LayerNorm2d, eps=1e-5)
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
norm_layer=norm_layer_cf,
)
# init weights
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'attention_biases'}
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'attention_biases' in x}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool=global_pool)
def forward_features(self, x):
x = self.patch_embed(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x):
x = self.head(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
if 'model' in state_dict.keys():
state_dict = state_dict['model']
target_sd = model.state_dict()
out_dict = {}
for k, v in state_dict.items():
if k.endswith('attention_bias_idxs'):
continue
if 'attention_biases' in k:
# TODO: should this resize be moved into the model to support dynamic input resolution? (high risk)
v = resize_rel_pos_bias_table_levit(v.T, target_sd[k].shape[::-1]).T
out_dict[k] = v
return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv1.conv',
'classifier': 'head.fc',
'pool_size': (7, 7),
'input_size': (3, 224, 224),
'crop_pct': 0.95,
**kwargs,
}
default_cfgs = generate_default_cfgs({
'tiny_vit_5m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_5m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22kto1k_distill.pth'
),
'tiny_vit_5m_224.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_1k.pth'
),
'tiny_vit_11m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_11m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22kto1k_distill.pth'
),
'tiny_vit_11m_224.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_1k.pth'
),
'tiny_vit_21m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_21m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_distill.pth'
),
'tiny_vit_21m_224.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_1k.pth'
),
'tiny_vit_21m_384.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_384_distill.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'tiny_vit_21m_512.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_512_distill.pth',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash',
),
})
def _create_tiny_vit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
TinyVit,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs
)
return model
@register_model
def tiny_vit_5m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[64, 128, 160, 320],
depths=[2, 2, 6, 2],
num_heads=[2, 4, 5, 10],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.0,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_5m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_11m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[64, 128, 256, 448],
depths=[2, 2, 6, 2],
num_heads=[2, 4, 8, 14],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_11m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.2,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_384(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[12, 12, 24, 12],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_384', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_512(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[16, 16, 32, 16],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_512', pretrained, **model_kwargs)
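# Usage sketch (illustrative, not part of the original file): the registered variants support
# timm's features_only mode via the feature_info built in TinyVit.__init__; for tiny_vit_5m_224
# this should yield four maps at strides 4/8/16/32 with 64/128/160/320 channels.
#
#   import timm, torch
#   fx = timm.create_model('tiny_vit_5m_224', pretrained=False, features_only=True)
#   feats = fx(torch.randn(1, 3, 224, 224))
#   print([f.shape for f in feats])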
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/inception_next.py | """
InceptionNeXt paper: https://arxiv.org/abs/2303.16900
Original implementation & weights from: https://github.com/sail-sg/inceptionnext
"""
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, to_2tuple, get_padding, SelectAdaptivePool2d
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class InceptionDWConv2d(nn.Module):
""" Inception depthwise convolution
"""
def __init__(
self,
in_chs,
square_kernel_size=3,
band_kernel_size=11,
branch_ratio=0.125,
dilation=1,
):
super().__init__()
gc = int(in_chs * branch_ratio) # channel numbers of a convolution branch
square_padding = get_padding(square_kernel_size, dilation=dilation)
band_padding = get_padding(band_kernel_size, dilation=dilation)
self.dwconv_hw = nn.Conv2d(
gc, gc, square_kernel_size,
padding=square_padding, dilation=dilation, groups=gc)
self.dwconv_w = nn.Conv2d(
gc, gc, (1, band_kernel_size),
padding=(0, band_padding), dilation=(1, dilation), groups=gc)
self.dwconv_h = nn.Conv2d(
gc, gc, (band_kernel_size, 1),
padding=(band_padding, 0), dilation=(dilation, 1), groups=gc)
self.split_indexes = (in_chs - 3 * gc, gc, gc, gc)
def forward(self, x):
x_id, x_hw, x_w, x_h = torch.split(x, self.split_indexes, dim=1)
return torch.cat((
x_id,
self.dwconv_hw(x_hw),
self.dwconv_w(x_w),
self.dwconv_h(x_h)
), dim=1,
)
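# Example (illustrative sketch): with in_chs=64 and the default branch_ratio=0.125 each depthwise
# branch (3x3 square, 1x11 band, 11x1 band) gets gc=8 channels while the remaining 40 channels
# pass through untouched, so channel count and spatial size are preserved.
#
#   import torch
#   m = InceptionDWConv2d(64)
#   assert m(torch.randn(2, 64, 56, 56)).shape == (2, 64, 56, 56)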
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims
copied from timm: https://github.com/huggingface/pytorch-image-models/blob/v0.6.11/timm/models/layers/mlp.py
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.ReLU,
norm_layer=None,
bias=True,
drop=0.,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
bias = to_2tuple(bias)
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.drop = nn.Dropout(drop)
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])
def forward(self, x):
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
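# Example (illustrative sketch): the 1x1-conv MLP changes only the channel dimension through the
# hidden layer and back, leaving the spatial layout intact.
#
#   mlp = ConvMlp(64, hidden_features=256)
#   assert mlp(torch.randn(1, 64, 14, 14)).shape == (1, 64, 14, 14)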
class MlpClassifierHead(nn.Module):
""" MLP classification head
"""
def __init__(
self,
dim,
num_classes=1000,
pool_type='avg',
mlp_ratio=3,
act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
drop=0.,
bias=True
):
super().__init__()
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
in_features = dim * self.global_pool.feat_mult()
hidden_features = int(mlp_ratio * in_features)
self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
self.act = act_layer()
self.norm = norm_layer(hidden_features)
self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.global_pool(x)
x = self.fc1(x)
x = self.act(x)
x = self.norm(x)
x = self.drop(x)
x = self.fc2(x)
return x
class MetaNeXtBlock(nn.Module):
""" MetaNeXtBlock Block
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(
self,
dim,
dilation=1,
token_mixer=InceptionDWConv2d,
norm_layer=nn.BatchNorm2d,
mlp_layer=ConvMlp,
mlp_ratio=4,
act_layer=nn.GELU,
ls_init_value=1e-6,
drop_path=0.,
):
super().__init__()
self.token_mixer = token_mixer(dim, dilation=dilation)
self.norm = norm_layer(dim)
self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.token_mixer(x)
x = self.norm(x)
x = self.mlp(x)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + shortcut
return x
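# Example (illustrative sketch): MetaNeXtBlock is a residual unit (token mixer -> norm -> MLP,
# optionally layer-scaled), so input and output shapes match.
#
#   blk = MetaNeXtBlock(dim=96)
#   assert blk(torch.randn(1, 96, 28, 28)).shape == (1, 96, 28, 28)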
class MetaNeXtStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=2,
depth=2,
dilation=(1, 1),
drop_path_rates=None,
ls_init_value=1.0,
token_mixer=InceptionDWConv2d,
act_layer=nn.GELU,
norm_layer=None,
mlp_ratio=4,
):
super().__init__()
self.grad_checkpointing = False
if stride > 1 or dilation[0] != dilation[1]:
self.downsample = nn.Sequential(
norm_layer(in_chs),
nn.Conv2d(
in_chs,
out_chs,
kernel_size=2,
stride=stride,
dilation=dilation[0],
),
)
else:
self.downsample = nn.Identity()
drop_path_rates = drop_path_rates or [0.] * depth
stage_blocks = []
for i in range(depth):
stage_blocks.append(MetaNeXtBlock(
dim=out_chs,
dilation=dilation[1],
drop_path=drop_path_rates[i],
ls_init_value=ls_init_value,
token_mixer=token_mixer,
act_layer=act_layer,
norm_layer=norm_layer,
mlp_ratio=mlp_ratio,
))
self.blocks = nn.Sequential(*stage_blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class MetaNeXt(nn.Module):
r""" MetaNeXt
A PyTorch impl of : `InceptionNeXt: When Inception Meets ConvNeXt` - https://arxiv.org/abs/2303.16900
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 9, 3)
dims (tuple(int)): Feature dimension at each stage. Default: (96, 192, 384, 768)
token_mixers: Token mixer function. Default: InceptionDWConv2d
norm_layer: Normalization layer. Default: nn.BatchNorm2d
act_layer: Activation function for MLP. Default: nn.GELU
mlp_ratios (int or tuple(int)): MLP ratios. Default: (4, 4, 4, 3)
head_fn: classifier head
drop_rate (float): Head dropout rate
drop_path_rate (float): Stochastic depth rate. Default: 0.
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
depths=(3, 3, 9, 3),
dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
norm_layer=nn.BatchNorm2d,
act_layer=nn.GELU,
mlp_ratios=(4, 4, 4, 3),
head_fn=MlpClassifierHead,
drop_rate=0.,
drop_path_rate=0.,
ls_init_value=1e-6,
):
super().__init__()
num_stage = len(depths)
if not isinstance(token_mixers, (list, tuple)):
token_mixers = [token_mixers] * num_stage
if not isinstance(mlp_ratios, (list, tuple)):
mlp_ratios = [mlp_ratios] * num_stage
self.num_classes = num_classes
self.global_pool = global_pool
self.drop_rate = drop_rate
self.feature_info = []
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
norm_layer(dims[0])
)
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
prev_chs = dims[0]
curr_stride = 4
dilation = 1
# feature resolution stages, each consisting of multiple residual blocks
self.stages = nn.Sequential()
for i in range(num_stage):
stride = 2 if curr_stride == 2 or i > 0 else 1
if curr_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
curr_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
out_chs = dims[i]
self.stages.append(MetaNeXtStage(
prev_chs,
out_chs,
stride=stride if i > 0 else 1,
dilation=(first_dilation, dilation),
depth=depths[i],
drop_path_rates=dp_rates[i],
ls_init_value=ls_init_value,
act_layer=act_layer,
token_mixer=token_mixers[i],
norm_layer=norm_layer,
mlp_ratio=mlp_ratios[i],
))
prev_chs = out_chs
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.num_features = prev_chs
if self.num_classes > 0:
if issubclass(head_fn, MlpClassifierHead):
assert self.global_pool, 'Cannot disable global pooling with MLP head present.'
self.head = head_fn(self.num_features, num_classes, pool_type=self.global_pool, drop=drop_rate)
else:
if self.global_pool:
self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True)
else:
self.head = nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
]
)
@torch.jit.ignore
def get_classifier(self):
return self.head.fc2
def reset_classifier(self, num_classes=0, global_pool=None, head_fn=MlpClassifierHead):
if global_pool is not None:
self.global_pool = global_pool
if num_classes > 0:
if issubclass(head_fn, MlpClassifierHead):
assert self.global_pool, 'Cannot disable global pooling with MLP head present.'
self.head = head_fn(self.num_features, num_classes, pool_type=self.global_pool, drop=self.drop_rate)
else:
if self.global_pool:
self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True)
else:
self.head = nn.Identity()
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def no_weight_decay(self):
return set()
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if pre_logits:
if hasattr(self.head, 'global_pool'):
x = self.head.global_pool(x)
return x
return self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc2',
**kwargs
}
default_cfgs = generate_default_cfgs({
'inception_next_tiny.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_tiny.pth',
),
'inception_next_small.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_small.pth',
),
'inception_next_base.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base.pth',
crop_pct=0.95,
),
'inception_next_base.sail_in1k_384': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
})
def _create_inception_next(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
MetaNeXt, variant, pretrained,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs,
)
return model
@register_model
def inception_next_tiny(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 9, 3), dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def inception_next_small(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 27, 3), dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def inception_next_base(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_base', pretrained=pretrained, **dict(model_args, **kwargs))
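# Usage sketch (illustrative, not part of the original file): classification logits and pooled
# pre-logit features for the tiny variant (final stage width 768).
#
#   import timm, torch
#   model = timm.create_model('inception_next_tiny', pretrained=False, num_classes=1000)
#   x = torch.randn(1, 3, 224, 224)
#   logits = model(x)                                                         # (1, 1000)
#   pooled = model.forward_head(model.forward_features(x), pre_logits=True)   # (1, 768)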
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/vision_transformer_sam.py | """ Vision Transformer (ViT) in PyTorch
A PyTorch implementation of Vision Transformers as described in:
'Exploring Plain Vision Transformer Backbones for Object Detection'
- https://arxiv.org/abs/2203.16527
'Segment Anything Model (SAM)'
- https://github.com/facebookresearch/segment-anything/
"""
import logging
from functools import partial
from typing import Callable, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.jit import Final
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead,\
Format, resample_abs_pos_embed_nhwc, RotaryEmbeddingCat, apply_rot_embed_cat, to_2tuple, use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
from ._features_fx import register_notrace_function
# model_registry will add each entrypoint fn to this
__all__ = ['VisionTransformerSAM']
_logger = logging.getLogger(__name__)
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
register_notrace_function(get_rel_pos)
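# Example (illustrative sketch): for equal query/key sizes no interpolation is needed and the
# result simply gathers one (head_dim,) embedding per relative offset.
#
#   import torch
#   rel_pos = torch.randn(2 * 14 - 1, 64)              # (2 * max(q_size, k_size) - 1, head_dim)
#   assert get_rel_pos(14, 14, rel_pos).shape == (14, 14, 64)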
def get_decomposed_rel_pos_bias(
q: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: Tuple[int, int],
k_size: Tuple[int, int],
) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
Returns:
bias (Tensor): attention bias to add to attention map
"""
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
attn_bias = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
return attn_bias.reshape(-1, q_h * q_w, k_h * k_w)
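# Example (illustrative sketch): the decomposed bias is built separately along the height and
# width axes and flattened to one (q_h*q_w, k_h*k_w) map per (batch * head) entry.
#
#   q = torch.randn(2, 7 * 7, 32)                      # (B * num_heads, q_h * q_w, head_dim)
#   rel_h = torch.randn(2 * 7 - 1, 32)
#   rel_w = torch.randn(2 * 7 - 1, 32)
#   assert get_decomposed_rel_pos_bias(q, rel_h, rel_w, (7, 7), (7, 7)).shape == (2, 49, 49)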
class Attention(nn.Module):
fused_attn: Final[bool]
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
qk_norm=False,
attn_drop=0.,
proj_drop=0.,
norm_layer=nn.LayerNorm,
use_rel_pos: bool = False,
input_size: Optional[Tuple[int, int]] = None,
rope: Optional[nn.Module] = None,
):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
assert rope is None
assert (
input_size is not None
), "Input size must be provided if using relative positional encoding."
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(
2 * input_size[0] - 1, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(
2 * input_size[1] - 1, self.head_dim))
self.rope = rope
def forward(self, x):
B, H, W, _ = x.shape
N = H * W
x = x.reshape(B, N, -1)
qkv = self.qkv(x).view(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# qkv with shape (3, B, nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, N, -1).unbind(0)
# q, k, v with shape (B * nHead, H * W, C)
q, k = self.q_norm(q), self.k_norm(k)
if self.use_rel_pos:
attn_bias = get_decomposed_rel_pos_bias(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
else:
attn_bias = None
if self.rope is not None:
rope = self.rope.get_embed()
q = apply_rot_embed_cat(q, rope).type_as(v)
k = apply_rot_embed_cat(k, rope).type_as(v)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if attn_bias is not None:
attn = attn + attn_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.view(B, self.num_heads, N, -1).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = x.view(B, H, W, -1)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=True,
qk_norm=False,
proj_drop=0.,
attn_drop=0.,
init_values=None,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
mlp_layer=Mlp,
use_rel_pos=False,
window_size=0,
input_size=None,
rope=None,
):
super().__init__()
self.window_size = window_size
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
use_rel_pos=use_rel_pos,
input_size=input_size if window_size == 0 else (window_size, window_size),
rope=rope,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = mlp_layer(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
B, H, W, _ = x.shape
shortcut = x
x = self.norm1(x)
# Window partition
pad_hw: Optional[Tuple[int, int]] = None
if self.window_size > 0:
x, pad_hw = window_partition(x, self.window_size)
x = self.drop_path1(self.ls1(self.attn(x)))
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, (H, W), pad_hw)
x = shortcut + x
x = x.reshape(B, H * W, -1) # MLP is faster for N, L, C tensor
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
x = x.reshape(B, H, W, -1)
return x
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""
Partition into non-overlapping windows with padding if needed.
Args:
x (tensor): input tokens with [B, H, W, C].
window_size (int): window size.
Returns:
windows: windows after partition with [B * num_windows, window_size, window_size, C].
(Hp, Wp): padded height and width before partition
"""
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(
windows: torch.Tensor, window_size: int, hw: Tuple[int, int], pad_hw: Optional[Tuple[int, int]] = None,
) -> torch.Tensor:
"""
Window unpartition back into original sequences, removing padding.
Args:
windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
window_size (int): window size.
pad_hw (Tuple): padded height and width (Hp, Wp).
hw (Tuple): original height and width (H, W) before padding.
Returns:
x: unpartitioned sequences with [B, H, W, C].
"""
Hp, Wp = pad_hw if pad_hw is not None else hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
x = x[:, :H, :W, :].contiguous()
return x
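# Example (illustrative sketch): partition and unpartition are exact inverses when no padding is
# required (70 is a multiple of the window size 14, giving 5 x 5 = 25 windows per image).
#
#   import torch
#   x = torch.randn(1, 70, 70, 256)
#   wins, (Hp, Wp) = window_partition(x, 14)
#   assert wins.shape == (25, 14, 14, 256) and (Hp, Wp) == (70, 70)
#   assert torch.equal(window_unpartition(wins, 14, (70, 70), (Hp, Wp)), x)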
class VisionTransformerSAM(nn.Module):
""" Vision Transformer for Segment-Anything Model(SAM)
A PyTorch impl of : `Exploring Plain Vision Transformer Backbones for Object Detection` or `Segment Anything Model (SAM)`
- https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
num_classes: int = 768,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
qk_norm: bool = False,
init_values: Optional[float] = None,
pre_norm: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
patch_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init: str = '',
embed_layer: Callable = partial(
PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False),
norm_layer: Optional[Callable] = nn.LayerNorm,
act_layer: Optional[Callable] = nn.GELU,
block_fn: Callable = Block,
mlp_layer: Callable = Mlp,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
use_rope: bool = False,
window_size: int = 14,
global_attn_indexes: Tuple[int, ...] = (),
neck_chans: int = 256,
global_pool: str = 'avg',
head_hidden_size: Optional[int] = None,
ref_feat_shape: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None
):
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of image input channels.
num_classes: Number of classes for classification head.
embed_dim: Transformer embedding dimension.
depth: Depth of transformer.
num_heads: Number of attention heads.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: Enable bias for qkv projections if True.
init_values: Layer-scale init values (layer-scale enabled if not None).
drop_rate: Head dropout rate.
pos_drop_rate: Position embedding dropout rate.
attn_drop_rate: Attention dropout rate.
drop_path_rate: Stochastic depth rate.
weight_init: Weight initialization scheme.
embed_layer: Patch embedding layer.
norm_layer: Normalization layer.
act_layer: MLP activation layer.
block_fn: Transformer block layer.
use_abs_pos: If True, use absolute positional embeddings.
use_rel_pos: If True, add relative positional embeddings to the attention map.
use_rope: If True, add rotary position embeddings to q/k in attention block.
window_size: Window size for window attention blocks. If 0, window attention is not used.
global_attn_indexes: Indexes for blocks using global attention. Used when window_size > 0.
global_pool: Global pooling type.
head_hidden_size: If set, use NormMlpClassifierHead
ref_feat_shape: Tuple of reference feature shapes for ROPE, (global, local)
"""
super().__init__()
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
# num_features for consistency with other models
self.num_features = self.embed_dim = embed_dim
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=not pre_norm, # disable bias if pre-norm is used
)
grid_size = self.patch_embed.grid_size
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if patch_drop_rate > 0:
self.patch_drop = PatchDropout(
patch_drop_rate,
num_prefix_tokens=0,
)
else:
self.patch_drop = nn.Identity()
self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
if use_rope:
assert not use_rel_pos, "ROPE and relative pos embeddings should not be enabled at the same time"
if ref_feat_shape is not None:
assert len(ref_feat_shape) == 2
ref_feat_shape_global = to_2tuple(ref_feat_shape[0])
ref_feat_shape_window = to_2tuple(ref_feat_shape[1])
else:
ref_feat_shape_global = ref_feat_shape_window = None
self.rope_global = RotaryEmbeddingCat(
embed_dim // num_heads,
in_pixels=False,
feat_shape=grid_size,
ref_feat_shape=ref_feat_shape_global,
)
self.rope_window = RotaryEmbeddingCat(
embed_dim // num_heads,
in_pixels=False,
feat_shape=to_2tuple(window_size),
ref_feat_shape=ref_feat_shape_window,
)
else:
self.rope_global = None
self.rope_window = None
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.blocks = nn.Sequential(*[
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
init_values=init_values,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
mlp_layer=mlp_layer,
use_rel_pos=use_rel_pos,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=grid_size,
rope=self.rope_window if i not in global_attn_indexes else self.rope_global,
)
for i in range(depth)])
if neck_chans:
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
neck_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(neck_chans),
nn.Conv2d(
neck_chans,
neck_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(neck_chans),
)
self.num_features = neck_chans
else:
if head_hidden_size:
self.neck = nn.Identity()
else:
# should have a final norm with standard ClassifierHead
self.neck = LayerNorm2d(embed_dim)
neck_chans = embed_dim
# Classifier Head
if head_hidden_size:
self.head = NormMlpClassifierHead(
neck_chans,
num_classes,
hidden_size=head_hidden_size,
pool_type=global_pool,
drop_rate=drop_rate,
)
else:
self.head = ClassifierHead(
neck_chans,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'dist_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes=0, global_pool=None):
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.patch_embed(x)
if self.pos_embed is not None:
# dynamically resize abs pos embedding if needed
x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3])
x = self.pos_drop(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.neck(x.permute(0, 3, 1, 2))
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(
state_dict,
model,
):
""" Remap SAM checkpoints -> timm """
sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict
out_dict = {}
for k, v in state_dict.items():
if k.startswith('image_encoder.'):
k = k[14:]
k = k.replace('mlp.lin', 'mlp.fc')
else:
if sam_checkpoint:
continue
out_dict[k] = v
return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# Segment-Anything Model (SAM) pretrained - https://github.com/facebookresearch/segment-anything (no classifier head, for fine-tune/features only)
'samvit_base_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_large_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_huge_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_base_patch16_224': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=1000,
input_size=(3, 224, 224), crop_pct=0.9),
})
def _create_vision_transformer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError(
'features_only not implemented for Vision Transformer models.')
return build_model_with_cfg(
VisionTransformerSAM,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
@register_model
def samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-B/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-L/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-H/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-B/16 based on samvit arch
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11],
window_size=14, use_rel_pos=True, use_abs_pos=False, img_size=224, neck_chans=None,
)
model = _create_vision_transformer(
'samvit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
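# Usage sketch (illustrative, not part of the original file): the SAM image encoder is typically
# used as a feature extractor; the neck projects to 256 channels at stride 16 (1024 -> 64x64).
#
#   import timm, torch
#   model = timm.create_model('samvit_base_patch16', pretrained=False, num_classes=0)
#   feats = model.forward_features(torch.randn(1, 3, 1024, 1024))
#   assert feats.shape == (1, 256, 64, 64)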
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_prune.py | import os
import pkgutil
from copy import deepcopy
from torch import nn as nn
from timm.layers import Conv2dSame, BatchNormAct2d, Linear
__all__ = ['extract_layer', 'set_layer', 'adapt_model_from_string', 'adapt_model_from_file']
def extract_layer(model, layer):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
if not hasattr(model, 'module') and layer[0] == 'module':
layer = layer[1:]
for l in layer:
if hasattr(module, l):
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
else:
return module
return module
def set_layer(model, layer, val):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
lst_index = 0
module2 = module
for l in layer:
if hasattr(module2, l):
if not l.isdigit():
module2 = getattr(module2, l)
else:
module2 = module2[int(l)]
lst_index += 1
lst_index -= 1
for l in layer[:lst_index]:
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
l = layer[lst_index]
setattr(module, l, val)
def adapt_model_from_string(parent_module, model_string):
separator = '***'
state_dict = {}
lst_shape = model_string.split(separator)
for k in lst_shape:
k = k.split(':')
key = k[0]
shape = k[1][1:-1].split(',')
if shape[0] != '':
state_dict[key] = [int(i) for i in shape]
new_module = deepcopy(parent_module)
for n, m in parent_module.named_modules():
old_module = extract_layer(parent_module, n)
if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame):
if isinstance(old_module, Conv2dSame):
conv = Conv2dSame
else:
conv = nn.Conv2d
s = state_dict[n + '.weight']
in_channels = s[1]
out_channels = s[0]
g = 1
if old_module.groups > 1:
in_channels = out_channels
g = in_channels
new_conv = conv(
in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size,
bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation,
groups=g, stride=old_module.stride)
set_layer(new_module, n, new_conv)
elif isinstance(old_module, BatchNormAct2d):
new_bn = BatchNormAct2d(
state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
affine=old_module.affine, track_running_stats=True)
new_bn.drop = old_module.drop
new_bn.act = old_module.act
set_layer(new_module, n, new_bn)
elif isinstance(old_module, nn.BatchNorm2d):
new_bn = nn.BatchNorm2d(
num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
affine=old_module.affine, track_running_stats=True)
set_layer(new_module, n, new_bn)
elif isinstance(old_module, nn.Linear):
# FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer?
num_features = state_dict[n + '.weight'][1]
new_fc = Linear(
in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None)
set_layer(new_module, n, new_fc)
if hasattr(new_module, 'num_features'):
new_module.num_features = num_features
new_module.eval()
parent_module.eval()
return new_module
def adapt_model_from_file(parent_module, model_variant):
adapt_data = pkgutil.get_data(__name__, os.path.join('_pruned', model_variant + '.txt'))
return adapt_model_from_string(parent_module, adapt_data.decode('utf-8').strip())
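# NOTE: illustrative sketch of the adaptation string format consumed above: a '***'-separated
# list of 'param_name:[dim0, dim1, ...]' entries, one per prunable layer of the parent model.
# The toy module and shapes below are hypothetical.
def _example_adapt_from_string():  # pragma: no cover - illustrative only
    parent = nn.Sequential()
    parent.add_module('conv', nn.Conv2d(3, 64, kernel_size=3))
    parent.add_module('bn', nn.BatchNorm2d(64))
    # shrink the conv/bn pair from 64 to 32 output channels
    desc = 'conv.weight:[32, 3, 3, 3]***bn.weight:[32]'
    return adapt_model_from_string(parent, desc)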
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/vision_transformer_relpos.py | """ Relative Position Vision Transformer (ViT) in PyTorch
NOTE: these models are experimental / WIP, expect changes
Hacked together by / Copyright 2022, Ross Wightman
"""
import logging
import math
from functools import partial
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.jit import Final
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import PatchEmbed, Mlp, DropPath, RelPosMlp, RelPosBias, use_fused_attn
from ._builder import build_model_with_cfg
from ._registry import generate_default_cfgs, register_model
__all__ = ['VisionTransformerRelPos'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
class RelPosAttention(nn.Module):
fused_attn: Final[bool]
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
attn_drop=0.,
proj_drop=0.,
norm_layer=nn.LayerNorm,
):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q = self.q_norm(q)
k = self.k_norm(k)
if self.fused_attn:
if self.rel_pos is not None:
attn_bias = self.rel_pos.get_bias()
elif shared_rel_pos is not None:
attn_bias = shared_rel_pos
else:
attn_bias = None
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if self.rel_pos is not None:
attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
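# NOTE (illustrative): when `shared_rel_pos` is passed it is used directly as an additive
# attention bias, so it must broadcast against the (B, num_heads, N, N) attention logits
# computed above; in this file it comes from the model-level shared RelPosMlp/RelPosBias.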
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class RelPosBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
init_values=None,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = RelPosAttention(
dim,
num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class ResPostRelPosBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
init_values=None,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.init_values = init_values
self.attn = RelPosAttention(
dim,
num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.init_weights()
def init_weights(self):
# NOTE this init overrides that base model init with specific changes for the block type
if self.init_values is not None:
nn.init.constant_(self.norm1.weight, self.init_values)
nn.init.constant_(self.norm2.weight, self.init_values)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
return x
class VisionTransformerRelPos(nn.Module):
""" Vision Transformer w/ Relative Position Bias
    Differing from the classic ViT, this impl
* uses relative position index (swin v1 / beit) or relative log coord + mlp (swin v2) pos embed
* defaults to no class token (can be enabled)
* defaults to global avg pool for head (can be changed)
* layer-scale (residual branch gain) enabled
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='avg',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
qk_norm=False,
init_values=1e-6,
class_token=False,
fc_norm=False,
rel_pos_type='mlp',
rel_pos_dim=None,
shared_rel_pos=False,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
weight_init='skip',
embed_layer=PatchEmbed,
norm_layer=None,
act_layer=None,
block_fn=RelPosBlock
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
global_pool (str): type of global pooling for final sequence (default: 'avg')
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_norm (bool): Enable normalization of query and key in attention
init_values: (float): layer-scale init values
class_token (bool): use class token (default: False)
fc_norm (bool): use pre classifier norm instead of pre-pool
            rel_pos_type (str): type of relative position ('mlp' for MLP-based, otherwise bias table)
            rel_pos_dim (int): hidden dim override for the relative position MLP
shared_rel_pos (bool): share relative pos across all blocks
drop_rate (float): dropout rate
proj_drop_rate (float): projection dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
weight_init (str): weight init scheme
embed_layer (nn.Module): patch embedding layer
norm_layer: (nn.Module): normalization layer
act_layer: (nn.Module): MLP activation layer
"""
super().__init__()
assert global_pool in ('', 'avg', 'token')
assert class_token or global_pool != 'token'
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
feat_size = self.patch_embed.grid_size
rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens)
if rel_pos_type.startswith('mlp'):
if rel_pos_dim:
rel_pos_args['hidden_dim'] = rel_pos_dim
if 'swin' in rel_pos_type:
rel_pos_args['mode'] = 'swin'
rel_pos_cls = partial(RelPosMlp, **rel_pos_args)
else:
rel_pos_cls = partial(RelPosBias, **rel_pos_args)
self.shared_rel_pos = None
if shared_rel_pos:
self.shared_rel_pos = rel_pos_cls(num_heads=num_heads)
# NOTE shared rel pos currently mutually exclusive w/ per-block, but could support both...
rel_pos_cls = None
self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
init_values=init_values,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
)
for i in range(depth)])
self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity()
# Classifier Head
self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if weight_init != 'skip':
self.init_weights(weight_init)
def init_weights(self, mode=''):
assert mode in ('jax', 'moco', '')
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
        # FIXME weight init scheme using PyTorch defaults currently
#named_apply(get_init_weights_vit(mode, head_bias), self)
@torch.jit.ignore
def no_weight_decay(self):
return {'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes: int, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, shared_rel_pos=shared_rel_pos)
else:
x = blk(x, shared_rel_pos=shared_rel_pos)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(VisionTransformerRelPos, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'vit_relpos_base_patch32_plus_rpn_256.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256)),
'vit_relpos_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240)),
'vit_relpos_small_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth',
hf_hub_id='timm/'),
'vit_relpos_medium_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth',
hf_hub_id='timm/'),
'vit_srelpos_small_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth',
hf_hub_id='timm/'),
'vit_srelpos_medium_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth',
hf_hub_id='timm/'),
'vit_relpos_medium_patch16_cls_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_cls_224.untrained': _cfg(),
'vit_relpos_base_patch16_clsgap_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth',
hf_hub_id='timm/'),
'vit_relpos_small_patch16_rpn_224.untrained': _cfg(),
'vit_relpos_medium_patch16_rpn_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_rpn_224.untrained': _cfg(),
})
@register_model
def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/32+) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16+) w/ relative log-coord position, no class token
"""
model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_srelpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False,
rel_pos_dim=384, shared_rel_pos=True)
model = _create_vision_transformer_relpos(
'vit_srelpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False,
rel_pos_dim=512, shared_rel_pos=True)
model = _create_vision_transformer_relpos(
'vit_srelpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-M/16) w/ relative log-coord position, class token present
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False,
rel_pos_dim=256, class_token=True, global_pool='token')
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, class token present
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, class_token=True, global_pool='token')
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, class token present
NOTE this config is a bit of a mistake, class token was enabled but global avg-pool w/ fc-norm was not disabled
Leaving here for comparisons w/ a future re-train as it performs quite well.
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
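# NOTE: illustrative usage sketch, assuming the standard `timm.create_model` factory and
# the variants registered above; shapes follow the 224x224 default cfgs.
def _example_relpos_usage():  # pragma: no cover - illustrative only
    import timm
    # per-block relative position bias MLP (default), no class token, avg-pooled head
    model = timm.create_model('vit_relpos_small_patch16_224', pretrained=False)
    model.eval()
    x = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        logits = model(x)  # (2, 1000) with the default classifier
        feats = model.forward_features(x)  # (2, 196, 384), no prefix tokens by default
    # shared variant: a single rel-pos module reused across all blocks
    shared = timm.create_model('vit_srelpos_small_patch16_224', pretrained=False)
    return logits, feats, shared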
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/xception_aligned.py | """Pytorch impl of Aligned Xception 41, 65, 71
This is a correct, from scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at
https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md
Hacked together by / Copyright 2020 Ross Wightman
"""
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import ClassifierHead, ConvNormAct, create_conv2d, get_norm_act_layer
from timm.layers.helpers import to_3tuple
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['XceptionAligned']
class SeparableConv2d(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=3,
stride=1,
dilation=1,
padding='',
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
):
super(SeparableConv2d, self).__init__()
self.kernel_size = kernel_size
self.dilation = dilation
# depthwise convolution
self.conv_dw = create_conv2d(
in_chs, in_chs, kernel_size, stride=stride,
padding=padding, dilation=dilation, depthwise=True)
self.bn_dw = norm_layer(in_chs)
self.act_dw = act_layer(inplace=True) if act_layer is not None else nn.Identity()
# pointwise convolution
self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1)
self.bn_pw = norm_layer(out_chs)
self.act_pw = act_layer(inplace=True) if act_layer is not None else nn.Identity()
def forward(self, x):
x = self.conv_dw(x)
x = self.bn_dw(x)
x = self.act_dw(x)
x = self.conv_pw(x)
x = self.bn_pw(x)
x = self.act_pw(x)
return x
class PreSeparableConv2d(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=3,
stride=1,
dilation=1,
padding='',
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
first_act=True,
):
super(PreSeparableConv2d, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
self.kernel_size = kernel_size
self.dilation = dilation
self.norm = norm_act_layer(in_chs, inplace=True) if first_act else nn.Identity()
# depthwise convolution
self.conv_dw = create_conv2d(
in_chs, in_chs, kernel_size, stride=stride,
padding=padding, dilation=dilation, depthwise=True)
# pointwise convolution
self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1)
def forward(self, x):
x = self.norm(x)
x = self.conv_dw(x)
x = self.conv_pw(x)
return x
class XceptionModule(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=1,
pad_type='',
start_with_relu=True,
no_skip=False,
act_layer=nn.ReLU,
norm_layer=None,
):
super(XceptionModule, self).__init__()
out_chs = to_3tuple(out_chs)
self.in_channels = in_chs
self.out_channels = out_chs[-1]
self.no_skip = no_skip
if not no_skip and (self.out_channels != self.in_channels or stride != 1):
self.shortcut = ConvNormAct(
in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, apply_act=False)
else:
self.shortcut = None
separable_act_layer = None if start_with_relu else act_layer
self.stack = nn.Sequential()
for i in range(3):
if start_with_relu:
self.stack.add_module(f'act{i + 1}', act_layer(inplace=i > 0))
self.stack.add_module(f'conv{i + 1}', SeparableConv2d(
in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type,
act_layer=separable_act_layer, norm_layer=norm_layer))
in_chs = out_chs[i]
def forward(self, x):
skip = x
x = self.stack(x)
if self.shortcut is not None:
skip = self.shortcut(skip)
if not self.no_skip:
x = x + skip
return x
class PreXceptionModule(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=1,
pad_type='',
no_skip=False,
act_layer=nn.ReLU,
norm_layer=None,
):
super(PreXceptionModule, self).__init__()
out_chs = to_3tuple(out_chs)
self.in_channels = in_chs
self.out_channels = out_chs[-1]
self.no_skip = no_skip
if not no_skip and (self.out_channels != self.in_channels or stride != 1):
self.shortcut = create_conv2d(in_chs, self.out_channels, 1, stride=stride)
else:
self.shortcut = nn.Identity()
self.norm = get_norm_act_layer(norm_layer, act_layer=act_layer)(in_chs, inplace=True)
self.stack = nn.Sequential()
for i in range(3):
self.stack.add_module(f'conv{i + 1}', PreSeparableConv2d(
in_chs,
out_chs[i],
3,
stride=stride if i == 2 else 1,
dilation=dilation,
padding=pad_type,
act_layer=act_layer,
norm_layer=norm_layer,
first_act=i > 0,
))
in_chs = out_chs[i]
def forward(self, x):
x = self.norm(x)
skip = x
x = self.stack(x)
if not self.no_skip:
x = x + self.shortcut(skip)
return x
class XceptionAligned(nn.Module):
"""Modified Aligned Xception
"""
def __init__(
self,
block_cfg,
num_classes=1000,
in_chans=3,
output_stride=32,
preact=False,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
drop_rate=0.,
global_pool='avg',
):
super(XceptionAligned, self).__init__()
assert output_stride in (8, 16, 32)
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
layer_args = dict(act_layer=act_layer, norm_layer=norm_layer)
self.stem = nn.Sequential(*[
ConvNormAct(in_chans, 32, kernel_size=3, stride=2, **layer_args),
create_conv2d(32, 64, kernel_size=3, stride=1) if preact else
ConvNormAct(32, 64, kernel_size=3, stride=1, **layer_args)
])
curr_dilation = 1
curr_stride = 2
self.feature_info = []
self.blocks = nn.Sequential()
module_fn = PreXceptionModule if preact else XceptionModule
for i, b in enumerate(block_cfg):
b['dilation'] = curr_dilation
if b['stride'] > 1:
name = f'blocks.{i}.stack.conv2' if preact else f'blocks.{i}.stack.act3'
self.feature_info += [dict(num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=name)]
next_stride = curr_stride * b['stride']
if next_stride > output_stride:
curr_dilation *= b['stride']
b['stride'] = 1
else:
curr_stride = next_stride
self.blocks.add_module(str(i), module_fn(**b, **layer_args))
self.num_features = self.blocks[-1].out_channels
self.feature_info += [dict(
num_chs=self.num_features, reduction=curr_stride, module='blocks.' + str(len(self.blocks) - 1))]
self.act = act_layer(inplace=True) if preact else nn.Identity()
self.head = ClassifierHead(
in_features=self.num_features,
num_classes=num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^blocks\.(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.act(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _xception(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
XceptionAligned,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, feature_cls='hook'),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10),
'crop_pct': 0.903, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'stem.0.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'xception65.ra3_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.94,
),
'xception41.tf_in1k': _cfg(hf_hub_id='timm/'),
'xception65.tf_in1k': _cfg(hf_hub_id='timm/'),
'xception71.tf_in1k': _cfg(hf_hub_id='timm/'),
'xception41p.ra3_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.94,
),
'xception65p.ra3_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.94,
),
})
@register_model
def xception41(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-41
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 8),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False),
]
model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1))
return _xception('xception41', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def xception65(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-65
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 16),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False),
]
model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1))
return _xception('xception65', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def xception71(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-71
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=1),
dict(in_chs=256, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=1),
dict(in_chs=728, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 16),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False),
]
model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1))
return _xception('xception71', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def xception41p(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-41 w/ Pre-Act
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 8),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), no_skip=True, stride=1),
]
model_args = dict(block_cfg=block_cfg, preact=True, norm_layer=nn.BatchNorm2d)
return _xception('xception41p', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def xception65p(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-65 w/ Pre-Act
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 16),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True),
]
model_args = dict(
block_cfg=block_cfg, preact=True, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1))
return _xception('xception65p', pretrained=pretrained, **dict(model_args, **kwargs))
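# NOTE: illustrative usage sketch, assuming the standard `timm.create_model` factory;
# passing `features_only` / `output_stride` through the factory is an assumption here,
# relying on the hook-based feature extraction and output_stride handling configured above.
def _example_xception_usage():  # pragma: no cover - illustrative only
    import timm
    model = timm.create_model('xception65', pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 299, 299))  # (1, 1000)
    # dilated backbone for dense prediction: stages past the target stride become dilated convs
    backbone = timm.create_model('xception65', pretrained=False, features_only=True, output_stride=16)
    feats = backbone(torch.randn(1, 3, 299, 299))  # list of feature maps, deepest at stride 16
    return logits, feats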
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/pvt_v2.py | """ Pyramid Vision Transformer v2
@misc{wang2021pvtv2,
title={PVTv2: Improved Baselines with Pyramid Vision Transformer},
author={Wenhai Wang and Enze Xie and Xiang Li and Deng-Ping Fan and Kaitao Song and Ding Liang and
Tong Lu and Ping Luo and Ling Shao},
year={2021},
eprint={2106.13797},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
Based on Apache 2.0 licensed code at https://github.com/whai362/PVT
Modifications and timm support by / Copyright 2022, Ross Wightman
"""
import math
from typing import Tuple, List, Callable, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, to_ntuple, trunc_normal_, LayerNorm, use_fused_attn
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['PyramidVisionTransformerV2']
class MlpWithDepthwiseConv(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.,
extra_relu=False,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.relu = nn.ReLU() if extra_relu else nn.Identity()
self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, 1, 1, bias=True, groups=hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x, feat_size: List[int]):
x = self.fc1(x)
B, N, C = x.shape
x = x.transpose(1, 2).view(B, C, feat_size[0], feat_size[1])
x = self.relu(x)
x = self.dwconv(x)
x = x.flatten(2).transpose(1, 2)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
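# NOTE (illustrative): shape contract for the block above - tokens x of shape (B, H*W, C_in)
# plus feat_size=(H, W); fc1 expands to the hidden dim, tokens are reshaped to
# (B, hidden, H, W) for the 3x3 depthwise conv, then flattened back to (B, H*W, C_out).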
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim,
num_heads=8,
sr_ratio=1,
linear_attn=False,
qkv_bias=True,
attn_drop=0.,
proj_drop=0.
):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
if not linear_attn:
self.pool = None
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
else:
self.sr = None
self.norm = None
self.act = None
else:
self.pool = nn.AdaptiveAvgPool2d(7)
self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1)
self.norm = nn.LayerNorm(dim)
self.act = nn.GELU()
def forward(self, x, feat_size: List[int]):
B, N, C = x.shape
H, W = feat_size
q = self.q(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
if self.pool is not None:
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.sr(self.pool(x)).reshape(B, C, -1).permute(0, 2, 1)
x = self.norm(x)
x = self.act(x)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
else:
if self.sr is not None:
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
x = self.norm(x)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
sr_ratio=1,
linear_attn=False,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
sr_ratio=sr_ratio,
linear_attn=linear_attn,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = MlpWithDepthwiseConv(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
extra_relu=linear_attn,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, feat_size: List[int]):
x = x + self.drop_path1(self.attn(self.norm1(x), feat_size))
x = x + self.drop_path2(self.mlp(self.norm2(x), feat_size))
return x
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
patch_size = to_2tuple(patch_size)
assert max(patch_size) > stride, "Set larger patch_size than stride"
self.patch_size = patch_size
self.proj = nn.Conv2d(
in_chans, embed_dim, patch_size,
stride=stride, padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x):
x = self.proj(x)
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
return x
class PyramidVisionTransformerStage(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
depth: int,
downsample: bool = True,
num_heads: int = 8,
sr_ratio: int = 1,
linear_attn: bool = False,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: Union[List[float], float] = 0.0,
norm_layer: Callable = LayerNorm,
):
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = OverlapPatchEmbed(
patch_size=3,
stride=2,
in_chans=dim,
embed_dim=dim_out,
)
else:
assert dim == dim_out
self.downsample = None
self.blocks = nn.ModuleList([Block(
dim=dim_out,
num_heads=num_heads,
sr_ratio=sr_ratio,
linear_attn=linear_attn,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
) for i in range(depth)])
self.norm = norm_layer(dim_out)
def forward(self, x):
# x is either B, C, H, W (if downsample) or B, H, W, C if not
if self.downsample is not None:
# input to downsample is B, C, H, W
x = self.downsample(x) # output B, H, W, C
B, H, W, C = x.shape
feat_size = (H, W)
x = x.reshape(B, -1, C)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(blk, x, feat_size)
else:
x = blk(x, feat_size)
x = self.norm(x)
x = x.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2).contiguous()
return x
class PyramidVisionTransformerV2(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
depths=(3, 4, 6, 3),
embed_dims=(64, 128, 256, 512),
num_heads=(1, 2, 4, 8),
sr_ratios=(8, 4, 2, 1),
mlp_ratios=(8., 8., 4., 4.),
qkv_bias=True,
linear=False,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm,
):
super().__init__()
self.num_classes = num_classes
assert global_pool in ('avg', '')
self.global_pool = global_pool
self.depths = depths
num_stages = len(depths)
mlp_ratios = to_ntuple(num_stages)(mlp_ratios)
num_heads = to_ntuple(num_stages)(num_heads)
sr_ratios = to_ntuple(num_stages)(sr_ratios)
        assert len(embed_dims) == num_stages
self.feature_info = []
self.patch_embed = OverlapPatchEmbed(
patch_size=7,
stride=4,
in_chans=in_chans,
embed_dim=embed_dims[0],
)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
cur = 0
prev_dim = embed_dims[0]
stages = []
for i in range(num_stages):
stages += [PyramidVisionTransformerStage(
dim=prev_dim,
dim_out=embed_dims[i],
depth=depths[i],
downsample=i > 0,
num_heads=num_heads[i],
sr_ratio=sr_ratios[i],
mlp_ratio=mlp_ratios[i],
linear_attn=linear,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)]
prev_dim = embed_dims[i]
cur += depths[i]
self.feature_info += [dict(num_chs=prev_dim, reduction=4 * 2**i, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# classification head
self.num_features = embed_dims[-1]
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
    def freeze_patch_emb(self):
        # freeze parameters explicitly; setting requires_grad on the Module itself has no effect
        for p in self.patch_embed.parameters():
            p.requires_grad = False
@torch.jit.ignore
def no_weight_decay(self):
return {}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed', # stem and embed
blocks=r'^stages\.(\d+)'
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('avg', '')
self.global_pool = global_pool
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x.mean(dim=(-1, -2))
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'patch_embed.proj.weight' in state_dict:
return state_dict # non-original checkpoint, no remapping needed
out_dict = {}
import re
for k, v in state_dict.items():
if k.startswith('patch_embed'):
k = k.replace('patch_embed1', 'patch_embed')
k = k.replace('patch_embed2', 'stages.1.downsample')
k = k.replace('patch_embed3', 'stages.2.downsample')
k = k.replace('patch_embed4', 'stages.3.downsample')
k = k.replace('dwconv.dwconv', 'dwconv')
        k = re.sub(r'block(\d+)\.(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.blocks.{x.group(2)}', k)
k = re.sub(r'^norm(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.norm', k)
out_dict[k] = v
return out_dict
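# Examples of the remapping above (left: original PVT v2 release naming targeted by the
# replacements/regexes, right: timm layout; the parameter suffixes are illustrative):
#   patch_embed1.proj.weight -> patch_embed.proj.weight
#   patch_embed2.proj.weight -> stages.1.downsample.proj.weight
#   block1.0.attn.q.weight   -> stages.0.blocks.0.attn.q.weight
#   norm1.weight             -> stages.0.norm.weight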
def _create_pvt2(variant, pretrained=False, **kwargs):
default_out_indices = tuple(range(4))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
PyramidVisionTransformerV2,
variant,
pretrained,
pretrained_filter_fn=_checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head', 'fixed_input_size': False,
**kwargs
}
default_cfgs = generate_default_cfgs({
'pvt_v2_b0.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b1.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b2.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b3.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b4.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b5.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b2_li.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def pvt_v2_b0(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(2, 2, 2, 2), embed_dims=(32, 64, 160, 256), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b1(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(2, 2, 2, 2), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b2(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b3(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(3, 4, 18, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b4(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(3, 8, 27, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b4', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b5(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(
depths=(3, 6, 40, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), mlp_ratios=(4, 4, 4, 4))
return _create_pvt2('pvt_v2_b5', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b2_li(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(
depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), linear=True)
return _create_pvt2('pvt_v2_b2_li', pretrained=pretrained, **dict(model_args, **kwargs))
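# NOTE: illustrative usage sketch, assuming the standard `timm.create_model` factory and
# the variants registered above; channel counts follow the pvt_v2_b0 cfg.
def _example_pvt_v2_usage():  # pragma: no cover - illustrative only
    import timm
    model = timm.create_model('pvt_v2_b0', pretrained=False)
    model.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(x)  # (1, 1000)
    # hierarchical features for dense prediction, exposed via feature_info
    backbone = timm.create_model('pvt_v2_b0', pretrained=False, features_only=True)
    feats = backbone(x)  # 4 maps at strides 4/8/16/32 with 32/64/160/256 channels
    return logits, feats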
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/deit.py | """ DeiT - Data-efficient Image Transformers
DeiT model defs and weights from https://github.com/facebookresearch/deit, original copyright below
paper: `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
paper: `DeiT III: Revenge of the ViT` - https://arxiv.org/abs/2204.07118
Modifications copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from functools import partial
from typing import Sequence, Union
import torch
from torch import nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import resample_abs_pos_embed
from timm.models.vision_transformer import VisionTransformer, trunc_normal_, checkpoint_filter_fn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['VisionTransformerDistilled'] # model_registry will add each entrypoint fn to this
class VisionTransformerDistilled(VisionTransformer):
""" Vision Transformer w/ Distillation Token and Head
Distillation token & head support for `DeiT: Data-efficient Image Transformers`
- https://arxiv.org/abs/2012.12877
"""
def __init__(self, *args, **kwargs):
weight_init = kwargs.pop('weight_init', '')
super().__init__(*args, **kwargs, weight_init='skip')
assert self.global_pool in ('token',)
self.num_prefix_tokens = 2
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.pos_embed = nn.Parameter(
torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim))
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
self.init_weights(weight_init)
def init_weights(self, mode=''):
trunc_normal_(self.dist_token, std=.02)
super().init_weights(mode=mode)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed|dist_token',
blocks=[
(r'^blocks\.(\d+)', None),
(r'^norm', (99999,))] # final norm w/ last block
)
@torch.jit.ignore
def get_classifier(self):
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
def _pos_embed(self, x):
if self.dynamic_img_size:
B, H, W, C = x.shape
pos_embed = resample_abs_pos_embed(
self.pos_embed,
(H, W),
num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens,
)
x = x.view(B, -1, C)
else:
pos_embed = self.pos_embed
if self.no_embed_class:
# deit-3, updated JAX (big vision)
# position embedding does not overlap with class token, add then concat
x = x + pos_embed
x = torch.cat((
self.cls_token.expand(x.shape[0], -1, -1),
self.dist_token.expand(x.shape[0], -1, -1),
x),
dim=1)
else:
# original timm, JAX, and deit vit impl
# pos_embed has entry for class token, concat then add
x = torch.cat((
self.cls_token.expand(x.shape[0], -1, -1),
self.dist_token.expand(x.shape[0], -1, -1),
x),
dim=1)
x = x + pos_embed
return self.pos_drop(x)
def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
x, x_dist = x[:, 0], x[:, 1]
if pre_logits:
return (x + x_dist) / 2
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
            # during standard train / finetune and at inference, average the two classifier predictions
return (x + x_dist) / 2
def _create_deit(variant, pretrained=False, distilled=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model_cls = VisionTransformerDistilled if distilled else VisionTransformer
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
pretrained_filter_fn=partial(checkpoint_filter_fn, adapt_layer_scale=True),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# deit models (FB weights)
'deit_tiny_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'),
'deit_small_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'),
'deit_base_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'),
'deit_base_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit_tiny_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth',
classifier=('head', 'head_dist')),
'deit_small_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth',
classifier=('head', 'head_dist')),
'deit_base_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth',
classifier=('head', 'head_dist')),
'deit_base_distilled_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth',
input_size=(3, 384, 384), crop_pct=1.0,
classifier=('head', 'head_dist')),
'deit3_small_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth'),
'deit3_small_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_medium_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth'),
'deit3_base_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth'),
'deit3_base_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_large_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth'),
'deit3_large_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_huge_patch14_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth'),
'deit3_small_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth',
crop_pct=1.0),
'deit3_small_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_medium_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth',
crop_pct=1.0),
'deit3_base_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth',
crop_pct=1.0),
'deit3_base_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_large_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth',
crop_pct=1.0),
'deit3_large_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_huge_patch14_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_21k_v1.pth',
crop_pct=1.0),
})
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
model = _create_deit('deit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
model = _create_deit('deit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit('deit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit('deit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
model = _create_deit(
'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
model = _create_deit(
'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit(
'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit(
'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit3_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 small model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_small_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 small model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 medium model @ 224x224 (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_large_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 large model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_large_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 large model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_huge_patch14_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
'deit3_small_patch16_224_in21ft1k': 'deit3_small_patch16_224.fb_in22k_ft_in1k',
'deit3_small_patch16_384_in21ft1k': 'deit3_small_patch16_384.fb_in22k_ft_in1k',
'deit3_medium_patch16_224_in21ft1k': 'deit3_medium_patch16_224.fb_in22k_ft_in1k',
'deit3_base_patch16_224_in21ft1k': 'deit3_base_patch16_224.fb_in22k_ft_in1k',
'deit3_base_patch16_384_in21ft1k': 'deit3_base_patch16_384.fb_in22k_ft_in1k',
'deit3_large_patch16_224_in21ft1k': 'deit3_large_patch16_224.fb_in22k_ft_in1k',
'deit3_large_patch16_384_in21ft1k': 'deit3_large_patch16_384.fb_in22k_ft_in1k',
'deit3_huge_patch14_224_in21ft1k': 'deit3_huge_patch14_224.fb_in22k_ft_in1k'
})
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/hub.py | from ._hub import *
import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/resnest.py | """ ResNeSt Models
Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang
Modified for torchscript compat, and consistency with timm by Ross Wightman
"""
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SplitAttn
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .resnet import ResNet
class ResNestBottleneck(nn.Module):
"""ResNet Bottleneck
"""
# pylint: disable=unused-argument
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
radix=1,
cardinality=1,
base_width=64,
avd=False,
avd_first=False,
is_first=False,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None,
):
super(ResNestBottleneck, self).__init__()
assert reduce_first == 1 # not supported
assert attn_layer is None # not supported
assert aa_layer is None # TODO not yet supported
assert drop_path is None # TODO not yet supported
group_width = int(planes * (base_width / 64.)) * cardinality
first_dilation = first_dilation or dilation
if avd and (stride > 1 or is_first):
avd_stride = stride
stride = 1
else:
avd_stride = 0
self.radix = radix
self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
self.bn1 = norm_layer(group_width)
self.act1 = act_layer(inplace=True)
self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None
if self.radix >= 1:
self.conv2 = SplitAttn(
group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block)
self.bn2 = nn.Identity()
self.drop_block = nn.Identity()
self.act2 = nn.Identity()
else:
self.conv2 = nn.Conv2d(
group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, bias=False)
self.bn2 = norm_layer(group_width)
self.drop_block = drop_block() if drop_block is not None else nn.Identity()
self.act2 = act_layer(inplace=True)
self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None
self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm_layer(planes*4)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
def zero_init_last(self):
if getattr(self.bn3, 'weight', None) is not None:
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
if self.avd_first is not None:
out = self.avd_first(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.drop_block(out)
out = self.act2(out)
if self.avd_last is not None:
out = self.avd_last(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out += shortcut
out = self.act3(out)
return out
def _create_resnest(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ResNet,
variant,
pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'resnest14d.gluon_in1k': _cfg(hf_hub_id='timm/'),
'resnest26d.gluon_in1k': _cfg(hf_hub_id='timm/'),
'resnest50d.in1k': _cfg(hf_hub_id='timm/'),
'resnest101e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8)),
'resnest200e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'),
'resnest269e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'),
'resnest50d_4s2x40d.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'resnest50d_1s4x24d.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic')
})
@register_model
def resnest14d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-14d model. Weights ported from GluonCV.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[1, 1, 1, 1],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest14d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest26d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-26d model. Weights ported from GluonCV.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[2, 2, 2, 2],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest26d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest50d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
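# Usage sketch for the ResNeSt entrypoints (illustrative; assumes torch and timm are
# importable; pretrained=True would additionally download the weights configured above):
#
#   >>> import torch, timm
#   >>> model = timm.create_model('resnest50d', pretrained=False)
#   >>> model(torch.randn(1, 3, 224, 224)).shape
#   torch.Size([1, 1000])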
@register_model
def resnest101e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 23, 3],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest101e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest200e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 24, 36, 3],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest200e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest269e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 30, 48, 8],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest269e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d_4s2x40d(pretrained=False, **kwargs) -> ResNet:
"""ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2,
block_args=dict(radix=4, avd=True, avd_first=True))
return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d_1s4x24d(pretrained=False, **kwargs) -> ResNet:
"""ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4,
block_args=dict(radix=1, avd=True, avd_first=True))
return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/xcit.py | """ Cross-Covariance Image Transformer (XCiT) in PyTorch
Paper:
- https://arxiv.org/abs/2106.09681
Same as the official implementation, with some minor adaptations, original copyright below
- https://github.com/facebookresearch/xcit/blob/master/xcit.py
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import math
from functools import partial
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, to_2tuple
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
from .cait import ClassAttn
from .vision_transformer import Mlp
__all__ = ['Xcit'] # model_registry will add each entrypoint fn to this
@register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method
class PositionalEncodingFourier(nn.Module):
"""
Positional encoding relying on a fourier kernel matching the one used in the "Attention is all you Need" paper.
Based on the official XCiT code
- https://github.com/facebookresearch/xcit/blob/master/xcit.py
"""
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
self.eps = 1e-6
def forward(self, B: int, H: int, W: int):
device = self.token_projection.weight.device
y_embed = torch.arange(1, H+1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W)
x_embed = torch.arange(1, W+1, dtype=torch.float32, device=device).repeat(1, H, 1)
y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos)
return pos.repeat(B, 1, 1, 1) # (B, C, H, W)
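# Shape sketch (illustrative): the module builds a sinusoidal grid for a given (H, W)
# token layout and projects it to the embedding dim; the caller later flattens the map
# into per-token position embeddings.
#
#   >>> pe = PositionalEncodingFourier(dim=384)
#   >>> pe(2, 14, 14).shape   # (B, C, H, W)
#   torch.Size([2, 384, 14, 14])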
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution + batch norm"""
return torch.nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_planes)
)
class ConvPatchEmbed(nn.Module):
"""Image to Patch Embedding using multiple convolutional layers"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU):
super().__init__()
img_size = to_2tuple(img_size)
num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if patch_size == 16:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 8, 2),
act_layer(),
conv3x3(embed_dim // 8, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
act_layer(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
elif patch_size == 8:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
act_layer(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
else:
            raise ValueError('For convolutional projection, patch size has to be in [8, 16]')
def forward(self, x):
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
x = x.flatten(2).transpose(1, 2) # (B, N, C)
return x, (Hp, Wp)
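# Shape sketch (illustrative): a 224x224 image with patch_size=16 is reduced by four
# stride-2 convs to a 14x14 grid of tokens.
#
#   >>> embed = ConvPatchEmbed(img_size=224, patch_size=16, embed_dim=384)
#   >>> tokens, (Hp, Wp) = embed(torch.randn(1, 3, 224, 224))
#   >>> tokens.shape, (Hp, Wp)
#   (torch.Size([1, 196, 384]), (14, 14))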
class LPI(nn.Module):
"""
Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the
implicit communication performed by the block diagonal scatter attention. Implemented using 2 layers of separable
3x3 convolutions with GeLU and BatchNorm2d
"""
def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3):
super().__init__()
out_features = out_features or in_features
padding = kernel_size // 2
self.conv1 = torch.nn.Conv2d(
in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features)
self.act = act_layer()
self.bn = nn.BatchNorm2d(in_features)
self.conv2 = torch.nn.Conv2d(
in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features)
def forward(self, x, H: int, W: int):
B, N, C = x.shape
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.conv1(x)
x = self.act(x)
x = self.bn(x)
x = self.conv2(x)
x = x.reshape(B, C, N).permute(0, 2, 1)
return x
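# Shape sketch (illustrative): LPI operates on the (B, N, C) token sequence but reshapes
# it back onto the (H, W) grid internally for the depthwise 3x3 convs.
#
#   >>> lpi = LPI(in_features=64)
#   >>> lpi(torch.randn(2, 196, 64), 14, 14).shape
#   torch.Size([2, 196, 64])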
class ClassAttentionBlock(nn.Module):
"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
eta=1.,
tokens_norm=False,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = ClassAttn(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)
if eta is not None: # LayerScale Initialization (no layerscale when None)
self.gamma1 = nn.Parameter(eta * torch.ones(dim))
self.gamma2 = nn.Parameter(eta * torch.ones(dim))
else:
self.gamma1, self.gamma2 = 1.0, 1.0
# See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721
self.tokens_norm = tokens_norm
def forward(self, x):
x_norm1 = self.norm1(x)
x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1)
x = x + self.drop_path(self.gamma1 * x_attn)
if self.tokens_norm:
x = self.norm2(x)
else:
x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1)
x_res = x
cls_token = x[:, 0:1]
cls_token = self.gamma2 * self.mlp(cls_token)
x = torch.cat([cls_token, x[:, 1:]], dim=1)
x = x_res + self.drop_path(x)
return x
class XCA(nn.Module):
""" Cross-Covariance Attention (XCA)
Operation where the channels are updated using a weighted sum. The weights are obtained from the (softmax
normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h)
"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
# Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N)
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
# Paper section 3.2 l2-Normalization and temperature scaling
q = torch.nn.functional.normalize(q, dim=-1)
k = torch.nn.functional.normalize(k, dim=-1)
attn = (q @ k.transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
# (B, H, C', N), permute -> (B, N, H, C')
x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@torch.jit.ignore
def no_weight_decay(self):
return {'temperature'}
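# Shape sketch (illustrative): unlike token self-attention, the attention map in XCA is
# (head_dim x head_dim) per head, so its cost grows with channels rather than with
# sequence length.
#
#   >>> xca = XCA(dim=64, num_heads=8)
#   >>> xca(torch.randn(2, 196, 64)).shape   # internal attn map is (2, 8, 8, 8)
#   torch.Size([2, 196, 64])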
class XCABlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
eta=1.,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm3 = norm_layer(dim)
self.local_mp = LPI(in_features=dim, act_layer=act_layer)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)
self.gamma1 = nn.Parameter(eta * torch.ones(dim))
self.gamma3 = nn.Parameter(eta * torch.ones(dim))
self.gamma2 = nn.Parameter(eta * torch.ones(dim))
def forward(self, x, H: int, W: int):
x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
# NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights
# See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721
x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W))
x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
return x
class Xcit(nn.Module):
"""
Based on timm and DeiT code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
https://github.com/facebookresearch/deit/
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
act_layer=None,
norm_layer=None,
cls_attn_layers=2,
use_pos_embed=True,
eta=1.,
tokens_norm=False,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
            mlp_ratio (float): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
drop_rate (float): dropout rate after positional embedding, and in XCA/CA projection + MLP
pos_drop_rate: position embedding dropout rate
proj_drop_rate (float): projection dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate (constant across all layers)
norm_layer: (nn.Module): normalization layer
cls_attn_layers: (int) Depth of Class attention layers
use_pos_embed: (bool) whether to use positional encoding
eta: (float) layerscale initialization value
tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA
Notes:
- Although `layer_norm` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch
interaction (class LPI) and the patch embedding (class ConvPatchEmbed)
"""
super().__init__()
assert global_pool in ('', 'avg', 'token')
img_size = to_2tuple(img_size)
        assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \
'`patch_size` should divide image dimensions evenly'
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
self.global_pool = global_pool
self.grad_checkpointing = False
self.patch_embed = ConvPatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
act_layer=act_layer,
)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_pos_embed:
self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
self.blocks = nn.ModuleList([
XCABlock(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=drop_path_rate,
act_layer=act_layer,
norm_layer=norm_layer,
eta=eta,
)
for _ in range(depth)])
self.cls_attn_blocks = nn.ModuleList([
ClassAttentionBlock(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=drop_rate,
attn_drop=attn_drop_rate,
act_layer=act_layer,
norm_layer=norm_layer,
eta=eta,
tokens_norm=tokens_norm,
)
for _ in range(cls_attn_layers)])
# Classifier head
self.norm = norm_layer(embed_dim)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# Init weights
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=r'^blocks\.(\d+)',
cls_attn_blocks=[(r'^cls_attn_blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
    def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
        # x is (B, N, C). (Hp, Wp) is (height in units of patches, width in units of patches)
x, (Hp, Wp) = self.patch_embed(x)
if self.pos_embed is not None:
# `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C)
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, Hp, Wp)
else:
x = blk(x, Hp, Wp)
x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1)
for blk in self.cls_attn_blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x)
else:
x = blk(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
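# Construction sketch for the full model (illustrative; a deliberately tiny configuration,
# not one of the registered variants below):
#
#   >>> net = Xcit(img_size=224, patch_size=16, embed_dim=128, depth=2, num_heads=4)
#   >>> net(torch.randn(1, 3, 224, 224)).shape
#   torch.Size([1, 1000])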
def checkpoint_filter_fn(state_dict, model):
if 'model' in state_dict:
state_dict = state_dict['model']
# For consistency with timm's transformer models while being compatible with official weights source we rename
# pos_embeder to pos_embed. Also account for use_pos_embed == False
use_pos_embed = getattr(model, 'pos_embed', None) is not None
pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')]
for k in pos_embed_keys:
if use_pos_embed:
state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k)
else:
del state_dict[k]
# timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors
# for all tokens, just the class token. To use official weights source we must split qkv into q, k, v
if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict():
num_ca_blocks = len(model.cls_attn_blocks)
for i in range(num_ca_blocks):
qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight')
qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1])
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j]
qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None)
if qkv_bias is not None:
qkv_bias = qkv_bias.reshape(3, -1)
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j]
return state_dict
def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Cross-Covariance Image Transformers models.')
model = build_model_with_cfg(
Xcit,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# Patch size 16
'xcit_nano_12_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'),
'xcit_nano_12_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'),
'xcit_nano_12_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_tiny_12_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'),
'xcit_tiny_12_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'),
'xcit_tiny_12_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_tiny_24_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'),
'xcit_tiny_24_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'),
'xcit_tiny_24_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_small_12_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'),
'xcit_small_12_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'),
'xcit_small_12_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_small_24_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'),
'xcit_small_24_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'),
'xcit_small_24_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_medium_24_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'),
'xcit_medium_24_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'),
'xcit_medium_24_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)),
'xcit_large_24_p16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'),
'xcit_large_24_p16_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'),
'xcit_large_24_p16_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)),
# Patch size 8
'xcit_nano_12_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'),
'xcit_nano_12_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'),
'xcit_nano_12_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_tiny_12_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'),
'xcit_tiny_12_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'),
'xcit_tiny_12_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_tiny_24_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'),
'xcit_tiny_24_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'),
'xcit_tiny_24_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_small_12_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'),
'xcit_small_12_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'),
'xcit_small_12_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_small_24_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'),
'xcit_small_24_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'),
'xcit_small_24_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_medium_24_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'),
'xcit_medium_24_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'),
'xcit_medium_24_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)),
'xcit_large_24_p8_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'),
'xcit_large_24_p8_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'),
'xcit_large_24_p8_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)),
})
@register_model
def xcit_nano_12_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False)
model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
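# Usage sketch for the registered XCiT entrypoints (illustrative; the tag after the '.'
# selects one of the default_cfgs entries above when pretrained=True):
#
#   >>> import torch, timm
#   >>> model = timm.create_model('xcit_nano_12_p16_224.fb_dist_in1k', pretrained=False)
#   >>> model(torch.randn(1, 3, 224, 224)).shape
#   torch.Size([1, 1000])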
@register_model
def xcit_nano_12_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384)
model = _create_xcit('xcit_nano_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_12_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_12_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_tiny_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_12_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_12_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_small_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_24_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_24_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_tiny_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_24_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_24_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_small_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_medium_24_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_medium_24_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_medium_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_large_24_p16_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_large_24_p16_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_large_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
# Patch size 8x8 models
@register_model
def xcit_nano_12_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False)
model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_nano_12_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False)
model = _create_xcit('xcit_nano_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_12_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_12_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_tiny_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_12_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_12_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True)
model = _create_xcit('xcit_small_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_24_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_tiny_24_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_tiny_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_24_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_small_24_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_small_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_medium_24_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_medium_24_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_medium_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_large_24_p8_224(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def xcit_large_24_p8_384(pretrained=False, **kwargs) -> Xcit:
model_args = dict(
patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True)
model = _create_xcit('xcit_large_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
# Patch size 16
'xcit_nano_12_p16_224_dist': 'xcit_nano_12_p16_224.fb_dist_in1k',
'xcit_nano_12_p16_384_dist': 'xcit_nano_12_p16_384.fb_dist_in1k',
'xcit_tiny_12_p16_224_dist': 'xcit_tiny_12_p16_224.fb_dist_in1k',
'xcit_tiny_12_p16_384_dist': 'xcit_tiny_12_p16_384.fb_dist_in1k',
'xcit_tiny_24_p16_224_dist': 'xcit_tiny_24_p16_224.fb_dist_in1k',
'xcit_tiny_24_p16_384_dist': 'xcit_tiny_24_p16_384.fb_dist_in1k',
'xcit_small_12_p16_224_dist': 'xcit_small_12_p16_224.fb_dist_in1k',
'xcit_small_12_p16_384_dist': 'xcit_small_12_p16_384.fb_dist_in1k',
'xcit_small_24_p16_224_dist': 'xcit_small_24_p16_224.fb_dist_in1k',
'xcit_medium_24_p16_224_dist': 'xcit_medium_24_p16_224.fb_dist_in1k',
'xcit_medium_24_p16_384_dist': 'xcit_medium_24_p16_384.fb_dist_in1k',
'xcit_large_24_p16_224_dist': 'xcit_large_24_p16_224.fb_dist_in1k',
'xcit_large_24_p16_384_dist': 'xcit_large_24_p16_384.fb_dist_in1k',
# Patch size 8
'xcit_nano_12_p8_224_dist': 'xcit_nano_12_p8_224.fb_dist_in1k',
'xcit_nano_12_p8_384_dist': 'xcit_nano_12_p8_384.fb_dist_in1k',
'xcit_tiny_12_p8_224_dist': 'xcit_tiny_12_p8_224.fb_dist_in1k',
'xcit_tiny_12_p8_384_dist': 'xcit_tiny_12_p8_384.fb_dist_in1k',
'xcit_tiny_24_p8_224_dist': 'xcit_tiny_24_p8_224.fb_dist_in1k',
'xcit_tiny_24_p8_384_dist': 'xcit_tiny_24_p8_384.fb_dist_in1k',
'xcit_small_12_p8_224_dist': 'xcit_small_12_p8_224.fb_dist_in1k',
'xcit_small_12_p8_384_dist': 'xcit_small_12_p8_384.fb_dist_in1k',
'xcit_small_24_p8_224_dist': 'xcit_small_24_p8_224.fb_dist_in1k',
'xcit_small_24_p8_384_dist': 'xcit_small_24_p8_384.fb_dist_in1k',
'xcit_medium_24_p8_224_dist': 'xcit_medium_24_p8_224.fb_dist_in1k',
'xcit_medium_24_p8_384_dist': 'xcit_medium_24_p8_384.fb_dist_in1k',
'xcit_large_24_p8_224_dist': 'xcit_large_24_p8_224.fb_dist_in1k',
'xcit_large_24_p8_384_dist': 'xcit_large_24_p8_384.fb_dist_in1k',
})
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/dpn.py | """ PyTorch implementation of DualPathNetworks
Based on original MXNet implementation https://github.com/cypw/DPNs with
many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs.
This implementation is compatible with the pretrained weights from cypw's MXNet implementation.
Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import BatchNormAct2d, ConvNormAct, create_conv2d, create_classifier, get_norm_act_layer
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['DPN']
class CatBnAct(nn.Module):
def __init__(self, in_chs, norm_layer=BatchNormAct2d):
super(CatBnAct, self).__init__()
self.bn = norm_layer(in_chs, eps=0.001)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (torch.Tensor)
pass
def forward(self, x):
if isinstance(x, tuple):
x = torch.cat(x, dim=1)
return self.bn(x)
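# Behaviour sketch (illustrative): CatBnAct accepts either a tensor or the
# (residual, dense) tuple produced by DualPathBlock and concatenates along the channel
# dim before the norm/act.
#
#   >>> cat_bn = CatBnAct(96)
#   >>> cat_bn((torch.randn(2, 64, 7, 7), torch.randn(2, 32, 7, 7))).shape
#   torch.Size([2, 96, 7, 7])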
class BnActConv2d(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d):
super(BnActConv2d, self).__init__()
self.bn = norm_layer(in_chs, eps=0.001)
self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups)
def forward(self, x):
return self.conv(self.bn(x))
class DualPathBlock(nn.Module):
def __init__(
self,
in_chs,
num_1x1_a,
num_3x3_b,
num_1x1_c,
inc,
groups,
block_type='normal',
b=False,
):
super(DualPathBlock, self).__init__()
self.num_1x1_c = num_1x1_c
self.inc = inc
self.b = b
if block_type == 'proj':
self.key_stride = 1
self.has_proj = True
elif block_type == 'down':
self.key_stride = 2
self.has_proj = True
else:
assert block_type == 'normal'
self.key_stride = 1
self.has_proj = False
self.c1x1_w_s1 = None
self.c1x1_w_s2 = None
if self.has_proj:
# Using different member names here to allow easier parameter key matching for conversion
if self.key_stride == 2:
self.c1x1_w_s2 = BnActConv2d(
in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2)
else:
self.c1x1_w_s1 = BnActConv2d(
in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1)
self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1)
self.c3x3_b = BnActConv2d(
in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups)
if b:
self.c1x1_c = CatBnAct(in_chs=num_3x3_b)
self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1)
self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1)
else:
self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1)
self.c1x1_c1 = None
self.c1x1_c2 = None
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
pass
def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]:
if isinstance(x, tuple):
x_in = torch.cat(x, dim=1)
else:
x_in = x
if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None:
# self.has_proj == False, torchscript requires condition on module == None
x_s1 = x[0]
x_s2 = x[1]
else:
# self.has_proj == True
if self.c1x1_w_s1 is not None:
# self.key_stride = 1
x_s = self.c1x1_w_s1(x_in)
else:
# self.key_stride = 2
x_s = self.c1x1_w_s2(x_in)
x_s1 = x_s[:, :self.num_1x1_c, :, :]
x_s2 = x_s[:, self.num_1x1_c:, :, :]
x_in = self.c1x1_a(x_in)
x_in = self.c3x3_b(x_in)
x_in = self.c1x1_c(x_in)
if self.c1x1_c1 is not None:
# self.b == True, using None check for torchscript compat
out1 = self.c1x1_c1(x_in)
out2 = self.c1x1_c2(x_in)
else:
out1 = x_in[:, :self.num_1x1_c, :, :]
out2 = x_in[:, self.num_1x1_c:, :, :]
resid = x_s1 + out1
dense = torch.cat([x_s2, out2], dim=1)
return resid, dense
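# Shape sketch (illustrative, hypothetical channel numbers): a 'proj' block maps a
# 64-channel input to a 64-channel residual path plus a dense path that grows by `inc`
# channels per block (here 2 * inc = 32 from the projection, plus 16 from the block).
#
#   >>> blk = DualPathBlock(64, 32, 32, 64, inc=16, groups=8, block_type='proj')
#   >>> resid, dense = blk(torch.randn(2, 64, 14, 14))
#   >>> resid.shape, dense.shape
#   (torch.Size([2, 64, 14, 14]), torch.Size([2, 48, 14, 14]))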
class DPN(nn.Module):
def __init__(
self,
k_sec=(3, 4, 20, 3),
inc_sec=(16, 32, 24, 128),
k_r=96,
groups=32,
num_classes=1000,
in_chans=3,
output_stride=32,
global_pool='avg',
small=False,
num_init_features=64,
b=False,
drop_rate=0.,
norm_layer='batchnorm2d',
act_layer='relu',
fc_act_layer='elu',
):
super(DPN, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.b = b
assert output_stride == 32 # FIXME look into dilation support
norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=act_layer), eps=.001)
fc_norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=fc_act_layer), eps=.001, inplace=False)
bw_factor = 1 if small else 4
blocks = OrderedDict()
# conv1
blocks['conv1_1'] = ConvNormAct(
in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer)
blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')]
# conv2
bw = 64 * bw_factor
inc = inc_sec[0]
r = (k_r * bw) // (64 * bw_factor)
blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[0] + 1):
blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')]
# conv3
bw = 128 * bw_factor
inc = inc_sec[1]
r = (k_r * bw) // (64 * bw_factor)
blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[1] + 1):
blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')]
# conv4
bw = 256 * bw_factor
inc = inc_sec[2]
r = (k_r * bw) // (64 * bw_factor)
blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[2] + 1):
blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')]
# conv5
bw = 512 * bw_factor
inc = inc_sec[3]
r = (k_r * bw) // (64 * bw_factor)
blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[3] + 1):
blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')]
blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer)
self.num_features = in_chs
self.features = nn.Sequential(blocks)
# Using 1x1 conv for the FC layer to allow the extra pooling scheme
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, use_conv=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^features\.conv1',
blocks=[
(r'^features\.conv(\d+)' if coarse else r'^features\.conv(\d+)_(\d+)', None),
(r'^features\.conv5_bn_ac', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, use_conv=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
def forward_features(self, x):
return self.features(x)
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
if pre_logits:
return self.flatten(x)
x = self.classifier(x)
return self.flatten(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_dpn(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
DPN,
variant,
pretrained,
feature_cfg=dict(feature_concat=True, flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD,
'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'dpn48b.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'dpn68.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn68b.ra_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'dpn68b.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn92.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn98.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn131.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn107.mx_in1k': _cfg(hf_hub_id='timm/')
})
@register_model
def dpn48b(pretrained=False, **kwargs) -> DPN:
model_args = dict(
small=True, num_init_features=10, k_r=128, groups=32,
b=True, k_sec=(3, 4, 6, 3), inc_sec=(16, 32, 32, 64), act_layer='silu')
return _create_dpn('dpn48b', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn68(pretrained=False, **kwargs) -> DPN:
model_args = dict(
small=True, num_init_features=10, k_r=128, groups=32,
k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64))
return _create_dpn('dpn68', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn68b(pretrained=False, **kwargs) -> DPN:
model_args = dict(
small=True, num_init_features=10, k_r=128, groups=32,
b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64))
return _create_dpn('dpn68b', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn92(pretrained=False, **kwargs) -> DPN:
model_args = dict(
num_init_features=64, k_r=96, groups=32,
k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128))
return _create_dpn('dpn92', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn98(pretrained=False, **kwargs) -> DPN:
model_args = dict(
num_init_features=96, k_r=160, groups=40,
k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128))
return _create_dpn('dpn98', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn131(pretrained=False, **kwargs) -> DPN:
model_args = dict(
num_init_features=128, k_r=160, groups=40,
k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128))
return _create_dpn('dpn131', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn107(pretrained=False, **kwargs) -> DPN:
model_args = dict(
num_init_features=128, k_r=200, groups=50,
k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128))
return _create_dpn('dpn107', pretrained=pretrained, **dict(model_args, **kwargs))
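# Hedged usage sketch (added for illustration, not part of the original module): create a
# registered DPN variant through the timm factory and run a dummy forward pass. Model name,
# num_classes and input size below are arbitrary examples.
def _example_dpn_forward():
    import timm
    model = timm.create_model('dpn68b', pretrained=False, num_classes=10)
    model.eval()
    with torch.no_grad():
        # head is a 1x1 conv classifier followed by flatten, so the output is (1, 10)
        logits = model(torch.randn(1, 3, 224, 224))
    return logits.shape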
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_helpers.py | """ Model creation / weight loading / state_dict helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import os
from collections import OrderedDict
from typing import Any, Callable, Dict, Optional, Union
import torch
try:
import safetensors.torch
_has_safetensors = True
except ImportError:
_has_safetensors = False
_logger = logging.getLogger(__name__)
__all__ = ['clean_state_dict', 'load_state_dict', 'load_checkpoint', 'remap_state_dict', 'resume_checkpoint']
def clean_state_dict(state_dict: Dict[str, Any]) -> Dict[str, Any]:
# 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training
cleaned_state_dict = {}
for k, v in state_dict.items():
name = k[7:] if k.startswith('module.') else k
cleaned_state_dict[name] = v
return cleaned_state_dict
def load_state_dict(
checkpoint_path: str,
use_ema: bool = True,
device: Union[str, torch.device] = 'cpu',
) -> Dict[str, Any]:
if checkpoint_path and os.path.isfile(checkpoint_path):
# Check if safetensors or not and load weights accordingly
if str(checkpoint_path).endswith(".safetensors"):
assert _has_safetensors, "`pip install safetensors` to use .safetensors"
checkpoint = safetensors.torch.load_file(checkpoint_path, device=device)
else:
checkpoint = torch.load(checkpoint_path, map_location=device)
state_dict_key = ''
if isinstance(checkpoint, dict):
if use_ema and checkpoint.get('state_dict_ema', None) is not None:
state_dict_key = 'state_dict_ema'
elif use_ema and checkpoint.get('model_ema', None) is not None:
state_dict_key = 'model_ema'
elif 'state_dict' in checkpoint:
state_dict_key = 'state_dict'
elif 'model' in checkpoint:
state_dict_key = 'model'
state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint)
_logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(
model: torch.nn.Module,
checkpoint_path: str,
use_ema: bool = True,
device: Union[str, torch.device] = 'cpu',
strict: bool = True,
remap: bool = False,
filter_fn: Optional[Callable] = None,
):
if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'):
# numpy checkpoint, try to load via model specific load_pretrained fn
if hasattr(model, 'load_pretrained'):
model.load_pretrained(checkpoint_path)
else:
raise NotImplementedError('Model cannot load numpy checkpoint')
return
state_dict = load_state_dict(checkpoint_path, use_ema, device=device)
if remap:
state_dict = remap_state_dict(state_dict, model)
elif filter_fn:
state_dict = filter_fn(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
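# Hedged usage sketch (illustration only, not part of the original helpers): load EMA weights
# from a training checkpoint into a freshly built model. The model name and checkpoint path are
# hypothetical placeholders.
def _example_load_checkpoint():
    import timm
    model = timm.create_model('resnet50', pretrained=False)
    # use_ema=True prefers 'state_dict_ema'/'model_ema' entries, falling back to the plain state dict
    load_checkpoint(model, 'output/train/model_best.pth.tar', use_ema=True, strict=True)
    return model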
def remap_state_dict(
state_dict: Dict[str, Any],
model: torch.nn.Module,
allow_reshape: bool = True
):
""" remap checkpoint by iterating over state dicts in order (ignoring original keys).
This assumes models (and originating state dict) were created with params registered in same order.
"""
out_dict = {}
for (ka, va), (kb, vb) in zip(model.state_dict().items(), state_dict.items()):
assert va.numel() == vb.numel(), f'Tensor size mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.'
if va.shape != vb.shape:
if allow_reshape:
vb = vb.reshape(va.shape)
else:
assert False, f'Tensor shape mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.'
out_dict[ka] = vb
return out_dict
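# Minimal sketch of the positional remap above (illustration only): two modules with identical
# parameter shapes registered in the same order can exchange weights even though their attribute
# names differ, because only ordering and tensor sizes are matched, never the key strings.
def _example_remap_state_dict():
    import torch.nn as nn
    class SrcNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.backbone_conv = nn.Conv2d(3, 8, 3)
            self.fc_out = nn.Linear(8, 4)
    class DstNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 8, 3)
            self.head = nn.Linear(8, 4)
    dst = DstNet()
    dst.load_state_dict(remap_state_dict(SrcNet().state_dict(), dst))
    return dst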
def resume_checkpoint(
model: torch.nn.Module,
checkpoint_path: str,
optimizer: torch.optim.Optimizer = None,
loss_scaler: Any = None,
log_info: bool = True,
):
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
if log_info:
_logger.info('Restoring model state from checkpoint...')
state_dict = clean_state_dict(checkpoint['state_dict'])
model.load_state_dict(state_dict)
if optimizer is not None and 'optimizer' in checkpoint:
if log_info:
_logger.info('Restoring optimizer state from checkpoint...')
optimizer.load_state_dict(checkpoint['optimizer'])
if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
if log_info:
_logger.info('Restoring AMP loss scaler state from checkpoint...')
loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
if 'epoch' in checkpoint:
resume_epoch = checkpoint['epoch']
if 'version' in checkpoint and checkpoint['version'] > 1:
resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
if log_info:
_logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
else:
model.load_state_dict(checkpoint)
if log_info:
_logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
return resume_epoch
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
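# Hedged sketch of resuming training (illustration only; the path and the loop body are
# placeholders). resume_checkpoint restores model/optimizer/scaler state and returns the epoch to
# continue from, or None when the checkpoint holds only raw weights.
def _example_resume_training(model, optimizer, checkpoint_path='output/train/last.pth.tar'):
    resume_epoch = resume_checkpoint(model, checkpoint_path, optimizer=optimizer)
    start_epoch = resume_epoch if resume_epoch is not None else 0
    for epoch in range(start_epoch, 100):  # 100 epochs is an arbitrary example budget
        pass  # one training epoch would run here
    return start_epoch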
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/vgg.py | """VGG
Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for
timm functionality.
Copyright 2021 Ross Wightman
"""
from typing import Union, List, Dict, Any, cast
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs
__all__ = ['VGG']
cfgs: Dict[str, List[Union[str, int]]] = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
@register_notrace_module # reason: FX can't symbolically trace control flow in forward method
class ConvMlp(nn.Module):
def __init__(
self,
in_features=512,
out_features=4096,
kernel_size=7,
mlp_ratio=1.0,
drop_rate: float = 0.2,
act_layer: nn.Module = None,
conv_layer: nn.Module = None,
):
super(ConvMlp, self).__init__()
self.input_kernel_size = kernel_size
mid_features = int(out_features * mlp_ratio)
self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True)
self.act1 = act_layer(True)
self.drop = nn.Dropout(drop_rate)
self.fc2 = conv_layer(mid_features, out_features, 1, bias=True)
self.act2 = act_layer(True)
def forward(self, x):
if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size:
# keep the input size >= 7x7
output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1]))
x = F.adaptive_avg_pool2d(x, output_size)
x = self.fc1(x)
x = self.act1(x)
x = self.drop(x)
x = self.fc2(x)
x = self.act2(x)
return x
class VGG(nn.Module):
def __init__(
self,
cfg: List[Any],
num_classes: int = 1000,
in_chans: int = 3,
output_stride: int = 32,
mlp_ratio: float = 1.0,
act_layer: nn.Module = nn.ReLU,
conv_layer: nn.Module = nn.Conv2d,
norm_layer: nn.Module = None,
global_pool: str = 'avg',
drop_rate: float = 0.,
) -> None:
super(VGG, self).__init__()
assert output_stride == 32
self.num_classes = num_classes
self.num_features = 4096
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.use_norm = norm_layer is not None
self.feature_info = []
prev_chs = in_chans
net_stride = 1
pool_layer = nn.MaxPool2d
layers: List[nn.Module] = []
for v in cfg:
last_idx = len(layers) - 1
if v == 'M':
self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}'))
layers += [pool_layer(kernel_size=2, stride=2)]
net_stride *= 2
else:
v = cast(int, v)
conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1)
if norm_layer is not None:
layers += [conv2d, norm_layer(v), act_layer(inplace=True)]
else:
layers += [conv2d, act_layer(inplace=True)]
prev_chs = v
self.features = nn.Sequential(*layers)
self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}'))
self.pre_logits = ConvMlp(
prev_chs,
self.num_features,
7,
mlp_ratio=mlp_ratio,
drop_rate=drop_rate,
act_layer=act_layer,
conv_layer=conv_layer,
)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
self._initialize_weights()
@torch.jit.ignore
def group_matcher(self, coarse=False):
        # NOTE: this treats BN layers as separate groups for the bn variants; fixing that would take a lot of effort
return dict(stem=r'^features\.0', blocks=r'^features\.(\d+)')
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.head = ClassifierHead(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False):
x = self.pre_logits(x)
return x if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def _filter_fn(state_dict):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
k_r = k
k_r = k_r.replace('classifier.0', 'pre_logits.fc1')
k_r = k_r.replace('classifier.3', 'pre_logits.fc2')
k_r = k_r.replace('classifier.6', 'head.fc')
if 'classifier.0.weight' in k:
v = v.reshape(-1, 512, 7, 7)
if 'classifier.3.weight' in k:
v = v.reshape(-1, 4096, 1, 1)
out_dict[k_r] = v
return out_dict
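# Minimal illustration of the remap above (not part of the original module): the flattened
# torchvision classifier weight is reshaped so the first FC layer becomes the 7x7 conv of the
# ConvMlp pre_logits head. The tensor here is a dummy stand-in for a real checkpoint entry.
def _example_filter_fn_shapes():
    fake_tv_sd = {'classifier.0.weight': torch.zeros(4096, 512 * 7 * 7)}
    remapped = _filter_fn(fake_tv_sd)
    return remapped['pre_logits.fc1.weight'].shape  # torch.Size([4096, 512, 7, 7])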
def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG:
cfg = variant.split('_')[0]
    # NOTE: VGG is one of the few models with stride==1 features and 6 out_indices [0..5] (see the usage sketch below)
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5))
model = build_model_with_cfg(
VGG,
variant,
pretrained,
model_cfg=cfgs[cfg],
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
pretrained_filter_fn=_filter_fn,
**kwargs,
)
return model
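# Hedged usage sketch (illustration only): build a features_only VGG to get intermediate feature
# maps; out_indices selects a subset of the six taps mentioned in the NOTE above. The model name
# is just an example.
def _example_vgg_features():
    import timm
    model = timm.create_model('vgg16', pretrained=False, features_only=True, out_indices=(2, 3, 4, 5))
    return model.feature_info.channels()  # channel counts of the selected feature maps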
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'features.0', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'vgg11.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg13.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg16.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg19.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg11_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg13_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg16_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
'vgg19_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(**kwargs)
return _create_vgg('vgg11', pretrained=pretrained, **model_args)
@register_model
def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args)
@register_model
def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(**kwargs)
return _create_vgg('vgg13', pretrained=pretrained, **model_args)
@register_model
def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args)
@register_model
def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(**kwargs)
return _create_vgg('vgg16', pretrained=pretrained, **model_args)
@register_model
def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args)
@register_model
def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(**kwargs)
return _create_vgg('vgg19', pretrained=pretrained, **model_args)
@register_model
def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
"""
model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args) | 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/efficientformer.py | """ EfficientFormer
@article{li2022efficientformer,
title={EfficientFormer: Vision Transformers at MobileNet Speed},
author={Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov,
Sergey and Wang, Yanzhi and Ren, Jian},
journal={arXiv preprint arXiv:2206.01191},
year={2022}
}
Based on Apache 2.0 licensed code at https://github.com/snap-research/EfficientFormer, Copyright (c) 2022 Snap Inc.
Modifications and timm support by / Copyright 2022, Ross Wightman
"""
from typing import Dict
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, to_2tuple, Mlp
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['EfficientFormer'] # model_registry will add each entrypoint fn to this
EfficientFormer_width = {
'l1': (48, 96, 224, 448),
'l3': (64, 128, 320, 512),
'l7': (96, 192, 384, 768),
}
EfficientFormer_depth = {
'l1': (3, 2, 6, 4),
'l3': (4, 4, 12, 6),
'l7': (6, 6, 18, 8),
}
class Attention(torch.nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim=384,
key_dim=32,
num_heads=8,
attn_ratio=4,
resolution=7
):
super().__init__()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.key_attn_dim = key_dim * num_heads
self.val_dim = int(attn_ratio * key_dim)
self.val_attn_dim = self.val_dim * num_heads
self.attn_ratio = attn_ratio
self.qkv = nn.Linear(dim, self.key_attn_dim * 2 + self.val_attn_dim)
self.proj = nn.Linear(self.val_attn_dim, dim)
resolution = to_2tuple(resolution)
pos = torch.stack(torch.meshgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1)
rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1]))
self.register_buffer('attention_bias_idxs', rel_pos)
self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat)
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x): # x (B,N,C)
B, N, C = x.shape
qkv = self.qkv(x)
qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
q, k, v = qkv.split([self.key_dim, self.key_dim, self.val_dim], dim=3)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn + self.get_attention_biases(x.device)
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim)
x = self.proj(x)
return x
class Stem4(nn.Sequential):
def __init__(self, in_chs, out_chs, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
super().__init__()
self.stride = 4
self.add_module('conv1', nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1))
self.add_module('norm1', norm_layer(out_chs // 2))
self.add_module('act1', act_layer())
self.add_module('conv2', nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1))
self.add_module('norm2', norm_layer(out_chs))
self.add_module('act2', act_layer())
class Downsample(nn.Module):
"""
Downsampling via strided conv w/ norm
Input: tensor in shape [B, C, H, W]
Output: tensor in shape [B, C, H/stride, W/stride]
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=2, padding=None, norm_layer=nn.BatchNorm2d):
super().__init__()
if padding is None:
padding = kernel_size // 2
self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm = norm_layer(out_chs)
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class Flat(nn.Module):
def __init__(self, ):
super().__init__()
def forward(self, x):
x = x.flatten(2).transpose(1, 2)
return x
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class ConvMlpWithNorm(nn.Module):
"""
    Implementation of MLP with 1x1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
drop=0.
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.norm1 = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.norm2 = norm_layer(out_features) if norm_layer is not None else nn.Identity()
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.norm1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.norm2(x)
x = self.drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class MetaBlock1d(nn.Module):
def __init__(
self,
dim,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
proj_drop=0.,
drop_path=0.,
layer_scale_init_value=1e-5
):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Attention(dim)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.ls1 = LayerScale(dim, layer_scale_init_value)
self.ls2 = LayerScale(dim, layer_scale_init_value)
def forward(self, x):
x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x))))
x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x))))
return x
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class MetaBlock2d(nn.Module):
def __init__(
self,
dim,
pool_size=3,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
proj_drop=0.,
drop_path=0.,
layer_scale_init_value=1e-5
):
super().__init__()
self.token_mixer = Pooling(pool_size=pool_size)
self.ls1 = LayerScale2d(dim, layer_scale_init_value)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = ConvMlpWithNorm(
dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
norm_layer=norm_layer,
drop=proj_drop,
)
self.ls2 = LayerScale2d(dim, layer_scale_init_value)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.ls1(self.token_mixer(x)))
x = x + self.drop_path2(self.ls2(self.mlp(x)))
return x
class EfficientFormerStage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
downsample=True,
num_vit=1,
pool_size=3,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
norm_layer_cl=nn.LayerNorm,
proj_drop=.0,
drop_path=0.,
layer_scale_init_value=1e-5,
):
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = Downsample(in_chs=dim, out_chs=dim_out, norm_layer=norm_layer)
dim = dim_out
else:
assert dim == dim_out
self.downsample = nn.Identity()
blocks = []
if num_vit and num_vit >= depth:
blocks.append(Flat())
for block_idx in range(depth):
remain_idx = depth - block_idx - 1
if num_vit and num_vit > remain_idx:
blocks.append(
MetaBlock1d(
dim,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer_cl,
proj_drop=proj_drop,
drop_path=drop_path[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
else:
blocks.append(
MetaBlock2d(
dim,
pool_size=pool_size,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop=proj_drop,
drop_path=drop_path[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
if num_vit and num_vit == remain_idx:
blocks.append(Flat())
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class EfficientFormer(nn.Module):
def __init__(
self,
depths,
embed_dims=None,
in_chans=3,
num_classes=1000,
global_pool='avg',
downsamples=None,
num_vit=0,
mlp_ratios=4,
pool_size=3,
layer_scale_init_value=1e-5,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
norm_layer_cl=nn.LayerNorm,
drop_rate=0.,
proj_drop_rate=0.,
drop_path_rate=0.,
**kwargs
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.stem = Stem4(in_chans, embed_dims[0], norm_layer=norm_layer)
prev_dim = embed_dims[0]
# stochastic depth decay rule
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
downsamples = downsamples or (False,) + (True,) * (len(depths) - 1)
stages = []
for i in range(len(depths)):
stage = EfficientFormerStage(
prev_dim,
embed_dims[i],
depths[i],
downsample=downsamples[i],
num_vit=num_vit if i == 3 else 0,
pool_size=pool_size,
mlp_ratio=mlp_ratios,
act_layer=act_layer,
norm_layer_cl=norm_layer_cl,
norm_layer=norm_layer,
proj_drop=proj_drop_rate,
drop_path=dpr[i],
layer_scale_init_value=layer_scale_init_value,
)
prev_dim = embed_dims[i]
stages.append(stage)
self.stages = nn.Sequential(*stages)
# Classifier head
self.num_features = embed_dims[-1]
self.norm = norm_layer_cl(self.num_features)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# assuming model is always distilled (valid for current checkpoints, will split def if that changes)
self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
self.apply(self._init_weights)
# init for classification
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {k for k, _ in self.named_parameters() if 'attention_biases' in k}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=1)
x = self.head_drop(x)
if pre_logits:
return x
x, x_dist = self.head(x), self.head_dist(x)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
            # during standard train/finetune, average the two classifier predictions at inference
return (x + x_dist) / 2
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
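# Hedged sketch of the distillation behaviour in forward_head above (illustration only): with
# distilled training enabled and the module in train() mode the head returns separate class and
# distillation logits, otherwise the two predictions are averaged into a single tensor.
def _example_distilled_outputs(model: EfficientFormer, x: torch.Tensor):
    # x is expected at the model's fixed 224x224 input size (attention biases use resolution 7)
    model.set_distilled_training(True)
    model.train()
    cls_logits, dist_logits = model(x)  # two tensors while training w/ distillation
    model.eval()
    averaged = model(x)  # single averaged prediction at inference
    return cls_logits, dist_logits, averaged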
def _checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stem.0.weight' in state_dict:
return state_dict # non-original checkpoint, no remapping needed
out_dict = {}
import re
stage_idx = 0
for k, v in state_dict.items():
if k.startswith('patch_embed'):
k = k.replace('patch_embed.0', 'stem.conv1')
k = k.replace('patch_embed.1', 'stem.norm1')
k = k.replace('patch_embed.3', 'stem.conv2')
k = k.replace('patch_embed.4', 'stem.norm2')
if re.match(r'network\.(\d+)\.proj\.weight', k):
stage_idx += 1
k = re.sub(r'network.(\d+).(\d+)', f'stages.{stage_idx}.blocks.\\2', k)
k = re.sub(r'network.(\d+).proj', f'stages.{stage_idx}.downsample.conv', k)
k = re.sub(r'network.(\d+).norm', f'stages.{stage_idx}.downsample.norm', k)
k = re.sub(r'layer_scale_([0-9])', r'ls\1.gamma', k)
k = k.replace('dist_head', 'head_dist')
out_dict[k] = v
return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True,
'crop_pct': .95, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': ('head', 'head_dist'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'efficientformer_l1.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformer_l3.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformer_l7.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
})
def _create_efficientformer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for EfficientFormer models.')
model = build_model_with_cfg(
EfficientFormer, variant, pretrained,
pretrained_filter_fn=_checkpoint_filter_fn,
**kwargs)
return model
@register_model
def efficientformer_l1(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l1'],
embed_dims=EfficientFormer_width['l1'],
num_vit=1,
)
return _create_efficientformer('efficientformer_l1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformer_l3(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l3'],
embed_dims=EfficientFormer_width['l3'],
num_vit=4,
)
return _create_efficientformer('efficientformer_l3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformer_l7(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l7'],
embed_dims=EfficientFormer_width['l7'],
num_vit=8,
)
return _create_efficientformer('efficientformer_l7', pretrained=pretrained, **dict(model_args, **kwargs))
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/metaformer.py | """
Poolformer from MetaFormer is Actually What You Need for Vision https://arxiv.org/abs/2111.11418
IdentityFormer, RandFormer, PoolFormerV2, ConvFormer, and CAFormer
from MetaFormer Baselines for Vision https://arxiv.org/abs/2210.13452
All implemented models support feature extraction and variable input resolution.
Original implementation by Weihao Yu et al.,
adapted for timm by Fredo Guan and Ross Wightman.
Adapted from https://github.com/sail-sg/metaformer, original copyright below
"""
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.jit import Final
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, SelectAdaptivePool2d, GroupNorm1, LayerNorm, LayerNorm2d, Mlp, \
use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['MetaFormer']
class Stem(nn.Module):
"""
Stem implemented by a layer of convolution.
Conv2d params constant across all models.
"""
def __init__(
self,
in_channels,
out_channels,
norm_layer=None,
):
super().__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=4,
padding=2
)
self.norm = norm_layer(out_channels) if norm_layer else nn.Identity()
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class Downsampling(nn.Module):
"""
Downsampling implemented by a layer of convolution.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
norm_layer=None,
):
super().__init__()
self.norm = norm_layer(in_channels) if norm_layer else nn.Identity()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding
)
def forward(self, x):
x = self.norm(x)
x = self.conv(x)
return x
class Scale(nn.Module):
"""
    Scale vector by element-wise multiplication.
"""
def __init__(self, dim, init_value=1.0, trainable=True, use_nchw=True):
super().__init__()
self.shape = (dim, 1, 1) if use_nchw else (dim,)
self.scale = nn.Parameter(init_value * torch.ones(dim), requires_grad=trainable)
def forward(self, x):
return x * self.scale.view(self.shape)
class SquaredReLU(nn.Module):
"""
Squared ReLU: https://arxiv.org/abs/2109.08668
"""
def __init__(self, inplace=False):
super().__init__()
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
return torch.square(self.relu(x))
class StarReLU(nn.Module):
"""
StarReLU: s * relu(x) ** 2 + b
"""
def __init__(
self,
scale_value=1.0,
bias_value=0.0,
scale_learnable=True,
bias_learnable=True,
mode=None,
inplace=False
):
super().__init__()
self.inplace = inplace
self.relu = nn.ReLU(inplace=inplace)
self.scale = nn.Parameter(scale_value * torch.ones(1), requires_grad=scale_learnable)
self.bias = nn.Parameter(bias_value * torch.ones(1), requires_grad=bias_learnable)
def forward(self, x):
return self.scale * self.relu(x) ** 2 + self.bias
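# Quick numeric check of the StarReLU definition above (illustration only): with the default
# scale=1 and bias=0 the activation reduces to relu(x) ** 2.
def _example_starrelu():
    x = torch.tensor([-2.0, 0.0, 3.0])
    out = StarReLU()(x)
    return torch.allclose(out, torch.relu(x) ** 2)  # expected values: [0., 0., 9.]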
class Attention(nn.Module):
"""
Vanilla self-attention from Transformer: https://arxiv.org/abs/1706.03762.
Modified from timm.
"""
fused_attn: Final[bool]
def __init__(
self,
dim,
head_dim=32,
num_heads=None,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
proj_bias=False,
**kwargs
):
super().__init__()
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.num_heads = num_heads if num_heads else dim // head_dim
if self.num_heads == 0:
self.num_heads = 1
self.attention_dim = self.num_heads * self.head_dim
self.qkv = nn.Linear(dim, self.attention_dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(self.attention_dim, dim, bias=proj_bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# custom norm modules that disable the bias term, since the original model defs
# used a custom norm with a weight term but no bias term.
class GroupNorm1NoBias(GroupNorm1):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class LayerNorm2dNoBias(LayerNorm2d):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class LayerNormNoBias(nn.LayerNorm):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class SepConv(nn.Module):
r"""
Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381.
"""
def __init__(
self,
dim,
expansion_ratio=2,
act1_layer=StarReLU,
act2_layer=nn.Identity,
bias=False,
kernel_size=7,
padding=3,
**kwargs
):
super().__init__()
mid_channels = int(expansion_ratio * dim)
self.pwconv1 = nn.Conv2d(dim, mid_channels, kernel_size=1, bias=bias)
self.act1 = act1_layer()
self.dwconv = nn.Conv2d(
mid_channels, mid_channels, kernel_size=kernel_size,
padding=padding, groups=mid_channels, bias=bias) # depthwise conv
self.act2 = act2_layer()
self.pwconv2 = nn.Conv2d(mid_channels, dim, kernel_size=1, bias=bias)
def forward(self, x):
x = self.pwconv1(x)
x = self.act1(x)
x = self.dwconv(x)
x = self.act2(x)
x = self.pwconv2(x)
return x
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer: https://arxiv.org/abs/2111.11418
"""
def __init__(self, pool_size=3, **kwargs):
super().__init__()
self.pool = nn.AvgPool2d(
pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, x):
y = self.pool(x)
return y - x
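# Small sketch of the pooling token mixer above (illustration only): subtracting the input from
# its local average maps a constant feature map to exactly zero, so the residual path carries the
# identity and the mixer only contributes neighbourhood differences.
def _example_pooling_mixer():
    x = torch.ones(1, 8, 7, 7)
    mixed = Pooling(pool_size=3)(x)
    return torch.allclose(mixed, torch.zeros_like(mixed))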
class MlpHead(nn.Module):
""" MLP classification head
"""
def __init__(
self,
dim,
num_classes=1000,
mlp_ratio=4,
act_layer=SquaredReLU,
norm_layer=LayerNorm,
drop_rate=0.,
bias=True
):
super().__init__()
hidden_features = int(mlp_ratio * dim)
self.fc1 = nn.Linear(dim, hidden_features, bias=bias)
self.act = act_layer()
self.norm = norm_layer(hidden_features)
self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
self.head_drop = nn.Dropout(drop_rate)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.norm(x)
x = self.head_drop(x)
x = self.fc2(x)
return x
class MetaFormerBlock(nn.Module):
"""
Implementation of one MetaFormer block.
"""
def __init__(
self,
dim,
token_mixer=Pooling,
mlp_act=StarReLU,
mlp_bias=False,
norm_layer=LayerNorm2d,
proj_drop=0.,
drop_path=0.,
use_nchw=True,
layer_scale_init_value=None,
res_scale_init_value=None,
**kwargs
):
super().__init__()
ls_layer = partial(Scale, dim=dim, init_value=layer_scale_init_value, use_nchw=use_nchw)
rs_layer = partial(Scale, dim=dim, init_value=res_scale_init_value, use_nchw=use_nchw)
self.norm1 = norm_layer(dim)
self.token_mixer = token_mixer(dim=dim, proj_drop=proj_drop, **kwargs)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.layer_scale1 = ls_layer() if layer_scale_init_value is not None else nn.Identity()
self.res_scale1 = rs_layer() if res_scale_init_value is not None else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
dim,
int(4 * dim),
act_layer=mlp_act,
bias=mlp_bias,
drop=proj_drop,
use_conv=use_nchw,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.layer_scale2 = ls_layer() if layer_scale_init_value is not None else nn.Identity()
self.res_scale2 = rs_layer() if res_scale_init_value is not None else nn.Identity()
def forward(self, x):
x = self.res_scale1(x) + \
self.layer_scale1(
self.drop_path1(
self.token_mixer(self.norm1(x))
)
)
x = self.res_scale2(x) + \
self.layer_scale2(
self.drop_path2(
self.mlp(self.norm2(x))
)
)
return x
class MetaFormerStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth=2,
token_mixer=nn.Identity,
mlp_act=StarReLU,
mlp_bias=False,
downsample_norm=LayerNorm2d,
norm_layer=LayerNorm2d,
proj_drop=0.,
dp_rates=[0.] * 2,
layer_scale_init_value=None,
res_scale_init_value=None,
**kwargs,
):
super().__init__()
self.grad_checkpointing = False
self.use_nchw = not issubclass(token_mixer, Attention)
# don't downsample if in_chs and out_chs are the same
self.downsample = nn.Identity() if in_chs == out_chs else Downsampling(
in_chs,
out_chs,
kernel_size=3,
stride=2,
padding=1,
norm_layer=downsample_norm,
)
self.blocks = nn.Sequential(*[MetaFormerBlock(
dim=out_chs,
token_mixer=token_mixer,
mlp_act=mlp_act,
mlp_bias=mlp_bias,
norm_layer=norm_layer,
proj_drop=proj_drop,
drop_path=dp_rates[i],
layer_scale_init_value=layer_scale_init_value,
res_scale_init_value=res_scale_init_value,
use_nchw=self.use_nchw,
**kwargs,
) for i in range(depth)])
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x: Tensor):
x = self.downsample(x)
B, C, H, W = x.shape
if not self.use_nchw:
x = x.reshape(B, C, -1).transpose(1, 2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
if not self.use_nchw:
x = x.transpose(1, 2).reshape(B, C, H, W)
return x
class MetaFormer(nn.Module):
r""" MetaFormer
A PyTorch impl of : `MetaFormer Baselines for Vision` -
https://arxiv.org/abs/2210.13452
Args:
in_chans (int): Number of input image channels.
num_classes (int): Number of classes for classification head.
global_pool: Pooling for classifier head.
depths (list or tuple): Number of blocks at each stage.
dims (list or tuple): Feature dimension at each stage.
token_mixers (list, tuple or token_fcn): Token mixer for each stage.
mlp_act: Activation layer for MLP.
mlp_bias (boolean): Enable or disable mlp bias term.
drop_path_rate (float): Stochastic depth rate.
drop_rate (float): Dropout rate.
layer_scale_init_values (list, tuple, float or None): Init value for Layer Scale.
            None means do not use layer scale. From: https://arxiv.org/abs/2103.17239.
res_scale_init_values (list, tuple, float or None): Init value for res Scale on residual connections.
            None means do not use res scale. From: https://arxiv.org/abs/2110.09456.
downsample_norm (nn.Module): Norm layer used in stem and downsampling layers.
norm_layers (list, tuple or norm_fcn): Norm layers for each stage.
output_norm: Norm layer before classifier head.
use_mlp_head: Use MLP classification head.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
depths=(2, 2, 6, 2),
dims=(64, 128, 320, 512),
token_mixers=Pooling,
mlp_act=StarReLU,
mlp_bias=False,
drop_path_rate=0.,
proj_drop_rate=0.,
drop_rate=0.0,
layer_scale_init_values=None,
res_scale_init_values=(None, None, 1.0, 1.0),
downsample_norm=LayerNorm2dNoBias,
norm_layers=LayerNorm2dNoBias,
output_norm=LayerNorm2d,
use_mlp_head=True,
**kwargs,
):
super().__init__()
self.num_classes = num_classes
self.num_features = dims[-1]
self.drop_rate = drop_rate
self.use_mlp_head = use_mlp_head
self.num_stages = len(depths)
# convert everything to lists if they aren't indexable
if not isinstance(depths, (list, tuple)):
depths = [depths] # it means the model has only one stage
if not isinstance(dims, (list, tuple)):
dims = [dims]
if not isinstance(token_mixers, (list, tuple)):
token_mixers = [token_mixers] * self.num_stages
if not isinstance(norm_layers, (list, tuple)):
norm_layers = [norm_layers] * self.num_stages
if not isinstance(layer_scale_init_values, (list, tuple)):
layer_scale_init_values = [layer_scale_init_values] * self.num_stages
if not isinstance(res_scale_init_values, (list, tuple)):
res_scale_init_values = [res_scale_init_values] * self.num_stages
self.grad_checkpointing = False
self.feature_info = []
self.stem = Stem(
in_chans,
dims[0],
norm_layer=downsample_norm
)
stages = []
prev_dim = dims[0]
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
for i in range(self.num_stages):
stages += [MetaFormerStage(
prev_dim,
dims[i],
depth=depths[i],
token_mixer=token_mixers[i],
mlp_act=mlp_act,
mlp_bias=mlp_bias,
proj_drop=proj_drop_rate,
dp_rates=dp_rates[i],
layer_scale_init_value=layer_scale_init_values[i],
res_scale_init_value=res_scale_init_values[i],
downsample_norm=downsample_norm,
norm_layer=norm_layers[i],
**kwargs,
)]
prev_dim = dims[i]
self.feature_info += [dict(num_chs=dims[i], reduction=2, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# if using MlpHead, dropout is handled by MlpHead
if num_classes > 0:
if self.use_mlp_head:
final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate)
else:
final = nn.Linear(self.num_features, num_classes)
else:
final = nn.Identity()
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', output_norm(self.num_features)),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(drop_rate) if self.use_mlp_head else nn.Identity()),
('fc', final)
]))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for stage in self.stages:
stage.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool=None):
if global_pool is not None:
self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity()
if num_classes > 0:
if self.use_mlp_head:
final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate)
else:
final = nn.Linear(self.num_features, num_classes)
else:
final = nn.Identity()
self.head.fc = final
def forward_head(self, x: Tensor, pre_logits: bool = False):
# NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :(
x = self.head.global_pool(x)
x = self.head.norm(x)
x = self.head.flatten(x)
x = self.head.drop(x)
return x if pre_logits else self.head.fc(x)
def forward_features(self, x: Tensor):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward(self, x: Tensor):
x = self.forward_features(x)
x = self.forward_head(x)
return x
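# Hedged construction sketch (illustration only, not a pretrained configuration): token mixers are
# passed per stage, so a small hybrid model mixing Pooling and Attention stages can be assembled
# directly. Depths, dims and num_classes below are arbitrary examples.
def _example_custom_metaformer():
    model = MetaFormer(
        in_chans=3,
        num_classes=10,
        depths=(2, 2, 2, 2),
        dims=(32, 64, 128, 256),
        token_mixers=[Pooling, Pooling, Attention, Attention],
        norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
    )
    return model(torch.randn(1, 3, 224, 224)).shape  # torch.Size([1, 10])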
# this works but it's long and breaks backwards compatibility with weights from the poolformer-only impl
def checkpoint_filter_fn(state_dict, model):
if 'stem.conv.weight' in state_dict:
return state_dict
import re
out_dict = {}
is_poolformerv1 = 'network.0.0.mlp.fc1.weight' in state_dict
model_state_dict = model.state_dict()
for k, v in state_dict.items():
if is_poolformerv1:
k = re.sub(r'layer_scale_([0-9]+)', r'layer_scale\1.scale', k)
k = k.replace('network.1', 'downsample_layers.1')
k = k.replace('network.3', 'downsample_layers.2')
k = k.replace('network.5', 'downsample_layers.3')
k = k.replace('network.2', 'network.1')
k = k.replace('network.4', 'network.2')
k = k.replace('network.6', 'network.3')
k = k.replace('network', 'stages')
k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k)
k = k.replace('downsample.proj', 'downsample.conv')
k = k.replace('patch_embed.proj', 'patch_embed.conv')
k = re.sub(r'([0-9]+).([0-9]+)', r'\1.blocks.\2', k)
k = k.replace('stages.0.downsample', 'patch_embed')
k = k.replace('patch_embed', 'stem')
k = k.replace('post_norm', 'norm')
k = k.replace('pre_norm', 'norm')
k = re.sub(r'^head', 'head.fc', k)
k = re.sub(r'^norm', 'head.norm', k)
        if v.shape != model_state_dict[k].shape and v.numel() == model_state_dict[k].numel():
v = v.reshape(model_state_dict[k].shape)
out_dict[k] = v
return out_dict
def _create_metaformer(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (2, 2, 6, 2))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
MetaFormer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 1.0, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'classifier': 'head.fc', 'first_conv': 'stem.conv',
**kwargs
}
default_cfgs = generate_default_cfgs({
'poolformer_s12.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_s24.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
'poolformer_m48.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
'poolformerv2_s12.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_s24.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_s36.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_m36.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_m48.sail_in1k': _cfg(hf_hub_id='timm/'),
'convformer_s18.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s18.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s18.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s18.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s18.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_m36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_m36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_m36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_m36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_b36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_b36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_b36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_b36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_b36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_s18.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s18.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s18.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s18.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s18.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_m36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_m36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_m36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_m36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_b36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_b36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_b36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_b36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_b36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
})
@register_model
def poolformer_s12(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[2, 2, 6, 2],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-5,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s12', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_s24(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[4, 4, 12, 4],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-5,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s24', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[96, 192, 384, 768],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_m48(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[8, 8, 24, 8],
dims=[96, 192, 384, 768],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_m48', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s12(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[2, 2, 6, 2],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s12', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s24(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[4, 4, 12, 4],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s24', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[96, 192, 384, 768],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_m36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_m48(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[8, 8, 24, 8],
dims=[96, 192, 384, 768],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_m48', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_s18(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 3, 9, 3],
dims=[64, 128, 320, 512],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_s18', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[64, 128, 320, 512],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[96, 192, 384, 576],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_b36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[128, 256, 512, 768],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_b36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_s18(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 3, 9, 3],
dims=[64, 128, 320, 512],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_s18', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[64, 128, 320, 512],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[96, 192, 384, 576],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_b36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[128, 256, 512, 768],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_b36', pretrained=pretrained, **model_kwargs)
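# Example usage (a minimal sketch; assumes the `timm` package is importable with these
# registered entrypoints):
#
# import timm, torch
# model = timm.create_model('caformer_s18', pretrained=False, num_classes=10)
# with torch.no_grad():
# logits = model(torch.randn(1, 3, 224, 224)) # -> (1, 10)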
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/dla.py | """ Deep Layer Aggregation and DLA w/ Res2Net
DLA original adapted from Official Pytorch impl at: https://github.com/ucbdrive/dla
DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484
Res2Net additions from: https://github.com/gasvn/Res2Net/
Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
"""
import math
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['DLA']
class DlaBasic(nn.Module):
"""DLA Basic"""
def __init__(self, inplanes, planes, stride=1, dilation=1, **_):
super(DlaBasic, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=3,
stride=stride, padding=dilation, bias=False, dilation=dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3,
stride=1, padding=dilation, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.stride = stride
def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None):
if shortcut is None:
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += shortcut
out = self.relu(out)
return out
class DlaBottleneck(nn.Module):
"""DLA/DLA-X Bottleneck"""
expansion = 2
def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64):
super(DlaBottleneck, self).__init__()
self.stride = stride
mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)
mid_planes = mid_planes // self.expansion
self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes)
self.conv2 = nn.Conv2d(
mid_planes, mid_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality)
self.bn2 = nn.BatchNorm2d(mid_planes)
self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(outplanes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None):
if shortcut is None:
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += shortcut
out = self.relu(out)
return out
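# DlaBottle2neck replaces the single 3x3 conv of the bottleneck with a Res2Net-style hierarchical
# split: the expanded features are split into `scale` groups, each group is convolved after adding
# the previous group's output (skipped for the first group or when the block downsamples), and the
# groups are re-concatenated before the final 1x1 projection.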
class DlaBottle2neck(nn.Module):
""" Res2Net/Res2NeXT DLA Bottleneck
Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py
"""
expansion = 2
def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4):
super(DlaBottle2neck, self).__init__()
self.is_first = stride > 1
self.scale = scale
mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)
mid_planes = mid_planes // self.expansion
self.width = mid_planes
self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes * scale)
num_scale_convs = max(1, scale - 1)
convs = []
bns = []
for _ in range(num_scale_convs):
convs.append(nn.Conv2d(
mid_planes, mid_planes, kernel_size=3,
stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False))
bns.append(nn.BatchNorm2d(mid_planes))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) if self.is_first else None
self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(outplanes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None):
if shortcut is None:
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
spo = []
sp = spx[0] # redundant, for torchscript
for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
if i == 0 or self.is_first:
sp = spx[i]
else:
sp = sp + spx[i]
sp = conv(sp)
sp = bn(sp)
sp = self.relu(sp)
spo.append(sp)
if self.scale > 1:
if self.pool is not None: # self.is_first == True, None check for torchscript
spo.append(self.pool(spx[-1]))
else:
spo.append(spx[-1])
out = torch.cat(spo, 1)
out = self.conv3(out)
out = self.bn3(out)
out += shortcut
out = self.relu(out)
return out
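# DlaRoot is the aggregation node of DLA: it concatenates the child feature maps along the channel
# dim, fuses them with a 1x1 conv + BN, optionally adds a shortcut from the first child, then ReLUs.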
class DlaRoot(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, shortcut):
super(DlaRoot, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.shortcut = shortcut
def forward(self, x_children: List[torch.Tensor]):
x = self.conv(torch.cat(x_children, 1))
x = self.bn(x)
if self.shortcut:
x += x_children[0]
x = self.relu(x)
return x
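# DlaTree builds the hierarchical aggregation recursively: at levels == 1 it joins two blocks
# (tree1, tree2) with a DlaRoot; at deeper levels it nests two sub-trees and defers aggregation to
# the innermost root, passing the accumulated children (and optional level-root input) down the chain.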
class DlaTree(nn.Module):
def __init__(
self,
levels,
block,
in_channels,
out_channels,
stride=1,
dilation=1,
cardinality=1,
base_width=64,
level_root=False,
root_dim=0,
root_kernel_size=1,
root_shortcut=False,
):
super(DlaTree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity()
self.project = nn.Identity()
cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width)
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride, **cargs)
self.tree2 = block(out_channels, out_channels, 1, **cargs)
if in_channels != out_channels:
                # NOTE: the official impl/weights have project layers in the levels > 1 case that are
                # never used. The project layer has been moved here to avoid wasted params, but old
                # checkpoints will need strict=False while loading.
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels))
self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut)
else:
cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut))
self.tree1 = DlaTree(
levels - 1,
block,
in_channels,
out_channels,
stride,
root_dim=0,
**cargs,
)
self.tree2 = DlaTree(
levels - 1,
block,
out_channels,
out_channels,
root_dim=root_dim + out_channels,
**cargs,
)
self.root = None
self.level_root = level_root
self.root_dim = root_dim
self.levels = levels
def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None):
if children is None:
children = []
bottom = self.downsample(x)
shortcut = self.project(bottom)
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, shortcut)
if self.root is not None: # levels == 1
x2 = self.tree2(x1)
x = self.root([x2, x1] + children)
else:
children.append(x1)
x = self.tree2(x1, None, children)
return x
class DLA(nn.Module):
def __init__(
self,
levels,
channels,
output_stride=32,
num_classes=1000,
in_chans=3,
global_pool='avg',
cardinality=1,
base_width=64,
block=DlaBottle2neck,
shortcut_root=False,
drop_rate=0.0,
):
super(DLA, self).__init__()
self.channels = channels
self.num_classes = num_classes
self.cardinality = cardinality
self.base_width = base_width
assert output_stride == 32 # FIXME support dilation
self.base_layer = nn.Sequential(
nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False),
nn.BatchNorm2d(channels[0]),
nn.ReLU(inplace=True),
)
self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2)
cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root)
self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs)
self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs)
self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs)
self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs)
self.feature_info = [
dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level
dict(num_chs=channels[1], reduction=2, module='level1'),
dict(num_chs=channels[2], reduction=4, module='level2'),
dict(num_chs=channels[3], reduction=8, module='level3'),
dict(num_chs=channels[4], reduction=16, module='level4'),
dict(num_chs=channels[5], reduction=32, module='level5'),
]
self.num_features = channels[-1]
self.global_pool, self.head_drop, self.fc = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
use_conv=True,
drop_rate=drop_rate,
)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(
inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
nn.BatchNorm2d(planes),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^base_layer',
blocks=r'^level(\d+)' if coarse else [
# an unusual arch, this achieves somewhat more granularity without getting super messy
(r'^level(\d+)\.tree(\d+)', None),
(r'^level(\d+)\.root', (2,)),
(r'^level(\d+)', (1,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, use_conv=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
def forward_features(self, x):
x = self.base_layer(x)
x = self.level0(x)
x = self.level1(x)
x = self.level2(x)
x = self.level3(x)
x = self.level4(x)
x = self.level5(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
if pre_logits:
return self.flatten(x)
x = self.fc(x)
return self.flatten(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_dla(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
DLA,
variant,
pretrained,
pretrained_strict=False,
feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'base_layer.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'dla34.in1k': _cfg(hf_hub_id='timm/'),
'dla46_c.in1k': _cfg(hf_hub_id='timm/'),
'dla46x_c.in1k': _cfg(hf_hub_id='timm/'),
'dla60x_c.in1k': _cfg(hf_hub_id='timm/'),
'dla60.in1k': _cfg(hf_hub_id='timm/'),
'dla60x.in1k': _cfg(hf_hub_id='timm/'),
'dla102.in1k': _cfg(hf_hub_id='timm/'),
'dla102x.in1k': _cfg(hf_hub_id='timm/'),
'dla102x2.in1k': _cfg(hf_hub_id='timm/'),
'dla169.in1k': _cfg(hf_hub_id='timm/'),
'dla60_res2net.in1k': _cfg(hf_hub_id='timm/'),
'dla60_res2next.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def dla60_res2net(pretrained=False, **kwargs) -> DLA:
model_args = dict(
levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
block=DlaBottle2neck, cardinality=1, base_width=28)
return _create_dla('dla60_res2net', pretrained, **dict(model_args, **kwargs))
@register_model
def dla60_res2next(pretrained=False, **kwargs) -> DLA:
model_args = dict(
levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
block=DlaBottle2neck, cardinality=8, base_width=4)
return _create_dla('dla60_res2next', pretrained, **dict(model_args, **kwargs))
@register_model
def dla34(pretrained=False, **kwargs) -> DLA: # DLA-34
model_args = dict(
levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], block=DlaBasic)
return _create_dla('dla34', pretrained, **dict(model_args, **kwargs))
@register_model
def dla46_c(pretrained=False, **kwargs) -> DLA: # DLA-46-C
model_args = dict(
levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck)
return _create_dla('dla46_c', pretrained, **dict(model_args, **kwargs))
@register_model
def dla46x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-46-C
model_args = dict(
levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],
block=DlaBottleneck, cardinality=32, base_width=4)
return _create_dla('dla46x_c', pretrained, **dict(model_args, **kwargs))
@register_model
def dla60x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-60-C
model_args = dict(
levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256],
block=DlaBottleneck, cardinality=32, base_width=4)
return _create_dla('dla60x_c', pretrained, **dict(model_args, **kwargs))
@register_model
def dla60(pretrained=False, **kwargs) -> DLA: # DLA-60
model_args = dict(
levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck)
return _create_dla('dla60', pretrained, **dict(model_args, **kwargs))
@register_model
def dla60x(pretrained=False, **kwargs) -> DLA: # DLA-X-60
model_args = dict(
levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, cardinality=32, base_width=4)
return _create_dla('dla60x', pretrained, **dict(model_args, **kwargs))
@register_model
def dla102(pretrained=False, **kwargs) -> DLA: # DLA-102
model_args = dict(
levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, shortcut_root=True)
return _create_dla('dla102', pretrained, **dict(model_args, **kwargs))
@register_model
def dla102x(pretrained=False, **kwargs) -> DLA: # DLA-X-102
model_args = dict(
levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True)
return _create_dla('dla102x', pretrained, **dict(model_args, **kwargs))
@register_model
def dla102x2(pretrained=False, **kwargs) -> DLA: # DLA-X-102 64
model_args = dict(
levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True)
return _create_dla('dla102x2', pretrained, **dict(model_args, **kwargs))
@register_model
def dla169(pretrained=False, **kwargs) -> DLA: # DLA-169
model_args = dict(
levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, shortcut_root=True)
return _create_dla('dla169', pretrained, **dict(model_args, **kwargs))
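# Example usage as a feature backbone (a minimal sketch; assumes `timm` is installed):
#
# import timm, torch
# backbone = timm.create_model('dla34', pretrained=False, features_only=True)
# feats = backbone(torch.randn(1, 3, 224, 224))
# # feats is a list of 5 maps at strides 2, 4, 8, 16, 32 (out_indices=(1, 2, 3, 4, 5) above)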
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/vovnet.py | """ VoVNet (V1 & V2)
Papers:
* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730
* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
Looked at https://github.com/youngwanLEE/vovnet-detectron2 &
https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py
for some reference, rewrote most of the code.
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import List
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath, \
create_attn, create_norm_act_layer
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['VovNet'] # model_registry will add each entrypoint fn to this
class SequentialAppendList(nn.Sequential):
def __init__(self, *args):
super(SequentialAppendList, self).__init__(*args)
def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor:
for i, module in enumerate(self):
if i == 0:
concat_list.append(module(x))
else:
concat_list.append(module(concat_list[-1]))
x = torch.cat(concat_list, dim=1)
return x
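# OsaBlock implements One-Shot Aggregation: a chain of 3x3 (or depthwise separable) convs whose
# intermediate outputs, together with the block input, are concatenated once at the end and fused
# by a 1x1 conv, optionally followed by an attention module (ESE/ECA) and a residual connection.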
class OsaBlock(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
layer_per_block,
residual=False,
depthwise=False,
attn='',
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_path=None,
):
super(OsaBlock, self).__init__()
self.residual = residual
self.depthwise = depthwise
conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer)
next_in_chs = in_chs
if self.depthwise and next_in_chs != mid_chs:
assert not residual
self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs)
else:
self.conv_reduction = None
mid_convs = []
for i in range(layer_per_block):
if self.depthwise:
conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs)
else:
conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs)
next_in_chs = mid_chs
mid_convs.append(conv)
self.conv_mid = SequentialAppendList(*mid_convs)
# feature aggregation
next_in_chs = in_chs + layer_per_block * mid_chs
self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs)
self.attn = create_attn(attn, out_chs) if attn else None
self.drop_path = drop_path
def forward(self, x):
output = [x]
if self.conv_reduction is not None:
x = self.conv_reduction(x)
x = self.conv_mid(x, output)
x = self.conv_concat(x)
if self.attn is not None:
x = self.attn(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.residual:
x = x + output[0]
return x
class OsaStage(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
block_per_stage,
layer_per_block,
downsample=True,
residual=True,
depthwise=False,
attn='ese',
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_path_rates=None,
):
super(OsaStage, self).__init__()
self.grad_checkpointing = False
if downsample:
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
else:
self.pool = None
blocks = []
for i in range(block_per_stage):
last_block = i == block_per_stage - 1
if drop_path_rates is not None and drop_path_rates[i] > 0.:
drop_path = DropPath(drop_path_rates[i])
else:
drop_path = None
blocks += [OsaBlock(
in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise,
attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path)
]
in_chs = out_chs
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
if self.pool is not None:
x = self.pool(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class VovNet(nn.Module):
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_rate=0.,
drop_path_rate=0.,
**kwargs,
):
"""
Args:
cfg (dict): Model architecture configuration
in_chans (int): Number of input channels (default: 3)
num_classes (int): Number of classifier classes (default: 1000)
global_pool (str): Global pooling type (default: 'avg')
output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32)
norm_layer (Union[str, nn.Module]): normalization layer
act_layer (Union[str, nn.Module]): activation layer
drop_rate (float): Dropout rate (default: 0.)
drop_path_rate (float): Stochastic depth drop-path rate (default: 0.)
kwargs (dict): Extra kwargs overlayed onto cfg
"""
super(VovNet, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
assert output_stride == 32 # FIXME support dilation
cfg = dict(cfg, **kwargs)
stem_stride = cfg.get("stem_stride", 4)
stem_chs = cfg["stem_chs"]
stage_conv_chs = cfg["stage_conv_chs"]
stage_out_chs = cfg["stage_out_chs"]
block_per_stage = cfg["block_per_stage"]
layer_per_block = cfg["layer_per_block"]
conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer)
# Stem module
last_stem_stride = stem_stride // 2
conv_type = SeparableConvNormAct if cfg["depthwise"] else ConvNormAct
self.stem = nn.Sequential(*[
ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs),
conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs),
conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs),
])
self.feature_info = [dict(
num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')]
current_stride = stem_stride
# OSA stages
stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage)
in_ch_list = stem_chs[-1:] + stage_out_chs[:-1]
stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs)
stages = []
for i in range(4): # num_stages
downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4
stages += [OsaStage(
in_ch_list[i],
stage_conv_chs[i],
stage_out_chs[i],
block_per_stage[i],
layer_per_block,
downsample=downsample,
drop_path_rates=stage_dpr[i],
**stage_args,
)]
self.num_features = stage_out_chs[i]
current_stride *= 2 if downsample else 1
self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.Linear):
nn.init.zeros_(m.bias)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
return self.stages(x)
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 &
# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py
model_cfgs = dict(
vovnet39a=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=False,
depthwise=False,
attn='',
),
vovnet57a=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 4, 3],
residual=False,
depthwise=False,
attn='',
),
ese_vovnet19b_slim_dw=dict(
stem_chs=[64, 64, 64],
stage_conv_chs=[64, 80, 96, 112],
stage_out_chs=[112, 256, 384, 512],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=True,
attn='ese',
),
ese_vovnet19b_dw=dict(
stem_chs=[64, 64, 64],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=True,
attn='ese',
),
ese_vovnet19b_slim=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[64, 80, 96, 112],
stage_out_chs=[112, 256, 384, 512],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet19b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet39b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet57b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 4, 3],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet99b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 3, 9, 3],
residual=True,
depthwise=False,
attn='ese',
),
eca_vovnet39b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=True,
depthwise=False,
attn='eca',
),
)
model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b']
def _create_vovnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
VovNet,
variant,
pretrained,
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs,
}
default_cfgs = generate_default_cfgs({
'vovnet39a.untrained': _cfg(url=''),
'vovnet57a.untrained': _cfg(url=''),
'ese_vovnet19b_slim_dw.untrained': _cfg(url=''),
'ese_vovnet19b_dw.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'ese_vovnet19b_slim.untrained': _cfg(url=''),
'ese_vovnet39b.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'ese_vovnet57b.untrained': _cfg(url=''),
'ese_vovnet99b.untrained': _cfg(url=''),
'eca_vovnet39b.untrained': _cfg(url=''),
'ese_vovnet39b_evos.untrained': _cfg(url=''),
})
@register_model
def vovnet39a(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs)
@register_model
def vovnet57a(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_slim_dw(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_dw(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_slim(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet39b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet57b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet99b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs)
@register_model
def eca_vovnet39b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs)
# Experimental Models
@register_model
def ese_vovnet39b_evos(pretrained=False, **kwargs) -> VovNet:
def norm_act_fn(num_features, **nkwargs):
return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs)
return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
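# Example usage (a minimal sketch; assumes `timm` is installed):
#
# import timm, torch
# model = timm.create_model('ese_vovnet39b', pretrained=False)
# with torch.no_grad():
# logits = model(torch.randn(1, 3, 224, 224)) # -> (1, 1000)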
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/edgenext.py | """ EdgeNeXt
Paper: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications`
- https://arxiv.org/abs/2206.10589
Original code and weights from https://github.com/mmaaz60/EdgeNeXt
Modifications and additions for timm by / Copyright 2022, Ross Wightman
"""
import math
from collections import OrderedDict
from functools import partial
from typing import Tuple
import torch
import torch.nn.functional as F
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, SelectAdaptivePool2d, create_conv2d, \
use_fused_attn, NormMlpClassifierHead, ClassifierHead
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import named_apply, checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['EdgeNeXt'] # model_registry will add each entrypoint fn to this
@register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method
class PositionalEncodingFourier(nn.Module):
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
def forward(self, shape: Tuple[int, int, int]):
device = self.token_projection.weight.device
dtype = self.token_projection.weight.dtype
inv_mask = ~torch.zeros(shape).to(device=device, dtype=torch.bool)
y_embed = inv_mask.cumsum(1, dtype=dtype)
x_embed = inv_mask.cumsum(2, dtype=dtype)
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.hidden_dim, dtype=dtype, device=device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(),
pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(),
pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos)
return pos
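# ConvBlock is the ConvNeXt-style local encoder used in EdgeNeXt: depthwise conv, channels-last
# LayerNorm, an MLP over the channel dim with GELU, optional per-channel layer scale (gamma), and a
# residual connection with stochastic depth.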
class ConvBlock(nn.Module):
def __init__(
self,
dim,
dim_out=None,
kernel_size=7,
stride=1,
conv_bias=True,
expand_ratio=4,
ls_init_value=1e-6,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU, drop_path=0.,
):
super().__init__()
dim_out = dim_out or dim
self.shortcut_after_dw = stride > 1 or dim != dim_out
self.conv_dw = create_conv2d(
dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias)
self.norm = norm_layer(dim_out)
self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
if self.shortcut_after_dw:
shortcut = x
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.mlp(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = shortcut + self.drop_path(x)
return x
class CrossCovarianceAttn(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.
):
super().__init__()
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1)
q, k, v = qkv.unbind(0)
        # NOTE: this is NOT spatial attention; q, k, v are (B, num_heads, head_dim, N), so the
        # attention map is head_dim x head_dim (channel attention)
attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v)
x = x.permute(0, 3, 1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@torch.jit.ignore
def no_weight_decay(self):
return {'temperature'}
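# SplitTransposeBlock is the split depth-wise transpose attention (SDTA) encoder of EdgeNeXt:
# channels are chunked and processed by cascaded depthwise convs (Res2Net style), an optional
# Fourier positional encoding is added, cross-covariance attention (XCA) mixes channels with its
# own residual, then a channels-last MLP is applied and the original block input is added back,
# with optional layer scale and stochastic depth throughout.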
class SplitTransposeBlock(nn.Module):
def __init__(
self,
dim,
num_scales=1,
num_heads=8,
expand_ratio=4,
use_pos_emb=True,
conv_bias=True,
qkv_bias=True,
ls_init_value=1e-6,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
drop_path=0.,
attn_drop=0.,
proj_drop=0.
):
super().__init__()
width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales)))
self.width = width
self.num_scales = max(1, num_scales - 1)
convs = []
for i in range(self.num_scales):
convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias))
self.convs = nn.ModuleList(convs)
self.pos_embd = None
if use_pos_emb:
self.pos_embd = PositionalEncodingFourier(dim=dim)
self.norm_xca = norm_layer(dim)
self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.xca = CrossCovarianceAttn(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop)
self.norm = norm_layer(dim, eps=1e-6)
self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
# scales code re-written for torchscript as per my res2net fixes -rw
        # NOTE torch.split(x, self.width, 1) causes issues with ONNX export
spx = x.chunk(len(self.convs) + 1, dim=1)
spo = []
sp = spx[0]
for i, conv in enumerate(self.convs):
if i > 0:
sp = sp + spx[i]
sp = conv(sp)
spo.append(sp)
spo.append(spx[-1])
x = torch.cat(spo, 1)
# XCA
B, C, H, W = x.shape
x = x.reshape(B, C, H * W).permute(0, 2, 1)
if self.pos_embd is not None:
pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x)))
x = x.reshape(B, H, W, C)
# Inverted Bottleneck
x = self.norm(x)
x = self.mlp(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = shortcut + self.drop_path(x)
return x
class EdgeNeXtStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=2,
depth=2,
num_global_blocks=1,
num_heads=4,
scales=2,
kernel_size=7,
expand_ratio=4,
use_pos_emb=False,
downsample_block=False,
conv_bias=True,
ls_init_value=1.0,
drop_path_rates=None,
norm_layer=LayerNorm2d,
norm_layer_cl=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU
):
super().__init__()
self.grad_checkpointing = False
if downsample_block or stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = nn.Sequential(
norm_layer(in_chs),
nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, bias=conv_bias)
)
in_chs = out_chs
stage_blocks = []
for i in range(depth):
if i < depth - num_global_blocks:
stage_blocks.append(
ConvBlock(
dim=in_chs,
dim_out=out_chs,
stride=stride if downsample_block and i == 0 else 1,
conv_bias=conv_bias,
kernel_size=kernel_size,
expand_ratio=expand_ratio,
ls_init_value=ls_init_value,
drop_path=drop_path_rates[i],
norm_layer=norm_layer_cl,
act_layer=act_layer,
)
)
else:
stage_blocks.append(
SplitTransposeBlock(
dim=in_chs,
num_scales=scales,
num_heads=num_heads,
expand_ratio=expand_ratio,
use_pos_emb=use_pos_emb,
conv_bias=conv_bias,
ls_init_value=ls_init_value,
drop_path=drop_path_rates[i],
norm_layer=norm_layer_cl,
act_layer=act_layer,
)
)
in_chs = out_chs
self.blocks = nn.Sequential(*stage_blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class EdgeNeXt(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
dims=(24, 48, 88, 168),
depths=(3, 3, 9, 3),
global_block_counts=(0, 1, 1, 1),
kernel_sizes=(3, 5, 7, 9),
heads=(8, 8, 8, 8),
d2_scales=(2, 2, 3, 4),
use_pos_emb=(False, True, False, False),
ls_init_value=1e-6,
head_init_scale=1.,
expand_ratio=4,
downsample_block=False,
conv_bias=True,
stem_type='patch',
head_norm_first=False,
act_layer=nn.GELU,
drop_path_rate=0.,
drop_rate=0.,
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.drop_rate = drop_rate
norm_layer = partial(LayerNorm2d, eps=1e-6)
norm_layer_cl = partial(nn.LayerNorm, eps=1e-6)
self.feature_info = []
assert stem_type in ('patch', 'overlap')
if stem_type == 'patch':
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, bias=conv_bias),
norm_layer(dims[0]),
)
else:
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias),
norm_layer(dims[0]),
)
curr_stride = 4
stages = []
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
in_chs = dims[0]
for i in range(4):
stride = 2 if curr_stride == 2 or i > 0 else 1
# FIXME support dilation / output_stride
curr_stride *= stride
stages.append(EdgeNeXtStage(
in_chs=in_chs,
out_chs=dims[i],
stride=stride,
depth=depths[i],
num_global_blocks=global_block_counts[i],
num_heads=heads[i],
drop_path_rates=dp_rates[i],
scales=d2_scales[i],
expand_ratio=expand_ratio,
kernel_size=kernel_sizes[i],
use_pos_emb=use_pos_emb[i],
ls_init_value=ls_init_value,
downsample_block=downsample_block,
conv_bias=conv_bias,
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
act_layer=act_layer,
))
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
in_chs = dims[i]
self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = dims[-1]
if head_norm_first:
self.norm_pre = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
else:
self.norm_pre = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
norm_layer=norm_layer,
)
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm_pre', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool=None):
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
trunc_normal_tf_(module.weight, std=.02)
nn.init.zeros_(module.bias)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap FB checkpoints -> timm """
if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict:
return state_dict # non-FB checkpoint
# models were released as train checkpoints... :/
if 'model_ema' in state_dict:
state_dict = state_dict['model_ema']
elif 'model' in state_dict:
state_dict = state_dict['model']
elif 'state_dict' in state_dict:
state_dict = state_dict['state_dict']
out_dict = {}
import re
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
k = k.replace('dwconv', 'conv_dw')
k = k.replace('pwconv', 'mlp.fc')
k = k.replace('head.', 'head.fc.')
if k.startswith('norm.'):
k = k.replace('norm', 'head.norm')
if v.ndim == 2 and 'head' not in k:
model_shape = model.state_dict()[k].shape
v = v.reshape(model_shape)
out_dict[k] = v
return out_dict
def _create_edgenext(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
EdgeNeXt, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'edgenext_xx_small.in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'edgenext_x_small.in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'edgenext_small.usi_in1k': _cfg( # USI weights
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0,
),
'edgenext_base.usi_in1k': _cfg( # USI weights
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0,
),
'edgenext_base.in21k_ft_in1k': _cfg( # USI weights
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0,
),
'edgenext_small_rw.sw_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 320, 320), test_crop_pct=1.0,
),
})
@register_model
def edgenext_xx_small(pretrained=False, **kwargs) -> EdgeNeXt:
# 1.33M & 260.58M @ 256 resolution
# 71.23% Top-1 accuracy
# No AA, Color Jitter=0.4, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler
# Jetson FPS=51.66 versus 47.67 for MobileViT_XXS
# For A100: FPS @ BS=1: 212.13 & @ BS=256: 7042.06 versus FPS @ BS=1: 96.68 & @ BS=256: 4624.71 for MobileViT_XXS
model_args = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4))
return _create_edgenext('edgenext_xx_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def edgenext_x_small(pretrained=False, **kwargs) -> EdgeNeXt:
# 2.34M & 538.0M @ 256 resolution
# 75.00% Top-1 accuracy
# No AA, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler
# Jetson FPS=31.61 versus 28.49 for MobileViT_XS
# For A100: FPS @ BS=1: 179.55 & @ BS=256: 4404.95 versus FPS @ BS=1: 94.55 & @ BS=256: 2361.53 for MobileViT_XS
model_args = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4))
return _create_edgenext('edgenext_x_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def edgenext_small(pretrained=False, **kwargs) -> EdgeNeXt:
# 5.59M & 1260.59M @ 256 resolution
# 79.43% Top-1 accuracy
# AA=True, No Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler
# Jetson FPS=20.47 versus 18.86 for MobileViT_S
# For A100: FPS @ BS=1: 172.33 & @ BS=256: 3010.25 versus FPS @ BS=1: 93.84 & @ BS=256: 1785.92 for MobileViT_S
model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304))
return _create_edgenext('edgenext_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def edgenext_base(pretrained=False, **kwargs) -> EdgeNeXt:
# 18.51M & 3840.93M @ 256 resolution
# 82.5% (normal) 83.7% (USI) Top-1 accuracy
# AA=True, Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler
# Jetson FPS=xx.xx versus xx.xx for MobileViT_S
# For A100: FPS @ BS=1: xxx.xx & @ BS=256: xxxx.xx
model_args = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584])
return _create_edgenext('edgenext_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def edgenext_small_rw(pretrained=False, **kwargs) -> EdgeNeXt:
model_args = dict(
depths=(3, 3, 9, 3), dims=(48, 96, 192, 384),
downsample_block=True, conv_bias=False, stem_type='overlap')
return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **dict(model_args, **kwargs))
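# Example usage (a minimal sketch; assumes `timm` is installed):
#
# import timm, torch
# model = timm.create_model('edgenext_small', pretrained=False)
# with torch.no_grad():
# fmap = model.forward_features(torch.randn(1, 3, 256, 256)) # -> (1, 304, 8, 8)
# logits = model.forward_head(fmap) # -> (1, 1000)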
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/twins.py | """ Twins
A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers`
- https://arxiv.org/pdf/2104.13840.pdf
Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below
"""
# --------------------------------------------------------
# Twins
# Copyright (c) 2021 Meituan
# Licensed under The Apache 2.0 License [see LICENSE for details]
# Written by Xinjie Li, Xiangxiang Chu
# --------------------------------------------------------
import math
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, DropPath, to_2tuple, trunc_normal_, use_fused_attn
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Attention
__all__ = ['Twins'] # model_registry will add each entrypoint fn to this
Size_ = Tuple[int, int]
@register_notrace_module # reason: FX can't symbolically trace control flow in forward method
class LocallyGroupedAttn(nn.Module):
""" LSA: self attention within a group
"""
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1):
assert ws != 1
super(LocallyGroupedAttn, self).__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=True)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.ws = ws
def forward(self, x, size: Size_):
        # There are two implementations of this function: zero padding or masking. We observe no obvious
        # difference between them. Either can be used; the padding version below is recommended because
        # it is simpler, though the masking variant (commented out below) is arguably more precise.
B, N, C = x.shape
H, W = size
x = x.view(B, H, W, C)
pad_l = pad_t = 0
pad_r = (self.ws - W % self.ws) % self.ws
pad_b = (self.ws - H % self.ws) % self.ws
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
_h, _w = Hp // self.ws, Wp // self.ws
x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)
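        # x is now (B, num_win_h, num_win_w, ws, ws, C); attention below is computed independently
        # within each ws x ws window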
qkv = self.qkv(x).reshape(
B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
x = x.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# def forward_mask(self, x, size: Size_):
# B, N, C = x.shape
# H, W = size
# x = x.view(B, H, W, C)
# pad_l = pad_t = 0
# pad_r = (self.ws - W % self.ws) % self.ws
# pad_b = (self.ws - H % self.ws) % self.ws
# x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
# _, Hp, Wp, _ = x.shape
# _h, _w = Hp // self.ws, Wp // self.ws
# mask = torch.zeros((1, Hp, Wp), device=x.device)
# mask[:, -pad_b:, :].fill_(1)
# mask[:, :, -pad_r:].fill_(1)
#
# x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C
# mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws)
# attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws
# attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0))
# qkv = self.qkv(x).reshape(
# B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
# # n_h, B, _w*_h, nhead, ws*ws, dim
# q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head
# attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws
# attn = attn + attn_mask.unsqueeze(2)
# attn = attn.softmax(dim=-1)
# attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head
# attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
# x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
# if pad_r > 0 or pad_b > 0:
# x = x[:, :H, :W, :].contiguous()
# x = x.reshape(B, N, C)
# x = self.proj(x)
# x = self.proj_drop(x)
# return x
class GlobalSubSampleAttn(nn.Module):
""" GSA: using a key to summarize the information for a group to be efficient.
"""
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.q = nn.Linear(dim, dim, bias=True)
self.kv = nn.Linear(dim, dim * 2, bias=True)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
else:
self.sr = None
self.norm = None
def forward(self, x, size: Size_):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr is not None:
x = x.permute(0, 2, 1).reshape(B, C, *size)
x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
x = self.norm(x)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
sr_ratio=1,
ws=None,
):
super().__init__()
self.norm1 = norm_layer(dim)
if ws is None:
self.attn = Attention(dim, num_heads, False, None, attn_drop, proj_drop)
elif ws == 1:
self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, proj_drop, sr_ratio)
else:
self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, proj_drop, ws)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, size: Size_):
x = x + self.drop_path1(self.attn(self.norm1(x), size))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
return x
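# PosConv is the conditional positional encoding generator (PEG): a depthwise 3x3 conv applied to
# the feature map reshaped back to 2D, with the conv output added to its input when stride == 1, so
# positional information is derived dynamically from local neighborhoods.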
class PosConv(nn.Module):
# PEG from https://arxiv.org/abs/2102.10882
def __init__(self, in_chans, embed_dim=768, stride=1):
super(PosConv, self).__init__()
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim),
)
self.stride = stride
def forward(self, x, size: Size_):
B, N, C = x.shape
cnn_feat_token = x.transpose(1, 2).view(B, C, *size)
x = self.proj(cnn_feat_token)
if self.stride == 1:
x += cnn_feat_token
x = x.flatten(2).transpose(1, 2)
return x
def no_weight_decay(self):
return ['proj.%d.weight' % i for i in range(4)]
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
f"img_size {img_size} should be divided by patch_size {patch_size}."
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x) -> Tuple[torch.Tensor, Size_]:
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
out_size = (H // self.patch_size[0], W // self.patch_size[1])
return x, out_size
class Twins(nn.Module):
""" Twins Vision Transfomer (Revisiting Spatial Attention)
Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git
"""
def __init__(
self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
global_pool='avg',
embed_dims=(64, 128, 256, 512),
num_heads=(1, 2, 4, 8),
mlp_ratios=(4, 4, 4, 4),
depths=(3, 4, 6, 3),
sr_ratios=(8, 4, 2, 1),
wss=None,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
block_cls=Block,
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.depths = depths
self.embed_dims = embed_dims
self.num_features = embed_dims[-1]
self.grad_checkpointing = False
img_size = to_2tuple(img_size)
prev_chs = in_chans
self.patch_embeds = nn.ModuleList()
self.pos_drops = nn.ModuleList()
for i in range(len(depths)):
self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i]))
self.pos_drops.append(nn.Dropout(p=pos_drop_rate))
prev_chs = embed_dims[i]
img_size = tuple(t // patch_size for t in img_size)
patch_size = 2
self.blocks = nn.ModuleList()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
for k in range(len(depths)):
_block = nn.ModuleList([block_cls(
dim=embed_dims[k],
num_heads=num_heads[k],
mlp_ratio=mlp_ratios[k],
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[cur + i],
norm_layer=norm_layer,
sr_ratio=sr_ratios[k],
ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])],
)
self.blocks.append(_block)
cur += depths[k]
self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims])
self.norm = norm_layer(self.num_features)
# classification head
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# init weights
self.apply(self._init_weights)
@torch.jit.ignore
def no_weight_decay(self):
return set(['pos_block.' + n for n, p in self.pos_block.named_parameters()])
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embeds.0', # stem and embed
blocks=[
(r'^(?:blocks|patch_embeds|pos_block)\.(\d+)', None),
('^norm', (99999,))
] if coarse else [
(r'^blocks\.(\d+)\.(\d+)', None),
(r'^(?:patch_embeds|pos_block)\.(\d+)', (0,)),
(r'^norm', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward_features(self, x):
B = x.shape[0]
for i, (embed, drop, blocks, pos_blk) in enumerate(
zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)):
x, size = embed(x)
x = drop(x)
for j, blk in enumerate(blocks):
x = blk(x, size)
if j == 0:
x = pos_blk(x, size) # PEG here
if i < len(self.depths) - 1:
x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=1)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_twins(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(Twins, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embeds.0.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'twins_pcpvt_small.in1k': _cfg(hf_hub_id='timm/'),
'twins_pcpvt_base.in1k': _cfg(hf_hub_id='timm/'),
'twins_pcpvt_large.in1k': _cfg(hf_hub_id='timm/'),
'twins_svt_small.in1k': _cfg(hf_hub_id='timm/'),
'twins_svt_base.in1k': _cfg(hf_hub_id='timm/'),
'twins_svt_large.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def twins_pcpvt_small(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_pcpvt_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_pcpvt_base(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_pcpvt_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_pcpvt_large(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_pcpvt_large', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_svt_small(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_svt_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_svt_base(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_svt_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_svt_large(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_svt_large', pretrained=pretrained, **dict(model_args, **kwargs))
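# Illustrative usage sketch (added, not part of the original timm source): building one of the
# registered variants above; pretrained=False only constructs the architecture, no download.
if __name__ == '__main__':
    _model = twins_pcpvt_small(pretrained=False)
    _logits = _model(torch.randn(1, 3, 224, 224))
    print(_logits.shape)  # torch.Size([1, 1000])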
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/cait.py | """ Class-Attention in Image Transformers (CaiT)
Paper: 'Going deeper with Image Transformers' - https://arxiv.org/abs/2103.17239
Original code and weights from https://github.com/facebookresearch/deit, copyright below
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, trunc_normal_, use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['Cait', 'ClassAttn', 'LayerScaleBlockClassAttn', 'LayerScaleBlock', 'TalkingHeadAttn']
class ClassAttn(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to do CA
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.fused_attn:
x_cls = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x_cls = attn @ v
x_cls = x_cls.transpose(1, 2).reshape(B, 1, C)
x_cls = self.proj(x_cls)
x_cls = self.proj_drop(x_cls)
return x_cls
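# Illustrative sketch (added, not part of the original timm source): ClassAttn only attends
# *from* the class token, so its output always has sequence length 1 regardless of how many
# patch tokens are in the input.
def _demo_class_attn_shapes():
    attn = ClassAttn(dim=192, num_heads=4)
    tokens = torch.randn(2, 1 + 196, 192)  # [cls] token followed by 14x14 patch tokens
    assert attn(tokens).shape == (2, 1, 192)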
class LayerScaleBlockClassAttn(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add CA and LayerScale
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
attn_block=ClassAttn,
mlp_block=Mlp,
init_values=1e-4,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = attn_block(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = mlp_block(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
self.gamma_1 = nn.Parameter(init_values * torch.ones(dim))
self.gamma_2 = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x, x_cls):
u = torch.cat((x_cls, x), dim=1)
x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u)))
x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls)))
return x_cls
class TalkingHeadAttn(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf)
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_l = nn.Linear(num_heads, num_heads)
self.proj_w = nn.Linear(num_heads, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1)
attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
attn = attn.softmax(dim=-1)
attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
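# Illustrative sketch (added, not part of the original timm source): the extra proj_l/proj_w
# linears mix the attention maps across heads before and after the softmax (the "talking
# heads" trick); input/output shapes match a standard multi-head attention layer.
def _demo_talking_head_attn_shapes():
    attn = TalkingHeadAttn(dim=192, num_heads=4)
    x = torch.randn(2, 196, 192)
    assert attn(x).shape == (2, 196, 192)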
class LayerScaleBlock(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add layerScale
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
attn_block=TalkingHeadAttn,
mlp_block=Mlp,
init_values=1e-4,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = attn_block(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = mlp_block(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
self.gamma_1 = nn.Parameter(init_values * torch.ones(dim))
self.gamma_2 = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
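# Illustrative sketch (added, not part of the original timm source): LayerScale initialises
# gamma_1/gamma_2 to a small constant (init_values), so each residual branch starts close to
# an identity mapping, i.e. x + gamma * f(x) ~ x early in training.
def _demo_layer_scale_init():
    blk = LayerScaleBlock(dim=192, num_heads=4, init_values=1e-4)
    assert torch.allclose(blk.gamma_1, torch.full((192,), 1e-4))
    assert torch.allclose(blk.gamma_2, torch.full((192,), 1e-4))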
class Cait(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to adapt to our cait models
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
block_layers=LayerScaleBlock,
block_layers_token=LayerScaleBlockClassAttn,
patch_layer=PatchEmbed,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
attn_block=TalkingHeadAttn,
mlp_block=Mlp,
init_values=1e-4,
attn_block_token_only=ClassAttn,
mlp_block_token_only=Mlp,
depth_token_only=2,
mlp_ratio_token_only=4.0
):
super().__init__()
assert global_pool in ('', 'token', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim
self.grad_checkpointing = False
self.patch_embed = patch_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
dpr = [drop_path_rate for i in range(depth)]
self.blocks = nn.Sequential(*[block_layers(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
attn_block=attn_block,
mlp_block=mlp_block,
init_values=init_values,
) for i in range(depth)])
self.blocks_token_only = nn.ModuleList([block_layers_token(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio_token_only,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
attn_block=attn_block_token_only,
mlp_block=mlp_block_token_only,
init_values=init_values,
) for _ in range(depth_token_only)])
self.norm = norm_layer(embed_dim)
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def group_matcher(self, coarse=False):
def _matcher(name):
if any([name.startswith(n) for n in ('cls_token', 'pos_embed', 'patch_embed')]):
return 0
elif name.startswith('blocks.'):
return int(name.split('.')[1]) + 1
elif name.startswith('blocks_token_only.'):
# overlap token only blocks with last blocks
to_offset = len(self.blocks) - len(self.blocks_token_only) + 1
return int(name.split('.')[1]) + to_offset
elif name.startswith('norm.'):
return len(self.blocks)
else:
return float('inf')
return _matcher
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'token', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = x + self.pos_embed
x = self.pos_drop(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
for i, blk in enumerate(self.blocks_token_only):
cls_tokens = blk(x, cls_tokens)
x = torch.cat((cls_tokens, x), dim=1)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model=None):
if 'model' in state_dict:
state_dict = state_dict['model']
checkpoint_no_module = {}
for k, v in state_dict.items():
checkpoint_no_module[k.replace('module.', '')] = v
return checkpoint_no_module
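# Illustrative sketch (added, not part of the original timm source): the filter above unwraps
# an optional {'model': ...} checkpoint wrapper and strips the 'module.' prefix that
# DataParallel/DistributedDataParallel adds to parameter names.
def _demo_checkpoint_filter():
    sd = {'model': {'module.cls_token': torch.zeros(1, 1, 192)}}
    cleaned = checkpoint_filter_fn(sd)
    assert list(cleaned.keys()) == ['cls_token']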
def _create_cait(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
Cait,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 384, 384), 'pool_size': None,
'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'cait_xxs24_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/XXS24_224.pth',
input_size=(3, 224, 224),
),
'cait_xxs24_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/XXS24_384.pth',
),
'cait_xxs36_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/XXS36_224.pth',
input_size=(3, 224, 224),
),
'cait_xxs36_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/XXS36_384.pth',
),
'cait_xs24_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/XS24_384.pth',
),
'cait_s24_224.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/S24_224.pth',
input_size=(3, 224, 224),
),
'cait_s24_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/S24_384.pth',
),
'cait_s36_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/S36_384.pth',
),
'cait_m36_384.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/M36_384.pth',
),
'cait_m48_448.fb_dist_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/M48_448.pth',
input_size=(3, 448, 448),
),
})
@register_model
def cait_xxs24_224(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-5)
model = _create_cait('cait_xxs24_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_xxs24_384(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-5)
model = _create_cait('cait_xxs24_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_xxs36_224(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-5)
model = _create_cait('cait_xxs36_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_xxs36_384(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-5)
model = _create_cait('cait_xxs36_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_xs24_384(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=288, depth=24, num_heads=6, init_values=1e-5)
model = _create_cait('cait_xs24_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_s24_224(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-5)
model = _create_cait('cait_s24_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_s24_384(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-5)
model = _create_cait('cait_s24_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_s36_384(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=8, init_values=1e-6)
model = _create_cait('cait_s36_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_m36_384(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=768, depth=36, num_heads=16, init_values=1e-6)
model = _create_cait('cait_m36_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def cait_m48_448(pretrained=False, **kwargs) -> Cait:
model_args = dict(patch_size=16, embed_dim=768, depth=48, num_heads=16, init_values=1e-6)
model = _create_cait('cait_m48_448', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/hardcorenas.py | from functools import partial
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from ._builder import build_model_with_cfg
from ._builder import pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels
from ._registry import register_model, generate_default_cfgs
from .mobilenetv3 import MobileNetV3, MobileNetV3Features
__all__ = [] # model_registry will add each entrypoint fn to this
def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
"""Creates a hardcorenas model
Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
Paper: https://arxiv.org/abs/2102.11646
"""
num_features = 1280
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=num_features,
stem_size=32,
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=se_layer,
**kwargs,
)
features_only = False
model_cls = MobileNetV3
kwargs_filter = None
if model_kwargs.pop('features_only', False):
features_only = True
        kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias')
model_cls = MobileNetV3Features
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
pretrained_strict=not features_only,
kwargs_filter=kwargs_filter,
**model_kwargs,
)
if features_only:
model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
return model
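# Illustrative sketch (added, not part of the original timm source): a rough reading of the
# block-definition strings used by the variants below, e.g. 'ir_r1_k5_s2_e3_c24_nre_se0.25'
# ~ inverted-residual block, 1 repeat, 5x5 kernel, stride 2, expansion 3, 24 output channels,
# ReLU act ('nre'), SE ratio 0.25; 'ds' = depthwise-separable, 'cn' = plain conv-bn-act.
# decode_arch_def turns these strings into per-stage lists of block kwargs.
def _demo_decode_arch_def():
    block_args = decode_arch_def([['ds_r1_k3_s1_e1_c16_nre']])
    print(block_args[0][0])  # kwargs dict for the single block in the first stage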
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'hardcorenas_a.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_b.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_c.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_d.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_e.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_f.miil_green_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def hardcorenas_a(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_A """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_b(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_B """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'],
['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'],
['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_c(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_C """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre',
'ir_r1_k5_s1_e3_c40_nre'],
['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_d(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_D """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
'ir_r1_k3_s1_e3_c80_se0.25'],
['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25',
'ir_r1_k5_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_e(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_E """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25',
'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
'ir_r1_k5_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_f(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_F """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
'ir_r1_k3_s1_e3_c80_se0.25'],
['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
'ir_r1_k3_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs)
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/byobnet.py | """ Bring-Your-Own-Blocks Network
A flexible network w/ dataclass based config for stacking those NN blocks.
This model is currently used to implement the following networks:
GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)).
Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0
RepVGG - repvgg_*
Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT
MobileOne - mobileone_*
Paper: `MobileOne: An Improved One millisecond Mobile Backbone` - https://arxiv.org/abs/2206.04040
Code and weights: https://github.com/apple/ml-mobileone, licensed MIT
In all cases the models have been modified to fit within the design of ByobNet. I've remapped
the original weights and verified accuracies.
For GPU Efficient nets, I used the original names for the blocks since they were for the most part
the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note that some
changes introduced in RegNet were also present in the stem and bottleneck blocks for this model.
A significant number of different network archs can be implemented here, including variants of the
above nets that include attention.
Hacked together by / copyright Ross Wightman, 2021.
"""
import math
from dataclasses import dataclass, field, replace
from functools import partial
from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, ConvNormAct, BatchNormAct2d, DropPath, AvgPool2dSame, \
create_conv2d, get_act_layer, get_norm_act_layer, get_attn, make_divisible, to_2tuple, EvoNorm2dS0a
from ._builder import build_model_with_cfg
from ._manipulate import named_apply, checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block']
@dataclass
class ByoBlockCfg:
type: Union[str, nn.Module]
d: int # block depth (number of block repeats in stage)
c: int # number of output channels for each block in stage
s: int = 2 # stride of stage (first block)
gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1
br: float = 1. # bottleneck-ratio of blocks in stage
# NOTE: these config items override the model cfgs that are applied to all blocks by default
attn_layer: Optional[str] = None
attn_kwargs: Optional[Dict[str, Any]] = None
self_attn_layer: Optional[str] = None
self_attn_kwargs: Optional[Dict[str, Any]] = None
block_kwargs: Optional[Dict[str, Any]] = None
@dataclass
class ByoModelCfg:
blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...]
downsample: str = 'conv1x1'
stem_type: str = '3x3'
stem_pool: Optional[str] = 'maxpool'
stem_chs: int = 32
width_factor: float = 1.0
num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0
zero_init_last: bool = True # zero init last weight (usually bn) in residual path
fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation
act_layer: str = 'relu'
norm_layer: str = 'batchnorm'
# NOTE: these config items will be overridden by the block cfg (per-block) if they are set there
attn_layer: Optional[str] = None
attn_kwargs: dict = field(default_factory=lambda: dict())
self_attn_layer: Optional[str] = None
self_attn_kwargs: dict = field(default_factory=lambda: dict())
block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict())
def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0):
c = (64, 128, 256, 512)
group_size = 0
if groups > 0:
group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0
bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)])
return bcfg
def _mobileone_bcfg(d=(2, 8, 10, 1), wf=(1., 1., 1., 1.), se_blocks=(), num_conv_branches=1):
c = (64, 128, 256, 512)
prev_c = min(64, c[0] * wf[0])
se_blocks = se_blocks or (0,) * len(d)
bcfg = []
for d, c, w, se in zip(d, c, wf, se_blocks):
scfg = []
for i in range(d):
out_c = c * w
bk = dict(num_conv_branches=num_conv_branches)
ak = {}
if i >= d - se:
ak['attn_layer'] = 'se'
scfg += [ByoBlockCfg(type='one', d=1, c=prev_c, gs=1, block_kwargs=bk, **ak)] # depthwise block
scfg += [ByoBlockCfg(
type='one', d=1, c=out_c, gs=0, block_kwargs=dict(kernel_size=1, **bk), **ak)] # pointwise block
prev_c = out_c
bcfg += [scfg]
return bcfg
def interleave_blocks(
types: Tuple[str, str], d,
every: Union[int, List[int]] = 1,
first: bool = False,
**kwargs,
) -> Tuple[ByoBlockCfg]:
""" interleave 2 block types in stack
"""
assert len(types) == 2
if isinstance(every, int):
every = list(range(0 if first else every, d, every + 1))
if not every:
every = [d - 1]
    every = set(every)
blocks = []
for i in range(d):
block_type = types[1] if i in every else types[0]
blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)]
return tuple(blocks)
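# Illustrative sketch (added, not part of the original timm source): with every=1 and
# first=False the two block types simply alternate through the stage.
def _demo_interleave_blocks():
    cfgs = interleave_blocks(('bottle', 'self_attn'), d=4, every=1, c=256, s=2)
    assert [c.type for c in cfgs] == ['bottle', 'self_attn', 'bottle', 'self_attn']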
def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]:
if not isinstance(stage_blocks_cfg, Sequence):
stage_blocks_cfg = (stage_blocks_cfg,)
block_cfgs = []
for i, cfg in enumerate(stage_blocks_cfg):
block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)]
return block_cfgs
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
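# Illustrative sketch (added, not part of the original timm source): group_size is
# channels-per-group, so 0/None means a dense conv, 1 means depthwise, and larger values
# give grouped convs.
def _demo_num_groups():
    assert num_groups(None, 64) == 1   # regular conv, single group
    assert num_groups(1, 64) == 64     # depthwise conv
    assert num_groups(16, 64) == 4     # grouped conv with 16 channels per group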
@dataclass
class LayerFn:
conv_norm_act: Callable = ConvNormAct
norm_act: Callable = BatchNormAct2d
act: Callable = nn.ReLU
attn: Optional[Callable] = None
self_attn: Optional[Callable] = None
class DownsampleAvg(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: int = 1,
apply_act: bool = False,
layers: LayerFn = None,
):
""" AvgPool Downsampling as in 'D' ResNet variants."""
super(DownsampleAvg, self).__init__()
layers = layers or LayerFn()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)
def forward(self, x):
return self.conv(self.pool(x))
def create_shortcut(
downsample_type: str,
in_chs: int,
out_chs: int,
stride: int,
dilation: Tuple[int, int],
layers: LayerFn,
**kwargs,
):
assert downsample_type in ('avg', 'conv1x1', '')
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
if not downsample_type:
return None # no shortcut
elif downsample_type == 'avg':
return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs)
else:
return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs)
else:
return nn.Identity() # identity shortcut
class BasicBlock(nn.Module):
""" ResNet Basic Block - kxk + kxk
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
group_size: Optional[int] = None,
bottle_ratio: float = 1.0,
downsample: str = 'avg',
attn_last: bool = True,
linear_out: bool = False,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(BasicBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0])
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size,
dilation=dilation[1], groups=groups, drop_layer=drop_block, apply_act=False,
)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_kxk(x)
x = self.conv2_kxk(x)
x = self.attn(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
class BottleneckBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - kxk - 1x1
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.,
group_size: Optional[int] = None,
downsample: str = 'avg',
attn_last: bool = False,
linear_out: bool = False,
extra_conv: bool = False,
bottle_in: bool = False,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(BottleneckBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block,
)
if extra_conv:
self.conv2b_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups)
else:
self.conv2b_kxk = nn.Identity()
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv3_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.conv2b_kxk(x)
x = self.attn(x)
x = self.conv3_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
class DarkBlock(nn.Module):
""" DarkNet-like (1x1 + 3x3 w/ stride) block
The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models.
    This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet nor DenseNet
    uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats).
    If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 w/ stride + 1x1)
for more optimal compute.
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.0,
group_size: Optional[int] = None,
downsample: str = 'avg',
attn_last: bool = True,
linear_out: bool = False,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(DarkBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False,
)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_1x1(x)
x = self.attn(x)
x = self.conv2_kxk(x)
x = self.attn_last(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
class EdgeBlock(nn.Module):
""" EdgeResidual-like (3x3 + 1x1) block
A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed.
    Very similar to the EfficientNet Edge-Residual block, but this block ends with activations, is
    intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs.
FIXME is there a more common 3x3 + 1x1 conv block to name this after?
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.0,
group_size: Optional[int] = None,
downsample: str = 'avg',
attn_last: bool = False,
linear_out: bool = False,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(EdgeBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_kxk = layers.conv_norm_act(
in_chs, mid_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block,
)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv2_1x1.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv2_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_kxk(x)
x = self.attn(x)
x = self.conv2_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
class RepVggBlock(nn.Module):
""" RepVGG Block.
Adapted from impl at https://github.com/DingXiaoH/RepVGG
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.0,
group_size: Optional[int] = None,
downsample: str = '',
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
inference_mode: bool = False
):
super(RepVggBlock, self).__init__()
self.groups = groups = num_groups(group_size, in_chs)
layers = layers or LayerFn()
if inference_mode:
self.reparam_conv = nn.Conv2d(
in_channels=in_chs,
out_channels=out_chs,
kernel_size=kernel_size,
                stride=stride,
                padding=kernel_size // 2,  # match the implicit padding of the training-time branches
dilation=dilation,
groups=groups,
bias=True,
)
else:
self.reparam_conv = None
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
self.conv_kxk = layers.conv_norm_act(
in_chs, out_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False,
)
self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.act = layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
# NOTE this init overrides that base model init with specific changes for the block type
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.normal_(m.weight, .1, .1)
nn.init.normal_(m.bias, 0, .1)
if hasattr(self.attn, 'reset_parameters'):
self.attn.reset_parameters()
def forward(self, x):
if self.reparam_conv is not None:
return self.act(self.attn(self.reparam_conv(x)))
if self.identity is None:
x = self.conv_1x1(x) + self.conv_kxk(x)
else:
identity = self.identity(x)
x = self.conv_1x1(x) + self.conv_kxk(x)
x = self.drop_path(x) # not in the paper / official impl, experimental
x += identity
x = self.attn(x) # no attn in the paper / official impl, experimental
return self.act(x)
def reparameterize(self):
""" Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
if self.reparam_conv is not None:
return
kernel, bias = self._get_kernel_bias()
self.reparam_conv = nn.Conv2d(
in_channels=self.conv_kxk.conv.in_channels,
out_channels=self.conv_kxk.conv.out_channels,
kernel_size=self.conv_kxk.conv.kernel_size,
stride=self.conv_kxk.conv.stride,
padding=self.conv_kxk.conv.padding,
dilation=self.conv_kxk.conv.dilation,
groups=self.conv_kxk.conv.groups,
bias=True,
)
self.reparam_conv.weight.data = kernel
self.reparam_conv.bias.data = bias
# Delete un-used branches
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__('conv_kxk')
self.__delattr__('conv_1x1')
self.__delattr__('identity')
self.__delattr__('drop_path')
def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
""" Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83
"""
# get weights and bias of scale branch
kernel_1x1 = 0
bias_1x1 = 0
if self.conv_1x1 is not None:
kernel_1x1, bias_1x1 = self._fuse_bn_tensor(self.conv_1x1)
# Pad scale branch kernel to match conv branch kernel size.
pad = self.conv_kxk.conv.kernel_size[0] // 2
kernel_1x1 = torch.nn.functional.pad(kernel_1x1, [pad, pad, pad, pad])
# get weights and bias of skip branch
kernel_identity = 0
bias_identity = 0
if self.identity is not None:
kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
# get weights and bias of conv branches
kernel_conv, bias_conv = self._fuse_bn_tensor(self.conv_kxk)
kernel_final = kernel_conv + kernel_1x1 + kernel_identity
bias_final = bias_conv + bias_1x1 + bias_identity
return kernel_final, bias_final
def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]:
""" Method to fuse batchnorm layer with preceeding conv layer.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95
"""
if isinstance(branch, ConvNormAct):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, 'id_tensor'):
in_chs = self.conv_kxk.conv.in_channels
input_dim = in_chs // self.groups
kernel_size = self.conv_kxk.conv.kernel_size
kernel_value = torch.zeros_like(self.conv_kxk.conv.weight)
for i in range(in_chs):
kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1
self.id_tensor = kernel_value
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
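# Illustrative sketch (added, not part of the original timm source): the fusion above follows
# w' = w * gamma / sqrt(var + eps) and b' = beta - mean * gamma / sqrt(var + eps), so a single
# conv reproduces conv -> BN exactly in eval mode.
def _demo_fuse_conv_bn():
    conv = nn.Conv2d(8, 8, 3, padding=1, bias=False)
    bn = nn.BatchNorm2d(8).eval()
    x = torch.randn(1, 8, 16, 16)
    std = (bn.running_var + bn.eps).sqrt()
    w = conv.weight * (bn.weight / std).reshape(-1, 1, 1, 1)
    b = bn.bias - bn.running_mean * bn.weight / std
    fused = torch.nn.functional.conv2d(x, w, b, padding=1)
    assert torch.allclose(bn(conv(x)), fused, atol=1e-5)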
class MobileOneBlock(nn.Module):
""" MobileOne building block.
This block has a multi-branched architecture at train-time
and plain-CNN style architecture at inference time
For more details, please refer to our paper:
`An Improved One millisecond Mobile Backbone` -
https://arxiv.org/pdf/2206.04040.pdf
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.0, # unused
group_size: Optional[int] = None,
downsample: str = '', # unused
inference_mode: bool = False,
num_conv_branches: int = 1,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
) -> None:
""" Construct a MobileOneBlock module.
"""
super(MobileOneBlock, self).__init__()
self.num_conv_branches = num_conv_branches
self.groups = groups = num_groups(group_size, in_chs)
layers = layers or LayerFn()
if inference_mode:
self.reparam_conv = nn.Conv2d(
in_channels=in_chs,
out_channels=out_chs,
kernel_size=kernel_size,
                stride=stride,
                padding=kernel_size // 2,  # match the implicit padding of the training-time branches
dilation=dilation,
groups=groups,
bias=True)
else:
self.reparam_conv = None
# Re-parameterizable skip connection
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
# Re-parameterizable conv branches
convs = []
for _ in range(self.num_conv_branches):
convs.append(layers.conv_norm_act(
in_chs, out_chs, kernel_size=kernel_size,
stride=stride, groups=groups, apply_act=False))
self.conv_kxk = nn.ModuleList(convs)
# Re-parameterizable scale branch
self.conv_scale = None
if kernel_size > 1:
self.conv_scale = layers.conv_norm_act(
in_chs, out_chs, kernel_size=1,
stride=stride, groups=groups, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.act = layers.act(inplace=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
""" Apply forward pass. """
# Inference mode forward pass.
if self.reparam_conv is not None:
return self.act(self.attn(self.reparam_conv(x)))
# Multi-branched train-time forward pass.
# Skip branch output
identity_out = 0
if self.identity is not None:
identity_out = self.identity(x)
# Scale branch output
scale_out = 0
if self.conv_scale is not None:
scale_out = self.conv_scale(x)
# Other branches
out = scale_out
for ck in self.conv_kxk:
out += ck(x)
out = self.drop_path(out)
out += identity_out
return self.act(self.attn(out))
def reparameterize(self):
""" Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
if self.reparam_conv is not None:
return
kernel, bias = self._get_kernel_bias()
self.reparam_conv = nn.Conv2d(
in_channels=self.conv_kxk[0].conv.in_channels,
out_channels=self.conv_kxk[0].conv.out_channels,
kernel_size=self.conv_kxk[0].conv.kernel_size,
stride=self.conv_kxk[0].conv.stride,
padding=self.conv_kxk[0].conv.padding,
dilation=self.conv_kxk[0].conv.dilation,
groups=self.conv_kxk[0].conv.groups,
bias=True)
self.reparam_conv.weight.data = kernel
self.reparam_conv.bias.data = bias
# Delete un-used branches
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__('conv_kxk')
self.__delattr__('conv_scale')
self.__delattr__('identity')
self.__delattr__('drop_path')
def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
""" Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83
"""
# get weights and bias of scale branch
kernel_scale = 0
bias_scale = 0
if self.conv_scale is not None:
kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale)
# Pad scale branch kernel to match conv branch kernel size.
pad = self.conv_kxk[0].conv.kernel_size[0] // 2
kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad])
# get weights and bias of skip branch
kernel_identity = 0
bias_identity = 0
if self.identity is not None:
kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
# get weights and bias of conv branches
kernel_conv = 0
bias_conv = 0
for ix in range(self.num_conv_branches):
_kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix])
kernel_conv += _kernel
bias_conv += _bias
kernel_final = kernel_conv + kernel_scale + kernel_identity
bias_final = bias_conv + bias_scale + bias_identity
return kernel_final, bias_final
def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]:
""" Method to fuse batchnorm layer with preceeding conv layer.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95
"""
if isinstance(branch, ConvNormAct):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, 'id_tensor'):
in_chs = self.conv_kxk[0].conv.in_channels
input_dim = in_chs // self.groups
kernel_size = self.conv_kxk[0].conv.kernel_size
kernel_value = torch.zeros_like(self.conv_kxk[0].conv.weight)
for i in range(in_chs):
kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1
self.id_tensor = kernel_value
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
class SelfAttnBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
bottle_ratio: float = 1.,
group_size: Optional[int] = None,
downsample: str = 'avg',
extra_conv: bool = False,
linear_out: bool = False,
bottle_in: bool = False,
post_attn_na: bool = True,
feat_size: Optional[Tuple[int, int]] = None,
layers: LayerFn = None,
drop_block: Callable = None,
drop_path_rate: float = 0.,
):
super(SelfAttnBlock, self).__init__()
assert layers is not None
mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio)
groups = num_groups(group_size, mid_chs)
self.shortcut = create_shortcut(
downsample, in_chs, out_chs,
stride=stride, dilation=dilation, apply_act=False, layers=layers,
)
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
if extra_conv:
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size,
stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block,
)
stride = 1 # striding done via conv if enabled
else:
self.conv2_kxk = nn.Identity()
opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)
# FIXME need to dilate self attn to have dilated network support, moop moop
self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)
self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv3_1x1.bn.weight)
if hasattr(self.self_attn, 'reset_parameters'):
self.self_attn.reset_parameters()
def forward(self, x):
shortcut = x
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.self_attn(x)
x = self.post_attn(x)
x = self.conv3_1x1(x)
x = self.drop_path(x)
if self.shortcut is not None:
x = x + self.shortcut(shortcut)
return self.act(x)
_block_registry = dict(
basic=BasicBlock,
bottle=BottleneckBlock,
dark=DarkBlock,
edge=EdgeBlock,
rep=RepVggBlock,
one=MobileOneBlock,
self_attn=SelfAttnBlock,
)
def register_block(block_type:str, block_fn: nn.Module):
_block_registry[block_type] = block_fn
def create_block(block: Union[str, nn.Module], **kwargs):
if isinstance(block, (nn.Module, partial)):
return block(**kwargs)
    assert block in _block_registry, f'Unknown block type ({block})'
return _block_registry[block](**kwargs)
class Stem(nn.Sequential):
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 4,
pool: str = 'maxpool',
num_rep: int = 3,
num_act: Optional[int] = None,
chs_decay: float = 0.5,
layers: LayerFn = None,
):
super().__init__()
assert stride in (2, 4)
layers = layers or LayerFn()
if isinstance(out_chs, (list, tuple)):
num_rep = len(out_chs)
stem_chs = out_chs
else:
stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]
self.stride = stride
self.feature_info = [] # track intermediate features
prev_feat = ''
stem_strides = [2] + [1] * (num_rep - 1)
if stride == 4 and not pool:
# set last conv in stack to be strided if stride == 4 and no pooling layer
stem_strides[-1] = 2
num_act = num_rep if num_act is None else num_act
# if num_act < num_rep, first convs in stack won't have bn + act
stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act
prev_chs = in_chs
curr_stride = 1
for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
layer_fn = layers.conv_norm_act if na else create_conv2d
conv_name = f'conv{i + 1}'
if i > 0 and s > 1:
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
prev_chs = ch
curr_stride *= s
prev_feat = conv_name
if pool and 'max' in pool.lower():
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module('pool', nn.MaxPool2d(3, 2, 1))
curr_stride *= 2
prev_feat = 'pool'
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
assert curr_stride == stride
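# Worked example of the stem channel schedule above (illustrative numbers): with out_chs=64,
# num_rep=3, chs_decay=0.5 the per-conv widths are [round(64 * 0.5 ** i) for i in range(3)][::-1]
# -> [16, 32, 64], i.e. channels taper up toward the final stem width. Stride 2 comes from the
# first conv plus either the optional maxpool or, when stride == 4 and pool == '', the last conv.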
def create_byob_stem(
in_chs: int,
out_chs: int,
stem_type: str = '',
pool_type: str = '',
feat_prefix: str = 'stem',
layers: LayerFn = None,
):
layers = layers or LayerFn()
assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', 'one', '7x7', '3x3')
if 'quad' in stem_type:
# based on NFNet stem, stack of 4 3x3 convs
num_act = 2 if 'quad2' in stem_type else None
stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers)
elif 'tiered' in stem_type:
# 3x3 stack of 3 convs as in my ResNet-T
stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers)
elif 'deep' in stem_type:
# 3x3 stack of 3 convs as in ResNet-D
stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers)
elif 'rep' in stem_type:
stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers)
elif 'one' in stem_type:
stem = MobileOneBlock(in_chs, out_chs, kernel_size=3, stride=2, layers=layers)
elif '7x7' in stem_type:
# 7x7 stem conv as in ResNet
if pool_type:
stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2)
else:
# 3x3 stem conv as in RegNet is the default
if pool_type:
stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2)
if isinstance(stem, Stem):
feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info]
else:
feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)]
return stem, feature_info
def reduce_feat_size(feat_size, stride=2):
return None if feat_size is None else tuple([s // stride for s in feat_size])
def override_kwargs(block_kwargs, model_kwargs):
""" Override model level attn/self-attn/block kwargs w/ block level
NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs
for the block if set to anything that isn't None.
i.e. an empty block_kwargs dict will remove kwargs set at model level for that block
"""
out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs
return out_kwargs or {} # make sure None isn't returned
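# Example of the replace-not-merge semantics documented above (a sketch, values illustrative):
#   override_kwargs(dict(rd_ratio=0.25), dict(rd_ratio=0.5, rd_divisor=8))  # -> {'rd_ratio': 0.25}
#   override_kwargs({}, dict(rd_ratio=0.5))    # -> {} (empty block-level dict wipes model-level kwargs)
#   override_kwargs(None, dict(rd_ratio=0.5))  # -> {'rd_ratio': 0.5}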
def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg):
layer_fns = block_kwargs['layers']
# override attn layer / args with block local config
attn_set = block_cfg.attn_layer is not None
if attn_set or block_cfg.attn_kwargs is not None:
# override attn layer config
if attn_set and not block_cfg.attn_layer:
# empty string for attn_layer type will disable attn for this block
attn_layer = None
else:
attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)
attn_layer = block_cfg.attn_layer or model_cfg.attn_layer
attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None
layer_fns = replace(layer_fns, attn=attn_layer)
# override self-attn layer / args with block local cfg
self_attn_set = block_cfg.self_attn_layer is not None
if self_attn_set or block_cfg.self_attn_kwargs is not None:
# override attn layer config
if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == ''
# empty string for self_attn_layer type will disable attn for this block
self_attn_layer = None
else:
self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)
self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer
self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \
if self_attn_layer is not None else None
layer_fns = replace(layer_fns, self_attn=self_attn_layer)
block_kwargs['layers'] = layer_fns
# add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))
def create_byob_stages(
cfg: ByoModelCfg,
drop_path_rate: float,
output_stride: int,
stem_feat: Dict[str, Any],
feat_size: Optional[int] = None,
layers: Optional[LayerFn] = None,
block_kwargs_fn: Optional[Callable] = update_block_kwargs,
):
layers = layers or LayerFn()
feature_info = []
block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]
depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
stages = []
for stage_idx, stage_block_cfgs in enumerate(block_cfgs):
stride = stage_block_cfgs[0].s
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx, block_cfg in enumerate(stage_block_cfgs):
out_chs = make_divisible(block_cfg.c * cfg.width_factor)
group_size = block_cfg.gs
if isinstance(group_size, Callable):
group_size = group_size(out_chs, block_idx)
block_kwargs = dict( # Blocks used in this model must accept these arguments
in_chs=prev_chs,
out_chs=out_chs,
stride=stride if block_idx == 0 else 1,
dilation=(first_dilation, dilation),
group_size=group_size,
bottle_ratio=block_cfg.br,
downsample=cfg.downsample,
drop_path_rate=dpr[stage_idx][block_idx],
layers=layers,
)
if block_cfg.type in ('self_attn',):
# add feat_size arg for blocks that support/need it
block_kwargs['feat_size'] = feat_size
block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)
blocks += [create_block(block_cfg.type, **block_kwargs)]
first_dilation = dilation
prev_chs = out_chs
if stride > 1 and block_idx == 0:
feat_size = reduce_feat_size(feat_size, stride)
stages += [nn.Sequential(*blocks)]
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info
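# Note on the stochastic depth schedule above (illustrative numbers): with drop_path_rate=0.1 and
# per-stage depths [2, 2], torch.linspace(0, 0.1, 4) is approximately [0.0, 0.033, 0.067, 0.1] and
# is split back into per-stage lists [[0.0, 0.033], [0.067, 0.1]], so deeper blocks get higher rates.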
def get_layer_fns(cfg: ByoModelCfg):
act = get_act_layer(cfg.act_layer)
norm_act = get_norm_act_layer(norm_layer=cfg.norm_layer, act_layer=act)
conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act)
attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None
layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)
return layer_fn
class ByobNet(nn.Module):
""" 'Bring-your-own-blocks' Net
A flexible network backbone that allows building model stem + blocks via
dataclass cfg definition w/ factory functions for module instantiation.
Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act).
"""
def __init__(
self,
cfg: ByoModelCfg,
num_classes: int = 1000,
in_chans: int = 3,
global_pool: str = 'avg',
output_stride: int = 32,
img_size: Optional[Union[int, Tuple[int, int]]] = None,
drop_rate: float = 0.,
            drop_path_rate: float = 0.,
zero_init_last: bool = True,
**kwargs,
):
"""
Args:
cfg: Model architecture configuration.
num_classes: Number of classifier classes.
in_chans: Number of input channels.
global_pool: Global pooling type.
output_stride: Output stride of network, one of (8, 16, 32).
img_size: Image size for fixed image size models (i.e. self-attn).
drop_rate: Classifier dropout rate.
drop_path_rate: Stochastic depth drop-path rate.
zero_init_last: Zero-init last weight of residual path.
            **kwargs: Extra kwargs overlaid onto cfg.
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg
layers = get_layer_fns(cfg)
if cfg.fixed_input_size:
assert img_size is not None, 'img_size argument is required for fixed input size model'
feat_size = to_2tuple(img_size) if img_size is not None else None
self.feature_info = []
stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))
self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers)
self.feature_info.extend(stem_feat[:-1])
feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])
self.stages, stage_feat = create_byob_stages(
cfg,
drop_path_rate,
output_stride,
stem_feat[-1],
layers=layers,
feat_size=feat_size,
)
self.feature_info.extend(stage_feat[:-1])
prev_chs = stage_feat[-1]['num_chs']
if cfg.num_features:
self.num_features = int(round(cfg.width_factor * cfg.num_features))
self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1)
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.feature_info += [
dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')]
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
# init weights
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=[
(r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None),
(r'^final_conv', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
x = self.final_conv(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights(zero_init_last=zero_init_last)
model_cfgs = dict(
gernet_l=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_m=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_s=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.),
),
stem_chs=13,
stem_pool=None,
num_features=1920,
),
repvgg_a0=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(0.75, 0.75, 0.75, 2.5)),
stem_type='rep',
stem_chs=48,
),
repvgg_a1=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1, 1, 1, 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_a2=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)),
stem_type='rep',
stem_chs=64,
),
repvgg_b0=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b2=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b2g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b3=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b3g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_d2se=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(8, 14, 24, 1), wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.0625, rd_divisor=1),
),
# 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks
# DW convs in last block, 2048 pre-FC, silu act
resnet51q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad2',
stem_pool=None,
num_features=2048,
act_layer='silu',
),
# 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks
# DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act
resnet61q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad',
stem_pool=None,
num_features=2048,
act_layer='silu',
block_kwargs=dict(extra_conv=True),
),
# A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act,
# and a tiered stem w/ maxpool
resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
),
gcresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
seresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='se',
),
eca_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='eca',
),
bat_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='bat',
attn_kwargs=dict(block_size=8)
),
# ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool
resnet32ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=0,
act_layer='silu',
),
# ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool
resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
),
# A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat
# and a tiered stem w/ no maxpool
gcresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='gca',
),
seresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='se',
),
eca_resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='eca',
),
gcresnet50t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
attn_layer='gca',
),
gcresnext50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
# experimental models, closer to a RegNetZ than a ResNet. Similar to EfficientNets but w/ groups instead of DW
regnetz_b16=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_c16=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_d32=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4),
ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4),
ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4),
ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
downsample='',
num_features=1792,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_d8=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4),
ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
downsample='',
num_features=1792,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_e8=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4),
ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
downsample='',
num_features=2048,
act_layer='silu',
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
# experimental EvoNorm configs
regnetz_b16_evos=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3),
ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
norm_layer=partial(EvoNorm2dS0a, group_size=16),
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_c16_evos=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4),
ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4),
),
stem_chs=32,
stem_pool='',
downsample='',
num_features=1536,
act_layer='silu',
norm_layer=partial(EvoNorm2dS0a, group_size=16),
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
regnetz_d8_evos=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4),
ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4),
ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4),
),
stem_chs=64,
stem_type='deep',
stem_pool='',
downsample='',
num_features=1792,
act_layer='silu',
norm_layer=partial(EvoNorm2dS0a, group_size=16),
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.25),
block_kwargs=dict(bottle_in=True, linear_out=True),
),
mobileone_s0=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(0.75, 1.0, 1.0, 2.), num_conv_branches=4),
stem_type='one',
stem_chs=48,
),
mobileone_s1=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(1.5, 1.5, 2.0, 2.5)),
stem_type='one',
stem_chs=64,
),
mobileone_s2=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(1.5, 2.0, 2.5, 4.0)),
stem_type='one',
stem_chs=64,
),
mobileone_s3=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(2.0, 2.5, 3.0, 4.0)),
stem_type='one',
stem_chs=64,
),
mobileone_s4=ByoModelCfg(
blocks=_mobileone_bcfg(wf=(3.0, 3.5, 3.5, 4.0), se_blocks=(0, 0, 5, 1)),
stem_type='one',
stem_chs=64,
),
)
def _create_byobnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
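# Typical usage goes through the timm factory rather than calling this helper directly, e.g.
# (a sketch; 'resnext26ts' is registered further below in this file):
#
#   import timm, torch
#   model = timm.create_model('resnext26ts', pretrained=False, num_classes=10)
#   out = model(torch.randn(1, 3, 256, 256))  # -> shape (1, 10)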
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
def _cfgr(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# GPU-Efficient (ResNet) weights
'gernet_s.idstcv_in1k': _cfg(hf_hub_id='timm/'),
'gernet_m.idstcv_in1k': _cfg(hf_hub_id='timm/'),
'gernet_l.idstcv_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)),
# RepVGG weights
'repvgg_a0.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_a1.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_a2.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b0.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b1.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b1g4.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b2.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b2g4.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b3.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_b3g4.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'),
'repvgg_d2se.rvgg_in1k': _cfg(
hf_hub_id='timm/',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0,
),
# experimental ResNet configs
'resnet51q.ra2_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth',
first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnet61q.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
# ResNeXt-26 models with different attention in Bottleneck blocks
'resnext26ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'seresnext26ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'gcresnext26ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'eca_resnext26ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'bat_resnext26ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth',
min_input_size=(3, 256, 256)),
# ResNet-32 / 33 models with different attention in Bottleneck blocks
'resnet32ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnet33ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'gcresnet33ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'seresnet33ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'eca_resnet33ts.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'gcresnet50t.ra2_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'gcresnext50ts.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
# custom `timm` specific RegNetZ inspired models w/ different sizing from paper
'regnetz_b16.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth',
first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.94, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'regnetz_c16.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth',
first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'regnetz_d32.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)),
'regnetz_d8.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'regnetz_e8.ra3_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'regnetz_b16_evos.untrained': _cfgr(
first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.95, test_input_size=(3, 288, 288)),
'regnetz_c16_evos.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_c16_evos_ch-d8311942.pth',
first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
crop_pct=0.95, test_input_size=(3, 320, 320)),
'regnetz_d8_evos.ch_in1k': _cfgr(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_d8_evos_ch-2bc12646.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0),
'mobileone_s0.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.875,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
'mobileone_s1.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
'mobileone_s2.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
'mobileone_s3.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
'mobileone_s4.apple_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9,
first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
),
})
@register_model
def gernet_l(pretrained=False, **kwargs) -> ByobNet:
""" GEResNet-Large (GENet-Large from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs)
@register_model
def gernet_m(pretrained=False, **kwargs) -> ByobNet:
""" GEResNet-Medium (GENet-Normal from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs)
@register_model
def gernet_s(pretrained=False, **kwargs) -> ByobNet:
""" EResNet-Small (GENet-Small from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a0(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-A0
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a1(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-A1
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a2(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-A2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b0(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B0
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B1
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1g4(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B1g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2g4(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B2g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B3
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3g4(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-B3g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_d2se(pretrained=False, **kwargs) -> ByobNet:
""" RepVGG-D2se
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_d2se', pretrained=pretrained, **kwargs)
@register_model
def resnet51q(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs)
@register_model
def resnet61q(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs)
@register_model
def resnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def seresnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def bat_resnext26ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def resnet32ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs)
@register_model
def resnet33ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet33ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def seresnet33ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnet33ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet50t(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs)
@register_model
def gcresnext50ts(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs)
@register_model
def regnetz_b16(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs)
@register_model
def regnetz_c16(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs)
@register_model
def regnetz_d32(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs)
@register_model
def regnetz_d8(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs)
@register_model
def regnetz_e8(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs)
@register_model
def regnetz_b16_evos(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_b16_evos', pretrained=pretrained, **kwargs)
@register_model
def regnetz_c16_evos(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_c16_evos', pretrained=pretrained, **kwargs)
@register_model
def regnetz_d8_evos(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s0(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s0', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s1(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s1', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s2(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s2', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s3(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s3', pretrained=pretrained, **kwargs)
@register_model
def mobileone_s4(pretrained=False, **kwargs) -> ByobNet:
"""
"""
return _create_byobnet('mobileone_s4', pretrained=pretrained, **kwargs)
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/densenet.py | """Pytorch Densenet implementation w/ tweaks
This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with
fixed kwargs passthrough and addition of dynamic global avg/max pool.
"""
import re
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch.jit.annotations import List
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import BatchNormAct2d, get_norm_act_layer, BlurPool2d, create_classifier
from ._builder import build_model_with_cfg
from ._manipulate import MATCH_PREV_GROUP
from ._registry import register_model, generate_default_cfgs
__all__ = ['DenseNet']
class DenseLayer(nn.Module):
def __init__(
self,
num_input_features,
growth_rate,
bn_size,
norm_layer=BatchNormAct2d,
drop_rate=0.,
grad_checkpointing=False,
):
super(DenseLayer, self).__init__()
        self.add_module('norm1', norm_layer(num_input_features))
        self.add_module('conv1', nn.Conv2d(
            num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False))
        self.add_module('norm2', norm_layer(bn_size * growth_rate))
        self.add_module('conv2', nn.Conv2d(
            bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False))
self.drop_rate = float(drop_rate)
self.grad_checkpointing = grad_checkpointing
def bottleneck_fn(self, xs):
# type: (List[torch.Tensor]) -> torch.Tensor
concated_features = torch.cat(xs, 1)
bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484
return bottleneck_output
# todo: rewrite when torchscript supports any
def any_requires_grad(self, x):
# type: (List[torch.Tensor]) -> bool
for tensor in x:
if tensor.requires_grad:
return True
return False
@torch.jit.unused # noqa: T484
def call_checkpoint_bottleneck(self, x):
# type: (List[torch.Tensor]) -> torch.Tensor
def closure(*xs):
return self.bottleneck_fn(xs)
return cp.checkpoint(closure, *x)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (torch.Tensor)
pass
# torchscript does not yet support *args, so we overload method
# allowing it to take either a List[Tensor] or single Tensor
def forward(self, x): # noqa: F811
if isinstance(x, torch.Tensor):
prev_features = [x]
else:
prev_features = x
if self.grad_checkpointing and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bottleneck_fn(prev_features)
new_features = self.conv2(self.norm2(bottleneck_output))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return new_features
class DenseBlock(nn.ModuleDict):
_version = 2
def __init__(
self,
num_layers,
num_input_features,
bn_size,
growth_rate,
norm_layer=BatchNormAct2d,
drop_rate=0.,
grad_checkpointing=False,
):
super(DenseBlock, self).__init__()
for i in range(num_layers):
layer = DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
norm_layer=norm_layer,
drop_rate=drop_rate,
grad_checkpointing=grad_checkpointing,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
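# Channel bookkeeping sketch (illustrative numbers): a block built with num_input_features=64,
# growth_rate=32, num_layers=6 concatenates six new 32-channel feature maps onto its input, so the
# output has 64 + 6 * 32 = 256 channels, matching the `num_features + num_layers * growth_rate`
# update in DenseNet below.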
class DenseTransition(nn.Sequential):
def __init__(
self,
num_input_features,
num_output_features,
norm_layer=BatchNormAct2d,
aa_layer=None,
):
super(DenseTransition, self).__init__()
self.add_module('norm', norm_layer(num_input_features))
self.add_module('conv', nn.Conv2d(
num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
if aa_layer is not None:
self.add_module('pool', aa_layer(num_output_features, stride=2))
else:
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate before classifier layer
proj_drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
def __init__(
self,
growth_rate=32,
block_config=(6, 12, 24, 16),
num_classes=1000,
in_chans=3,
global_pool='avg',
bn_size=4,
stem_type='',
act_layer='relu',
norm_layer='batchnorm2d',
aa_layer=None,
drop_rate=0.,
proj_drop_rate=0.,
memory_efficient=False,
aa_stem_only=True,
):
self.num_classes = num_classes
super(DenseNet, self).__init__()
norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
# Stem
deep_stem = 'deep' in stem_type # 3x3 deep stem
num_init_features = growth_rate * 2
if aa_layer is None:
stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
else:
stem_pool = nn.Sequential(*[
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
aa_layer(channels=num_init_features, stride=2)])
if deep_stem:
stem_chs_1 = stem_chs_2 = growth_rate
if 'tiered' in stem_type:
stem_chs_1 = 3 * (growth_rate // 4)
stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4)
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)),
('norm0', norm_layer(stem_chs_1)),
('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)),
('norm1', norm_layer(stem_chs_2)),
('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)),
('norm2', norm_layer(num_init_features)),
('pool0', stem_pool),
]))
else:
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', norm_layer(num_init_features)),
('pool0', stem_pool),
]))
self.feature_info = [
dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')]
current_stride = 4
# DenseBlocks
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
norm_layer=norm_layer,
drop_rate=proj_drop_rate,
grad_checkpointing=memory_efficient,
)
module_name = f'denseblock{(i + 1)}'
self.features.add_module(module_name, block)
num_features = num_features + num_layers * growth_rate
transition_aa_layer = None if aa_stem_only else aa_layer
if i != len(block_config) - 1:
self.feature_info += [
dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)]
current_stride *= 2
trans = DenseTransition(
num_input_features=num_features,
num_output_features=num_features // 2,
norm_layer=norm_layer,
aa_layer=transition_aa_layer,
)
self.features.add_module(f'transition{i + 1}', trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', norm_layer(num_features))
self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')]
self.num_features = num_features
# Linear layer
global_pool, classifier = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
)
self.global_pool = global_pool
self.head_drop = nn.Dropout(drop_rate)
self.classifier = classifier
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^features\.conv[012]|features\.norm[012]|features\.pool[012]',
blocks=r'^features\.(?:denseblock|transition)(\d+)' if coarse else [
(r'^features\.denseblock(\d+)\.denselayer(\d+)', None),
(r'^features\.transition(\d+)', MATCH_PREV_GROUP) # FIXME combine with previous denselayer
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for b in self.features.modules():
if isinstance(b, DenseLayer):
b.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
return self.features(x)
def forward(self, x):
x = self.forward_features(x)
x = self.global_pool(x)
x = self.head_drop(x)
x = self.classifier(x)
return x
def _filter_torchvision_pretrained(state_dict):
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
return state_dict
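# Example of the key remapping above (a sketch): older torchvision checkpoints use nested names such
# as 'features.denseblock1.denselayer1.norm.1.weight', which the regex rewrites to the flattened
# 'features.denseblock1.denselayer1.norm1.weight' expected by this implementation.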
def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs):
kwargs['growth_rate'] = growth_rate
kwargs['block_config'] = block_config
return build_model_with_cfg(
DenseNet,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
pretrained_filter_fn=_filter_torchvision_pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'features.conv0', 'classifier': 'classifier', **kwargs,
}
default_cfgs = generate_default_cfgs({
'densenet121.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'densenetblur121d.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'densenet264d.untrained': _cfg(),
'densenet121.tv_in1k': _cfg(hf_hub_id='timm/'),
'densenet169.tv_in1k': _cfg(hf_hub_id='timm/'),
'densenet201.tv_in1k': _cfg(hf_hub_id='timm/'),
'densenet161.tv_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def densenet121(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=32, block_config=(6, 12, 24, 16))
model = _create_densenet('densenet121', pretrained=pretrained, **dict(model_args, **kwargs))
return model
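# Usage sketch (weight tag as listed in default_cfgs above; shapes follow the 224x224 config):
#
#   import timm, torch
#   m = timm.create_model('densenet121.tv_in1k', pretrained=False)
#   feats = m.forward_features(torch.randn(1, 3, 224, 224))  # -> (1, 1024, 7, 7)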
@register_model
def densenetblur121d(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-121 w/ blur-pooling & 3-layer 3x3 stem
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', aa_layer=BlurPool2d)
model = _create_densenet('densenetblur121d', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenet169(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=32, block_config=(6, 12, 32, 32))
model = _create_densenet('densenet169', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenet201(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=32, block_config=(6, 12, 48, 32))
model = _create_densenet('densenet201', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenet161(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=48, block_config=(6, 12, 36, 24))
model = _create_densenet('densenet161', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenet264d(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-264 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep')
model = _create_densenet('densenet264d', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/swin_transformer_v2_cr.py | """ Swin Transformer V2
A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
- https://arxiv.org/pdf/2111.09883
Code adapted from https://github.com/ChristophReich1996/Swin-Transformer-V2, original copyright/license info below
This implementation is experimental and subject to change in manners that will break weight compat:
* Size of the pos embed MLP is not spelled out in the paper in terms of dim; fixed for all models? vary with num_heads?
* currently dim is fixed, I feel it may make sense to scale with num_heads (dim per head)
* The specifics of the memory saving 'sequential attention' are not detailed, Christoph Reich has an impl at
GitHub link above. It needs further investigation as throughput vs mem tradeoff doesn't appear beneficial.
* num_heads per stage is not detailed for Huge and Giant model variants
* 'Giant' is 3B params in paper but ~2.6B here despite matching paper dim + block counts
* experiments are ongoing w.r.t. 'main branch' norm layer use and weight init scheme
Noteworthy additions over official Swin v1:
* MLP relative position embedding is looking promising and adapts to different image/window sizes
* This impl has been designed to allow easy change of image size with matching window size changes
* Non-square image size and window size are supported
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
# --------------------------------------------------------
# Swin Transformer V2 reimplementation
# Copyright (c) 2021 Christoph Reich
# Licensed under The MIT License [see LICENSE for details]
# Written by Christoph Reich
# --------------------------------------------------------
import logging
import math
from typing import Tuple, Optional, List, Union, Any, Type
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, Mlp, ClassifierHead, to_2tuple, _assert
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import named_apply
from ._registry import generate_default_cfgs, register_model
__all__ = ['SwinTransformerV2Cr'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor:
"""Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C). """
return x.permute(0, 2, 3, 1)
def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor:
"""Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W). """
return x.permute(0, 3, 1, 2)
def window_partition(x, window_size: Tuple[int, int]):
"""
Args:
x: (B, H, W, C)
        window_size (Tuple[int, int]): Window size
    Returns:
        windows: (num_windows * B, window_size[0], window_size[1], C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]):
"""
Args:
windows: (num_windows * B, window_size[0], window_size[1], C)
window_size (Tuple[int, int]): Window size
img_size (Tuple[int, int]): Image size
Returns:
x: (B, H, W, C)
"""
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
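# Example (illustrative): with x of shape (2, 56, 56, 96) and window_size=(7, 7), window_partition
# returns (2 * 8 * 8, 7, 7, 96) = (128, 7, 7, 96); window_reverse with img_size=(56, 56) maps those
# windows back to (2, 56, 56, 96). The two helpers are exact inverses whenever H and W are divisible
# by the window dims.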
class WindowMultiHeadAttention(nn.Module):
r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias.
Args:
dim (int): Number of input features
        window_size (Tuple[int, int]): Window size
num_heads (int): Number of attention heads
drop_attn (float): Dropout rate of attention map
drop_proj (float): Dropout rate after projection
meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network
sequential_attn (bool): If true sequential self-attention is performed
"""
def __init__(
self,
dim: int,
num_heads: int,
window_size: Tuple[int, int],
drop_attn: float = 0.0,
drop_proj: float = 0.0,
meta_hidden_dim: int = 384, # FIXME what's the optimal value?
sequential_attn: bool = False,
) -> None:
super(WindowMultiHeadAttention, self).__init__()
assert dim % num_heads == 0, \
"The number of input features (in_features) are not divisible by the number of heads (num_heads)."
self.in_features: int = dim
self.window_size: Tuple[int, int] = window_size
self.num_heads: int = num_heads
self.sequential_attn: bool = sequential_attn
self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True)
self.attn_drop = nn.Dropout(drop_attn)
self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True)
self.proj_drop = nn.Dropout(drop_proj)
# meta network for positional encodings
self.meta_mlp = Mlp(
2, # x, y
hidden_features=meta_hidden_dim,
out_features=num_heads,
act_layer=nn.ReLU,
drop=(0.125, 0.) # FIXME should there be stochasticity, appears to 'overfit' without?
)
# NOTE old checkpoints used inverse of logit_scale ('tau') following the paper, see conversion fn
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads)))
self._make_pair_wise_relative_positions()
def _make_pair_wise_relative_positions(self) -> None:
"""Method initializes the pair-wise relative positions to compute the positional biases."""
device = self.logit_scale.device
coordinates = torch.stack(torch.meshgrid([
torch.arange(self.window_size[0], device=device),
torch.arange(self.window_size[1], device=device)]), dim=0).flatten(1)
relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :]
relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float()
relative_coordinates_log = torch.sign(relative_coordinates) * torch.log(
1.0 + relative_coordinates.abs())
self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False)
    def update_input_size(self, new_window_size: Tuple[int, int], **kwargs: Any) -> None:
        """Method updates the window size and, with it, the pair-wise relative positions.
        Args:
            new_window_size (Tuple[int, int]): New window size
            kwargs (Any): Unused
        """
        # Set new window size and new pair-wise relative positions
        self.window_size: Tuple[int, int] = new_window_size
self._make_pair_wise_relative_positions()
def _relative_positional_encodings(self) -> torch.Tensor:
"""Method computes the relative positional encodings
Returns:
relative_position_bias (torch.Tensor): Relative positional encodings
(1, number of heads, window size ** 2, window size ** 2)
"""
window_area = self.window_size[0] * self.window_size[1]
relative_position_bias = self.meta_mlp(self.relative_coordinates_log)
relative_position_bias = relative_position_bias.transpose(1, 0).reshape(
self.num_heads, window_area, window_area
)
relative_position_bias = relative_position_bias.unsqueeze(0)
return relative_position_bias
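    # Example (illustrative): for window_size=(7, 7) and num_heads=3, relative_coordinates_log has shape
    # (49 * 49, 2) = (2401, 2); meta_mlp maps it to (2401, 3), which is transposed/reshaped to (3, 49, 49)
    # and unsqueezed to the (1, num_heads, window_area, window_area) bias added to the attention logits.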
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
""" Forward pass.
Args:
x (torch.Tensor): Input tensor of the shape (B * windows, N, C)
mask (Optional[torch.Tensor]): Attention mask for the shift case
Returns:
Output tensor of the shape [B * windows, N, C]
"""
Bw, L, C = x.shape
qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
query, key, value = qkv.unbind(0)
# compute attention map with scaled cosine attention
attn = (F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1))
logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1. / 0.01)).exp()
attn = attn * logit_scale
attn = attn + self._relative_positional_encodings()
if mask is not None:
# Apply mask if utilized
num_win: int = mask.shape[0]
attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L)
attn = attn + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, L, L)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerV2CrBlock(nn.Module):
r"""This class implements the Swin transformer block.
Args:
dim (int): Number of input channels
num_heads (int): Number of attention heads to be utilized
feat_size (Tuple[int, int]): Input resolution
window_size (Tuple[int, int]): Window size to be utilized
shift_size (int): Shifting size to be used
mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels
proj_drop (float): Dropout in input mapping
drop_attn (float): Dropout rate of attention map
drop_path (float): Dropout in main path
extra_norm (bool): Insert extra norm on 'main' branch if True
sequential_attn (bool): If true sequential self-attention is performed
norm_layer (Type[nn.Module]): Type of normalization layer to be utilized
"""
def __init__(
self,
dim: int,
num_heads: int,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
shift_size: Tuple[int, int] = (0, 0),
mlp_ratio: float = 4.0,
init_values: Optional[float] = 0,
proj_drop: float = 0.0,
drop_attn: float = 0.0,
drop_path: float = 0.0,
extra_norm: bool = False,
sequential_attn: bool = False,
norm_layer: Type[nn.Module] = nn.LayerNorm,
) -> None:
super(SwinTransformerV2CrBlock, self).__init__()
self.dim: int = dim
self.feat_size: Tuple[int, int] = feat_size
self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size)
self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size))
self.window_area = self.window_size[0] * self.window_size[1]
self.init_values: Optional[float] = init_values
# attn branch
self.attn = WindowMultiHeadAttention(
dim=dim,
num_heads=num_heads,
window_size=self.window_size,
drop_attn=drop_attn,
drop_proj=proj_drop,
sequential_attn=sequential_attn,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity()
# mlp branch
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
drop=proj_drop,
out_features=dim,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity()
# Extra main branch norm layer mentioned for Huge/Giant models in V2 paper.
# Also being used as final network norm and optional stage ending norm while still in a C-last format.
self.norm3 = norm_layer(dim) if extra_norm else nn.Identity()
self._make_attention_mask()
self.init_weights()
def _calc_window_shift(self, target_window_size):
window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)]
shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, self.target_shift_size)]
return tuple(window_size), tuple(shift_size)
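    # Example (illustrative): with feat_size=(6, 16) and a target window of (7, 7), the window shrinks to
    # (6, 7) along the dimension it would overflow and the matching shift is zeroed, so a target shift of
    # (3, 3) becomes (0, 3); with feat_size=(16, 16) the (7, 7) window and requested shift pass through unchanged.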
def _make_attention_mask(self) -> None:
"""Method generates the attention mask used in shift case."""
# Make masks for shift case
if any(self.shift_size):
# calculate attention mask for SW-MSA
H, W = self.feat_size
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
cnt = 0
for h in (
slice(0, -self.window_size[0]),
slice(-self.window_size[0], -self.shift_size[0]),
slice(-self.shift_size[0], None)):
for w in (
slice(0, -self.window_size[1]),
slice(-self.window_size[1], -self.shift_size[1]),
slice(-self.shift_size[1], None)):
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_area)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask, persistent=False)
def init_weights(self):
# extra, module specific weight init
if self.init_values is not None:
nn.init.constant_(self.norm1.weight, self.init_values)
nn.init.constant_(self.norm2.weight, self.init_values)
def update_input_size(self, new_window_size: Tuple[int, int], new_feat_size: Tuple[int, int]) -> None:
"""Method updates the image resolution to be processed and window size and so the pair-wise relative positions.
Args:
new_window_size (int): New window size
new_feat_size (Tuple[int, int]): New input resolution
"""
# Update input resolution
self.feat_size: Tuple[int, int] = new_feat_size
self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(new_window_size))
self.window_area = self.window_size[0] * self.window_size[1]
self.attn.update_input_size(new_window_size=self.window_size)
self._make_attention_mask()
def _shifted_window_attn(self, x):
B, H, W, C = x.shape
# cyclic shift
sh, sw = self.shift_size
do_shift: bool = any(self.shift_size)
if do_shift:
# FIXME PyTorch XLA needs cat impl, roll not lowered
# x = torch.cat([x[:, sh:], x[:, :sh]], dim=1)
# x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2)
x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2))
# partition windows
x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C)
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # num_windows * B, window_size * window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
x = window_reverse(attn_windows, self.window_size, self.feat_size) # B H' W' C
# reverse cyclic shift
if do_shift:
# FIXME PyTorch XLA needs cat impl, roll not lowered
# x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1)
# x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2)
x = torch.roll(x, shifts=(sh, sw), dims=(1, 2))
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass.
Args:
            x (torch.Tensor): Input tensor of the shape [B, H, W, C]
        Returns:
            output (torch.Tensor): Output tensor of the shape [B, H, W, C]
"""
# post-norm branches (op -> norm -> drop)
x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x)))
B, H, W, C = x.shape
x = x.reshape(B, -1, C)
x = x + self.drop_path2(self.norm2(self.mlp(x)))
x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant)
x = x.reshape(B, H, W, C)
return x
class PatchMerging(nn.Module):
""" This class implements the patch merging as a strided convolution with a normalization before.
Args:
dim (int): Number of input channels
norm_layer (Type[nn.Module]): Type of normalization layer to be utilized.
"""
def __init__(self, dim: int, norm_layer: Type[nn.Module] = nn.LayerNorm) -> None:
super(PatchMerging, self).__init__()
self.norm = norm_layer(4 * dim)
self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
""" Forward pass.
Args:
            x (torch.Tensor): Input tensor of the shape [B, H, W, C]
        Returns:
            output (torch.Tensor): Output tensor of the shape [B, H // 2, W // 2, 2 * C]
"""
B, H, W, C = x.shape
x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)
x = self.norm(x)
x = self.reduction(x)
return x
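# Example (illustrative): PatchMerging takes (2, 56, 56, 96), folds each 2x2 neighborhood to get
# (2, 28, 28, 384), then norm + Linear(384, 192) yields (2, 28, 28, 192) -- spatial resolution halves
# while channel count doubles.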
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding """
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
_assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).")
_assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).")
x = self.proj(x)
x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return x
class SwinTransformerV2CrStage(nn.Module):
r"""This class implements a stage of the Swin transformer including multiple layers.
Args:
embed_dim (int): Number of input channels
depth (int): Depth of the stage (number of layers)
        downscale (bool): If true input is downsampled (see Fig. 3 of the V1 paper)
feat_size (Tuple[int, int]): input feature map size (H, W)
num_heads (int): Number of attention heads to be utilized
window_size (int): Window size to be utilized
mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels
proj_drop (float): Dropout in input mapping
drop_attn (float): Dropout rate of attention map
drop_path (float): Dropout in main path
norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. Default: nn.LayerNorm
extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks
extra_norm_stage (bool): End each stage with an extra norm layer in main branch
sequential_attn (bool): If true sequential self-attention is performed
"""
def __init__(
self,
embed_dim: int,
depth: int,
downscale: bool,
num_heads: int,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
mlp_ratio: float = 4.0,
init_values: Optional[float] = 0.0,
proj_drop: float = 0.0,
drop_attn: float = 0.0,
drop_path: Union[List[float], float] = 0.0,
norm_layer: Type[nn.Module] = nn.LayerNorm,
extra_norm_period: int = 0,
extra_norm_stage: bool = False,
sequential_attn: bool = False,
) -> None:
super(SwinTransformerV2CrStage, self).__init__()
self.downscale: bool = downscale
self.grad_checkpointing: bool = False
self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size
if downscale:
self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer)
embed_dim = embed_dim * 2
else:
self.downsample = nn.Identity()
def _extra_norm(index):
i = index + 1
if extra_norm_period and i % extra_norm_period == 0:
return True
return i == depth if extra_norm_stage else False
self.blocks = nn.Sequential(*[
SwinTransformerV2CrBlock(
dim=embed_dim,
num_heads=num_heads,
feat_size=self.feat_size,
window_size=window_size,
shift_size=tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size]),
mlp_ratio=mlp_ratio,
init_values=init_values,
proj_drop=proj_drop,
drop_attn=drop_attn,
drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path,
extra_norm=_extra_norm(index),
sequential_attn=sequential_attn,
norm_layer=norm_layer,
)
for index in range(depth)]
)
def update_input_size(self, new_window_size: int, new_feat_size: Tuple[int, int]) -> None:
"""Method updates the resolution to utilize and the window size and so the pair-wise relative positions.
Args:
new_window_size (int): New window size
new_feat_size (Tuple[int, int]): New input resolution
"""
self.feat_size: Tuple[int, int] = (
(new_feat_size[0] // 2, new_feat_size[1] // 2) if self.downscale else new_feat_size
)
for block in self.blocks:
block.update_input_size(new_window_size=new_window_size, new_feat_size=self.feat_size)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass.
Args:
            x (torch.Tensor): Input tensor of the shape [B, C, H, W]
Returns:
output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2]
"""
x = bchw_to_bhwc(x)
x = self.downsample(x)
for block in self.blocks:
# Perform checkpointing if utilized
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(block, x)
else:
x = block(x)
x = bhwc_to_bchw(x)
return x
class SwinTransformerV2Cr(nn.Module):
r""" Swin Transformer V2
A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` -
https://arxiv.org/pdf/2111.09883
Args:
img_size: Input resolution.
        window_size: Window size. If None, img_size // img_window_ratio
img_window_ratio: Window size to image size ratio.
patch_size: Patch size.
in_chans: Number of input channels.
depths: Depth of the stage (number of layers).
num_heads: Number of attention heads to be utilized.
embed_dim: Patch embedding dimension.
num_classes: Number of output classes.
mlp_ratio: Ratio of the hidden dimension in the FFN to the input channels.
drop_rate: Dropout rate.
proj_drop_rate: Projection dropout rate.
attn_drop_rate: Dropout rate of attention map.
drop_path_rate: Stochastic depth rate.
norm_layer: Type of normalization layer to be utilized.
extra_norm_period: Insert extra norm layer on main branch every N (period) blocks in stage
extra_norm_stage: End each stage with an extra norm layer in main branch
sequential_attn: If true sequential self-attention is performed.
"""
def __init__(
self,
img_size: Tuple[int, int] = (224, 224),
patch_size: int = 4,
window_size: Optional[int] = None,
img_window_ratio: int = 32,
in_chans: int = 3,
num_classes: int = 1000,
embed_dim: int = 96,
depths: Tuple[int, ...] = (2, 2, 6, 2),
num_heads: Tuple[int, ...] = (3, 6, 12, 24),
mlp_ratio: float = 4.0,
init_values: Optional[float] = 0.,
drop_rate: float = 0.0,
proj_drop_rate: float = 0.0,
attn_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
norm_layer: Type[nn.Module] = nn.LayerNorm,
extra_norm_period: int = 0,
extra_norm_stage: bool = False,
sequential_attn: bool = False,
global_pool: str = 'avg',
weight_init='skip',
**kwargs: Any
) -> None:
super(SwinTransformerV2Cr, self).__init__()
img_size = to_2tuple(img_size)
window_size = tuple([
s // img_window_ratio for s in img_size]) if window_size is None else to_2tuple(window_size)
self.num_classes: int = num_classes
self.patch_size: int = patch_size
self.img_size: Tuple[int, int] = img_size
self.window_size: int = window_size
self.num_features: int = int(embed_dim * 2 ** (len(depths) - 1))
self.feature_info = []
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer,
)
patch_grid_size: Tuple[int, int] = self.patch_embed.grid_size
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
in_dim = embed_dim
in_scale = 1
for stage_idx, (depth, num_heads) in enumerate(zip(depths, num_heads)):
stages += [SwinTransformerV2CrStage(
embed_dim=in_dim,
depth=depth,
downscale=stage_idx != 0,
feat_size=(
patch_grid_size[0] // in_scale,
patch_grid_size[1] // in_scale
),
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
init_values=init_values,
proj_drop=proj_drop_rate,
drop_attn=attn_drop_rate,
drop_path=dpr[stage_idx],
extra_norm_period=extra_norm_period,
extra_norm_stage=extra_norm_stage or (stage_idx + 1) == len(depths), # last stage ends w/ norm
sequential_attn=sequential_attn,
norm_layer=norm_layer,
)]
if stage_idx != 0:
in_dim *= 2
in_scale *= 2
self.feature_info += [dict(num_chs=in_dim, reduction=4 * in_scale, module=f'stages.{stage_idx}')]
self.stages = nn.Sequential(*stages)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
# current weight init skips custom init and uses pytorch layer defaults, seems to work well
# FIXME more experiments needed
if weight_init != 'skip':
named_apply(init_weights, self)
def update_input_size(
self,
new_img_size: Optional[Tuple[int, int]] = None,
new_window_size: Optional[int] = None,
img_window_ratio: int = 32,
) -> None:
"""Method updates the image resolution to be processed and window size and so the pair-wise relative positions.
Args:
new_window_size (Optional[int]): New window size, if None based on new_img_size // window_div
new_img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used
img_window_ratio (int): divisor for calculating window size from image size
"""
# Check parameters
if new_img_size is None:
new_img_size = self.img_size
else:
new_img_size = to_2tuple(new_img_size)
if new_window_size is None:
new_window_size = tuple([s // img_window_ratio for s in new_img_size])
# Compute new patch resolution & update resolution of each stage
new_patch_grid_size = (new_img_size[0] // self.patch_size, new_img_size[1] // self.patch_size)
for index, stage in enumerate(self.stages):
stage_scale = 2 ** max(index - 1, 0)
stage.update_input_size(
new_window_size=new_window_size,
                new_feat_size=(new_patch_grid_size[0] // stage_scale, new_patch_grid_size[1] // stage_scale),
)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^patch_embed', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore()
def get_classifier(self) -> nn.Module:
"""Method returns the classification head of the model.
Returns:
head (nn.Module): Current classification head
"""
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
"""Method results the classification head
Args:
num_classes (int): Number of classes to be predicted
global_pool (str): Unused
"""
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
def init_weights(module: nn.Module, name: str = ''):
# FIXME WIP determining if there's a better weight init
if isinstance(module, nn.Linear):
if 'qkv' in name:
# treat the weights of Q, K, V separately
val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
nn.init.uniform_(module.weight, -val, val)
elif 'head' in name:
nn.init.zeros_(module.weight)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
if 'head.fc.weight' in state_dict:
return state_dict
out_dict = {}
for k, v in state_dict.items():
if 'tau' in k:
# convert old tau based checkpoints -> logit_scale (inverse)
v = torch.log(1 / v)
k = k.replace('tau', 'logit_scale')
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
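# Example (illustrative): an old checkpoint entry such as 'stages.0.blocks.0.attn.tau' holding 0.1 is
# remapped to 'stages.0.blocks.0.attn.logit_scale' holding log(1 / 0.1) = log(10) ~= 2.303, matching the
# logit_scale initialization above; classifier keys are likewise renamed from 'head.*' to 'head.fc.*'.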
def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
SwinTransformerV2Cr, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': (7, 7),
'crop_pct': 0.9,
'interpolation': 'bicubic',
'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj',
'classifier': 'head.fc',
**kwargs,
}
default_cfgs = generate_default_cfgs({
'swinv2_cr_tiny_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_tiny_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_tiny_ns_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth",
input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_small_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_small_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth",
input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_small_ns_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth",
input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_small_ns_256.untrained': _cfg(
url="", input_size=(3, 256, 256), crop_pct=1.0, pool_size=(8, 8)),
'swinv2_cr_base_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_base_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_base_ns_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_large_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_large_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_huge_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_huge_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
'swinv2_cr_giant_384.untrained': _cfg(
url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
'swinv2_cr_giant_224.untrained': _cfg(
url="", input_size=(3, 224, 224), crop_pct=0.9),
})
@register_model
def swinv2_cr_tiny_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-T V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
)
return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_tiny_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-T V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
)
return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_tiny_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-T V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms.
** Experimental, may make default if results are improved. **
"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
extra_norm_stage=True,
)
return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_small_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-S V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
)
return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_small_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-S V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
)
return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_small_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-S V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
extra_norm_stage=True,
)
return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_small_ns_256(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-S V2 CR @ 256x256, trained ImageNet-1k"""
model_args = dict(
embed_dim=96,
depths=(2, 2, 18, 2),
num_heads=(3, 6, 12, 24),
extra_norm_stage=True,
)
return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_base_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-B V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
)
return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_base_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-B V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
)
return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_base_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-B V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=128,
depths=(2, 2, 18, 2),
num_heads=(4, 8, 16, 32),
extra_norm_stage=True,
)
return _create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_large_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-L V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
)
return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_large_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-L V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=192,
depths=(2, 2, 18, 2),
num_heads=(6, 12, 24, 48),
)
return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_huge_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-H V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=352,
depths=(2, 2, 18, 2),
num_heads=(11, 22, 44, 88), # head count not certain for Huge, 384 & 224 trying diff values
extra_norm_period=6,
)
return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_huge_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-H V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=352,
depths=(2, 2, 18, 2),
num_heads=(8, 16, 32, 64), # head count not certain for Huge, 384 & 224 trying diff values
extra_norm_period=6,
)
return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_giant_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-G V2 CR @ 384x384, trained ImageNet-1k"""
model_args = dict(
embed_dim=512,
depths=(2, 2, 42, 2),
num_heads=(16, 32, 64, 128),
extra_norm_period=6,
)
return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_cr_giant_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
"""Swin-G V2 CR @ 224x224, trained ImageNet-1k"""
model_args = dict(
embed_dim=512,
depths=(2, 2, 42, 2),
num_heads=(16, 32, 64, 128),
extra_norm_period=6,
)
return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **dict(model_args, **kwargs))
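# Usage sketch (illustrative, assumes torch and timm are importable in the calling environment):
#
#   import torch, timm
#   model = timm.create_model('swinv2_cr_tiny_ns_224', pretrained=True).eval()
#   logits = model(torch.randn(1, 3, 224, 224))                    # -> (1, 1000)
#   feats = model.forward_features(torch.randn(1, 3, 224, 224))    # -> (1, 768, 7, 7) feature map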
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/inception_v3.py | """ Inception-V3
Originally from torchvision Inception3 model
Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE
"""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import trunc_normal_, create_classifier, Linear, ConvNormAct
from ._builder import build_model_with_cfg
from ._builder import resolve_pretrained_cfg
from ._manipulate import flatten_modules
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['InceptionV3'] # model_registry will add each entrypoint fn to this
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features, conv_block=None):
super(InceptionA, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)
self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1)
self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)
self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionB, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2)
def _forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7, conv_block=None):
super(InceptionC, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionD, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)
def _forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionE, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes, conv_block=None):
super(InceptionAux, self).__init__()
conv_block = conv_block or ConvNormAct
self.conv0 = conv_block(in_channels, 128, kernel_size=1)
self.conv1 = conv_block(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
self.fc = Linear(768, num_classes)
self.fc.stddev = 0.001
def forward(self, x):
# N x 768 x 17 x 17
x = F.avg_pool2d(x, kernel_size=5, stride=3)
# N x 768 x 5 x 5
x = self.conv0(x)
# N x 128 x 5 x 5
x = self.conv1(x)
# N x 768 x 1 x 1
# Adaptive average pooling
x = F.adaptive_avg_pool2d(x, (1, 1))
# N x 768 x 1 x 1
x = torch.flatten(x, 1)
# N x 768
x = self.fc(x)
# N x 1000
return x
class InceptionV3(nn.Module):
"""Inception-V3
"""
aux_logits: torch.jit.Final[bool]
def __init__(
self,
num_classes=1000,
in_chans=3,
drop_rate=0.,
global_pool='avg',
aux_logits=False,
norm_layer='batchnorm2d',
norm_eps=1e-3,
act_layer='relu',
):
super(InceptionV3, self).__init__()
self.num_classes = num_classes
self.aux_logits = aux_logits
conv_block = partial(
ConvNormAct,
padding=0,
norm_layer=norm_layer,
act_layer=act_layer,
norm_kwargs=dict(eps=norm_eps),
act_kwargs=dict(inplace=True),
)
self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Mixed_5b = InceptionA(192, pool_features=32, conv_block=conv_block)
self.Mixed_5c = InceptionA(256, pool_features=64, conv_block=conv_block)
self.Mixed_5d = InceptionA(288, pool_features=64, conv_block=conv_block)
self.Mixed_6a = InceptionB(288, conv_block=conv_block)
self.Mixed_6b = InceptionC(768, channels_7x7=128, conv_block=conv_block)
self.Mixed_6c = InceptionC(768, channels_7x7=160, conv_block=conv_block)
self.Mixed_6d = InceptionC(768, channels_7x7=160, conv_block=conv_block)
self.Mixed_6e = InceptionC(768, channels_7x7=192, conv_block=conv_block)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes, conv_block=conv_block)
else:
self.AuxLogits = None
self.Mixed_7a = InceptionD(768, conv_block=conv_block)
self.Mixed_7b = InceptionE(1280, conv_block=conv_block)
self.Mixed_7c = InceptionE(2048, conv_block=conv_block)
self.feature_info = [
dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'),
dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'),
dict(num_chs=288, reduction=8, module='Mixed_5d'),
dict(num_chs=768, reduction=16, module='Mixed_6e'),
dict(num_chs=2048, reduction=32, module='Mixed_7c'),
]
self.num_features = 2048
self.global_pool, self.head_drop, self.fc = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
trunc_normal_(m.weight, std=stddev)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))}
module_map.pop(('fc',))
def _matcher(name):
if any([name.startswith(n) for n in ('Conv2d_1', 'Conv2d_2')]):
return 0
elif any([name.startswith(n) for n in ('Conv2d_3', 'Conv2d_4')]):
return 1
else:
for k in module_map.keys():
if k == tuple(name.split('.')[:len(k)]):
return module_map[k]
return float('inf')
return _matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_preaux(self, x):
x = self.Conv2d_1a_3x3(x) # N x 32 x 149 x 149
x = self.Conv2d_2a_3x3(x) # N x 32 x 147 x 147
x = self.Conv2d_2b_3x3(x) # N x 64 x 147 x 147
x = self.Pool1(x) # N x 64 x 73 x 73
x = self.Conv2d_3b_1x1(x) # N x 80 x 73 x 73
x = self.Conv2d_4a_3x3(x) # N x 192 x 71 x 71
x = self.Pool2(x) # N x 192 x 35 x 35
x = self.Mixed_5b(x) # N x 256 x 35 x 35
x = self.Mixed_5c(x) # N x 288 x 35 x 35
x = self.Mixed_5d(x) # N x 288 x 35 x 35
x = self.Mixed_6a(x) # N x 768 x 17 x 17
x = self.Mixed_6b(x) # N x 768 x 17 x 17
x = self.Mixed_6c(x) # N x 768 x 17 x 17
x = self.Mixed_6d(x) # N x 768 x 17 x 17
x = self.Mixed_6e(x) # N x 768 x 17 x 17
return x
def forward_postaux(self, x):
x = self.Mixed_7a(x) # N x 1280 x 8 x 8
x = self.Mixed_7b(x) # N x 2048 x 8 x 8
x = self.Mixed_7c(x) # N x 2048 x 8 x 8
return x
def forward_features(self, x):
x = self.forward_preaux(x)
if self.aux_logits:
aux = self.AuxLogits(x)
x = self.forward_postaux(x)
return x, aux
x = self.forward_postaux(x)
return x
def forward_head(self, x):
x = self.global_pool(x)
x = self.head_drop(x)
x = self.fc(x)
return x
def forward(self, x):
if self.aux_logits:
x, aux = self.forward_features(x)
x = self.forward_head(x)
return x, aux
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_inception_v3(variant, pretrained=False, **kwargs):
pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None))
aux_logits = kwargs.get('aux_logits', False)
has_aux_logits = False
if pretrained_cfg:
# only torchvision pretrained weights have aux logits
has_aux_logits = pretrained_cfg.tag == 'tv_in1k'
if aux_logits:
assert not kwargs.pop('features_only', False)
load_strict = has_aux_logits
else:
load_strict = not has_aux_logits
return build_model_with_cfg(
InceptionV3,
variant,
pretrained,
pretrained_cfg=pretrained_cfg,
pretrained_strict=load_strict,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# original PyTorch weights, ported from Tensorflow but modified
'inception_v3.tv_in1k': _cfg(
# NOTE checkpoint has aux logit layer weights
hf_hub_id='timm/',
url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'),
# my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz)
'inception_v3.tf_in1k': _cfg(hf_hub_id='timm/'),
# my port of Tensorflow adversarially trained Inception V3 from
# http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz
'inception_v3.tf_adv_in1k': _cfg(hf_hub_id='timm/'),
# from gluon pretrained models, best performing in terms of accuracy/loss metrics
# https://gluon-cv.mxnet.io/model_zoo/classification.html
'inception_v3.gluon_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults
std=IMAGENET_DEFAULT_STD, # also works well with inception defaults
)
})
@register_model
def inception_v3(pretrained=False, **kwargs) -> InceptionV3:
model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs)
return model
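# Usage sketch (illustrative): timm.create_model('inception_v3', pretrained=True) loads the tv_in1k weights
# and expects 299x299 inputs per the default cfg; constructing with aux_logits=True makes forward() return
# a (logits, aux_logits) tuple instead of a single logits tensor.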
register_model_deprecations(__name__, {
'tf_inception_v3': 'inception_v3.tf_in1k',
'adv_inception_v3': 'inception_v3.tf_adv_in1k',
'gluon_inception_v3': 'inception_v3.gluon_in1k',
}) | 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/cspnet.py | """PyTorch CspNet
A PyTorch implementation of Cross Stage Partial Networks including:
* CSPResNet50
* CSPResNeXt50
* CSPDarkNet53
* and DarkNet53 for good measure
Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929
Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks
Hacked together by / Copyright 2020 Ross Wightman
"""
from dataclasses import dataclass, asdict, replace
from functools import partial
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, ConvNormAct, ConvNormActAa, DropPath, get_attn, create_act_layer, make_divisible
from ._builder import build_model_with_cfg
from ._manipulate import named_apply, MATCH_PREV_GROUP
from ._registry import register_model, generate_default_cfgs
__all__ = ['CspNet'] # model_registry will add each entrypoint fn to this
@dataclass
class CspStemCfg:
out_chs: Union[int, Tuple[int, ...]] = 32
stride: Union[int, Tuple[int, ...]] = 2
kernel_size: int = 3
padding: Union[int, str] = ''
pool: Optional[str] = ''
def _pad_arg(x, n):
# pads an argument tuple to specified n by padding with last value
if not isinstance(x, (tuple, list)):
x = (x,)
curr_n = len(x)
pad_n = n - curr_n
if pad_n <= 0:
return x[:n]
return tuple(x + (x[-1],) * pad_n)
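# Example (illustrative): _pad_arg(2, 4) -> (2, 2, 2, 2); _pad_arg((1, 2), 4) -> (1, 2, 2, 2); a tuple
# longer than n is truncated, e.g. _pad_arg((1, 2, 3, 4, 5), 4) -> (1, 2, 3, 4).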
@dataclass
class CspStagesCfg:
depth: Tuple[int, ...] = (3, 3, 5, 2) # block depth (number of block repeats in stages)
out_chs: Tuple[int, ...] = (128, 256, 512, 1024) # number of output channels for blocks in stage
stride: Union[int, Tuple[int, ...]] = 2 # stride of stage
groups: Union[int, Tuple[int, ...]] = 1 # num kxk conv groups
block_ratio: Union[float, Tuple[float, ...]] = 1.0
bottle_ratio: Union[float, Tuple[float, ...]] = 1. # bottleneck-ratio of blocks in stage
avg_down: Union[bool, Tuple[bool, ...]] = False
attn_layer: Optional[Union[str, Tuple[str, ...]]] = None
attn_kwargs: Optional[Union[Dict, Tuple[Dict]]] = None
    stage_type: Union[str, Tuple[str]] = 'csp'  # stage type ('csp', 'cs3', 'dark')
block_type: Union[str, Tuple[str]] = 'bottle' # blocks type for stages ('bottle', 'dark')
# cross-stage only
expand_ratio: Union[float, Tuple[float, ...]] = 1.0
cross_linear: Union[bool, Tuple[bool, ...]] = False
down_growth: Union[bool, Tuple[bool, ...]] = False
def __post_init__(self):
n = len(self.depth)
assert len(self.out_chs) == n
self.stride = _pad_arg(self.stride, n)
self.groups = _pad_arg(self.groups, n)
self.block_ratio = _pad_arg(self.block_ratio, n)
self.bottle_ratio = _pad_arg(self.bottle_ratio, n)
self.avg_down = _pad_arg(self.avg_down, n)
self.attn_layer = _pad_arg(self.attn_layer, n)
self.attn_kwargs = _pad_arg(self.attn_kwargs, n)
self.stage_type = _pad_arg(self.stage_type, n)
self.block_type = _pad_arg(self.block_type, n)
self.expand_ratio = _pad_arg(self.expand_ratio, n)
self.cross_linear = _pad_arg(self.cross_linear, n)
self.down_growth = _pad_arg(self.down_growth, n)
@dataclass
class CspModelCfg:
stem: CspStemCfg
stages: CspStagesCfg
zero_init_last: bool = True # zero init last weight (usually bn) in residual path
act_layer: str = 'leaky_relu'
norm_layer: str = 'batchnorm'
aa_layer: Optional[str] = None # FIXME support string factory for this
def _cs3_cfg(
width_multiplier=1.0,
depth_multiplier=1.0,
avg_down=False,
act_layer='silu',
focus=False,
attn_layer=None,
attn_kwargs=None,
bottle_ratio=1.0,
block_type='dark',
):
if focus:
stem_cfg = CspStemCfg(
out_chs=make_divisible(64 * width_multiplier),
kernel_size=6, stride=2, padding=2, pool='')
else:
stem_cfg = CspStemCfg(
out_chs=tuple([make_divisible(c * width_multiplier) for c in (32, 64)]),
kernel_size=3, stride=2, pool='')
return CspModelCfg(
stem=stem_cfg,
stages=CspStagesCfg(
out_chs=tuple([make_divisible(c * width_multiplier) for c in (128, 256, 512, 1024)]),
depth=tuple([int(d * depth_multiplier) for d in (3, 6, 9, 3)]),
stride=2,
bottle_ratio=bottle_ratio,
block_ratio=0.5,
avg_down=avg_down,
attn_layer=attn_layer,
attn_kwargs=attn_kwargs,
stage_type='cs3',
block_type=block_type,
),
act_layer=act_layer,
)
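# Example (illustrative): _cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67) produces a (24, 48)
# channel stem, stage widths (96, 192, 384, 768) and stage depths (2, 4, 6, 2) -- width scales the channel
# plan via make_divisible while depth scales the per-stage block counts.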
class BottleneckBlock(nn.Module):
""" ResNe(X)t Bottleneck Block
"""
def __init__(
self,
in_chs,
out_chs,
dilation=1,
bottle_ratio=0.25,
groups=1,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_last=False,
attn_layer=None,
drop_block=None,
drop_path=0.
):
super(BottleneckBlock, self).__init__()
mid_chs = int(round(out_chs * bottle_ratio))
ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
attn_last = attn_layer is not None and attn_last
attn_first = attn_layer is not None and not attn_last
self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs)
self.conv2 = ConvNormAct(
mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups,
drop_layer=drop_block, **ckwargs)
self.attn2 = attn_layer(mid_chs, act_layer=act_layer) if attn_first else nn.Identity()
self.conv3 = ConvNormAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs)
self.attn3 = attn_layer(out_chs, act_layer=act_layer) if attn_last else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
self.act3 = create_act_layer(act_layer)
def zero_init_last(self):
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
x = self.attn2(x)
x = self.conv3(x)
x = self.attn3(x)
x = self.drop_path(x) + shortcut
# FIXME partial shortcut needed if first block handled as per original, not used for my current impl
#x[:, :shortcut.size(1)] += shortcut
x = self.act3(x)
return x
class DarkBlock(nn.Module):
""" DarkNet Block
"""
def __init__(
self,
in_chs,
out_chs,
dilation=1,
bottle_ratio=0.5,
groups=1,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
drop_block=None,
drop_path=0.
):
super(DarkBlock, self).__init__()
mid_chs = int(round(out_chs * bottle_ratio))
ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs)
self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity()
self.conv2 = ConvNormAct(
mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups,
drop_layer=drop_block, **ckwargs)
self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
def zero_init_last(self):
nn.init.zeros_(self.conv2.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.attn(x)
x = self.conv2(x)
x = self.drop_path(x) + shortcut
return x
class EdgeBlock(nn.Module):
""" EdgeResidual / Fused-MBConv / MobileNetV1-like 3x3 + 1x1 block (w/ activated output)
"""
def __init__(
self,
in_chs,
out_chs,
dilation=1,
bottle_ratio=0.5,
groups=1,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
drop_block=None,
drop_path=0.
):
super(EdgeBlock, self).__init__()
mid_chs = int(round(out_chs * bottle_ratio))
ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
self.conv1 = ConvNormAct(
in_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups,
drop_layer=drop_block, **ckwargs)
self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity()
self.conv2 = ConvNormAct(mid_chs, out_chs, kernel_size=1, **ckwargs)
self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
def zero_init_last(self):
nn.init.zeros_(self.conv2.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.attn(x)
x = self.conv2(x)
x = self.drop_path(x) + shortcut
return x
class CrossStage(nn.Module):
"""Cross Stage."""
def __init__(
self,
in_chs,
out_chs,
stride,
dilation,
depth,
block_ratio=1.,
bottle_ratio=1.,
expand_ratio=1.,
groups=1,
first_dilation=None,
avg_down=False,
down_growth=False,
cross_linear=False,
block_dpr=None,
block_fn=BottleneckBlock,
**block_kwargs,
):
super(CrossStage, self).__init__()
first_dilation = first_dilation or dilation
down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels
self.expand_chs = exp_chs = int(round(out_chs * expand_ratio))
block_out_chs = int(round(out_chs * block_ratio))
conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'))
aa_layer = block_kwargs.pop('aa_layer', None)
if stride != 1 or first_dilation != dilation:
if avg_down:
self.conv_down = nn.Sequential(
nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling
ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)
)
else:
self.conv_down = ConvNormActAa(
in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups,
aa_layer=aa_layer, **conv_kwargs)
prev_chs = down_chs
else:
self.conv_down = nn.Identity()
prev_chs = in_chs
# FIXME this 1x1 expansion is pushed down into the cross and block paths in the darknet cfgs. Also,
        # there is a special case for the first stage of some of the models that results in an uneven split
# across the two paths. I did it this way for simplicity for now.
self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs)
prev_chs = exp_chs // 2 # output of conv_exp is always split in two
self.blocks = nn.Sequential()
for i in range(depth):
self.blocks.add_module(str(i), block_fn(
in_chs=prev_chs,
out_chs=block_out_chs,
dilation=dilation,
bottle_ratio=bottle_ratio,
groups=groups,
drop_path=block_dpr[i] if block_dpr is not None else 0.,
**block_kwargs,
))
prev_chs = block_out_chs
# transition convs
self.conv_transition_b = ConvNormAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs)
self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs)
def forward(self, x):
x = self.conv_down(x)
x = self.conv_exp(x)
xs, xb = x.split(self.expand_chs // 2, dim=1)
xb = self.blocks(xb)
xb = self.conv_transition_b(xb).contiguous()
out = self.conv_transition(torch.cat([xs, xb], dim=1))
return out
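# Example (illustrative): with in_chs=64, out_chs=128, expand_ratio=2.0 and block_ratio=1.0, conv_exp
# widens to 256 channels that are split into two 128-channel halves; one half runs through the blocks and
# conv_transition_b, the other crosses over untouched, and the concatenated 256 channels are fused back
# down to out_chs=128 by conv_transition.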
class CrossStage3(nn.Module):
"""Cross Stage 3.
Similar to CrossStage, but with only one transition conv for the output.
"""
def __init__(
self,
in_chs,
out_chs,
stride,
dilation,
depth,
block_ratio=1.,
bottle_ratio=1.,
expand_ratio=1.,
groups=1,
first_dilation=None,
avg_down=False,
down_growth=False,
cross_linear=False,
block_dpr=None,
block_fn=BottleneckBlock,
**block_kwargs,
):
super(CrossStage3, self).__init__()
first_dilation = first_dilation or dilation
down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels
self.expand_chs = exp_chs = int(round(out_chs * expand_ratio))
block_out_chs = int(round(out_chs * block_ratio))
conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'))
aa_layer = block_kwargs.pop('aa_layer', None)
if stride != 1 or first_dilation != dilation:
if avg_down:
self.conv_down = nn.Sequential(
nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling
ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)
)
else:
self.conv_down = ConvNormActAa(
in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups,
aa_layer=aa_layer, **conv_kwargs)
prev_chs = down_chs
else:
            self.conv_down = nn.Identity()  # keep forward() unconditional (matches CrossStage)
prev_chs = in_chs
# expansion conv
self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs)
prev_chs = exp_chs // 2 # expanded output is split in 2 for blocks and cross stage
self.blocks = nn.Sequential()
for i in range(depth):
self.blocks.add_module(str(i), block_fn(
in_chs=prev_chs,
out_chs=block_out_chs,
dilation=dilation,
bottle_ratio=bottle_ratio,
groups=groups,
drop_path=block_dpr[i] if block_dpr is not None else 0.,
**block_kwargs,
))
prev_chs = block_out_chs
# transition convs
self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs)
def forward(self, x):
x = self.conv_down(x)
x = self.conv_exp(x)
x1, x2 = x.split(self.expand_chs // 2, dim=1)
x1 = self.blocks(x1)
out = self.conv_transition(torch.cat([x1, x2], dim=1))
return out
class DarkStage(nn.Module):
"""DarkNet stage."""
def __init__(
self,
in_chs,
out_chs,
stride,
dilation,
depth,
block_ratio=1.,
bottle_ratio=1.,
groups=1,
first_dilation=None,
avg_down=False,
block_fn=BottleneckBlock,
block_dpr=None,
**block_kwargs,
):
super(DarkStage, self).__init__()
first_dilation = first_dilation or dilation
conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'))
aa_layer = block_kwargs.pop('aa_layer', None)
if avg_down:
self.conv_down = nn.Sequential(
nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling
ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs)
)
else:
self.conv_down = ConvNormActAa(
in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups,
aa_layer=aa_layer, **conv_kwargs)
prev_chs = out_chs
block_out_chs = int(round(out_chs * block_ratio))
self.blocks = nn.Sequential()
for i in range(depth):
self.blocks.add_module(str(i), block_fn(
in_chs=prev_chs,
out_chs=block_out_chs,
dilation=dilation,
bottle_ratio=bottle_ratio,
groups=groups,
drop_path=block_dpr[i] if block_dpr is not None else 0.,
**block_kwargs
))
prev_chs = block_out_chs
def forward(self, x):
x = self.conv_down(x)
x = self.blocks(x)
return x
def create_csp_stem(
in_chans=3,
out_chs=32,
kernel_size=3,
stride=2,
pool='',
padding='',
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
aa_layer=None,
):
stem = nn.Sequential()
feature_info = []
if not isinstance(out_chs, (tuple, list)):
out_chs = [out_chs]
stem_depth = len(out_chs)
assert stem_depth
assert stride in (1, 2, 4)
prev_feat = None
prev_chs = in_chans
last_idx = stem_depth - 1
stem_stride = 1
for i, chs in enumerate(out_chs):
conv_name = f'conv{i + 1}'
conv_stride = 2 if (i == 0 and stride > 1) or (i == last_idx and stride > 2 and not pool) else 1
if conv_stride > 1 and prev_feat is not None:
feature_info.append(prev_feat)
stem.add_module(conv_name, ConvNormAct(
prev_chs, chs, kernel_size,
stride=conv_stride,
padding=padding if i == 0 else '',
act_layer=act_layer,
norm_layer=norm_layer,
))
stem_stride *= conv_stride
prev_chs = chs
prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', conv_name]))
if pool:
assert stride > 2
if prev_feat is not None:
feature_info.append(prev_feat)
if aa_layer is not None:
stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
stem.add_module('aa', aa_layer(channels=prev_chs, stride=2))
pool_name = 'aa'
else:
stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
pool_name = 'pool'
stem_stride *= 2
prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', pool_name]))
feature_info.append(prev_feat)
return stem, feature_info
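# Illustrative sketch (uses the module defaults above): a 3-conv stem with overall stride 4 and
# max pooling reduces spatial resolution by 4x and records its last feature tap at reduction 4.
def _csp_stem_sketch():
    stem, feature_info = create_csp_stem(out_chs=(32, 32, 64), stride=4, pool='max')
    y = stem(torch.randn(1, 3, 64, 64))
    assert y.shape[-2:] == (16, 16)             # 64 / stem stride 4
    assert feature_info[-1]['reduction'] == 4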
def _get_stage_fn(stage_args):
stage_type = stage_args.pop('stage_type')
assert stage_type in ('dark', 'csp', 'cs3')
if stage_type == 'dark':
stage_args.pop('expand_ratio', None)
stage_args.pop('cross_linear', None)
stage_args.pop('down_growth', None)
stage_fn = DarkStage
elif stage_type == 'csp':
stage_fn = CrossStage
else:
stage_fn = CrossStage3
return stage_fn, stage_args
def _get_block_fn(stage_args):
block_type = stage_args.pop('block_type')
assert block_type in ('dark', 'edge', 'bottle')
if block_type == 'dark':
return DarkBlock, stage_args
elif block_type == 'edge':
return EdgeBlock, stage_args
else:
return BottleneckBlock, stage_args
def _get_attn_fn(stage_args):
attn_layer = stage_args.pop('attn_layer')
attn_kwargs = stage_args.pop('attn_kwargs', None) or {}
if attn_layer is not None:
attn_layer = get_attn(attn_layer)
if attn_kwargs:
attn_layer = partial(attn_layer, **attn_kwargs)
return attn_layer, stage_args
def create_csp_stages(
cfg: CspModelCfg,
drop_path_rate: float,
output_stride: int,
stem_feat: Dict[str, Any],
):
cfg_dict = asdict(cfg.stages)
num_stages = len(cfg.stages.depth)
cfg_dict['block_dpr'] = [None] * num_stages if not drop_path_rate else \
[x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.stages.depth)).split(cfg.stages.depth)]
    per_stage_args = [dict(zip(cfg_dict.keys(), values)) for values in zip(*cfg_dict.values())]
block_kwargs = dict(
act_layer=cfg.act_layer,
norm_layer=cfg.norm_layer,
)
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
feature_info = []
stages = []
    for stage_idx, stage_args in enumerate(per_stage_args):
stage_fn, stage_args = _get_stage_fn(stage_args)
block_fn, stage_args = _get_block_fn(stage_args)
attn_fn, stage_args = _get_attn_fn(stage_args)
stride = stage_args.pop('stride')
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
stages += [stage_fn(
prev_chs,
**stage_args,
stride=stride,
first_dilation=first_dilation,
dilation=dilation,
block_fn=block_fn,
aa_layer=cfg.aa_layer,
attn_layer=attn_fn, # will be passed through stage as block_kwargs
**block_kwargs,
)]
prev_chs = stage_args['out_chs']
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info
class CspNet(nn.Module):
"""Cross Stage Partial base model.
Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929
Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks
    NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the
    darknet impl. I did it this way for simplicity and fewer special cases.
"""
def __init__(
self,
cfg: CspModelCfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.,
drop_path_rate=0.,
zero_init_last=True,
**kwargs,
):
"""
Args:
cfg (CspModelCfg): Model architecture configuration
in_chans (int): Number of input channels (default: 3)
num_classes (int): Number of classifier classes (default: 1000)
output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32)
global_pool (str): Global pooling type (default: 'avg')
drop_rate (float): Dropout rate (default: 0.)
drop_path_rate (float): Stochastic depth drop-path rate (default: 0.)
zero_init_last (bool): Zero-init last weight of residual path
kwargs (dict): Extra kwargs overlayed onto cfg
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
assert output_stride in (8, 16, 32)
cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg
layer_args = dict(
act_layer=cfg.act_layer,
norm_layer=cfg.norm_layer,
aa_layer=cfg.aa_layer
)
self.feature_info = []
# Construct the stem
self.stem, stem_feat_info = create_csp_stem(in_chans, **asdict(cfg.stem), **layer_args)
self.feature_info.extend(stem_feat_info[:-1])
# Construct the stages
self.stages, stage_feat_info = create_csp_stages(
cfg,
drop_path_rate=drop_path_rate,
output_stride=output_stride,
stem_feat=stem_feat_info[-1],
)
prev_chs = stage_feat_info[-1]['num_chs']
self.feature_info.extend(stage_feat_info)
# Construct the head
self.num_features = prev_chs
self.head = ClassifierHead(
in_features=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate)
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^stages\.(\d+)\..*transition', MATCH_PREV_GROUP), # map to last block in stage
(r'^stages\.(\d+)', (0,)),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
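# Usage sketch (assumes the timm package is installed; imported lazily inside the function to
# avoid a circular import at module load time): build a registered CspNet variant and run it.
def _cspnet_usage_sketch():
    import timm
    model = timm.create_model('cspresnet50', pretrained=False)
    logits = model(torch.randn(1, 3, 256, 256))
    assert logits.shape == (1, 1000)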
def _init_weights(module, name, zero_init_last=False):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif zero_init_last and hasattr(module, 'zero_init_last'):
module.zero_init_last()
model_cfgs = dict(
cspresnet50=CspModelCfg(
stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'),
stages=CspStagesCfg(
depth=(3, 3, 5, 2),
out_chs=(128, 256, 512, 1024),
stride=(1, 2),
expand_ratio=2.,
bottle_ratio=0.5,
cross_linear=True,
),
),
cspresnet50d=CspModelCfg(
stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'),
stages=CspStagesCfg(
depth=(3, 3, 5, 2),
out_chs=(128, 256, 512, 1024),
stride=(1,) + (2,),
expand_ratio=2.,
bottle_ratio=0.5,
block_ratio=1.,
cross_linear=True,
),
),
cspresnet50w=CspModelCfg(
stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'),
stages=CspStagesCfg(
depth=(3, 3, 5, 2),
out_chs=(256, 512, 1024, 2048),
stride=(1,) + (2,),
expand_ratio=1.,
bottle_ratio=0.25,
block_ratio=0.5,
cross_linear=True,
),
),
cspresnext50=CspModelCfg(
stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'),
stages=CspStagesCfg(
depth=(3, 3, 5, 2),
out_chs=(256, 512, 1024, 2048),
stride=(1,) + (2,),
groups=32,
expand_ratio=1.,
bottle_ratio=1.,
block_ratio=0.5,
cross_linear=True,
),
),
cspdarknet53=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 2, 8, 8, 4),
out_chs=(64, 128, 256, 512, 1024),
stride=2,
expand_ratio=(2.,) + (1.,),
bottle_ratio=(0.5,) + (1.,),
block_ratio=(1.,) + (0.5,),
down_growth=True,
block_type='dark',
),
),
darknet17=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1,) * 5,
out_chs=(64, 128, 256, 512, 1024),
stride=(2,),
bottle_ratio=(0.5,),
block_ratio=(1.,),
stage_type='dark',
block_type='dark',
),
),
darknet21=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 1, 1, 2, 2),
out_chs=(64, 128, 256, 512, 1024),
stride=(2,),
bottle_ratio=(0.5,),
block_ratio=(1.,),
stage_type='dark',
block_type='dark',
),
),
sedarknet21=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 1, 1, 2, 2),
out_chs=(64, 128, 256, 512, 1024),
stride=2,
bottle_ratio=0.5,
block_ratio=1.,
attn_layer='se',
stage_type='dark',
block_type='dark',
),
),
darknet53=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 2, 8, 8, 4),
out_chs=(64, 128, 256, 512, 1024),
stride=2,
bottle_ratio=0.5,
block_ratio=1.,
stage_type='dark',
block_type='dark',
),
),
darknetaa53=CspModelCfg(
stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''),
stages=CspStagesCfg(
depth=(1, 2, 8, 8, 4),
out_chs=(64, 128, 256, 512, 1024),
stride=2,
bottle_ratio=0.5,
block_ratio=1.,
avg_down=True,
stage_type='dark',
block_type='dark',
),
),
cs3darknet_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5),
cs3darknet_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67),
cs3darknet_l=_cs3_cfg(),
cs3darknet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33),
cs3darknet_focus_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5, focus=True),
cs3darknet_focus_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67, focus=True),
cs3darknet_focus_l=_cs3_cfg(focus=True),
cs3darknet_focus_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, focus=True),
cs3sedarknet_l=_cs3_cfg(attn_layer='se', attn_kwargs=dict(rd_ratio=.25)),
cs3sedarknet_x=_cs3_cfg(attn_layer='se', width_multiplier=1.25, depth_multiplier=1.33),
cs3sedarknet_xdw=CspModelCfg(
stem=CspStemCfg(out_chs=(32, 64), kernel_size=3, stride=2, pool=''),
stages=CspStagesCfg(
depth=(3, 6, 12, 4),
out_chs=(256, 512, 1024, 2048),
stride=2,
groups=(1, 1, 256, 512),
bottle_ratio=0.5,
block_ratio=0.5,
attn_layer='se',
),
act_layer='silu',
),
cs3edgenet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge'),
cs3se_edgenet_x=_cs3_cfg(
width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge',
attn_layer='se', attn_kwargs=dict(rd_ratio=.25)),
)
def _create_cspnet(variant, pretrained=False, **kwargs):
if variant.startswith('darknet') or variant.startswith('cspdarknet'):
        # NOTE: DarkNet is one of the few models with stride==1 features w/ 6 out_indices [0..5]
default_out_indices = (0, 1, 2, 3, 4, 5)
else:
default_out_indices = (0, 1, 2, 3, 4)
out_indices = kwargs.pop('out_indices', default_out_indices)
return build_model_with_cfg(
CspNet, variant, pretrained,
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': 0.887, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'cspresnet50.ra_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'),
'cspresnet50d.untrained': _cfg(),
'cspresnet50w.untrained': _cfg(),
'cspresnext50.ra_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth',
),
'cspdarknet53.ra_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'),
'darknet17.untrained': _cfg(),
'darknet21.untrained': _cfg(),
'sedarknet21.untrained': _cfg(),
'darknet53.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknet53_256_c2ns-3aeff817.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0),
'darknetaa53.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknetaa53_c2ns-5c28ec8a.pth',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'cs3darknet_s.untrained': _cfg(interpolation='bicubic'),
'cs3darknet_m.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_m_c2ns-43f06604.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95,
),
'cs3darknet_l.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_l_c2ns-16220c5d.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95),
'cs3darknet_x.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_x_c2ns-4e4490aa.pth',
interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'cs3darknet_focus_s.untrained': _cfg(interpolation='bicubic'),
'cs3darknet_focus_m.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_m_c2ns-e23bed41.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95),
'cs3darknet_focus_l.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_l_c2ns-65ef8888.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95),
'cs3darknet_focus_x.untrained': _cfg(interpolation='bicubic'),
'cs3sedarknet_l.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_l_c2ns-e8d1dc13.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95),
'cs3sedarknet_x.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_x_c2ns-b4d0abc0.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0),
'cs3sedarknet_xdw.untrained': _cfg(interpolation='bicubic'),
'cs3edgenet_x.c2_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3edgenet_x_c2-2e1610a9.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0),
'cs3se_edgenet_x.c2ns_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3se_edgenet_x_c2ns-76f8e3ac.pth',
interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0),
})
@register_model
def cspresnet50(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs)
@register_model
def cspresnet50d(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs)
@register_model
def cspresnet50w(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs)
@register_model
def cspresnext50(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs)
@register_model
def cspdarknet53(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cspdarknet53', pretrained=pretrained, **kwargs)
@register_model
def darknet17(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('darknet17', pretrained=pretrained, **kwargs)
@register_model
def darknet21(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('darknet21', pretrained=pretrained, **kwargs)
@register_model
def sedarknet21(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('sedarknet21', pretrained=pretrained, **kwargs)
@register_model
def darknet53(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('darknet53', pretrained=pretrained, **kwargs)
@register_model
def darknetaa53(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('darknetaa53', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_s(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_s', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_m(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_m', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_l(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_l', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_x', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_focus_s(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_focus_s', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_focus_m(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_focus_m', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_focus_l(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_focus_l', pretrained=pretrained, **kwargs)
@register_model
def cs3darknet_focus_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3darknet_focus_x', pretrained=pretrained, **kwargs)
@register_model
def cs3sedarknet_l(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3sedarknet_l', pretrained=pretrained, **kwargs)
@register_model
def cs3sedarknet_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3sedarknet_x', pretrained=pretrained, **kwargs)
@register_model
def cs3sedarknet_xdw(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3sedarknet_xdw', pretrained=pretrained, **kwargs)
@register_model
def cs3edgenet_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3edgenet_x', pretrained=pretrained, **kwargs)
@register_model
def cs3se_edgenet_x(pretrained=False, **kwargs) -> CspNet:
return _create_cspnet('cs3se_edgenet_x', pretrained=pretrained, **kwargs) | 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/_hub.py | import hashlib
import json
import logging
import os
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Iterable, Optional, Union
import torch
from torch.hub import HASH_REGEX, download_url_to_file, urlparse
try:
from torch.hub import get_dir
except ImportError:
from torch.hub import _get_torch_home as get_dir
try:
import safetensors.torch
_has_safetensors = True
except ImportError:
_has_safetensors = False
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from timm import __version__
from timm.models._pretrained import filter_pretrained_cfg
try:
from huggingface_hub import (
create_repo, get_hf_file_metadata,
hf_hub_download, hf_hub_url,
repo_type_and_id_from_hf_id, upload_folder)
from huggingface_hub.utils import EntryNotFoundError
hf_hub_download = partial(hf_hub_download, library_name="timm", library_version=__version__)
_has_hf_hub = True
except ImportError:
hf_hub_download = None
_has_hf_hub = False
_logger = logging.getLogger(__name__)
__all__ = ['get_cache_dir', 'download_cached_file', 'has_hf_hub', 'hf_split', 'load_model_config_from_hf',
'load_state_dict_from_hf', 'save_for_hf', 'push_to_hf_hub']
# Default name for a weights file hosted on the Huggingface Hub.
HF_WEIGHTS_NAME = "pytorch_model.bin" # default pytorch pkl
HF_SAFE_WEIGHTS_NAME = "model.safetensors" # safetensors version
HF_OPEN_CLIP_WEIGHTS_NAME = "open_clip_pytorch_model.bin" # default pytorch pkl
HF_OPEN_CLIP_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors" # safetensors version
def get_cache_dir(child_dir=''):
"""
Returns the location of the directory where models are cached (and creates it if necessary).
"""
# Issue warning to move data if old env is set
if os.getenv('TORCH_MODEL_ZOO'):
_logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')
hub_dir = get_dir()
child_dir = () if not child_dir else (child_dir,)
model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir)
os.makedirs(model_dir, exist_ok=True)
return model_dir
def download_cached_file(url, check_hash=True, progress=False):
if isinstance(url, (list, tuple)):
url, filename = url
else:
parts = urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(get_cache_dir(), filename)
if not os.path.exists(cached_file):
_logger.info('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = None
if check_hash:
r = HASH_REGEX.search(filename) # r is Optional[Match[str]]
hash_prefix = r.group(1) if r else None
download_url_to_file(url, cached_file, hash_prefix, progress=progress)
return cached_file
def check_cached_file(url, check_hash=True):
if isinstance(url, (list, tuple)):
url, filename = url
else:
parts = urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(get_cache_dir(), filename)
if os.path.exists(cached_file):
if check_hash:
r = HASH_REGEX.search(filename) # r is Optional[Match[str]]
hash_prefix = r.group(1) if r else None
if hash_prefix:
with open(cached_file, 'rb') as f:
hd = hashlib.sha256(f.read()).hexdigest()
if hd[:len(hash_prefix)] != hash_prefix:
return False
return True
return False
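# Illustrative sketch: cached weight filenames follow the torch.hub convention of embedding a
# short sha256 prefix, e.g. 'cspresnet50_ra-d3e8d487.pth', which the checks above compare
# against the downloaded file's digest.
def _hash_prefix_sketch():
    match = HASH_REGEX.search('cspresnet50_ra-d3e8d487.pth')
    assert match is not None and match.group(1) == 'd3e8d487'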
def has_hf_hub(necessary=False):
if not _has_hf_hub and necessary:
# if no HF Hub module installed, and it is necessary to continue, raise error
raise RuntimeError(
'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
return _has_hf_hub
def hf_split(hf_id: str):
    # FIXME I may change @ -> # so it can be parsed as a fragment in a URI model name scheme
rev_split = hf_id.split('@')
assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.'
hf_model_id = rev_split[0]
hf_revision = rev_split[-1] if len(rev_split) > 1 else None
return hf_model_id, hf_revision
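# Illustrative sketch: hf_split separates an optional '@revision' suffix from a hub model id.
def _hf_split_sketch():
    assert hf_split('timm/resnet50.a1_in1k') == ('timm/resnet50.a1_in1k', None)
    assert hf_split('timm/resnet50.a1_in1k@main') == ('timm/resnet50.a1_in1k', 'main')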
def load_cfg_from_json(json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def download_from_hf(model_id: str, filename: str):
hf_model_id, hf_revision = hf_split(model_id)
return hf_hub_download(hf_model_id, filename, revision=hf_revision)
def load_model_config_from_hf(model_id: str):
assert has_hf_hub(True)
cached_file = download_from_hf(model_id, 'config.json')
hf_config = load_cfg_from_json(cached_file)
if 'pretrained_cfg' not in hf_config:
# old form, pull pretrain_cfg out of the base dict
pretrained_cfg = hf_config
hf_config = {}
hf_config['architecture'] = pretrained_cfg.pop('architecture')
hf_config['num_features'] = pretrained_cfg.pop('num_features', None)
if 'labels' in pretrained_cfg: # deprecated name for 'label_names'
pretrained_cfg['label_names'] = pretrained_cfg.pop('labels')
hf_config['pretrained_cfg'] = pretrained_cfg
# NOTE currently discarding parent config as only arch name and pretrained_cfg used in timm right now
pretrained_cfg = hf_config['pretrained_cfg']
pretrained_cfg['hf_hub_id'] = model_id # insert hf_hub id for pretrained weight load during model creation
pretrained_cfg['source'] = 'hf-hub'
# model should be created with base config num_classes if its exist
if 'num_classes' in hf_config:
pretrained_cfg['num_classes'] = hf_config['num_classes']
# label meta-data in base config overrides saved pretrained_cfg on load
if 'label_names' in hf_config:
pretrained_cfg['label_names'] = hf_config.pop('label_names')
if 'label_descriptions' in hf_config:
pretrained_cfg['label_descriptions'] = hf_config.pop('label_descriptions')
model_args = hf_config.get('model_args', {})
model_name = hf_config['architecture']
return pretrained_cfg, model_name, model_args
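# Usage sketch (requires network access and the huggingface_hub package; the repo id is a real
# timm-hosted model, used here only as an example): fetch architecture name and pretrained_cfg.
def _load_model_config_sketch():
    pretrained_cfg, model_name, model_args = load_model_config_from_hf('timm/resnet50.a1_in1k')
    assert pretrained_cfg['source'] == 'hf-hub'
    return model_name, model_args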
def load_state_dict_from_hf(model_id: str, filename: str = HF_WEIGHTS_NAME):
assert has_hf_hub(True)
hf_model_id, hf_revision = hf_split(model_id)
    # Look for .safetensors alternatives and load from one if it exists
if _has_safetensors:
for safe_filename in _get_safe_alternatives(filename):
try:
cached_safe_file = hf_hub_download(repo_id=hf_model_id, filename=safe_filename, revision=hf_revision)
_logger.info(
f"[{model_id}] Safe alternative available for '{filename}' "
f"(as '{safe_filename}'). Loading weights using safetensors.")
return safetensors.torch.load_file(cached_safe_file, device="cpu")
except EntryNotFoundError:
pass
# Otherwise, load using pytorch.load
cached_file = hf_hub_download(hf_model_id, filename=filename, revision=hf_revision)
_logger.debug(f"[{model_id}] Safe alternative not found for '{filename}'. Loading weights using default pytorch.")
return torch.load(cached_file, map_location='cpu')
def save_config_for_hf(
model,
        config_path: Path,
model_config: Optional[dict] = None,
model_args: Optional[dict] = None
):
model_config = model_config or {}
hf_config = {}
pretrained_cfg = filter_pretrained_cfg(model.pretrained_cfg, remove_source=True, remove_null=True)
# set some values at root config level
hf_config['architecture'] = pretrained_cfg.pop('architecture')
hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes)
# NOTE these attr saved for informational purposes, do not impact model build
hf_config['num_features'] = model_config.pop('num_features', model.num_features)
global_pool_type = model_config.pop('global_pool', getattr(model, 'global_pool', None))
if isinstance(global_pool_type, str) and global_pool_type:
hf_config['global_pool'] = global_pool_type
# Save class label info
if 'labels' in model_config:
_logger.warning(
"'labels' as a config field for is deprecated. Please use 'label_names' and 'label_descriptions'."
" Renaming provided 'labels' field to 'label_names'.")
model_config.setdefault('label_names', model_config.pop('labels'))
label_names = model_config.pop('label_names', None)
if label_names:
assert isinstance(label_names, (dict, list, tuple))
# map label id (classifier index) -> unique label name (ie synset for ImageNet, MID for OpenImages)
# can be a dict id: name if there are id gaps, or tuple/list if no gaps.
hf_config['label_names'] = label_names
label_descriptions = model_config.pop('label_descriptions', None)
if label_descriptions:
assert isinstance(label_descriptions, dict)
# maps label names -> descriptions
hf_config['label_descriptions'] = label_descriptions
if model_args:
hf_config['model_args'] = model_args
hf_config['pretrained_cfg'] = pretrained_cfg
hf_config.update(model_config)
with config_path.open('w') as f:
json.dump(hf_config, f, indent=2)
def save_for_hf(
model,
save_directory: str,
model_config: Optional[dict] = None,
model_args: Optional[dict] = None,
safe_serialization: Union[bool, Literal["both"]] = False,
):
assert has_hf_hub(True)
save_directory = Path(save_directory)
save_directory.mkdir(exist_ok=True, parents=True)
# Save model weights, either safely (using safetensors), or using legacy pytorch approach or both.
tensors = model.state_dict()
if safe_serialization is True or safe_serialization == "both":
assert _has_safetensors, "`pip install safetensors` to use .safetensors"
safetensors.torch.save_file(tensors, save_directory / HF_SAFE_WEIGHTS_NAME)
if safe_serialization is False or safe_serialization == "both":
torch.save(tensors, save_directory / HF_WEIGHTS_NAME)
config_path = save_directory / 'config.json'
save_config_for_hf(
model,
config_path,
model_config=model_config,
model_args=model_args,
)
def push_to_hf_hub(
model: torch.nn.Module,
repo_id: str,
commit_message: str = 'Add model',
token: Optional[str] = None,
revision: Optional[str] = None,
private: bool = False,
create_pr: bool = False,
model_config: Optional[dict] = None,
model_card: Optional[dict] = None,
model_args: Optional[dict] = None,
safe_serialization: Union[bool, Literal["both"]] = False,
):
"""
Arguments:
(...)
safe_serialization (`bool` or `"both"`, *optional*, defaults to `False`):
Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
Can be set to `"both"` in order to push both safe and unsafe weights.
"""
# Create repo if it doesn't exist yet
repo_url = create_repo(repo_id, token=token, private=private, exist_ok=True)
# Infer complete repo_id from repo_url
# Can be different from the input `repo_id` if repo_owner was implicit
_, repo_owner, repo_name = repo_type_and_id_from_hf_id(repo_url)
repo_id = f"{repo_owner}/{repo_name}"
# Check if README file already exist in repo
try:
get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename="README.md", revision=revision))
has_readme = True
except EntryNotFoundError:
has_readme = False
# Dump model and push to Hub
with TemporaryDirectory() as tmpdir:
# Save model weights and config.
save_for_hf(
model,
tmpdir,
model_config=model_config,
model_args=model_args,
safe_serialization=safe_serialization,
)
# Add readme if it does not exist
if not has_readme:
model_card = model_card or {}
model_name = repo_id.split('/')[-1]
readme_path = Path(tmpdir) / "README.md"
readme_text = generate_readme(model_card, model_name)
readme_path.write_text(readme_text)
# Upload model and return
return upload_folder(
repo_id=repo_id,
folder_path=tmpdir,
revision=revision,
create_pr=create_pr,
commit_message=commit_message,
)
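# Usage sketch (hypothetical repo id; requires an authenticated huggingface_hub token, plus the
# safetensors package for 'both'): push a trained model together with a minimal model card.
def _push_to_hf_hub_sketch(model):
    return push_to_hf_hub(
        model,
        repo_id='my-user/my-timm-model',  # hypothetical destination repo
        model_card={'description': 'Example model pushed from timm.'},
        safe_serialization='both',  # write both model.safetensors and pytorch_model.bin
    )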
def generate_readme(model_card: dict, model_name: str):
readme_text = "---\n"
readme_text += "tags:\n- image-classification\n- timm\n"
readme_text += "library_name: timm\n"
readme_text += f"license: {model_card.get('license', 'apache-2.0')}\n"
if 'details' in model_card and 'Dataset' in model_card['details']:
readme_text += 'datasets:\n'
if isinstance(model_card['details']['Dataset'], (tuple, list)):
for d in model_card['details']['Dataset']:
readme_text += f"- {d.lower()}\n"
else:
readme_text += f"- {model_card['details']['Dataset'].lower()}\n"
if 'Pretrain Dataset' in model_card['details']:
if isinstance(model_card['details']['Pretrain Dataset'], (tuple, list)):
for d in model_card['details']['Pretrain Dataset']:
readme_text += f"- {d.lower()}\n"
else:
readme_text += f"- {model_card['details']['Pretrain Dataset'].lower()}\n"
readme_text += "---\n"
readme_text += f"# Model card for {model_name}\n"
if 'description' in model_card:
readme_text += f"\n{model_card['description']}\n"
if 'details' in model_card:
readme_text += f"\n## Model Details\n"
for k, v in model_card['details'].items():
if isinstance(v, (list, tuple)):
readme_text += f"- **{k}:**\n"
for vi in v:
readme_text += f" - {vi}\n"
elif isinstance(v, dict):
readme_text += f"- **{k}:**\n"
for ki, vi in v.items():
readme_text += f" - {ki}: {vi}\n"
else:
readme_text += f"- **{k}:** {v}\n"
if 'usage' in model_card:
readme_text += f"\n## Model Usage\n"
readme_text += model_card['usage']
readme_text += '\n'
if 'comparison' in model_card:
readme_text += f"\n## Model Comparison\n"
readme_text += model_card['comparison']
readme_text += '\n'
if 'citation' in model_card:
readme_text += f"\n## Citation\n"
if not isinstance(model_card['citation'], (list, tuple)):
citations = [model_card['citation']]
else:
citations = model_card['citation']
for c in citations:
readme_text += f"```bibtex\n{c}\n```\n"
return readme_text
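# Illustrative sketch: generate_readme renders a model card dict into the README front matter
# and sections used on the Hub.
def _generate_readme_sketch():
    card = {
        'description': 'A small example model.',
        'details': {'Dataset': 'ImageNet-1k', 'Params (M)': 5.0},
    }
    text = generate_readme(card, 'example-model')
    assert text.startswith('---\n')
    assert '# Model card for example-model' in text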
def _get_safe_alternatives(filename: str) -> Iterable[str]:
"""Returns potential safetensors alternatives for a given filename.
Use case:
        When downloading a model from the Huggingface Hub, we first check whether a .safetensors file exists and, if so, use it.
Main use case is filename "pytorch_model.bin" => check for "model.safetensors" or "pytorch_model.safetensors".
"""
if filename == HF_WEIGHTS_NAME:
yield HF_SAFE_WEIGHTS_NAME
if filename == HF_OPEN_CLIP_WEIGHTS_NAME:
yield HF_OPEN_CLIP_SAFE_WEIGHTS_NAME
if filename not in (HF_WEIGHTS_NAME, HF_OPEN_CLIP_WEIGHTS_NAME) and filename.endswith(".bin"):
yield filename[:-4] + ".safetensors"
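# Illustrative sketch: the safetensors fallbacks probed for a given weights filename.
def _safe_alternatives_sketch():
    assert list(_get_safe_alternatives(HF_WEIGHTS_NAME)) == [HF_SAFE_WEIGHTS_NAME]
    assert list(_get_safe_alternatives('my_model.bin')) == ['my_model.safetensors']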
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/convnext.py | """ ConvNeXt
Papers:
* `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
@Article{liu2022convnet,
author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie},
title = {A ConvNet for the 2020s},
journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2022},
}
* `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808
@article{Woo2023ConvNeXtV2,
title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders},
author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie},
year={2023},
journal={arXiv preprint arXiv:2301.00808},
}
Original code and weights from:
* https://github.com/facebookresearch/ConvNeXt, original copyright below
* https://github.com/facebookresearch/ConvNeXt-V2, original copyright below
Model defs atto, femto, pico, nano and _ols / _hnf variants are timm originals.
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
# ConvNeXt
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the MIT license
# ConvNeXt-V2
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree (Attribution-NonCommercial 4.0 International (CC BY-NC 4.0))
# No code was used directly from ConvNeXt-V2, however the weights are CC BY-NC 4.0 so beware if using commercially.
from collections import OrderedDict
from functools import partial
from typing import Callable, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from timm.layers import trunc_normal_, AvgPool2dSame, DropPath, Mlp, GlobalResponseNormMlp, \
LayerNorm2d, LayerNorm, create_conv2d, get_act_layer, make_divisible, to_ntuple
from timm.layers import NormMlpClassifierHead, ClassifierHead
from ._builder import build_model_with_cfg
from ._manipulate import named_apply, checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this
class Downsample(nn.Module):
def __init__(self, in_chs, out_chs, stride=1, dilation=1):
super().__init__()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
if in_chs != out_chs:
self.conv = create_conv2d(in_chs, out_chs, 1, stride=1)
else:
self.conv = nn.Identity()
def forward(self, x):
x = self.pool(x)
x = self.conv(x)
return x
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
    Unlike the official impl, this one allows a choice of 1 or 2. The 1x1 conv can be faster with an appropriate
    choice of LayerNorm impl; however, as model size increases the tradeoffs appear to change and nn.Linear
    is a better choice. This was observed with PyTorch 1.10 on a 3090 GPU; it could change over time and with different HW.
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 7,
stride: int = 1,
dilation: Union[int, Tuple[int, int]] = (1, 1),
mlp_ratio: float = 4,
conv_mlp: bool = False,
conv_bias: bool = True,
use_grn: bool = False,
ls_init_value: Optional[float] = 1e-6,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: Optional[Callable] = None,
drop_path: float = 0.,
):
"""
Args:
in_chs: Block input channels.
out_chs: Block output channels (same as in_chs if None).
kernel_size: Depthwise convolution kernel size.
stride: Stride of depthwise convolution.
dilation: Tuple specifying input and output dilation of block.
mlp_ratio: MLP expansion ratio.
conv_mlp: Use 1x1 convolutions for MLP and a NCHW compatible norm layer if True.
conv_bias: Apply bias for all convolution (linear) layers.
use_grn: Use GlobalResponseNorm in MLP (from ConvNeXt-V2)
ls_init_value: Layer-scale init values, layer-scale applied if not None.
act_layer: Activation layer.
norm_layer: Normalization layer (defaults to LN if not specified).
drop_path: Stochastic depth probability.
"""
super().__init__()
out_chs = out_chs or in_chs
dilation = to_ntuple(2)(dilation)
act_layer = get_act_layer(act_layer)
if not norm_layer:
norm_layer = LayerNorm2d if conv_mlp else LayerNorm
mlp_layer = partial(GlobalResponseNormMlp if use_grn else Mlp, use_conv=conv_mlp)
self.use_conv_mlp = conv_mlp
self.conv_dw = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation[0],
depthwise=True,
bias=conv_bias,
)
self.norm = norm_layer(out_chs)
self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value is not None else None
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = Downsample(in_chs, out_chs, stride=stride, dilation=dilation[0])
else:
self.shortcut = nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 3, 1, 2)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + self.shortcut(shortcut)
return x
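# Illustrative sketch: with defaults the block preserves spatial size and channel count for
# both MLP variants described in the docstring (channels-last nn.Linear vs 1x1 conv).
def _convnext_block_sketch():
    x = torch.randn(1, 96, 14, 14)
    for conv_mlp in (False, True):
        block = ConvNeXtBlock(96, conv_mlp=conv_mlp)
        assert block(x).shape == x.shape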
class ConvNeXtStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=7,
stride=2,
depth=2,
dilation=(1, 1),
drop_path_rates=None,
ls_init_value=1.0,
conv_mlp=False,
conv_bias=True,
use_grn=False,
act_layer='gelu',
norm_layer=None,
norm_layer_cl=None
):
super().__init__()
self.grad_checkpointing = False
if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]:
ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1
pad = 'same' if dilation[1] > 1 else 0 # same padding needed if dilation used
self.downsample = nn.Sequential(
norm_layer(in_chs),
create_conv2d(
in_chs,
out_chs,
kernel_size=ds_ks,
stride=stride,
dilation=dilation[0],
padding=pad,
bias=conv_bias,
),
)
in_chs = out_chs
else:
self.downsample = nn.Identity()
drop_path_rates = drop_path_rates or [0.] * depth
stage_blocks = []
for i in range(depth):
stage_blocks.append(ConvNeXtBlock(
in_chs=in_chs,
out_chs=out_chs,
kernel_size=kernel_size,
dilation=dilation[1],
drop_path=drop_path_rates[i],
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
conv_bias=conv_bias,
use_grn=use_grn,
act_layer=act_layer,
norm_layer=norm_layer if conv_mlp else norm_layer_cl,
))
in_chs = out_chs
self.blocks = nn.Sequential(*stage_blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
"""
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
output_stride: int = 32,
depths: Tuple[int, ...] = (3, 3, 9, 3),
dims: Tuple[int, ...] = (96, 192, 384, 768),
kernel_sizes: Union[int, Tuple[int, ...]] = 7,
ls_init_value: Optional[float] = 1e-6,
stem_type: str = 'patch',
patch_size: int = 4,
head_init_scale: float = 1.,
head_norm_first: bool = False,
head_hidden_size: Optional[int] = None,
conv_mlp: bool = False,
conv_bias: bool = True,
use_grn: bool = False,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: Optional[Union[str, Callable]] = None,
norm_eps: Optional[float] = None,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
):
"""
Args:
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
global_pool: Global pooling type.
output_stride: Output stride of network, one of (8, 16, 32).
depths: Number of blocks at each stage.
dims: Feature dimension at each stage.
kernel_sizes: Depthwise convolution kernel-sizes for each stage.
ls_init_value: Init value for Layer Scale, disabled if None.
stem_type: Type of stem.
patch_size: Stem patch size for patch stem.
head_init_scale: Init scaling value for classifier weights and biases.
head_norm_first: Apply normalization before global pool + head.
head_hidden_size: Size of MLP hidden layer in head if not None and head_norm_first == False.
conv_mlp: Use 1x1 conv in MLP, improves speed for small networks w/ chan last.
conv_bias: Use bias layers w/ all convolutions.
use_grn: Use Global Response Norm (ConvNeXt-V2) in MLP.
act_layer: Activation layer type.
norm_layer: Normalization layer type.
drop_rate: Head pre-classifier dropout rate.
drop_path_rate: Stochastic depth drop rate.
"""
super().__init__()
assert output_stride in (8, 16, 32)
kernel_sizes = to_ntuple(4)(kernel_sizes)
if norm_layer is None:
norm_layer = LayerNorm2d
norm_layer_cl = norm_layer if conv_mlp else LayerNorm
if norm_eps is not None:
norm_layer = partial(norm_layer, eps=norm_eps)
norm_layer_cl = partial(norm_layer_cl, eps=norm_eps)
else:
assert conv_mlp,\
                'If a norm_layer is specified, conv MLP must be used so all norm layers expect rank-4, channels-first input'
norm_layer_cl = norm_layer
if norm_eps is not None:
norm_layer_cl = partial(norm_layer_cl, eps=norm_eps)
self.num_classes = num_classes
self.drop_rate = drop_rate
self.feature_info = []
assert stem_type in ('patch', 'overlap', 'overlap_tiered')
if stem_type == 'patch':
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias),
norm_layer(dims[0]),
)
stem_stride = patch_size
else:
mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0]
self.stem = nn.Sequential(
nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias),
nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias),
norm_layer(dims[0]),
)
stem_stride = 4
self.stages = nn.Sequential()
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
prev_chs = dims[0]
curr_stride = stem_stride
dilation = 1
# 4 feature resolution stages, each consisting of multiple residual blocks
for i in range(4):
stride = 2 if curr_stride == 2 or i > 0 else 1
if curr_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
curr_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
out_chs = dims[i]
stages.append(ConvNeXtStage(
prev_chs,
out_chs,
kernel_size=kernel_sizes[i],
stride=stride,
dilation=(first_dilation, dilation),
depth=depths[i],
drop_path_rates=dp_rates[i],
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
conv_bias=conv_bias,
use_grn=use_grn,
act_layer=act_layer,
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
))
prev_chs = out_chs
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = prev_chs
# if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets
# otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights)
if head_norm_first:
assert not head_hidden_size
self.norm_pre = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
else:
self.norm_pre = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
hidden_size=head_hidden_size,
pool_type=global_pool,
drop_rate=self.drop_rate,
norm_layer=norm_layer,
act_layer='gelu',
)
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm_pre', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool=None):
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
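# Usage sketch (assumes the timm package is installed; imported lazily inside the function to
# avoid a circular import at module load time): create a registered ConvNeXt variant and grab
# the pre-pool feature map.
def _convnext_usage_sketch():
    import timm
    model = timm.create_model('convnext_tiny', pretrained=False)
    feats = model.forward_features(torch.randn(1, 3, 224, 224))
    assert feats.shape == (1, 768, 7, 7)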
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
nn.init.zeros_(module.bias)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap FB checkpoints -> timm """
if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict:
return state_dict # non-FB checkpoint
if 'model' in state_dict:
state_dict = state_dict['model']
out_dict = {}
if 'visual.trunk.stem.0.weight' in state_dict:
out_dict = {k.replace('visual.trunk.', ''): v for k, v in state_dict.items() if k.startswith('visual.trunk.')}
if 'visual.head.proj.weight' in state_dict:
out_dict['head.fc.weight'] = state_dict['visual.head.proj.weight']
out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0])
elif 'visual.head.mlp.fc1.weight' in state_dict:
out_dict['head.pre_logits.fc.weight'] = state_dict['visual.head.mlp.fc1.weight']
out_dict['head.pre_logits.fc.bias'] = state_dict['visual.head.mlp.fc1.bias']
out_dict['head.fc.weight'] = state_dict['visual.head.mlp.fc2.weight']
out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.mlp.fc2.weight'].shape[0])
return out_dict
import re
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
k = k.replace('dwconv', 'conv_dw')
k = k.replace('pwconv', 'mlp.fc')
if 'grn' in k:
k = k.replace('grn.beta', 'mlp.grn.bias')
k = k.replace('grn.gamma', 'mlp.grn.weight')
v = v.reshape(v.shape[-1])
k = k.replace('head.', 'head.fc.')
if k.startswith('norm.'):
k = k.replace('norm', 'head.norm')
if v.ndim == 2 and 'head' not in k:
model_shape = model.state_dict()[k].shape
v = v.reshape(model_shape)
out_dict[k] = v
return out_dict
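# Illustrative sketch: a couple of original FB checkpoint keys and where the remapping above
# sends them. Passing model=None is fine here because no 2D (linear/head) reshape is triggered
# for these 4D conv weights.
def _checkpoint_remap_sketch():
    fb_sd = {
        'downsample_layers.0.0.weight': torch.zeros(96, 3, 4, 4),
        'stages.0.0.dwconv.weight': torch.zeros(96, 1, 7, 7),
    }
    remapped = checkpoint_filter_fn(fb_sd, model=None)
    assert 'stem.0.weight' in remapped
    assert 'stages.0.blocks.0.conv_dw.weight' in remapped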
def _create_convnext(variant, pretrained=False, **kwargs):
if kwargs.get('pretrained_cfg', '') == 'fcmae':
# NOTE fcmae pretrained weights have no classifier or final norm-layer (`head.norm`)
        # This is a workaround for loading with num_classes=0 without removing the norm layer.
kwargs.setdefault('pretrained_strict', False)
model = build_model_with_cfg(
ConvNeXt, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
def _cfgv2(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
'license': 'cc-by-nc-4.0', 'paper_ids': 'arXiv:2301.00808',
'paper_name': 'ConvNeXt-V2: Co-designing and Scaling ConvNets with Masked Autoencoders',
'origin_url': 'https://github.com/facebookresearch/ConvNeXt-V2',
**kwargs
}
default_cfgs = generate_default_cfgs({
# timm specific variants
'convnext_tiny.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_atto.d2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_atto_ols.a2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_femto.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_femto_ols.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_pico.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_pico_ols.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano.d1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano_ols.d1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny_hnf.a2h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny.in12k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_small.in12k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_nano.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_tiny.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_small.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_tiny.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_base.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_large.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_xlarge.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_base.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_large.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_small.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_base.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_xlarge.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_tiny.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_small.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_base.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_large.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_xlarge.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnextv2_nano.fcmae_ft_in22k_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_nano.fcmae_ft_in22k_in1k_384': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_tiny.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_tiny.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_base.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_base.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_large.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_large.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_huge.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_huge.fcmae_ft_in22k_in1k_512': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt",
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(15, 15), crop_pct=1.0, crop_mode='squash'),
'convnextv2_atto.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_femto.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_pico.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_nano.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_tiny.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_base.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_large.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_huge.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_atto.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_femto.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_pico.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_nano.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_tiny.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_base.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_large.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_huge.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_small.untrained': _cfg(),
# CLIP weights, fine-tuned on in1k or in12k + in1k
'convnext_base.clip_laion2b_augreg_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_base.clip_laion2b_augreg_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laiona_augreg_ft_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_augreg_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0
),
'convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'
),
'convnext_xxlarge.clip_laion2b_soup_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laion2b_augreg_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_augreg_ft_in12k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_xxlarge.clip_laion2b_soup_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
# CLIP original image tower weights
'convnext_base.clip_laion2b': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laion2b_augreg': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona_320': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona_augreg_320': _cfg(
hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640),
'convnext_large_mlp.clip_laion2b_augreg': _cfg(
hf_hub_id='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=768),
'convnext_large_mlp.clip_laion2b_ft_320': _cfg(
hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768),
'convnext_large_mlp.clip_laion2b_ft_soup_320': _cfg(
hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768),
'convnext_xxlarge.clip_laion2b_soup': _cfg(
hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024),
'convnext_xxlarge.clip_laion2b_rewind': _cfg(
hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-rewind',
hf_hub_filename='open_clip_pytorch_model.bin',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024),
})
@register_model
def convnext_atto(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M)
model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True)
model = _create_convnext('convnext_atto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_atto_ols(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant with overlapping 3x3 conv stem, same widths as non-ols atto above, current param count 3.7M
model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_femto(pretrained=False, **kwargs) -> ConvNeXt:
# timm femto variant
model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True)
model = _create_convnext('convnext_femto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_femto_ols(pretrained=False, **kwargs) -> ConvNeXt:
    # timm femto variant with overlapping 3x3 conv stem
model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_pico(pretrained=False, **kwargs) -> ConvNeXt:
# timm pico variant
model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True)
model = _create_convnext('convnext_pico', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_pico_ols(pretrained=False, **kwargs) -> ConvNeXt:
    # timm pico variant with overlapping 3x3 conv stem
model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_nano(pretrained=False, **kwargs) -> ConvNeXt:
# timm nano variant with standard stem and head
model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True)
model = _create_convnext('convnext_nano', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_nano_ols(pretrained=False, **kwargs) -> ConvNeXt:
# experimental nano variant with overlapping conv stem
model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap')
model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_tiny_hnf(pretrained=False, **kwargs) -> ConvNeXt:
# experimental tiny variant with norm before pooling in head (head norm first)
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True)
model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_tiny(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768))
model = _create_convnext('convnext_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_small(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768])
model = _create_convnext('convnext_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_base(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024])
model = _create_convnext('convnext_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_large(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536])
model = _create_convnext('convnext_large', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_large_mlp(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], head_hidden_size=1536)
model = _create_convnext('convnext_large_mlp', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_xlarge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048])
model = _create_convnext('convnext_xlarge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_xxlarge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 4, 30, 3], dims=[384, 768, 1536, 3072], norm_eps=kwargs.pop('norm_eps', 1e-5))
model = _create_convnext('convnext_xxlarge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_atto(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M)
model_args = dict(
depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_atto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_femto(pretrained=False, **kwargs) -> ConvNeXt:
# timm femto variant
model_args = dict(
depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_femto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_pico(pretrained=False, **kwargs) -> ConvNeXt:
# timm pico variant
model_args = dict(
depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_pico', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_nano(pretrained=False, **kwargs) -> ConvNeXt:
# timm nano variant with standard stem and head
model_args = dict(
depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_nano', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_tiny(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_small(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_base(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_large(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_large', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_huge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_huge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
'convnext_tiny_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k',
'convnext_small_in22ft1k': 'convnext_small.fb_in22k_ft_in1k',
'convnext_base_in22ft1k': 'convnext_base.fb_in22k_ft_in1k',
'convnext_large_in22ft1k': 'convnext_large.fb_in22k_ft_in1k',
'convnext_xlarge_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k',
'convnext_tiny_384_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k_384',
'convnext_small_384_in22ft1k': 'convnext_small.fb_in22k_ft_in1k_384',
'convnext_base_384_in22ft1k': 'convnext_base.fb_in22k_ft_in1k_384',
'convnext_large_384_in22ft1k': 'convnext_large.fb_in22k_ft_in1k_384',
'convnext_xlarge_384_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k_384',
'convnext_tiny_in22k': 'convnext_tiny.fb_in22k',
'convnext_small_in22k': 'convnext_small.fb_in22k',
'convnext_base_in22k': 'convnext_base.fb_in22k',
'convnext_large_in22k': 'convnext_large.fb_in22k',
'convnext_xlarge_in22k': 'convnext_xlarge.fb_in22k',
})
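# Illustrative usage sketch (not part of the original file): the registrations above expose each
# architecture as `model_name.pretrained_tag`, and register_model_deprecations() keeps the old
# flat names resolving to the new tagged ones (with a deprecation warning). The helper below is
# hypothetical and included only for illustration; it assumes the public timm factory API
# (timm.create_model / timm.list_models) defined elsewhere in this package.
def _convnext_usage_example():
    import timm
    # select an explicit pretrained tag; pretrained=False keeps the example download-free
    model = timm.create_model('convnext_tiny.fb_in1k', pretrained=False, num_classes=10)
    # enumerate every registered convnext variant
    names = timm.list_models('convnext*')
    return model, names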
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/beit.py | """ BEiT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
Model from official source: https://github.com/microsoft/unilm/tree/master/beit
@inproceedings{beit,
title={{BEiT}: {BERT} Pre-Training of Image Transformers},
author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei},
booktitle={International Conference on Learning Representations},
year={2022},
url={https://openreview.net/forum?id=p-BhZSz59o4}
}
BEiT-v2 from https://github.com/microsoft/unilm/tree/master/beit2
@article{beitv2,
title={{BEiT v2}: Masked Image Modeling with Vector-Quantized Visual Tokenizers},
author={Zhiliang Peng and Li Dong and Hangbo Bao and Qixiang Ye and Furu Wei},
year={2022},
eprint={2208.06366},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
At this point only the 1k fine-tuned classification weights and model configs have been added,
see original source above for pre-training models and procedure.
Modifications by / Copyright 2021 Ross Wightman, original copyrights below
"""
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------
import math
from typing import Callable, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, SwiGLU, LayerNorm, DropPath, trunc_normal_, use_fused_attn
from timm.layers import resample_patch_embed, resample_abs_pos_embed, resize_rel_pos_bias_table
from ._builder import build_model_with_cfg
from ._registry import generate_default_cfgs, register_model
from .vision_transformer import checkpoint_filter_fn
__all__ = ['Beit']
def gen_relative_position_index(window_size: Tuple[int, int]) -> torch.Tensor:
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
    # the +3 covers the cls-to-token, token-to-cls, and cls-to-cls relative positions
# get pair-wise relative position index for each token inside the window
window_area = window_size[0] * window_size[1]
coords = torch.stack(torch.meshgrid(
[torch.arange(window_size[0]),
torch.arange(window_size[1])])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = num_relative_distance - 3
relative_position_index[0:, 0] = num_relative_distance - 2
relative_position_index[0, 0] = num_relative_distance - 1
return relative_position_index
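# Worked example (illustration only, not part of the original file): for a 2x2 window the table
# holds (2*2-1)*(2*2-1) + 3 = 12 distinct relative positions, and the returned index covers the
# 4 patch tokens plus the cls token, i.e. a 5x5 tensor with cls-to-cls mapped to the last slot.
# The helper below is hypothetical and only sanity-checks those values.
def _demo_gen_relative_position_index():
    idx = gen_relative_position_index((2, 2))
    assert idx.shape == (5, 5)  # (window_area + 1) x (window_area + 1)
    assert int(idx.max()) == 12 - 1  # cls-to-cls uses slot num_relative_distance - 1
    return idx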
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
attn_drop: float = 0.,
proj_drop: float = 0.,
window_size: Optional[Tuple[int, int]] = None,
attn_head_dim: Optional[int] = None,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False)
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.k_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
self.register_buffer("relative_position_index", gen_relative_position_index(window_size), persistent=False)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def _get_rel_pos_bias(self):
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
return relative_position_bias.unsqueeze(0)
def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
B, N, C = x.shape
qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim
if self.fused_attn:
rel_pos_bias = None
if self.relative_position_bias_table is not None:
rel_pos_bias = self._get_rel_pos_bias()
if shared_rel_pos_bias is not None:
rel_pos_bias = rel_pos_bias + shared_rel_pos_bias
elif shared_rel_pos_bias is not None:
rel_pos_bias = shared_rel_pos_bias
x = F.scaled_dot_product_attention(
q, k, v,
attn_mask=rel_pos_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
attn = attn + self._get_rel_pos_bias()
if shared_rel_pos_bias is not None:
attn = attn + shared_rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
qkv_bias: bool = False,
mlp_ratio: float = 4.,
scale_mlp: bool = False,
swiglu_mlp: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
init_values: Optional[float] = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
window_size: Optional[Tuple[int, int]] = None,
attn_head_dim: Optional[int] = None,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
window_size=window_size,
attn_head_dim=attn_head_dim,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
if swiglu_mlp:
self.mlp = SwiGLU(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
else:
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if init_values:
self.gamma_1 = nn.Parameter(init_values * torch.ones(dim))
self.gamma_2 = nn.Parameter(init_values * torch.ones(dim))
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
if self.gamma_1 is None:
x = x + self.drop_path1(self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.window_area = window_size[0] * window_size[1]
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads))
# trunc_normal_(self.relative_position_bias_table, std=.02)
self.register_buffer("relative_position_index", gen_relative_position_index(window_size))
def forward(self):
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_area + 1, self.window_area + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
class Beit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
qkv_bias: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
norm_layer: Callable = LayerNorm,
init_values: Optional[float] = None,
use_abs_pos_emb: bool = True,
use_rel_pos_bias: bool = False,
use_shared_rel_pos_bias: bool = False,
head_init_scale: float = 0.001,
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1
self.grad_checkpointing = False
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if use_abs_pos_emb else None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(
window_size=self.patch_embed.grid_size,
num_heads=num_heads,
)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
mlp_ratio=mlp_ratio,
scale_mlp=scale_mlp,
swiglu_mlp=swiglu_mlp,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values,
window_size=self.patch_embed.grid_size if use_rel_pos_bias else None,
)
for i in range(depth)])
use_fc_norm = self.global_pool == 'avg'
self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.fix_init_weight()
if isinstance(self.head, nn.Linear):
trunc_normal_(self.head.weight, std=.02)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
nwd = {'pos_embed', 'cls_token'}
for n, _ in self.named_parameters():
if 'relative_position_bias_table' in n:
nwd.add(n)
return nwd
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^cls_token|pos_embed|patch_embed|rel_pos_bias', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))],
)
return matcher
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
else:
x = blk(x, shared_rel_pos_bias=rel_pos_bias)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'beit_base_patch16_224.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth',
hf_hub_id='timm/'),
'beit_base_patch16_384.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0,
),
'beit_base_patch16_224.in22k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth',
hf_hub_id='timm/',
num_classes=21841,
),
'beit_large_patch16_224.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth',
hf_hub_id='timm/'),
'beit_large_patch16_384.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0,
),
'beit_large_patch16_512.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 512, 512), crop_pct=1.0,
),
'beit_large_patch16_224.in22k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth',
hf_hub_id='timm/',
num_classes=21841,
),
'beitv2_base_patch16_224.in1k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21kto1k.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_base_patch16_224.in1k_ft_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft1k.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_base_patch16_224.in1k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21k.pth',
hf_hub_id='timm/',
num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21kto1k.pth',
hf_hub_id='timm/',
crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft1k.pth',
hf_hub_id='timm/',
crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth',
hf_hub_id='timm/',
num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
})
def _beit_checkpoint_filter_fn(state_dict, model, interpolation='bicubic', antialias=True):
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('module', state_dict)
# beit v2 didn't strip module
out_dict = {}
for k, v in state_dict.items():
if 'relative_position_index' in k:
continue
if 'patch_embed.proj.weight' in k:
O, I, H, W = model.patch_embed.proj.weight.shape
if v.shape[-1] != W or v.shape[-2] != H:
v = resample_patch_embed(
v,
(H, W),
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
num_prefix_tokens = 1
v = resample_abs_pos_embed(
v,
new_size=model.patch_embed.grid_size,
num_prefix_tokens=num_prefix_tokens,
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k.endswith('relative_position_bias_table'):
m = model.get_submodule(k[:-29])
if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
v = resize_rel_pos_bias_table(
v,
new_window_size=m.window_size,
new_bias_shape=m.relative_position_bias_table.shape,
)
out_dict[k] = v
return out_dict
def _create_beit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for BEiT models.')
model = build_model_with_cfg(
Beit, variant, pretrained,
# FIXME an updated filter fn needed to interpolate rel pos emb if fine tuning to diff model sizes
pretrained_filter_fn=_beit_checkpoint_filter_fn,
**kwargs)
return model
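# Usage sketch (illustration only, not from the original file): models are normally built via the
# timm factory. When pretrained weights are loaded at a different image size, the
# _beit_checkpoint_filter_fn above resamples absolute position embeddings, patch embedding kernels
# and relative position bias tables to fit. The helper below is hypothetical and builds a randomly
# initialised model at a non-default resolution so the example needs no download.
def _beit_usage_example():
    import timm
    return timm.create_model('beit_base_patch16_224', pretrained=False, img_size=256)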
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_512(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beitv2_base_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beitv2_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beitv2_large_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beitv2_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/tnt.py | """ Transformer in Transformer (TNT) in PyTorch
A PyTorch implementation of TNT as described in
'Transformer in Transformer' - https://arxiv.org/abs/2103.00112
The official mindspore code is released and available at
https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT
"""
import math
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, DropPath, trunc_normal_, _assert, to_2tuple
from ._builder import build_model_with_cfg
from ._registry import register_model
from .vision_transformer import resize_pos_embed
__all__ = ['TNT'] # model_registry will add each entrypoint fn to this
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'pixel_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'tnt_s_patch16_224': _cfg(
url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
'tnt_b_patch16_224': _cfg(
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
}
class Attention(nn.Module):
""" Multi-Head Attention
"""
def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.hidden_dim = hidden_dim
self.num_heads = num_heads
head_dim = hidden_dim // num_heads
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop, inplace=True)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop, inplace=True)
def forward(self, x):
B, N, C = x.shape
qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k = qk.unbind(0) # make torchscript happy (cannot use tensor as tuple)
v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
""" TNT Block
"""
def __init__(
self,
dim,
dim_out,
num_pixel,
num_heads_in=4,
num_heads_out=12,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
# Inner transformer
self.norm_in = norm_layer(dim)
self.attn_in = Attention(
dim,
dim,
num_heads=num_heads_in,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm_mlp_in = norm_layer(dim)
self.mlp_in = Mlp(
in_features=dim,
hidden_features=int(dim * 4),
out_features=dim,
act_layer=act_layer,
drop=proj_drop,
)
self.norm1_proj = norm_layer(dim)
self.proj = nn.Linear(dim * num_pixel, dim_out, bias=True)
# Outer transformer
self.norm_out = norm_layer(dim_out)
self.attn_out = Attention(
dim_out,
dim_out,
num_heads=num_heads_out,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm_mlp = norm_layer(dim_out)
self.mlp = Mlp(
in_features=dim_out,
hidden_features=int(dim_out * mlp_ratio),
out_features=dim_out,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, pixel_embed, patch_embed):
# inner
pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))
pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))
# outer
B, N, C = patch_embed.size()
patch_embed = torch.cat(
[patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))],
dim=1)
patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed)))
patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed)))
return pixel_embed, patch_embed
class PixelEmbed(nn.Module):
""" Image to Pixel Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
# grid_size property necessary for resizing positional embedding
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
num_patches = (self.grid_size[0]) * (self.grid_size[1])
self.img_size = img_size
self.num_patches = num_patches
self.in_dim = in_dim
new_patch_size = [math.ceil(ps / stride) for ps in patch_size]
self.new_patch_size = new_patch_size
self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)
self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size)
def forward(self, x, pixel_pos):
B, C, H, W = x.shape
_assert(H == self.img_size[0],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
_assert(W == self.img_size[1],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
x = self.proj(x)
x = self.unfold(x)
x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1])
x = x + pixel_pos
x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2)
return x
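# Shape trace (illustration only, not part of the original file): with the tnt_s defaults
# (img_size=224, patch_size=16, first_stride=4, inner_dim=24) the stem conv yields a 56x56 map,
# unfold cuts it into 14*14 = 196 patches of 4x4 pixels, and each image becomes a (196, 16, 24)
# stack of pixel tokens. The hypothetical check below verifies those shapes.
def _demo_pixel_embed_shapes():
    pe = PixelEmbed(img_size=224, patch_size=16, in_chans=3, in_dim=24, stride=4)
    pixel_pos = torch.zeros(1, 24, pe.new_patch_size[0], pe.new_patch_size[1])
    out = pe(torch.zeros(2, 3, 224, 224), pixel_pos)
    assert out.shape == (2 * 196, 16, 24)
    return out.shape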
class TNT(nn.Module):
""" Transformer in Transformer - https://arxiv.org/abs/2103.00112
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
inner_dim=48,
depth=12,
num_heads_inner=4,
num_heads_outer=12,
mlp_ratio=4.,
qkv_bias=False,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
first_stride=4,
):
super().__init__()
assert global_pool in ('', 'token', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.grad_checkpointing = False
self.pixel_embed = PixelEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
in_dim=inner_dim,
stride=first_stride,
)
num_patches = self.pixel_embed.num_patches
self.num_patches = num_patches
new_patch_size = self.pixel_embed.new_patch_size
num_pixel = new_patch_size[0] * new_patch_size[1]
self.norm1_proj = norm_layer(num_pixel * inner_dim)
self.proj = nn.Linear(num_pixel * inner_dim, embed_dim)
self.norm2_proj = norm_layer(embed_dim)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pixel_pos = nn.Parameter(torch.zeros(1, inner_dim, new_patch_size[0], new_patch_size[1]))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
blocks = []
for i in range(depth):
blocks.append(Block(
dim=inner_dim,
dim_out=embed_dim,
num_pixel=num_pixel,
num_heads_in=num_heads_inner,
num_heads_out=num_heads_outer,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
))
self.blocks = nn.ModuleList(blocks)
self.norm = norm_layer(embed_dim)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
trunc_normal_(self.patch_pos, std=.02)
trunc_normal_(self.pixel_pos, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'patch_pos', 'pixel_pos', 'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^cls_token|patch_pos|pixel_pos|pixel_embed|norm[12]_proj|proj', # stem and embed / pos
blocks=[
(r'^blocks\.(\d+)', None),
(r'^norm', (99999,)),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'token', 'avg')
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
pixel_embed = self.pixel_embed(x, self.pixel_pos)
patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1))))
patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1)
patch_embed = patch_embed + self.patch_pos
patch_embed = self.pos_drop(patch_embed)
if self.grad_checkpointing and not torch.jit.is_scripting():
for blk in self.blocks:
pixel_embed, patch_embed = checkpoint(blk, pixel_embed, patch_embed)
else:
for blk in self.blocks:
pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
patch_embed = self.norm(patch_embed)
return patch_embed
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
if state_dict['patch_pos'].shape != model.patch_pos.shape:
state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'],
model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size)
return state_dict
def _create_tnt(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for TNT models.')
model = build_model_with_cfg(
TNT, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
@register_model
def tnt_s_patch16_224(pretrained=False, **kwargs) -> TNT:
model_cfg = dict(
patch_size=16, embed_dim=384, inner_dim=24, depth=12, num_heads_outer=6,
qkv_bias=False)
model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def tnt_b_patch16_224(pretrained=False, **kwargs) -> TNT:
model_cfg = dict(
patch_size=16, embed_dim=640, inner_dim=40, depth=12, num_heads_outer=10,
qkv_bias=False)
model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/davit.py | """ DaViT: Dual Attention Vision Transformers
As described in https://arxiv.org/abs/2204.03645
Input size invariant transformer architecture that combines channel and spatial
attention in each block. The attention mechanisms used are linear in complexity.
DaViT model defs and weights adapted from https://github.com/dingmyu/davit, original copyright below
"""
# Copyright (c) 2022 Mingyu Ding
# All rights reserved.
# This source code is licensed under the MIT license
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, trunc_normal_, Mlp, LayerNorm2d, get_norm_layer, use_fused_attn
from timm.layers import NormMlpClassifierHead, ClassifierHead
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['DaVit']
class ConvPosEnc(nn.Module):
def __init__(self, dim: int, k: int = 3, act: bool = False):
super(ConvPosEnc, self).__init__()
self.proj = nn.Conv2d(dim, dim, k, 1, k // 2, groups=dim)
self.act = nn.GELU() if act else nn.Identity()
def forward(self, x: Tensor):
feat = self.proj(x)
x = x + self.act(feat)
return x
class Stem(nn.Module):
""" Size-agnostic implementation of 2D image to patch embedding,
allowing input size to be adjusted during model forward operation
"""
def __init__(
self,
in_chs=3,
out_chs=96,
stride=4,
norm_layer=LayerNorm2d,
):
super().__init__()
stride = to_2tuple(stride)
self.stride = stride
self.in_chs = in_chs
self.out_chs = out_chs
assert stride[0] == 4 # only setup for stride==4
self.conv = nn.Conv2d(
in_chs,
out_chs,
kernel_size=7,
stride=stride,
padding=3,
)
self.norm = norm_layer(out_chs)
def forward(self, x: Tensor):
B, C, H, W = x.shape
x = F.pad(x, (0, (self.stride[1] - W % self.stride[1]) % self.stride[1]))
x = F.pad(x, (0, 0, 0, (self.stride[0] - H % self.stride[0]) % self.stride[0]))
x = self.conv(x)
x = self.norm(x)
return x
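# Padding arithmetic (illustration only, not part of the original file): for inputs that are not a
# multiple of the stride, e.g. H = W = 225 with stride 4, forward pads right/bottom by
# (4 - 225 % 4) % 4 = 3 to reach 228, and the 7x7 stride-4 conv (padding 3) then yields a 57x57
# feature map. The hypothetical check below verifies that behaviour.
def _demo_stem_padding():
    stem = Stem(in_chs=3, out_chs=96)
    out = stem(torch.zeros(1, 3, 225, 225))
    assert out.shape[-2:] == (57, 57)
    return out.shape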
class Downsample(nn.Module):
def __init__(
self,
in_chs,
out_chs,
norm_layer=LayerNorm2d,
):
super().__init__()
self.in_chs = in_chs
self.out_chs = out_chs
self.norm = norm_layer(in_chs)
self.conv = nn.Conv2d(
in_chs,
out_chs,
kernel_size=2,
stride=2,
padding=0,
)
def forward(self, x: Tensor):
B, C, H, W = x.shape
x = self.norm(x)
x = F.pad(x, (0, (2 - W % 2) % 2))
x = F.pad(x, (0, 0, 0, (2 - H % 2) % 2))
x = self.conv(x)
return x
class ChannelAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
def forward(self, x: Tensor):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
k = k * self.scale
attention = k.transpose(-1, -2) @ v
attention = attention.softmax(dim=-1)
x = (attention @ q.transpose(-1, -2)).transpose(-1, -2)
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
return x
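# Complexity note (illustration only, not part of the original file): the transposed product
# k.transpose(-1, -2) @ v above is (head_dim x head_dim) per head, so channel attention never
# materialises an N x N map and its cost grows linearly with the number of tokens N. This is the
# "linear in complexity" attention referred to in the module docstring.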
class ChannelBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
ffn=True,
cpe_act=False,
):
super().__init__()
self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act)
self.ffn = ffn
self.norm1 = norm_layer(dim)
self.attn = ChannelAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act)
if self.ffn:
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
else:
self.norm2 = None
self.mlp = None
self.drop_path2 = None
def forward(self, x: Tensor):
B, C, H, W = x.shape
x = self.cpe1(x).flatten(2).transpose(1, 2)
cur = self.norm1(x)
cur = self.attn(cur)
x = x + self.drop_path1(cur)
x = self.cpe2(x.transpose(1, 2).view(B, C, H, W))
if self.mlp is not None:
x = x.flatten(2).transpose(1, 2)
x = x + self.drop_path2(self.mlp(self.norm2(x)))
x = x.transpose(1, 2).view(B, C, H, W)
return x
def window_partition(x: Tensor, window_size: Tuple[int, int]):
"""
Args:
x: (B, H, W, C)
        window_size (Tuple[int, int]): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows: Tensor, window_size: Tuple[int, int], H: int, W: int):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
        window_size (Tuple[int, int]): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
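# Round-trip sketch (illustrative): when H and W are multiples of the window size,
# window_partition followed by window_reverse reproduces the input exactly.
#   x = torch.randn(2, 14, 14, 96)       # (B, H, W, C)
#   w = window_partition(x, (7, 7))      # -> (8, 7, 7, 96)
#   assert torch.equal(window_reverse(w, (7, 7), 14, 14), x)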
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
"""
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, window_size, num_heads, qkv_bias=True):
super().__init__()
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x: Tensor):
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = self.softmax(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
return x
class SpatialBlock(nn.Module):
r""" Windows Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(
self,
dim,
num_heads,
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
ffn=True,
cpe_act=False,
):
super().__init__()
self.dim = dim
self.ffn = ffn
self.num_heads = num_heads
self.window_size = to_2tuple(window_size)
self.mlp_ratio = mlp_ratio
self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act)
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
self.window_size,
num_heads=num_heads,
qkv_bias=qkv_bias,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act)
if self.ffn:
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
else:
self.norm2 = None
self.mlp = None
            self.drop_path2 = None
def forward(self, x: Tensor):
B, C, H, W = x.shape
shortcut = self.cpe1(x).flatten(2).transpose(1, 2)
x = self.norm1(shortcut)
x = x.view(B, H, W, C)
pad_l = pad_t = 0
pad_r = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
pad_b = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
x_windows = window_partition(x, self.window_size)
x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C)
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows)
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
x = window_reverse(attn_windows, self.window_size, Hp, Wp)
        # strip any right/bottom padding back to the original H, W
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
x = shortcut + self.drop_path1(x)
x = self.cpe2(x.transpose(1, 2).view(B, C, H, W))
if self.mlp is not None:
x = x.flatten(2).transpose(1, 2)
x = x + self.drop_path2(self.mlp(self.norm2(x)))
x = x.transpose(1, 2).view(B, C, H, W)
return x
class DaVitStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth=1,
downsample=True,
attn_types=('spatial', 'channel'),
num_heads=3,
window_size=7,
mlp_ratio=4,
qkv_bias=True,
drop_path_rates=(0, 0),
norm_layer=LayerNorm2d,
norm_layer_cl=nn.LayerNorm,
ffn=True,
cpe_act=False
):
super().__init__()
self.grad_checkpointing = False
# downsample embedding layer at the beginning of each stage
if downsample:
self.downsample = Downsample(in_chs, out_chs, norm_layer=norm_layer)
else:
self.downsample = nn.Identity()
        # Repeating alternating attention blocks in each stage, default: (spatial -> channel) x depth.
        # Potential opportunity to integrate with a more general version of ByobNet/ByoaNet since the logic is similar.
stage_blocks = []
for block_idx in range(depth):
dual_attention_block = []
for attn_idx, attn_type in enumerate(attn_types):
if attn_type == 'spatial':
dual_attention_block.append(SpatialBlock(
dim=out_chs,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path=drop_path_rates[block_idx],
norm_layer=norm_layer_cl,
ffn=ffn,
cpe_act=cpe_act,
window_size=window_size,
))
elif attn_type == 'channel':
dual_attention_block.append(ChannelBlock(
dim=out_chs,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path=drop_path_rates[block_idx],
norm_layer=norm_layer_cl,
ffn=ffn,
cpe_act=cpe_act
))
stage_blocks.append(nn.Sequential(*dual_attention_block))
self.blocks = nn.Sequential(*stage_blocks)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x: Tensor):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class DaVit(nn.Module):
r""" DaViT
A PyTorch implementation of `DaViT: Dual Attention Vision Transformers` - https://arxiv.org/abs/2204.03645
Supports arbitrary input sizes and pyramid feature extraction
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks in each stage. Default: (1, 1, 3, 1)
embed_dims (tuple(int)): Patch embedding dimension. Default: (96, 192, 384, 768)
num_heads (tuple(int)): Number of attention heads in different layers. Default: (3, 6, 12, 24)
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
"""
def __init__(
self,
in_chans=3,
depths=(1, 1, 3, 1),
embed_dims=(96, 192, 384, 768),
num_heads=(3, 6, 12, 24),
window_size=7,
mlp_ratio=4,
qkv_bias=True,
norm_layer='layernorm2d',
norm_layer_cl='layernorm',
norm_eps=1e-5,
attn_types=('spatial', 'channel'),
ffn=True,
cpe_act=False,
drop_rate=0.,
drop_path_rate=0.,
num_classes=1000,
global_pool='avg',
head_norm_first=False,
):
super().__init__()
num_stages = len(embed_dims)
assert num_stages == len(num_heads) == len(depths)
norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps)
norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps)
self.num_classes = num_classes
self.num_features = embed_dims[-1]
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
self.stem = Stem(in_chans, embed_dims[0], norm_layer=norm_layer)
in_chs = embed_dims[0]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
for stage_idx in range(num_stages):
out_chs = embed_dims[stage_idx]
stage = DaVitStage(
in_chs,
out_chs,
depth=depths[stage_idx],
downsample=stage_idx > 0,
attn_types=attn_types,
num_heads=num_heads[stage_idx],
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path_rates=dpr[stage_idx],
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
ffn=ffn,
cpe_act=cpe_act,
)
in_chs = out_chs
stages.append(stage)
            self.feature_info += [dict(num_chs=out_chs, reduction=2 ** (stage_idx + 2), module=f'stages.{stage_idx}')]
self.stages = nn.Sequential(*stages)
# if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets
# otherwise pool -> norm -> fc, the default DaViT order, similar to ConvNeXt
# FIXME generalize this structure to ClassifierHead
if head_norm_first:
self.norm_pre = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
else:
self.norm_pre = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
norm_layer=norm_layer,
)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for stage in self.stages:
stage.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.head.reset(num_classes, global_pool=global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" Remap MSFT checkpoints -> timm """
if 'head.fc.weight' in state_dict:
return state_dict # non-MSFT checkpoint
if 'state_dict' in state_dict:
state_dict = state_dict['state_dict']
import re
out_dict = {}
for k, v in state_dict.items():
k = re.sub(r'patch_embeds.([0-9]+)', r'stages.\1.downsample', k)
k = re.sub(r'main_blocks.([0-9]+)', r'stages.\1.blocks', k)
k = k.replace('downsample.proj', 'downsample.conv')
k = k.replace('stages.0.downsample', 'stem')
k = k.replace('head.', 'head.fc.')
k = k.replace('norms.', 'head.norm.')
k = k.replace('cpe.0', 'cpe1')
k = k.replace('cpe.1', 'cpe2')
out_dict[k] = v
return out_dict
def _create_davit(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
DaVit,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
# TODO contact authors to get larger pretrained models
default_cfgs = generate_default_cfgs({
# official microsoft weights from https://github.com/dingmyu/davit
'davit_tiny.msft_in1k': _cfg(
hf_hub_id='timm/'),
'davit_small.msft_in1k': _cfg(
hf_hub_id='timm/'),
'davit_base.msft_in1k': _cfg(
hf_hub_id='timm/'),
'davit_large': _cfg(),
'davit_huge': _cfg(),
'davit_giant': _cfg(),
})
@register_model
def davit_tiny(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 3, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24))
return _create_davit('davit_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_small(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 9, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24))
return _create_davit('davit_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_base(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 9, 1), embed_dims=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32))
return _create_davit('davit_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_large(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 9, 1), embed_dims=(192, 384, 768, 1536), num_heads=(6, 12, 24, 48))
return _create_davit('davit_large', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_huge(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 9, 1), embed_dims=(256, 512, 1024, 2048), num_heads=(8, 16, 32, 64))
return _create_davit('davit_huge', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_giant(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 12, 3), embed_dims=(384, 768, 1536, 3072), num_heads=(12, 24, 48, 96))
return _create_davit('davit_giant', pretrained=pretrained, **dict(model_args, **kwargs))
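# Example usage (a hedged sketch; assumes the timm package is installed and, for
# pretrained=True, that the 'msft_in1k' hub weights listed above are reachable):
#   import timm
#   model = timm.create_model('davit_tiny', pretrained=False, num_classes=10)
#   features = timm.create_model('davit_tiny', features_only=True, out_indices=(1, 2, 3))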
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/pit.py | """ Pooling-based Vision Transformer (PiT) in PyTorch
A PyTorch implementation of Pooling-based Vision Transformers as described in
'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302
This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.
Modifications for timm by / Copyright 2020 Ross Wightman
"""
# PiT
# Copyright 2021-present NAVER Corp.
# Apache License v2.0
import math
import re
from functools import partial
from typing import Sequence, Tuple
import torch
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, to_2tuple, LayerNorm
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Block
__all__ = ['PoolingVisionTransformer'] # model_registry will add each entrypoint fn to this
class SequentialTuple(nn.Sequential):
""" This module exists to work around torchscript typing issues list -> list"""
def __init__(self, *args):
super(SequentialTuple, self).__init__(*args)
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
for module in self:
x = module(x)
return x
class Transformer(nn.Module):
def __init__(
self,
base_dim,
depth,
heads,
mlp_ratio,
pool=None,
proj_drop=.0,
attn_drop=.0,
drop_path_prob=None,
norm_layer=None,
):
super(Transformer, self).__init__()
embed_dim = base_dim * heads
self.pool = pool
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=heads,
mlp_ratio=mlp_ratio,
qkv_bias=True,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path_prob[i],
norm_layer=partial(nn.LayerNorm, eps=1e-6)
)
for i in range(depth)])
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
x, cls_tokens = x
token_length = cls_tokens.shape[1]
if self.pool is not None:
x, cls_tokens = self.pool(x, cls_tokens)
B, C, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = torch.cat((cls_tokens, x), dim=1)
x = self.norm(x)
x = self.blocks(x)
cls_tokens = x[:, :token_length]
x = x[:, token_length:]
x = x.transpose(1, 2).reshape(B, C, H, W)
return x, cls_tokens
class Pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(Pooling, self).__init__()
self.conv = nn.Conv2d(
in_feature,
out_feature,
kernel_size=stride + 1,
padding=stride // 2,
stride=stride,
padding_mode=padding_mode,
groups=in_feature,
)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
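# Shape sketch (an assumed example, not from the original file): a Pooling stage halves
# the token grid with a stride-2 grouped conv while a linear layer projects the class
# token to the wider embedding.
#   pool = Pooling(in_feature=96, out_feature=192, stride=2)
#   x, cls = pool(torch.randn(2, 96, 27, 27), torch.randn(2, 1, 96))  # -> (2, 192, 14, 14), (2, 1, 192)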
class ConvEmbedding(nn.Module):
def __init__(
self,
in_channels,
out_channels,
img_size: int = 224,
patch_size: int = 16,
stride: int = 8,
padding: int = 0,
):
super(ConvEmbedding, self).__init__()
self.img_size = to_2tuple(img_size)
self.patch_size = to_2tuple(patch_size)
self.height = math.floor((self.img_size[0] + 2 * padding - self.patch_size[0]) / stride + 1)
self.width = math.floor((self.img_size[1] + 2 * padding - self.patch_size[1]) / stride + 1)
self.grid_size = (self.height, self.width)
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size=patch_size,
stride=stride, padding=padding, bias=True)
def forward(self, x):
x = self.conv(x)
return x
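# Worked grid-size example (illustrative): with the pit_s defaults of img_size=224,
# patch_size=16, stride=8, padding=0 the overlapping conv stem yields a 27 x 27 token
# grid, since floor((224 + 2*0 - 16) / 8 + 1) = 27.
#   embed = ConvEmbedding(3, 144, img_size=224, patch_size=16, stride=8)  # 144 = 48 * 3 heads (pit_s)
#   embed.grid_size  # -> (27, 27)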
class PoolingVisionTransformer(nn.Module):
""" Pooling-based Vision Transformer
    A PyTorch implementation of 'Rethinking Spatial Dimensions of Vision Transformers'
- https://arxiv.org/abs/2103.16302
"""
def __init__(
self,
img_size: int = 224,
patch_size: int = 16,
stride: int = 8,
stem_type: str = 'overlap',
base_dims: Sequence[int] = (48, 48, 48),
depth: Sequence[int] = (2, 6, 4),
heads: Sequence[int] = (2, 4, 8),
mlp_ratio: float = 4,
num_classes=1000,
in_chans=3,
global_pool='token',
distilled=False,
drop_rate=0.,
            pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
):
super(PoolingVisionTransformer, self).__init__()
assert global_pool in ('token',)
self.base_dims = base_dims
self.heads = heads
embed_dim = base_dims[0] * heads[0]
self.num_classes = num_classes
self.global_pool = global_pool
self.num_tokens = 2 if distilled else 1
self.feature_info = []
self.patch_embed = ConvEmbedding(in_chans, embed_dim, img_size, patch_size, stride)
self.pos_embed = nn.Parameter(torch.randn(1, embed_dim, self.patch_embed.height, self.patch_embed.width))
self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=pos_drop_rate)
transformers = []
# stochastic depth decay rule
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
prev_dim = embed_dim
for i in range(len(depth)):
pool = None
embed_dim = base_dims[i] * heads[i]
if i > 0:
pool = Pooling(
prev_dim,
embed_dim,
stride=2,
)
transformers += [Transformer(
base_dims[i],
depth[i],
heads[i],
mlp_ratio,
pool=pool,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path_prob=dpr[i],
)]
prev_dim = embed_dim
self.feature_info += [dict(num_chs=prev_dim, reduction=(stride - 1) * 2**i, module=f'transformers.{i}')]
self.transformers = SequentialTuple(*transformers)
self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
self.num_features = self.embed_dim = embed_dim
# Classifier head
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
def get_classifier(self):
if self.head_dist is not None:
return self.head, self.head_dist
else:
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.head_dist is not None:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = self.pos_drop(x + self.pos_embed)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
x, cls_tokens = self.transformers((x, cls_tokens))
cls_tokens = self.norm(cls_tokens)
return cls_tokens
def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
if self.head_dist is not None:
assert self.global_pool == 'token'
x, x_dist = x[:, 0], x[:, 1]
x = self.head_drop(x)
            x_dist = self.head_drop(x_dist)
if not pre_logits:
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
                # during standard train / finetune / inference, average the two classifier predictions
return (x + x_dist) / 2
else:
if self.global_pool == 'token':
x = x[:, 0]
x = self.head_drop(x)
if not pre_logits:
x = self.head(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" preprocess checkpoints """
out_dict = {}
p_blocks = re.compile(r'pools\.(\d)\.')
for k, v in state_dict.items():
# FIXME need to update resize for PiT impl
# if k == 'pos_embed' and v.shape != model.pos_embed.shape:
# # To resize pos embedding when using model at different size from pretrained weights
# v = resize_pos_embed(v, model.pos_embed)
k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1)) + 1}.pool.', k)
out_dict[k] = v
return out_dict
def _create_pit(variant, pretrained=False, **kwargs):
default_out_indices = tuple(range(3))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
PoolingVisionTransformer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(feature_cls='hook', no_rewrite=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
    # PiT official weights (NAVER AI, https://github.com/naver-ai/pit)
'pit_ti_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_xs_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_s_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_b_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_ti_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_xs_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_s_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_b_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
})
@register_model
def pit_b_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
)
return _create_pit('pit_b_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_s_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
)
return _create_pit('pit_s_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_xs_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
)
return _create_pit('pit_xs_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_ti_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
)
return _create_pit('pit_ti_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_b_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_b_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_s_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_s_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_xs_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_xs_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_ti_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_ti_distilled_224', pretrained, **dict(model_args, **kwargs))
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/swin_transformer_v2.py | """ Swin Transformer V2
A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
- https://arxiv.org/abs/2111.09883
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
# --------------------------------------------------------
# Swin Transformer V2
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import math
from typing import Callable, Optional, Tuple, Union, Set, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, ClassifierHead,\
resample_patch_embed
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['SwinTransformerV2'] # model_registry will add each entrypoint fn to this
_int_or_tuple_2_t = Union[int, Tuple[int, int]]
def window_partition(x: torch.Tensor, window_size: Tuple[int, int]) -> torch.Tensor:
"""
Args:
x: (B, H, W, C)
        window_size (Tuple[int, int]): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows: torch.Tensor, window_size: Tuple[int, int], img_size: Tuple[int, int]) -> torch.Tensor:
"""
Args:
windows: (num_windows * B, window_size[0], window_size[1], C)
window_size (Tuple[int, int]): Window size
img_size (Tuple[int, int]): Image size
Returns:
x: (B, H, W, C)
"""
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
pretrained_window_size (tuple[int]): The height and width of the window in pre-training.
"""
def __init__(
self,
dim: int,
window_size: Tuple[int, int],
num_heads: int,
qkv_bias: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
pretrained_window_size: Tuple[int, int] = (0, 0),
) -> None:
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.pretrained_window_size = pretrained_window_size
self.num_heads = num_heads
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
# mlp to generate continuous relative position bias
self.cpb_mlp = nn.Sequential(
nn.Linear(2, 512, bias=True),
nn.ReLU(inplace=True),
nn.Linear(512, num_heads, bias=False)
)
# get relative_coords_table
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
relative_coords_table = torch.stack(torch.meshgrid([
relative_coords_h,
relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2
if pretrained_window_size[0] > 0:
relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1)
relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1)
else:
relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
relative_coords_table *= 8 # normalize to -8, 8
relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
torch.abs(relative_coords_table) + 1.0) / math.log2(8)
self.register_buffer("relative_coords_table", relative_coords_table, persistent=False)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index, persistent=False)
self.qkv = nn.Linear(dim, dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(dim))
self.register_buffer('k_bias', torch.zeros(dim), persistent=False)
self.v_bias = nn.Parameter(torch.zeros(dim))
else:
self.q_bias = None
self.k_bias = None
self.v_bias = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias))
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
# cosine attention
attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
logit_scale = torch.clamp(self.logit_scale, max=math.log(1. / 0.01)).exp()
attn = attn * logit_scale
relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
num_win = mask.shape[0]
attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerV2Block(nn.Module):
""" Swin Transformer Block.
"""
def __init__(
self,
dim: int,
input_resolution: _int_or_tuple_2_t,
num_heads: int,
window_size: _int_or_tuple_2_t = 7,
shift_size: _int_or_tuple_2_t = 0,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.LayerNorm,
pretrained_window_size: _int_or_tuple_2_t = 0,
) -> None:
"""
Args:
dim: Number of input channels.
input_resolution: Input resolution.
num_heads: Number of attention heads.
window_size: Window size.
shift_size: Shift size for SW-MSA.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
proj_drop: Dropout rate.
attn_drop: Attention dropout rate.
drop_path: Stochastic depth rate.
act_layer: Activation layer.
norm_layer: Normalization layer.
pretrained_window_size: Window size in pretraining.
"""
super().__init__()
self.dim = dim
self.input_resolution = to_2tuple(input_resolution)
self.num_heads = num_heads
ws, ss = self._calc_window_shift(window_size, shift_size)
self.window_size: Tuple[int, int] = ws
self.shift_size: Tuple[int, int] = ss
self.window_area = self.window_size[0] * self.window_size[1]
self.mlp_ratio = mlp_ratio
self.attn = WindowAttention(
dim,
window_size=to_2tuple(self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
pretrained_window_size=to_2tuple(pretrained_window_size),
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if any(self.shift_size):
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
cnt = 0
for h in (
slice(0, -self.window_size[0]),
slice(-self.window_size[0], -self.shift_size[0]),
slice(-self.shift_size[0], None)):
for w in (
slice(0, -self.window_size[1]),
slice(-self.window_size[1], -self.shift_size[1]),
slice(-self.shift_size[1], None)):
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_area)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask, persistent=False)
def _calc_window_shift(self,
target_window_size: _int_or_tuple_2_t,
target_shift_size: _int_or_tuple_2_t) -> Tuple[Tuple[int, int], Tuple[int, int]]:
target_window_size = to_2tuple(target_window_size)
target_shift_size = to_2tuple(target_shift_size)
window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)]
shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
return tuple(window_size), tuple(shift_size)
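    # Worked example (illustrative): for an 8 x 8 stage resolution with a requested
    # 16 x 16 window and 8 x 8 shift, the window is clamped to the stage resolution and
    # the shift is disabled: _calc_window_shift((16, 16), (8, 8)) -> ((8, 8), (0, 0)).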
def _attn(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, C = x.shape
# cyclic shift
has_shift = any(self.shift_size)
if has_shift:
shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
shifted_x = window_reverse(attn_windows, self.window_size, self.input_resolution) # B H' W' C
# reverse cyclic shift
if has_shift:
x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2))
else:
x = shifted_x
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, C = x.shape
x = x + self.drop_path1(self.norm1(self._attn(x)))
x = x.reshape(B, -1, C)
x = x + self.drop_path2(self.norm2(self.mlp(x)))
x = x.reshape(B, H, W, C)
return x
class PatchMerging(nn.Module):
""" Patch Merging Layer.
"""
def __init__(self, dim: int, out_dim: Optional[int] = None, norm_layer: nn.Module = nn.LayerNorm) -> None:
"""
Args:
dim (int): Number of input channels.
out_dim (int): Number of output channels (or 2 * dim if None)
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super().__init__()
self.dim = dim
self.out_dim = out_dim or 2 * dim
self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False)
self.norm = norm_layer(self.out_dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, C = x.shape
_assert(H % 2 == 0, f"x height ({H}) is not even.")
_assert(W % 2 == 0, f"x width ({W}) is not even.")
x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)
x = self.reduction(x)
x = self.norm(x)
return x
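# Shape sketch (illustrative, not part of the original module): PatchMerging folds each
# 2x2 neighborhood into the channel dim before the linear reduction.
#   pm = PatchMerging(dim=96)                # out_dim defaults to 2 * dim = 192
#   y = pm(torch.randn(2, 56, 56, 96))       # -> (2, 28, 28, 192)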
class SwinTransformerV2Stage(nn.Module):
""" A Swin Transformer V2 Stage.
"""
def __init__(
self,
dim: int,
out_dim: int,
input_resolution: _int_or_tuple_2_t,
depth: int,
num_heads: int,
window_size: _int_or_tuple_2_t,
downsample: bool = False,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
norm_layer: nn.Module = nn.LayerNorm,
pretrained_window_size: _int_or_tuple_2_t = 0,
output_nchw: bool = False,
) -> None:
"""
Args:
dim: Number of input channels.
out_dim: Number of output channels.
input_resolution: Input resolution.
depth: Number of blocks.
num_heads: Number of attention heads.
window_size: Local window size.
downsample: Use downsample layer at start of the block.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
proj_drop: Projection dropout rate
attn_drop: Attention dropout rate.
drop_path: Stochastic depth rate.
norm_layer: Normalization layer.
pretrained_window_size: Local window size in pretraining.
output_nchw: Output tensors on NCHW format instead of NHWC.
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution
self.depth = depth
self.output_nchw = output_nchw
self.grad_checkpointing = False
window_size = to_2tuple(window_size)
shift_size = tuple([w // 2 for w in window_size])
# patch merging / downsample layer
if downsample:
self.downsample = PatchMerging(dim=dim, out_dim=out_dim, norm_layer=norm_layer)
else:
assert dim == out_dim
self.downsample = nn.Identity()
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerV2Block(
dim=out_dim,
input_resolution=self.output_resolution,
num_heads=num_heads,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else shift_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
)
for i in range(depth)])
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.downsample(x)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
return x
def _init_respostnorm(self) -> None:
for blk in self.blocks:
nn.init.constant_(blk.norm1.bias, 0)
nn.init.constant_(blk.norm1.weight, 0)
nn.init.constant_(blk.norm2.bias, 0)
nn.init.constant_(blk.norm2.weight, 0)
class SwinTransformerV2(nn.Module):
""" Swin Transformer V2
A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
- https://arxiv.org/abs/2111.09883
"""
def __init__(
self,
img_size: _int_or_tuple_2_t = 224,
patch_size: int = 4,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 96,
depths: Tuple[int, ...] = (2, 2, 6, 2),
num_heads: Tuple[int, ...] = (3, 6, 12, 24),
window_size: _int_or_tuple_2_t = 7,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.1,
norm_layer: Callable = nn.LayerNorm,
pretrained_window_sizes: Tuple[int, ...] = (0, 0, 0, 0),
**kwargs,
):
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
embed_dim: Patch embedding dimension.
depths: Depth of each Swin Transformer stage (layer).
num_heads: Number of attention heads in different layers.
window_size: Window size.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
drop_rate: Head dropout rate.
proj_drop_rate: Projection dropout rate.
attn_drop_rate: Attention dropout rate.
drop_path_rate: Stochastic depth rate.
norm_layer: Normalization layer.
            global_pool: Global pooling type for the classification head ('avg' or '').
            pretrained_window_sizes: Pretrained window sizes of each layer.
"""
super().__init__()
self.num_classes = num_classes
assert global_pool in ('', 'avg')
self.global_pool = global_pool
self.output_fmt = 'NHWC'
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.feature_info = []
if not isinstance(embed_dim, (tuple, list)):
embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
norm_layer=norm_layer,
output_fmt='NHWC',
)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
layers = []
in_dim = embed_dim[0]
scale = 1
for i in range(self.num_layers):
out_dim = embed_dim[i]
layers += [SwinTransformerV2Stage(
dim=in_dim,
out_dim=out_dim,
input_resolution=(
self.patch_embed.grid_size[0] // scale,
self.patch_embed.grid_size[1] // scale),
depth=depths[i],
downsample=i > 0,
num_heads=num_heads[i],
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_sizes[i],
)]
in_dim = out_dim
if i > 0:
scale *= 2
self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')]
self.layers = nn.Sequential(*layers)
self.norm = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
input_fmt=self.output_fmt,
)
self.apply(self._init_weights)
for bly in self.layers:
bly._init_respostnorm()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
nod = set()
for n, m in self.named_modules():
if any([kw in n for kw in ("cpb_mlp", "logit_scale")]):
nod.add(n)
return nod
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^absolute_pos_embed|patch_embed', # stem and embed
blocks=r'^layers\.(\d+)' if coarse else [
(r'^layers\.(\d+).downsample', (0,)),
(r'^layers\.(\d+)\.\w+\.(\d+)', None),
(r'^norm', (99999,)),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for l in self.layers:
l.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.patch_embed(x)
x = self.layers(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
native_checkpoint = 'head.fc.weight' in state_dict
out_dict = {}
import re
for k, v in state_dict.items():
if any([n in k for n in ('relative_position_index', 'relative_coords_table', 'attn_mask')]):
continue # skip buffers that should not be persistent
if 'patch_embed.proj.weight' in k:
_, _, H, W = model.patch_embed.proj.weight.shape
if v.shape[-2] != H or v.shape[-1] != W:
v = resample_patch_embed(
v,
(H, W),
interpolation='bicubic',
antialias=True,
verbose=True,
)
if not native_checkpoint:
            # remap layer names from the original MSFT checkpoint layout (native timm checkpoints skip this)
k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k)
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
def _create_swin_transformer_v2(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
SwinTransformerV2, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth',
),
'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth',
),
'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'swinv2_tiny_window8_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth',
),
'swinv2_tiny_window16_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth',
),
'swinv2_small_window8_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth',
),
'swinv2_small_window16_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth',
),
'swinv2_base_window8_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth',
),
'swinv2_base_window16_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth',
),
'swinv2_base_window12_192.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth',
num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6)
),
'swinv2_large_window12_192.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth',
num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6)
),
})
@register_model
def swinv2_tiny_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer_v2(
'swinv2_tiny_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_tiny_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer_v2(
'swinv2_tiny_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_small_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer_v2(
'swinv2_small_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_small_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer_v2(
'swinv2_small_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer_v2(
'swinv2_base_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=8, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer_v2(
'swinv2_base_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer_v2(
'swinv2_base_window12_192', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(
window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32),
pretrained_window_sizes=(12, 12, 12, 6))
return _create_swin_transformer_v2(
'swinv2_base_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(
window_size=24, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32),
pretrained_window_sizes=(12, 12, 12, 6))
return _create_swin_transformer_v2(
'swinv2_base_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_large_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48))
return _create_swin_transformer_v2(
'swinv2_large_window12_192', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_large_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(
window_size=16, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48),
pretrained_window_sizes=(12, 12, 12, 6))
return _create_swin_transformer_v2(
'swinv2_large_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_large_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(
window_size=24, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48),
pretrained_window_sizes=(12, 12, 12, 6))
return _create_swin_transformer_v2(
'swinv2_large_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs))
register_model_deprecations(__name__, {
'swinv2_base_window12_192_22k': 'swinv2_base_window12_192.ms_in22k',
'swinv2_base_window12to16_192to256_22kft1k': 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k',
'swinv2_base_window12to24_192to384_22kft1k': 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k',
'swinv2_large_window12_192_22k': 'swinv2_large_window12_192.ms_in22k',
'swinv2_large_window12to16_192to256_22kft1k': 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k',
'swinv2_large_window12to24_192to384_22kft1k': 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k',
})
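# Example usage (a hedged sketch; assumes timm is installed and, for pretrained weights,
# that the hub artifacts referenced in default_cfgs are reachable):
#   import timm, torch
#   model = timm.create_model('swinv2_tiny_window8_256', pretrained=False, img_size=256)
#   logits = model(torch.randn(1, 3, 256, 256))  # -> (1, 1000)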
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/mobilenetv3.py | """ MobileNet V3
A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl.
Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244
Hacked together by / Copyright 2019, Ross Wightman
"""
from functools import partial
from typing import Callable, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import SelectAdaptivePool2d, Linear, LayerType, PadType, create_conv2d, get_norm_act_layer
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \
round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT
from ._features import FeatureInfo, FeatureHooks
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['MobileNetV3', 'MobileNetV3Features']
class MobileNetV3(nn.Module):
""" MobiletNet-V3
Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific
'efficient head', where global pooling is done before the head convolution without a final batch-norm
layer before the classifier.
Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244
    Other architectures utilizing the MobileNet-V3 efficient head that are supported by this impl include:
* HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class)
* FBNet-V3 - https://arxiv.org/abs/2006.02049
* LCNet - https://arxiv.org/abs/2109.15099
"""
def __init__(
self,
block_args: BlockArgs,
num_classes: int = 1000,
in_chans: int = 3,
stem_size: int = 16,
fix_stem: bool = False,
num_features: int = 1280,
head_bias: bool = True,
pad_type: PadType = '',
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
se_from_exp: bool = True,
round_chs_fn: Callable = round_channels,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
global_pool: str = 'avg',
):
"""
Args:
block_args: Arguments for blocks of the network.
num_classes: Number of classes for classification head.
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
num_features: Number of output channels of the conv head layer.
head_bias: If True, add a learnable bias to the conv head layer.
pad_type: Type of padding to use for convolution layers.
act_layer: Type of activation layer.
norm_layer: Type of normalization layer.
se_layer: Type of Squeeze-and-Excite layer.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
round_chs_fn: Callable to round number of filters based on depth multiplier.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
global_pool: Type of pooling to use for global pooling features of the FC head.
"""
super(MobileNetV3, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.num_classes = num_classes
self.num_features = num_features
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_act_layer(stem_size, inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=32,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = builder.features
head_chs = builder.in_chs
# Head + Pooling
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
num_pooled_chs = head_chs * self.global_pool.feat_mult()
self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias)
self.act2 = act_layer(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
efficientnet_init_weights(self)
def as_sequential(self):
layers = [self.conv_stem, self.bn1]
layers.extend(self.blocks)
layers.extend([self.global_pool, self.conv_head, self.act2])
layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse: bool = False):
return dict(
stem=r'^conv_stem|bn1',
blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
# cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv_stem(x)
x = self.bn1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
x = self.flatten(x)
if pre_logits:
return x
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return self.classifier(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
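# Usage sketch (illustrative comment, not part of the original file). Because of the
# 'efficient head', `forward_head(..., pre_logits=True)` returns features *after* global
# pooling and the 1x1 conv head:
#
#   import torch, timm
#   model = timm.create_model('mobilenetv3_large_100', pretrained=False).eval()
#   x = torch.randn(1, 3, 224, 224)
#   feats = model.forward_features(x)                     # (1, 960, 7, 7)
#   pooled = model.forward_head(feats, pre_logits=True)   # (1, 1280)
#   logits = model(x)                                     # (1, 1000)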
class MobileNetV3Features(nn.Module):
""" MobileNetV3 Feature Extractor
A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation
and object detection models.
"""
def __init__(
self,
block_args: BlockArgs,
out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4),
feature_location: str = 'bottleneck',
in_chans: int = 3,
stem_size: int = 16,
fix_stem: bool = False,
output_stride: int = 32,
pad_type: PadType = '',
round_chs_fn: Callable = round_channels,
se_from_exp: bool = True,
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
):
"""
Args:
block_args: Arguments for blocks of the network.
out_indices: Output from stages at indices.
feature_location: Location of feature before/after each block, must be in ['bottleneck', 'expansion']
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
output_stride: Output stride of the network.
pad_type: Type of padding to use for convolution layers.
round_chs_fn: Callable to round number of filters based on depth multiplier.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
act_layer: Type of activation layer.
norm_layer: Type of normalization layer.
se_layer: Type of Squeeze-and-Excite layer.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
"""
super(MobileNetV3Features, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
se_layer = se_layer or SqueezeExcite
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size)
self.act1 = act_layer(inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
feature_location=feature_location,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = FeatureInfo(builder.features, out_indices)
self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()}
efficientnet_init_weights(self)
# Register feature extraction hooks with FeatureHooks helper
self.feature_hooks = None
if feature_location != 'bottleneck':
hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
self.feature_hooks = FeatureHooks(hooks, self.named_modules())
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.feature_hooks is None:
features = []
if 0 in self._stage_out_idx:
features.append(x) # add stem out
for i, b in enumerate(self.blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(b, x)
else:
x = b(x)
if i + 1 in self._stage_out_idx:
features.append(x)
return features
else:
self.blocks(x)
out = self.feature_hooks.get_output(x.device)
return list(out.values())
def _create_mnv3(variant: str, pretrained: bool = False, **kwargs) -> MobileNetV3:
features_mode = ''
model_cls = MobileNetV3
kwargs_filter = None
if kwargs.pop('features_only', False):
if 'feature_cfg' in kwargs:
features_mode = 'cfg'
else:
kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool')
model_cls = MobileNetV3Features
features_mode = 'cls'
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
features_only=features_mode == 'cfg',
pretrained_strict=features_mode != 'cls',
kwargs_filter=kwargs_filter,
**kwargs,
)
if features_mode == 'cls':
model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
return model
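# Illustrative sketch (not in the original source): `features_only=True` routes creation
# through MobileNetV3Features above, returning a list of intermediate feature maps instead
# of classification logits:
#
#   import torch, timm
#   m = timm.create_model('mobilenetv3_large_100', features_only=True, pretrained=False).eval()
#   outs = m(torch.randn(1, 3, 224, 224))
#   print([o.shape[-1] for o in outs])   # spatial sizes 112, 56, 28, 14, 7 (strides 2..32)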
def _gen_mobilenet_v3_rw(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs) -> MobileNetV3:
"""Creates a MobileNet-V3 model.
Ref impl: ?
Paper: https://arxiv.org/abs/1905.02244
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'], # hard-swish
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
head_bias=False,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'),
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_mobilenet_v3(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs) -> MobileNetV3:
"""Creates a MobileNet-V3 model.
Ref impl: ?
Paper: https://arxiv.org/abs/1905.02244
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
if 'small' in variant:
num_features = 1024
if 'minimal' in variant:
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s2_e1_c16'],
# stage 1, 56x56 in
['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'],
# stage 2, 28x28 in
['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'],
# stage 3, 14x14 in
['ir_r2_k3_s1_e3_c48'],
# stage 4, 14x14in
['ir_r3_k3_s2_e6_c96'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c576'],
]
else:
act_layer = resolve_act_layer(kwargs, 'hard_swish')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu
# stage 1, 56x56 in
['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu
# stage 2, 28x28 in
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish
# stage 3, 14x14 in
['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish
# stage 4, 14x14in
['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c576'], # hard-swish
]
else:
num_features = 1280
if 'minimal' in variant:
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16'],
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'],
# stage 2, 56x56 in
['ir_r3_k3_s2_e3_c40'],
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112'],
# stage 5, 14x14in
['ir_r3_k3_s2_e6_c160'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'],
]
else:
act_layer = resolve_act_layer(kwargs, 'hard_swish')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_nre'], # relu
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'], # hard-swish
]
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=num_features,
stem_size=16,
fix_stem=channel_multiplier < 0.75,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
se_layer=se_layer,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_fbnetv3(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs):
""" FBNetV3
Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining`
- https://arxiv.org/abs/2006.02049
FIXME untested, this is a preliminary impl of some FBNet-V3 variants.
"""
vl = variant.split('_')[-1]
if vl in ('a', 'b'):
stem_size = 16
arch_def = [
['ds_r2_k3_s1_e1_c16'],
['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'],
['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'],
['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'],
['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'],
['cn_r1_k1_s1_c1344'],
]
elif vl == 'd':
stem_size = 24
arch_def = [
['ds_r2_k3_s1_e1_c16'],
['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'],
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'],
['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'],
['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'],
['cn_r1_k1_s1_c1440'],
]
elif vl == 'g':
stem_size = 32
arch_def = [
['ds_r3_k3_s1_e1_c24'],
['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'],
['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'],
['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'],
['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'],
['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'],
['cn_r1_k1_s1_c1728'],
]
else:
raise NotImplementedError(f'Unknown FBNetV3 variant: {variant}')
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95)
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn)
act_layer = resolve_act_layer(kwargs, 'hard_swish')
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=1984,
head_bias=False,
stem_size=stem_size,
round_chs_fn=round_chs_fn,
se_from_exp=False,
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
se_layer=se_layer,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_lcnet(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs):
""" LCNet
Essentially a MobileNet-V3 crossed with a MobileNet-V1
Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['dsa_r1_k3_s1_c32'],
# stage 1, 112x112 in
['dsa_r2_k3_s2_c64'],
# stage 2, 56x56 in
['dsa_r2_k3_s2_c128'],
# stage 3, 28x28 in
['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'],
# stage 4, 14x14in
['dsa_r4_k5_s1_c256'],
# stage 5, 14x14in
['dsa_r2_k5_s2_c512_se0.25'],
# 7x7
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=16,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU),
num_features=1280,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _cfg(url: str = '', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'mobilenetv3_large_075.untrained': _cfg(url=''),
'mobilenetv3_large_100.ra_in1k': _cfg(
interpolation='bicubic',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth',
hf_hub_id='timm/'),
'mobilenetv3_large_100.miil_in21k_ft_in1k': _cfg(
interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.),
origin_url='https://github.com/Alibaba-MIIL/ImageNet21K',
paper_ids='arXiv:2104.10972v4',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_1k_miil_78_0-66471c13.pth',
hf_hub_id='timm/'),
'mobilenetv3_large_100.miil_in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_in21k_miil-d71cc17b.pth',
hf_hub_id='timm/',
origin_url='https://github.com/Alibaba-MIIL/ImageNet21K',
paper_ids='arXiv:2104.10972v4',
interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), num_classes=11221),
'mobilenetv3_small_050.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_small_075.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_small_100.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_rw.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'tf_mobilenetv3_large_075.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_minimal_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_075.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_100.in1k': _cfg(
url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_minimal_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'fbnetv3_b.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth',
hf_hub_id='timm/',
test_input_size=(3, 256, 256), crop_pct=0.95),
'fbnetv3_d.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth',
hf_hub_id='timm/',
test_input_size=(3, 256, 256), crop_pct=0.95),
'fbnetv3_g.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95, pool_size=(8, 8)),
"lcnet_035.untrained": _cfg(),
"lcnet_050.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_075.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_100.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_150.untrained": _cfg(),
})
@register_model
def mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_050(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.50, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_rw(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_b(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-B """
model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_d(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-D """
model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_g(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-G """
model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_035(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 0.35"""
model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_050(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 0.5"""
model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 1.0"""
model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 1.0"""
model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_150(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 1.5"""
model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs)
return model
register_model_deprecations(__name__, {
'mobilenetv3_large_100_miil': 'mobilenetv3_large_100.miil_in21k_ft_in1k',
'mobilenetv3_large_100_miil_in21k': 'mobilenetv3_large_100.miil_in21k',
})
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/nest.py | """ Nested Transformer (NesT) in PyTorch
A PyTorch implementation of Aggregating Nested Transformers as described in:
'Aggregating Nested Transformers'
- https://arxiv.org/abs/2105.12723
The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights
have been converted with convert/convert_nest_flax.py
Acknowledgments:
* The paper authors for sharing their research, code, and model weights
* Ross Wightman's existing code off which I based this
Copyright 2021 Alexander Soare
"""
import collections.abc
import logging
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert
from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq, named_apply
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['Nest'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
class Attention(nn.Module):
"""
This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with
an extra "image block" dim
"""
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
"""
x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim)
"""
B, T, N, C = x.shape
# result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head)
qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1) # (B, H, T, N, N)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
# (B, H, T, N, C'), permute -> (B, T, N, C', H)
x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x # (B, T, N, C)
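# Shape sketch (illustrative, not from the original source), assuming dim=128, num_heads=4
# and an input x of shape (B=2, T=16, N=16, C=128):
#   qkv(x)  -> (2, 16, 16, 384), reshaped/permuted to (3, 2, 4, 16, 16, 32)
#   attn    -> (2, 4, 16, 16, 16)   # N x N attention within each block, no cross-block mixing
#   output  -> (2, 16, 16, 128)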
class TransformerLayer(nn.Module):
"""
This is much like `.vision_transformer.Block` but:
- Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks")
- Uses modified Attention layer that handles the "block" dimension
"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, x):
y = self.norm1(x)
x = x + self.drop_path(self.attn(y))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ConvPool(nn.Module):
def __init__(self, in_channels, out_channels, norm_layer, pad_type=''):
super().__init__()
self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True)
self.norm = norm_layer(out_channels)
self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type)
def forward(self, x):
"""
x is expected to have shape (B, C, H, W)
"""
_assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims')
_assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims')
x = self.conv(x)
# Layer norm done over channel dim only
x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
x = self.pool(x)
return x # (B, C, H//2, W//2)
def blockify(x, block_size: int):
"""image to blocks
Args:
x (Tensor): with shape (B, H, W, C)
block_size (int): edge length of a single square block in units of H, W
"""
B, H, W, C = x.shape
_assert(H % block_size == 0, '`block_size` must divide input height evenly')
_assert(W % block_size == 0, '`block_size` must divide input width evenly')
grid_height = H // block_size
grid_width = W // block_size
x = x.reshape(B, grid_height, block_size, grid_width, block_size, C)
x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C)
return x # (B, T, N, C)
@register_notrace_function # reason: int receives Proxy
def deblockify(x, block_size: int):
"""blocks to image
Args:
x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block
block_size (int): edge length of a single square block in units of desired H, W
"""
B, T, _, C = x.shape
grid_size = int(math.sqrt(T))
height = width = grid_size * block_size
x = x.reshape(B, grid_size, grid_size, block_size, block_size, C)
x = x.transpose(2, 3).reshape(B, height, width, C)
return x # (B, H, W, C)
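# Round-trip sketch (illustrative, not part of the original file): for a square feature map
# whose edge is divisible by `block_size`, deblockify(blockify(x, b), b) recovers x exactly.
#
#   import torch
#   x = torch.randn(2, 16, 16, 128)        # (B, H, W, C)
#   blocks = blockify(x, block_size=4)     # (2, 16, 16, 128): T=16 blocks, N=16 tokens each
#   assert torch.equal(deblockify(blocks, 4), x)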
class NestLevel(nn.Module):
""" Single hierarchical level of a Nested Transformer
"""
def __init__(
self,
num_blocks,
block_size,
seq_length,
num_heads,
depth,
embed_dim,
prev_embed_dim=None,
mlp_ratio=4.,
qkv_bias=True,
proj_drop=0.,
attn_drop=0.,
drop_path=[],
norm_layer=None,
act_layer=None,
pad_type='',
):
super().__init__()
self.block_size = block_size
self.grad_checkpointing = False
self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim))
if prev_embed_dim is not None:
self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type)
else:
self.pool = nn.Identity()
# Transformer encoder
if len(drop_path):
assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers'
self.transformer_encoder = nn.Sequential(*[
TransformerLayer(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i],
norm_layer=norm_layer,
act_layer=act_layer,
)
for i in range(depth)])
def forward(self, x):
"""
expects x as (B, C, H, W)
"""
x = self.pool(x)
x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer
x = blockify(x, self.block_size) # (B, T, N, C')
x = x + self.pos_embed
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.transformer_encoder, x)
else:
x = self.transformer_encoder(x) # (B, T, N, C')
x = deblockify(x, self.block_size) # (B, H', W', C')
# Channel-first for block aggregation, and generally to replicate convnet feature map at each stage
return x.permute(0, 3, 1, 2) # (B, C, H', W')
class Nest(nn.Module):
""" Nested Transformer (NesT)
A PyTorch impl of : `Aggregating Nested Transformers`
- https://arxiv.org/abs/2105.12723
"""
def __init__(
self,
img_size=224,
in_chans=3,
patch_size=4,
num_levels=3,
embed_dims=(128, 256, 512),
num_heads=(4, 8, 16),
depths=(2, 2, 20),
num_classes=1000,
mlp_ratio=4.,
qkv_bias=True,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.5,
norm_layer=None,
act_layer=None,
pad_type='',
weight_init='',
global_pool='avg',
):
"""
Args:
img_size (int, tuple): input image size
in_chans (int): number of input channels
patch_size (int): patch size
num_levels (int): number of block hierarchies (T_d in the paper)
embed_dims (int, tuple): embedding dimensions of each level
num_heads (int, tuple): number of attention heads for each level
depths (int, tuple): number of transformer layers for each level
num_classes (int): number of classes for classification head
mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers
qkv_bias (bool): enable bias for qkv if True
drop_rate (float): dropout rate applied before the classifier head
proj_drop_rate (float): dropout rate for MLP of transformer layers and MSA final projection layer
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer for transformer layers
act_layer: (nn.Module): activation layer in MLP of transformer layers
pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME
weight_init: (str): weight init scheme
global_pool: (str): type of pooling operation to apply to final feature map
Notes:
- Default values follow NesT-B from the original Jax code.
- `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`.
- For those following the paper, Table A1 may have errors!
- https://github.com/google-research/nested-transformer/issues/2
"""
super().__init__()
for param_name in ['embed_dims', 'num_heads', 'depths']:
param_value = locals()[param_name]
if isinstance(param_value, collections.abc.Sequence):
assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`'
embed_dims = to_ntuple(num_levels)(embed_dims)
num_heads = to_ntuple(num_levels)(num_heads)
depths = to_ntuple(num_levels)(depths)
self.num_classes = num_classes
self.num_features = embed_dims[-1]
self.feature_info = []
norm_layer = norm_layer or LayerNorm
act_layer = act_layer or nn.GELU
self.drop_rate = drop_rate
self.num_levels = num_levels
if isinstance(img_size, collections.abc.Sequence):
assert img_size[0] == img_size[1], 'Model only handles square inputs'
img_size = img_size[0]
assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly'
self.patch_size = patch_size
# Number of blocks at each level
self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist()
assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \
'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`'
# Block edge size in units of patches
# Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the
# number of blocks along edge of image
self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0]))
# Patch embedding
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dims[0],
flatten=False,
)
self.num_patches = self.patch_embed.num_patches
self.seq_length = self.num_patches // self.num_blocks[0]
# Build up each hierarchical level
levels = []
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
prev_dim = None
curr_stride = 4
for i in range(len(self.num_blocks)):
dim = embed_dims[i]
levels.append(NestLevel(
self.num_blocks[i],
self.block_size,
self.seq_length,
num_heads[i],
depths[i],
dim,
prev_dim,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dp_rates[i],
norm_layer=norm_layer,
act_layer=act_layer,
pad_type=pad_type,
))
self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')]
prev_dim = dim
curr_stride *= 2
self.levels = nn.Sequential(*levels)
# Final normalization layer
self.norm = norm_layer(embed_dims[-1])
# Classifier
global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
self.global_pool = global_pool
self.head_drop = nn.Dropout(drop_rate)
self.head = head
self.init_weights(weight_init)
@torch.jit.ignore
def init_weights(self, mode=''):
assert mode in ('nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
for level in self.levels:
trunc_normal_(level.pos_embed, std=.02, a=-2, b=2)
named_apply(partial(_init_nest_weights, head_bias=head_bias), self)
@torch.jit.ignore
def no_weight_decay(self):
return {f'levels.{i}.pos_embed' for i in range(len(self.levels))}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed', # stem and embed
blocks=[
(r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None),
(r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)),
(r'^norm', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for l in self.levels:
l.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.head = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.patch_embed(x)
x = self.levels(x)
# Layer norm done over channel dim only (to NHWC and back)
x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
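# Usage sketch (illustrative comment): the entrypoints registered below build the standard
# variants, e.g.
#   import torch, timm
#   m = timm.create_model('nest_tiny', pretrained=False).eval()
#   m(torch.randn(1, 3, 224, 224)).shape   # torch.Size([1, 1000])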
def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.):
""" NesT weight initialization
Can replicate Jax implementation. Otherwise follows vision_transformer.py
"""
if isinstance(module, nn.Linear):
if name.startswith('head'):
trunc_normal_(module.weight, std=.02, a=-2, b=2)
nn.init.constant_(module.bias, head_bias)
else:
trunc_normal_(module.weight, std=.02, a=-2, b=2)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02, a=-2, b=2)
if module.bias is not None:
nn.init.zeros_(module.bias)
def resize_pos_embed(posemb, posemb_new):
"""
Rescale the grid of position embeddings when loading from state_dict
Expected shape of position embeddings is (1, T, N, C), and considers only square images
"""
_logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
seq_length_old = posemb.shape[2]
num_blocks_new, seq_length_new = posemb_new.shape[1:3]
size_new = int(math.sqrt(num_blocks_new*seq_length_new))
# First change to (1, C, H, W)
posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2)
posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False)
# Now change to new (1, T, N, C)
posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new)))
return posemb
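# Worked example (illustrative, not in the original source): resizing a (1, 16, 196, C)
# position embedding (16 blocks of 14x14 tokens) to (1, 16, 256, C) (16 blocks of 16x16):
# deblockify -> (1, 56, 56, C), permute to (1, C, 56, 56), bicubic resize to (1, C, 64, 64),
# then blockify back with block_size sqrt(256)=16 -> (1, 16, 256, C).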
def checkpoint_filter_fn(state_dict, model):
""" resize positional embeddings of pretrained weights """
pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')]
for k in pos_embed_keys:
if state_dict[k].shape != getattr(model, k).shape:
state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k))
return state_dict
def _create_nest(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
Nest,
variant,
pretrained,
feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True),
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14],
'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'nest_base.untrained': _cfg(),
'nest_small.untrained': _cfg(),
'nest_tiny.untrained': _cfg(),
# (weights from official Google JAX impl, require 'SAME' padding)
'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'),
'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'),
'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def nest_base(pretrained=False, **kwargs) -> Nest:
""" Nest-B @ 224x224
"""
model_kwargs = dict(
embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs)
model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_small(pretrained=False, **kwargs) -> Nest:
""" Nest-S @ 224x224
"""
model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs)
model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_tiny(pretrained=False, **kwargs) -> Nest:
""" Nest-T @ 224x224
"""
model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs)
model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_base_jx(pretrained=False, **kwargs) -> Nest:
""" Nest-B @ 224x224
"""
kwargs.setdefault('pad_type', 'same')
model_kwargs = dict(
embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs)
model = _create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_small_jx(pretrained=False, **kwargs) -> Nest:
""" Nest-S @ 224x224
"""
kwargs.setdefault('pad_type', 'same')
model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs)
model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs)
return model
@register_model
def nest_tiny_jx(pretrained=False, **kwargs) -> Nest:
""" Nest-T @ 224x224
"""
kwargs.setdefault('pad_type', 'same')
model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs)
model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs)
return model
register_model_deprecations(__name__, {
'jx_nest_base': 'nest_base_jx',
'jx_nest_small': 'nest_small_jx',
'jx_nest_tiny': 'nest_tiny_jx',
}) | 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/efficientvit_msra.py | """ EfficientViT (by MSRA)
Paper: `EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention`
- https://arxiv.org/abs/2305.07027
Adapted from official impl at https://github.com/microsoft/Cream/tree/main/EfficientViT
"""
__all__ = ['EfficientVitMsra']
import itertools
from collections import OrderedDict
from typing import Dict
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SqueezeExcite, SelectAdaptivePool2d, trunc_normal_, _assert
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class ConvNorm(torch.nn.Sequential):
def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False)
self.bn = nn.BatchNorm2d(out_chs)
torch.nn.init.constant_(self.bn.weight, bn_weight_init)
torch.nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self):
c, bn = self.conv, self.bn
w = bn.weight / (bn.running_var + bn.eps)**0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps)**0.5
m = torch.nn.Conv2d(
w.size(1) * self.conv.groups, w.size(0), w.shape[2:],
stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
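# Descriptive note (added comment): this is standard conv+BN folding. With BN statistics
# (mu, var) and affine params (gamma, beta), the fused conv uses
#   W' = W * gamma / sqrt(var + eps)    and    b' = beta - mu * gamma / sqrt(var + eps),
# which matches the `w` and `b` computed above.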
class NormLinear(torch.nn.Sequential):
def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.):
super().__init__()
self.bn = nn.BatchNorm1d(in_features)
self.drop = nn.Dropout(drop)
self.linear = nn.Linear(in_features, out_features, bias=bias)
trunc_normal_(self.linear.weight, std=std)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
@torch.no_grad()
def fuse(self):
bn, linear = self.bn, self.linear
w = bn.weight / (bn.running_var + bn.eps)**0.5
b = bn.bias - self.bn.running_mean * \
self.bn.weight / (bn.running_var + bn.eps)**0.5
w = linear.weight * w[None, :]
if linear.bias is None:
b = b @ self.linear.weight.T
else:
b = (linear.weight @ b[:, None]).view(-1) + self.linear.bias
m = torch.nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class PatchMerging(torch.nn.Module):
def __init__(self, dim, out_dim):
super().__init__()
hid_dim = int(dim * 4)
self.conv1 = ConvNorm(dim, hid_dim, 1, 1, 0)
self.act = torch.nn.ReLU()
self.conv2 = ConvNorm(hid_dim, hid_dim, 3, 2, 1, groups=hid_dim)
self.se = SqueezeExcite(hid_dim, .25)
self.conv3 = ConvNorm(hid_dim, out_dim, 1, 1, 0)
def forward(self, x):
x = self.conv3(self.se(self.act(self.conv2(self.act(self.conv1(x))))))
return x
class ResidualDrop(torch.nn.Module):
def __init__(self, m, drop=0.):
super().__init__()
self.m = m
self.drop = drop
def forward(self, x):
if self.training and self.drop > 0:
return x + self.m(x) * torch.rand(
x.size(0), 1, 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach()
else:
return x + self.m(x)
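# Descriptive note (added comment): this implements per-sample stochastic depth. During
# training each sample's residual branch m(x) is zeroed with probability `drop` and the
# surviving samples are scaled by 1/(1 - drop); at inference the branch is always added.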
class ConvMlp(torch.nn.Module):
def __init__(self, ed, h):
super().__init__()
self.pw1 = ConvNorm(ed, h)
self.act = torch.nn.ReLU()
self.pw2 = ConvNorm(h, ed, bn_weight_init=0)
def forward(self, x):
x = self.pw2(self.act(self.pw1(x)))
return x
class CascadedGroupAttention(torch.nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
r""" Cascaded Group Attention.
Args:
dim (int): Number of input channels.
key_dim (int): The dimension for query and key.
num_heads (int): Number of attention heads.
attn_ratio (int): Multiplier for the query dim for value dimension.
resolution (int): Input resolution, correspond to the window size.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
kernels=(5, 5, 5, 5),
):
super().__init__()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.val_dim = int(attn_ratio * key_dim)
self.attn_ratio = attn_ratio
qkvs = []
dws = []
for i in range(num_heads):
qkvs.append(ConvNorm(dim // (num_heads), self.key_dim * 2 + self.val_dim))
dws.append(ConvNorm(self.key_dim, self.key_dim, kernels[i], 1, kernels[i] // 2, groups=self.key_dim))
self.qkvs = torch.nn.ModuleList(qkvs)
self.dws = torch.nn.ModuleList(dws)
self.proj = torch.nn.Sequential(
torch.nn.ReLU(),
ConvNorm(self.val_dim * num_heads, dim, bn_weight_init=0)
)
points = list(itertools.product(range(resolution), range(resolution)))
N = len(points)
attention_offsets = {}
idxs = []
for p1 in points:
for p2 in points:
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
idxs.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False)
self.attention_bias_cache = {}
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
B, C, H, W = x.shape
feats_in = x.chunk(len(self.qkvs), dim=1)
feats_out = []
feat = feats_in[0]
attn_bias = self.get_attention_biases(x.device)
for head_idx, (qkv, dws) in enumerate(zip(self.qkvs, self.dws)):
if head_idx > 0:
feat = feat + feats_in[head_idx]
feat = qkv(feat)
q, k, v = feat.view(B, -1, H, W).split([self.key_dim, self.key_dim, self.val_dim], dim=1)
q = dws(q)
q, k, v = q.flatten(2), k.flatten(2), v.flatten(2)
q = q * self.scale
attn = q.transpose(-2, -1) @ k
attn = attn + attn_bias[head_idx]
attn = attn.softmax(dim=-1)
feat = v @ attn.transpose(-2, -1)
feat = feat.view(B, self.val_dim, H, W)
feats_out.append(feat)
x = self.proj(torch.cat(feats_out, 1))
return x
class LocalWindowAttention(torch.nn.Module):
r""" Local Window Attention.
Args:
dim (int): Number of input channels.
key_dim (int): The dimension for query and key.
num_heads (int): Number of attention heads.
attn_ratio (int): Multiplier for the query dim for value dimension.
resolution (int): Input resolution.
window_resolution (int): Local window resolution.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=(5, 5, 5, 5),
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.resolution = resolution
assert window_resolution > 0, 'window_size must be greater than 0'
self.window_resolution = window_resolution
window_resolution = min(window_resolution, resolution)
self.attn = CascadedGroupAttention(
dim, key_dim, num_heads,
attn_ratio=attn_ratio,
resolution=window_resolution,
kernels=kernels,
)
def forward(self, x):
H = W = self.resolution
B, C, H_, W_ = x.shape
# Only check this for classification models
_assert(H == H_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}')
_assert(W == W_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}')
if H <= self.window_resolution and W <= self.window_resolution:
x = self.attn(x)
else:
x = x.permute(0, 2, 3, 1)
pad_b = (self.window_resolution - H % self.window_resolution) % self.window_resolution
pad_r = (self.window_resolution - W % self.window_resolution) % self.window_resolution
x = torch.nn.functional.pad(x, (0, 0, 0, pad_r, 0, pad_b))
pH, pW = H + pad_b, W + pad_r
nH = pH // self.window_resolution
nW = pW // self.window_resolution
# window partition, BHWC -> B(nHh)(nWw)C -> BnHnWhwC -> (BnHnW)hwC -> (BnHnW)Chw
x = x.view(B, nH, self.window_resolution, nW, self.window_resolution, C).transpose(2, 3)
x = x.reshape(B * nH * nW, self.window_resolution, self.window_resolution, C).permute(0, 3, 1, 2)
x = self.attn(x)
# window reverse, (BnHnW)Chw -> (BnHnW)hwC -> BnHnWhwC -> B(nHh)(nWw)C -> BHWC
x = x.permute(0, 2, 3, 1).view(B, nH, nW, self.window_resolution, self.window_resolution, C)
x = x.transpose(2, 3).reshape(B, pH, pW, C)
x = x[:, :H, :W].contiguous()
x = x.permute(0, 3, 1, 2)
return x
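# Shape sketch (illustrative, not from the original source), assuming resolution=14 and
# window_resolution=7: the (B, C, 14, 14) input needs no padding, is partitioned into 2x2
# windows of shape (4B, C, 7, 7), attention runs independently per window, and the windows
# are stitched back to (B, C, 14, 14).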
class EfficientVitBlock(torch.nn.Module):
""" A basic EfficientVit building block.
Args:
dim (int): Number of input channels.
key_dim (int): Dimension for query and key in the token mixer.
num_heads (int): Number of attention heads.
attn_ratio (int): Multiplier for the query dim for value dimension.
resolution (int): Input resolution.
window_resolution (int): Local window resolution.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=[5, 5, 5, 5],
):
super().__init__()
self.dw0 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.))
self.ffn0 = ResidualDrop(ConvMlp(dim, int(dim * 2)))
self.mixer = ResidualDrop(
LocalWindowAttention(
dim, key_dim, num_heads,
attn_ratio=attn_ratio,
resolution=resolution,
window_resolution=window_resolution,
kernels=kernels,
)
)
self.dw1 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.))
self.ffn1 = ResidualDrop(ConvMlp(dim, int(dim * 2)))
def forward(self, x):
return self.ffn1(self.dw1(self.mixer(self.ffn0(self.dw0(x)))))
class EfficientVitStage(torch.nn.Module):
def __init__(
self,
in_dim,
out_dim,
key_dim,
downsample=('', 1),
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=[5, 5, 5, 5],
depth=1,
):
super().__init__()
if downsample[0] == 'subsample':
self.resolution = (resolution - 1) // downsample[1] + 1
down_blocks = []
down_blocks.append((
'res1',
torch.nn.Sequential(
ResidualDrop(ConvNorm(in_dim, in_dim, 3, 1, 1, groups=in_dim)),
ResidualDrop(ConvMlp(in_dim, int(in_dim * 2))),
)
))
down_blocks.append(('patchmerge', PatchMerging(in_dim, out_dim)))
down_blocks.append((
'res2',
torch.nn.Sequential(
ResidualDrop(ConvNorm(out_dim, out_dim, 3, 1, 1, groups=out_dim)),
ResidualDrop(ConvMlp(out_dim, int(out_dim * 2))),
)
))
self.downsample = nn.Sequential(OrderedDict(down_blocks))
else:
assert in_dim == out_dim
self.downsample = nn.Identity()
self.resolution = resolution
blocks = []
for d in range(depth):
blocks.append(EfficientVitBlock(out_dim, key_dim, num_heads, attn_ratio, self.resolution, window_resolution, kernels))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class PatchEmbedding(torch.nn.Sequential):
def __init__(self, in_chans, dim):
super().__init__()
self.add_module('conv1', ConvNorm(in_chans, dim // 8, 3, 2, 1))
self.add_module('relu1', torch.nn.ReLU())
self.add_module('conv2', ConvNorm(dim // 8, dim // 4, 3, 2, 1))
self.add_module('relu2', torch.nn.ReLU())
self.add_module('conv3', ConvNorm(dim // 4, dim // 2, 3, 2, 1))
self.add_module('relu3', torch.nn.ReLU())
self.add_module('conv4', ConvNorm(dim // 2, dim, 3, 2, 1))
self.patch_size = 16
class EfficientVitMsra(nn.Module):
def __init__(
self,
img_size=224,
in_chans=3,
num_classes=1000,
embed_dim=(64, 128, 192),
key_dim=(16, 16, 16),
depth=(1, 2, 3),
num_heads=(4, 4, 4),
window_size=(7, 7, 7),
kernels=(5, 5, 5, 5),
down_ops=(('', 1), ('subsample', 2), ('subsample', 2)),
global_pool='avg',
drop_rate=0.,
):
super(EfficientVitMsra, self).__init__()
self.grad_checkpointing = False
self.num_classes = num_classes
self.drop_rate = drop_rate
# Patch embedding
self.patch_embed = PatchEmbedding(in_chans, embed_dim[0])
stride = self.patch_embed.patch_size
resolution = img_size // self.patch_embed.patch_size
attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))]
# Build EfficientVit blocks
self.feature_info = []
stages = []
pre_ed = embed_dim[0]
for i, (ed, kd, dpth, nh, ar, wd, do) in enumerate(
zip(embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)):
stage = EfficientVitStage(
in_dim=pre_ed,
out_dim=ed,
key_dim=kd,
downsample=do,
num_heads=nh,
attn_ratio=ar,
resolution=resolution,
window_resolution=wd,
kernels=kernels,
depth=dpth,
)
pre_ed = ed
if do[0] == 'subsample' and i != 0:
stride *= do[1]
resolution = stage.resolution
stages.append(stage)
self.feature_info += [dict(num_chs=ed, reduction=stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
if global_pool == 'avg':
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
assert num_classes == 0
self.global_pool = nn.Identity()
self.num_features = embed_dim[-1]
self.head = NormLinear(
self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity()
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'attention_biases' in x}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.linear
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
if global_pool == 'avg':
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
assert num_classes == 0
self.global_pool = nn.Identity()
self.head = NormLinear(
self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
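# End-to-end usage sketch (illustrative): the constructor defaults correspond to the
# efficientvit_m0 configuration, so a plain instantiation classifies 224px inputs into
# 1000 classes.
def _example_efficientvit_msra_forward():
    model = EfficientVitMsra().eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    return logits.shape  # (1, 1000)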
# def checkpoint_filter_fn(state_dict, model):
# if 'model' in state_dict.keys():
# state_dict = state_dict['model']
# tmp_dict = {}
# out_dict = {}
# target_keys = model.state_dict().keys()
# target_keys = [k for k in target_keys if k.startswith('stages.')]
#
# for k, v in state_dict.items():
# if 'attention_bias_idxs' in k:
# continue
# k = k.split('.')
# if k[-2] == 'c':
# k[-2] = 'conv'
# if k[-2] == 'l':
# k[-2] = 'linear'
# k = '.'.join(k)
# tmp_dict[k] = v
#
# for k, v in tmp_dict.items():
# if k.startswith('patch_embed'):
# k = k.split('.')
# k[1] = 'conv' + str(int(k[1]) // 2 + 1)
# k = '.'.join(k)
# elif k.startswith('blocks'):
# kw = '.'.join(k.split('.')[2:])
# find_kw = [a for a in list(sorted(tmp_dict.keys())) if kw in a]
# idx = find_kw.index(k)
# k = [a for a in target_keys if kw in a][idx]
# out_dict[k] = v
#
# return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv1.conv',
'classifier': 'head.linear',
'fixed_input_size': True,
'pool_size': (4, 4),
**kwargs,
}
default_cfgs = generate_default_cfgs({
'efficientvit_m0.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m0.pth'
),
'efficientvit_m1.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m1.pth'
),
'efficientvit_m2.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m2.pth'
),
'efficientvit_m3.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m3.pth'
),
'efficientvit_m4.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m4.pth'
),
'efficientvit_m5.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m5.pth'
),
})
def _create_efficientvit_msra(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2))
model = build_model_with_cfg(
EfficientVitMsra,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
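# Usage sketch (illustrative; timm.create_model and features_only are standard timm API):
# because feature_cfg above flattens the sequential stages and records out_indices, the
# registered variants can also serve as multi-scale feature backbones.
def _example_features_only():
    import timm  # lazy import; this module ships as part of the timm package
    backbone = timm.create_model('efficientvit_m0', pretrained=False, features_only=True)
    feats = backbone(torch.randn(1, 3, 224, 224))
    # feature_info records reductions 16, 32 and 64 for the three stages
    return [f.shape for f in feats]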
@register_model
def efficientvit_m0(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[64, 128, 192],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5]
)
return _create_efficientvit_msra('efficientvit_m0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m1(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 144, 192],
depth=[1, 2, 3],
num_heads=[2, 3, 3],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m2(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 192, 224],
depth=[1, 2, 3],
num_heads=[4, 3, 2],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m3(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 240, 320],
depth=[1, 2, 3],
num_heads=[4, 3, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5]
)
return _create_efficientvit_msra('efficientvit_m3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m4(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 256, 384],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m4', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m5(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[192, 288, 384],
depth=[1, 3, 4],
num_heads=[3, 3, 4],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m5', pretrained=pretrained, **dict(model_args, **kwargs))
| 0 |
hf_public_repos/pytorch-image-models/timm | hf_public_repos/pytorch-image-models/timm/models/vision_transformer_hybrid.py | """ Hybrid Vision Transformer (ViT) in PyTorch
A PyTorch implement of the Hybrid Vision Transformers as described in:
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- https://arxiv.org/abs/2106.10270
NOTE These hybrid model definitions depend on code in vision_transformer.py.
They were moved here to keep file sizes sane.
Hacked together by / Copyright 2020, Ross Wightman
"""
from functools import partial
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import StdConv2dSame, StdConv2d, to_2tuple, Format, nchw_to
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
from .resnet import resnet26d, resnet50d
from .resnetv2 import ResNetV2, create_resnetv2_stem
from .vision_transformer import _create_vision_transformer, VisionTransformer
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
output_fmt: Format
dynamic_img_pad: torch.jit.Final[bool]
def __init__(
self,
backbone,
img_size=224,
patch_size=1,
feature_size=None,
in_chans=3,
embed_dim=768,
bias=True,
flatten: bool = True,
output_fmt: Optional[str] = None,
strict_img_size: bool = True,
dynamic_img_pad: bool = False,
):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# NOTE Most reliable way of determining output dims is to run forward pass
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
if not dynamic_img_pad:
assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0
self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
if output_fmt is not None:
self.flatten = False
self.output_fmt = Format(output_fmt)
else:
# flatten spatial dim and transpose to channels last, kept for bwd compat
self.flatten = flatten
self.output_fmt = Format.NCHW
self.strict_img_size = strict_img_size
self.dynamic_img_pad = dynamic_img_pad
self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
def forward(self, x):
x = self.backbone(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
_, _, H, W = x.shape
if self.dynamic_img_pad:
pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0]
pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1]
x = F.pad(x, (0, pad_w, 0, pad_h))
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
elif self.output_fmt != Format.NCHW:
x = nchw_to(x, self.output_fmt)
return x
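# Usage sketch (illustrative, not part of the original module): wrap a tiny CNN stem as the
# `backbone` to see how HybridEmbed turns its last feature map into a token sequence. With
# the default patch_size=1, each spatial position of the CNN output becomes one token.
def _example_hybrid_embed():
    stem = nn.Sequential(
        nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.ReLU(),
        nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),
    )  # overall stride 4 -> 56x56 feature map for a 224px input
    embed = HybridEmbed(stem, img_size=224, patch_size=1, in_chans=3, embed_dim=128)
    tokens = embed(torch.randn(1, 3, 224, 224))
    return tokens.shape  # (1, 56 * 56, 128)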
class HybridEmbedWithSize(HybridEmbed):
    """ CNN Feature Map Embedding
    Extract feature map from CNN, flatten, project to embedding dim. Also returns the
    (H, W) size of the projected feature map alongside the token sequence.
    """
def __init__(
self,
backbone,
img_size=224,
patch_size=1,
feature_size=None,
in_chans=3,
embed_dim=768,
bias=True,
):
super().__init__(
backbone=backbone,
img_size=img_size,
patch_size=patch_size,
feature_size=feature_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=bias,
)
def forward(self, x) -> Tuple[torch.Tensor, List[int]]:
x = self.backbone(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
x = self.proj(x)
return x.flatten(2).transpose(1, 2), x.shape[-2:]
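# Companion sketch to the example above (illustrative): the WithSize variant also returns
# the (H, W) of the projected feature map alongside the token sequence. This relies on the
# class inheriting its constructor and `proj` layer from HybridEmbed.
def _example_hybrid_embed_with_size():
    stem = nn.Sequential(
        nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.ReLU(),
        nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),
    )
    embed = HybridEmbedWithSize(stem, img_size=224, embed_dim=128)
    tokens, feat_size = embed(torch.randn(1, 3, 224, 224))
    return tokens.shape, feat_size  # (1, 3136, 128), torch.Size([56, 56])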
def _create_vision_transformer_hybrid(variant, backbone, pretrained=False, **kwargs):
embed_layer = partial(HybridEmbed, backbone=backbone)
kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set
return _create_vision_transformer(variant, pretrained=pretrained, embed_layer=embed_layer, **kwargs)
def _resnetv2(layers=(3, 4, 9), **kwargs):
""" ResNet-V2 backbone helper"""
padding_same = kwargs.get('padding_same', True)
stem_type = 'same' if padding_same else ''
conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8)
if len(layers):
backbone = ResNetV2(
layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3),
preact=False, stem_type=stem_type, conv_layer=conv_layer)
else:
backbone = create_resnetv2_stem(
kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer)
return backbone
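# Backbone helper sketch (illustrative): an empty `layers` tuple builds only the ResNetV2
# stem (used by the R+Ti/16 hybrids below), while a non-empty tuple builds a multi-stage
# ResNetV2 trunk, e.g. (3, 4, 9) for the R50+B/16 hybrids.
def _example_resnetv2_backbones():
    stem_only = _resnetv2(layers=())
    r50_trunk = _resnetv2(layers=(3, 4, 9))
    return stem_only, r50_trunk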
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# hybrid in-1k models (weights from official JAX impl where they exist)
'vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz',
hf_hub_id='timm/',
custom_load=True,
first_conv='patch_embed.backbone.conv'),
'vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
hf_hub_id='timm/',
first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True),
'vit_small_r26_s32_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz',
hf_hub_id='timm/',
custom_load=True,
),
'vit_small_r26_s32_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0, custom_load=True),
'vit_base_r26_s32_224.untrained': _cfg(),
'vit_base_r50_s16_384.orig_in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_large_r50_s32_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz',
hf_hub_id='timm/',
custom_load=True,
),
'vit_large_r50_s32_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0, custom_load=True,
),
# hybrid in-21k models (weights from official Google JAX impl where they exist)
'vit_tiny_r_s16_p8_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv', custom_load=True),
'vit_small_r26_s32_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
num_classes=21843, crop_pct=0.9, custom_load=True),
'vit_base_r50_s16_224.orig_in21k': _cfg(
#url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth',
hf_hub_id='timm/',
num_classes=0, crop_pct=0.9),
'vit_large_r50_s32_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
num_classes=21843, crop_pct=0.9, custom_load=True),
# hybrid models (using timm resnet backbones)
'vit_small_resnet26d_224.untrained': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'),
'vit_small_resnet50d_s16_224.untrained': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'),
'vit_base_resnet26d_224.untrained': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'),
'vit_base_resnet50d_224.untrained': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'),
})
@register_model
def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224.
"""
backbone = _resnetv2(layers=(), **kwargs)
model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3)
model = _create_vision_transformer_hybrid(
'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs) -> VisionTransformer:
""" R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384.
"""
backbone = _resnetv2(layers=(), **kwargs)
model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3)
model = _create_vision_transformer_hybrid(
'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R26+ViT-S/S32 hybrid.
"""
backbone = _resnetv2((2, 2, 2, 2), **kwargs)
model_args = dict(embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer_hybrid(
'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_r26_s32_384(pretrained=False, **kwargs) -> VisionTransformer:
""" R26+ViT-S/S32 hybrid.
"""
backbone = _resnetv2((2, 2, 2, 2), **kwargs)
model_args = dict(embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer_hybrid(
'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R26+ViT-B/S32 hybrid.
"""
backbone = _resnetv2((2, 2, 2, 2), **kwargs)
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_r50_s16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929).
"""
backbone = _resnetv2((3, 4, 9), **kwargs)
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_r50_s16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
backbone = _resnetv2((3, 4, 9), **kwargs)
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_r50_s32_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R50+ViT-L/S32 hybrid.
"""
backbone = _resnetv2((3, 4, 6, 3), **kwargs)
model_args = dict(embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer_hybrid(
'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_r50_s32_384(pretrained=False, **kwargs) -> VisionTransformer:
""" R50+ViT-L/S32 hybrid.
"""
backbone = _resnetv2((3, 4, 6, 3), **kwargs)
model_args = dict(embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer_hybrid(
'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4])
model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3)
model = _create_vision_transformer_hybrid(
'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_resnet50d_s16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
"""
backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3])
model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3)
model = _create_vision_transformer_hybrid(
'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4])
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_resnet50d_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
"""
backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4])
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
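# End-to-end usage sketch (illustrative; timm.create_model is the standard entry point):
# hybrid variants are created like any other registered model, with the CNN backbone
# constructed inside the factory functions above.
def _example_hybrid_vit_forward():
    import timm  # lazy import; this module ships as part of the timm package
    model = timm.create_model('vit_tiny_r_s16_p8_224', pretrained=False).eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    return logits.shape  # (1, 1000)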
register_model_deprecations(__name__, {
'vit_tiny_r_s16_p8_224_in21k': 'vit_tiny_r_s16_p8_224.augreg_in21k',
'vit_small_r26_s32_224_in21k': 'vit_small_r26_s32_224.augreg_in21k',
'vit_base_r50_s16_224_in21k': 'vit_base_r50_s16_224.orig_in21k',
'vit_base_resnet50_224_in21k': 'vit_base_r50_s16_224.orig_in21k',
'vit_large_r50_s32_224_in21k': 'vit_large_r50_s32_224.augreg_in21k',
'vit_base_resnet50_384': 'vit_base_r50_s16_384.orig_in21k_ft_in1k'
})
| 0 |