```python
import torch
import torch.nn as nn


def get_activation(act_type=None):
    """Return an activation module by name; None gives an identity mapping."""
    if act_type == 'relu':
        return nn.ReLU(inplace=True)
    elif act_type == 'lrelu':
        return nn.LeakyReLU(0.1, inplace=True)
    elif act_type == 'mish':
        return nn.Mish(inplace=True)
    elif act_type == 'silu':
        return nn.SiLU(inplace=True)
    elif act_type == 'gelu':
        return nn.GELU()
    elif act_type is None:
        return nn.Identity()
    else:
        raise NotImplementedError(f'Unsupported activation type: {act_type}')


def get_norm(norm_type, dim):
    """Return a normalization module by name; None gives an identity mapping."""
    if norm_type == 'BN':
        return nn.BatchNorm2d(dim)
    elif norm_type == 'GN':
        return nn.GroupNorm(num_groups=32, num_channels=dim)
    elif norm_type is None:
        return nn.Identity()
    else:
        raise NotImplementedError(f'Unsupported norm type: {norm_type}')


# ----------------- MLP modules -----------------
class MLP(nn.Module):
    """Simple multi-layer perceptron: Linear + ReLU blocks, no activation on the last layer."""
    def __init__(self, in_dim, hidden_dim, out_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([in_dim] + h, h + [out_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x


class FFN(nn.Module):
    """Transformer-style feed-forward block with a residual connection and LayerNorm."""
    def __init__(self, d_model=256, mlp_ratio=4.0, dropout=0., act_type='relu'):
        super().__init__()
        self.fpn_dim = round(d_model * mlp_ratio)
        self.linear1 = nn.Linear(d_model, self.fpn_dim)
        self.activation = get_activation(act_type)
        self.dropout2 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(self.fpn_dim, d_model)
        self.dropout3 = nn.Dropout(dropout)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, src):
        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
        src = src + self.dropout3(src2)
        src = self.norm(src)
        return src


# ----------------- Basic CNN Ops -----------------
class FrozenBatchNorm2d(torch.nn.Module):
    """BatchNorm2d with fixed affine parameters and running statistics (never updated)."""
    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Drop the 'num_batches_tracked' entry saved by regular BatchNorm2d checkpoints.
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Move the reshapes to the beginning to make the op fuser-friendly.
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        eps = 1e-5
        # y = (x - rm) / sqrt(rv + eps) * w + b, rewritten as x * scale + bias
        scale = w * (rv + eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias
```
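To show how these building blocks fit together, here is a minimal usage sketch; the tensor shapes and hyperparameters below are illustrative choices, not values taken from the code above.

```python
import torch

# Token features of shape (batch, sequence, d_model), as consumed by FFN.
x = torch.randn(2, 100, 256)
ffn = FFN(d_model=256, mlp_ratio=4.0, dropout=0.1, act_type='gelu')
print(ffn(x).shape)   # torch.Size([2, 100, 256])

# A small prediction head: 256 -> 256 -> 4 with ReLU between layers.
head = MLP(in_dim=256, hidden_dim=256, out_dim=4, num_layers=3)
print(head(x).shape)  # torch.Size([2, 100, 4])

# FrozenBatchNorm2d normalizes like BatchNorm2d in eval mode, but its statistics
# and affine parameters never change, which is common when fine-tuning a pretrained backbone.
feat = torch.randn(2, 64, 32, 32)
frozen_bn = FrozenBatchNorm2d(64)
print(frozen_bn(feat).shape)  # torch.Size([2, 64, 32, 32])
```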