import torch
from torch import nn
import torch.nn.functional as F
from torchvision.ops import stochastic_depth


class LayerNorm(nn.Module):
    """LayerNorm supporting channels_last (N, H, W, C) or channels_first
    (N, C, H, W) inputs, as used throughout ConvNeXt."""

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError(f"unsupported data_format: {data_format}")
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight,
                                self.bias, self.eps)
        # channels_first: normalize over dim 1 by hand, since F.layer_norm
        # only normalizes trailing dimensions.
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        return self.weight[:, None, None] * x + self.bias[:, None, None]


class Permute(nn.Module):
    """Wraps torch.permute as an nn.Module so it can sit inside nn.Sequential."""

    def __init__(self, dims):
        super().__init__()
        self.dims = dims

    def forward(self, x):
        return torch.permute(x, self.dims)


class ConvNextBlock(nn.Module):
    """One ConvNeXt block: 7x7 depthwise conv -> LayerNorm -> pointwise MLP
    with 4x expansion (Linear layers in channels_last) -> layer scale."""

    def __init__(self, filter_dim, layer_scale=1e-6):
        super().__init__()
        self.block = nn.Sequential(
            # depthwise convolution: groups == channels
            nn.Conv2d(filter_dim, filter_dim, kernel_size=7, padding=3,
                      groups=filter_dim),
            # to channels_last so LayerNorm/Linear operate on the channel dim
            Permute([0, 2, 3, 1]),
            LayerNorm(filter_dim, eps=1e-6),
            nn.Linear(filter_dim, filter_dim * 4),
            nn.GELU(),
            nn.Linear(filter_dim * 4, filter_dim),
            # back to channels_first
            Permute([0, 3, 1, 2]),
        )
        # layer scale: small initial gamma keeps each block close to identity
        self.gamma = nn.Parameter(torch.ones(filter_dim, 1, 1) * layer_scale)

    def forward(self, x):
        return self.block(x) * self.gamma


class ConvNextLayer(nn.Module):
    """A stage of `depth` ConvNeXt blocks, each wrapped in a residual
    connection with its own stochastic-depth drop rate."""

    def __init__(self, filter_dim, depth, drop_rates):
        super().__init__()
        self.blocks = nn.ModuleList(
            [ConvNextBlock(filter_dim=filter_dim) for _ in range(depth)])
        self.drop_rates = drop_rates

    def forward(self, x):
        for idx, block in enumerate(self.blocks):
            # during training the residual branch is zeroed for the whole
            # batch with probability drop_rates[idx]
            x = x + stochastic_depth(block(x),
                                     self.drop_rates[idx],
                                     mode="batch",
                                     training=self.training)
        return x


class ConvNext(nn.Module):
    """ConvNeXt classifier (ConvNeXt-T dims/depths by default): a patchify
    stem, four stages separated by 2x downsampling, then global average
    pooling and a linear head."""

    def __init__(self,
                 num_channels=3,
                 num_classes=10,
                 patch_size=4,
                 layer_dims=(96, 192, 384, 768),
                 depths=(3, 3, 9, 3),
                 drop_rate=0.):
        super().__init__()
        # stem: non-overlapping patchify conv followed by a norm
        self.downsample_layers = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(num_channels, layer_dims[0],
                          kernel_size=patch_size, stride=patch_size),
                LayerNorm(layer_dims[0], eps=1e-6,
                          data_format="channels_first"),
            )])
        # between stages: norm, then a 2x2 stride-2 conv halves the resolution
        for idx in range(len(layer_dims) - 1):
            self.downsample_layers.append(
                nn.Sequential(
                    LayerNorm(layer_dims[idx], eps=1e-6,
                              data_format="channels_first"),
                    nn.Conv2d(layer_dims[idx], layer_dims[idx + 1],
                              kernel_size=2, stride=2),
                ))
        # stochastic-depth rates ramp linearly from 0 to drop_rate across blocks
        drop_rates = [x.item() for x in torch.linspace(0, drop_rate, sum(depths))]
        self.stage_layers = nn.ModuleList([])
        for idx, layer_dim in enumerate(layer_dims):
            start = sum(depths[:idx])
            self.stage_layers.append(
                ConvNextLayer(filter_dim=layer_dim,
                              depth=depths[idx],
                              drop_rates=drop_rates[start: start + depths[idx]]))
        # head: norm over pooled features, then linear projection to classes
        self.cls = nn.Sequential(
            LayerNorm(layer_dims[-1], eps=1e-6),
            nn.Linear(layer_dims[-1], num_classes),
        )

    def forward(self, x):
        for downsample_layer, stage_layer in zip(self.downsample_layers,
                                                 self.stage_layers):
            x = downsample_layer(x)
            x = stage_layer(x)
        # global average pool over spatial dims, then classify
        return self.cls(x.mean(dim=(-2, -1)))
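

# Usage sketch (an addition, not part of the original model code): a minimal
# smoke test assuming CIFAR-style 32x32 inputs. With patch_size=4 and three
# stride-2 downsamplings, the final feature map is 1x1 before pooling.
if __name__ == "__main__":
    # The channels_first LayerNorm should agree with permuting to
    # channels_last and calling F.layer_norm on the same weights.
    ln = LayerNorm(8, data_format="channels_first")
    t = torch.randn(2, 8, 4, 4)
    ref = F.layer_norm(t.permute(0, 2, 3, 1), (8,), ln.weight, ln.bias,
                       ln.eps).permute(0, 3, 1, 2)
    assert torch.allclose(ln(t), ref, atol=1e-6)

    model = ConvNext(num_channels=3, num_classes=10, drop_rate=0.1)
    dummy = torch.randn(2, 3, 32, 32)  # (batch, channels, height, width)
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 10])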