-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathblocks.py
102 lines (83 loc) · 3.26 KB
/
blocks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
class simple_block(nn.Module):
    """Two stacked grouped Conv3d -> BatchNorm3d -> ReLU layers.

    When ``is_down`` is True the first convolution maps to
    ``out_channels // 2`` channels (a bottleneck on the encoder path);
    otherwise both convolutions use ``out_channels``.

    Args:
        in_channels: channels of the input volume.
        out_channels: channels produced by the block.
        kernel_size: convolution kernel size.
        stride: stride of the FIRST convolution (the original code accepted
            this parameter but ignored it; it is now honored — the default
            of 1 reproduces the previous behavior exactly).
        padding: spatial padding for both convolutions.
        groups: grouped-convolution group count; ``in_channels``,
            ``out_channels`` and the intermediate width must be divisible
            by it.
        is_down: select the halved intermediate width (see above).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=1,
                 groups=4,
                 is_down=True):
        super(simple_block, self).__init__()
        # The two original branches differed only in the intermediate
        # channel count, so compute it once and build a single Sequential.
        mid_channels = out_channels // 2 if is_down else out_channels
        self.block = nn.Sequential(
            nn.Conv3d(in_channels,
                      mid_channels,
                      kernel_size,
                      stride=stride,  # bug fix: stride was previously ignored
                      padding=padding,
                      groups=groups),
            nn.BatchNorm3d(num_features=mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(mid_channels,
                      out_channels,
                      kernel_size,
                      stride=1,  # second conv never strides
                      padding=padding,
                      groups=groups),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """Apply the conv-norm-relu stack to a 5-D tensor (N, C, D, H, W)."""
        return self.block(x)
class Up_sample(nn.Module):
    """Learned 3-D upsampling via a single grouped transposed convolution.

    With the defaults (stride 2, padding 1, output_padding 0) and an even
    kernel size this doubles each spatial dimension while remapping
    ``in_channels`` to ``out_channels``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=2,
                 padding=1,
                 output_padding=0,
                 groups=2):
        super(Up_sample, self).__init__()
        # One ConvTranspose3d performs both the upsampling and the
        # channel mixing; all geometry parameters pass straight through.
        self.block = nn.ConvTranspose3d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
            groups=groups,
        )

    def forward(self, x):
        """Upsample a 5-D tensor (N, C, D, H, W)."""
        return self.block(x)
class Down_sample(nn.Module):
    """Reduce spatial resolution with 3-D max pooling.

    Purely structural — holds no learnable parameters. The defaults
    (stride 2, padding 1) halve each spatial dimension for odd kernels.
    """

    def __init__(self, kernel_size, stride=2, padding=1):
        super(Down_sample, self).__init__()
        pool = nn.MaxPool3d(kernel_size, stride=stride, padding=padding)
        self.block = pool

    def forward(self, x):
        # Delegate directly to the pooling layer.
        return self.block(x)