
Commit afb07fe

Merge branch 'master' into test1
NoamRosenberg authored Aug 28, 2019
2 parents b94dfa2 + aca71fa commit afb07fe
Showing 3 changed files with 17 additions and 12 deletions.
11 changes: 6 additions & 5 deletions auto_deeplab.py
@@ -19,7 +19,8 @@ def __init__(self, num_classes, num_layers, criterion = None, filter_multiplier
         self._filter_multiplier = filter_multiplier
         self._criterion = criterion
         self._initialize_alphas_betas ()
-        C_initial = self._filter_multiplier * self._block_multiplier
+        # C_initial = self._filter_multiplier * self._block_multiplier
+        C_initial = 128
         half_C_initial = int(C_initial / 2)

         self.stem0 = nn.Sequential(
@@ -142,16 +143,16 @@ def __init__(self, num_classes, num_layers, criterion = None, filter_multiplier
         self.cells += [cell4]

         self.aspp_4 = nn.Sequential (
-            ASPP (self._block_multiplier * self._filter_multiplier, self._num_classes, 24, 24) #96 / 4 as in the paper
+            ASPP (self._filter_multiplier, self._num_classes, 24, 24) #96 / 4 as in the paper
         )
         self.aspp_8 = nn.Sequential (
-            ASPP (self._block_multiplier * self._filter_multiplier * 2, self._num_classes, 12, 12) #96 / 8
+            ASPP (self._filter_multiplier * 2, self._num_classes, 12, 12) #96 / 8
         )
         self.aspp_16 = nn.Sequential (
-            ASPP (self._block_multiplier * self._filter_multiplier * 4, self._num_classes, 6, 6) #96 / 16
+            ASPP (self._filter_multiplier * 4, self._num_classes, 6, 6) #96 / 16
         )
         self.aspp_32 = nn.Sequential (
-            ASPP (self._block_multiplier * self._filter_multiplier * 8, self._num_classes, 3, 3) #96 / 32
+            ASPP (self._filter_multiplier * 8, self._num_classes, 3, 3) #96 / 32
         )
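
These two hunks change the channel bookkeeping: the stem width is now pinned to 128 instead of being derived from the multipliers, and each ASPP head sizes its input as filter_multiplier times the scale factor rather than block_multiplier * filter_multiplier times the scale factor. A minimal sketch of the resulting in-channel arithmetic, assuming for illustration filter_multiplier=8 and block_multiplier=5 (the actual defaults live elsewhere in the repository and are not visible in this diff):

```python
# Hypothetical multiplier values, for illustration only.
filter_multiplier = 8
block_multiplier = 5

for scale, rate in [(4, 24), (8, 12), (16, 6), (32, 3)]:
    old_in = block_multiplier * filter_multiplier * (scale // 4)
    new_in = filter_multiplier * (scale // 4)
    print(f"ASPP at 1/{scale}: in_channels {old_in} -> {new_in}, dilation {rate}")
```

Under the same assumed values, the derived stem width would have been 8 * 5 = 40, so hard-coding C_initial = 128 widens the stem independently of the search hyperparameters.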


10 changes: 4 additions & 6 deletions cell_level_search.py
@@ -29,16 +29,15 @@ def __init__(self, steps, block_multiplier, prev_prev_fmultiplier,
                  filter_multiplier):

         super(Cell, self).__init__()
-        # self.C_in = block_multiplier * filter_multiplier * block_multiplier
-        # self.C_out = filter_multiplier * block_multiplier
+
         self.C_in = block_multiplier * filter_multiplier
         self.C_out = filter_multiplier
+
+        self.C_prev_prev = int(prev_prev_fmultiplier * block_multiplier)
         self._prev_fmultiplier_same = prev_fmultiplier_same
-        # self.C_prev_prev = int(prev_prev_fmultiplier)

         if prev_fmultiplier_down is not None:
             self.C_prev_down = int(prev_fmultiplier_down * block_multiplier)
-            # self.C_prev_down = prev_fmultiplier_down
             self.preprocess_down = FactorizedReduce(
                 self.C_prev_down, self.C_out, affine=False)
         if prev_fmultiplier_same is not None:
@@ -66,7 +65,7 @@ def __init__(self, steps, block_multiplier, prev_prev_fmultiplier,
                 op = MixedOp(self.C_out, stride)
                 self._ops.append(op)

-        self.ReLUConvBN = ReLUConvBN(self.C_in, self.C_out, 1, 1, 0)
+        #self.ReLUConvBN = ReLUConvBN(self.C_in, self.C_out, 1, 1, 0)

     def forward(self, s0, s1_down, s1_same, s1_up, n_alphas):

@@ -116,6 +115,5 @@ def forward(self, s0, s1_down, s1_same, s1_up, n_alphas):
             states.append(s)

         concat_feature = torch.cat(states[-self.block_multiplier:], dim=1)
-        # final_concates.append(self.ReLUConvBN(concat_feature))
         final_concates.append(concat_feature)
         return final_concates
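
With the trailing 1x1 ReLUConvBN projection commented out, the cell now returns the raw concatenation of its last block_multiplier intermediate states, so its output carries block_multiplier * filter_multiplier channels rather than filter_multiplier. A shape-only sketch of that concatenation, with assumed sizes standing in for real MixedOp outputs:

```python
import torch

# Assumed sizes, for illustration; the real values come from the search config.
block_multiplier, filter_multiplier = 5, 8
states = [torch.randn(2, filter_multiplier, 32, 32) for _ in range(block_multiplier + 2)]

# Concatenation along the channel dimension sums the per-block widths:
# block_multiplier * filter_multiplier = 40 channels here.
concat_feature = torch.cat(states[-block_multiplier:], dim=1)
print(concat_feature.shape)  # torch.Size([2, 40, 32, 32])
```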
8 changes: 7 additions & 1 deletion operations.py
@@ -31,7 +31,8 @@ def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=T
         super(DilConv, self).__init__()
         self.op = nn.Sequential(
             nn.ReLU(inplace=False),
-            nn.Conv2d(C_in, C_out, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False),
+            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, dilation=dilation, bias=False),
+            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
             nn.BatchNorm2d(C_out, affine=affine),
         )
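
The rewritten DilConv factors the single dilated convolution into a depthwise dilated convolution (groups=C_in) followed by a 1x1 pointwise convolution, i.e. the depthwise-separable form used in DARTS-style operation sets. A quick parameter-count comparison, with assumed channel and kernel sizes:

```python
import torch.nn as nn

C_in, C_out, k = 64, 64, 3  # assumed sizes, for illustration

plain = nn.Conv2d(C_in, C_out, k, padding=2, dilation=2, bias=False)
separable = nn.Sequential(
    nn.Conv2d(C_in, C_in, k, padding=2, dilation=2, groups=C_in, bias=False),  # depthwise
    nn.Conv2d(C_in, C_out, kernel_size=1, bias=False),                         # pointwise
)

n_params = lambda m: sum(p.numel() for p in m.parameters())
print(n_params(plain), n_params(separable))  # 36864 vs 4672
```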

@@ -46,10 +47,15 @@ def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
         self.op = nn.Sequential(
             nn.ReLU(inplace=False),
             nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False),
+            nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
+            nn.BatchNorm2d(C_in, affine=affine),
+            nn.ReLU(inplace=False),
+            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding, groups=C_in, bias=False),
             nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
             nn.BatchNorm2d(C_out, affine=affine),
         )

+
     def forward(self, x):
         return self.op(x)
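
This hunk grows the separable convolution (presumably SepConv, as in DARTS) from one depthwise/pointwise pair to two, with only the first pair carrying the stride. A minimal shape check, assuming the class is importable from operations.py under that name:

```python
import torch
from operations import SepConv  # assumed module/class name

x = torch.randn(1, 32, 56, 56)  # assumed input, for illustration
op = SepConv(C_in=32, C_out=64, kernel_size=3, stride=2, padding=1)
print(op(x).shape)  # expect torch.Size([1, 64, 28, 28])
```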

