
Commit ecc2c7b
Remove named arguments where possible (#7105)
* Remove named arguments where possible

Speed improvements.

* Update yolo.py

* Update yolo.py

* Update yolo.py
glenn-jocher authored Mar 22, 2022
1 parent 6134ec5 commit ecc2c7b
Showing 2 changed files with 12 additions and 12 deletions.
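
For context on the "Speed improvements" note: CPython (and PyTorch's C-level argument parser) resolves keyword arguments at every call, so on hot paths like these forward methods, positional arguments shave a small constant overhead per call. A minimal, hypothetical timing sketch of the idea (illustrative only; the gap is small and varies by machine and PyTorch version):

```python
# Hypothetical micro-benchmark: keyword vs. positional argument overhead.
import timeit

import torch

a = torch.randn(1, 64, 32, 32)
b = torch.randn(1, 64, 32, 32)

t_kw = timeit.timeit(lambda: torch.cat((a, b), dim=1), number=100_000)   # keyword
t_pos = timeit.timeit(lambda: torch.cat((a, b), 1), number=100_000)      # positional
print(f'keyword:    {t_kw:.3f}s')
print(f'positional: {t_pos:.3f}s')
```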
models/common.py (14 changes: 7 additions & 7 deletions)
@@ -121,7 +121,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, nu
     def forward(self, x):
         y1 = self.cv3(self.m(self.cv1(x)))
         y2 = self.cv2(x)
-        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))


 class C3(nn.Module):
@@ -136,7 +136,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, nu
         # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))

     def forward(self, x):
-        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
+        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))


 class C3TR(C3):
@@ -527,7 +527,7 @@ def forward(self, imgs, size=640, augment=False, profile=False):
         p = next(self.model.parameters()) if self.pt else torch.zeros(1)  # for device and type
         autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
         if isinstance(imgs, torch.Tensor):  # torch
-            with amp.autocast(enabled=autocast):
+            with amp.autocast(autocast):
                 return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference

         # Pre-process
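
Dropping the keyword here is behavior-preserving because `enabled` is the first positional parameter of `torch.cuda.amp.autocast` (imported as `amp` in common.py), so both spellings configure AMP identically:

```python
from torch.cuda import amp

# Equivalent context managers; the second avoids a keyword lookup.
with amp.autocast(enabled=True):   # before: explicit keyword
    pass
with amp.autocast(True):           # after: positional, as in this commit
    pass
```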
@@ -550,19 +550,19 @@ def forward(self, imgs, size=640, augment=False, profile=False):
             shape1.append([y * g for y in s])
             imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
         shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)]  # inf shape
-        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
+        x = [letterbox(im, shape1, auto=False)[0] for im in imgs]  # pad
         x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
         x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
         t.append(time_sync())

-        with amp.autocast(enabled=autocast):
+        with amp.autocast(autocast):
             # Inference
             y = self.model(x, augment, profile)  # forward
             t.append(time_sync())

             # Post-process
-            y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes,
-                                    agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det)  # NMS
+            y = non_max_suppression(y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic,
+                                    self.multi_label, max_det=self.max_det)  # NMS
             for i in range(n):
                 scale_coords(shape1, y[i][:, :4], shape0[i])
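
These positional rewrites depend on parameter order in the callees. The signature of `non_max_suppression` in utils/general.py at the time was approximately the stub below; note that `max_det` must stay a keyword argument because the positional slot after `multi_label` is `labels`, which this caller never sets. Likewise, `new_shape` is the first parameter after the image in `letterbox`, so it can be passed positionally.

```python
# Stub of the assumed utils/general.py signature (body elided):
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None,
                        agnostic=False, multi_label=False, labels=(), max_det=300):
    ...
```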

models/yolo.py (10 changes: 5 additions & 5 deletions)
@@ -71,13 +71,13 @@ def forward(self, x):

     def _make_grid(self, nx=20, ny=20, i=0):
         d = self.anchors[i].device
+        shape = 1, self.na, ny, nx, 2  # grid shape
         if check_version(torch.__version__, '1.10.0'):  # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility
-            yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij')
+            yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d), indexing='ij')
         else:
-            yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)])
-        grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float()
-        anchor_grid = (self.anchors[i].clone() * self.stride[i]) \
-            .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float()
+            yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d))
+        grid = torch.stack((xv, yv), 2).expand(shape).float()
+        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape).float()
         return grid, anchor_grid
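
A standalone sketch of what the refactored `_make_grid` computes, with arbitrary example sizes (assumes torch>=1.10 for the `indexing` keyword). Dropping `.clone()` is safe because `self.anchors[i] * self.stride[i]` already allocates a new tensor:

```python
import torch

na, ny, nx = 3, 4, 6          # anchors per level, grid height, grid width (example values)
stride = 8.0                  # stand-in for self.stride[i]
anchors = torch.rand(na, 2)   # stand-in for self.anchors[i]

shape = 1, na, ny, nx, 2      # computed once, reused by both expand() calls
yv, xv = torch.meshgrid(torch.arange(ny), torch.arange(nx), indexing='ij')
grid = torch.stack((xv, yv), 2).expand(shape).float()             # per-cell (x, y) offsets
anchor_grid = (anchors * stride).view((1, na, 1, 1, 2)).expand(shape).float()
print(grid.shape, anchor_grid.shape)  # torch.Size([1, 3, 4, 6, 2]) for both
```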


