diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index b0168bbb593..5c2850e8834 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -201,7 +201,7 @@ def compute_loss(self, targets, head_outputs, anchors, matched_idxs): losses.append(torch.nn.functional.l1_loss( bbox_regression_per_image, target_regression, - size_average=False + reduction='sum' ) / max(1, num_foreground)) return _sum(losses) / max(1, len(targets)) diff --git a/torchvision/transforms/_functional_video.py b/torchvision/transforms/_functional_video.py index 37fcb359ad4..56eaa436188 100644 --- a/torchvision/transforms/_functional_video.py +++ b/torchvision/transforms/_functional_video.py @@ -23,7 +23,7 @@ def crop(clip, i, j, h, w): def resize(clip, target_size, interpolation_mode): assert len(target_size) == 2, "target size should be tuple (height, width)" return torch.nn.functional.interpolate( - clip, size=target_size, mode=interpolation_mode + clip, size=target_size, mode=interpolation_mode, align_corners=None if interpolation_mode in ("nearest", "area") else False )