-
Notifications
You must be signed in to change notification settings - Fork 7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Unified input for resize op #2394
Changes from 3 commits
f5a9776
7936258
965ad6b
8f48a02
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,3 +1,5 @@ | ||
from PIL.Image import NEAREST, BILINEAR, BICUBIC | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. nit: this doesn't seem to be used? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Correct. Will remove it |
||
|
||
import torch | ||
from torch import Tensor | ||
from torch.jit.annotations import List, BroadcastingList2 | ||
|
@@ -8,6 +10,7 @@ def _is_tensor_a_torch_image(x: Tensor) -> bool: | |
|
||
|
||
def _get_image_size(img: Tensor) -> List[int]:
    """Returns (w, h) of tensor image"""
    if not _is_tensor_a_torch_image(img):
        raise TypeError("Unexpected type {}".format(type(img)))
    # Tensor images are (..., H, W); report width first to match PIL's size order.
    return [img.shape[-1], img.shape[-2]]
|
@@ -433,6 +436,7 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con | |
|
||
if isinstance(padding, int): | ||
if torch.jit.is_scripting(): | ||
# This maybe unreachable | ||
raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]") | ||
pad_left = pad_right = pad_top = pad_bottom = padding | ||
elif len(padding) == 1: | ||
|
@@ -480,3 +484,95 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con | |
img = img.to(out_dtype) | ||
|
||
return img | ||
|
||
|
||
def resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor:
    r"""Resize the input Tensor to the given size.

    Args:
        img (Tensor): Image to be resized.
        size (int or tuple or list): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e, if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
            In torchscript mode size as a single int is not supported, use a tuple or
            list of length 1: ``[size, ]``.
        interpolation (int, optional): Desired interpolation. Default is bilinear (=2).

    Returns:
        Tensor: Resized image.

    Raises:
        TypeError: If ``img`` is not a tensor image, or ``size``/``interpolation``
            have an inappropriate type.
        ValueError: If ``interpolation`` is not one of the supported modes, or
            ``size`` is a sequence with an invalid length.
    """
    if not _is_tensor_a_torch_image(img):
        raise TypeError("tensor is not a torch image.")

    if not isinstance(size, (int, tuple, list)):
        raise TypeError("Got inappropriate size arg")
    if not isinstance(interpolation, int):
        raise TypeError("Got inappropriate interpolation arg")

    # PIL interpolation codes that have a torch.nn.functional.interpolate equivalent.
    _interpolation_modes = {
        0: "nearest",
        2: "bilinear",
        3: "bicubic",
    }

    if interpolation not in _interpolation_modes:
        raise ValueError("This interpolation mode is unsupported with Tensor input")

    if isinstance(size, tuple):
        size = list(size)

    if isinstance(size, list) and len(size) not in [1, 2]:
        # NOTE: message fixed to talk about size (it previously said "Padding").
        raise ValueError("Size must be an int or a 1 or 2 element tuple/list, not a "
                         "{} element tuple/list".format(len(size)))

    w, h = _get_image_size(img)

    if isinstance(size, int):
        size_w, size_h = size, size
    elif len(size) < 2:
        size_w, size_h = size[0], size[0]
    else:
        # size follows the documented (h, w) convention.
        size_w, size_h = size[1], size[0]

    if isinstance(size, int) or len(size) < 2:
        # A single value targets the smaller edge; rescale the other edge to
        # preserve the aspect ratio.
        if w < h:
            size_h = int(size_w * h / w)
        else:
            size_w = int(size_h * w / h)

        # Nothing to do when the smaller edge already has the requested size.
        # This shortcut is only valid in the aspect-preserving case; with an
        # explicit (h, w) both dimensions must always be honored.
        if (w <= h and w == size_w) or (h <= w and h == size_h):
            return img

    # interpolate expects batched NCHW input; add a batch dim if needed.
    need_squeeze = False
    if img.ndim < 4:
        img = img.unsqueeze(dim=0)
        need_squeeze = True

    mode = _interpolation_modes[interpolation]

    # interpolate only supports floating point input; remember the original
    # dtype so the result can be cast back.
    out_dtype = img.dtype
    need_cast = False
    if img.dtype not in (torch.float32, torch.float64):
        need_cast = True
        img = img.to(torch.float32)

    # Define align_corners to avoid warnings
    align_corners = False if mode in ["bilinear", "bicubic"] else None

    img = torch.nn.functional.interpolate(img, size=(size_h, size_w), mode=mode, align_corners=align_corners)

    if need_squeeze:
        img = img.squeeze(dim=0)

    if need_cast:
        if mode == "bicubic":
            # Bicubic interpolation can overshoot the input range; clamp
            # before casting back to an integer dtype to avoid wrap-around.
            img = img.clamp(min=0, max=255)
        img = img.to(out_dtype)

    return img
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Reviewer: It's a pity that the behavior of `nearest` interpolation differs between implementations; it could be worth opening an issue in PyTorch to mention this. I also believe that PIL and OpenCV are consistent with each other, which would make a case for changing the PyTorch implementation to be more consistent with them.

Author: Let me check the behavior between PIL and OpenCV first, and then we can decide what to propose for PyTorch.