Skip to content

Commit

Permalink
[fbsync] Port to_tensor tests in test_transforms to pytest (#3966)
Browse files Browse the repository at this point in the history
Reviewed By: NicolasHug

Differential Revision: D29027351

fbshipit-source-id: 23fd0b586b06e4448b6ca90577ecd3b7cb9af7fb
  • Loading branch information
fmassa authored and facebook-github-bot committed Jun 10, 2021
1 parent 2bf475c commit 7f40f2d
Showing 1 changed file with 96 additions and 81 deletions.
177 changes: 96 additions & 81 deletions test/test_transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -307,54 +307,6 @@ def test_random_crop(self):
with self.assertRaisesRegex(ValueError, r"Required crop size .+ is larger then input image size .+"):
t(img)

def test_to_tensor(self):
    """ToTensor: PIL images and HWC ndarrays become CHW float tensors in [0, 1]."""
    test_channels = [1, 3, 4]
    height, width = 4, 4
    trans = transforms.ToTensor()

    # A plain Python list is not a supported input type.
    with self.assertRaises(TypeError):
        trans(np.random.rand(1, height, width).tolist())

    # BUG FIX: both unsupported-rank inputs were previously inside a single
    # `with` block, so the second call never ran (the first raised and the
    # context manager swallowed it). Each must get its own context manager.
    with self.assertRaises(ValueError):
        trans(np.random.rand(height))  # 1-D input is rejected
    with self.assertRaises(ValueError):
        trans(np.random.rand(1, 1, height, width))  # 4-D input is rejected

    for channels in test_channels:
        # Tensor -> PIL -> tensor round-trip should reproduce the input.
        input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        torch.testing.assert_close(output, input_data, check_stride=False)

        # uint8 HWC ndarray -> CHW tensor rescaled into [0, 1].
        ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
        output = trans(ndarray)
        expected_output = ndarray.transpose((2, 0, 1)) / 255.0
        torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)

        # float32 HWC ndarray -> CHW tensor, values untouched.
        ndarray = np.random.rand(height, width, channels).astype(np.float32)
        output = trans(ndarray)
        expected_output = ndarray.transpose((2, 0, 1))
        torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)

    # separate test for mode '1' (1-bit) PIL images
    input_data = torch.ByteTensor(1, height, width).bernoulli_()
    img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
    output = trans(img)
    torch.testing.assert_close(input_data, output, check_dtype=False, check_stride=False)

def test_to_tensor_with_other_default_dtypes(self):
    """ToTensor produces the current torch default dtype for uint8 PIL input."""
    current_def_dtype = torch.get_default_dtype()

    t = transforms.ToTensor()
    np_arr = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
    img = Image.fromarray(np_arr)

    # FIX: restore the default dtype even when an assertion fails; otherwise a
    # failure mid-loop leaks the modified default dtype into every later test.
    try:
        for dtype in [torch.float16, torch.float, torch.double]:
            torch.set_default_dtype(dtype)
            res = t(img)
            self.assertTrue(res.dtype == dtype, msg=f"{res.dtype} vs {dtype}")
    finally:
        torch.set_default_dtype(current_def_dtype)

def test_max_value(self):
for dtype in int_dtypes():
self.assertEqual(F_t._max_value(dtype), torch.iinfo(dtype).max)
Expand Down Expand Up @@ -492,39 +444,6 @@ def test_accimage_to_tensor(self):

torch.testing.assert_close(output, expected_output)

def test_pil_to_tensor(self):
    """PILToTensor converts PIL images to uint8 CHW tensors without rescaling."""
    test_channels = [1, 3, 4]
    height, width = 4, 4
    trans = transforms.PILToTensor()

    # BUG FIX: both unsupported inputs used to share one `with` block, so the
    # second call was unreachable (the first raised and the context manager
    # swallowed it). Check each input in its own context manager.
    with self.assertRaises(TypeError):
        trans(np.random.rand(1, height, width).tolist())
    with self.assertRaises(TypeError):
        trans(np.random.rand(1, height, width))

    for channels in test_channels:
        # uint8 tensor -> PIL -> tensor round-trip is exact.
        input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        torch.testing.assert_close(input_data, output, check_stride=False)

        # uint8 HWC ndarray: output is the CHW transpose, no value rescaling.
        input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        expected_output = input_data.transpose((2, 0, 1))
        torch.testing.assert_close(output.numpy(), expected_output)

        input_data = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32))
        img = transforms.ToPILImage()(input_data)  # CHW -> HWC and (* 255).byte()
        output = trans(img)  # HWC -> CHW
        expected_output = (input_data * 255).byte()
        torch.testing.assert_close(output, expected_output, check_stride=False)

    # separate test for mode '1' (1-bit) PIL images
    input_data = torch.ByteTensor(1, height, width).bernoulli_()
    img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
    output = trans(img).view(torch.uint8).bool().to(torch.uint8)
    torch.testing.assert_close(input_data, output, check_stride=False)

@unittest.skipIf(accimage is None, 'accimage not available')
def test_accimage_pil_to_tensor(self):
trans = transforms.PILToTensor()
Expand Down Expand Up @@ -1219,6 +1138,102 @@ def test_random_erasing(self):
t.__repr__()


@pytest.mark.parametrize('channels', [1, 3, 4])
def test_to_tensor(channels):
    """Check ToTensor on PIL images and ndarrays with the given channel count."""
    height, width = 4, 4
    to_tensor = transforms.ToTensor()

    # Tensor -> PIL -> tensor round-trip should be lossless.
    tensor_in = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
    pil_img = transforms.ToPILImage()(tensor_in)
    torch.testing.assert_close(to_tensor(pil_img), tensor_in, check_stride=False)

    # uint8 HWC array: converted to CHW and rescaled into [0, 1].
    arr_u8 = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
    expected = arr_u8.transpose((2, 0, 1)) / 255.0
    torch.testing.assert_close(to_tensor(arr_u8).numpy(), expected, check_stride=False, check_dtype=False)

    # float32 HWC array: only transposed to CHW, values untouched.
    arr_f32 = np.random.rand(height, width, channels).astype(np.float32)
    expected = arr_f32.transpose((2, 0, 1))
    torch.testing.assert_close(to_tensor(arr_f32).numpy(), expected, check_stride=False, check_dtype=False)

    # separate test for mode '1' (1-bit) PIL images
    binary_in = torch.ByteTensor(1, height, width).bernoulli_()
    pil_bin = transforms.ToPILImage()(binary_in.mul(255)).convert('1')
    torch.testing.assert_close(binary_in, to_tensor(pil_bin), check_dtype=False, check_stride=False)


def test_to_tensor_errors():
    """ToTensor rejects plain lists and arrays whose rank is not 2 or 3."""
    height, width = 4, 4
    to_tensor = transforms.ToTensor()

    bad_inputs = [
        (np.random.rand(1, height, width).tolist(), TypeError),  # plain Python list
        (np.random.rand(height), ValueError),                    # 1-D array
        (np.random.rand(1, 1, height, width), ValueError),       # 4-D array
    ]
    for bad_input, expected_error in bad_inputs:
        with pytest.raises(expected_error):
            to_tensor(bad_input)


@pytest.mark.parametrize('dtype', [torch.float16, torch.float, torch.double])
def test_to_tensor_with_other_default_dtypes(dtype):
    """ToTensor produces the current torch default dtype for uint8 PIL input."""
    current_def_dtype = torch.get_default_dtype()

    t = transforms.ToTensor()
    np_arr = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
    img = Image.fromarray(np_arr)

    # FIX: restore the default dtype even when the assertion fails; otherwise a
    # failure here leaks the modified default dtype into every later test.
    try:
        torch.set_default_dtype(dtype)
        res = t(img)
        assert res.dtype == dtype, f"{res.dtype} vs {dtype}"
    finally:
        torch.set_default_dtype(current_def_dtype)


@pytest.mark.parametrize('channels', [1, 3, 4])
def test_pil_to_tensor(channels):
    """PILToTensor keeps pixel values as-is while converting PIL images to CHW tensors."""
    height, width = 4, 4
    pil_to_tensor = transforms.PILToTensor()

    # uint8 tensor -> PIL -> tensor round-trip is exact.
    tensor_in = torch.ByteTensor(channels, height, width).random_(0, 255)
    pil_img = transforms.ToPILImage()(tensor_in)
    torch.testing.assert_close(tensor_in, pil_to_tensor(pil_img), check_stride=False)

    # uint8 HWC ndarray: output is the CHW transpose, no value rescaling.
    arr_u8 = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
    pil_img = transforms.ToPILImage()(arr_u8)
    torch.testing.assert_close(pil_to_tensor(pil_img).numpy(), arr_u8.transpose((2, 0, 1)))

    # float tensor: ToPILImage does CHW -> HWC and (* 255).byte(), and
    # PILToTensor converts back HWC -> CHW.
    float_in = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32))
    pil_img = transforms.ToPILImage()(float_in)
    torch.testing.assert_close(pil_to_tensor(pil_img), (float_in * 255).byte(), check_stride=False)

    # separate test for mode '1' (1-bit) PIL images
    binary_in = torch.ByteTensor(1, height, width).bernoulli_()
    pil_bin = transforms.ToPILImage()(binary_in.mul(255)).convert('1')
    out = pil_to_tensor(pil_bin).view(torch.uint8).bool().to(torch.uint8)
    torch.testing.assert_close(binary_in, out, check_stride=False)


def test_pil_to_tensor_errors():
    """PILToTensor accepts only PIL images: lists and ndarrays raise TypeError."""
    height, width = 4, 4
    pil_to_tensor = transforms.PILToTensor()

    for bad_input in (np.random.rand(1, height, width).tolist(),
                      np.random.rand(1, height, width)):
        with pytest.raises(TypeError):
            pil_to_tensor(bad_input)


def test_randomresized_params():
height = random.randint(24, 32) * 2
width = random.randint(24, 32) * 2
Expand Down

0 comments on commit 7f40f2d

Please sign in to comment.