diff --git a/test/optests_failures_dict.json b/test/optests_failures_dict.json
index 3bad0bbb027..2d01571374f 100644
--- a/test/optests_failures_dict.json
+++ b/test/optests_failures_dict.json
@@ -1,5 +1,16 @@
 {
     "_description": "This is a dict containing failures for tests autogenerated by generate_opcheck_tests. For more details, please see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit",
     "_version": 1,
-    "data": {}
+    "data": {
+        "torchvision::roi_align": {
+            "TestRoIAlign.test_aot_dispatch_dynamic__test_mps_error_inputs": {
+                "comment": "RuntimeError: MPS does not support roi_align backward with float16 inputs",
+                "status": "xfail"
+            },
+            "TestRoIAlign.test_autograd_registration__test_mps_error_inputs": {
+                "comment": "NotImplementedError: autograd_registration_check: NYI devices other than CPU/CUDA, got {'mps'}",
+                "status": "xfail"
+            }
+        }
+    }
 }
diff --git a/test/test_ops.py b/test/test_ops.py
index 6d80f037b88..d1cfce5919c 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -474,6 +474,7 @@ def test_boxes_shape(self):
     @pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64))  # , ids=str)
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois_dtype=None):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -491,6 +492,7 @@ def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois
     @pytest.mark.parametrize("deterministic", (True, False))
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.half))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
+    @pytest.mark.opcheck_only_one()
     def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
         with torch.cuda.amp.autocast():
             self.test_forward(
@@ -506,6 +508,7 @@ def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
     @pytest.mark.parametrize("device", cpu_and_cuda_and_mps())
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_backward(self, seed, device, contiguous, deterministic):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -520,6 +523,7 @@ def _make_rois(self, img_size, num_imgs, dtype, num_rois=1000):
     @pytest.mark.parametrize("aligned", (True, False))
     @pytest.mark.parametrize("scale, zero_point", ((1, 0), (2, 10), (0.1, 50)))
     @pytest.mark.parametrize("qdtype", (torch.qint8, torch.quint8, torch.qint32))
+    @pytest.mark.opcheck_only_one()
     def test_qroialign(self, aligned, scale, zero_point, qdtype):
         """Make sure quantized version of RoIAlign is close to float version"""
         pool_size = 5
@@ -589,6 +593,15 @@ def test_jit_boxes_list(self):
         self._helper_jit_boxes_list(model)
 
 
+optests.generate_opcheck_tests(
+    testcase=TestRoIAlign,
+    namespaces=["torchvision"],
+    failures_dict_path=os.path.join(os.path.dirname(__file__), "optests_failures_dict.json"),
+    additional_decorators=[],
+    test_utils=OPTESTS,
+)
+
+
 class TestPSRoIAlign(RoIOpTester):
     mps_backward_atol = 5e-2