[fbsync] Add manual opcheck tests for roi ops (#8144)
Reviewed By: vmoens

Differential Revision: D52539014

fbshipit-source-id: 6fc2eb107216f1c3933e199a75dc51e620d7c093
NicolasHug authored and facebook-github-bot committed Jan 16, 2024
1 parent e59dc0f commit b0ff741
Showing 2 changed files with 38 additions and 21 deletions.
13 changes: 1 addition & 12 deletions test/optests_failures_dict.json
@@ -1,16 +1,5 @@
 {
     "_description": "This is a dict containing failures for tests autogenerated by generate_opcheck_tests. For more details, please see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit",
     "_version": 1,
-    "data": {
-        "torchvision::roi_align": {
-            "TestRoIAlign.test_aot_dispatch_dynamic__test_mps_error_inputs": {
-                "comment": "RuntimeError: MPS does not support roi_align backward with float16 inputs",
-                "status": "xfail"
-            },
-            "TestRoIAlign.test_autograd_registration__test_mps_error_inputs": {
-                "comment": "NotImplementedError: autograd_registration_check: NYI devices other than CPU/CUDA, got {'mps'}",
-                "status": "xfail"
-            }
-        }
-    }
+    "data": {}
 }
46 changes: 37 additions & 9 deletions test/test_ops.py
@@ -610,15 +610,6 @@ def test_jit_boxes_list(self):
         self._helper_jit_boxes_list(model)
 
 
-optests.generate_opcheck_tests(
-    testcase=TestRoIAlign,
-    namespaces=["torchvision"],
-    failures_dict_path=os.path.join(os.path.dirname(__file__), "optests_failures_dict.json"),
-    additional_decorators=[],
-    test_utils=OPTESTS,
-)
-
-
 class TestPSRoIAlign(RoIOpTester):
     mps_backward_atol = 5e-2
 
@@ -676,6 +667,43 @@ def test_boxes_shape(self):
         self._helper_boxes_shape(ops.ps_roi_align)
 
 
+@pytest.mark.parametrize(
+    "op",
+    (
+        torch.ops.torchvision.roi_pool,
+        torch.ops.torchvision.ps_roi_pool,
+        torch.ops.torchvision.roi_align,
+        torch.ops.torchvision.ps_roi_align,
+    ),
+)
+@pytest.mark.parametrize("dtype", (torch.float16, torch.float32, torch.float64))
+@pytest.mark.parametrize("device", cpu_and_cuda())
+@pytest.mark.parametrize("requires_grad", (True, False))
+def test_roi_opcheck(op, dtype, device, requires_grad):
+    # This manually calls opcheck() on the roi ops. We do that instead of
+    # relying on optests.generate_opcheck_tests() as is done e.g. for nms,
+    # because pytest and generate_opcheck_tests() don't interact well when it
+    # comes to skipping tests - and these ops need to skip the MPS tests,
+    # since dynamic shapes aren't supported on MPS yet.
+    rois = torch.tensor(
+        [[0, 0, 0, 9, 9], [0, 0, 5, 4, 9], [0, 5, 5, 9, 9], [1, 0, 0, 9, 9]],
+        dtype=dtype,
+        device=device,
+        requires_grad=requires_grad,
+    )
+    pool_size = 5
+    num_channels = 2 * (pool_size**2)
+    x = torch.rand(2, num_channels, 10, 10, dtype=dtype, device=device)
+
+    kwargs = dict(rois=rois, spatial_scale=1, pooled_height=pool_size, pooled_width=pool_size)
+    if op in (torch.ops.torchvision.roi_align, torch.ops.torchvision.ps_roi_align):
+        kwargs["sampling_ratio"] = -1
+    if op is torch.ops.torchvision.roi_align:
+        kwargs["aligned"] = True
+
+    optests.opcheck(op, args=(x,), kwargs=kwargs)
+
+
 class TestMultiScaleRoIAlign:
     def make_obj(self, fmap_names=None, output_size=(7, 7), sampling_ratio=2, wrap=False):
         if fmap_names is None:

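For reference, a minimal standalone sketch of what the new test exercises for a single op/dtype/device combination - calling opcheck() directly on torchvision::roi_pool - could look like the snippet below. This is not part of the commit: it assumes torchvision is installed with its C++ ops built and that the internal optests module is importable from torch.testing._internal, as it is in test_ops.py; that import path is internal to PyTorch and may change between releases.

# Hedged sketch mirroring test_roi_opcheck for one case; not part of the commit.
import torch
import torchvision  # noqa: F401  # importing torchvision registers the torchvision::* ops
from torch.testing._internal import optests  # assumed import path for the internal optests module

pool_size = 5
x = torch.rand(2, 2 * pool_size**2, 10, 10, dtype=torch.float32)
# Each roi row is (batch_index, x1, y1, x2, y2).
rois = torch.tensor([[0, 0, 0, 9, 9]], dtype=torch.float32)

# opcheck() runs roughly the same family of checks that generate_opcheck_tests()
# would autogenerate (schema, autograd registration, fake-tensor / AOT-dispatch)
# against this one concrete call.
optests.opcheck(
    torch.ops.torchvision.roi_pool,
    args=(x,),
    kwargs=dict(rois=rois, spatial_scale=1, pooled_height=pool_size, pooled_width=pool_size),
)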