diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index bdfd8f78b22e..9d85eacfd040 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -3844,6 +3844,32 @@ def inplace_copy(self, inputs, input_types):
         # Return
         return _op.scatter_nd(source, indices, values, mode)
 
+    def linalg_vector_norm(self, inputs, input_types):
+        data = inputs[0]
+        dtype = input_types[0]
+        ord = inputs[1]
+        dim = inputs[2]
+        keepdim = inputs[3]
+
+        assert dtype == "float32" or dtype == "float64"
+
+        if ord == 0:
+            return _op.reduce.sum(
+                _op.cast(_op.not_equal(data, _expr.const(0, dtype=dtype)), dtype=dtype),
+                axis=dim,
+                keepdims=keepdim,
+            )
+        elif ord == np.inf:
+            return _op.reduce.max(_op.abs(data), axis=dim, keepdims=keepdim)
+        elif ord == np.NINF:
+            return _op.reduce.min(_op.abs(data), axis=dim, keepdims=keepdim)
+        reci_ord = _expr.const(1.0 / ord, dtype=dtype)
+        ord = _expr.const(ord, dtype=dtype)
+        return _op.power(
+            _op.reduce.sum(_op.power(_op.abs(data), ord), axis=dim, keepdims=keepdim),
+            reci_ord,
+        )
+
     # Operator mappings
     def create_convert_map(self):
         self.convert_map = {
@@ -4118,6 +4144,7 @@ def create_convert_map(self):
             "aten::_weight_norm": self.weight_norm,
             "aten::copy_": self.inplace_copy,
             "aten::swapaxes": self.transpose,
+            "aten::linalg_vector_norm": self.linalg_vector_norm,
         }
 
     def update_convert_map(self, custom_map):
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index 894bea60ed46..424e30bc2214 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -1780,7 +1780,6 @@ def forward(self, *args):
     verify_model(LogSoftmax1().float().eval(), input_data=input_data)
 
 
-@pytest.mark.skip(reason="unsupported op aten::linalg_vector_norm")
 @tvm.testing.uses_gpu
 def test_forward_norm():
     """test_forward_norm"""
@@ -1840,7 +1839,6 @@ def forward(self, *args):
     verify_model(Norm10().float().eval(), input_data=input_data)
 
 
-@pytest.mark.skip(reason="unsupported op aten::linalg_vector_norm")
 @tvm.testing.uses_gpu
 def test_forward_frobenius_norm():
     """test_forward_frobenius_norm"""
@@ -5432,6 +5430,31 @@ def forward(self, *args):
     verify_model(Swapaxes3().float().eval(), input_data=input_data)
 
 
+def test_linalg_vector_norm():
+    """test_linalg_vector_norm"""
+    torch.set_grad_enabled(False)
+
+    def test_fn(order):
+        return lambda x: torch.linalg.vector_norm(x, ord=order)
+
+    input_shape = [3, 3]
+
+    input_data = torch.rand(input_shape).float()
+    verify_model(test_fn(order=2), input_data=input_data)
+    verify_model(test_fn(order=3.5), input_data=input_data)
+    verify_model(test_fn(order=np.inf), input_data=input_data)
+    verify_model(test_fn(order=np.NINF), input_data=input_data)
+    verify_model(test_fn(order=0), input_data=input_data)
+
+    # Also test on double
+    input_data = torch.rand(input_shape).double()
+    verify_model(test_fn(order=2), input_data=input_data)
+    verify_model(test_fn(order=3.5), input_data=input_data)
+    verify_model(test_fn(order=np.inf), input_data=input_data)
+    verify_model(test_fn(order=np.NINF), input_data=input_data)
+    verify_model(test_fn(order=0), input_data=input_data)
+
+
 class TestSetSpan:
     """test structural equal between translated / hand-crafted relay IR with span tagged."""
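
For reference, the converter above lowers `aten::linalg_vector_norm` to the textbook definition `||x||_p = (sum_i |x_i|^p)^(1/p)`, with the special cases `ord == 0` (count of non-zero elements) and `ord == +/-inf` (max/min absolute value). A minimal sketch of those same reductions in plain NumPy, cross-checked against `torch.linalg.vector_norm` over the orders exercised by the new test; the helper name `reference_vector_norm` is hypothetical and not part of the patch:

```python
import numpy as np
import torch


def reference_vector_norm(x: np.ndarray, ord: float) -> float:
    """The reductions used by the Relay converter, in plain NumPy."""
    if ord == 0:
        # Count of non-zero elements.
        return float(np.sum(x != 0))
    if ord == np.inf:
        return float(np.max(np.abs(x)))
    if ord == -np.inf:
        return float(np.min(np.abs(x)))
    # General case, matching the _op.power path: (sum |x|^ord) ** (1/ord).
    return float(np.sum(np.abs(x) ** ord) ** (1.0 / ord))


x = np.random.rand(3, 3).astype("float32")
for order in (2, 3.5, np.inf, -np.inf, 0):
    expected = torch.linalg.vector_norm(torch.from_numpy(x), ord=order).item()
    np.testing.assert_allclose(reference_vector_norm(x, order), expected, rtol=1e-5)
```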