diff --git a/apps/android_rpc/tests/android_rpc_test.py b/apps/android_rpc/tests/android_rpc_test.py index 0c0f429b516f..ea9e13899922 100644 --- a/apps/android_rpc/tests/android_rpc_test.py +++ b/apps/android_rpc/tests/android_rpc_test.py @@ -76,7 +76,7 @@ def test_rpc_module(): time_f = f2.time_evaluator(f2.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op\n" % cost) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) # Compile the Graph for OpenCL target if test_opencl: @@ -99,7 +99,7 @@ def test_rpc_module(): time_f = f1.time_evaluator(f1.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op\n" % cost) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) # Compile the Graph for Vulkan target if test_vulkan: @@ -122,7 +122,7 @@ def test_rpc_module(): time_f = f1.time_evaluator(f1.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op\n" % cost) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) if __name__ == "__main__": diff --git a/apps/extension/tests/test_ext.py b/apps/extension/tests/test_ext.py index a01f97c349ca..994a673298f1 100644 --- a/apps/extension/tests/test_ext.py +++ b/apps/extension/tests/test_ext.py @@ -45,7 +45,7 @@ def check_llvm(): a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1) check_llvm() @@ -92,7 +92,7 @@ def check_llvm(): a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1) check_llvm() diff --git a/apps/howto_deploy/python_deploy.py b/apps/howto_deploy/python_deploy.py index 0a6780343d1e..9a692c863d8e 100644 --- a/apps/howto_deploy/python_deploy.py +++ b/apps/howto_deploy/python_deploy.py @@ -33,8 +33,8 @@ def verify(mod, fname): y = tvm.nd.array(np.zeros(N, dtype=np.float32)) # Invoke the function f(x, y) - np_x = x.asnumpy() - np_y = y.asnumpy() + np_x = x.numpy() + np_y = y.numpy() # Verify correctness of function assert np.all([xi + 1 == yi for xi, yi in zip(np_x, np_y)]) print("Finish verification...") diff --git a/apps/ios_rpc/tests/ios_rpc_mobilenet.py b/apps/ios_rpc/tests/ios_rpc_mobilenet.py index ee6ab5fd8363..a57db729d8a6 100644 --- a/apps/ios_rpc/tests/ios_rpc_mobilenet.py +++ b/apps/ios_rpc/tests/ios_rpc_mobilenet.py @@ -125,7 +125,7 @@ def run(mod, target): m.set_input("data", tvm.nd.array(image, dev)) m.run() tvm_output = m.get_output(0) - top1 = np.argmax(tvm_output.asnumpy()[0]) + top1 = np.argmax(tvm_output.numpy()[0]) print("TVM prediction top-1:", top1, synset[top1]) # evaluate diff --git a/apps/ios_rpc/tests/ios_rpc_test.py b/apps/ios_rpc/tests/ios_rpc_test.py index ede70cb982bd..0f81dcce929f 100644 --- a/apps/ios_rpc/tests/ios_rpc_test.py +++ b/apps/ios_rpc/tests/ios_rpc_test.py @@ -96,7 +96,7 @@ def test_rpc_module(): time_f = f1.time_evaluator(f1.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op" % cost) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) # CPU dev = remote.cpu(0) f2 = remote.load_module("cpu_lib.dylib") @@ -106,7 +106,7 @@ def test_rpc_module(): time_f = 
f2.time_evaluator(f2.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op" % cost) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) def test_rpc_module_with_upload(): @@ -142,7 +142,7 @@ def test_rpc_module_with_upload(): time_f = f.time_evaluator(f.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op" % cost) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) if __name__ == "__main__": diff --git a/apps/topi_recipe/broadcast/test_broadcast_map.py b/apps/topi_recipe/broadcast/test_broadcast_map.py index 43a44afa785b..1ac099f7b4cd 100644 --- a/apps/topi_recipe/broadcast/test_broadcast_map.py +++ b/apps/topi_recipe/broadcast/test_broadcast_map.py @@ -69,7 +69,7 @@ def test_broadcast_to(in_shape, out_shape): out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), tvm.cuda()) for _ in range(2): fcuda(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) def test_broadcast_binary_op(lhs_shape, rhs_shape, typ="add"): @@ -121,7 +121,7 @@ def test_broadcast_binary_op(lhs_shape, rhs_shape, typ="add"): out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), tvm.cuda()) for _ in range(2): fcuda(lhs_nd, rhs_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) if __name__ == "__main__": diff --git a/apps/topi_recipe/conv/depthwise_conv2d_test.py b/apps/topi_recipe/conv/depthwise_conv2d_test.py index e282e67af717..6f408e051555 100644 --- a/apps/topi_recipe/conv/depthwise_conv2d_test.py +++ b/apps/topi_recipe/conv/depthwise_conv2d_test.py @@ -146,11 +146,9 @@ def check_device(device): depthwise_conv2d_scipy[:, c, :, :] * scale_np[c] + shift_np[c] ) relu_scipy = np.maximum(scale_shift_scipy, 0) - tvm.testing.assert_allclose( - depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5 - ) - tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5) - tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5) + tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5) + tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5) + tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5) print("success") for device in ["cuda", "opencl", "rocm"]: @@ -253,11 +251,9 @@ def check_device(device): depthwise_conv2d_scipy[:, :, :, c] * scale_np[c] + shift_np[c] ) relu_scipy = np.maximum(scale_shift_scipy, 0) - tvm.testing.assert_allclose( - depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5 - ) - tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5) - tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5) + tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5) + tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5) + tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5) print("success") for device in ["cuda", "opencl", "rocm"]: diff --git a/apps/topi_recipe/conv/test_conv2d_hwcn_map.py b/apps/topi_recipe/conv/test_conv2d_hwcn_map.py index a2394a7279c0..1d65c54a5139 100644 --- a/apps/topi_recipe/conv/test_conv2d_hwcn_map.py +++ b/apps/topi_recipe/conv/test_conv2d_hwcn_map.py @@ -88,10 +88,10 @@ def check_device(device): ): func1 = tvm.build(s1, [A, W, B], 
device) func1(a, w, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) func2 = tvm.build(s2, [A, W, C], device) func2(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for device in ["cuda", "opencl", "rocm"]: check_device(device) diff --git a/apps/topi_recipe/conv/test_conv_int8_arm.py b/apps/topi_recipe/conv/test_conv_int8_arm.py index 4e8262928568..ed2464140cfd 100644 --- a/apps/topi_recipe/conv/test_conv_int8_arm.py +++ b/apps/topi_recipe/conv/test_conv_int8_arm.py @@ -186,9 +186,9 @@ def run_inference( # Functional check if data_dtype == "uint8": - np.testing.assert_equal(c_orig.asnumpy(), c_sch.asnumpy()) + np.testing.assert_equal(c_orig.numpy(), c_sch.numpy()) else: - assert np.allclose(c_orig.asnumpy(), c_sch.asnumpy()) + assert np.allclose(c_orig.numpy(), c_sch.numpy()) evaluator = func.time_evaluator(func.entry_name, DEV, number=1000) LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True)) diff --git a/apps/topi_recipe/conv/test_conv_int8_intel.py b/apps/topi_recipe/conv/test_conv_int8_intel.py index b46d80fff821..36f8233559d4 100644 --- a/apps/topi_recipe/conv/test_conv_int8_intel.py +++ b/apps/topi_recipe/conv/test_conv_int8_intel.py @@ -172,9 +172,9 @@ def run_inference( # Functional check if data_dtype == "uint8": - np.testing.assert_equal(c_orig.asnumpy(), c_sch.asnumpy()) + np.testing.assert_equal(c_orig.numpy(), c_sch.numpy()) else: - assert np.allclose(c_orig.asnumpy(), c_sch.asnumpy()) + assert np.allclose(c_orig.numpy(), c_sch.numpy()) evaluator = func.time_evaluator(func.entry_name, DEV, number=1000) LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True)) diff --git a/apps/topi_recipe/gemm/android_gemm_square.py b/apps/topi_recipe/gemm/android_gemm_square.py index 41370c677b38..011bc172b9b5 100644 --- a/apps/topi_recipe/gemm/android_gemm_square.py +++ b/apps/topi_recipe/gemm/android_gemm_square.py @@ -51,7 +51,7 @@ def evaluate(func, dev, N, times): cost = time_f(a, b, c).mean gf = ngflops(N) / cost print("%g secs/op, %g GFLOPS" % (cost, gf)) - np.testing.assert_almost_equal(c.asnumpy(), a_np.dot(b_np), decimal=2) + np.testing.assert_almost_equal(c.numpy(), a_np.dot(b_np), decimal=2) def test_gemm_gpu(N, times, bn, num_block, num_thread): diff --git a/apps/topi_recipe/gemm/cuda_gemm_square.py b/apps/topi_recipe/gemm/cuda_gemm_square.py index d84deea86e82..85a38a6cd04e 100644 --- a/apps/topi_recipe/gemm/cuda_gemm_square.py +++ b/apps/topi_recipe/gemm/cuda_gemm_square.py @@ -136,7 +136,7 @@ def check_device(device): c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev) for i in range(2): f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), np.dot(b_np.T, a_np), rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), np.dot(b_np.T, a_np), rtol=1e-5) num_flops = 2 * nn * nn * nn num_runs = 10 diff --git a/apps/topi_recipe/gemm/gemm_int8.py b/apps/topi_recipe/gemm/gemm_int8.py index ff3fa81f20d3..330769cb22ba 100644 --- a/apps/topi_recipe/gemm/gemm_int8.py +++ b/apps/topi_recipe/gemm/gemm_int8.py @@ -171,7 +171,7 @@ def block_size_filter(entity): f(a, b, c) tvm.testing.assert_allclose( - c.asnumpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5 + c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5 ) num_ops = 2 * l * m * n diff --git a/apps/topi_recipe/reduce/test_reduce_map.py b/apps/topi_recipe/reduce/test_reduce_map.py index 71ceb8f0cd07..f8d63e2d1912 100644 --- 
a/apps/topi_recipe/reduce/test_reduce_map.py +++ b/apps/topi_recipe/reduce/test_reduce_map.py @@ -83,7 +83,7 @@ def test_reduce_map(in_shape, axis, keepdims, type="sum", test_id=0): for _ in range(2): fcuda(data_tvm, out_tvm) - tvm.testing.assert_allclose(out_tvm.asnumpy(), out_npy, rtol=4e-4, atol=4e-4) + tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, rtol=4e-4, atol=4e-4) if __name__ == "__main__": diff --git a/apps/topi_recipe/rnn/matexp.py b/apps/topi_recipe/rnn/matexp.py index 85e0d617eb07..d147ad66604b 100644 --- a/apps/topi_recipe/rnn/matexp.py +++ b/apps/topi_recipe/rnn/matexp.py @@ -160,7 +160,7 @@ def check_device(target): print("Time cost=%g" % tgap) # correctness if not SKIP_CHECK: - res_cuda = res_a.asnumpy() + res_cuda = res_a.numpy() res_cmp = np.ones_like(res_np).astype("float64") Whh_np = Whh_np.astype("float64") for t in range(1, n_num_step): diff --git a/docs/deploy/hls.rst b/docs/deploy/hls.rst index 3c735e829936..50ae78bbbcc3 100644 --- a/docs/deploy/hls.rst +++ b/docs/deploy/hls.rst @@ -79,7 +79,7 @@ We use two python scripts for this tutorial. c = tvm.nd.array(np.zeros(n, dtype="float32"), dev) fadd(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) Setup diff --git a/docs/dev/codebase_walkthrough.rst b/docs/dev/codebase_walkthrough.rst index 90c0670e402f..60ab5e5ae9d2 100644 --- a/docs/dev/codebase_walkthrough.rst +++ b/docs/dev/codebase_walkthrough.rst @@ -169,7 +169,7 @@ The returned module, which can be thought of as a combination of a compiled func b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) - output = c.asnumpy() + output = c.numpy() Under the hood, TVM allocates device memory and manages memory transfers automatically. To do that, each backend needs to subclass ``DeviceAPI`` class, defined in ``include/tvm/runtime/device_api.h``, and override memory management methods to use device specific API. For example, the CUDA backend implements ``CUDADeviceAPI`` in ``src/runtime/cuda/cuda_device_api.cc`` to use ``cudaMalloc``, ``cudaMemcpy`` etc. diff --git a/docs/dev/debugger.rst b/docs/dev/debugger.rst index f1bd004717b4..38172a2189e0 100644 --- a/docs/dev/debugger.rst +++ b/docs/dev/debugger.rst @@ -146,7 +146,7 @@ How to use Debugger? m.set_input(**params) # execute m.run() - tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy() + tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy() The outputs are dumped to a temporary folder in ``/tmp`` folder or the folder specified while creating the runtime. diff --git a/docs/dev/index.rst b/docs/dev/index.rst index 5189ffddf1ee..7eeecc12b33c 100644 --- a/docs/dev/index.rst +++ b/docs/dev/index.rst @@ -147,7 +147,7 @@ The main goal of TVM's runtime is to provide a minimal API for loading and execu arr: tvm.runtime.NDArray = tvm.nd.array([1, 2, 3], device=tvm.cuda(0)) fun: tvm.runtime.PackedFunc = mod["addone"] fun(a) - print(a.asnumpy()) + print(a.numpy()) :py:class:`tvm.runtime.Module` encapsulates the result of compilation. A runtime.Module contains a GetFunction method to obtain PackedFuncs by name. @@ -172,7 +172,7 @@ The above example only deals with a simple `addone` function. 
The code snippet b # execute the model gmod["run"]() # get the output - result = gmod["get_output"](0).asnumpy() + result = gmod["get_output"](0).numpy() The main take away is that runtime.Module and runtime.PackedFunc are sufficient to encapsulate both operator level programs (such as addone), as well as the end-to-end models. diff --git a/docs/langref/relay_pattern.rst b/docs/langref/relay_pattern.rst index efb98045480c..257fe085bfe5 100644 --- a/docs/langref/relay_pattern.rst +++ b/docs/langref/relay_pattern.rst @@ -446,7 +446,7 @@ with a single batch_norm op: beta = node_map[self.beta][0] gamma = node_map[self.gamma][0] eps = node_map[self.eps][0] - return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon = eps.data.asnumpy().item())[0] + return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon = eps.data.numpy().item())[0] # A graph of arithmetic operators that are functional equivalent to batch_norm. x = relay.var('x') diff --git a/python/tvm/auto_scheduler/search_task.py b/python/tvm/auto_scheduler/search_task.py index 4bc3968e7fe8..f1156998bdac 100644 --- a/python/tvm/auto_scheduler/search_task.py +++ b/python/tvm/auto_scheduler/search_task.py @@ -234,7 +234,7 @@ def _save_buffer_to_file(buffer_name, buffer_data): File name will be: {buffer_name}.{buffer_shape}_{buffer_data_type}.npy """ - np_data = buffer_data.asnumpy() + np_data = buffer_data.numpy() buffer_name += "." for i in np_data.shape: diff --git a/python/tvm/contrib/sparse.py b/python/tvm/contrib/sparse.py index bee9b835a98a..d515f58f9d2f 100644 --- a/python/tvm/contrib/sparse.py +++ b/python/tvm/contrib/sparse.py @@ -16,6 +16,7 @@ # under the License. """Tensor and Operation class for computation declaration.""" # pylint: disable=invalid-name +import warnings import numpy as _np from tvm.runtime import ndarray as _nd from tvm import te @@ -81,11 +82,21 @@ def __init__(self, arg1, device=None, shape=None): ) def asnumpy(self): + """Construct a full matrix and convert it to numpy array. This API will be deprecated + in TVM v0.8 release. Please use `numpy` instead.""" + warnings.warn( + "CSRNDArray.asnumpy() will be deprecated in TVM v0.8 release. 
" + "Please use CSRNDArray.numpy() instead.", + DeprecationWarning, + ) + return self.numpy() + + def numpy(self): """Construct a full matrix and convert it to numpy array.""" full = _np.zeros(self.shape, self.dtype) - ridx = _np.diff(self.indptr.asnumpy()) + ridx = _np.diff(self.indptr.numpy()) ridx = _np.hstack((_np.ones((v,), itype) * i for i, v in enumerate(ridx))) - full[ridx, self.indices.asnumpy().astype(itype)] = self.data.asnumpy() + full[ridx, self.indices.numpy().astype(itype)] = self.data.numpy() return full diff --git a/python/tvm/contrib/target/coreml.py b/python/tvm/contrib/target/coreml.py index 18a53bdffd86..b5a03e380493 100644 --- a/python/tvm/contrib/target/coreml.py +++ b/python/tvm/contrib/target/coreml.py @@ -69,7 +69,7 @@ def _convert_softmax(builder, name, inputs, outputs, args, attrs): def _convert_conv2d(builder, name, inputs, outputs, args, attrs): - weight = args[1].data.asnumpy() + weight = args[1].data.numpy() if attrs["kernel_layout"] == "OIHW": # convert to 'HWIO' weight = weight.transpose([2, 3, 1, 0]) @@ -169,7 +169,7 @@ def visit_constant(self, const): self.builder.add_load_constant_nd( name=output, output_name=output, - constant_value=const.data.asnumpy(), + constant_value=const.data.numpy(), shape=const.data.shape, ) self.buf_idx_ = self.buf_idx_ + 1 diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index f9141a6d1dec..a38bcf5bcefa 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -684,7 +684,7 @@ def _get_opsets(self): return opsets def make_model(self): - """ Creates the onnx model from the graph """ + """Creates the onnx model from the graph""" onnx_graph = onnx.helper.make_graph( self._nodes, self._name, self._inputs, self._outputs, self._initializers ) @@ -734,7 +734,7 @@ def _get_node_entry(cls, relay_node, name): } def convert_to_onnx(self, func): - """ Traverse Relay graph and generate a ONNX model""" + """Traverse Relay graph and generate a ONNX model""" self.visit(func) self._add_output(self._node_dict[self.last_node]) @@ -826,7 +826,7 @@ def _add_params(self, node_entry, idx): param_name in self._params ), "The parameter {0} is not present" "in params dict provided.".format(param_name) value = self._params[param_name] - numpy_array = value.asnumpy() + numpy_array = value.numpy() tensor = numpy_helper.from_array(numpy_array, param_name) self._mc.add_initializers([tensor]) dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy_array.dtype] diff --git a/python/tvm/driver/tvmc/runner.py b/python/tvm/driver/tvmc/runner.py index 9bd31644197d..c59689face63 100644 --- a/python/tvm/driver/tvmc/runner.py +++ b/python/tvm/driver/tvmc/runner.py @@ -42,7 +42,7 @@ @register_parser def add_run_parser(subparsers): - """ Include parser for 'run' subcommand """ + """Include parser for 'run' subcommand""" parser = subparsers.add_parser("run", help="run a compiled module") parser.set_defaults(func=drive_run) @@ -420,6 +420,6 @@ def run_module( outputs = {} for i in range(num_outputs): output_name = "output_{}".format(i) - outputs[output_name] = module.get_output(i).asnumpy() + outputs[output_name] = module.get_output(i).numpy() return TVMCResult(outputs, times) diff --git a/python/tvm/relay/analysis/sparse_conv2d.py b/python/tvm/relay/analysis/sparse_conv2d.py index 9790c1fdda2d..11278bddca33 100644 --- a/python/tvm/relay/analysis/sparse_conv2d.py +++ b/python/tvm/relay/analysis/sparse_conv2d.py @@ -85,7 +85,7 @@ def process_params(expr, params, block_size, sparsity_threshold, layout): 
weight_names = _search_conv2d_op_weight(expr) for name in weight_names: name = str(name) - w_np = params[name].asnumpy() + w_np = params[name].numpy() # currently only support conv2d_1*1 if not ( (w_np.shape[0] == 1 and w_np.shape[1] == 1) diff --git a/python/tvm/relay/analysis/sparse_dense.py b/python/tvm/relay/analysis/sparse_dense.py index 67e40a150e61..3199360592fa 100644 --- a/python/tvm/relay/analysis/sparse_dense.py +++ b/python/tvm/relay/analysis/sparse_dense.py @@ -83,7 +83,7 @@ def process_params(expr, params, block_size, sparsity_threshold): weight_names = _search_dense_op_weight(expr) for name in weight_names: name = str(name) - w_np = params[name].asnumpy() + w_np = params[name].numpy() sparsity = 1.0 - (np.count_nonzero(w_np) / w_np.size) if sparsity >= sparsity_threshold: sparse_weight = sp.bsr_matrix(w_np, blocksize=block_size) diff --git a/python/tvm/relay/backend/_backend.py b/python/tvm/relay/backend/_backend.py index 6df83559645d..9460e23a5357 100644 --- a/python/tvm/relay/backend/_backend.py +++ b/python/tvm/relay/backend/_backend.py @@ -86,7 +86,7 @@ def build(mod, target, target_host=None): @tvm._ffi.register_func("relay._tensor_value_repr") def _tensor_value_repr(tvalue): - return str(tvalue.data.asnumpy()) + return str(tvalue.data.numpy()) @tvm._ffi.register_func("relay._constant_repr") @@ -94,7 +94,7 @@ def _tensor_constant_repr(tvalue): dtype = tvm.runtime.DataType(tvalue.data.dtype) if tvm.target.datatype.get_type_registered(dtype.type_code): return "custom tensor of type " + dtype.type_code - return str(tvalue.data.asnumpy()) + return str(tvalue.data.numpy()) tvm._ffi._init_api("relay.backend", __name__) diff --git a/python/tvm/relay/data_dep_optimization/simplify_fc_transpose.py b/python/tvm/relay/data_dep_optimization/simplify_fc_transpose.py index 2892c6cb7d67..eeb474efa136 100644 --- a/python/tvm/relay/data_dep_optimization/simplify_fc_transpose.py +++ b/python/tvm/relay/data_dep_optimization/simplify_fc_transpose.py @@ -47,7 +47,7 @@ def convert(func, params): weight_info = search_fc_transpose(func) for item in weight_info: name = str(item) - w_np = params[name].asnumpy() + w_np = params[name].numpy() new_w = np.transpose(w_np, axes=[1, 0]) params[name + ".T"] = tvm.nd.array(new_w) del params[name] diff --git a/python/tvm/relay/frontend/change_datatype.py b/python/tvm/relay/frontend/change_datatype.py index dc80b3e25422..1873b3461e3e 100644 --- a/python/tvm/relay/frontend/change_datatype.py +++ b/python/tvm/relay/frontend/change_datatype.py @@ -39,7 +39,7 @@ class ChangeDatatype(ExprMutator): def change_dtype(mod, params, src, dst): mod = ChangeDatatype(src, dst)(mod) - params = dict((p, tvm.nd.array(params[p].asnumpy().astype(dst))) for p in params) + params = dict((p, tvm.nd.array(params[p].numpy().astype(dst))) for p in params) return mod, params mod, params = change_dtype(mod, params, "float32", "custom[posites2]32") diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index c2546205c571..55556cf583fa 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -582,7 +582,7 @@ def try_infer_value(val, on_success=None, on_failure=None): indicates whether infer_value has succeeded or not. 
""" try: - ret = infer_value(val, {}).asnumpy() + ret = infer_value(val, {}).numpy() if on_success: return on_success(ret), True return ret, True diff --git a/python/tvm/relay/frontend/mxnet.py b/python/tvm/relay/frontend/mxnet.py index 11800eaf3cb3..3b940bd15f5b 100644 --- a/python/tvm/relay/frontend/mxnet.py +++ b/python/tvm/relay/frontend/mxnet.py @@ -1683,7 +1683,7 @@ def _has_fused_activation(_attrs, _supported_activations): return has_fused_activation def _get_data_scale_and_zp(_data, _inputs, _data_min_idx, _data_max_idx): - """ Finds the Qnn params for the data expr. """ + """Finds the Qnn params for the data expr.""" data_min = _inputs[_data_min_idx] data_max = _inputs[_data_max_idx] assert data_min <= data_max @@ -1702,7 +1702,7 @@ def _get_data_scale_and_zp(_data, _inputs, _data_min_idx, _data_max_idx): return _data_scale, _data_zero_point def _get_bn_alpha_coeff(_bn_gamma_idx, _bn_beta_idx, _bn_running_mean_idx, _bn_running_var_idx): - """ Extract the BN coeff. These will be use later for BN folding into convolution. """ + """Extract the BN coeff. These will be use later for BN folding into convolution.""" # Extract relevant attrs from bn. bn_attrs = _get_subgraph_op(subgraphs, "BatchNorm")["attrs"] bn_epsilon_param = float(bn_attrs["eps"]) @@ -1730,7 +1730,7 @@ def _get_bn_alpha_coeff(_bn_gamma_idx, _bn_beta_idx, _bn_running_mean_idx, _bn_r return _bn_scale, _bn_shift def _fold_bn(_bn_scale, _bn_shift, _has_bias, _has_bn): - """ Fold BN into kernel and bias. Get new kernel and bias. """ + """Fold BN into kernel and bias. Get new kernel and bias.""" _kernel = inputs[1] if _bn_scale: assert attrs.get_bool("with_bn", False) @@ -1753,13 +1753,13 @@ def _fold_bn(_bn_scale, _bn_shift, _has_bias, _has_bn): def _get_quantized_kernel(_kernel, _bias, _data_scale): # For quantizing, we need min/max of kernel. So, we have to pre compute this expr. - np_kernel = _infer_value(_kernel, params).asnumpy() + np_kernel = _infer_value(_kernel, params).numpy() kernel_channel_min = np.amin(np_kernel, axis=(1, 2, 3)) kernel_channel_max = np.amax(np_kernel, axis=(1, 2, 3)) np_bias = None if _bias is not None: - np_bias = _infer_value(_bias, params).asnumpy() + np_bias = _infer_value(_bias, params).numpy() return quantize_conv_weights_bias_channel_mkldnn_from_var( _kernel, np_bias, kernel_channel_min, kernel_channel_max, _data_scale ) @@ -1803,7 +1803,7 @@ def _get_requantized_op(_res, _input_scale, _output_scale, _out_dtype): ) def _get_sum(_res, _output_scale, out_dtype): - """ Handles sum of the second quantized tensor. """ + """Handles sum of the second quantized tensor.""" # This is done in following steps # 1) rhs is the add's second operand. First rhs will be requantized to output scale with # dtype int32. The int32 dtype is to keep precision high before adding. 
@@ -2091,15 +2091,15 @@ def _get_kernel_scale_zp_tensor_quantized(_kernel, _inputs, _has_bias): ) if isinstance(_kernel, tvm.relay.Call) and _kernel.op.name == "qnn.quantize": - _kernel_scale = _kernel.args[1].data.asnumpy() - _kernel_zp = _kernel.args[2].data.asnumpy() + _kernel_scale = _kernel.args[1].data.numpy() + _kernel_zp = _kernel.args[2].data.numpy() return _kernel_scale, _kernel_zp kernel_min_idx, kernel_max_idx = (5, 6) if _has_bias else (4, 5) kernel_min_name = _get_name(_inputs[kernel_min_idx]) - kernel_min = params[kernel_min_name].asnumpy()[0] + kernel_min = params[kernel_min_name].numpy()[0] kernel_max_name = _get_name(_inputs[kernel_max_idx]) - kernel_max = params[kernel_max_name].asnumpy()[0] + kernel_max = params[kernel_max_name].numpy()[0] _kernel_scale = ( get_mkldnn_uint8_scale(kernel_min, kernel_max) if kernel_dtype == "uint8" @@ -2116,13 +2116,13 @@ def _get_kernel_scale_zp_channel_quantized(_kernel, _bias, _data_scale): ) # Get the FP32 values, calculate min/max and then channel quantize them - np_kernel = _infer_value(_kernel, params).asnumpy() + np_kernel = _infer_value(_kernel, params).numpy() kernel_channel_min = np.amin(np_kernel, axis=(1,)) kernel_channel_max = np.amax(np_kernel, axis=(1,)) np_bias = None if _bias is not None: - np_bias = _infer_value(_bias, params).asnumpy() + np_bias = _infer_value(_bias, params).numpy() return quantize_conv_weights_bias_channel_mkldnn_from_var( _kernel, np_bias, kernel_channel_min, kernel_channel_max, _data_scale ) @@ -2130,15 +2130,15 @@ def _get_kernel_scale_zp_channel_quantized(_kernel, _bias, _data_scale): def _get_bias_requantize_scale(_inputs, _data_scale, _kernel_scale): _bias = _inputs[2] if isinstance(_bias, tvm.relay.Call) and _bias.op.name == "qnn.quantize": - _bias_scale = _bias.args[1].data.asnumpy() + _bias_scale = _bias.args[1].data.numpy() _bias_requantize_scale = _bias_scale / (_data_scale * _kernel_scale) _bias_requantize_scale = _expr.const(_bias_requantize_scale, dtype="float32") return _bias_requantize_scale bias_min_name = _get_name(_inputs[7]) - bias_min = params[bias_min_name].asnumpy()[0] + bias_min = params[bias_min_name].numpy()[0] bias_max_name = _get_name(_inputs[8]) - bias_max = params[bias_max_name].asnumpy()[0] + bias_max = params[bias_max_name].numpy()[0] bias_scale = get_mkldnn_int8_scale(bias_min, bias_max) _bias_requantize_scale = bias_scale / (_data_scale * _kernel_scale) _bias_requantize_scale = _expr.const(_bias_requantize_scale, dtype="float32") diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index e70167a6aa57..3f876f401b3c 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -981,7 +981,7 @@ def _impl_v1(cls, inputs, attr, params): @classmethod def _impl_v5(cls, inputs, attr, params): if get_name(inputs[1]) in params: - shape = tuple(params[inputs[1].name_hint].asnumpy().astype("int32")) + shape = tuple(params[inputs[1].name_hint].numpy().astype("int32")) out = _op.reshape(inputs[0], shape) else: out = _op.reshape(*inputs) @@ -1143,11 +1143,11 @@ def _impl_v9(cls, inputs, attr, params): assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs)) if get_name(inputs[1]) in params: - scales = params[inputs[1].name_hint].asnumpy() + scales = params[inputs[1].name_hint].numpy() else: scales = inputs[1] if isinstance(scales, _expr.Constant): - scales = list(scales.data.asnumpy()) + scales = list(scales.data.numpy()) if not isinstance(scales, _expr.Expr): assert scales[0] == 1.0 and scales[1] 
== 1.0 @@ -1224,7 +1224,7 @@ def _impl_v1(cls, inputs, attr, params): dim = inputs[1] if dim is not None: - dim = int(infer_value(dim, params).asnumpy()) + dim = int(infer_value(dim, params).numpy()) exclusive = attr.get("exclusive", 0) reverse = attr.get("reverse", 0) @@ -1376,7 +1376,7 @@ def normalize_gather_indices(data, indices, axis): s = _op.take(_op.shape_of(data, dtype=ind_dtype), _op.const(axis)) cond = fold_constant(indices < _op.const(0, ind_dtype)) if isinstance(cond, _expr.Constant): - val = cond.data.asnumpy() + val = cond.data.numpy() if val.size == 1: cond = val.item() if cond: @@ -2839,7 +2839,7 @@ class QLinearConv(OnnxOpConverter): def _impl_v10(cls, inputs, attr, params): def get_scalar(x, dtype="float32"): if isinstance(x, _expr.Var) and x.name_hint in params: - return _op.const(params[x.name_hint].asnumpy(), dtype) + return _op.const(params[x.name_hint].numpy(), dtype) rank = len(infer_shape(x)) assert rank <= 1, "QLinearConv scale and zero_point input must be scalars" if rank == 1: diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py index ab2357fb8eb7..b5cfcf5e3bac 100644 --- a/python/tvm/relay/frontend/pytorch.py +++ b/python/tvm/relay/frontend/pytorch.py @@ -563,7 +563,7 @@ def repeat(self, inputs, input_types): if isinstance(r, int): reps.append(r) else: - reps.append(int(_infer_value(r, {}).asnumpy())) + reps.append(int(_infer_value(r, {}).numpy())) return _op.transform.tile(data, reps=reps) @@ -603,7 +603,7 @@ def full_impl(self, data, fill_value, dtype): for dim in data: if isinstance(dim, _expr.Expr): if isinstance(dim, _expr.Constant): - dim = int(dim.data.asnumpy()) + dim = int(dim.data.numpy()) if isinstance(size, list): size.append(dim) new_shape.append(dim) @@ -868,7 +868,7 @@ def convert_const_list(data): if isinstance(data, list): for i, _ in enumerate(data): if isinstance(data[i], _expr.Expr): - data[i] = int(_infer_value_simulated(data[i], {}).asnumpy()) + data[i] = int(_infer_value_simulated(data[i], {}).numpy()) return data def maxpool_2d(self, inputs, input_types): @@ -1302,7 +1302,7 @@ def view(self, inputs, input_types): for i, shape in enumerate(shape_inp): if isinstance(shape, _expr.Expr): val = _infer_value_simulated(shape, {}) - new_shape[i] = np.asscalar(val.asnumpy()) + new_shape[i] = np.asscalar(val.numpy()) return _op.transform.reshape(data, new_shape) @@ -1314,7 +1314,7 @@ def reshape(self, inputs, input_types): is_dyn = False for s in new_shape: if isinstance(s, _expr.Constant): - tmp_shape.append(int(s.data.asnumpy())) + tmp_shape.append(int(s.data.numpy())) elif isinstance(s, _expr.Expr): dim, success = try_infer_value(s, lambda ret: int(ret)) tmp_shape.append(dim) @@ -1684,7 +1684,7 @@ def expand(self, inputs, input_types): for i in range(out_dims): if sizes[i] != -1 and shape[i] == 1: if not isinstance(sizes[i], int): - sizes[i] = int(_infer_value(sizes[i], {}).asnumpy()) + sizes[i] = int(_infer_value(sizes[i], {}).numpy()) out = _op.repeat(out, sizes[i], axis=i) return out @@ -1730,7 +1730,7 @@ def pad(inputs, input_types): const_paddings.append([]) for p in pad: if not isinstance(p, int): - p = int(_infer_value(p, {}).asnumpy()) + p = int(_infer_value(p, {}).numpy()) const_paddings[-1].append(p) if mode == "constant": @@ -1745,7 +1745,7 @@ def clamp(self, inputs, input_types): def get_v(v, default_v): if isinstance(v, _expr.Constant): - return float(v.data.asnumpy()) + return float(v.data.numpy()) if isinstance(v, _expr.Expr): infer_v, success = try_infer_value(v, lambda ret: float(ret)) if 
success: @@ -1790,7 +1790,7 @@ def get_upsample_out_size(self, inputs, method): if inputs[1] is not None: for size in inputs[1]: if not isinstance(size, int): - out_size.append(int(_infer_value(size, {}).asnumpy())) + out_size.append(int(_infer_value(size, {}).numpy())) else: out_size.append(size) else: @@ -2169,7 +2169,7 @@ def scalar_tensor(self, inputs, input_types): } type_key = inputs[1] if isinstance(data, _expr.Constant): - data = data.data.asnumpy().tolist() + data = data.data.numpy().tolist() return _expr.const(data, cast_map[type_key]) def interpolate(self, inputs, input_types): @@ -2523,7 +2523,7 @@ def update_convert_map(self, custom_map): self.convert_map.update(custom_map) def report_missing_conversion(self, op_names): - """ Check if all ops in an input graph are supported by TVM """ + """Check if all ops in an input graph are supported by TVM""" known_ops = [ "prim::Constant", "prim::GetAttr", @@ -2545,13 +2545,13 @@ def report_missing_conversion(self, op_names): raise NotImplementedError(msg) def convert_block(self, block, outputs): - """ Translate Torch "Block", used for prim::If and prim::Loop """ + """Translate Torch "Block", used for prim::If and prim::Loop""" ops = _get_operator_nodes(block.nodes()) ret_names = _get_input_names(block.returnNode()) return self.convert_operators(ops, outputs, ret_names) def convert_if(self, if_node, outputs): - """ Translate Torch prim::If to Relay If """ + """Translate Torch prim::If to Relay If""" cond = outputs[if_node.inputsAt(0).debugName()] blocks = list(if_node.blocks()) true_branch = self.convert_block(blocks[0], outputs) @@ -2560,7 +2560,7 @@ def convert_if(self, if_node, outputs): return _expr.If(cond, true_branch[0], false_branch[0]) def convert_loop(self, loop_node, outputs): - """ Translate Torch prim::Loop to Relay while_loop """ + """Translate Torch prim::Loop to Relay while_loop""" def get_input(index): ivalue = loop_node.inputsAt(index) @@ -2690,7 +2690,7 @@ def body(*current_vals): return [_expr.TupleGetItem(loop_val, i + 1) for i in range(num_loop_var)] def convert_operators(self, operators, outputs, ret_names): - """ Convert each Torch IR operators to Relay equivalent """ + """Convert each Torch IR operators to Relay equivalent""" for node_name, op_node in operators: operator = op_node.kind() inputs = _get_op_inputs(op_node, outputs) @@ -2872,7 +2872,7 @@ def _wrap_const(c): def _run_jit_passes(graph): - """ The inline pass is necessary to unwrap prim::CallMethod """ + """The inline pass is necessary to unwrap prim::CallMethod""" # pylint: disable=c-extension-no-member import torch @@ -2978,7 +2978,7 @@ def _get_input_types(op_node, outputs, default_dtype="float32"): def _get_constant(node): - """ Retrieve a constant associated with this prim::Constant node """ + """Retrieve a constant associated with this prim::Constant node""" attribute_names = node.attributeNames() num_attributes = len(attribute_names) @@ -3012,7 +3012,7 @@ def _get_constant(node): def _get_operator_nodes(nodes): - """ Returns torch IR nodes that need conversion to Relay """ + """Returns torch IR nodes that need conversion to Relay""" ops = [] # Traverse nodes and add to graph for node in nodes: @@ -3226,7 +3226,7 @@ def convert_params(graph, state_dict): def get_all_op_names(graph): - """ Return all operator names in the input graph """ + """Return all operator names in the input graph""" nodes = list(graph.nodes()) prim_with_blocks = ["prim::If", "prim::Loop"] for prim in prim_with_blocks: diff --git a/python/tvm/relay/frontend/qnn_torch.py 
b/python/tvm/relay/frontend/qnn_torch.py index 2dd84b650bd2..4a3db910c8a2 100644 --- a/python/tvm/relay/frontend/qnn_torch.py +++ b/python/tvm/relay/frontend/qnn_torch.py @@ -30,7 +30,7 @@ class QNNParam: - """ A placeholder for weight quantization parameters """ + """A placeholder for weight quantization parameters""" def __init__(self, weight, bias, scale, zero_point, param_key): param_prefix = param_key[: -len("._packed_params")] @@ -98,7 +98,7 @@ def make_conv_packed_param(param_name, qweight, bias, packed_params): def get_weight_quant_params(script_module): - """ Retrive and unpack weight parameters from quantized modules """ + """Retrive and unpack weight parameters from quantized modules""" import torch param_name = "_packed_params" @@ -403,7 +403,7 @@ def add_input_quant_params_to_op_inputs(graph): def add_quant_params(params, quant_params): - """ Add quant parameters to TVM param map """ + """Add quant parameters to TVM param map""" for qparam in quant_params.values(): params[qparam.weight_var.name_hint] = tvm.nd.array(qparam.weight) if qparam.bias is not None: @@ -456,7 +456,7 @@ def _impl(inputs, _): def _get_numpy(relay_const_scalar): - return relay_const_scalar.data.asnumpy() + return relay_const_scalar.data.numpy() def _get_scalar(relay_const_scalar): @@ -466,7 +466,7 @@ def _get_scalar(relay_const_scalar): def _do_bias_and_requantize( output, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu ): - """ Output processing for conv and linear """ + """Output processing for conv and linear""" # this is a vector for per channel case requant_input_scale = _expr.const(_get_numpy(input_scale) * _get_numpy(weight_scale)) # Torch does bias add and requanize scale in fp32 diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 5b0507bf486f..4af73702ad9c 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -105,8 +105,8 @@ def _dim_check(attrs): def _get_param(params, input_node): if isinstance(input_node, _expr.Constant): - return np.atleast_1d(input_node.data.asnumpy()) - return params[input_node.name_hint].asnumpy() + return np.atleast_1d(input_node.data.numpy()) + return params[input_node.name_hint].numpy() def _get_num_param(params, input_node): @@ -117,7 +117,7 @@ def _get_list_param(params, input_node, mod): try: return _get_param(params, input_node).tolist() except (IndexError, KeyError, AttributeError): - return _infer_value(input_node, params, mod).asnumpy().tolist() + return _infer_value(input_node, params, mod).numpy().tolist() def _get_tuple_param(params, input_node): @@ -155,7 +155,7 @@ def _impl(inputs, attr, params, mod): def _argx(func, func_name): - """ A common wrapper for argmin and argmax operations """ + """A common wrapper for argmin and argmax operations""" def _impl(inputs, attr, params, mod): try: @@ -723,17 +723,17 @@ def _nms(return_scores=False): def _impl(inputs, attr, params, mod): # Get parameter values try: - max_output_size = int(np.atleast_1d(inputs[2].data.asnumpy().astype("int64"))[0]) + max_output_size = int(np.atleast_1d(inputs[2].data.numpy().astype("int64"))[0]) except Exception: try: max_output_size = ( - _infer_value(inputs[2], params, mod).asnumpy().astype("int64").tolist()[0] + _infer_value(inputs[2], params, mod).numpy().astype("int64").tolist()[0] ) except Exception: max_output_size = inputs[2] - iou_threshold = np.atleast_1d(inputs[3].data.asnumpy())[0] + iou_threshold = np.atleast_1d(inputs[3].data.numpy())[0] # 
score_threshold was introduced from V3 - score_threshold = np.atleast_1d(inputs[4].data.asnumpy())[0] if len(inputs) > 4 else 0.0 + score_threshold = np.atleast_1d(inputs[4].data.numpy())[0] if len(inputs) > 4 else 0.0 pad_output = "pad_to_max_output_size" # Generate data with shape (1, num_anchors, 5) @@ -799,17 +799,17 @@ def _impl(inputs, attr, params, mod): boxes = inputs[0] scores = inputs[1] try: - max_output_size = int(np.atleast_1d(inputs[2].data.asnumpy().astype("int64"))[0]) + max_output_size = int(np.atleast_1d(inputs[2].data.numpy().astype("int64"))[0]) except Exception: try: max_output_size = ( - _infer_value(inputs[2], params, mod).asnumpy().astype("int64").tolist()[0] + _infer_value(inputs[2], params, mod).numpy().astype("int64").tolist()[0] ) except Exception: max_output_size = inputs[2] max_total_size = inputs[3] - iou_threshold = np.atleast_1d(inputs[4].data.asnumpy())[0] - score_threshold = np.atleast_1d(inputs[5].data.asnumpy())[0] + iou_threshold = np.atleast_1d(inputs[4].data.numpy())[0] + score_threshold = np.atleast_1d(inputs[5].data.numpy())[0] if attr["pad_per_class"]: raise tvm.error.OpAttributeUnImplemented( "pad_per_class for CombinedNonMaxSuppression is not supported" @@ -968,9 +968,9 @@ def _impl(inputs, attr, params, mod): # Important that the size is defined. If an axis is not, we need to infer what # the shape should be. if -1 in size: - size = _infer_value(inputs[1], params, mod).asnumpy().reshape([-1]).tolist() + size = _infer_value(inputs[1], params, mod).numpy().reshape([-1]).tolist() else: - size = _infer_value(inputs[1], params, mod).asnumpy().reshape([-1]).tolist() + size = _infer_value(inputs[1], params, mod).numpy().reshape([-1]).tolist() attr["size"] = size inputs.pop(1) @@ -1100,9 +1100,9 @@ def _impl(inputs, attr, params, mod): assert len(inputs) == 4, "There should be 4 input tensors" - indices_tensor = _infer_value(inputs[0], params, mod).asnumpy() - values_tensor = _infer_value(inputs[1], params, mod).asnumpy() - dense_shape_tensor = _infer_value(inputs[2], params, mod).asnumpy() + indices_tensor = _infer_value(inputs[0], params, mod).numpy() + values_tensor = _infer_value(inputs[1], params, mod).numpy() + dense_shape_tensor = _infer_value(inputs[2], params, mod).numpy() data = inputs[3] @@ -1230,7 +1230,7 @@ def _sparse_segment_sum_with_num_segments(): def _impl(inputs, attr, params, mod): assert len(inputs) == 4, "There should be 4 input tensors" data = _op.take(inputs[0], inputs[1], axis=0) - num_segments = int(inputs[3].data.asnumpy().item()) + num_segments = int(inputs[3].data.numpy().item()) return _op.segment_sum(data, inputs[2], num_segments) return _impl @@ -1287,7 +1287,7 @@ def _sparse_segment_sum_sqrtn_with_num_segments(): def _impl(inputs, attr, params, mod): assert len(inputs) == 4, "There should be 4 input tensors" data = _op.take(inputs[0], inputs[1], axis=0) - num_segments = int(inputs[3].data.asnumpy().item()) + num_segments = int(inputs[3].data.numpy().item()) real_counts = count_all_indices(inputs[2], attr["T"].name, num_segments=num_segments) real_sqrt_counts = _op.sqrt(_op.cast_like(real_counts, data)) @@ -1317,7 +1317,7 @@ def _sparse_segment_mean_with_num_segments(): def _impl(inputs, attr, params, mod): assert len(inputs) == 4, "There should be 4 input tensors" data = _op.take(inputs[0], inputs[1], axis=0) - num_segments = int(inputs[3].data.asnumpy().item()) + num_segments = int(inputs[3].data.numpy().item()) real_counts = count_all_indices(inputs[2], attr["T"].name, num_segments=num_segments) # Calculate regular 
segment sum @@ -1337,9 +1337,9 @@ def _impl(inputs, attr, params, mod): len(inputs) == 4 ), "There should be 4 input tensors [sparse_indices, sparse_values, sparse_shape, dense]." - indices_tensor = _infer_value(inputs[0], params, mod).asnumpy() - values_tensor = _infer_value(inputs[1], params, mod).asnumpy() - dense_shape_tensor = _infer_value(inputs[2], params, mod).asnumpy() + indices_tensor = _infer_value(inputs[0], params, mod).numpy() + values_tensor = _infer_value(inputs[1], params, mod).numpy() + dense_shape_tensor = _infer_value(inputs[2], params, mod).numpy() data = inputs[3] @@ -1655,7 +1655,7 @@ def _tile(): def _impl(inputs, attr, params, mod): reps_input = inputs.pop() if isinstance(reps_input, _expr.Call): - np_reps = _infer_value(reps_input, params, mod).asnumpy() + np_reps = _infer_value(reps_input, params, mod).numpy() reps = [np_reps.flatten()[i] for i in range(np_reps.flatten().shape[0])] else: reps = _get_list_param(params, reps_input, mod) @@ -1708,7 +1708,7 @@ def _impl(inputs, attr, params, mod): # try to infer shape by precompute prune if possible. try: params_new = _infer_value(pop_node, params, mod) - shape_arg = tuple(params_new.asnumpy().astype("int32").flatten()) + shape_arg = tuple(params_new.numpy().astype("int32").flatten()) except Exception: # Deal with symbolic shape case. if isinstance(pop_node, _expr.Call) and "shape_of" in str(pop_node.op): @@ -1775,8 +1775,8 @@ def _impl(inputs, attr, params, mod): s1 = params[inputs[1].name_hint] else: s1 = _infer_value(inputs[1], params, mod) - s0 = list(s0.asnumpy().reshape([-1])) - s1 = list(s1.asnumpy().reshape([-1])) + s0 = list(s0.numpy().reshape([-1])) + s1 = list(s1.numpy().reshape([-1])) s0_size, s1_size = len(s0), len(s1) out = deque([]) @@ -1808,7 +1808,7 @@ def _impl(inputs, attr, params, mod): shape = params[inputs[1].name_hint] else: shape = _infer_value(inputs[1], params, mod) - shape = list(shape.asnumpy().reshape([-1])) + shape = list(shape.numpy().reshape([-1])) return _op.broadcast_to(inputs[0], shape) return _impl @@ -1917,7 +1917,7 @@ def _impl(inputs, attr, params, mod): def _fill(): def _impl(inputs, attr, params, mod): try: - output_shape = _infer_value(inputs[0], params, mod).asnumpy().tolist() + output_shape = _infer_value(inputs[0], params, mod).numpy().tolist() except Exception: output_shape = inputs[0] @@ -2047,7 +2047,7 @@ def _impl(inputs, attr, params, mod): data_dim = len(data_shape) stride_dim = len(stride) if data_dim == 0 and isinstance(inputs[0], _expr.Constant): - new_data = inputs[0].data.asnumpy().reshape(1) + new_data = inputs[0].data.numpy().reshape(1) return _expr.const(new_data, inputs[0].data.dtype) # This is a special routine to handle strided_slice after shape_of. 
@@ -2193,7 +2193,7 @@ def _impl(inputs, attr, params, mod): padlist = _get_param(params, inputs[1]) except (IndexError, KeyError, AttributeError): try: - padlist = _infer_value(inputs[1], params, mod).asnumpy().tolist() + padlist = _infer_value(inputs[1], params, mod).numpy().tolist() except Exception: padlist = inputs[1] @@ -2288,7 +2288,7 @@ def _impl(inputs, attr, params, mod): start = _get_param(params, inputs[0])[0] except (IndexError, KeyError, AttributeError): try: - start = _infer_value(inputs[1], params, mod).asnumpy().tolist() + start = _infer_value(inputs[1], params, mod).numpy().tolist() start = start if not isinstance(start, list) else start[0] except Exception: # Symbolic start @@ -2298,11 +2298,11 @@ def _impl(inputs, attr, params, mod): limit = ( _get_param(params, inputs[1])[0] if hasattr(inputs[1], "name_hint") or isinstance(inputs[1], _expr.Constant) - else params.pop("Rank").asnumpy()[0] + else params.pop("Rank").numpy()[0] ) except (IndexError, KeyError, AttributeError): try: - limit = _infer_value(inputs[1], params, mod).asnumpy().tolist() + limit = _infer_value(inputs[1], params, mod).numpy().tolist() limit = limit if not isinstance(limit, list) else limit[0] except Exception: limit = inputs[1] @@ -2311,7 +2311,7 @@ def _impl(inputs, attr, params, mod): delta = _get_param(params, inputs[2])[0] except (IndexError, KeyError, AttributeError): try: - delta = _infer_value(inputs[2], params, mod).asnumpy().tolist() + delta = _infer_value(inputs[2], params, mod).numpy().tolist() delta = delta if not isinstance(delta, list) else delta[0] except Exception: # Symbolic delta @@ -2480,7 +2480,7 @@ def _impl(inputs, attr, params, mod): k = int(_get_num_param(params, k_input)) except (IndexError, KeyError, AttributeError): try: - k = int(_infer_value(k_input, params, mod).asnumpy().tolist()) + k = int(_infer_value(k_input, params, mod).numpy().tolist()) except Exception: k = k_input if isinstance(k, int): @@ -3672,7 +3672,7 @@ def _convert_control_flow_operator(self, node, inputs, attrs, control_flow_node_ if node_name_prefix not in self._while_loop_name_set: try: cond_val = np.all( - _infer_value(branch.cond, self._params, self._mod).asnumpy() + _infer_value(branch.cond, self._params, self._mod).numpy() ) if cond_val: op = branch.true_branch diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py index 636173a52641..2d7613d046af 100644 --- a/python/tvm/relay/frontend/tflite.py +++ b/python/tvm/relay/frontend/tflite.py @@ -422,9 +422,9 @@ def has_same_qnn_params(self, lhs_tensor, rhs_tensor): rhs_zero_point = rhs_tensor.qnn_params["zero_point"] # 0.1 + 0.2 != 0.3 return np.allclose( - lhs_scale.data.asnumpy(), rhs_scale.data.asnumpy(), rtol=1e-5, atol=1e-5 + lhs_scale.data.numpy(), rhs_scale.data.numpy(), rtol=1e-5, atol=1e-5 ) and np.allclose( - lhs_zero_point.data.asnumpy(), rhs_zero_point.data.asnumpy(), rtol=1e-5, atol=1e-5 + lhs_zero_point.data.numpy(), rhs_zero_point.data.numpy(), rtol=1e-5, atol=1e-5 ) def is_quantized(self, op): @@ -434,7 +434,7 @@ def is_quantized(self, op): return first_tensor.qnn_params is not None def quantize(self, expr, tensor_to_quantize): - """ Helper function to quantize a tensor with Relay """ + """Helper function to quantize a tensor with Relay""" tensor_type = tensor_to_quantize.tensor.Type() tensor_type_str = self.get_tensor_type_str(tensor_type) quantized = _qnn.op.quantize( @@ -446,7 +446,7 @@ def quantize(self, expr, tensor_to_quantize): return quantized def dequantize(self, expr, tensor): - """ Helper 
function to dequantize a tensor with Relay """ + """Helper function to dequantize a tensor with Relay""" dequantized = _qnn.op.dequantize( data=expr, input_scale=tensor.qnn_params["scale"], @@ -640,7 +640,7 @@ def convert_resize_nearest_neighbor(self, op): return self._convert_resize("nearest_neighbor", op) def convert_l2_normalization(self, op): - """Convert TFLite L2_NORMALIZATION """ + """Convert TFLite L2_NORMALIZATION""" try: from tflite.BuiltinOptions import BuiltinOptions from tflite.L2NormOptions import L2NormOptions @@ -683,7 +683,7 @@ def convert_l2_normalization(self, op): return out def convert_lrn(self, op): - """Convert TFLite LOCAL_RESPONSE_NORMALIZATION """ + """Convert TFLite LOCAL_RESPONSE_NORMALIZATION""" try: from tflite.BuiltinOptions import BuiltinOptions from tflite.LocalResponseNormalizationOptions import LocalResponseNormalizationOptions @@ -2389,7 +2389,7 @@ def convert_tile(self, op): return out def convert_topk_v2(self, op): - """ Convert TFLite TOPK_v2 """ + """Convert TFLite TOPK_v2""" input_tensors = self.get_input_tensors(op) assert len(input_tensors) == 2, "input tensors length should be 2" input_tensor = input_tensors[0] @@ -2539,7 +2539,7 @@ def convert_pad(self, op): ), "TFLite PADV2 requires input and output scale and zero points to be equal" # The pad value for quantized pad is the input zero point by default. - pad_value = float(input_tensor.qnn_params["zero_point"].data.asnumpy()) + pad_value = float(input_tensor.qnn_params["zero_point"].data.numpy()) if len(input_tensors) == 3: pad_value = self.get_tensor_value(input_tensors[2]) @@ -3328,7 +3328,7 @@ def get_prefetched_node(self, input_tensor_idx): return self.prefetched_nodes[get_tensor_name(self.subgraph, input_tensor_idx)] def get_tensor_expr(self, tensor, is_sparse=False): - """ Return the Relay expr for tensor. """ + """Return the Relay expr for tensor.""" if self.has_expr(tensor.tensor_idx): expr = self.get_expr(tensor.tensor_idx) else: @@ -3337,7 +3337,7 @@ def get_tensor_expr(self, tensor, is_sparse=False): return expr def get_tensor_shape(self, tensor_wrapper): - """ Returns tensor shape. Infers shape if the shape is empty. """ + """Returns tensor shape. Infers shape if the shape is empty.""" assert isinstance(tensor_wrapper, TensorWrapper), "Expecting TensorWrapper here" return ( tensor_wrapper.tensor.ShapeAsNumpy() @@ -3348,7 +3348,7 @@ def get_tensor_shape(self, tensor_wrapper): # pylint: disable=no-else-return def prepare_dense_matrix_from_sparse(sparse_tensor, sparse_tensor_value, sparse_tensor_type): - """ Prepare sparse indices and dense matrix from TFLite sparse parameters. """ + """Prepare sparse indices and dense matrix from TFLite sparse parameters.""" # The function is implemented based on TFLite sparse parameter specifications # Please refer # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/schema/schema.fbs#L89 @@ -3488,11 +3488,11 @@ def _def_prepare_dense_matrix_from_sparse(indices, level, prev_idx): def get_scalar_from_constant(expr): - """ Returns scalar value from Relay constant scalar. """ + """Returns scalar value from Relay constant scalar.""" assert ( isinstance(expr, _expr.Constant) and not expr.data.shape ), "Expr is not a constant scalar." 
- value = expr.data.asnumpy() + value = expr.data.numpy() assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype( np.float32 ), "value must be float32/int32" @@ -3500,9 +3500,9 @@ def get_scalar_from_constant(expr): def get_tensor_from_constant(expr): - """ Returns tensor of values from Relay constant node. """ + """Returns tensor of values from Relay constant node.""" assert isinstance(expr, _expr.Constant) - value = expr.data.asnumpy() + value = expr.data.numpy() assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype( np.float32 ), "value must be float32/int32" diff --git a/python/tvm/relay/op/algorithm.py b/python/tvm/relay/op/algorithm.py index 6fd5c0645eed..119936f632f8 100644 --- a/python/tvm/relay/op/algorithm.py +++ b/python/tvm/relay/op/algorithm.py @@ -107,7 +107,7 @@ def topk(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int32"): The computed result. """ if isinstance(k, Constant): - k = k.data.asnumpy().item() + k = k.data.numpy().item() if isinstance(k, Expr): out = _dyn_make.topk(data, k, axis, ret_type, is_ascend, dtype) else: diff --git a/python/tvm/relay/op/contrib/ethosn.py b/python/tvm/relay/op/contrib/ethosn.py index 2cd787a74dc1..39ecec7049b3 100644 --- a/python/tvm/relay/op/contrib/ethosn.py +++ b/python/tvm/relay/op/contrib/ethosn.py @@ -214,8 +214,8 @@ def qnn_concatenate(expr): max_range = -1e9 qnn_params = [] for i in range(len(args[1].fields)): - scale = args[1].fields[i].data.asnumpy() - zero_point = args[2].fields[i].data.asnumpy() + scale = args[1].fields[i].data.numpy() + zero_point = args[2].fields[i].data.numpy() min_range = min(-1 * zero_point * scale, min_range) max_range = max((255 - zero_point) * scale, max_range) qnn_params.append((scale, zero_point)) diff --git a/python/tvm/relay/op/image/image.py b/python/tvm/relay/op/image/image.py index dbfe20aee72a..6d7d79264844 100644 --- a/python/tvm/relay/op/image/image.py +++ b/python/tvm/relay/op/image/image.py @@ -80,7 +80,7 @@ def resize( The resized result. """ if isinstance(size, Constant): - size = list(size.data.asnumpy().astype("int32")) + size = list(size.data.numpy().astype("int32")) if isinstance(size, Expr): return _dyn_make.resize( data, diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py index ba491954ac63..91c148b5df2e 100644 --- a/python/tvm/relay/op/nn/nn.py +++ b/python/tvm/relay/op/nn/nn.py @@ -1339,9 +1339,9 @@ def upsampling( The computed result. """ if isinstance(scale_h, Constant): - scale_h = scale_h.data.asnumpy().item() + scale_h = scale_h.data.numpy().item() if isinstance(scale_w, Constant): - scale_w = scale_w.data.asnumpy().item() + scale_w = scale_w.data.numpy().item() if isinstance(scale_h, Expr) or isinstance(scale_w, Expr): if not isinstance(scale_h, Expr): scale_h = const(scale_h, "float64") @@ -1402,11 +1402,11 @@ def upsampling3d( The computed result. """ if isinstance(scale_d, Constant): - scale_d = scale_d.data.asnumpy().item() + scale_d = scale_d.data.numpy().item() if isinstance(scale_h, Constant): - scale_h = scale_h.data.asnumpy().item() + scale_h = scale_h.data.numpy().item() if isinstance(scale_w, Constant): - scale_w = scale_w.data.asnumpy().item() + scale_w = scale_w.data.numpy().item() if isinstance(scale_d, Expr) or isinstance(scale_h, Expr) or isinstance(scale_w, Expr): if not isinstance(scale_d, Expr): scale_d = const(scale_d, "float64") @@ -1666,7 +1666,7 @@ def pad(data, pad_width, pad_value=0, pad_mode="constant"): The computed result. 
""" if isinstance(pad_width, Constant): - pad_width = [list(i) for i in pad_width.data.asnumpy()] + pad_width = [list(i) for i in pad_width.data.numpy()] if not isinstance(pad_value, Expr): pad_value = const(pad_value) if isinstance(pad_width, Expr): diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index 6b9ac30d3a3a..a38a23064d6f 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -961,7 +961,7 @@ def zeros(shape, dtype): The resulting tensor. """ if isinstance(shape, Constant): - shape = list(shape.data.asnumpy()) + shape = list(shape.data.numpy()) if isinstance(shape, Expr): return _dyn_make.zeros(shape, dtype) if isinstance(shape, int): @@ -1004,7 +1004,7 @@ def ones(shape, dtype): The resulting tensor. """ if isinstance(shape, Constant): - shape = list(shape.data.asnumpy()) + shape = list(shape.data.numpy()) if isinstance(shape, Expr): return _dyn_make.ones(shape, dtype) if isinstance(shape, int): diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index eeb8644d4328..c87f545c138a 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -217,7 +217,7 @@ def reshape(data, newshape): The reshaped result. """ if isinstance(newshape, Constant): - newshape = list(newshape.data.asnumpy()) + newshape = list(newshape.data.numpy()) if isinstance(newshape, Expr): return _dyn_make.reshape(data, newshape) if isinstance(newshape, int): @@ -440,7 +440,7 @@ def full(fill_value, shape=(), dtype=""): The resulting tensor. """ if isinstance(shape, Constant): - shape = list(shape.data.asnumpy()) + shape = list(shape.data.numpy()) if isinstance(shape, Expr): return _dyn_make.full(fill_value, shape, dtype) if isinstance(shape, int): @@ -625,7 +625,7 @@ def tile(data, reps): If data.ndim >= d, reps is promoted to a.ndim by pre-pending 1's to it. """ if isinstance(reps, Constant): - reps = list(reps.data.asnumpy()) + reps = list(reps.data.numpy()) if isinstance(reps, Expr): return _dyn_make.tile(data, reps) return _make.tile(data, reps) @@ -766,7 +766,7 @@ def broadcast_to(data, shape): The resulting tensor. 
""" if isinstance(shape, Constant): - shape = list(shape.data.asnumpy()) + shape = list(shape.data.numpy()) if isinstance(shape, Expr): return _dyn_make.broadcast_to(data, shape) if isinstance(shape, int): @@ -899,11 +899,11 @@ def strided_slice(data, begin, end, strides=None, slice_mode="end"): """ strides = strides or [1] if isinstance(begin, Constant): - begin = list(begin.data.asnumpy()) + begin = list(begin.data.numpy()) if isinstance(end, Constant): - end = list(end.data.asnumpy()) + end = list(end.data.numpy()) if isinstance(strides, Constant): - strides = list(strides.data.asnumpy()) + strides = list(strides.data.numpy()) if isinstance(begin, Expr) or isinstance(end, Expr) or isinstance(strides, Expr): if isinstance(begin, (tuple, list)): begin = const(list(begin)) @@ -1200,7 +1200,7 @@ def one_hot(indices, on_value, off_value, depth, axis, dtype): [0, 0, 1]] """ if isinstance(depth, Constant): - depth = depth.data.asnumpy().item() + depth = depth.data.numpy().item() if isinstance(depth, Expr): return _dyn_make.one_hot(indices, on_value, off_value, depth, axis, dtype) return _make.one_hot(indices, on_value, off_value, depth, axis, dtype) diff --git a/python/tvm/relay/qnn/op/legalizations.py b/python/tvm/relay/qnn/op/legalizations.py index d74b3d989270..3c4d2ddcd0ec 100644 --- a/python/tvm/relay/qnn/op/legalizations.py +++ b/python/tvm/relay/qnn/op/legalizations.py @@ -83,11 +83,11 @@ def qnn_dense_legalize(attrs, inputs, types): def get_scalar_from_constant(expr): - """ Returns scalar value from Relay constant scalar. """ + """Returns scalar value from Relay constant scalar.""" assert ( isinstance(expr, relay.Constant) and not expr.data.shape ), "Expr is not a constant scalar." - value = expr.data.asnumpy() + value = expr.data.numpy() assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype( np.float32 ), "value must be float32/int32" @@ -265,19 +265,19 @@ def _shift(data, zero_point, out_dtype): def is_fast_int8_on_intel(): - """ Checks whether the hardware has support for fast Int8 arithmetic operations. """ + """Checks whether the hardware has support for fast Int8 arithmetic operations.""" target = tvm.target.Target.current(allow_none=False) return target.mcpu in {"skylake-avx512", "cascadelake"} def is_fast_int8_on_arm(): - """ Checks whether the hardware has support for fast Int8 arithmetic operations. """ + """Checks whether the hardware has support for fast Int8 arithmetic operations.""" target = tvm.target.Target.current(allow_none=False) return "+v8.2a" in target.mattr and "+dotprod" in target.mattr def is_aarch64_arm(): - """ Checks whether we are compiling for an AArch64 target. 
""" + """Checks whether we are compiling for an AArch64 target.""" target = tvm.target.Target.current(allow_none=False) return "aarch64" in target.attrs.get("mtriple", "") diff --git a/python/tvm/relay/quantize/_calibrate.py b/python/tvm/relay/quantize/_calibrate.py index b56c09cdad09..ae3a846c11ed 100644 --- a/python/tvm/relay/quantize/_calibrate.py +++ b/python/tvm/relay/quantize/_calibrate.py @@ -86,7 +86,7 @@ def collect_stats(mod, dataset, chunk_by=-1): runtime.set_input(**batch) runtime.run() for j in range(i, min(i + chunk_by, num_outputs)): - outputs[j - i].append(runtime.get_output(j).asnumpy()) + outputs[j - i].append(runtime.get_output(j).numpy()) yield [np.concatenate(output).reshape(-1) for output in outputs] @@ -179,7 +179,7 @@ def _power2_scale(sq_call): # pylint: disable=unused-argument """calculate weight scale with nearest mode-2 scale""" var = sq_call.args[0] assert isinstance(var, _expr.Constant) - val = np.amax(np.abs(var.data.asnumpy())) + val = np.amax(np.abs(var.data.numpy())) return 2 ** np.math.ceil(np.math.log(val, 2)) if val > 0 else 1.0 @@ -187,7 +187,7 @@ def _max_scale(sq_call): """calculate weight scale with maximum absolute value""" var = sq_call.args[0] assert isinstance(var, _expr.Constant) - val = np.amax(np.abs(var.data.asnumpy())) + val = np.amax(np.abs(var.data.numpy())) return val diff --git a/python/tvm/relay/testing/__init__.py b/python/tvm/relay/testing/__init__.py index e889e9078a84..bfe797d844a8 100644 --- a/python/tvm/relay/testing/__init__.py +++ b/python/tvm/relay/testing/__init__.py @@ -138,7 +138,7 @@ def check_grad( # Get analytic gradients. _, grads = intrp.evaluate(bwd_func)(*inputs) - grads = [grad.asnumpy().astype("float64") for grad in grads] + grads = [grad.numpy().astype("float64") for grad in grads] # Throw out gradients we aren't testing if inputs != test_inputs: @@ -160,9 +160,9 @@ def check_grad( for i in np.ndindex(*x.shape): x_i = x[i] x[i] = x_i + eps - fwd_plus = intrp.evaluate(fwd_func)(*inputs).asnumpy().astype("float64") + fwd_plus = intrp.evaluate(fwd_func)(*inputs).numpy().astype("float64") x[i] = x_i - eps - fwd_minus = intrp.evaluate(fwd_func)(*inputs).asnumpy().astype("float64") + fwd_minus = intrp.evaluate(fwd_func)(*inputs).numpy().astype("float64") x[i] = x_i approx_grad[i] = np.sum((fwd_plus - fwd_minus) / (2 * eps)) approx_grads.append(approx_grad) diff --git a/python/tvm/relay/testing/py_converter.py b/python/tvm/relay/testing/py_converter.py index 283a238a7626..b9d6806306f4 100644 --- a/python/tvm/relay/testing/py_converter.py +++ b/python/tvm/relay/testing/py_converter.py @@ -467,7 +467,7 @@ def visit_if(self, if_block: Expr): false_body, false_defs = self.visit(if_block.false_branch) # need to get the value out of a NDArray to check the condition - # equvialent to: val.asnumpy() + # equvialent to: val.numpy() cond_check = ast.Call(ast.Attribute(cond_body, "asnumpy", Load()), [], []) ret = ast.IfExp(cond_check, true_body, false_body) return (ret, cond_defs + true_defs + false_defs) @@ -476,7 +476,7 @@ def visit_constant(self, constant: Expr): """Proceeds by converting constant value to a numpy array and converting it to the appropriate value in the generated code (whether it be a Python scalar or a Numpy array)""" - value = constant.data.asnumpy() + value = constant.data.numpy() const_expr = ast.Call( ast.Attribute(Name("numpy", Load()), "array", Load()), [self.parse_numpy_array(value)], diff --git a/python/tvm/relay/transform/memory_plan.py b/python/tvm/relay/transform/memory_plan.py index 
c97053f976aa..ca67014730c7 100644 --- a/python/tvm/relay/transform/memory_plan.py +++ b/python/tvm/relay/transform/memory_plan.py @@ -302,7 +302,7 @@ def process_alloc_tensor(self, lhs, call): storage, old_offset, shape = call.args region, offset = self.new_region_and_offset(storage) - assert old_offset.data.asnumpy().item() == 0, "no offsets should yet be allocated" + assert old_offset.data.numpy().item() == 0, "no offsets should yet be allocated" return ( lhs, expr.Call(call.op, [region.var, offset, shape], call.attrs), diff --git a/python/tvm/rpc/testing.py b/python/tvm/rpc/testing.py index b7acc74c413a..32c0ca00cf33 100644 --- a/python/tvm/rpc/testing.py +++ b/python/tvm/rpc/testing.py @@ -45,7 +45,7 @@ def _strcat(x, y): @tvm.register_func("rpc.test.remote_array_func") def _remote_array_func(y): x = np.ones((3, 4)) - np.testing.assert_equal(y.asnumpy(), x) + np.testing.assert_equal(y.numpy(), x) @tvm.register_func("rpc.test.add_to_lhs") @@ -63,7 +63,7 @@ def _my_module(name): if name == "ref_count": return lambda: tvm.testing.object_use_count(nd) if name == "get_elem": - return lambda idx: nd.asnumpy()[idx] + return lambda idx: nd.numpy()[idx] if name == "get_arr_elem": - return lambda arr, idx: arr.asnumpy()[idx] + return lambda arr, idx: arr.numpy()[idx] raise RuntimeError("unknown name") diff --git a/python/tvm/runtime/ndarray.py b/python/tvm/runtime/ndarray.py index 823b1cc4f8b5..e19221c9f186 100644 --- a/python/tvm/runtime/ndarray.py +++ b/python/tvm/runtime/ndarray.py @@ -176,13 +176,23 @@ def copyfrom(self, source_array): def __repr__(self): res = "<tvm.nd.NDArray shape={0}, {1}>\n".format(self.shape, self.device) - res += self.asnumpy().__repr__() + res += self.numpy().__repr__() return res def __str__(self): - return str(self.asnumpy()) + return str(self.numpy()) def asnumpy(self): + """Convert this array to numpy array. This API will be deprecated in TVM v0.8 release. + Please use `numpy` instead.""" + warnings.warn( + "NDArray.asnumpy() will be deprecated in TVM v0.8 release. 
" + "Please use NDArray.numpy() instead.", + DeprecationWarning, + ) + return self.numpy() + + def numpy(self): """Convert this array to numpy array Returns diff --git a/python/tvm/runtime/vm.py b/python/tvm/runtime/vm.py index 4fc90ac27a43..429da5892628 100644 --- a/python/tvm/runtime/vm.py +++ b/python/tvm/runtime/vm.py @@ -135,7 +135,7 @@ def save(self): x_data = np.random.rand(10, 10).astype('float32') des_vm = tvm.runtime.vm.VirtualMachine(des_exec, dev) res = des_vm.run(x_data) - print(res.asnumpy()) + print(res.numpy()) """ return self._save(), self._get_lib() diff --git a/python/tvm/testing.py b/python/tvm/testing.py index 22ede78146d1..1c3346169c6b 100644 --- a/python/tvm/testing.py +++ b/python/tvm/testing.py @@ -294,7 +294,7 @@ def _compute_body(*us): sch = tvm.te.create_schedule(A.op) mod = tvm.build(sch, [A]) mod(*args) - return args[0].asnumpy() + return args[0].numpy() res = _run_expr(bool_expr, vranges) if not np.all(res): diff --git a/python/tvm/tir/buffer.py b/python/tvm/tir/buffer.py index d7067a5bdd94..267f0151a91b 100644 --- a/python/tvm/tir/buffer.py +++ b/python/tvm/tir/buffer.py @@ -222,7 +222,7 @@ def decl_buffer( b = tvm.nd.array(np.random.uniform(size=(2, 1, 3)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), dev) fadd(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) Note ---- diff --git a/python/tvm/topi/cuda/sparse.py b/python/tvm/topi/cuda/sparse.py index b6baa9cd67a5..32f20a15016e 100644 --- a/python/tvm/topi/cuda/sparse.py +++ b/python/tvm/topi/cuda/sparse.py @@ -400,15 +400,15 @@ def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type): isinstance(inputs[1], relay.Constant) and isinstance(inputs[2], relay.Constant) and isinstance(inputs[3], relay.Constant) - and is_valid_for_sparse_dense_padded(inputs[0], inputs[1].data.asnumpy()) + and is_valid_for_sparse_dense_padded(inputs[0], inputs[1].data.numpy()) ): - if len(inputs[1].data.asnumpy().shape) == 1: + if len(inputs[1].data.numpy().shape) == 1: sparse_matrix = sp.csr_matrix( - (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy()) + (inputs[1].data.numpy(), inputs[2].data.numpy(), inputs[3].data.numpy()) ).tobsr() else: sparse_matrix = sp.bsr_matrix( - (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy()) + (inputs[1].data.numpy(), inputs[2].data.numpy(), inputs[3].data.numpy()) ) warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size) sparse_matrix = pad_sparse_matrix(sparse_matrix, warp_size) diff --git a/python/tvm/topi/random/kernel.py b/python/tvm/topi/random/kernel.py index 5aaa4a144a74..1be4c86e63c0 100644 --- a/python/tvm/topi/random/kernel.py +++ b/python/tvm/topi/random/kernel.py @@ -465,7 +465,7 @@ def gen_ir(out_ptr): s = tvm.te.create_schedule([f.op]) out_ary = tvm.nd.array(np.ones((1,), "uint64"), device) tvm.build(s, [f], target=target)(out_ary) - return out_ary.asnumpy()[0] == 0 + return out_ary.numpy()[0] == 0 def uniform(gen, low, high, out_shape, out_dtype): diff --git a/python/tvm/topi/sparse/utils.py b/python/tvm/topi/sparse/utils.py index f57418ee399a..e8636f95fcc6 100644 --- a/python/tvm/topi/sparse/utils.py +++ b/python/tvm/topi/sparse/utils.py @@ -78,7 +78,7 @@ def random_sparse_dense_params(func, params, bs_r, bs_c, density): def deepcopy(param_dic): ret = {} for k, v in param_dic.items(): - ret[k] = tvm.nd.array(v.asnumpy()) + ret[k] = tvm.nd.array(v.numpy()) return ret new_params 
= deepcopy(params) @@ -121,7 +121,7 @@ def random_sparse_conv2d_params(func, params, bs_r, bs_c, density, layout): def deepcopy(param_dic): ret = {} for k, v in param_dic.items(): - ret[k] = tvm.nd.array(v.asnumpy()) + ret[k] = tvm.nd.array(v.numpy()) return ret new_params = deepcopy(params) diff --git a/python/tvm/topi/testing/common.py b/python/tvm/topi/testing/common.py index 69ffc1482ba1..785a6d11d8a7 100644 --- a/python/tvm/topi/testing/common.py +++ b/python/tvm/topi/testing/common.py @@ -107,4 +107,4 @@ def compare_numpy_tvm(inputs, output, target, device, compute, schedule): func = tvm.build(s, te_inputs + [out]) arys = [tvm.nd.array(x, device=device) for x in inputs] func(*(arys + [te_out])) - assert_allclose(te_out.asnumpy(), output, atol=1e-4, rtol=1e-4) + assert_allclose(te_out.numpy(), output, atol=1e-4, rtol=1e-4) diff --git a/rust/tvm/examples/resnet/src/build_resnet.py b/rust/tvm/examples/resnet/src/build_resnet.py index 13c499b54deb..277555eeb409 100644 --- a/rust/tvm/examples/resnet/src/build_resnet.py +++ b/rust/tvm/examples/resnet/src/build_resnet.py @@ -63,7 +63,7 @@ def build(target_dir): - """ Compiles resnet18 with TVM""" + """Compiles resnet18 with TVM""" # Download the pretrained model in MxNet's format. block = get_model("resnet18_v1", pretrained=True) @@ -92,7 +92,7 @@ def build(target_dir): def download_img_labels(): - """ Download an image and imagenet1k class labels for test""" + """Download an image and imagenet1k class labels for test""" from mxnet.gluon.utils import download synset_url = "".join( @@ -135,7 +135,7 @@ def get_cat_image(): def test_build(build_dir): - """ Sanity check with the cat image we download.""" + """Sanity check with the cat image we download.""" graph = open(osp.join(build_dir, "deploy_graph.json")).read() lib = tvm.runtime.load_module(osp.join(build_dir, "deploy_lib.so")) params = bytearray(open(osp.join(build_dir, "deploy_param.params"), "rb").read()) @@ -144,7 +144,7 @@ def test_build(build_dir): module = graph_executor.create(graph, lib, dev) module.load_params(params) module.run(data=input_data) - out = module.get_output(0).asnumpy() + out = module.get_output(0).numpy() top1 = np.argmax(out[0]) synset = download_img_labels() print("TVM prediction top-1:", top1, synset[top1]) diff --git a/tests/micro/test_runtime_micro_on_arm.py b/tests/micro/test_runtime_micro_on_arm.py index 7d19d9510062..0212c3ea2692 100644 --- a/tests/micro/test_runtime_micro_on_arm.py +++ b/tests/micro/test_runtime_micro_on_arm.py @@ -95,7 +95,7 @@ def test_alloc(): ctx = tvm.micro_dev(0) np_tensor = np.random.uniform(size=shape).astype(dtype) micro_tensor = tvm.nd.array(np_tensor, ctx) - tvm.testing.assert_allclose(np_tensor, micro_tensor.asnumpy()) + tvm.testing.assert_allclose(np_tensor, micro_tensor.numpy()) def test_add(): @@ -130,10 +130,10 @@ def test_add(): micro_func(a, b, c) # ensure inputs weren't corrupted - tvm.testing.assert_allclose(a.asnumpy(), a_np) - tvm.testing.assert_allclose(b.asnumpy(), b_np) + tvm.testing.assert_allclose(a.numpy(), a_np) + tvm.testing.assert_allclose(b.numpy(), b_np) # ensure output is correct - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) def test_workspace_add(): @@ -166,9 +166,9 @@ def test_workspace_add(): micro_func(a, c) # ensure input wasn't corrupted - tvm.testing.assert_allclose(a.asnumpy(), a_np) + tvm.testing.assert_allclose(a.numpy(), a_np) # ensure output is correct - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() 
+ 2.0) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 2.0) def test_graph_executor(): @@ -189,9 +189,9 @@ def test_graph_executor(): x_in = np.random.uniform(size=shape[0]).astype(dtype) mod.run(x=x_in) - result = mod.get_output(0).asnumpy() + result = mod.get_output(0).numpy() - tvm.testing.assert_allclose(mod.get_input(0).asnumpy(), x_in) + tvm.testing.assert_allclose(mod.get_input(0).numpy(), x_in) tvm.testing.assert_allclose(result, x_in * x_in + 1.0) @@ -241,11 +241,11 @@ def test_conv2d(): micro_func(x_data, w_data, result) out_data = np.zeros(out_shape, dtype=dtype) - params = {"x": x_data.asnumpy(), "w": w_data.asnumpy()} + params = {"x": x_data.numpy(), "w": w_data.numpy()} intrp = create_executor("debug") expected_result = intrp.evaluate(mod["main"])(x_data, w_data) - tvm.testing.assert_allclose(result.asnumpy(), expected_result.asnumpy()) + tvm.testing.assert_allclose(result.numpy(), expected_result.numpy()) def test_interleave_sessions(): @@ -271,12 +271,12 @@ def test_interleave_sessions(): with sess_a: add_const_mod = relay_micro_build(add_const_func, DEV_CONFIG_A) add_const_mod.run(x=micro_tensor_a) - add_result = add_const_mod.get_output(0).asnumpy() + add_result = add_const_mod.get_output(0).numpy() tvm.testing.assert_allclose(add_result, np_tensor_a + 1.0) with sess_b: add_const_mod = relay_micro_build(add_const_func, DEV_CONFIG_B) add_const_mod.run(x=micro_tensor_b) - add_result = add_const_mod.get_output(0).asnumpy() + add_result = add_const_mod.get_output(0).numpy() tvm.testing.assert_allclose(add_result, np_tensor_b + 1.0) @@ -302,7 +302,7 @@ def test_nested_sessions(): micro_tensor_b = tvm.nd.array(np_tensor_b, tvm.micro_dev(0)) add_const_mod = relay_micro_build(add_const_func, DEV_CONFIG_A) add_const_mod.run(x=micro_tensor_a) - add_result = add_const_mod.get_output(0).asnumpy() + add_result = add_const_mod.get_output(0).numpy() tvm.testing.assert_allclose(add_result, np_tensor_a + 1.0) @@ -328,7 +328,7 @@ def test_inactive_session_use(): with sess_b: # These objects belong to `sess_a`. add_const_mod.run(x=micro_tensor_a) - add_result = add_const_mod.get_output(0).asnumpy() + add_result = add_const_mod.get_output(0).numpy() tvm.testing.assert_allclose(add_result, np_tensor_a + 1.0) diff --git a/tests/micro/zephyr/test_zephyr.py b/tests/micro/zephyr/test_zephyr.py index 4da1f12b273a..e217ec39ed1d 100644 --- a/tests/micro/zephyr/test_zephyr.py +++ b/tests/micro/zephyr/test_zephyr.py @@ -139,15 +139,15 @@ def test_compile_runtime(platform, west_cmd): # NOTE: run test in a nested function so cPython will delete arrays before closing the session. 
def test_basic_add(sess): A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device) - assert (A_data.asnumpy() == np.array([2, 3])).all() + assert (A_data.numpy() == np.array([2, 3])).all() B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device) - assert (B_data.asnumpy() == np.array([4])).all() + assert (B_data.numpy() == np.array([4])).all() C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device) - assert (C_data.asnumpy() == np.array([0, 0])).all() + assert (C_data.numpy() == np.array([0, 0])).all() system_lib = sess.get_system_lib() system_lib.get_function("add")(A_data, B_data, C_data) - assert (C_data.asnumpy() == np.array([6, 7])).all() + assert (C_data.numpy() == np.array([6, 7])).all() with _make_add_sess(model, zephyr_board, west_cmd) as sess: test_basic_add(sess) @@ -161,18 +161,18 @@ def test_platform_timer(platform, west_cmd): # NOTE: run test in a nested function so cPython will delete arrays before closing the session. def test_basic_add(sess): A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device) - assert (A_data.asnumpy() == np.array([2, 3])).all() + assert (A_data.numpy() == np.array([2, 3])).all() B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device) - assert (B_data.asnumpy() == np.array([4])).all() + assert (B_data.numpy() == np.array([4])).all() C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device) - assert (C_data.asnumpy() == np.array([0, 0])).all() + assert (C_data.numpy() == np.array([0, 0])).all() system_lib = sess.get_system_lib() time_eval_f = system_lib.time_evaluator( "add", sess.device, number=20, repeat=3, min_repeat_ms=40 ) result = time_eval_f(A_data, B_data, C_data) - assert (C_data.asnumpy() == np.array([6, 7])).all() + assert (C_data.numpy() == np.array([6, 7])).all() assert result.mean > 0 assert len(result.results) == 3 @@ -203,8 +203,8 @@ def test_relay(platform, west_cmd): graph_mod.set_input(**params) x_in = np.random.randint(10, size=shape[0], dtype=dtype) graph_mod.run(x=x_in) - result = graph_mod.get_output(0).asnumpy() - tvm.testing.assert_allclose(graph_mod.get_input(0).asnumpy(), x_in) + result = graph_mod.get_output(0).numpy() + tvm.testing.assert_allclose(graph_mod.get_input(0).numpy(), x_in) tvm.testing.assert_allclose(result, x_in * x_in + 1) @@ -245,13 +245,13 @@ def test_onnx(platform, west_cmd): # Send the digit-2 image and confirm that the correct result is returned. graph_mod.set_input("Input3", tvm.nd.array(digit_2)) graph_mod.run() - result = graph_mod.get_output(0).asnumpy() + result = graph_mod.get_output(0).numpy() assert np.argmax(result) == 2 # Send the digit-9 image and confirm that the correct result is returned. 
graph_mod.set_input("Input3", tvm.nd.array(digit_9)) graph_mod.run() - result = graph_mod.get_output(0).asnumpy() + result = graph_mod.get_output(0).numpy() assert np.argmax(result) == 9 @@ -332,7 +332,7 @@ def check_result(relay_mod, model, zephyr_board, west_cmd, map_inputs, out_shape for idx, shape in enumerate(out_shapes): out = tvm.nd.empty(shape, device=session.device) out = rt_mod.get_output(idx, out) - tvm.testing.assert_allclose(out.asnumpy(), results[idx], rtol=TOL, atol=TOL) + tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=TOL, atol=TOL) def test_byoc_utvm(platform, west_cmd): diff --git a/tests/python/all-platform-minimal-test/test_minimal_target_codegen_llvm.py b/tests/python/all-platform-minimal-test/test_minimal_target_codegen_llvm.py index c2097bb15b52..82da49e41096 100644 --- a/tests/python/all-platform-minimal-test/test_minimal_target_codegen_llvm.py +++ b/tests/python/all-platform-minimal-test/test_minimal_target_codegen_llvm.py @@ -62,7 +62,7 @@ def check_llvm(): b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_llvm() @@ -101,7 +101,7 @@ def check_llvm(use_file): a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1.0) + tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1.0) check_llvm(use_file=True) check_llvm(use_file=False) diff --git a/tests/python/all-platform-minimal-test/test_runtime_ndarray.py b/tests/python/all-platform-minimal-test/test_runtime_ndarray.py index 00ca0b1af3cf..197a2f88e3fa 100644 --- a/tests/python/all-platform-minimal-test/test_runtime_ndarray.py +++ b/tests/python/all-platform-minimal-test/test_runtime_ndarray.py @@ -33,8 +33,8 @@ def test_nd_create(): assert y.dtype == x.dtype assert y.shape == x.shape assert isinstance(y, tvm.nd.NDArray) - np.testing.assert_equal(x, y.asnumpy()) - np.testing.assert_equal(x, z.asnumpy()) + np.testing.assert_equal(x, y.numpy()) + np.testing.assert_equal(x, z.numpy()) # no need here, just to test usablity dev.sync() @@ -54,8 +54,8 @@ def test_fp16_conversion(): func(x_tvm, y_tvm) - expected = x_tvm.asnumpy().astype(dst) - real = y_tvm.asnumpy() + expected = x_tvm.numpy().astype(dst) + real = y_tvm.numpy() tvm.testing.assert_allclose(expected, real) diff --git a/tests/python/contrib/test_arm_compute_lib/infrastructure.py b/tests/python/contrib/test_arm_compute_lib/infrastructure.py index 7e05194a5621..f151a85ec5b1 100644 --- a/tests/python/contrib/test_arm_compute_lib/infrastructure.py +++ b/tests/python/contrib/test_arm_compute_lib/infrastructure.py @@ -249,14 +249,12 @@ def verify(answers, atol, rtol, verify_saturation=False, config=None): try: if verify_saturation: assert ( - np.count_nonzero(outs[0].asnumpy() == 255) < 0.25 * outs[0].asnumpy().size + np.count_nonzero(outs[0].numpy() == 255) < 0.25 * outs[0].numpy().size ), "Output is saturated: {}".format(outs[0]) assert ( - np.count_nonzero(outs[0].asnumpy() == 0) < 0.25 * outs[0].asnumpy().size + np.count_nonzero(outs[0].numpy() == 0) < 0.25 * outs[0].numpy().size ), "Output is saturated: {}".format(outs[0]) - tvm.testing.assert_allclose( - outs[0].asnumpy(), outs[1].asnumpy(), rtol=rtol, atol=atol - ) + tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol) 
except AssertionError as e: err_msg = "Results not within the acceptable tolerance.\n" if config: diff --git a/tests/python/contrib/test_bnns/infrastructure.py b/tests/python/contrib/test_bnns/infrastructure.py index ebdf815aafb3..46bd049402a9 100644 --- a/tests/python/contrib/test_bnns/infrastructure.py +++ b/tests/python/contrib/test_bnns/infrastructure.py @@ -211,14 +211,12 @@ def verify(answers, atol, rtol, verify_saturation=False, config=None): try: if verify_saturation: assert ( - np.count_nonzero(outs[0].asnumpy() == 255) < 0.25 * outs[0].asnumpy().size + np.count_nonzero(outs[0].numpy() == 255) < 0.25 * outs[0].numpy().size ), "Output is saturated: {}".format(outs[0]) assert ( - np.count_nonzero(outs[0].asnumpy() == 0) < 0.25 * outs[0].asnumpy().size + np.count_nonzero(outs[0].numpy() == 0) < 0.25 * outs[0].numpy().size ), "Output is saturated: {}".format(outs[0]) - tvm.testing.assert_allclose( - outs[0].asnumpy(), outs[1].asnumpy(), rtol=rtol, atol=atol - ) + tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol) except AssertionError as e: err_msg = "Results not within the acceptable tolerance.\n" if config: diff --git a/tests/python/contrib/test_bnns/test_onnx_topologies.py b/tests/python/contrib/test_bnns/test_onnx_topologies.py index 25c4bc483333..c5eb6ffa2367 100644 --- a/tests/python/contrib/test_bnns/test_onnx_topologies.py +++ b/tests/python/contrib/test_bnns/test_onnx_topologies.py @@ -120,7 +120,7 @@ def run(mod, target, simplify=True, with_bnns=False): module = graph_executor.GraphModule(loaded_lib["default"](dev)) module.run() - return module.get_output(0).asnumpy() + return module.get_output(0).numpy() res_llvm = run(model, TARGET, simplify=True, with_bnns=False) res_bnns = run(model, TARGET, simplify=True, with_bnns=True) diff --git a/tests/python/contrib/test_cblas.py b/tests/python/contrib/test_cblas.py index 2245bb6c1527..b4fc2b283369 100644 --- a/tests/python/contrib/test_cblas.py +++ b/tests/python/contrib/test_cblas.py @@ -67,7 +67,7 @@ def verify(target="llvm"): bb = 10.0 f(a, b, d, bb) tvm.testing.assert_allclose( - d.asnumpy(), get_numpy(a.asnumpy(), b.asnumpy(), bb, transa, transb), rtol=1e-5 + d.numpy(), get_numpy(a.numpy(), b.numpy(), bb, transa, transb), rtol=1e-5 ) verify("llvm") @@ -138,8 +138,8 @@ def verify(target="llvm"): bb = 10 f(a, b, d, bb) tvm.testing.assert_allclose( - d.asnumpy(), - get_numpy(a.asnumpy().astype("int32"), b.asnumpy().astype("int32"), bb, transa, transb), + d.numpy(), + get_numpy(a.numpy().astype("int32"), b.numpy().astype("int32"), bb, transa, transb), rtol=1e-5, ) @@ -199,7 +199,7 @@ def verify(target="llvm"): d = tvm.nd.array(np.zeros((batch, n, m), dtype=D.dtype), dev) f(a, b, d) tvm.testing.assert_allclose( - d.asnumpy(), get_numpy(a.asnumpy(), b.asnumpy(), transa, transb), rtol=1e-5 + d.numpy(), get_numpy(a.numpy(), b.numpy(), transa, transb), rtol=1e-5 ) verify("llvm") diff --git a/tests/python/contrib/test_coreml_codegen.py b/tests/python/contrib/test_coreml_codegen.py index b93c489fdac6..7e678a790f4a 100644 --- a/tests/python/contrib/test_coreml_codegen.py +++ b/tests/python/contrib/test_coreml_codegen.py @@ -118,7 +118,7 @@ def test_compile_and_run(): out = m.get_output(0, out) expected = (y_data * y_data) - (x_data + x_data) - tvm.testing.assert_allclose(out.asnumpy(), expected, rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), expected, rtol=tol, atol=tol) @mock.patch("tvm.contrib.coreml_runtime.create") diff --git a/tests/python/contrib/test_coreml_runtime.py 
b/tests/python/contrib/test_coreml_runtime.py index 4a2ce487e65d..002fe302a073 100644 --- a/tests/python/contrib/test_coreml_runtime.py +++ b/tests/python/contrib/test_coreml_runtime.py @@ -76,7 +76,7 @@ def verify(coreml_model, model_path, dev): for name in inputs: runtime.set_input(name, tvm.nd.array(inputs[name], dev)) runtime.invoke() - tvm_outputs = [runtime.get_output(i).asnumpy() for i in range(runtime.get_num_outputs())] + tvm_outputs = [runtime.get_output(i).numpy() for i in range(runtime.get_num_outputs())] for c_out, t_out in zip(coreml_outputs, tvm_outputs): np.testing.assert_almost_equal(c_out, t_out, 3) diff --git a/tests/python/contrib/test_cublas.py b/tests/python/contrib/test_cublas.py index d871a384e1ff..a0f51ca7c9fc 100644 --- a/tests/python/contrib/test_cublas.py +++ b/tests/python/contrib/test_cublas.py @@ -42,7 +42,7 @@ def verify(target="cuda"): c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev) f(a, b, c) tvm.testing.assert_allclose( - c.asnumpy(), np.dot(a.asnumpy().astype(C.dtype), b.asnumpy().astype(C.dtype)), rtol=rtol + c.numpy(), np.dot(a.numpy().astype(C.dtype), b.numpy().astype(C.dtype)), rtol=rtol ) verify() @@ -100,7 +100,7 @@ def verify(target="cuda"): c = tvm.nd.array(np.zeros((m, N_out), dtype=C.dtype), dev) f(a, b, c) # Transform output c from layout CUBLASLT_ORDER_COL32 to row major layout - c_out = c.asnumpy() + c_out = c.numpy() c_out = c_out.reshape([int(m * N_out / 32), 32]) c_out = np.hstack(np.vsplit(c_out, int(N_out / 32))) c_out = c_out[:, :n] @@ -133,8 +133,8 @@ def verify(target="cuda"): c = tvm.nd.array(np.zeros((j, n, m), dtype=C.dtype), dev) f(a, b, c) tvm.testing.assert_allclose( - c.asnumpy(), - np.matmul(a.asnumpy().astype(C.dtype), b.asnumpy().astype(C.dtype)).astype(C.dtype), + c.numpy(), + np.matmul(a.numpy().astype(C.dtype), b.numpy().astype(C.dtype)).astype(C.dtype), rtol=rtol, ) diff --git a/tests/python/contrib/test_cudnn.py b/tests/python/contrib/test_cudnn.py index d73f81bf5bd3..8a929f550a4f 100644 --- a/tests/python/contrib/test_cudnn.py +++ b/tests/python/contrib/test_cudnn.py @@ -86,7 +86,7 @@ def verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1): c_np = tvm.topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups) f(x, w, y) - tvm.testing.assert_allclose(y.asnumpy(), c_np, atol=1e-2, rtol=1e-2) + tvm.testing.assert_allclose(y.numpy(), c_np, atol=1e-2, rtol=1e-2) @tvm.testing.requires_gpu @@ -163,7 +163,7 @@ def verify_conv3d(data_dtype, conv_dtype, tensor_format=0, groups=1): raise AssertionError("For now, conv3d tensor format only support: 0(NCHW)") f(x, w, y) - tvm.testing.assert_allclose(y.asnumpy(), c_np, atol=3e-5, rtol=1e-4) + tvm.testing.assert_allclose(y.numpy(), c_np, atol=3e-5, rtol=1e-4) @tvm.testing.requires_gpu @@ -184,7 +184,7 @@ def verify_softmax(shape, axis, dtype="float32"): b = tvm.nd.array(b_np, dev) f = tvm.build(s, [A, B], target="cuda --host=llvm", name="softmax") f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3) def verify_softmax_4d(shape, dtype="float32"): @@ -201,7 +201,7 @@ def verify_softmax_4d(shape, dtype="float32"): b = tvm.nd.array(b_np, dev) f = tvm.build(s, [A, B], target="cuda --host=llvm", name="softmax") f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3) @tvm.testing.requires_gpu diff --git a/tests/python/contrib/test_dlpack.py b/tests/python/contrib/test_dlpack.py index ca6592b3e61e..c71fc45d0346 100644 --- 
a/tests/python/contrib/test_dlpack.py +++ b/tests/python/contrib/test_dlpack.py @@ -24,7 +24,7 @@ def test(): a = np.random.randn(1337) tvm_a = tvm.nd.array(a) - np.testing.assert_equal(tvm.nd.from_dlpack(tvm_a.to_dlpack()).asnumpy(), a) + np.testing.assert_equal(tvm.nd.from_dlpack(tvm_a.to_dlpack()).numpy(), a) try: import torch @@ -32,11 +32,11 @@ def test(): x = torch.rand(56, 56) tvm_x = tvm.nd.from_dlpack(torch.utils.dlpack.to_dlpack(x)) - np.testing.assert_equal(x.numpy(), tvm_x.asnumpy()) + np.testing.assert_equal(x.numpy(), tvm_x.numpy()) y = tvm.nd.from_dlpack(tvm_x) - np.testing.assert_equal(y.asnumpy(), tvm_x.asnumpy()) + np.testing.assert_equal(y.numpy(), tvm_x.numpy()) np.testing.assert_equal( - torch.utils.dlpack.from_dlpack(y.to_dlpack()).numpy(), tvm_x.asnumpy() + torch.utils.dlpack.from_dlpack(y.to_dlpack()).numpy(), tvm_x.numpy() ) n = tvm.runtime.convert(137) diff --git a/tests/python/contrib/test_edgetpu_runtime.py b/tests/python/contrib/test_edgetpu_runtime.py index 449e0137d113..7e59ab2e3cc6 100644 --- a/tests/python/contrib/test_edgetpu_runtime.py +++ b/tests/python/contrib/test_edgetpu_runtime.py @@ -80,7 +80,7 @@ def check_remote(target_edgetpu=False): runtime.set_input(0, tvm.nd.array(tflite_input, dev)) runtime.invoke() out = runtime.get_output(0) - np.testing.assert_equal(out.asnumpy(), tflite_output) + np.testing.assert_equal(out.numpy(), tflite_output) # Target CPU on coral board check_remote() diff --git a/tests/python/contrib/test_ethosn/infrastructure.py b/tests/python/contrib/test_ethosn/infrastructure.py index 59021cf86211..ba03acc1c112 100644 --- a/tests/python/contrib/test_ethosn/infrastructure.py +++ b/tests/python/contrib/test_ethosn/infrastructure.py @@ -235,12 +235,12 @@ def verify(answers, atol, rtol=1e-07, verify_saturation=True): for outs in combinations(answer, 2): if verify_saturation: assert ( - np.count_nonzero(outs[0].asnumpy() == 255) < 0.25 * outs[0].asnumpy().size + np.count_nonzero(outs[0].numpy() == 255) < 0.25 * outs[0].numpy().size ), "Output is saturated: {}".format(outs[0]) assert ( - np.count_nonzero(outs[0].asnumpy() == 0) < 0.25 * outs[0].asnumpy().size + np.count_nonzero(outs[0].numpy() == 0) < 0.25 * outs[0].numpy().size ), "Output is saturated: {}".format(outs[0]) - tvm.testing.assert_allclose(outs[0].asnumpy(), outs[1].asnumpy(), rtol=rtol, atol=atol) + tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol) def inference_result(outputs): diff --git a/tests/python/contrib/test_ethosn/test_constant_duplication.py b/tests/python/contrib/test_ethosn/test_constant_duplication.py index a096e57c19a9..e443a18c6d09 100644 --- a/tests/python/contrib/test_ethosn/test_constant_duplication.py +++ b/tests/python/contrib/test_ethosn/test_constant_duplication.py @@ -79,4 +79,4 @@ def test_constant_duplication(): res = tei.build(mod, params, npu=True, expected_host_ops=1) for key, value in res.params.items(): assert key == "p0" - assert value.asnumpy().size == 64 + assert value.numpy().size == 64 diff --git a/tests/python/contrib/test_gemm_acc16.py b/tests/python/contrib/test_gemm_acc16.py index 4d2ed795d6e0..18e15098a07e 100644 --- a/tests/python/contrib/test_gemm_acc16.py +++ b/tests/python/contrib/test_gemm_acc16.py @@ -90,7 +90,7 @@ def verify(target="llvm -mcpu=skylake-avx512"): result = t_evaluator(x, w, y) gops_per_sec = gops_per_mm / result.mean / 1e9 - tvm.testing.assert_allclose(y.asnumpy(), np.dot(a_, b_.T), rtol=1e-5) + tvm.testing.assert_allclose(y.numpy(), np.dot(a_, b_.T), rtol=1e-5) print( 
"Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}.".format( result.mean * 1000, gops_per_sec, gops_per_sec / peak diff --git a/tests/python/contrib/test_gemm_acc32_vnni.py b/tests/python/contrib/test_gemm_acc32_vnni.py index 02538e88c39e..3d038259b1ab 100644 --- a/tests/python/contrib/test_gemm_acc32_vnni.py +++ b/tests/python/contrib/test_gemm_acc32_vnni.py @@ -98,7 +98,7 @@ def verify(target="llvm -mcpu=cascadelake"): gops_per_sec = gops_per_mm / result.mean / 1e9 # verify the correctness - tvm.testing.assert_allclose(y.asnumpy(), np.dot(a_, b_.T), rtol=0) + tvm.testing.assert_allclose(y.numpy(), np.dot(a_, b_.T), rtol=0) print( "Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}".format( result.mean * 1000, gops_per_sec, gops_per_sec / peak diff --git a/tests/python/contrib/test_miopen.py b/tests/python/contrib/test_miopen.py index 630bfc011038..27a8ec6df357 100644 --- a/tests/python/contrib/test_miopen.py +++ b/tests/python/contrib/test_miopen.py @@ -66,8 +66,8 @@ def verify(): f_ref = tvm.build(s_ref, [X, W, Y_ref], "rocm --host=llvm") y_ref = tvm.nd.array(np.random.uniform(-1, 1, yshape).astype(np.float32), dev) f_ref(x, w, y_ref) - print("Max abs diff:", np.max(np.abs(y.asnumpy() - y_ref.asnumpy()))) - tvm.testing.assert_allclose(y.asnumpy(), y_ref.asnumpy(), atol=1e-3) + print("Max abs diff:", np.max(np.abs(y.numpy() - y_ref.numpy()))) + tvm.testing.assert_allclose(y.numpy(), y_ref.numpy(), atol=1e-3) verify() diff --git a/tests/python/contrib/test_mps.py b/tests/python/contrib/test_mps.py index 597e87778866..92462e4c4f9e 100644 --- a/tests/python/contrib/test_mps.py +++ b/tests/python/contrib/test_mps.py @@ -53,7 +53,7 @@ def verify(A, B, D, s, target="metal"): b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev) f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()) + 1, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()) + 1, rtol=1e-5) verify(A, B, D, s) @@ -83,7 +83,7 @@ def verify(A, B, C, target="llvm"): b = tvm.nd.array(np.random.uniform(size=(co, kh, kw, ci)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((n, h // stride, w // stride, co), dtype=C.dtype), dev) f(a, b, c) - # print(c.asnumpy()) + # print(c.numpy()) # print(c.shape) verify(A, B, C, s1) diff --git a/tests/python/contrib/test_mxnet_bridge.py b/tests/python/contrib/test_mxnet_bridge.py index 308bd82988ef..920e3649f370 100644 --- a/tests/python/contrib/test_mxnet_bridge.py +++ b/tests/python/contrib/test_mxnet_bridge.py @@ -56,7 +56,7 @@ def mxnet_check(): mxf(xx, yy, zz, 10.0) mxf(xx, yy, zz, 10.0) - tvm.testing.assert_allclose(zz.asnumpy(), (xx.asnumpy() + yy.asnumpy()) * 10) + tvm.testing.assert_allclose(zz.numpy(), (xx.numpy() + yy.numpy()) * 10) if __name__ == "__main__": diff --git a/tests/python/contrib/test_nnpack.py b/tests/python/contrib/test_nnpack.py index 0208d7211960..c693af7e4198 100644 --- a/tests/python/contrib/test_nnpack.py +++ b/tests/python/contrib/test_nnpack.py @@ -49,7 +49,7 @@ def verify(target="llvm"): d = tvm.nd.array(np.zeros((m,), dtype=D.dtype), dev) bb = 10.0 f(a, b, d, bb) - tvm.testing.assert_allclose(d.asnumpy(), np.dot(a.asnumpy(), b.asnumpy().T) + bb, rtol=1e-5) + tvm.testing.assert_allclose(d.numpy(), np.dot(a.numpy(), b.numpy().T) + bb, rtol=1e-5) verify() @@ -135,7 +135,7 @@ def verify(target="llvm", algorithm=nnpack.ConvolutionAlgorithm.AUTO, with_bias= nd = np_conv(np.reshape(na, (BATCH, IC, IH, 
IW)), nb, PAD, STRIDE) + nc.reshape( 1, bshape[0], 1, 1 ) - tvm.testing.assert_allclose(td.asnumpy(), nd.reshape(BATCH, IC, IH, IW), rtol=1e-5) + tvm.testing.assert_allclose(td.numpy(), nd.reshape(BATCH, IC, IH, IW), rtol=1e-5) for algorithm in [ nnpack.ConvolutionAlgorithm.AUTO, @@ -209,7 +209,7 @@ def verify(target="llvm", algorithm=nnpack.ConvolutionAlgorithm.AUTO, with_bias= nd = np_conv(np.reshape(na, (BATCH, IC, IH, IW)), nb, PAD, STRIDE) + nc.reshape( 1, bshape[0], 1, 1 ) - tvm.testing.assert_allclose(td.asnumpy(), nd.reshape(BATCH, IC, IH, IW), rtol=1e-5) + tvm.testing.assert_allclose(td.numpy(), nd.reshape(BATCH, IC, IH, IW), rtol=1e-5) for algorithm in [nnpack.ConvolutionAlgorithm.WT_8x8]: for with_bias in [True, False]: diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index 9b29b33caaf6..d99946d19d66 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -55,7 +55,7 @@ def run_relay(func, data_tuple): result = [] relay_res = relay_res if isinstance(relay_res, list) else [relay_res] for res in relay_res: - result.append(res.asnumpy()) + result.append(res.numpy()) return result diff --git a/tests/python/contrib/test_onnx_model.py b/tests/python/contrib/test_onnx_model.py index addb13732550..84cff57d1d94 100644 --- a/tests/python/contrib/test_onnx_model.py +++ b/tests/python/contrib/test_onnx_model.py @@ -61,7 +61,7 @@ def run_relay(mod, params, in_data): dev = tvm.device("llvm", 0) intrp = relay.create_executor("graph", mod, device=dev, target=target) in_data = [tvm.nd.array(value) for value in in_data.values()] - return intrp.evaluate()(*in_data, **params).asnumpy() + return intrp.evaluate()(*in_data, **params).numpy() def _verify_results(mod, params, in_data): diff --git a/tests/python/contrib/test_random.py b/tests/python/contrib/test_random.py index e7a1a5e01c00..bd92f2f70ea7 100644 --- a/tests/python/contrib/test_random.py +++ b/tests/python/contrib/test_random.py @@ -39,7 +39,7 @@ def verify(target="llvm"): f = tvm.build(s, [A], target) a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev) f(a) - na = a.asnumpy() + na = a.numpy() assert abs(np.mean(na)) < 0.3 assert np.min(na) == -127 assert np.max(na) == 127 @@ -64,7 +64,7 @@ def verify(target="llvm"): f = tvm.build(s, [A], target) a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev) f(a) - na = a.asnumpy() + na = a.numpy() assert abs(np.mean(na) - 0.5) < 1e-1 assert abs(np.min(na) - 0.0) < 1e-3 assert abs(np.max(na) - 1.0) < 1e-3 @@ -89,7 +89,7 @@ def verify(target="llvm"): f = tvm.build(s, [A], target) a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev) f(a) - na = a.asnumpy() + na = a.numpy() assert abs(np.mean(na) - 3) < 1e-1 assert abs(np.std(na) - 4) < 1e-2 @@ -106,10 +106,10 @@ def test_local(dev, dtype): random_fill = tvm.get_global_func("tvm.contrib.random.random_fill") random_fill(value) - assert np.count_nonzero(value.asnumpy()) == 512 * 512 + assert np.count_nonzero(value.numpy()) == 512 * 512 # make sure arithmentic doesn't overflow too - np_values = value.asnumpy() + np_values = value.numpy() assert np.isfinite(np_values * np_values + np_values).any() def test_rpc(dtype): @@ -126,10 +126,10 @@ def test_rpc(dtype): random_fill = remote.get_function("tvm.contrib.random.random_fill") random_fill(value) - assert np.count_nonzero(value.asnumpy()) == 512 * 512 + assert np.count_nonzero(value.numpy()) == 512 * 512 # make sure arithmentic doesn't overflow too - np_values = value.asnumpy() + np_values = value.numpy() assert np.isfinite(np_values 
* np_values + np_values).any() for dtype in [ diff --git a/tests/python/contrib/test_rocblas.py b/tests/python/contrib/test_rocblas.py index bcbec60265d3..2defc9c81251 100644 --- a/tests/python/contrib/test_rocblas.py +++ b/tests/python/contrib/test_rocblas.py @@ -43,7 +43,7 @@ def verify(target="rocm"): b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev) f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()), rtol=1e-5) verify() @@ -77,7 +77,7 @@ def verify(target="rocm"): c = tvm.nd.array(np.zeros((batch, m, n), dtype=C.dtype), dev) f(a, b, c) tvm.testing.assert_allclose( - c.asnumpy(), get_numpy(a.asnumpy(), b.asnumpy(), transa, transb), rtol=1e-5 + c.numpy(), get_numpy(a.numpy(), b.numpy(), transa, transb), rtol=1e-5 ) verify() diff --git a/tests/python/contrib/test_sort.py b/tests/python/contrib/test_sort.py index cdb3a00dc492..fda9a777576d 100644 --- a/tests/python/contrib/test_sort.py +++ b/tests/python/contrib/test_sort.py @@ -56,7 +56,7 @@ def test_sort(): b = tvm.nd.array(np.array(sort_num_input).astype(sort_num.dtype), dev) c = tvm.nd.array(np.zeros(a.shape, dtype=out.dtype), dev) f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), np.array(sorted_index).astype(out.dtype), rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), np.array(sorted_index).astype(out.dtype), rtol=1e-5) def test_sort_np(): @@ -88,7 +88,7 @@ def test_sort_np(): b = tvm.nd.array(np.array(sort_num_input).astype(sort_num.dtype), dev) c = tvm.nd.array(np.zeros(a.shape, dtype=out.dtype), dev) f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), np_out, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), np_out, rtol=1e-5) def test_sort_by_key_gpu(): @@ -119,8 +119,8 @@ def test_sort_by_key_gpu(): ref_keys_out = np.sort(keys_np) ref_values_out = np.array([values_np[i] for i in np.argsort(keys_np)]) - tvm.testing.assert_allclose(keys_out.asnumpy(), ref_keys_out, rtol=1e-5) - tvm.testing.assert_allclose(values_out.asnumpy(), ref_values_out, rtol=1e-5) + tvm.testing.assert_allclose(keys_out.numpy(), ref_keys_out, rtol=1e-5) + tvm.testing.assert_allclose(values_out.numpy(), ref_values_out, rtol=1e-5) if __name__ == "__main__": diff --git a/tests/python/contrib/test_sparse.py b/tests/python/contrib/test_sparse.py index d9618391ce40..b4fdbc8481ba 100644 --- a/tests/python/contrib/test_sparse.py +++ b/tests/python/contrib/test_sparse.py @@ -46,7 +46,7 @@ def test_static_tensor(): c.indices = a.indices c.indptr = a.indptr f(a.data, c.data) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2.0, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), a.numpy() * 2.0, rtol=1e-5) def test_dynamic_tensor(): @@ -73,7 +73,7 @@ def test_dynamic_tensor(): c.indices = a.indices c.indptr = a.indptr f(a.data.shape[0], a.data, c.data) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2.0, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), a.numpy() * 2.0, rtol=1e-5) def test_sparse_array_tuple(): @@ -112,7 +112,7 @@ def test_sparse_array_tuple(): c.indices = a.indices c.indptr = a.indptr f(a.data.shape[0], a.data, c.data) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2.0, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), a.numpy() * 2.0, rtol=1e-5) if __name__ == "__main__": diff --git a/tests/python/contrib/test_tensorrt.py b/tests/python/contrib/test_tensorrt.py index 52ee87e00599..f9912c9674e5 100644 --- 
a/tests/python/contrib/test_tensorrt.py +++ b/tests/python/contrib/test_tensorrt.py @@ -56,7 +56,7 @@ def skip_runtime_test(): def vmobj_to_list(o): if isinstance(o, tvm.nd.NDArray): - return [o.asnumpy()] + return [o.numpy()] elif isinstance(o, tvm.runtime.container.ADT) or isinstance(o, list): return [vmobj_to_list(f) for f in o] else: @@ -1370,7 +1370,7 @@ def get_maskrcnn_input(in_size: int) -> np.ndarray: # Descending sort by scores and get the high confidence indices. In this example 9 is chosen, # because this image has 9 boxes over 0.9 confidence num_high_confidence_boxes = 9 - tvm_indices = np.argsort(-1 * tvm_res[1].asnumpy())[:num_high_confidence_boxes] + tvm_indices = np.argsort(-1 * tvm_res[1].numpy())[:num_high_confidence_boxes] with torch.no_grad(): out = traced_module(torch.Tensor(np_sample_input)) @@ -1386,7 +1386,7 @@ def get_maskrcnn_input(in_size: int) -> np.ndarray: # 0.1 pixel difference of a box in a 300X300 image wont make any change. for i, tol_val in zip(range(4), tol): np.testing.assert_allclose( - tvm_res[i].asnumpy()[tvm_indices], + tvm_res[i].numpy()[tvm_indices], out[i].numpy()[pt_indices], rtol=tol_val, atol=tol_val, diff --git a/tests/python/contrib/test_tflite_runtime.py b/tests/python/contrib/test_tflite_runtime.py index f3369ac428f6..93ab634feb15 100644 --- a/tests/python/contrib/test_tflite_runtime.py +++ b/tests/python/contrib/test_tflite_runtime.py @@ -92,7 +92,7 @@ def test_local(): runtime.set_input(0, tvm.nd.array(tflite_input)) runtime.invoke() out = runtime.get_output(0) - np.testing.assert_equal(out.asnumpy(), tflite_output) + np.testing.assert_equal(out.numpy(), tflite_output) def test_remote(): @@ -137,7 +137,7 @@ def test_remote(): runtime.set_input(0, tvm.nd.array(tflite_input, remote.cpu(0))) runtime.invoke() out = runtime.get_output(0) - np.testing.assert_equal(out.asnumpy(), tflite_output) + np.testing.assert_equal(out.numpy(), tflite_output) server.terminate() diff --git a/tests/python/contrib/test_thrust.py b/tests/python/contrib/test_thrust.py index 7b4b3a3840ae..ca5b536fb0a3 100644 --- a/tests/python/contrib/test_thrust.py +++ b/tests/python/contrib/test_thrust.py @@ -59,8 +59,8 @@ def test_stable_sort_by_key(): ref_keys_out = np.sort(keys_np) ref_values_out = np.array([values_np[i] for i in np.argsort(keys_np)]) - tvm.testing.assert_allclose(keys_out.asnumpy(), ref_keys_out, rtol=1e-5) - tvm.testing.assert_allclose(values_out.asnumpy(), ref_values_out, rtol=1e-5) + tvm.testing.assert_allclose(keys_out.numpy(), ref_keys_out, rtol=1e-5) + tvm.testing.assert_allclose(values_out.numpy(), ref_values_out, rtol=1e-5) def test_exclusive_scan(): @@ -99,9 +99,9 @@ def test_exclusive_scan(): f(values_in, values_out, reduction_out) ref_values_out = np.cumsum(values_np, axis=-1, dtype="int32") - values_np - tvm.testing.assert_allclose(values_out.asnumpy(), ref_values_out, rtol=1e-5) + tvm.testing.assert_allclose(values_out.numpy(), ref_values_out, rtol=1e-5) ref_reduction_out = np.sum(values_np, axis=-1) - tvm.testing.assert_allclose(reduction_out.asnumpy(), ref_reduction_out, rtol=1e-5) + tvm.testing.assert_allclose(reduction_out.numpy(), ref_reduction_out, rtol=1e-5) def test_inclusive_scan(): @@ -133,7 +133,7 @@ def test_inclusive_scan(): f(values_in, values_out) ref_values_out = np.cumsum(values_np, axis=-1, dtype=out_dtype) - tvm.testing.assert_allclose(values_out.asnumpy(), ref_values_out, rtol=1e-5) + tvm.testing.assert_allclose(values_out.numpy(), ref_values_out, rtol=1e-5) if __name__ == "__main__": diff --git 
a/tests/python/contrib/test_verilator/test_mobilenet.py b/tests/python/contrib/test_verilator/test_mobilenet.py index 983c5d983d47..5728bc8bb25c 100644 --- a/tests/python/contrib/test_verilator/test_mobilenet.py +++ b/tests/python/contrib/test_verilator/test_mobilenet.py @@ -158,7 +158,7 @@ def run_model(mod, params=None, opts=None): input_tensor = get_input_tensor_name() module.set_input(input_tensor, image_data) module.run() - out = module.get_output(0).asnumpy() + out = module.get_output(0).numpy() return out diff --git a/tests/python/contrib/test_verilator/test_verilator_ops.py b/tests/python/contrib/test_verilator/test_verilator_ops.py index 3b0eb3130c2e..fe32baa897d0 100644 --- a/tests/python/contrib/test_verilator/test_verilator_ops.py +++ b/tests/python/contrib/test_verilator/test_verilator_ops.py @@ -118,7 +118,7 @@ def run_and_check(xshape, yshape, dtype, mod, opts): clear_stats() out = run_module(inp, mod, params=None, opts=opts) values = stats() - tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-5, atol=1e-5) return values["cycle_counter"] diff --git a/tests/python/contrib/test_vitis_ai/infrastructure.py b/tests/python/contrib/test_vitis_ai/infrastructure.py index bd3d85747105..3884f22e59aa 100644 --- a/tests/python/contrib/test_vitis_ai/infrastructure.py +++ b/tests/python/contrib/test_vitis_ai/infrastructure.py @@ -164,4 +164,4 @@ def verify_result( for idx, shape in enumerate(out_shapes): out = tvm.nd.empty(shape, device=device) out = rt_mod.get_output(idx, out) - tvm.testing.assert_allclose(out.asnumpy(), results[idx], rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=tol, atol=tol) diff --git a/tests/python/contrib/test_vitis_ai/test_vitis_ai_runtime_cpu_part.py b/tests/python/contrib/test_vitis_ai/test_vitis_ai_runtime_cpu_part.py index 64071325ef52..db9552c8eab2 100644 --- a/tests/python/contrib/test_vitis_ai/test_vitis_ai_runtime_cpu_part.py +++ b/tests/python/contrib/test_vitis_ai/test_vitis_ai_runtime_cpu_part.py @@ -67,7 +67,7 @@ def test_extern_vitis_ai_resnet18(): mod, {"data": i_data}, (1, 1000), - ref_res.asnumpy(), + ref_res.numpy(), tol=1e-5, params=params, dpu_target="DPUCADX8G", diff --git a/tests/python/frontend/caffe/test_forward.py b/tests/python/frontend/caffe/test_forward.py index d0f87fcc21c7..f4c0cd102340 100644 --- a/tests/python/frontend/caffe/test_forward.py +++ b/tests/python/frontend/caffe/test_forward.py @@ -47,20 +47,20 @@ def _create_dir(d_path): - """ If the directory is not existed, create it""" + """If the directory is not existed, create it""" if not (os.path.exists(d_path) and os.path.isdir(d_path)): os.makedirs(d_path) def _list_to_str(ll): - """ Convert list or tuple to str, separated by underline. """ + """Convert list or tuple to str, separated by underline.""" if isinstance(ll, (tuple, list)): tmp = [str(i) for i in ll] return "_".join(tmp) def _gen_filename_str(op_name, data_shape, *args, **kwargs): - """ Combining the filename according to the op_name, shape and other args. 
""" + """Combining the filename according to the op_name, shape and other args.""" file_dir = os.path.join(CURRENT_DIR, op_name) _create_dir(file_dir) res = op_name + "_" @@ -86,14 +86,14 @@ def _gen_filename_str(op_name, data_shape, *args, **kwargs): def _save_prototxt(n_netspec, f_path): - """ Generate .prototxt file according to caffe.NetSpec""" + """Generate .prototxt file according to caffe.NetSpec""" s = n_netspec.to_proto() with open(f_path, "w") as f: f.write(str(s)) def _save_solver(solver_file, proto_file, blob_file): - """ Define a solver proto, you can change the configs.""" + """Define a solver proto, you can change the configs.""" blob_file_prefix = blob_file.split(".caffemodel")[0] s = pb.SolverParameter() s.train_net = proto_file @@ -113,7 +113,7 @@ def _save_solver(solver_file, proto_file, blob_file): def _save_caffemodel(solver_file, blob_file): - """ Generate .caffemodel file.""" + """Generate .caffemodel file.""" solver = caffe.SGDSolver(solver_file) solver.net.save(blob_file) @@ -125,7 +125,7 @@ def _gen_model_files(n_netspec, proto_file, blob_file, solver_file): def _siso_op(data, func, *args, **kwargs): - """ Create single input and single output Caffe op """ + """Create single input and single output Caffe op""" n = caffe.NetSpec() n.data = L.Input(input_param={"shape": {"dim": list(data.shape)}}) n.output = func(n.data, *args, **kwargs) @@ -133,7 +133,7 @@ def _siso_op(data, func, *args, **kwargs): def _miso_op(data_list, func, *args, **kwargs): - """ Create multi input and single output Caffe op """ + """Create multi input and single output Caffe op""" n = caffe.NetSpec() if not isinstance(data_list, (tuple, list)): raise TypeError("Need tuple or list but get {}".format(type(data_list))) @@ -146,7 +146,7 @@ def _miso_op(data_list, func, *args, **kwargs): def _simo_op(data, func, *args, **kwargs): - """ Create single input and multi output Caffe op """ + """Create single input and multi output Caffe op""" n = caffe.NetSpec() n.data = L.Input(input_param={"shape": {"dim": list(data.shape)}}) output_list = func(n.data, *args, **kwargs) @@ -156,7 +156,7 @@ def _simo_op(data, func, *args, **kwargs): def _run_caffe(data, proto_file, blob_file): - """ Run caffe model by Caffe according to .caffemodel and .prototxt""" + """Run caffe model by Caffe according to .caffemodel and .prototxt""" net = caffe.Net(proto_file, blob_file, caffe.TEST) if isinstance(data, (list, tuple)): for idx, d in enumerate(data): @@ -175,7 +175,7 @@ def _run_caffe(data, proto_file, blob_file): def _run_tvm(data, proto_file, blob_file): - """ Run caffe model by TVM according to .caffemodel and .prototxt""" + """Run caffe model by TVM according to .caffemodel and .prototxt""" init_net = pb.NetParameter() predict_net = pb.NetParameter() @@ -216,7 +216,7 @@ def _run_tvm(data, proto_file, blob_file): tvm_output = list() # get outputs for i in range(m.get_num_outputs()): - tvm_output.append(m.get_output(i).asnumpy()) + tvm_output.append(m.get_output(i).numpy()) return tvm_output @@ -228,7 +228,7 @@ def _compare_caffe_tvm(caffe_out, tvm_out, is_network=False): def _test_op(data, func_op, op_name, **kwargs): - """ Single op testing pipline. 
""" + """Single op testing pipline.""" shape_list = list() if isinstance(data, (list, tuple)): n = _miso_op(data, func_op, **kwargs) @@ -268,14 +268,14 @@ def _test_network(data, proto_file, blob_file): def _test_batchnorm(data, moving_average_fraction=0.999, eps=1e-5): - """ One iteration of BatchNorm """ + """One iteration of BatchNorm""" _test_op( data, L.BatchNorm, "BatchNorm", moving_average_fraction=moving_average_fraction, eps=eps ) def test_forward_BatchNorm(): - """ BatchNorm """ + """BatchNorm""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_batchnorm(data) _test_batchnorm(data, moving_average_fraction=0.88, eps=1e-4) @@ -287,12 +287,12 @@ def test_forward_BatchNorm(): def _test_concat(data_list, axis=1): - """ One iteration of Concat """ + """One iteration of Concat""" _test_op(data_list, L.Concat, "Concat", axis=axis) def test_forward_Concat(): - """ Concat """ + """Concat""" _test_concat([np.random.rand(1, 3, 10, 10), np.random.rand(1, 2, 10, 10)], axis=1) _test_concat([np.random.rand(3, 10, 10), np.random.rand(2, 10, 10)], axis=0) _test_concat([np.random.rand(3, 10), np.random.rand(2, 10)], axis=0) @@ -304,12 +304,12 @@ def test_forward_Concat(): def _test_convolution(data, **kwargs): - """ One iteration of Convolution """ + """One iteration of Convolution""" _test_op(data, L.Convolution, "Convolution", **kwargs) def test_forward_Convolution(): - """ Convolution """ + """Convolution""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_convolution( data, @@ -378,12 +378,12 @@ def test_forward_Convolution(): def _test_crop(data, **kwargs): - """ One iteration of Crop """ + """One iteration of Crop""" _test_op(data, L.Crop, "Crop", **kwargs) def test_forward_Crop(): - """ Crop """ + """Crop""" _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)]) _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1) _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=2) @@ -403,12 +403,12 @@ def test_forward_Crop(): def _test_deconvolution(data, **kwargs): - """ One iteration of Deconvolution """ + """One iteration of Deconvolution""" _test_op(data, L.Deconvolution, "Deconvolution", **kwargs) def test_forward_Deconvolution(): - """ Deconvolution """ + """Deconvolution""" data = np.random.rand(1, 16, 32, 32).astype(np.float32) _test_deconvolution( data, @@ -460,12 +460,12 @@ def test_forward_Deconvolution(): def _test_dropout(data, **kwargs): - """ One iteration of Dropout """ + """One iteration of Dropout""" _test_op(data, L.Dropout, "Dropout", **kwargs) def test_forward_Dropout(): - """ Dropout """ + """Dropout""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_dropout(data) _test_dropout(data, dropout_ratio=0.7) @@ -477,12 +477,12 @@ def test_forward_Dropout(): def _test_eltwise(data_list, **kwargs): - """ One iteration of Eltwise """ + """One iteration of Eltwise""" _test_op(data_list, L.Eltwise, "Eltwise", **kwargs) def test_forward_Eltwise(): - """ Eltwise """ + """Eltwise""" _test_eltwise( [ np.random.rand(1, 3, 10, 11).astype(np.float32), @@ -520,12 +520,12 @@ def test_forward_Eltwise(): def _test_flatten(data, axis=1): - """ One iteration of Flatten """ + """One iteration of Flatten""" _test_op(data, L.Flatten, "Flatten", axis=axis) def test_forward_Flatten(): - """ Flatten """ + """Flatten""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_flatten(data) _test_flatten(data, axis=1) @@ -537,12 +537,12 @@ def test_forward_Flatten(): def 
_test_inner_product(data, **kwargs): - """ One iteration of InnerProduct""" + """One iteration of InnerProduct""" _test_op(data, L.InnerProduct, "InnerProduct", **kwargs) def test_forward_InnerProduct(): - """ InnerProduct """ + """InnerProduct""" data = np.random.rand(1, 3, 10, 10) _test_inner_product(data, num_output=20, bias_term=False, weight_filler=dict(type="xavier")) _test_inner_product( @@ -567,12 +567,12 @@ def test_forward_InnerProduct(): def _test_lrn(data, local_size=5, alpha=1.0, beta=0.75, k=1.0): - """ One iteration of LRN """ + """One iteration of LRN""" _test_op(data, L.LRN, "LRN", local_size=local_size, alpha=alpha, beta=beta, k=k) def test_forward_LRN(): - """ LRN """ + """LRN""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_lrn(data) _test_lrn(data, local_size=3) @@ -592,12 +592,12 @@ def test_forward_LRN(): def _test_pooling(data, **kwargs): - """ One iteration of Pooling. """ + """One iteration of Pooling.""" _test_op(data, L.Pooling, "Pooling", **kwargs) def test_forward_Pooling(): - """ Pooing """ + """Pooing""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) # MAX Pooling _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.MAX) @@ -620,12 +620,12 @@ def test_forward_Pooling(): def _test_prelu(data, **kwargs): - """ One iteration of PReLU. """ + """One iteration of PReLU.""" _test_op(data, L.PReLU, "PReLU", **kwargs) def test_forward_PReLU(): - """ PReLU """ + """PReLU""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_prelu(data, filler=dict(type="constant", value=0.5)) _test_prelu(data) @@ -638,12 +638,12 @@ def test_forward_PReLU(): def _test_relu(data, **kwargs): - """ One iteration of ReLU. """ + """One iteration of ReLU.""" _test_op(data, L.ReLU, "ReLU", **kwargs) def test_forward_ReLU(): - """ ReLU """ + """ReLU""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_relu(data) _test_relu(np.random.rand(10, 20).astype(np.float32)) @@ -655,12 +655,12 @@ def test_forward_ReLU(): def _test_reshape(data, **kwargs): - """ One iteration of Reshape. """ + """One iteration of Reshape.""" _test_op(data, L.Reshape, "Reshape", **kwargs) def test_forward_Reshape(): - """ Reshape """ + """Reshape""" data = np.random.rand(1, 8, 6).astype(np.float32) _test_reshape(data, reshape_param={"shape": {"dim": [4, 3, 4]}}) _test_reshape(data, reshape_param={"shape": {"dim": [2, 0, 3]}}) @@ -681,12 +681,12 @@ def test_forward_Reshape(): def _test_scale(data, **kwargs): - """ One iteration of Scale. """ + """One iteration of Scale.""" _test_op(data, L.Scale, "Scale", **kwargs) def test_forward_Scale(): - """ Scale """ + """Scale""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_scale(data, filler=dict(type="xavier")) _test_scale(data, filler=dict(type="xavier"), bias_term=True, bias_filler=dict(type="xavier")) @@ -698,12 +698,12 @@ def test_forward_Scale(): def _test_sigmoid(data, **kwargs): - """ One iteration of Sigmoid. 
""" + """One iteration of Sigmoid.""" _test_op(data, L.Sigmoid, "Sigmoid", **kwargs) def test_forward_Sigmoid(): - """ Sigmoid """ + """Sigmoid""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_sigmoid(data) @@ -714,12 +714,12 @@ def test_forward_Sigmoid(): def _test_slice(data, **kwargs): - """ One iteration of Slice """ + """One iteration of Slice""" _test_op(data, L.Slice, "Slice", **kwargs) def test_forward_Slice(): - """ Slice """ + """Slice""" data = np.random.rand(1, 3, 10, 10).astype(np.float32) _test_slice(data, ntop=2, slice_param=dict(axis=1, slice_point=[1])) _test_slice(data, ntop=2, slice_param=dict(axis=-1, slice_point=[1])) @@ -733,12 +733,12 @@ def test_forward_Slice(): def _test_softmax(data, **kwargs): - """ One iteration of Softmax """ + """One iteration of Softmax""" _test_op(data, L.Softmax, "Softmax", **kwargs) def test_forward_Softmax(): - """ Softmax""" + """Softmax""" _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32)) _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32), axis=2) _test_softmax(np.random.rand(10, 10).astype(np.float32), axis=0) @@ -751,12 +751,12 @@ def test_forward_Softmax(): def _test_tanh(data, **kwargs): - """ One iteration of TanH """ + """One iteration of TanH""" _test_op(data, L.TanH, "TanH", **kwargs) def test_forward_TanH(): - """ TanH """ + """TanH""" _test_tanh(np.random.rand(1, 3, 10, 10).astype(np.float32)) _test_tanh(np.random.rand(3, 10, 10).astype(np.float32)) _test_tanh(np.random.rand(10, 10).astype(np.float32)) @@ -769,7 +769,7 @@ def test_forward_TanH(): def _test_mobilenetv2(data): - """ One iteration of Mobilenetv2 """ + """One iteration of Mobilenetv2""" mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32) mean_val = np.reshape(mean_val, (1, 3, 1, 1)) mean_val = np.tile(mean_val, (1, 1, 224, 224)) @@ -789,7 +789,7 @@ def _test_mobilenetv2(data): def test_forward_Mobilenetv2(): - """ Mobilenetv2 """ + """Mobilenetv2""" data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32) _test_mobilenetv2(data) @@ -800,7 +800,7 @@ def test_forward_Mobilenetv2(): def _test_alexnet(data): - """ One iteration of Alexnet """ + """One iteration of Alexnet""" mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32) mean_val = np.reshape(mean_val, (1, 3, 1, 1)) mean_val = np.tile(mean_val, (1, 1, 227, 227)) @@ -817,7 +817,7 @@ def _test_alexnet(data): def test_forward_Alexnet(): - """ Alexnet """ + """Alexnet""" data = np.random.randint(0, 256, size=(1, 3, 227, 227)).astype(np.float32) _test_alexnet(data) @@ -828,7 +828,7 @@ def test_forward_Alexnet(): def _test_resnet50(data): - """ One iteration of Resnet50 """ + """One iteration of Resnet50""" mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32) mean_val = np.reshape(mean_val, (1, 3, 1, 1)) mean_val = np.tile(mean_val, (1, 1, 224, 224)) @@ -849,7 +849,7 @@ def _test_resnet50(data): def test_forward_Resnet50(): - """ Resnet50 """ + """Resnet50""" data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32) _test_resnet50(data) @@ -860,7 +860,7 @@ def test_forward_Resnet50(): def _test_inceptionv1(data): - """ One iteration of Inceptionv4 """ + """One iteration of Inceptionv4""" mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32) mean_val = np.reshape(mean_val, (1, 3, 1, 1)) mean_val = np.tile(mean_val, (1, 1, 224, 224)) @@ -878,7 +878,7 @@ def _test_inceptionv1(data): def test_forward_Inceptionv1(): - """ Inceptionv4 """ + """Inceptionv4""" data = np.random.randint(0, 256, size=(1, 
3, 224, 224)).astype(np.float32) _test_inceptionv1(data) diff --git a/tests/python/frontend/caffe2/test_forward.py b/tests/python/frontend/caffe2/test_forward.py index 1081b087c468..3d7ad230dec0 100644 --- a/tests/python/frontend/caffe2/test_forward.py +++ b/tests/python/frontend/caffe2/test_forward.py @@ -27,7 +27,7 @@ def get_tvm_output(model, input_data, target, device, output_shape, output_dtype="float32"): - """ Generic function to execute and get tvm output""" + """Generic function to execute and get tvm output""" # supporting multiple inputs in caffe2 in a bit tricky, # because the input names can appear at the beginning or end of model.predict_net.external_input assert isinstance(input_data, np.ndarray) @@ -55,11 +55,11 @@ def get_tvm_output(model, input_data, target, device, output_shape, output_dtype tvm_output_list = [] for i, s in enumerate(output_shape): tvm_output = m.get_output(i, tvm.nd.empty((s), output_dtype[i])) - tvm_output_list.append(tvm_output.asnumpy()) + tvm_output_list.append(tvm_output.numpy()) return tvm_output_list else: tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype)) - return tvm_output.asnumpy() + return tvm_output.numpy() def get_caffe2_output(model, x, dtype="float32"): diff --git a/tests/python/frontend/coreml/test_forward.py b/tests/python/frontend/coreml/test_forward.py index c227c3955c5b..72dac9b2501f 100644 --- a/tests/python/frontend/coreml/test_forward.py +++ b/tests/python/frontend/coreml/test_forward.py @@ -43,7 +43,7 @@ def get_tvm_output( m.run() # get outputs out = m.get_output(0, tvm.nd.empty(out_shape, dtype)) - return out.asnumpy() + return out.numpy() def run_model_checkonly(model_file, model_name="", input_name="image"): @@ -73,7 +73,7 @@ def test_resnet50_checkonly(): def run_tvm_graph( coreml_model, target, device, input_data, input_name, output_shape, output_dtype="float32" ): - """ Generic function to compile on relay and execute on tvm """ + """Generic function to compile on relay and execute on tvm""" if isinstance(input_data, list): shape_dict = {} dtype_dict = {} @@ -105,14 +105,14 @@ def run_tvm_graph( tvm_output_list = [] for i, s in enumerate(output_shape): tvm_output = m.get_output(i, tvm.nd.empty((s), output_dtype[i])) - tvm_output_list.append(tvm_output.asnumpy()) + tvm_output_list.append(tvm_output.numpy()) return tvm_output_list else: if not output_shape: tvm_output = m.get_output(0) else: tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype)) - return tvm_output.asnumpy() + return tvm_output.numpy() def verify_AddLayerParams(input_dim, alpha=2): diff --git a/tests/python/frontend/darknet/test_forward.py b/tests/python/frontend/darknet/test_forward.py index 3bb8e93d3d22..116748a4f78a 100644 --- a/tests/python/frontend/darknet/test_forward.py +++ b/tests/python/frontend/darknet/test_forward.py @@ -90,7 +90,7 @@ def _get_tvm_output(net, data, build_dtype="float32", states=None): # get outputs tvm_out = [] for i in range(m.get_num_outputs()): - tvm_out.append(m.get_output(i).asnumpy()) + tvm_out.append(m.get_output(i).numpy()) return tvm_out diff --git a/tests/python/frontend/keras/test_forward.py b/tests/python/frontend/keras/test_forward.py index 709bebfc232c..26bf58cbf384 100644 --- a/tests/python/frontend/keras/test_forward.py +++ b/tests/python/frontend/keras/test_forward.py @@ -93,7 +93,7 @@ def get_tvm_output(xs, target, dev, dtype="float32"): for name, x in zip(keras_model.input_names, xs): m.set_input(name, tvm.nd.array(x.astype(dtype))) m.run() - return 
[m.get_output(i).asnumpy() for i in range(m.get_num_outputs())] + return [m.get_output(i).numpy() for i in range(m.get_num_outputs())] def to_channels_first(arr): return arr.transpose([0, -1] + list(range(1, arr.ndim - 1))) diff --git a/tests/python/frontend/mxnet/test_forward.py b/tests/python/frontend/mxnet/test_forward.py index c4e8e804b15a..362a9b623d25 100644 --- a/tests/python/frontend/mxnet/test_forward.py +++ b/tests/python/frontend/mxnet/test_forward.py @@ -84,7 +84,7 @@ def get_tvm_output(symbol, x, args, auxs, target, dev, dtype="float32"): m.run() # get outputs out = m.get_output(0, tvm.nd.empty(out_shape, dtype)) - return out.asnumpy() + return out.numpy() # random input x = np.random.uniform(size=data_shape) @@ -337,7 +337,7 @@ def test_forward_where(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(np_cond, np_x, np_y) - tvm.testing.assert_allclose(op_res.asnumpy(), mx_out) + tvm.testing.assert_allclose(op_res.numpy(), mx_out) @tvm.testing.uses_gpu @@ -361,7 +361,7 @@ def verify(start, stop, step): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()() - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) verify(0, 20, None) verify(0, 20, 2) @@ -420,7 +420,7 @@ def test_forward_broadcast_ops(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(a_np, b_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) @tvm.testing.uses_gpu @@ -455,7 +455,7 @@ def test_forward_elemwise_ops(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(a_np, b_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) @tvm.testing.uses_gpu @@ -504,9 +504,7 @@ def test_forward_unary_ops(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(a_np) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -536,7 +534,7 @@ def test_forward_scalar_ops(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(a_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) for op in ["maximum", "minimum"]: dtype = "float32" a_shape = (3, 4, 5) @@ -550,7 +548,7 @@ def test_forward_scalar_ops(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(a_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) @tvm.testing.uses_gpu @@ -564,7 +562,7 @@ def verify(shape, axis, begin, end): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((3, 
4), 0, 1, 2) verify((3, 4), 0, 1, None) @@ -589,7 +587,7 @@ def verify(x_shape, y_shape, axes): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np, y_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((3, 4), (2, 3), None) verify((3, 4), (2, 3), (0, 1)) @@ -623,7 +621,7 @@ def verify(shape, seq_lengths, use_seq_lengths, seq_axis): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(*in_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((3, 4), [1, 2, 3, 1], True, 0) verify((3, 4), None, False, 0) @@ -659,7 +657,7 @@ def test_forward_logistic_regression_output(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) @tvm.testing.uses_gpu @@ -677,7 +675,7 @@ def verify(a_shape, b_shape, transpose_b=False): intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(a_np, b_np) tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-05, atol=1e-05 + op_res.numpy(), ref_res.asnumpy(), rtol=1e-05, atol=1e-05 ) verify((1, 256), (256, 1)) @@ -695,7 +693,7 @@ def verify(shape): for kind in ["debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((1,)) verify((3, 4, 5)) @@ -717,7 +715,7 @@ def verify(shape, axis): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((1, 3, 1), None) verify((1, 3, 1), 0) @@ -737,7 +735,7 @@ def verify(shape, axis, size): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((1, 2, 1), 2, 3) verify((1, 2, 1), (0, 2), (2, 3)) @@ -754,7 +752,7 @@ def verify(input_shape, shape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((1, 2, 3), (3, 2, 3)) verify((4, 1, 32, 32), (4, 8, 32, 32)) @@ -772,7 +770,7 @@ def verify(input_shape, like_shape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np, y_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((1, 2, 3), (3, 2, 3)) verify((4, 1, 32, 32), (4, 8, 32, 32)) @@ -791,7 +789,7 @@ def test_forward_logical_not(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, 
device=dev, target=target) op_res = intrp.evaluate()(a_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) @tvm.testing.uses_gpu @@ -807,7 +805,7 @@ def verify(val, shape, dtype): for kind in ["debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()() - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify(2, (3, 4), "float32") verify(2, (3, 4), "int32") @@ -831,7 +829,7 @@ def verify(data_shape, weight_shape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x=x_np, w=w_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((2, 2), (4, 5)) verify((2, 3, 4), (4, 5)) @@ -858,7 +856,7 @@ def verify(shape, indices_src, axis, mode="clip"): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np, indices_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((2, 2), [[[1, 0], [0, 1]]], 0) verify((2, 2), [[[1, 0], [0, 1]]], 1) @@ -882,7 +880,7 @@ def verify(xshape, yshape, y_data, error=False): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]]) verify((2, 2, 2), (2, 2), [[0, 1], [1, 0]]) @@ -911,9 +909,7 @@ def verify(shape, transform_type, target_shape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5) verify((4, 6), "affine", (16, 32)) verify((4, 2, 16, 16), "warp", None) @@ -933,9 +929,7 @@ def verify(data_shape, grid_shape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data, grid) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5) verify((4, 4, 16, 32), (4, 2, 8, 8)) verify((4, 4, 16, 32), (4, 2, 32, 32)) @@ -1001,9 +995,9 @@ def verify( if init_states: assert len(op_res) == len(mx_res) for i, val in enumerate(op_res): - tvm.testing.assert_allclose(val.asnumpy(), mx_res[i].asnumpy(), rtol=1e-3) + tvm.testing.assert_allclose(val.numpy(), mx_res[i].asnumpy(), rtol=1e-3) else: - tvm.testing.assert_allclose(op_res.asnumpy(), mx_res.asnumpy(), rtol=1e-3) + tvm.testing.assert_allclose(op_res.numpy(), mx_res.asnumpy(), rtol=1e-3) for mode in ["rnn", "gru", "lstm"]: verify(mode, 1, 64, 64, 1) @@ -1035,7 +1029,7 @@ def verify(xshape, yshape, offset=None): op_res = intrp.evaluate()(x_data, y_data) else: op_res = intrp.evaluate()(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((1, 3, 40, 40), (1, 3, 20, 20)) verify((1, 
3, 40, 40), (1, 3, 20, 20), (0, 0)) @@ -1055,7 +1049,7 @@ def verify(shape, axis, is_ascend, dtype="float32"): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((2, 3, 4), axis=0, is_ascend=False) verify((1, 4, 6), axis=1, is_ascend=True) @@ -1089,9 +1083,9 @@ def verify(shape, k, axis, ret_type, is_ascend=None, dtype="float32"): if isinstance(ref_res, list): assert len(op_res) == len(ref_res) for i, t in enumerate(op_res): - tvm.testing.assert_allclose(t.asnumpy(), ref_res[i].asnumpy()) + tvm.testing.assert_allclose(t.numpy(), ref_res[i].asnumpy()) else: - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((3, 4), k=1, axis=0, ret_type="both") verify((3, 4), k=1, axis=-1, ret_type="indices") @@ -1146,7 +1140,7 @@ def verify(shape, use_sequence_length, value, axis, dtype, itype): op_res = intrp.evaluate()(data_np, valid_length_np) else: op_res = intrp.evaluate()(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((5, 10), True, 0.0, 0, "float32", "float32") verify((5, 4, 3), True, 1.0, 1, "float32", "float32") @@ -1165,7 +1159,7 @@ def verify(shape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify((3, 4)) verify((3, 4, 5)) @@ -1213,7 +1207,7 @@ def verify(shape, axis=1, fix_gamma=False): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x, gamma, beta, moving_mean, moving_var) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3) verify((2, 3, 4, 5)) verify((2, 3, 4, 5), axis=0) @@ -1237,9 +1231,7 @@ def verify(shape, axis=1, epsilon=1e-5): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x, gamma, beta) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5) verify((2, 3, 4, 5)) verify((32, 64, 80, 64)) @@ -1263,9 +1255,7 @@ def verify(shape, axis=-1): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x, gamma, beta) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify((2, 5)) verify((2, 5), axis=0) @@ -1293,9 +1283,7 @@ def verify(shape, num_groups=1): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x, gamma, beta) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify((1, 4, 2), num_groups=4) # TODO(trevmorr): MXNet GroupNorm implementation is bugged for cases 
when num_groups != num_channels @@ -1316,9 +1304,7 @@ def verify(indices_shape, depth, on_value, off_value, dtype): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x.astype("float32")) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify((3,), 3, 1, 0, "int32") verify((3,), 3, 1.0, 0.0, "float32") @@ -1444,7 +1430,7 @@ def verify(data_shape, kernel_size, stride, pad, num_filter, is_depthwise=False) for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x, weight, bias) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3) verify(data_shape=(1, 1, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4) verify(data_shape=(20, 1, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4) @@ -1525,9 +1511,7 @@ def verify(data_shape, kernel_size, stride, pad, num_filter): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x, weight, bias) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify(data_shape=(1, 1, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4) verify(data_shape=(20, 1, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4) @@ -1560,7 +1544,7 @@ def verify(a_np, b_np): for kind in ["debug", "vm"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(a_np, b_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3) verify(np.asarray([1.0], "float32"), np.asarray([2.0], "float32")) verify(np.asarray([4.0], "float32"), np.asarray([3.0], "float32")) @@ -1580,7 +1564,7 @@ def verify(from_dtype, to_dtype): intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(from_np) assert op_res.dtype == to_dtype, op_res.dtype - tvm.testing.assert_allclose(op_res.asnumpy(), from_np.astype(to_dtype)) + tvm.testing.assert_allclose(op_res.numpy(), from_np.astype(to_dtype)) verify("float32", "float16") verify("float16", "float32") @@ -1604,7 +1588,7 @@ def verify(dtypes, cast_narrow, expected_dtype): op_res = intrp.evaluate()(*x_nps) for i, res in enumerate(op_res): assert res.dtype == expected_dtype, res.dtype - tvm.testing.assert_allclose(res.asnumpy(), x_nps[i].astype(expected_dtype)) + tvm.testing.assert_allclose(res.numpy(), x_nps[i].astype(expected_dtype)) verify(["float32", "float16"], False, "float32") verify(["float32", "float16"], True, "float16") @@ -1627,7 +1611,7 @@ def verify(x, shape, dtype): for kind in ["graph", "vm", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(a_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) for dtype in ["int32", "int64"]: verify([0, 1, 2, 3], [2, 2], dtype) @@ -1668,9 +1652,7 @@ def verify(shape, blocksize=2): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, 
device=dev, target=target) op_res = intrp.evaluate()(x) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify((1, 18, 3, 3), 3) @@ -1689,9 +1671,7 @@ def verify(shape, blocksize=2): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify((1, 1, 9, 9), 3) @@ -1727,9 +1707,7 @@ def verify(data_shape, kernel_size, max_displacement, stride1, stride2, pad_size for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data1, data2) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify( (1, 3, 10, 10), @@ -1834,7 +1812,7 @@ def verify(data_shape, start=None, step=None, axis=None): for kind in ["graph"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()() - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy()) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy()) verify(data_shape=(3,), start=0.0, step=1.0) verify(data_shape=(3, 4, 5), start=0.0, step=1.0) @@ -1856,7 +1834,7 @@ def verify(batch, seq_length, num_heads, head_dim): for kind in ["graph"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) verify(1, 10, 3, 16) verify(3, 10, 6, 8) @@ -1881,7 +1859,7 @@ def verify(batch, seq_length, num_heads, head_dim): for kind in ["graph"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data=data_np, weight=weight_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) verify(1, 10, 4, 16) verify(3, 10, 6, 8) @@ -1938,9 +1916,7 @@ def verify( for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify((1, 10, 6)) # No valid boxes @@ -1979,9 +1955,7 @@ def verify(data_shape, anchor_shape, stds=[1, 1, 1, 1], clip=-1, in_format="corn for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data, anchors) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify((1, 10, 4), (1, 10, 4)) verify((4, 10, 4), (1, 10, 4)) @@ -2025,9 +1999,7 @@ def verify(data_shape, axis, use_length, length): else: op_res = intrp.evaluate()(x) - tvm.testing.assert_allclose( - op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5 - ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5) verify((2, 3, 5), -1, 
False, None) verify((2, 3, 5), 2, False, None) @@ -2063,7 +2035,7 @@ def test_forward_npi_pad(data_shape, pad_width, mode, dtype, constant_value, tar mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) @pytest.mark.skipif( @@ -2082,7 +2054,7 @@ def test_forward_npi_transpose(data_shape, axes, dtype, target, dev, kind): mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.parametrize( @@ -2110,7 +2082,7 @@ def test_forward_npi_concatenate(data_shape1, data_shape2, axis, dtype, target, ) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np1, data_np2) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.parametrize( @@ -2138,7 +2110,7 @@ def test_forward_npi_stack(data_shape1, data_shape2, axis, dtype, target, dev, k ) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np1, data_np2) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.parametrize("data_shape", [(2, 2, 2), (2, 7, 2), (2, 2, 2, 1, 2, 3, 1), (1, 8)]) @@ -2153,7 +2125,7 @@ def test_forward_np_copy(data_shape, dtype, target, dev, kind): mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32", "bool"]) @@ -2181,7 +2153,7 @@ def test_forward_npx_reshape(data_shape, out_shape, dtype, target, reverse, dev, mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.parametrize( @@ -2216,7 +2188,7 @@ def test_forward_npi_binary(data_shape, dtype, target, dev, kind): ) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np1, data_np2) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.parametrize( @@ -2248,7 +2220,7 @@ def test_forward_npi_binary_scalar(data_shape, dtype, scalar, target, dev, kind) mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"lhs": data_shape}, dtype=dtype) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np1) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), 
rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.parametrize( @@ -2265,7 +2237,7 @@ def test_forward_npi_tanh(data_shape, dtype, target, dev, kind): mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"data": data_shape}, dtype=dtype) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np1) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.skipif(not hasattr(mx.np, "where"), reason="mx.np.where hasn't been publish yet") @@ -2297,7 +2269,7 @@ def test_forward_npi_where_rscalar( ) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(cond_np, data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5) @pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32", "bool"]) @@ -2328,7 +2300,7 @@ def test_forward_split_v2( op_res = intrp.evaluate()(data_np) op_res_ = [] for arr in op_res: - op_res_.append(arr.asnumpy().tolist()) + op_res_.append(arr.numpy().tolist()) ref_res_ = [] for arr in ref_res: ref_res_.append(arr.asnumpy().tolist()) diff --git a/tests/python/frontend/mxnet/test_qnn_ops_utils.py b/tests/python/frontend/mxnet/test_qnn_ops_utils.py index a200e06ed2d0..adbb0a74558b 100644 --- a/tests/python/frontend/mxnet/test_qnn_ops_utils.py +++ b/tests/python/frontend/mxnet/test_qnn_ops_utils.py @@ -45,7 +45,7 @@ def dequantize_test_driver(in_dtype, quant_args, in_data, verify_output_data): rt_mod.set_input(input_data=in_data) rt_mod.set_input(**params) rt_mod.run() - res = rt_mod.get_output(0).asnumpy() + res = rt_mod.get_output(0).numpy() assert np.allclose(res, verify_output_data) assert res.dtype == np.float32 @@ -124,7 +124,7 @@ def quantize_test_driver(out_dtype, quant_args, in_data, verify_output_data): rt_mod.set_input(input_data=in_data) rt_mod.set_input(**params) rt_mod.run() - res = rt_mod.get_output(0).asnumpy() + res = rt_mod.get_output(0).numpy() assert np.allclose(res, verify_output_data) assert res.dtype == verify_output_data.dtype diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index aaf524cc9dcc..d1ecfc5559a4 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -62,8 +62,8 @@ def get_tvm_output_with_vm( ex = relay.create_executor("vm", mod=mod, device=device, target=target) result = ex.evaluate()(*input_data, **params) if isinstance(result, tvm.runtime.NDArray): - return result.asnumpy() - return [r.asnumpy() for r in result] + return result.numpy() + return [r.numpy() for r in result] def get_tvm_output( @@ -109,11 +109,11 @@ def get_tvm_output( tvm_output_list = [] for i, _ in enumerate(output_shape): tvm_output = m.get_output(i) - tvm_output_list.append(tvm_output.asnumpy()) + tvm_output_list.append(tvm_output.numpy()) return tvm_output_list else: tvm_output = m.get_output(0) - return tvm_output.asnumpy() + return tvm_output.numpy() def get_onnxruntime_output(model, inputs): @@ -641,7 +641,7 @@ def test_dynamic_gather(): for target, device in tvm.testing.enabled_targets(): ex = relay.create_executor("vm", mod=mod, device=device, target=target) result = ex.evaluate()(x, **params) - tvm.testing.assert_allclose(out_np, result.asnumpy(), rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(out_np, 
result.numpy(), rtol=1e-5, atol=1e-5) def verify_gatherelements(in_shape, indices, axis): @@ -1191,7 +1191,7 @@ def verify_model(ex, a_shape, b_shape): # relu out_np[out_np < 0] = 0 - tvm_out = ex.evaluate()(a_array, b_array).asnumpy() + tvm_out = ex.evaluate()(a_array, b_array).numpy() tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) mul_node = helper.make_node("MatMul", ["a", "b"], ["out"]) diff --git a/tests/python/frontend/pytorch/qnn_test.py b/tests/python/frontend/pytorch/qnn_test.py index 5b0b65f7b128..704245040025 100644 --- a/tests/python/frontend/pytorch/qnn_test.py +++ b/tests/python/frontend/pytorch/qnn_test.py @@ -291,7 +291,7 @@ def test_quantized_modules(): runtime = get_tvm_runtime(script_module, input_name, ishape) runtime.set_input(input_name, inp.numpy().copy()) runtime.run() - tvm_result = runtime.get_output(0).asnumpy() + tvm_result = runtime.get_output(0).numpy() max_abs_diff = np.max(np.abs(tvm_result - pt_result)) mean_abs_diff = np.mean(np.abs(tvm_result - pt_result)) @@ -396,7 +396,7 @@ def get_imagenet_input(): runtime.set_input(input_name, inp) runtime.run() - tvm_result = runtime.get_output(0).asnumpy() + tvm_result = runtime.get_output(0).numpy() results.append((model_name, pt_result[0], tvm_result[0])) @@ -496,7 +496,7 @@ def test_serialized_modules(): runtime = get_tvm_runtime(loaded, input_name, ishape) runtime.set_input(input_name, inp.numpy().copy()) runtime.run() - tvm_result = runtime.get_output(0).asnumpy() + tvm_result = runtime.get_output(0).numpy() # with 0.5ish results, 1e-2 is relative accuracy close to 2**-6. # for simple layers like here this should be achievable @@ -538,7 +538,7 @@ def forward(self, inp): runtime = get_tvm_runtime(script_module, "input", inp.shape) runtime.set_input(input_name, inp.numpy().copy()) runtime.run() - tvm_result = runtime.get_output(0).asnumpy() + tvm_result = runtime.get_output(0).numpy() # Only compare with the PyTorch result for version v1.6 or newer # Have seen a strange accuracy problem from PyTorch 1.4 and 1.5 diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py index f26d8cb37dd8..07f0d8e75c4d 100644 --- a/tests/python/frontend/pytorch/test_forward.py +++ b/tests/python/frontend/pytorch/test_forward.py @@ -219,7 +219,7 @@ def verify_model( relay_model.run() for i, baseline_output in enumerate(baseline_outputs): - compiled_output = relay_model.get_output(i).asnumpy() + compiled_output = relay_model.get_output(i).numpy() assert_shapes_match(baseline_output, compiled_output) tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol) @@ -1756,7 +1756,7 @@ def forward(self, x): @tvm.testing.uses_gpu def test_to(): - """ test for aten::to(...) 
""" + """test for aten::to(...)""" class ToCPU(Module): def forward(self, x): @@ -2160,7 +2160,7 @@ def verify_trace_model(pt_model, idata, targets): def convert_pt_to_tvm_type(idtype): - """ Accepts a pytorch dtype and returns string TVM dtype.""" + """Accepts a pytorch dtype and returns string TVM dtype.""" # TVM does not support PyTorch complex dtypes if idtype == torch.float64: curr_dtype = "float64" @@ -2236,13 +2236,13 @@ def verify_model_vm(input_model, ishapes, idtype=None, idata=None, targets=["llv if isinstance(pt_result, tuple): # handle multiple outputs for i in range(len(pt_result)): - tvm_res = vm_res[i].asnumpy() + tvm_res = vm_res[i].numpy() tvm.testing.assert_allclose(tvm_res, pt_result[i].numpy(), rtol=1e-5, atol=1e-5) elif not isinstance(pt_result, torch.Tensor): - tvm_res = vm_res.asnumpy().item() + tvm_res = vm_res.numpy().item() assert pt_result == tvm_res else: - tvm.testing.assert_allclose(vm_res.asnumpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(vm_res.numpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -3669,7 +3669,7 @@ def test_forward_pretrained_bert_base_uncased(): relay_model.set_input(input_1, tokens_tensor) relay_model.set_input(input_2, segments_tensors) relay_model.run() - compiled_output = relay_model.get_output(0).asnumpy() + compiled_output = relay_model.get_output(0).numpy() ###################################################################### # Validate the outputs diff --git a/tests/python/frontend/pytorch/test_lstm.py b/tests/python/frontend/pytorch/test_lstm.py index 9089a83239e4..1aa8bff4076e 100644 --- a/tests/python/frontend/pytorch/test_lstm.py +++ b/tests/python/frontend/pytorch/test_lstm.py @@ -217,9 +217,7 @@ def assert_equal(tvm_result, torch_result): for tvm_res, pt_res in zip(tvm_result, torch_result): assert_equal(tvm_res, pt_res) elif isinstance(torch_result, torch.Tensor): - tvm.testing.assert_allclose( - tvm_result.asnumpy(), torch_result.numpy(), rtol=1e-4, atol=1e-4 - ) + tvm.testing.assert_allclose(tvm_result.numpy(), torch_result.numpy(), rtol=1e-4, atol=1e-4) def run_and_compare(mod, params, pt_result, target, device): diff --git a/tests/python/frontend/pytorch/test_object_detection.py b/tests/python/frontend/pytorch/test_object_detection.py index 3d51f0e58655..3bc2bf5cd075 100644 --- a/tests/python/frontend/pytorch/test_object_detection.py +++ b/tests/python/frontend/pytorch/test_object_detection.py @@ -127,14 +127,14 @@ def compile_and_run_vm(mod, params, data_np, target): # Bounding boxes tvm.testing.assert_allclose( - pt_res[0].cpu().numpy(), tvm_res[0].asnumpy(), rtol=1e-5, atol=1e-5 + pt_res[0].cpu().numpy(), tvm_res[0].numpy(), rtol=1e-5, atol=1e-5 ) # Scores tvm.testing.assert_allclose( - pt_res[1].cpu().numpy(), tvm_res[1].asnumpy(), rtol=1e-5, atol=1e-5 + pt_res[1].cpu().numpy(), tvm_res[1].numpy(), rtol=1e-5, atol=1e-5 ) # Class ids - np.testing.assert_equal(pt_res[2].cpu().numpy(), tvm_res[2].asnumpy()) + np.testing.assert_equal(pt_res[2].cpu().numpy(), tvm_res[2].numpy()) score_threshold = 0.9 print("Num boxes:", pt_res[0].cpu().numpy().shape[0]) @@ -161,4 +161,4 @@ def compile_and_run_vm(mod, params, data_np, target): # Results should be equivalent after rewriting for res1, res2 in zip(tvm_res, tvm_res_after_rewrite): - tvm.testing.assert_allclose(res1.asnumpy(), res2.asnumpy()) + tvm.testing.assert_allclose(res1.numpy(), res2.numpy()) diff --git a/tests/python/frontend/tensorflow/test_bn_dynamic.py b/tests/python/frontend/tensorflow/test_bn_dynamic.py index 
4eb0d01ef102..30c93d991afb 100644 --- a/tests/python/frontend/tensorflow/test_bn_dynamic.py +++ b/tests/python/frontend/tensorflow/test_bn_dynamic.py @@ -74,7 +74,7 @@ def verify_fused_batch_norm(shape): m.run() tvm_out = m.get_output(0) tvm.testing.assert_allclose( - tvm_out.asnumpy(), tf_out.astype(tvm_out.dtype), atol=1e-3, rtol=1e-3 + tvm_out.numpy(), tf_out.astype(tvm_out.dtype), atol=1e-3, rtol=1e-3 ) diff --git a/tests/python/frontend/tensorflow/test_control_flow.py b/tests/python/frontend/tensorflow/test_control_flow.py index ebe2ca3b8fda..c91661db7e36 100644 --- a/tests/python/frontend/tensorflow/test_control_flow.py +++ b/tests/python/frontend/tensorflow/test_control_flow.py @@ -37,11 +37,11 @@ def check_equal(graph, tf_out, input_map=None): ex = relay.create_executor("vm", mod=mod) relay_out = ex.evaluate()(**params) if isinstance(relay_out, nd.NDArray): - np.testing.assert_allclose(tf_out, relay_out.asnumpy()) + np.testing.assert_allclose(tf_out, relay_out.numpy()) else: if not isinstance(tf_out, (list, tuple)): tf_out = [tf_out] - for x, y in zip(tf_out, [r.asnumpy() for r in relay_out]): + for x, y in zip(tf_out, [r.numpy() for r in relay_out]): np.testing.assert_allclose(x, y) diff --git a/tests/python/frontend/tensorflow/test_debugging.py b/tests/python/frontend/tensorflow/test_debugging.py index 2a5fb60f24af..26fe171fb789 100644 --- a/tests/python/frontend/tensorflow/test_debugging.py +++ b/tests/python/frontend/tensorflow/test_debugging.py @@ -52,7 +52,7 @@ def test_assert_true(): # do that, it's happening in Relay, and that optimization shouldn't # affect the arity of the main function. We should have to pass in # x_value here. - np.testing.assert_allclose(0, run_relay(g, {"input": shape}).asnumpy()) + np.testing.assert_allclose(0, run_relay(g, {"input": shape}).numpy()) def test_assert_true_var_capture(): @@ -72,7 +72,7 @@ def test_assert_true_var_capture(): # TODO: The frontend converter notes the output of # the graph as a boolean, which is not correct - as you can see above, # TF believes that the value of this graph is None. - np.testing.assert_allclose(True, run_relay(g, None, x_value).asnumpy()) + np.testing.assert_allclose(True, run_relay(g, None, x_value).numpy()) def test_assert_false(): @@ -91,7 +91,7 @@ def test_assert_false(): # though it should probably be none or an empty tuple. For the same # reason, there should not be an error here, even though the assertion # argument is false. 
- np.testing.assert_allclose(0, run_relay(g).asnumpy()) + np.testing.assert_allclose(0, run_relay(g).numpy()) if __name__ == "__main__": diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index e7b189345c61..f29450dbb604 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -79,7 +79,7 @@ def convert_to_list(x): def vmobj_to_list(o): if isinstance(o, tvm.nd.NDArray): - return [o.asnumpy()] + return [o.numpy()] elif isinstance(o, tvm.runtime.container.ADT): result = [] for f in o: @@ -96,7 +96,7 @@ def vmobj_to_list(o): elif "tensor_nil" in o.constructor.name_hint: return [0] elif "tensor" in o.constructor.name_hint: - return [o.fields[0].asnumpy()] + return [o.fields[0].numpy()] else: raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint) else: @@ -118,7 +118,7 @@ def run_tvm_graph( ignore_in_shape=False, serialize=False, ): - """ Generic function to compile on relay and execute on tvm """ + """Generic function to compile on relay and execute on tvm""" input_data = convert_to_list(input_data) input_node = convert_to_list(input_node) if target == "cuda": @@ -182,12 +182,12 @@ def run_tvm_graph( assert out_names is None or num_output == len( out_names ), "out_names: {} num_output: {}".format(out_names, num_output) - tvm_output_list = [m.get_output(i).asnumpy() for i in range(num_output)] + tvm_output_list = [m.get_output(i).numpy() for i in range(num_output)] return tvm_output_list def run_tf_graph(sess, input_data, input_node, output_node): - """ Generic function to execute tensorflow """ + """Generic function to execute tensorflow""" input_data = convert_to_list(input_data) input_node = convert_to_list(input_node) output_node = convert_to_list(output_node) @@ -290,7 +290,7 @@ def is_gpu_available(): def _test_pooling_iteration(input_shape, **kwargs): - """ One iteration of pool operation with given shapes and attributes """ + """One iteration of pool operation with given shapes and attributes""" x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1 @@ -317,7 +317,7 @@ def _test_pooling(input_shape, **kwargs): def _test_pooling_dynamic(input_shape, np_shape, **kwargs): - """ Pooling with dynamic height and width dimensions. 
""" + """Pooling with dynamic height and width dimensions.""" x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1 with tf.Graph().as_default(): @@ -334,7 +334,7 @@ def _test_pooling_dynamic(input_shape, np_shape, **kwargs): @tvm.testing.uses_gpu def test_forward_pooling(): - """ Pooling """ + """Pooling""" # TensorFlow only supports NDHWC for max_pool3d on CPU for pool_type in ["AVG", "MAX"]: # NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow @@ -478,7 +478,7 @@ def _test_convolution( deconv_output_shape=[], add_shapes_to_graph_def=True, ): - """ One iteration of convolution with given shapes and attributes """ + """One iteration of convolution with given shapes and attributes""" total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) @@ -924,7 +924,7 @@ def _test_convolution3d( deconv_output_shape=[], add_shapes_to_graph_def=True, ): - """ One iteration of 3D convolution with given shapes and attributes """ + """One iteration of 3D convolution with given shapes and attributes""" total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) @@ -1016,7 +1016,7 @@ def _test_convolution3d_transpose( data_format="NCDHW", add_shapes_to_graph_def=True, ): - """ One iteration of 3D convolution transpose with given shapes and attributes """ + """One iteration of 3D convolution transpose with given shapes and attributes""" dtype = "float32" data_array = np.random.uniform(size=data_shape).astype(dtype) @@ -1137,7 +1137,7 @@ def test_forward_convolution3d_transpose(): def _test_biasadd(tensor_in_sizes, data_format): - """ One iteration of biasadd with given shapes and attributes """ + """One iteration of biasadd with given shapes and attributes""" total_size_1 = 1 for s in tensor_in_sizes: @@ -1287,7 +1287,7 @@ def test_forward_batch_to_space_nd(): def _test_reshape(data, out_shape): - """ One iteration of reshape operation with given data and out shape """ + """One iteration of reshape operation with given data and out shape""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -1297,7 +1297,7 @@ def _test_reshape(data, out_shape): def _test_reshape_with_call(): - """ relay.expr.Call as shape """ + """relay.expr.Call as shape""" data = np.zeros((6, 4, 2)) with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -1309,7 +1309,7 @@ def _test_reshape_with_call(): def _test_reshape_like(data, shape_like): - """ A special case for reshape. 
""" + """A special case for reshape.""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -1353,7 +1353,7 @@ def test_forward_reshape(): def _test_depthtospace(data, block_size): - """ One iteration of depth_to_space operation with given data and block size """ + """One iteration of depth_to_space operation with given data and block size""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -1373,7 +1373,7 @@ def test_forward_depthtospace(): def _test_spacetodepth(data, block_size): - """ One iteration of space_to_depth operation with given data and block size """ + """One iteration of space_to_depth operation with given data and block size""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -1393,7 +1393,7 @@ def test_forward_spacetodepth(): def _test_squeeze(data, squeeze_dims=None): - """ One iteration of squeeze """ + """One iteration of squeeze""" if squeeze_dims is None: squeeze_dims = [] @@ -1410,7 +1410,7 @@ def _test_squeeze(data, squeeze_dims=None): def test_forward_squeeze(): - """ Squeeze """ + """Squeeze""" # Nothing to squeeze. _test_squeeze(np.arange(2).reshape((2))) @@ -1625,7 +1625,7 @@ def run(dtype_str, input_shape, infer_shape): def _test_concat_v2(shape1, shape2, dim): - """ One iteration of ConcatV2 """ + """One iteration of ConcatV2""" with tf.Graph().as_default(): dtype = "float32" @@ -1656,7 +1656,7 @@ def test_forward_concat_v2(): def _test_sigmoid(data): - """ One iteration of sigmoid """ + """One iteration of sigmoid""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -1666,7 +1666,7 @@ def _test_sigmoid(data): def test_forward_sigmoid(): - """ Sigmoid """ + """Sigmoid""" _test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32")) @@ -1698,7 +1698,7 @@ def test_forward_argminmax(): def _test_variable(data): - """ One iteration of a variable """ + """One iteration of a variable""" tf.reset_default_graph() with tf.Graph().as_default(): @@ -1720,7 +1720,7 @@ def test_forward_variable(): @tvm.testing.parametrize_targets("llvm", "cuda") def test_read_variable_op(target, dev): - """ Read Variable op test """ + """Read Variable op test""" tf.reset_default_graph() data = np.random.uniform(size=(32, 100)).astype("float32") @@ -1778,7 +1778,7 @@ def test_read_variable_op(target, dev): def _test_matmul(i, j, k, dtype, outer=None): - """ One iteration of matmul """ + """One iteration of matmul""" A_shape_init = [i, j] B_shape_init = [j, k] @@ -1800,7 +1800,7 @@ def _test_matmul(i, j, k, dtype, outer=None): def test_forward_matmul(): - """ MatMul op test""" + """MatMul op test""" _test_matmul(1, 3, 6, "int32") _test_matmul(5, 3, 1, "float64") @@ -1835,7 +1835,7 @@ def _test_batch_matmul_dynamic( def test_forward_batch_matmul(): - """ TF op BatchMatMul, BatchMatMulV2 test""" + """TF op BatchMatMul, BatchMatMulV2 test""" _test_batch_matmul((3, 5, 4), (3, 4, 5), "int32") _test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True) _test_batch_matmul((3, 5, 4), (3, 5, 4), "int32", True, False) @@ -1879,7 +1879,7 @@ def test_forward_batch_matmul_dynamic(): def _test_sparse_dense_matmul(indices, values, A_inp_shape, B_inp_shape, dtype, flip=False): - """ One iteration of sparse_dense_matmul """ + """One iteration of sparse_dense_matmul""" for adjoint_a in [False, True]: for adjoint_b in [False, True]: @@ -1905,7 +1905,7 @@ def _test_sparse_dense_matmul(indices, 
values, A_inp_shape, B_inp_shape, dtype, def test_forward_sparse_dense_matmul(): - """ sparse_dense_matmul op test""" + """sparse_dense_matmul op test""" ################################################################### # # In order to create a SparseTensor, it requires 3 input as below: @@ -2018,7 +2018,7 @@ def _test_sparse_fill_empty_rows(indices_np, values_np, dense_shape_np, default_ def test_forward_sparse_fill_empty_rows( sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn ): - """ sparse_fill_empty_rows op test""" + """sparse_fill_empty_rows op test""" ################################################################### # # In order to create a SparseTensor, it requires 3 input as below: @@ -2145,7 +2145,7 @@ def _test_sparse_reshape(indices_np, values_np, prev_shape_np, new_shape_np, use def test_forward_sparse_reshape( sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn ): - """ sparse_reshape op test""" + """sparse_reshape op test""" ################################################################### # # In order to create a SparseTensor, it requires 3 input as below: @@ -2438,7 +2438,7 @@ def test_forward_sparse_to_dense_v2(): def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False): - """ One iteration of tf.sparse.add """ + """One iteration of tf.sparse.add""" # TODO(ANSHUMAN87): support cuda # TODO(ANSHUMAN87): support both sparse input case @@ -2461,7 +2461,7 @@ def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False): def test_sparse_add(): - """ sparse.add op test""" + """sparse.add op test""" ################################################################### # # In order to create a SparseTensor, it requires 3 input as below: @@ -2497,7 +2497,7 @@ def _test_stridedslice( shrink_axis_mask=0, ellipsis_mask=0, ): - """ One iteration of a Stridedslice """ + """One iteration of a Stridedslice""" tf.reset_default_graph() np_data = np.random.uniform(size=ip_shape).astype(dtype) @@ -2706,7 +2706,7 @@ def test_forward_truncatemod(): def _test_gather(ip_shape, indice_shape, indice_value, axis, batch_dims, dtype): - """ One iteration of a GatherV2 """ + """One iteration of a GatherV2""" tf.reset_default_graph() with tf.Graph().as_default(): @@ -3019,7 +3019,7 @@ def test_forward_multi_output(): def _test_resize_bilinear(in_shape, to_shape, align_corners): - """ One iteration of resize bilinear """ + """One iteration of resize bilinear""" data = np.random.uniform(size=in_shape).astype("float32") shape_data = np.array(to_shape).astype("int32") @@ -3051,7 +3051,7 @@ def _test_resize_bilinear_from_tensor(in_shape, align_corners): def _test_resize_nearest_neighbor(in_shape, to_shape): - """ One iteration of resize nearest neighbor """ + """One iteration of resize nearest neighbor""" data = np.random.uniform(size=in_shape).astype("float32") shape_data = np.array(to_shape).astype("int32") @@ -3067,7 +3067,7 @@ def _test_resize_nearest_neighbor(in_shape, to_shape): def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale): - """ One iteration of resize nearest neighbor for graph with dynamic input shape """ + """One iteration of resize nearest neighbor for graph with dynamic input shape""" data = np.random.uniform(size=in_shape).astype("float32") with tf.Graph().as_default(): @@ -3080,7 +3080,7 @@ def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale): def test_forward_resize(): - """ Resize Bilinear, Nearest_Neighbor """ + """Resize Bilinear, Nearest_Neighbor""" # TF default layout is 
NHWC _test_resize_bilinear((4, 32, 32, 3), [50, 50], False) _test_resize_bilinear((6, 32, 32, 3), [20, 20], True) @@ -3096,7 +3096,7 @@ def test_forward_resize(): def _test_broadcast_args(in_shape_1, in_shape_2): - """ One iteration of broadcast_args""" + """One iteration of broadcast_args""" shape_1 = np.array(in_shape_1).astype("int32") shape_2 = np.array(in_shape_2).astype("int32") @@ -3110,7 +3110,7 @@ def _test_broadcast_args(in_shape_1, in_shape_2): def test_forward_broadcast_args(): - """ Resize Bilinear """ + """Resize Bilinear""" _test_broadcast_args((4, 1, 32, 32), [4, 8, 32, 32]) _test_broadcast_args((6, 32, 32, 1), [6, 32, 32, 16]) @@ -3123,7 +3123,7 @@ def test_forward_broadcast_args(): def _test_broadcast_to(in_shape, to_shape): - """ One iteration of broadcast_to""" + """One iteration of broadcast_to""" data = np.random.uniform(size=in_shape).astype("float32") shape_data = np.array(to_shape).astype("int32") @@ -3139,7 +3139,7 @@ def _test_broadcast_to(in_shape, to_shape): def _test_broadcast_to_from_tensor(in_shape): - """ One iteration of broadcast_to with unknown shape at graph build""" + """One iteration of broadcast_to with unknown shape at graph build""" data = np.random.uniform(size=in_shape).astype("float32") @@ -3153,7 +3153,7 @@ def _test_broadcast_to_from_tensor(in_shape): def test_forward_broadcast_to(): - """ Resize Bilinear """ + """Resize Bilinear""" _test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32]) _test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16]) @@ -3166,7 +3166,7 @@ def test_forward_broadcast_to(): def _test_fill(in_shape): - """ Use the fill op to create a tensor of ones with non-constant shape.""" + """Use the fill op to create a tensor of ones with non-constant shape.""" with tf.Graph().as_default(): tf.ones(shape=in_shape, dtype="float32") @@ -3202,7 +3202,7 @@ def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype): def test_forward_fill(): - """ Resize Bilinear """ + """Resize Bilinear""" _test_fill((32)) _test_fill((6, 32, 64, 64)) @@ -3218,7 +3218,7 @@ def test_forward_fill(): def _test_crop(in_shape, off_h, off_w, tar_h, tar_w): - """ Crop to bounding box """ + """Crop to bounding box""" data = np.random.uniform(size=in_shape).astype("float32") with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -3227,7 +3227,7 @@ def _test_crop(in_shape, off_h, off_w, tar_h, tar_w): def test_forward_crop(): - """ Crop to bounding box """ + """Crop to bounding box""" _test_crop((1, 224, 224, 3), 20, 20, 120, 120) @@ -3262,7 +3262,7 @@ def _test_forward_crop_and_resize( def test_forward_crop_and_resize(): - """ CropAndResize """ + """CropAndResize""" _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3]) _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2) _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest") @@ -3393,7 +3393,7 @@ def _test_forward_nms_v5( def test_forward_nms(): - """ NonMaxSuppressionV3,5 """ + """NonMaxSuppressionV3,5""" for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v5]: _test_forward_nms((5, 4), (5,), 0.7, 0.5, 5) _test_forward_nms((20, 4), (20,), 0.5, 0.6, 10) @@ -3443,7 +3443,7 @@ def _test_forward_combined_nms( def test_forward_combined_nms(): - """ CombinedNonMaxSuppression """ + """CombinedNonMaxSuppression""" _test_forward_combined_nms((1, 64, 1, 4), (1, 64, 1), 0.7, 0.5, 64, 64) _test_forward_combined_nms((1, 64, 1, 4), (1, 64, 20), 0.7, 0.5, 64, 10) 
_test_forward_combined_nms((1, 64, 20, 4), (1, 64, 20), 0.7, 0.5, 64, 64, clip_boxes=True) @@ -3456,7 +3456,7 @@ def test_forward_combined_nms(): def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype): - """ One iteration of a LSTM cell """ + """One iteration of a LSTM cell""" tf.reset_default_graph() input_size = num_hidden @@ -3581,7 +3581,7 @@ def test_forward_range(): def _test_pad(input_shape, paddings, mode, **kwargs): - """ One iteration of pad operation with given shape""" + """One iteration of pad operation with given shape""" x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) @@ -3602,7 +3602,7 @@ def _test_pad(input_shape, paddings, mode, **kwargs): def test_forward_pad(): - """ Pad """ + """Pad""" _test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT") _test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0) _test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC") @@ -3663,7 +3663,7 @@ def test_forward_logical(): # Where, Select, SelectV2 # ------------- def test_forward_where(): - """ Where: return elements depending on conditions""" + """Where: return elements depending on conditions""" with tf.Graph().as_default(): with tf.Session() as sess: input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1") @@ -3958,12 +3958,12 @@ def _get_sample(data, state): ) model.set_input(**params) model.run() - tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).asnumpy() + tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).numpy() state_output = [] for i in range(4): state_output.append( - model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).asnumpy() + model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).numpy() ) sample = tf_testing.pick_from_weight(tvm_output[0]) @@ -4019,7 +4019,7 @@ def _get_sample(data, state): def _test_lrn(ishape, size, axis, bias, alpha, beta): - """ testing local response normalization """ + """testing local response normalization""" lrn_depth_radius = size / 2 inp_array = np.random.uniform(size=ishape).astype(np.float32) @@ -4043,7 +4043,7 @@ def test_forward_lrn(): def _test_l2_normalize(ishape, eps, axis): - """ testing l2 normalize (uses max, sum, square, sqrt frontend operators)""" + """testing l2 normalize (uses max, sum, square, sqrt frontend operators)""" inp_array = np.random.uniform(size=ishape).astype(np.float32) @@ -4190,7 +4190,7 @@ def test_forward_tanh(): # Softmax # ------- def test_forward_softmax(): - """test operator Softmax """ + """test operator Softmax""" def check_softmax(in_shape, axis, dtype): np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype) @@ -4287,7 +4287,7 @@ def test_forward_sign(): def test_forward_square(): - """test operator Square """ + """test operator Square""" np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): @@ -4297,7 +4297,7 @@ def test_forward_square(): def test_forward_pow_exp(): - """test Pow and Exp """ + """test Pow and Exp""" np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32) np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32) tf.reset_default_graph() @@ -4338,7 +4338,7 @@ def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32): def test_forward_atan2(): - """test operator tan """ + """test operator tan""" tf.disable_eager_execution() np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32) np_data_2 = np.random.uniform(1, 100, 
size=(2, 3, 5)).astype(np.float32) @@ -4350,7 +4350,7 @@ def test_forward_atan2(): def test_forward_expm1(): - """test operator expm1 """ + """test operator expm1""" def _test_forward_expm1(shape): tf.disable_eager_execution() @@ -4366,7 +4366,7 @@ def _test_forward_expm1(shape): def test_forward_softsign(): - """test operator softsign """ + """test operator softsign""" def _test_forward_softsign(shape): tf.disable_eager_execution() @@ -4382,7 +4382,7 @@ def _test_forward_softsign(shape): def test_forward_rint(): - """test operator rint """ + """test operator rint""" def _test_forward_rint(shape): tf.disable_eager_execution() @@ -4399,7 +4399,7 @@ def _test_forward_rint(shape): def test_forward_negative(): - """test tf operator Neg """ + """test tf operator Neg""" np_data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): @@ -4429,7 +4429,7 @@ def test_forward_softplus(): def test_forward_rsqrt(): - """test Rsqrt """ + """test Rsqrt""" np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): @@ -4439,7 +4439,7 @@ def test_forward_rsqrt(): def test_forward_sqrt(): - """test Sqrt """ + """test Sqrt""" np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32) tf.reset_default_graph() with tf.Graph().as_default(): @@ -4827,7 +4827,7 @@ def test_forward_unravel_index(): # Dilation2d # ---------------------- def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding): - """ One iteration of dilation2d with given shapes and attributes """ + """One iteration of dilation2d with given shapes and attributes""" total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) diff --git a/tests/python/frontend/tensorflow/test_no_op.py b/tests/python/frontend/tensorflow/test_no_op.py index a84e2548e1b1..38246ea5e14f 100644 --- a/tests/python/frontend/tensorflow/test_no_op.py +++ b/tests/python/frontend/tensorflow/test_no_op.py @@ -40,7 +40,7 @@ def test_no_op(): # In TVM, no-op is currently translated to 0, though it should # probably be none or an empty tuple. 
- np.testing.assert_allclose(0, run_relay(g).asnumpy()) + np.testing.assert_allclose(0, run_relay(g).numpy()) if __name__ == "__main__": diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py index 09d273d44dc9..7b5377b3363d 100644 --- a/tests/python/frontend/tflite/test_forward.py +++ b/tests/python/frontend/tflite/test_forward.py @@ -112,7 +112,7 @@ def get_real_image_object_detection(im_height, im_width): def vmobj_to_list(o): if isinstance(o, tvm.nd.NDArray): - return [o.asnumpy().tolist()] + return [o.numpy().tolist()] elif isinstance(o, tvm.runtime.container.ADT): result = [] for f in o: @@ -129,7 +129,7 @@ def vmobj_to_list(o): elif "tensor_nil" in o.constructor.name_hint: return [0] elif "tensor" in o.constructor.name_hint: - return [o.fields[0].asnumpy()] + return [o.fields[0].numpy()] else: raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint) else: @@ -162,7 +162,7 @@ def run_tvm_graph( out_names=None, mode="graph_executor", ): - """ Generic function to compile on relay and execute on tvm """ + """Generic function to compile on relay and execute on tvm""" # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1 try: import tflite.Model @@ -223,12 +223,12 @@ def run_tvm_graph( tvm_output_list = [] for i in range(0, num_output): tvm_output = m.get_output(i) - tvm_output_list.append(tvm_output.asnumpy()) + tvm_output_list.append(tvm_output.numpy()) return tvm_output_list def run_tflite_graph(tflite_model_buf, input_data): - """ Generic function to execute TFLite """ + """Generic function to execute TFLite""" input_data = convert_to_list(input_data) interpreter = interpreter_wrapper.Interpreter(model_content=tflite_model_buf) @@ -399,7 +399,7 @@ def test_forward_split(): def _test_slice(data, begin, size): - """ One iteration of SLICE """ + """One iteration of SLICE""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out = array_ops.slice(in_data, begin, size) @@ -407,7 +407,7 @@ def _test_slice(data, begin, size): def test_forward_slice(): - """ SLICE """ + """SLICE""" _test_slice(np.arange(4, dtype=np.float32).reshape((4,)), begin=[0], size=[2]) _test_slice(np.arange(18, dtype=np.int32).reshape((3, 2, 3)), begin=[1, 0, 0], size=[1, 1, 3]) # tflite 1.13 outputs nonsense values if size[i] == -1 @@ -420,7 +420,7 @@ def test_forward_slice(): # Topk # ---- def _test_topk(in_shape, k=1): - """ One iteration of TOPK """ + """One iteration of TOPK""" data = np.random.uniform(size=in_shape).astype("float32") with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -429,7 +429,7 @@ def _test_topk(in_shape, k=1): def test_forward_topk(): - """ TOPK """ + """TOPK""" _test_topk((3,), 1) _test_topk((3,), 3) _test_topk((3, 5, 7), 3) @@ -442,7 +442,7 @@ def test_forward_topk(): def _test_gather(dshape, indices, axis, dtype, quantized=False, oob=False, wrap_idx=False): - """ One iteration of Gather """ + """One iteration of Gather""" indices = np.asarray(indices).astype("int32") data = np.random.uniform(1, 10, size=dshape) data = data.astype(np.uint8) if quantized else data.astype(dtype) @@ -483,7 +483,7 @@ def _test_gather(dshape, indices, axis, dtype, quantized=False, oob=False, wrap_ def test_forward_gather(): - """ GATHER """ + """GATHER""" for quantized in [False, True]: for wrap_idx in [False, True]: _test_gather((4,), [1], 0, "float32", quantized, wrap_idx) @@ -509,7 +509,7 @@ def test_forward_gather(): def 
_test_gather_nd(data, indices): - """ One iteration of GATHER_ND """ + """One iteration of GATHER_ND""" with tf.Graph().as_default(): in_data = tf.placeholder(shape=data.shape, dtype=data.dtype, name="data") indices_data = tf.placeholder(shape=indices.shape, dtype=indices.dtype, name="indices") @@ -521,7 +521,7 @@ def _test_gather_nd(data, indices): def test_forward_gather_nd(): - """ GATHER_ND """ + """GATHER_ND""" _test_gather_nd( np.array([[[1.2, 2.0], [3.1, 4.1]], [[5.1, 6.1], [7.1, 8.1]]]).astype("float32"), np.asarray([[0, 1], [1, 0]]).astype("int32"), @@ -562,7 +562,7 @@ def _test_stridedslice( ellipsis_mask=0, quantized=False, ): - """ One iteration of a Stridedslice """ + """One iteration of a Stridedslice""" data = np.random.uniform(size=ip_shape).astype(dtype) data = data.astype(np.uint8) if quantized else data.astype(dtype) with tf.Graph().as_default(): @@ -652,7 +652,7 @@ def test_forward_transpose(): def _test_cast(data, cast_dtype, use_mlir=False): - """ One iteration of CAST """ + """One iteration of CAST""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out = math_ops.cast(in_data, cast_dtype) @@ -662,7 +662,7 @@ def _test_cast(data, cast_dtype, use_mlir=False): def test_forward_cast(): - """ CAST """ + """CAST""" for use_mlir in [False, True]: _test_cast( np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.int32, use_mlir=use_mlir @@ -690,7 +690,7 @@ def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False def test_forward_batch_matmul(): - """ BATCH_MAT_MUL """ + """BATCH_MAT_MUL""" _test_batch_matmul((3, 5, 4), (3, 4, 5), "float32") _test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True) _test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", True, False) @@ -777,7 +777,7 @@ def test_forward_space_to_batch_nd(): # Pooling # ------- def _test_pooling_iteration(input_shape, **kwargs): - """ One iteration of pool operation with given shapes and attributes """ + """One iteration of pool operation with given shapes and attributes""" x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1 @@ -793,7 +793,7 @@ def _test_pooling(input_shape, **kwargs): def test_forward_pooling(): - """ Pooling """ + """Pooling""" for pool_type in ["AVG", "MAX"]: _test_pooling( @@ -871,7 +871,7 @@ def test_forward_l2_pool2d(): def _test_tflite2_quantized_convolution( input_shape, kernel_shape, dilations, strides, padding, data_format ): - """ One iteration of TFLite2 quantized convolution with given shapes and attributes """ + """One iteration of TFLite2 quantized convolution with given shapes and attributes""" data_format = "channels_last" if "NHWC" else "channels_first" data = np.random.uniform(0, 1, input_shape).astype("float32") kernel = np.random.uniform(0, 1, kernel_shape).astype("float32") @@ -950,7 +950,7 @@ def _test_convolution( quantized=False, fp16_quantized=False, ): - """ One iteration of convolution with given shapes and attributes """ + """One iteration of convolution with given shapes and attributes""" total_size_1 = 1 total_size_2 = 1 @@ -1199,7 +1199,7 @@ def _test_transpose_conv( quantized=False, fp16_quantized=False, ): - """ One iteration of transpose convolution with given shapes and attributes """ + """One iteration of transpose convolution with given shapes and attributes""" total_size_1 = 1 total_size_2 = 1 @@ -1444,7 +1444,7 @@ def test_forward_transpose_conv(): def _test_reshape(data, out_shape, wrap_shape, quantized=False): - """ One iteration of reshape 
operation with given data and out shape """ + """One iteration of reshape operation with given data and out shape""" if quantized: with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in") @@ -1517,7 +1517,7 @@ def test_forward_reshape(): def _test_resize(tf_resize_op, images_data, size_data, align_corners, quantized=False): - """ One iteration of Resize """ + """One iteration of Resize""" # Test with tensor and constant with tf.Graph().as_default(): images_tensor = array_ops.placeholder(shape=images_data.shape, dtype="float32", name="in") @@ -1549,7 +1549,7 @@ def _test_resize(tf_resize_op, images_data, size_data, align_corners, quantized= def test_all_resize(): - """ Resize """ + """Resize""" images_data = np.random.uniform(0, 255, (1, 16, 16, 3)) images_data_float32 = images_data.astype(np.float32) images_data_uint8 = images_data.astype(np.uint8) @@ -1669,7 +1669,7 @@ def test_forward_shape(): def _test_concatenation(data, axis): - """ One iteration of concatenation """ + """One iteration of concatenation""" assert len(data) >= 1 @@ -1706,7 +1706,7 @@ def test_forward_concatenation(): def _test_unary_elemwise(math_op, data): - """ One iteration of unary elemwise """ + """One iteration of unary elemwise""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in") @@ -1720,7 +1720,7 @@ def _test_unary_elemwise(math_op, data): def _test_abs(data): - """ One iteration of abs """ + """One iteration of abs""" return _test_unary_elemwise(math_ops.abs, data) @@ -1730,7 +1730,7 @@ def _test_abs(data): def _test_ceil(data): - """ One iteration of ceil """ + """One iteration of ceil""" return _test_unary_elemwise(math_ops.ceil, data) @@ -1740,7 +1740,7 @@ def _test_ceil(data): def _test_floor(data): - """ One iteration of floor """ + """One iteration of floor""" return _test_unary_elemwise(math_ops.floor, data) @@ -1750,7 +1750,7 @@ def _test_floor(data): def _test_round(data): - """ One iteration of round """ + """One iteration of round""" return _test_unary_elemwise(math_ops.round, data) @@ -1760,7 +1760,7 @@ def _test_round(data): def _test_exp(data): - """ One iteration of exp """ + """One iteration of exp""" return _test_unary_elemwise(math_ops.exp, data) @@ -1770,7 +1770,7 @@ def _test_exp(data): def _test_log(data): - """ One iteration of log """ + """One iteration of log""" return _test_unary_elemwise(math_ops.log, data) @@ -1780,7 +1780,7 @@ def _test_log(data): def _test_sin(data): - """ One iteration of sin """ + """One iteration of sin""" return _test_unary_elemwise(math_ops.sin, data) @@ -1790,7 +1790,7 @@ def _test_sin(data): def _test_cos(data): - """ One iteration of cos """ + """One iteration of cos""" return _test_unary_elemwise(math_ops.cos, data) @@ -1800,7 +1800,7 @@ def _test_cos(data): def _test_tan(data): - """ One iteration of tan """ + """One iteration of tan""" return _test_unary_elemwise(math_ops.tan, data) @@ -1810,7 +1810,7 @@ def _test_tan(data): def _test_sqrt(data): - """ One iteration of sqrt """ + """One iteration of sqrt""" return _test_unary_elemwise(math_ops.sqrt, data) @@ -1820,7 +1820,7 @@ def _test_sqrt(data): def _test_rsqrt(data): - """ One iteration of rsqrt """ + """One iteration of rsqrt""" return _test_unary_elemwise(math_ops.rsqrt, data) @@ -1830,7 +1830,7 @@ def _test_rsqrt(data): def _test_neg(data): - """ One iteration of neg """ + """One iteration of neg""" return _test_unary_elemwise(math_ops.neg, data) @@ -1840,7 +1840,7 @@ def _test_neg(data): 
def _test_square(data): - """ One iteration of square """ + """One iteration of square""" return _test_unary_elemwise(math_ops.square, data) @@ -1850,7 +1850,7 @@ def _test_square(data): def _test_elu(data): - """ One iteration of elu """ + """One iteration of elu""" return _test_unary_elemwise(nn_ops.elu, data) @@ -1898,7 +1898,7 @@ def _test_elemwise( qnn_op=None, same_qnn_params=False, ): - """ One iteration of elemwise """ + """One iteration of elemwise""" assert len(data) == 2 @@ -2000,7 +2000,7 @@ def __test_elemwise(in_data): def _test_add(data, fused_activation_function=None, quantized=False, qnn_op=None): - """ One iteration of add """ + """One iteration of add""" return _test_elemwise(math_ops.add, data, fused_activation_function, quantized, qnn_op) @@ -2010,7 +2010,7 @@ def _test_add(data, fused_activation_function=None, quantized=False, qnn_op=None def _test_sub(data, fused_activation_function=None, quantized=False, qnn_op=None): - """ One iteration of subtract """ + """One iteration of subtract""" return _test_elemwise(math_ops.subtract, data, fused_activation_function, quantized, qnn_op) @@ -2020,7 +2020,7 @@ def _test_sub(data, fused_activation_function=None, quantized=False, qnn_op=None def _test_mul(data, fused_activation_function=None, quantized=False, qnn_op=None): - """ One iteration of mul """ + """One iteration of mul""" return _test_elemwise(math_ops.multiply, data, fused_activation_function, quantized, qnn_op) @@ -2030,7 +2030,7 @@ def _test_mul(data, fused_activation_function=None, quantized=False, qnn_op=None def _test_div(data, fused_activation_function=None): - """ One iteration of divide """ + """One iteration of divide""" return _test_elemwise(math_ops.divide, data, fused_activation_function) @@ -2040,7 +2040,7 @@ def _test_div(data, fused_activation_function=None): def _test_pow(data): - """ One iteration of power """ + """One iteration of power""" return _test_elemwise(math_ops.pow, data) @@ -2050,7 +2050,7 @@ def _test_pow(data): def _test_maximum(data, fused_activation_function=None, quantized=False, qnn_op=None): - """ One iteration of maximum """ + """One iteration of maximum""" return _test_elemwise( math_ops.maximum, data, fused_activation_function, quantized, qnn_op, same_qnn_params=True ) @@ -2062,7 +2062,7 @@ def _test_maximum(data, fused_activation_function=None, quantized=False, qnn_op= def _test_minimum(data, fused_activation_function=None, quantized=False, qnn_op=None): - """ One iteration of minimum """ + """One iteration of minimum""" return _test_elemwise( math_ops.minimum, data, fused_activation_function, quantized, qnn_op, same_qnn_params=True ) @@ -2074,7 +2074,7 @@ def _test_minimum(data, fused_activation_function=None, quantized=False, qnn_op= def _test_greater(data): - """ One iteration of greater """ + """One iteration of greater""" return _test_elemwise(math_ops.greater, data) @@ -2084,7 +2084,7 @@ def _test_greater(data): def _test_greater_equal(data): - """ One iteration of greater_equal """ + """One iteration of greater_equal""" return _test_elemwise(math_ops.greater_equal, data) @@ -2094,7 +2094,7 @@ def _test_greater_equal(data): def _test_less(data): - """ One iteration of less """ + """One iteration of less""" return _test_elemwise(math_ops.less, data) @@ -2104,7 +2104,7 @@ def _test_less(data): def _test_less_equal(data): - """ One iteration of less_equal """ + """One iteration of less_equal""" return _test_elemwise(math_ops.less_equal, data) @@ -2114,7 +2114,7 @@ def _test_less_equal(data): def _test_equal(data): - """ One 
iteration of equal """ + """One iteration of equal""" return _test_elemwise(math_ops.equal, data) @@ -2124,7 +2124,7 @@ def _test_equal(data): def _test_not_equal(data): - """ One iteration of not_equal""" + """One iteration of not_equal""" return _test_elemwise(math_ops.not_equal, data) @@ -2134,7 +2134,7 @@ def _test_not_equal(data): def _test_squared_difference(data): - """ One iteration of squared difference """ + """One iteration of squared difference""" return _test_elemwise(math_ops.squared_difference, data) @@ -2144,7 +2144,7 @@ def _test_squared_difference(data): def _test_floor_divide(data): - """ One iteration of floor_div""" + """One iteration of floor_div""" return _test_elemwise(math_ops.floordiv, data) @@ -2154,12 +2154,12 @@ def _test_floor_divide(data): def _test_floor_mod(data): - """ One iteration of floor_mod""" + """One iteration of floor_mod""" return _test_elemwise(math_ops.floormod, data) def _test_forward_elemwise(testop): - """ Elewise""" + """Elewise""" testop( [ np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)), @@ -2301,17 +2301,17 @@ def _test_logical_binary(logical_bin_op, data): def _test_forward_logical_and(data): - """ One iteration of logical and """ + """One iteration of logical and""" return _test_logical_binary(math_ops.logical_and, data) def _test_forward_logical_or(data): - """ One iteration of logical or """ + """One iteration of logical or""" return _test_logical_binary(math_ops.logical_or, data) def _test_forward_logical_not(data): - """ One iteration of logical not """ + """One iteration of logical not""" return _test_logical_binary(math_ops.logical_not, data) @@ -2333,7 +2333,7 @@ def test_all_logical(): def _test_zeros_like(data): - """ One iteration of ZEROS LIKE """ + """One iteration of ZEROS LIKE""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out = gen_array_ops.zeros_like(in_data) @@ -2341,7 +2341,7 @@ def _test_zeros_like(data): def test_forward_zeros_like(): - """ ZEROS LIKE """ + """ZEROS LIKE""" _test_zeros_like(np.arange(6.0, dtype=np.float32).reshape((1, 6))) @@ -2351,7 +2351,7 @@ def test_forward_zeros_like(): def _test_fill(dims, value_data, value_dtype): - """ Use the fill op to create a tensor of value_data with constant dims.""" + """Use the fill op to create a tensor of value_data with constant dims.""" value_data = np.array(value_data, dtype=value_dtype) # TF 1.13 TFLite convert method does not accept empty shapes @@ -2371,7 +2371,7 @@ def _test_fill(dims, value_data, value_dtype): def test_forward_fill(): - """ Test FILL op """ + """Test FILL op""" _test_fill((1, 2, 2, 4), 5, "int32") _test_fill((1, 2, 2, 4), 5, "float32") @@ -2384,7 +2384,7 @@ def test_forward_fill(): def _test_reduce(math_op, data, keep_dims=None): - """ One iteration of reduce """ + """One iteration of reduce""" assert len(data) == 2 @@ -2396,7 +2396,7 @@ def _test_reduce(math_op, data, keep_dims=None): def _test_reduce_quantize(math_op, data, keep_dims=None): - """ One iteration of reduce """ + """One iteration of reduce""" assert len(data) == 2 @@ -2422,7 +2422,7 @@ def _test_reduce_quantize(math_op, data, keep_dims=None): def _test_reduce_min(data, keep_dims=None): - """ One iteration of reduce_min """ + """One iteration of reduce_min""" return _test_reduce(math_ops.reduce_min, data, keep_dims) @@ -2432,7 +2432,7 @@ def _test_reduce_min(data, keep_dims=None): def _test_reduce_max(data, keep_dims=None): - """ One iteration of reduce_max """ + """One iteration of reduce_max""" return 
_test_reduce(math_ops.reduce_max, data, keep_dims) @@ -2442,7 +2442,7 @@ def _test_reduce_max(data, keep_dims=None): def _test_reduce_mean(data, keep_dims=None, quantized=False): - """ One iteration of reduce_mean """ + """One iteration of reduce_mean""" if quantized: return _test_reduce_quantize(math_ops.reduce_mean, data, keep_dims) else: @@ -2455,7 +2455,7 @@ def _test_reduce_mean(data, keep_dims=None, quantized=False): def _test_reduce_prod(data, keep_dims=None): - """ One iteration of reduce_prod """ + """One iteration of reduce_prod""" return _test_reduce(math_ops.reduce_prod, data, keep_dims) @@ -2465,7 +2465,7 @@ def _test_reduce_prod(data, keep_dims=None): def _test_reduce_sum(data, keep_dims=None): - """ One iteration of reduce_sum """ + """One iteration of reduce_sum""" return _test_reduce(math_ops.reduce_sum, data, keep_dims) @@ -2475,12 +2475,12 @@ def _test_reduce_sum(data, keep_dims=None): def _test_reduce_any(data, keep_dims=None): - """ One iteration of reduce_any """ + """One iteration of reduce_any""" return _test_reduce(math_ops.reduce_any, data, keep_dims) def _test_forward_reduce(testop, dtype="float32"): - """ Reduce """ + """Reduce""" if dtype == "bool": data0 = [np.random.choice(a=[False, True], size=(16, 16, 16, 16)).astype(dtype), None] data1 = [ @@ -2529,7 +2529,7 @@ def test_all_reduce(): def _test_arg_min_max(math_op, data, axis, quantized=False): - """ One iteration of arg_min_max""" + """One iteration of arg_min_max""" with tf.Graph().as_default(): t_name = "in" @@ -2588,7 +2588,7 @@ def test_forward_select(): def _test_squeeze(data, squeeze_dims=None): - """ One iteration of squeeze """ + """One iteration of squeeze""" if squeeze_dims is None: squeeze_dims = [] @@ -2605,7 +2605,7 @@ def _test_squeeze(data, squeeze_dims=None): def test_forward_squeeze(): - """ Squeeze """ + """Squeeze""" _test_squeeze(np.arange(6).reshape((1, 2, 1, 3)), [0, 2]) _test_squeeze(np.arange(6).reshape((2, 1, 3, 1)), [1, 3]) @@ -2616,7 +2616,7 @@ def test_forward_squeeze(): def _test_quantize_dequantize(data): - """ One iteration of quantize and dequantize """ + """One iteration of quantize and dequantize""" # Keras model to force TFLite converter to insert 2 TFLite quantize ops. # First TFLite quantize op converts float32 tensor to int8 tensor - Qnn quantize. @@ -2643,7 +2643,7 @@ def representative_data_gen(): def _test_quantize_dequantize_const(data): - """ One iteration of quantize and dequantize """ + """One iteration of quantize and dequantize""" # Keras model to force TFLite converter to insert 2 TFLite quantize ops. # First TFLite quantize op converts float32 tensor to int8 tensor - Qnn quantize. 
@@ -2670,7 +2670,7 @@ def representative_data_gen(): def test_forward_quantize_dequantize(): - """ Quantize Dequantize """ + """Quantize Dequantize""" data = np.random.uniform(0, 1, (1, 4, 4, 3)).astype("float32") if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"): _test_quantize_dequantize(data) @@ -2683,7 +2683,7 @@ def test_forward_quantize_dequantize(): def _test_pad(data, mode="CONSTANT", quantized=False): - """ One iteration of PAD """ + """One iteration of PAD""" assert len(data) == 2 @@ -2713,7 +2713,7 @@ def _test_pad(data, mode="CONSTANT", quantized=False): def test_forward_pad(): - """ Pad """ + """Pad""" _test_pad( [ np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)), @@ -2767,7 +2767,7 @@ def test_forward_pad(): def _test_padv2(data, mode="CONSTANT", quantized=False): - """ One iteration of PADV2 """ + """One iteration of PADV2""" assert len(data) == 2 or len(data) == 3 @@ -2824,7 +2824,7 @@ def _test_padv2(data, mode="CONSTANT", quantized=False): def test_forward_padv2(): - """ PADV2 """ + """PADV2""" # Tests without Constant_values _test_padv2( [ @@ -2940,7 +2940,7 @@ def test_forward_padv2(): def _test_expand_dims(input_shape, input_type, axis, quantized=False): - """ One iteration of EXPAND_DIMS """ + """One iteration of EXPAND_DIMS""" with tf.Graph().as_default(): axis = ops.convert_to_tensor(axis, dtype=axis.dtype) @@ -2970,7 +2970,7 @@ def _test_expand_dims(input_shape, input_type, axis, quantized=False): def test_forward_expand_dims(): - """ EXPAND_DIMS """ + """EXPAND_DIMS""" for quantized in [False, True]: _test_expand_dims((6, 2, 7, 5), "float32", np.int32(0), quantized=quantized) _test_expand_dims((1, 2, 3), "int32", np.int32(-2), quantized=quantized) @@ -2983,7 +2983,7 @@ def test_forward_expand_dims(): def _test_one_hot(indices, depth, on_value, off_value, axis=None): - """ One iteration of One_Hot """ + """One iteration of One_Hot""" with tf.Graph().as_default(): in_indices = tf.placeholder(dtype=indices.dtype, shape=indices.shape, name="indices") in_depth = ops.convert_to_tensor(depth, dtype=depth.dtype) @@ -3004,7 +3004,7 @@ def _test_one_hot(indices, depth, on_value, off_value, axis=None): def test_forward_one_hot(): - """ One_Hot """ + """One_Hot""" _test_one_hot(np.int32(2), np.int32(8), np.int32(1), np.int32(0)) _test_one_hot(np.int32(4), np.int32(8), np.float32(1), np.float32(0)) _test_one_hot(np.array([1, 2, 3], dtype=np.int32), np.int32(8), np.int32(3), np.int32(-1)) @@ -3019,7 +3019,7 @@ def test_forward_one_hot(): def _test_pack(data, is_var, axis, quantized=False): - """ One iteration of pack """ + """One iteration of pack""" assert len(data) >= 1 assert len(data) == len(is_var) @@ -3066,7 +3066,7 @@ def _test_pack(data, is_var, axis, quantized=False): def test_forward_pack(): - """ Pack """ + """Pack""" _test_pack([np.int32(1), np.int32(5)], [False, False], 0) _test_pack([np.array([1, 4]), np.array([2, 5]), np.array([3, 6])], [True, False, False], 0) _test_pack( @@ -3103,7 +3103,7 @@ def test_forward_pack(): def _test_unpack(data, axis, num_unpacks): - """ One iteration of UNPACK """ + """One iteration of UNPACK""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out = gen_array_ops.unpack(in_data, num=num_unpacks, axis=axis, name="unpack") @@ -3112,7 +3112,7 @@ def _test_unpack(data, axis, num_unpacks): def test_forward_unpack(): - """ UNPACK """ + """UNPACK""" _test_unpack(np.array(np.random.uniform(0, 5, (3, 1)), dtype=np.int32), axis=1, num_unpacks=1) 
_test_unpack(np.array(np.random.uniform(0, 5, (3, 4)), dtype=np.float32), axis=0, num_unpacks=3) # tflite 1.13 doesn't accept negative axis @@ -3131,7 +3131,7 @@ def test_forward_unpack(): def _test_local_response_normalization(data, depth_radius, bias, alpha, beta): - """ One iteration of LOCAL_RESPONSE_NORMALIZATION """ + """One iteration of LOCAL_RESPONSE_NORMALIZATION""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") out = nn_ops.local_response_normalization( @@ -3141,7 +3141,7 @@ def _test_local_response_normalization(data, depth_radius, bias, alpha, beta): def test_forward_local_response_normalization(): - """ LOCAL_RESPONSE_NORMALIZATION """ + """LOCAL_RESPONSE_NORMALIZATION""" data = np.random.uniform(size=(1, 6, 4, 3)).astype("float32") # LOCAL_RESPONSE_NORMALIZATION come with TFLite >= 1.14.0 fbs schema if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): @@ -3154,7 +3154,7 @@ def test_forward_local_response_normalization(): def _test_l2_normalization(data, axis, fused_activation_function=None): - """ One iteration of L2_NORMALIZATION """ + """One iteration of L2_NORMALIZATION""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out = nn_impl.l2_normalize(in_data, axis) @@ -3163,7 +3163,7 @@ def _test_l2_normalization(data, axis, fused_activation_function=None): def test_forward_l2_normalization(): - """ L2_NORMALIZATION """ + """L2_NORMALIZATION""" data = np.random.uniform(size=(3, 6, 4)).astype("float32") _test_l2_normalization(data, axis=2) _test_l2_normalization(data, axis=2, fused_activation_function="RELU") @@ -3175,7 +3175,7 @@ def test_forward_l2_normalization(): def _test_logistic(data, quantized=False): - """ One iteration of LOGISTIC """ + """One iteration of LOGISTIC""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") @@ -3195,7 +3195,7 @@ def _test_logistic(data, quantized=False): def test_forward_logistic(): - """ LOGISTIC """ + """LOGISTIC""" _test_logistic(np.arange(6.0, dtype=np.float32).reshape((1, 6))) _test_logistic(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) @@ -3206,7 +3206,7 @@ def test_forward_logistic(): def _test_softmax(data): - """ One iteration of softmax """ + """One iteration of softmax""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out = nn_ops.softmax(in_data) @@ -3214,7 +3214,7 @@ def _test_softmax(data): def test_forward_softmax(): - """ Softmax """ + """Softmax""" _test_softmax(np.arange(6.0, dtype=np.float32).reshape((1, 6))) @@ -3224,7 +3224,7 @@ def test_forward_softmax(): def _test_log_softmax(data, quantized=False): - """ One iteration of log_softmax """ + """One iteration of log_softmax""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") @@ -3245,7 +3245,7 @@ def _test_log_softmax(data, quantized=False): def test_forward_log_softmax(): - """ Log_softmax """ + """Log_softmax""" _test_log_softmax(np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32)) _test_log_softmax(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) @@ -3256,7 +3256,7 @@ def test_forward_log_softmax(): def _test_tanh(data, quantized=False): - """ One iteration of TANH """ + """One iteration of TANH""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") @@ -3287,7 
+3287,7 @@ def test_forward_tanh(): def _test_relu(data, quantized=False): - """ One iteration of ReLU """ + """One iteration of ReLU""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") @@ -3308,7 +3308,7 @@ def _test_relu(data, quantized=False): def test_forward_relu(): - """ ReLU """ + """ReLU""" _test_relu(np.arange(6.0, dtype=np.float32).reshape((1, 6))) _test_relu(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) @@ -3319,7 +3319,7 @@ def test_forward_relu(): def _test_relu6(data, quantized=False): - """ One iteration of ReLU6 """ + """One iteration of ReLU6""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") @@ -3339,7 +3339,7 @@ def _test_relu6(data, quantized=False): def test_forward_relu6(): - """ ReLU6 """ + """ReLU6""" _test_relu6(np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32)) _test_relu6(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) @@ -3350,7 +3350,7 @@ def test_forward_relu6(): def _test_leaky_relu(data, alpha, quantized=False): - """ One iteration of Leaky_ReLU """ + """One iteration of Leaky_ReLU""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") @@ -3370,7 +3370,7 @@ def _test_leaky_relu(data, alpha, quantized=False): def test_forward_leaky_relu(): - """ Leaky_ReLU """ + """Leaky_ReLU""" _test_leaky_relu(np.random.uniform(-5, 5, (1, 6)).astype(np.float32), alpha=0.2) if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): _test_leaky_relu( @@ -3384,7 +3384,7 @@ def test_forward_leaky_relu(): def _test_relu_n1_to_1(data, quantized=False): - """ One iteration of ReLU_n1_to_1 """ + """One iteration of ReLU_n1_to_1""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") @@ -3405,7 +3405,7 @@ def _test_relu_n1_to_1(data, quantized=False): def test_forward_relu_n1_to_1(): - """ ReLU_n1_to_1 """ + """ReLU_n1_to_1""" _test_relu_n1_to_1(np.random.uniform(-3, 3, (1, 6)).astype(np.float32)) if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): _test_relu_n1_to_1(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) @@ -3417,7 +3417,7 @@ def test_forward_relu_n1_to_1(): def _test_prelu(data, alpha): - """ One iteration of PReLU """ + """One iteration of PReLU""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) # This specific pattern will be replaced into PRelu by tflite @@ -3426,7 +3426,7 @@ def _test_prelu(data, alpha): def test_forward_prelu(): - """ PReLU """ + """PReLU""" _test_prelu( np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"), np.full((3,), 0.2, dtype="float32"), @@ -3443,7 +3443,7 @@ def test_forward_prelu(): def _test_depthtospace(data, block_size): - """ One iteration of depth_to_space operation with given data and block size """ + """One iteration of depth_to_space operation with given data and block size""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -3464,7 +3464,7 @@ def test_forward_depthtospace(): def _test_spacetodepth(data, block_size): - """ One iteration of space_to_depth operation with given data and block size """ + """One iteration of space_to_depth operation with given data and block size""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) @@ -3483,7 
+3483,7 @@ def test_forward_spacetodepth(): def _test_reverse_sequence(shape, dtype, seq_lengths, batch_axis, seq_axis): - """ One iteration of reverse_sequence operation with given data and attributes """ + """One iteration of reverse_sequence operation with given data and attributes""" data = np.random.uniform(0, 100, size=shape).astype(dtype) with tf.Graph().as_default(): @@ -3597,7 +3597,7 @@ def _test_fully_connected( quantized=False, fp16_quantized=False, ): - """ One iteration of fully connected """ + """One iteration of fully connected""" total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) @@ -3681,7 +3681,7 @@ def _test_fully_connected( def test_forward_fully_connected(): - """ Fully Connected """ + """Fully Connected""" for input_shape, weight_shape, bias_shape in [ ([1, 4], [4, 4], None), ([1, 4], [4, 4], [4]), @@ -3712,7 +3712,7 @@ def test_forward_fully_connected(): def _test_reverse_v2(input_shape, axis, dtype): - """ One iteration of REVERSE_V2 """ + """One iteration of REVERSE_V2""" with tf.Graph().as_default(): input = np.random.randint(0, 100, size=input_shape).astype(dtype) in_input = tf.placeholder(dtype=input.dtype, shape=input.shape, name="input") @@ -3724,7 +3724,7 @@ def _test_reverse_v2(input_shape, axis, dtype): def test_forward_reverse_v2(): - """ REVERSE_V2 """ + """REVERSE_V2""" for dtype in ["float32", "int32"]: _test_reverse_v2((5), np.array([0], dtype="int32"), dtype) _test_reverse_v2((5, 6, 4, 2), np.array([2], dtype="int32"), dtype) @@ -3736,7 +3736,7 @@ def test_forward_reverse_v2(): def _test_matrix_set_diag(input_shape, input_type, quantized=False): - """ One iteration of MATRIX_SET_DIAG """ + """One iteration of MATRIX_SET_DIAG""" with tf.Graph().as_default(): diagonal_shape = list(input_shape[:-2]) diagonal_shape.append(min(input_shape[-2], input_shape[-1])) @@ -3785,7 +3785,7 @@ def _test_matrix_set_diag(input_shape, input_type, quantized=False): def test_forward_matrix_set_diag(): - """ MATRIX_SET_DIAG """ + """MATRIX_SET_DIAG""" for dtype in [np.float32, np.int32]: _test_matrix_set_diag((4, 4), dtype) _test_matrix_set_diag((5, 4, 3, 4), dtype) @@ -3802,7 +3802,7 @@ def test_forward_matrix_set_diag(): def _test_matrix_diag(diagonal_shape, dtype): - """ One iteration of MATRIX_DIAG """ + """One iteration of MATRIX_DIAG""" with tf.Graph().as_default(): diagonal = np.random.uniform(0, 100, diagonal_shape).astype(dtype) in_diagonal = tf.placeholder(dtype=diagonal.dtype, shape=diagonal.shape, name="diagonal") @@ -3815,7 +3815,7 @@ def _test_matrix_diag(diagonal_shape, dtype): def test_forward_matrix_diag(): - """ MATRIX_DIAG """ + """MATRIX_DIAG""" for dtype in [np.float32, np.int32]: _test_matrix_diag((4), dtype) _test_matrix_diag((5, 4, 3), dtype) diff --git a/tests/python/integration/test_dot.py b/tests/python/integration/test_dot.py index 7d4eca4d33d9..41abb51a2e99 100644 --- a/tests/python/integration/test_dot.py +++ b/tests/python/integration/test_dot.py @@ -38,7 +38,7 @@ def verify(target): b = tvm.nd.array(np.random.uniform(size=(nn,)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((), dtype=C.dtype), dev) f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-4) + tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()), rtol=1e-4) verify("llvm") diff --git a/tests/python/integration/test_ewise.py b/tests/python/integration/test_ewise.py index 034f89cd3fe2..1f7deb0422f7 100644 --- a/tests/python/integration/test_ewise.py +++ b/tests/python/integration/test_ewise.py 
@@ -47,7 +47,7 @@ def check_device(device, host="stackvm"): a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) fexp(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np.exp(a.asnumpy()), rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), np.exp(a.numpy()), rtol=1e-5) check_device("opencl -device=intel_graphics") check_device("cuda", "llvm") @@ -93,7 +93,7 @@ def check_device(device): ftimer = fmod.time_evaluator(fmod.entry_name, dev, number=1) tcost = ftimer(a, b, c).mean # fmod(a, b, c) - np.testing.assert_allclose(c.asnumpy(), np.mod(a.asnumpy(), b.asnumpy()), rtol=1e-5) + np.testing.assert_allclose(c.numpy(), np.mod(a.numpy(), b.numpy()), rtol=1e-5) check_device("cuda") check_device("opencl -device=intel_graphics") @@ -135,7 +135,7 @@ def check_device(device, host="stackvm"): c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) func(a0, a1, c) tvm.testing.assert_allclose( - c.asnumpy(), a0.asnumpy() + a1.asnumpy() + (a0.asnumpy() * a1.asnumpy()), rtol=1e-5 + c.numpy(), a0.numpy() + a1.numpy() + (a0.numpy() * a1.numpy()), rtol=1e-5 ) check_device("cuda", "llvm") @@ -165,7 +165,7 @@ def test_log_pow_llvm(): ftimer = flog.time_evaluator(flog.entry_name, dev, number=1, repeat=repeat) res = ftimer(a, b) assert len(res.results) == repeat - tvm.testing.assert_allclose(b.asnumpy(), np.power(np.log(a.asnumpy()), 2.0), rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), np.power(np.log(a.numpy()), 2.0), rtol=1e-5) @tvm.testing.uses_gpu @@ -196,7 +196,7 @@ def check_device(device): b = tvm.nd.array(np.zeros(shape=n, dtype=B.dtype), dev) func(a, b) tvm.testing.assert_allclose( - b.asnumpy(), list(map(lambda x: bin(x).count("1"), a.asnumpy())), rtol=1e-5 + b.numpy(), list(map(lambda x: bin(x).count("1"), a.numpy())), rtol=1e-5 ) check_device("llvm") @@ -246,7 +246,7 @@ def check_device(device): c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) ftimer = fadd.time_evaluator(fadd.entry_name, dev, number=1) tcost = ftimer(a, b, c).mean - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy(), rtol=1e-6) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy(), rtol=1e-6) check_device("opencl") check_device("cuda") @@ -293,7 +293,7 @@ def check_device(device): a = tvm.nd.array((np.random.uniform(size=m) * 256).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(m, dtype=B.dtype), dev) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 3, rtol=1e-6) + tvm.testing.assert_allclose(b.numpy(), a.numpy() + 3, rtol=1e-6) check_device("cuda") diff --git a/tests/python/integration/test_ewise_fpga.py b/tests/python/integration/test_ewise_fpga.py index fb2c6b1a3db6..6171c37b1672 100644 --- a/tests/python/integration/test_ewise_fpga.py +++ b/tests/python/integration/test_ewise_fpga.py @@ -53,7 +53,7 @@ def check_device(device, host="llvm"): a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) fexp(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np.exp(a.asnumpy()), rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), np.exp(a.numpy()), rtol=1e-5) check_device("sdaccel") if "AWS_PLATFORM" in os.environ: @@ -90,7 +90,7 @@ def check_device(device, host="llvm"): c = tvm.nd.array(np.random.uniform(size=n).astype(C.dtype), dev) d = tvm.nd.array(np.random.uniform(size=n).astype(D.dtype), dev) fadd(a, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), a.asnumpy() * 2 + b.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(d.numpy(), a.numpy() * 2 + b.numpy(), rtol=1e-5) 
check_device("sdaccel") check_device("aocl_sw_emu") diff --git a/tests/python/integration/test_gemm.py b/tests/python/integration/test_gemm.py index 5faacde30b58..aa6c5a1e74e1 100644 --- a/tests/python/integration/test_gemm.py +++ b/tests/python/integration/test_gemm.py @@ -99,7 +99,7 @@ def check_device(device): ftimer = f.time_evaluator(f.entry_name, dev, number=1) tcost = ftimer(a, b, c).mean print("%s: exec=%g sec/op" % (dev, tcost)) - tvm.testing.assert_allclose(c.asnumpy(), np.dot(a_np, b_np.T), rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), np.dot(a_np, b_np.T), rtol=1e-5) check_device("vulkan") check_device("nvptx -mcpu=sm_20") diff --git a/tests/python/integration/test_reduce.py b/tests/python/integration/test_reduce.py index 19bd03ec79ce..939d0819546b 100644 --- a/tests/python/integration/test_reduce.py +++ b/tests/python/integration/test_reduce.py @@ -54,9 +54,9 @@ def check_device(device, host="llvm"): x = tvm.nd.array(np.random.uniform(size=(n, m)).astype(A.dtype), dev) y = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) freduce(x, y) - npy = y.asnumpy() + npy = y.numpy() npy[:2] = 0 - res = np_reducer(x.asnumpy(), axis=1) + res = np_reducer(x.numpy(), axis=1) res[:2] = 0 tvm.testing.assert_allclose(npy, res, rtol=1e-4) @@ -90,8 +90,8 @@ def check_target(target="llvm"): a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros((), dtype=B.dtype), dev) fsum(a, b) - res = 10.0 + np.sum(a.asnumpy(), axis=0) - tvm.testing.assert_allclose(b.asnumpy(), res, rtol=1e-4) + res = 10.0 + np.sum(a.numpy(), axis=0) + tvm.testing.assert_allclose(b.numpy(), res, rtol=1e-4) check_target() @@ -121,8 +121,8 @@ def check_target(target="llvm"): ii = tvm.nd.array(np.random.uniform(size=(n, n)).astype(B.dtype), dev) b = tvm.nd.array(np.zeros((n, n), dtype=B.dtype), dev) mmult(a, c, ii, b) - res = ii.asnumpy() + np.matmul(a.asnumpy(), c.asnumpy()) - tvm.testing.assert_allclose(b.asnumpy(), res, rtol=1e-4) + res = ii.numpy() + np.matmul(a.numpy(), c.numpy()) + tvm.testing.assert_allclose(b.numpy(), res, rtol=1e-4) check_target() @@ -149,8 +149,8 @@ def check_target(target="llvm"): a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros((), dtype=B.dtype), dev) fsum(a, b) - res = np.sum(a.asnumpy(), axis=0) - tvm.testing.assert_allclose(b.asnumpy(), res, rtol=1e-4) + res = np.sum(a.numpy(), axis=0) + tvm.testing.assert_allclose(b.numpy(), res, rtol=1e-4) check_target() @@ -183,8 +183,8 @@ def check_target(target="llvm"): ii = tvm.nd.array(np.random.uniform(size=(n, n)).astype(B.dtype), dev) b = tvm.nd.array(np.zeros((n, n), dtype=B.dtype), dev) mmult(a, c, ii, b) - res = ii.asnumpy() + np.matmul(a.asnumpy(), c.asnumpy()) - tvm.testing.assert_allclose(b.asnumpy(), res, rtol=1e-4) + res = ii.numpy() + np.matmul(a.numpy(), c.numpy()) + tvm.testing.assert_allclose(b.numpy(), res, rtol=1e-4) check_target() @@ -211,8 +211,8 @@ def check_target(target="llvm"): a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros((), dtype=B.dtype), dev) fsum(a, b) - res = np.sum(a.asnumpy(), axis=0) - tvm.testing.assert_allclose(b.asnumpy(), res, rtol=1e-4) + res = np.sum(a.numpy(), axis=0) + tvm.testing.assert_allclose(b.numpy(), res, rtol=1e-4) check_target() @@ -255,9 +255,9 @@ def check_target(device, host="stackvm"): a = tvm.nd.array(np.random.uniform(size=(m, n)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(m, dtype=B.dtype), dev) fsum(a, b) - res = np.sum(a.asnumpy(), axis=1) + res = 
np.sum(a.numpy(), axis=1) res[:2] = 0 - tvm.testing.assert_allclose(b.asnumpy(), res, rtol=1e-4) + tvm.testing.assert_allclose(b.numpy(), res, rtol=1e-4) check_target("vulkan") check_target("cuda") @@ -306,8 +306,8 @@ def check_target(device, host="stackvm"): a = tvm.nd.array(np.random.uniform(size=(m, n)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(m, dtype=B.dtype), dev) fsum(a, b) - res = np.sum(a.asnumpy(), axis=1) + 2 - tvm.testing.assert_allclose(b.asnumpy(), res, rtol=1e-4) + res = np.sum(a.numpy(), axis=1) + 2 + tvm.testing.assert_allclose(b.numpy(), res, rtol=1e-4) check_target("vulkan") check_target("cuda") @@ -354,7 +354,7 @@ def check_target(): nd_res0 = tvm.nd.array(np.zeros(mm, dtype="int32"), dev) nd_res1 = tvm.nd.array(np.zeros(mm, dtype="float32"), dev) fargmax(nd_idx, nd_val, nd_res0, nd_res1) - tvm.testing.assert_allclose(np_res, nd_res0.asnumpy()) + tvm.testing.assert_allclose(np_res, nd_res0.numpy()) check_target() @@ -411,7 +411,7 @@ def check_target(device): nd_res0 = tvm.nd.array(np.zeros(mm, dtype="int32"), dev) nd_res1 = tvm.nd.array(np.zeros(mm, dtype="float32"), dev) fargmax(nd_idx, nd_val, nd_res0, nd_res1) - tvm.testing.assert_allclose(np_res, nd_res0.asnumpy()) + tvm.testing.assert_allclose(np_res, nd_res0.numpy()) check_target("cuda") check_target("vulkan") @@ -456,7 +456,7 @@ def check_target(device, m, n): b = tvm.nd.array(b_np, dev) b_np = np.max(a_np, axis=1) func(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3) check_target("cuda", m=32, n=256) check_target("cuda", m=10, n=20) @@ -517,8 +517,8 @@ def check_target(device): func(a0, a1, t0, t1) t0_np = np.sum(a0_np, axis=1) t1_np = np.product(a1_np, axis=1) - tvm.testing.assert_allclose(t0.asnumpy(), t0_np, rtol=1e-3, atol=1e-3) - tvm.testing.assert_allclose(t1.asnumpy(), t1_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(t0.numpy(), t0_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(t1.numpy(), t1_np, rtol=1e-3, atol=1e-3) check_target("cuda") check_target("rocm") diff --git a/tests/python/integration/test_scan.py b/tests/python/integration/test_scan.py index 54a8f1e92ed1..edeb862cd5fc 100644 --- a/tests/python/integration/test_scan.py +++ b/tests/python/integration/test_scan.py @@ -61,7 +61,7 @@ def check_device(device): a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros((m, n), dtype=res.dtype), dev) fscan(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np.cumsum(a_np, axis=0)) + tvm.testing.assert_allclose(b.numpy(), np.cumsum(a_np, axis=0)) check_device("vulkan") check_device("cuda") diff --git a/tests/python/integration/test_winograd_nnpack.py b/tests/python/integration/test_winograd_nnpack.py index aa018616c87f..71091f69d964 100644 --- a/tests/python/integration/test_winograd_nnpack.py +++ b/tests/python/integration/test_winograd_nnpack.py @@ -108,7 +108,7 @@ def check_device(device): % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-4) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4) for device in devices: check_device(device) diff --git a/tests/python/nightly/quantization/test_quantization_accuracy.py b/tests/python/nightly/quantization/test_quantization_accuracy.py index 5cf4dfee7f71..6109104bd32b 100644 --- a/tests/python/nightly/quantization/test_quantization_accuracy.py +++ b/tests/python/nightly/quantization/test_quantization_accuracy.py @@ -111,10 +111,10 @@ def 
eval_acc( # Execute for i, batch in enumerate(dataset): data, label = batch_fn(batch, [mx.cpu(0)]) - m.run(data=data[0].asnumpy()) + m.run(data=data[0].numpy()) out_arr = m.get_output(0) - acc_top1.update(label, [mx.nd.array(out_arr.asnumpy())]) - acc_top5.update(label, [mx.nd.array(out_arr.asnumpy())]) + acc_top1.update(label, [mx.nd.array(out_arr.numpy())]) + acc_top5.update(label, [mx.nd.array(out_arr.numpy())]) if not (i + 1) % log_interval: _, top1 = acc_top1.get() diff --git a/tests/python/nightly/quantization/test_quantization_accuracy_for_vit.py b/tests/python/nightly/quantization/test_quantization_accuracy_for_vit.py index 2d581d8c2acd..8cecbf97c001 100644 --- a/tests/python/nightly/quantization/test_quantization_accuracy_for_vit.py +++ b/tests/python/nightly/quantization/test_quantization_accuracy_for_vit.py @@ -34,7 +34,7 @@ def calibrate_dataset(model_name, rec_val, batch_size, calibration_samples): for i, batch in enumerate(val_data): if i * batch_size >= calibration_samples: break - data = batch.data[0].asnumpy() + data = batch.data[0].numpy() yield {"data": data} diff --git a/tests/python/relay/aot/aot_test_utils.py b/tests/python/relay/aot/aot_test_utils.py index 8c7aefe70d09..c1917674873d 100644 --- a/tests/python/relay/aot/aot_test_utils.py +++ b/tests/python/relay/aot/aot_test_utils.py @@ -243,5 +243,5 @@ def generate_ref_data(mod, input_data, params=None, target="llvm"): grt_mod.set_input(**input_data) grt_mod.run() output_count = grt_mod.get_num_outputs() - out = [grt_mod.get_output(i).asnumpy() for i in range(output_count)] + out = [grt_mod.get_output(i).numpy() for i in range(output_count)] return out diff --git a/tests/python/relay/benchmarking/benchmark_vm.py b/tests/python/relay/benchmarking/benchmark_vm.py index 44ce9be766d2..12edbdac5f23 100644 --- a/tests/python/relay/benchmarking/benchmark_vm.py +++ b/tests/python/relay/benchmarking/benchmark_vm.py @@ -58,7 +58,7 @@ def get_graph_executor_output( % (np.mean(prof_res), np.std(prof_res)) ) - return out.asnumpy() + return out.numpy() def get_vm_output(mod, data, params, target, dev, dtype="float32", number=2, repeat=20): with tvm.transform.PassContext(opt_level=3): @@ -76,7 +76,7 @@ def get_vm_output(mod, data, params, target, dev, dtype="float32", number=2, rep % (np.mean(prof_res), np.std(prof_res)) ) - return result.asnumpy().astype(dtype) + return result.numpy().astype(dtype) # random input data = np.random.uniform(size=data_shape).astype(dtype) diff --git a/tests/python/relay/dyn/test_dynamic_op_level10.py b/tests/python/relay/dyn/test_dynamic_op_level10.py index 9bcb656bd246..ad9a0ecd4e59 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level10.py +++ b/tests/python/relay/dyn/test_dynamic_op_level10.py @@ -51,7 +51,7 @@ def verify_more_dynamic_broadcast_to(x_shape, out_shape): op_res = intrp.evaluate(func)( x, np.array(x_shape).astype(shape_type), np.array(out_shape).astype(shape_type) ) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_more_dynamic_broadcast_to((4, 3), (3, 4, 3)) @@ -75,7 +75,7 @@ def verify_broadcast_to(x_shape, out_shape): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate(func)(x, np.array(out_shape).astype(shape_type)) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_broadcast_to((1,), (1, 1, 1)) verify_broadcast_to((1, 
1), (4, 1, 1)) @@ -105,7 +105,7 @@ def test_dyn_broadcast_to(): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate(func)(x, np.array(dyn_shape).astype(shape_type)) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -138,7 +138,7 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) out_relay = intrp.evaluate()(indices_np, np.array(depth).astype("int32")) - tvm.testing.assert_allclose(out_relay.asnumpy(), out_np) + tvm.testing.assert_allclose(out_relay.numpy(), out_np) _verify((3,), 3, 1, 0, -1, "int32") _verify((3,), 3, 1.0, 0.0, -1, "float32") diff --git a/tests/python/relay/dyn/test_dynamic_op_level2.py b/tests/python/relay/dyn/test_dynamic_op_level2.py index c0cbce17e9d0..dca5dd6d4384 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level2.py +++ b/tests/python/relay/dyn/test_dynamic_op_level2.py @@ -63,7 +63,7 @@ def verify_upsampling(dshape, scale_h, scale_w, layout, method, align_corners=Fa op_res = intrp.evaluate()( x_data, np.array(scale_h).astype("float32"), np.array(scale_w).astype("float32") ) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4, atol=1e-6) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6) verify_upsampling((1, 16, 32, 32), 3, 2.0, "NCHW", "nearest_neighbor") verify_upsampling((1, 16, 32, 32), 5, 2.0, "NCHW", "bilinear", True) @@ -135,7 +135,7 @@ def verify_upsampling3d( np.array(scale_h).astype("float32"), np.array(scale_w).astype("float32"), ) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4, atol=1e-6) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6) verify_upsampling3d((1, 1, 1, 1, 1), 2, 3, 4, "NCDHW", "nearest_neighbor") verify_upsampling3d((1, 8, 16, 16, 16), 2.0, 3.0, 4.0, "NCDHW", "nearest_neighbor") diff --git a/tests/python/relay/dyn/test_dynamic_op_level3.py b/tests/python/relay/dyn/test_dynamic_op_level3.py index 78d12f9b1c8e..3673f08cf8b2 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level3.py +++ b/tests/python/relay/dyn/test_dynamic_op_level3.py @@ -38,9 +38,9 @@ def verify_func(func, data, ref_res, target_device=tvm.testing.enabled_targets() ref_res ), "Outputs from TVM and Python implementation must be equal " for op_result, ref_result in zip(op_res, ref_res): - tvm.testing.assert_allclose(op_result.asnumpy(), ref_result, rtol=1e-5) + tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5) else: - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) relay.backend.compile_engine.get().clear() diff --git a/tests/python/relay/dyn/test_dynamic_op_level4.py b/tests/python/relay/dyn/test_dynamic_op_level4.py index 01e5056c72cb..f5afbd7588fd 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level4.py +++ b/tests/python/relay/dyn/test_dynamic_op_level4.py @@ -68,7 +68,7 @@ def verify(dshape, begin, end, strides, slice_mode="end", test_ref=True, dtype=" mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor("vm", mod=mod, device=dev, target=target) op_res = intrp.evaluate()(*data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) verify( (1, 224, 224, 3), diff --git 
a/tests/python/relay/dyn/test_dynamic_op_level5.py b/tests/python/relay/dyn/test_dynamic_op_level5.py index c49ac9680266..78e2c232c08e 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level5.py +++ b/tests/python/relay/dyn/test_dynamic_op_level5.py @@ -65,7 +65,7 @@ def verify_resize(dshape, scale, method, layout): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_data, size) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4, atol=1e-6) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6) for method in ["bilinear", "nearest_neighbor"]: for layout in ["NCHW", "NHWC"]: diff --git a/tests/python/relay/dyn/test_dynamic_op_level6.py b/tests/python/relay/dyn/test_dynamic_op_level6.py index 9ceb9ab9db97..03823062eab7 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level6.py +++ b/tests/python/relay/dyn/test_dynamic_op_level6.py @@ -58,12 +58,12 @@ def verify_topk(k, axis, ret_type, is_ascend, dtype): intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(np_data, np.array([k]).astype("float32")) if ret_type == "both": - tvm.testing.assert_allclose(op_res[0].asnumpy(), np_values) - tvm.testing.assert_allclose(op_res[1].asnumpy(), np_indices) + tvm.testing.assert_allclose(op_res[0].numpy(), np_values) + tvm.testing.assert_allclose(op_res[1].numpy(), np_indices) elif ret_type == "values": - tvm.testing.assert_allclose(op_res.asnumpy(), np_values) + tvm.testing.assert_allclose(op_res.numpy(), np_values) else: - tvm.testing.assert_allclose(op_res.asnumpy(), np_indices) + tvm.testing.assert_allclose(op_res.numpy(), np_indices) np.random.seed(0) for k in [0, 1, 5]: diff --git a/tests/python/relay/test_adt.py b/tests/python/relay/test_adt.py index c432596a2a9f..51f46799e606 100644 --- a/tests/python/relay/test_adt.py +++ b/tests/python/relay/test_adt.py @@ -101,7 +101,7 @@ def tree_to_dict(t): def vmobj_to_list(o, dtype="float32"): if isinstance(o, tvm.nd.NDArray): - return [o.asnumpy().tolist()] + return [o.numpy().tolist()] elif isinstance(o, tvm.runtime.container.ADT): if len(o) == 0: tensor_nil = p.get_var("tensor_nil", dtype=dtype) @@ -124,7 +124,7 @@ def vmobj_to_list(o, dtype="float32"): elif "tensor_nil" in o.constructor.name_hint: return [0] elif "tensor" in o.constructor.name_hint: - return [o.fields[0].asnumpy()] + return [o.fields[0].numpy()] else: raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint) else: @@ -133,7 +133,7 @@ def vmobj_to_list(o, dtype="float32"): # turns a scalar-valued relay tensor value into a python number def get_scalar(tv): - return tv.asnumpy().item() + return tv.numpy().item() # @tvm.testing.uses_gpu diff --git a/tests/python/relay/test_analysis_get_calibration_data.py b/tests/python/relay/test_analysis_get_calibration_data.py index 66500f84db2c..8ac36f7bebaf 100644 --- a/tests/python/relay/test_analysis_get_calibration_data.py +++ b/tests/python/relay/test_analysis_get_calibration_data.py @@ -75,13 +75,13 @@ def test_simple_graph(): # Check the number and orders check_data_size(mod, data) - tvm.testing.assert_allclose(data[g0]["inputs"][0].asnumpy(), x_data) - tvm.testing.assert_allclose(data[g0]["inputs"][1].asnumpy(), y_data) - tvm.testing.assert_allclose(data[g0]["outputs"][0].asnumpy(), x_data + y_data) - tvm.testing.assert_allclose(data[g0]["outputs"][1].asnumpy(), x_data - y_data) - tvm.testing.assert_allclose(data[g1]["inputs"][0].asnumpy(), x_data + y_data) - 
tvm.testing.assert_allclose(data[g1]["inputs"][1].asnumpy(), z_data) - tvm.testing.assert_allclose(data[g1]["outputs"][0].asnumpy(), x_data + y_data - z_data) + tvm.testing.assert_allclose(data[g0]["inputs"][0].numpy(), x_data) + tvm.testing.assert_allclose(data[g0]["inputs"][1].numpy(), y_data) + tvm.testing.assert_allclose(data[g0]["outputs"][0].numpy(), x_data + y_data) + tvm.testing.assert_allclose(data[g0]["outputs"][1].numpy(), x_data - y_data) + tvm.testing.assert_allclose(data[g1]["inputs"][0].numpy(), x_data + y_data) + tvm.testing.assert_allclose(data[g1]["inputs"][1].numpy(), z_data) + tvm.testing.assert_allclose(data[g1]["outputs"][0].numpy(), x_data + y_data - z_data) def test_mobilenet_dnnl(): diff --git a/tests/python/relay/test_any.py b/tests/python/relay/test_any.py index 7d1c577234b0..11f4515fbb1e 100644 --- a/tests/python/relay/test_any.py +++ b/tests/python/relay/test_any.py @@ -60,9 +60,9 @@ def check_result( ex = relay.create_executor(kind, mod=mod, device=dev, target=tgt) result = ex.evaluate()(*args) if isinstance(result, tvm.runtime.container.ADT): - result = [r.asnumpy() for r in result] + result = [r.numpy() for r in result] else: - result = [result.asnumpy()] + result = [result.numpy()] for r, e in zip(result, expected): if assert_shape: @@ -764,9 +764,9 @@ def verify_any_split(data_shape, indices_or_sections, axis, static_data_shape, r ex = relay.create_executor(kind, mod=mod, device=tvm.cpu(), target="llvm") result = ex.evaluate()(data_np) for ret, ref_ret in zip(result, ref_out_shape): - assert ret.asnumpy().shape == ref_ret, "Shape mismatch: expect %s but got %s." % ( + assert ret.numpy().shape == ref_ret, "Shape mismatch: expect %s but got %s." % ( str(ref_ret), - str(ret.asnumpy().shape), + str(ret.numpy().shape), ) diff --git a/tests/python/relay/test_auto_scheduler_layout_rewrite_networks.py b/tests/python/relay/test_auto_scheduler_layout_rewrite_networks.py index 1d2c61fb87a9..6a9d9a5cf0ad 100644 --- a/tests/python/relay/test_auto_scheduler_layout_rewrite_networks.py +++ b/tests/python/relay/test_auto_scheduler_layout_rewrite_networks.py @@ -173,7 +173,7 @@ def get_output(data, lib): module.set_input("data", data) module.run() - return module.get_output(0).asnumpy() + return module.get_output(0).numpy() # Check correctness actual_output = get_output(data, lib) diff --git a/tests/python/relay/test_auto_scheduler_tuning.py b/tests/python/relay/test_auto_scheduler_tuning.py index d3c54fd11769..bbf3c48d5e3f 100644 --- a/tests/python/relay/test_auto_scheduler_tuning.py +++ b/tests/python/relay/test_auto_scheduler_tuning.py @@ -73,7 +73,7 @@ def get_output(data, lib): module = graph_executor.GraphModule(lib["default"](dev)) module.set_input("data", data) module.run() - return module.get_output(0).asnumpy() + return module.get_output(0).numpy() np.random.seed(0) if network == "mlp": diff --git a/tests/python/relay/test_backend_compile_engine.py b/tests/python/relay/test_backend_compile_engine.py index 42b6373b45f1..b90bce548a5e 100644 --- a/tests/python/relay/test_backend_compile_engine.py +++ b/tests/python/relay/test_backend_compile_engine.py @@ -190,7 +190,7 @@ def get_func(shape): x = tvm.nd.array(np.ones(10).astype("float32"), device=dev) y = tvm.nd.empty((10,), device=dev) f(x, y) - tvm.testing.assert_allclose(y.asnumpy(), x.asnumpy() * 3) + tvm.testing.assert_allclose(y.numpy(), x.numpy() * 3) engine.dump() diff --git a/tests/python/relay/test_backend_graph_executor.py b/tests/python/relay/test_backend_graph_executor.py index 06623e0baa24..4ec1c21467fc 
100644 --- a/tests/python/relay/test_backend_graph_executor.py +++ b/tests/python/relay/test_backend_graph_executor.py @@ -44,8 +44,8 @@ def check_rts(expr, args, expected_result, mod=None): graph = relay.create_executor("graph", mod=mod) eval_result = intrp.evaluate(expr)(*args) rts_result = graph.evaluate(expr)(*args) - tvm.testing.assert_allclose(eval_result.asnumpy(), rts_result.asnumpy()) - tvm.testing.assert_allclose(eval_result.asnumpy(), expected_result) + tvm.testing.assert_allclose(eval_result.numpy(), rts_result.numpy()) + tvm.testing.assert_allclose(eval_result.numpy(), expected_result) def test_add_op_scalar(): @@ -107,7 +107,7 @@ def test_with_params(): mod.set_input(**params) mod.set_input(x=x_data) mod.run() - res = mod.get_output(0).asnumpy() + res = mod.get_output(0).numpy() ref_res = np.exp(y_data + x_data) tvm.testing.assert_allclose(res, ref_res, atol=1e-5, rtol=1e-5) @@ -198,9 +198,9 @@ def test_reshape_nop(): ) ) z2_np = np.abs(x_data).reshape(1, 40) - tvm.testing.assert_allclose(gmod.get_output(0).asnumpy(), z0_np) - tvm.testing.assert_allclose(gmod.get_output(1).asnumpy(), z1_np) - tvm.testing.assert_allclose(gmod.get_output(2).asnumpy(), z2_np) + tvm.testing.assert_allclose(gmod.get_output(0).numpy(), z0_np) + tvm.testing.assert_allclose(gmod.get_output(1).numpy(), z1_np) + tvm.testing.assert_allclose(gmod.get_output(2).numpy(), z2_np) @tvm.testing.uses_gpu @@ -236,7 +236,7 @@ def unit_numpy(X, W): m.set_input("y", tvm.nd.array(y.astype(dtype))) m.set_input(**params) m.run() - out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy() + out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy() ref = unit_numpy(x, y) tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5) @@ -261,7 +261,7 @@ def test_compile_nested_tuples(): ref = x_data + 1 for i in range(mod.get_num_outputs()): - out = mod.get_output(i).asnumpy() + out = mod.get_output(i).numpy() tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5) ref = ref + 1 @@ -279,12 +279,12 @@ def test_graph_executor_nested_tuples(): data = [np.random.uniform(size=(2, 3)).astype("float32") for _ in "xyzw"] out = f(*data) assert len(out) == 2 - tvm.testing.assert_allclose(out[0].asnumpy(), data[0]) + tvm.testing.assert_allclose(out[0].numpy(), data[0]) assert len(out[1]) == 2 - tvm.testing.assert_allclose(out[1][0].asnumpy(), data[1]) + tvm.testing.assert_allclose(out[1][0].numpy(), data[1]) assert len(out[1][1]) == 2 - tvm.testing.assert_allclose(out[1][1][0].asnumpy(), data[2]) - tvm.testing.assert_allclose(out[1][1][1].asnumpy(), data[3]) + tvm.testing.assert_allclose(out[1][1][0].numpy(), data[2]) + tvm.testing.assert_allclose(out[1][1][1].numpy(), data[3]) if __name__ == "__main__": diff --git a/tests/python/relay/test_backend_interpreter.py b/tests/python/relay/test_backend_interpreter.py index b5d76030f41d..d65bcad3364d 100644 --- a/tests/python/relay/test_backend_interpreter.py +++ b/tests/python/relay/test_backend_interpreter.py @@ -35,14 +35,14 @@ def check_eval(expr, args, expected_result, mod=None, rtol=1e-07): intrp = create_executor(mod=mod, device=dev, target=target) result = intrp.evaluate(expr)(*args) # use tvm.testing which also set atol - tvm.testing.assert_allclose(result.asnumpy(), expected_result, rtol=rtol) + tvm.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol) def test_tuple_value(): tv = container.tuple_object([relay.const(1), relay.const(2), relay.const(3)]) - np.testing.assert_allclose(tv[0].data.asnumpy(), 1) - 
np.testing.assert_allclose(tv[1].data.asnumpy(), 2) - np.testing.assert_allclose(tv[2].data.asnumpy(), 3) + np.testing.assert_allclose(tv[0].data.numpy(), 1) + np.testing.assert_allclose(tv[1].data.numpy(), 2) + np.testing.assert_allclose(tv[2].data.numpy(), 3) def test_tuple_getitem(): @@ -148,7 +148,7 @@ def test_binds(): y = relay.add(x, x) intrp = create_executor("debug") xx = np.ones((10, 20)) - res = intrp.evaluate(y, binds={x: xx}).asnumpy() + res = intrp.evaluate(y, binds={x: xx}).numpy() tvm.testing.assert_allclose(xx + xx, res) @@ -163,7 +163,7 @@ def test_kwargs_params(): params = {"y": y_data, "z": z_data} intrp = create_executor("debug") res = intrp.evaluate(f)(x_data, **params) - tvm.testing.assert_allclose(res.asnumpy(), x_data + y_data + z_data) + tvm.testing.assert_allclose(res.numpy(), x_data + y_data + z_data) def test_function_taking_adt_ref_tuple(): @@ -193,17 +193,17 @@ def test_function_taking_adt_ref_tuple(): res_cons = id_func(cons_value) assert res_cons.tag == cons_value.tag assert len(res_cons.fields) == len(cons_value.fields) - tvm.testing.assert_allclose(res_cons.fields[0].asnumpy(), cons_value.fields[0].asnumpy()) + tvm.testing.assert_allclose(res_cons.fields[0].numpy(), cons_value.fields[0].numpy()) assert isinstance(res_cons.fields[1], ConstructorValue) assert res_cons.fields[1].tag == nil.tag assert len(res_cons.fields[1].fields) == 0 res_ref = id_func(ref_value) - tvm.testing.assert_allclose(res_ref.value.asnumpy(), ref_value.value.asnumpy()) + tvm.testing.assert_allclose(res_ref.value.numpy(), ref_value.value.numpy()) res_tuple = id_func(tuple_value) for i in range(10): - tvm.testing.assert_allclose(res_tuple[i].asnumpy(), tuple_value[i].asnumpy()) + tvm.testing.assert_allclose(res_tuple[i].numpy(), tuple_value[i].numpy()) def test_tuple_passing(): @@ -226,11 +226,11 @@ def test_tuple_passing(): f = exec.evaluate(gv) # First use a Python tuple. out = f((10, 8)) - tvm.testing.assert_allclose(out.asnumpy(), np.array(10)) + tvm.testing.assert_allclose(out.numpy(), np.array(10)) # Second use a tuple value. 
value_tuple = container.tuple_object([nd.array(np.array(11)), nd.array(np.array(12))]) out = f(value_tuple) - tvm.testing.assert_allclose(out.asnumpy(), np.array(11)) + tvm.testing.assert_allclose(out.numpy(), np.array(11)) if __name__ == "__main__": diff --git a/tests/python/relay/test_const.py b/tests/python/relay/test_const.py index 14fff0f7e65e..c815f6bd4fa4 100644 --- a/tests/python/relay/test_const.py +++ b/tests/python/relay/test_const.py @@ -32,13 +32,13 @@ def test_const_dtype(): a = tvm.nd.array(np.random.randint(0, high=255, size=(2, 3), dtype="uint8")) a = _op.const(a, dtype="uint8") - aa = a.data.asnumpy() + aa = a.data.numpy() assert aa.dtype == np.dtype(np.uint8) b = _op.const(1, dtype="int8") - bb = b.data.asnumpy() + bb = b.data.numpy() assert bb.dtype == np.dtype(np.int8) kshape = (3, 10, 3, 3) w = relay.const(np.zeros(kshape, dtype="float32")) - assert w.data.asnumpy().dtype == np.dtype(np.float32) + assert w.data.numpy().dtype == np.dtype(np.float32) diff --git a/tests/python/relay/test_cpp_build_module.py b/tests/python/relay/test_cpp_build_module.py index 0d98cc0ed7ff..23bc7ca95a34 100644 --- a/tests/python/relay/test_cpp_build_module.py +++ b/tests/python/relay/test_cpp_build_module.py @@ -54,8 +54,8 @@ def test_basic_build(): out = rt.get_output(0) np.testing.assert_allclose( - out.asnumpy(), - np.maximum(np.dot(A.asnumpy(), B.asnumpy().T), 0) + C.asnumpy(), + out.numpy(), + np.maximum(np.dot(A.numpy(), B.numpy().T), 0) + C.numpy(), atol=1e-5, rtol=1e-5, ) @@ -90,7 +90,7 @@ def test_fp16_build(): rt.run() out = rt.get_output(0) - np.testing.assert_allclose(out.asnumpy(), X.asnumpy() + Y.asnumpy(), atol=1e-5, rtol=1e-5) + np.testing.assert_allclose(out.numpy(), X.numpy() + Y.numpy(), atol=1e-5, rtol=1e-5) @tvm.testing.parametrize_targets("llvm", "cuda") @@ -119,7 +119,7 @@ def test_fp16_conversion(target, dev): rt.run() out = rt.get_output(0) - np.testing.assert_allclose(out.asnumpy(), X.asnumpy().astype(dst), atol=1e-5, rtol=1e-5) + np.testing.assert_allclose(out.numpy(), X.numpy().astype(dst), atol=1e-5, rtol=1e-5) if __name__ == "__main__": diff --git a/tests/python/relay/test_dataflow_pattern.py b/tests/python/relay/test_dataflow_pattern.py index 8e2c74ab44b8..229b9905050c 100644 --- a/tests/python/relay/test_dataflow_pattern.py +++ b/tests/python/relay/test_dataflow_pattern.py @@ -899,9 +899,7 @@ def callback(self, pre, post, node_map): beta = node_map[self.beta][0] gamma = node_map[self.gamma][0] eps = node_map[self.eps][0] - return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=eps.data.asnumpy().item())[ - 0 - ] + return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=eps.data.numpy().item())[0] def test_fuse_batchnorm(): diff --git a/tests/python/relay/test_debug.py b/tests/python/relay/test_debug.py index 60feaf1bf37b..c4ed657701ae 100644 --- a/tests/python/relay/test_debug.py +++ b/tests/python/relay/test_debug.py @@ -34,7 +34,7 @@ def did_exec(x): prog = debug(x, debug_func=did_exec) result = ex.evaluate(prog, {x: const(1, "int32")}) assert _test_debug_hit - assert result.asnumpy() == 1 + assert result.numpy() == 1 def test_debug_with_expr(): @@ -51,4 +51,4 @@ def did_exec(x): prog = debug(x + x * x, debug_func=did_exec) result = ex.evaluate(prog, {x: const(2, "int32")}) assert _test_debug_hit - assert result.asnumpy() == 6 + assert result.numpy() == 6 diff --git a/tests/python/relay/test_external_codegen.py b/tests/python/relay/test_external_codegen.py index 156abfc4c22a..84e2fa305bfe 100644 --- 
a/tests/python/relay/test_external_codegen.py +++ b/tests/python/relay/test_external_codegen.py @@ -61,7 +61,7 @@ def check_vm_result(): exe = runtime.vm.Executable.load_exec(code, lib) vm = runtime.vm.VirtualMachine(exe, device) out = vm.run(**map_inputs) - tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol) def check_graph_executor_result(): with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): @@ -75,7 +75,7 @@ def check_graph_executor_result(): out = tvm.nd.empty(out_shape, device=device) out = rt_mod.get_output(0, out) - tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol) check_vm_result() check_graph_executor_result() @@ -293,7 +293,7 @@ def test_extern_dnnl(): ref_ex = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()) ref_res = ref_ex.evaluate()(i_data, w_data, w_data) check_result( - mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5 + mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5 ) @@ -331,7 +331,7 @@ def test_extern_dnnl_const(): ref_ex = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()) ref_res = ref_ex.evaluate()(i_data) - check_result(mod, {"data0": i_data}, (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5) + check_result(mod, {"data0": i_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5) def test_load_params_with_constants_in_ext_codegen(): diff --git a/tests/python/relay/test_ir_nodes.py b/tests/python/relay/test_ir_nodes.py index 1ba39b0eb8aa..6d2ac21cc7ff 100644 --- a/tests/python/relay/test_ir_nodes.py +++ b/tests/python/relay/test_ir_nodes.py @@ -143,7 +143,7 @@ def test_function_attrs(): assert key1.name_hint == key2.name_hint p1 = model_params[key1] p2 = model_params_after[key2] - np.testing.assert_allclose(p1.data.asnumpy(), p2.data.asnumpy()) + np.testing.assert_allclose(p1.data.numpy(), p2.data.numpy()) def test_call(): diff --git a/tests/python/relay/test_ir_parser.py b/tests/python/relay/test_ir_parser.py index 04cc2c0e79e4..c75a0f461eb6 100644 --- a/tests/python/relay/test_ir_parser.py +++ b/tests/python/relay/test_ir_parser.py @@ -118,7 +118,7 @@ def assert_parse_module_as(code, mod): def get_scalar(x): # type: (relay.Constant) -> (Union[float, int, bool]) - return x.data.asnumpy().item() + return x.data.numpy().item() int32 = relay.scalar_type("int32") diff --git a/tests/python/relay/test_json_runtime.py b/tests/python/relay/test_json_runtime.py index bf5676d096f1..52e082e27b74 100644 --- a/tests/python/relay/test_json_runtime.py +++ b/tests/python/relay/test_json_runtime.py @@ -58,7 +58,7 @@ def check_result( rt_mod.run() out = tvm.nd.empty(out_shape, device=device) out = rt_mod.get_output(0, out) - ref_result = out.asnumpy() + ref_result = out.numpy() def check_vm_result(): compile_engine.get().clear() @@ -68,7 +68,7 @@ def check_vm_result(): exe = runtime.vm.Executable.load_exec(code, lib) vm = runtime.vm.VirtualMachine(exe, device) out = vm.run(**map_inputs) - tvm.testing.assert_allclose(out.asnumpy(), ref_result, rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), ref_result, rtol=tol, atol=tol) def check_graph_executor_result(): compile_engine.get().clear() @@ -82,7 +82,7 @@ def check_graph_executor_result(): rt_mod.run() out = tvm.nd.empty(out_shape, device=device) out = rt_mod.get_output(0, out) - tvm.testing.assert_allclose(out.asnumpy(), 
ref_result, rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), ref_result, rtol=tol, atol=tol) check_vm_result() check_graph_executor_result() diff --git a/tests/python/relay/test_memory_passes.py b/tests/python/relay/test_memory_passes.py index 546aaf51f734..7ad72a35a1a0 100644 --- a/tests/python/relay/test_memory_passes.py +++ b/tests/python/relay/test_memory_passes.py @@ -41,13 +41,13 @@ def check_memory_plan(func, check_fn): plan_result = ex.evaluate(mod["main"])(*args) # Compute Python result. - py_res = check_fn(*[arg.asnumpy() for arg in args]) + py_res = check_fn(*[arg.numpy() for arg in args]) # First check that the two VM results agree. - np.testing.assert_allclose(no_plan_result.asnumpy(), plan_result.asnumpy()) + np.testing.assert_allclose(no_plan_result.numpy(), plan_result.numpy()) # Finally check that the results match the Python result. - np.testing.assert_allclose(plan_result.asnumpy(), py_res) + np.testing.assert_allclose(plan_result.numpy(), py_res) def storage_type(mod): diff --git a/tests/python/relay/test_op_fast_math.py b/tests/python/relay/test_op_fast_math.py index 7bcbc6839c4f..8e401bc5670a 100644 --- a/tests/python/relay/test_op_fast_math.py +++ b/tests/python/relay/test_op_fast_math.py @@ -51,7 +51,7 @@ def test_apply(relay_op, name, f_numpy, low, high, step, dtype="float32"): m.run() # Get outputs tvm_output = m.get_output(0) - tvm.testing.assert_allclose(tvm_output.asnumpy(), b_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(tvm_output.numpy(), b_np, rtol=1e-5, atol=1e-5) test_apply(relay.exp, "fast_exp", np.exp, low=-88, high=88, step=0.01) test_apply(relay.erf, "fast_erf", scipy.special.erf, low=-10, high=10, step=0.01) diff --git a/tests/python/relay/test_op_grad_level1.py b/tests/python/relay/test_op_grad_level1.py index 6c6c727c788f..686c0ea556c3 100644 --- a/tests/python/relay/test_op_grad_level1.py +++ b/tests/python/relay/test_op_grad_level1.py @@ -56,7 +56,7 @@ def check_single_op(opfunc, ref, dtype): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor(device=dev, target=target) op_res, (op_grad, _) = intrp.evaluate(bwd_func)(data, grad_in) - np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01) + np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01) for opfunc, ref in [ (tvm.relay.log, lambda x, g: g * (1 / x)), @@ -107,8 +107,8 @@ def check_binary_op(opfunc, ref, dtype): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor(device=dev, target=target) op_res, (op_grad0, op_grad1) = intrp.evaluate(bwd_func)(x_data, y_data) - np.testing.assert_allclose(op_grad0.asnumpy(), ref_grad0, rtol=0.01) - np.testing.assert_allclose(op_grad1.asnumpy(), ref_grad1, rtol=0.01) + np.testing.assert_allclose(op_grad0.numpy(), ref_grad0, rtol=0.01) + np.testing.assert_allclose(op_grad1.numpy(), ref_grad1, rtol=0.01) for opfunc, ref in [ (relay.add, lambda x, y: [np.ones_like(x), np.ones_like(y)]), diff --git a/tests/python/relay/test_op_grad_level2.py b/tests/python/relay/test_op_grad_level2.py index b855065186c2..686fd9834640 100644 --- a/tests/python/relay/test_op_grad_level2.py +++ b/tests/python/relay/test_op_grad_level2.py @@ -53,7 +53,7 @@ def verify_max_pool2d_grad(x_shape, pool_size, strides, padding, ceil_mode): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor(device=dev, target=target) op_res, (op_grad,) = intrp.evaluate(bwd_func)(data) - np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01) + 
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01) @tvm.testing.uses_gpu @@ -102,7 +102,7 @@ def verify_avg_pool2d_grad( for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor(device=dev, target=target) op_res, (op_grad,) = intrp.evaluate(bwd_func)(data) - np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01) + np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01) @tvm.testing.uses_gpu @@ -158,7 +158,7 @@ def verify_global_avg_pool2d_grad(x_shape): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor(device=dev, target=target) op_res, (op_grad,) = intrp.evaluate(bwd_func)(data) - np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01) + np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01) @tvm.testing.uses_gpu diff --git a/tests/python/relay/test_op_grad_level3.py b/tests/python/relay/test_op_grad_level3.py index e394eaa45a82..821e10f97e21 100644 --- a/tests/python/relay/test_op_grad_level3.py +++ b/tests/python/relay/test_op_grad_level3.py @@ -43,7 +43,7 @@ def test_clip(): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor(device=dev, target=target) op_res, (op_grad,) = intrp.evaluate(bwd_func)(data) - np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01) + np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01) def verify_transpose_grad(d_shape, axes=None): @@ -176,8 +176,8 @@ def test_zeros_ones_grad_dynamic(): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor(device=dev, target=target) res, (grad,) = intrp.evaluate(bwd_func)(dyn_shape) - tvm.testing.assert_allclose(res.asnumpy(), op_ref(dyn_shape, dtype="float32")) - tvm.testing.assert_allclose(grad.asnumpy(), np.zeros((rank,), dtype="int32")) + tvm.testing.assert_allclose(res.numpy(), op_ref(dyn_shape, dtype="float32")) + tvm.testing.assert_allclose(grad.numpy(), np.zeros((rank,), dtype="int32")) if __name__ == "__main__": diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index aef3c3cdccc2..89475ac7df86 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -72,7 +72,7 @@ def check_single_op(opfunc, ref, dtype): continue intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) for opfunc, ref in [ (tvm.relay.log, np.log), @@ -134,7 +134,7 @@ def check_binary_op(opfunc, ref, dtype): continue intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01, atol=1e-3) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01, atol=1e-3) for opfunc, ref in [ (relay.add, np.add), @@ -165,7 +165,7 @@ def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis): ref_res = data.reshape(oshape) intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) for dtype in ["float16", "float32"]: verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2) @@ -198,7 +198,7 @@ def test_bias_add(): continue intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - 
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol) def test_bias_add_type_failure(): @@ -242,7 +242,7 @@ def test_softmax(): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -263,7 +263,7 @@ def test_log_softmax(): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -320,9 +320,9 @@ def test_concatenate(): intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=0.01) op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=0.01) def test_dropout(): @@ -343,7 +343,7 @@ def test_dropout(): for backend in ["debug", "graph"]: intrp = relay.create_executor("debug", device=dev, target=target) op_res = intrp.evaluate(func)() - tvm.testing.assert_allclose(op_res.asnumpy(), in_np, rtol=0.01) + tvm.testing.assert_allclose(op_res.numpy(), in_np, rtol=0.01) def test_batch_norm(): @@ -464,9 +464,9 @@ def test_dense(): intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x_data, w_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data, w_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) def test_dense_dtype(): diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index 0faa31fdc0be..96d90b2a4f76 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -45,7 +45,7 @@ def test_checkpoint(): intrp = relay.create_executor(kind, device=dev, target=target) f_res = intrp.evaluate(f)(*inputs) f_checkpoint_res = intrp.evaluate(f_checkpoint)(*inputs) - tvm.testing.assert_allclose(f_res.asnumpy(), f_checkpoint_res.asnumpy(), 0, 0) + tvm.testing.assert_allclose(f_res.numpy(), f_checkpoint_res.numpy(), 0, 0) def test_checkpoint_alpha_equal(): @@ -176,7 +176,7 @@ def test_collapse_sum_like(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x, y) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -196,7 +196,7 @@ def test_collapse_sum_to(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + 
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -216,7 +216,7 @@ def test_broadcast_to(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -240,7 +240,7 @@ def test_broadcast_to_like(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x, y) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) def np_slice_like(np_data, np_shape_like, axis=None): @@ -285,7 +285,7 @@ def verify_slice_like(data, slice_like, axes, output, dtype="float32"): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -319,7 +319,7 @@ def verify_reverse_reshape(shape, newshape, oshape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_reverse_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2)) verify_reverse_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4)) @@ -344,7 +344,7 @@ def verify_batch_matmul(x_shape, y_shape, out_shape, dtype="float32"): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) z = intrp.evaluate(func)(x_np, y_np) - tvm.testing.assert_allclose(z.asnumpy(), z_np, rtol=1e-5) + tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5) @tvm.testing.uses_gpu @@ -377,7 +377,7 @@ def verify_dynamic_batch_matmul(x_shape, y_shape, out_shape, dtype="float32"): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) z = intrp.evaluate()(x_np, y_np) - tvm.testing.assert_allclose(z.asnumpy(), z_np, rtol=1e-5) + tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5) # TODO(mbrookhart): enable once VM supports heterogenous execution @@ -402,7 +402,7 @@ def test_shape_of(): for kind in ["debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), np.array(shape).astype("int32")) + tvm.testing.assert_allclose(op_res.numpy(), np.array(shape).astype("int32")) @tvm.testing.uses_gpu @@ -418,7 +418,7 @@ def verify_ndarray_size(shape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) verify_ndarray_size((2, 3, 5)) verify_ndarray_size((2, 3, 5, 7)) @@ -436,7 +436,7 @@ def verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) relay_out = intrp1.evaluate(func)(np_data) - tvm.testing.assert_allclose(relay_out.asnumpy(), np_out, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(relay_out.numpy(), np_out, rtol=1e-5, atol=1e-5) def 
verify_adaptive_pool1d(dshape, out_size, pool_type, layout="NCW", dtype="float32"): @@ -497,7 +497,7 @@ def _verify(data_shape, mask_value, axis, dtype, itype): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) out_relay = intrp.evaluate(func)(data_np, valid_length_np) - tvm.testing.assert_allclose(out_relay.asnumpy(), gt_out_np) + tvm.testing.assert_allclose(out_relay.numpy(), gt_out_np) _verify((5, 10), 0.0, 1, "float32", "int32") _verify((2, 3, 5, 3), 0.0, 0, "float32", "int64") @@ -537,7 +537,7 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) out_relay = intrp.evaluate(func)(indices_np) - tvm.testing.assert_allclose(out_relay.asnumpy(), out_np) + tvm.testing.assert_allclose(out_relay.numpy(), out_np) _verify((3,), 3, 1, 0, -1, "int32") _verify((3,), 3, 1.0, 0.0, -1, "float32") @@ -567,7 +567,7 @@ def _verify(input_shape, diagonal_shape, dtype, k=0, align="RIGHT_LEFT"): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) out_relay = intrp.evaluate(func)(input_np, diagonal_np) - tvm.testing.assert_allclose(out_relay.asnumpy(), out_np) + tvm.testing.assert_allclose(out_relay.numpy(), out_np) _verify((2, 2), (2,), "float32") _verify((4, 3, 3), (4, 3), "int32") diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index b76facae5aa3..50fc0622ee6e 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -100,7 +100,7 @@ def run_test_conv1d( dev = tvm.device(target, 0) intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv1d dshape = (1, 3, 224) @@ -228,7 +228,7 @@ def run_test_conv2d( dev = tvm.device(target, 0) intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4, atol=1e-4) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-4, atol=1e-4) def compile_test_conv2d_arm_cpu( dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs @@ -408,7 +408,7 @@ def run_test_conv2d_cuda( module.set_input(**params) module.run() op_res1 = module.get_output(0) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-3, atol=1e-3) # normal winograd: stride 1, padding 1, kernel 3x3 dshape = (1, 80, 73, 73) @@ -515,7 +515,7 @@ def run_test_conv3d( intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv3d dshape = (1, 3, 5, 224, 224) @@ -580,7 +580,7 @@ def run_test_conv3d( intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv3d dshape = (1, 5, 224, 224, 6) @@ -671,7 +671,7 @@ def run_test_conv3d_cuda( 
module.set_input(**params) module.run() op_res1 = module.get_output(0) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-3, atol=1e-3) # normal winograd: stride 1, padding 1, kernel 3x3x3 dshape = (1, 32, 16, 16, 16) @@ -763,7 +763,7 @@ def test_conv3d_transpose_ncdhw_run(): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -806,7 +806,7 @@ def test_conv2d_transpose_nchw_run(): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -842,7 +842,7 @@ def test_conv2d_transpose_nhwc_run(): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -864,7 +864,7 @@ def test_conv1d_transpose_ncw_run(): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -949,7 +949,7 @@ def _test_global_pool2d(opfunc, reffunc): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -982,7 +982,7 @@ def _test_pool2d(opfunc, pool_type, pool_size=2, strides=2, dilation=1, padding= for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc, reffunc, dtype): n, c, h, w = te.size_var("n"), 10, 224, 224 @@ -1003,7 +1003,7 @@ def _test_pool2d_int(opfunc, reffunc, dtype): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool2d(relay.nn.max_pool2d, "max") _test_pool2d(relay.nn.max_pool2d, "max", pool_size=2, strides=2, padding=0) @@ -1041,7 +1041,7 @@ def _test_global_pool1d(opfunc, reffunc): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - 
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -1077,7 +1077,7 @@ def _test_pool1d( for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool1d(relay.nn.max_pool1d, "max") _test_pool1d(relay.nn.max_pool1d, "max", dtype="int32") @@ -1137,7 +1137,7 @@ def _test_pool3d( for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool3d(relay.nn.max_pool3d, "max") _test_pool3d(relay.nn.max_pool3d, "max", dtype="int32") @@ -1189,7 +1189,7 @@ def test_avg_pool2d_no_count_pad(): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -1225,9 +1225,9 @@ def test_flatten_infer_type(): intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -1298,7 +1298,7 @@ def _test_run(dtype): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) _test_run("float32") _test_run("int32") @@ -1322,7 +1322,7 @@ def _test_run(dtype): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor(kind="graph", device=dev, target=target) result = intrp.evaluate(f)(data_arr, pad_value_arr) - tvm.testing.assert_allclose(result.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(result.numpy(), ref_res, rtol=1e-5, atol=1e-5) _test_run("float32") _test_run("int32") @@ -1356,9 +1356,9 @@ def test_lrn(): intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -1386,9 +1386,9 @@ def test_l2_normalize(): intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, 
target=target) op_res1 = intrp1.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) def batch_flatten(data): @@ -1410,7 +1410,7 @@ def test_batch_flatten(): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) def _test_upsampling(layout, method, align_corners=False): @@ -1458,7 +1458,7 @@ def get_shape(): for target, dev in tvm.testing.enabled_targets(): executor = relay.create_executor("graph", device=dev, target=target) out = executor.evaluate(func)(data) - tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -1530,7 +1530,7 @@ def get_shape(): for target, dev in tvm.testing.enabled_targets(): executor = relay.create_executor("graph", device=dev, target=target) out = executor.evaluate(func)(data) - tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -1797,7 +1797,7 @@ def _test_correlation( for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data1_np, data2_np) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) _test_correlation( (1, 3, 10, 10), diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py index b8bab295ba67..fd6d7a9aeb14 100644 --- a/tests/python/relay/test_op_level3.py +++ b/tests/python/relay/test_op_level3.py @@ -34,7 +34,7 @@ def test_zeros_ones(): yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((124, 50), "float64") intrp = create_executor() - intrp_res = intrp.evaluate(y).asnumpy() + intrp_res = intrp.evaluate(y).numpy() np.testing.assert_allclose(intrp_res, ref((124, 50), "float64")) @@ -62,7 +62,7 @@ def test_unary_identity(): intrp = create_executor() op_res = intrp.evaluate(y, {x: relay.const(data)}) ref_res = ref(data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) def test_cast(): @@ -89,7 +89,7 @@ def test_clip(): intrp = create_executor() op_res = intrp.evaluate(y, {a: relay.const(data)}) ref_res = np.clip(data, 1.0, 4.0) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) def test_fixed_point_multiply(): @@ -107,7 +107,7 @@ def test_fixed_point_multiply(): intrp = create_executor() op_res = intrp.evaluate(y, {a: relay.const(data)}) ref_res = np.ones((10, 4)).astype("int32") - np.testing.assert_allclose(op_res.asnumpy(), ref_res, atol=1) + np.testing.assert_allclose(op_res.numpy(), ref_res, atol=1) def test_reinterpret(): @@ -120,7 +120,7 @@ def test_reinterpret(): intrp = create_executor() op_res = intrp.evaluate(y, {a: relay.const(data)}) ref_res = data.view("int32") - np.testing.assert_equal(op_res.asnumpy(), ref_res) + 
np.testing.assert_equal(op_res.numpy(), ref_res) def test_approximate_transcendental(): @@ -160,7 +160,7 @@ def approximate_tanh(x): def reference_sigmoid(x): return np.exp(-np.logaddexp(0, -x)) - np.testing.assert_allclose(op_res.asnumpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9) + np.testing.assert_allclose(op_res.numpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9) y = approximate_tanh(a) yy = run_infer_type(y) @@ -172,7 +172,7 @@ def reference_sigmoid(x): def reference_tanh(x): return np.tanh(x) - np.testing.assert_allclose(op_res.asnumpy(), reference_tanh(data), atol=4e-5, rtol=1e-9) + np.testing.assert_allclose(op_res.numpy(), reference_tanh(data), atol=4e-5, rtol=1e-9) def test_squeeze(): @@ -186,7 +186,7 @@ def verify_squeeze(shape, dtype, axis): intrp = create_executor() op_res = intrp.evaluate(squeeze, {x: relay.const(data)}) ref_res = np.squeeze(data, axis=np_axis) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) verify_squeeze((1, 3, 2, 5), "float32", None) verify_squeeze((1, 3, 1), "float32", [0]) @@ -221,7 +221,7 @@ def verify_transpose(dshape, axes): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_transpose((2, 3, 4), (0, 2, 1)) @@ -276,7 +276,7 @@ def verify_reshape(shape, newshape, oshape): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_reshape((2, 3, 4), (8, 3), (8, 3)) verify_reshape((4, 7), (2, 7, 2), (2, 7, 2)) @@ -365,7 +365,7 @@ def verify_reshape_like(shape, oshape, shape_like=None, reshape_like_kwargs={}): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_reshape_like((2, 3, 4), (1, 8, 3)) verify_reshape_like((4, 7), (2, 7, 2)) @@ -411,7 +411,7 @@ def verify_take(src_shape, indices_src, axis=None, mode="clip"): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data, indices_src) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_take((4,), [1]) verify_take((4,), [[0, 1, 2, 3]]) @@ -546,7 +546,7 @@ def verify_full(fill_value, src_shape, dtype): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(np.array(fill_value, dtype)) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_full(4, (1, 3, 4, 4), "int32") # verify_full(4, (1, 3, 4, 4), "int64") # This does not pass, python int32 is not upcast to int64, not sure how to fix it. 
@@ -585,7 +585,7 @@ def verify_full_like(base, fill_value, dtype): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data, np.array(fill_value, dtype)) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_full_like((1, 3, 4, 4), 4, "int32") verify_full_like((1, 1), 44.0, "float32") @@ -615,9 +615,9 @@ def test_infer_type_leaky_relu(): intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"): @@ -652,9 +652,9 @@ def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"): intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x_data, a_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data, a_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -696,7 +696,7 @@ def verify_arange(start, stop, step): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)() - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_arange(None, 20, None) verify_arange(None, 20, 2) @@ -737,7 +737,7 @@ def verify_meshgrid(lengths, indexing="ij"): op_res = intrp.evaluate(func)(*input_data) assert len(op_res) == len(ref_res) for i in range(len(op_res)): - tvm.testing.assert_allclose(op_res[i].asnumpy(), ref_res[i], rtol=1e-5) + tvm.testing.assert_allclose(op_res[i].numpy(), ref_res[i], rtol=1e-5) verify_meshgrid([3, 5]) verify_meshgrid([4, 2], indexing="xy") @@ -761,7 +761,7 @@ def verify_tile(dshape, reps): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_tile((2, 3, 4), (3, 2, 1)) verify_tile((2, 3, 4), (1, 2)) @@ -779,7 +779,7 @@ def verify_repeat(dshape, repeats, axis): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_repeat((3,), 2, 0) verify_repeat((3, 10), 2, -1) @@ -804,7 +804,7 @@ def verify_stack(input_expr, relay_args, ref_res, axis): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(*relay_args) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) def 
verify_tup_lit_stack(dshapes, axis): input_tuple = produce_input_tuple(dshapes) @@ -856,7 +856,7 @@ def verify_reverse(dshape, axis): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_reverse((2, 3, 4), 1) verify_reverse((4, 7), 0) @@ -877,7 +877,7 @@ def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32") result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]] @@ -971,7 +971,7 @@ def verify_scatter(dshape, ishape, axis=0): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(data_np, indices_np, updates_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) def verify_dynamic_scatter(dshape, ishape, axis=0): d = relay.var("d", relay.TensorType([relay.Any() for i in range(len(dshape))], "float32")) @@ -992,7 +992,7 @@ def verify_dynamic_scatter(dshape, ishape, axis=0): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(data_np, indices_np, updates_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_scatter((10,), (10,), 0) verify_scatter((10, 5), (10, 5), -2) @@ -1245,7 +1245,7 @@ def verify_gather(data, axis, indices, ref_res): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(data, indices) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_gather(data, axis, indices, ref_res) @@ -1291,7 +1291,7 @@ def gather_nd_batch_dims_1_ref(data, indices): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]]) verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]]) @@ -1340,7 +1340,7 @@ def _verify_infiniteness_ops(relay_op, ref_op): intrp = create_executor() op_res = intrp.evaluate(y, {x: data}) ref_res = ref_op(data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) def test_isfinite(): @@ -1374,7 +1374,7 @@ def verify_unravel_index(indices, shape, dtype): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) for dtype in ["int64", "int32"]: verify_unravel_index([0, 1, 2, 3], [2, 2], dtype) @@ -1425,7 +1425,7 @@ def verify_sparse_to_dense(sparse_indices, 
sparse_values, default_value, output_ op_res = intrp.evaluate(func)( sparse_indices_data, sparse_values_data, default_value_data ) - tvm.testing.assert_allclose(op_res.asnumpy(), xpected, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), xpected, rtol=1e-5) verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector @@ -1769,9 +1769,9 @@ def verify_func(func, data, ref_res, target_device=tvm.testing.enabled_targets() ), "Outputs from TVM and Python implementation must be equal " for op_result, ref_result in zip(op_res, ref_res): - tvm.testing.assert_allclose(op_result.asnumpy(), ref_result, rtol=1e-5) + tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5) else: - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) relay.backend.compile_engine.get().clear() @@ -1795,7 +1795,7 @@ def verify_adv_index(data_shape, index_shapes): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(*np_args) - tvm.testing.assert_allclose(op_res.asnumpy(), np_out, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5) verify_adv_index((10, 5), [(3, 4), (3, 1)]) verify_adv_index( @@ -1833,7 +1833,7 @@ def assert_relay_scanop( for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(data_np) - tvm.testing.assert_allclose(op_res.asnumpy(), np_out, rtol=rtol, atol=atol) + tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=rtol, atol=atol) data = np.array([2, 3, 0]) assert_relay_scanop(data, gt_func(data)) @@ -1895,7 +1895,7 @@ def verify_scatter_nd( for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(data_np, indices_np, updates_np) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol, atol=atol) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol) def verify_scatter_nd_with_stack( data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5 @@ -1920,7 +1920,7 @@ def verify_scatter_nd_with_stack( for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(*fargs) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol, atol=atol) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol) data = np.zeros((2, 2)).astype("int64") indices = np.array([[1, 1, 0], [0, 1, 0]]) @@ -2001,15 +2001,15 @@ def verify_unique(n, dtype, is_dyn=False, is_sorted=False, return_counts=False): tvm_res = intrp.evaluate()(x_data) np_res = calc_numpy_unique(x_data, is_sorted) num_unique = np_res[3][0] - assert num_unique == tvm_res[2].asnumpy()[0] + assert num_unique == tvm_res[2].numpy()[0] # unique - tvm.testing.assert_allclose(tvm_res[0].asnumpy()[:num_unique], np_res[0], rtol=1e-5) + tvm.testing.assert_allclose(tvm_res[0].numpy()[:num_unique], np_res[0], rtol=1e-5) # inverse_indices - tvm.testing.assert_allclose(tvm_res[1].asnumpy(), np_res[1], rtol=1e-5) + tvm.testing.assert_allclose(tvm_res[1].numpy(), np_res[1], rtol=1e-5) # counts if return_counts: tvm.testing.assert_allclose( - tvm_res[3].asnumpy()[:num_unique], np_res[2], rtol=1e-5 + tvm_res[3].numpy()[:num_unique], np_res[2], rtol=1e-5 ) for dtype in ["int32", "int64"]: diff --git a/tests/python/relay/test_op_level4.py 
b/tests/python/relay/test_op_level4.py index 8de644999c9e..c49e3de62662 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -52,7 +52,7 @@ def check_binary_op(opfunc, ref): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) for opfunc, ref in [(relay.power, np.power)]: check_binary_op(opfunc, ref) @@ -90,7 +90,7 @@ def test_cmp_type(): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) @tvm.testing.uses_gpu @@ -115,7 +115,7 @@ def test_binary_int_broadcast_1(): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) @tvm.testing.uses_gpu @@ -140,7 +140,7 @@ def test_binary_int_broadcast_2(): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data, y_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) @tvm.testing.uses_gpu @@ -150,7 +150,7 @@ def run(func, inputs, ref_res): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(*inputs) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) def verify(x_np, y_np, cond_np): ref_res = np.where(cond_np, x_np, y_np) @@ -261,9 +261,9 @@ def verify_reduce(funcs, data, axis, keepdims, exclude, output, dtype="float32") intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -355,11 +355,11 @@ def verify_mean_var_std(funcs, shape, axis, keepdims): intrp1 = relay.create_executor("graph", device=dev, target=target) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res1[0].asnumpy(), ref_mean, rtol=1e-5) - tvm.testing.assert_allclose(op_res1[1].asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1[0].numpy(), ref_mean, rtol=1e-5) + tvm.testing.assert_allclose(op_res1[1].numpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res2[0].asnumpy(), ref_mean, rtol=1e-5) - tvm.testing.assert_allclose(op_res2[1].asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2[0].numpy(), ref_mean, rtol=1e-5) + tvm.testing.assert_allclose(op_res2[1].numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -408,7 +408,7 @@ def verify(dshape, begin, end, strides, 
output, slice_mode="end", test_ref=True, for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) verify((1, 3, 10, 10), [0, 0, 0, 0], [-1, 3, 10, 10], [1], (0, 3, 10, 10), dtype="int64") verify( @@ -467,7 +467,7 @@ def verify(dshape, begin, end, strides, output, slice_mode="end", test_ref=True, mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor("vm", mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) verify( (1, 224, 224, 3), @@ -518,7 +518,7 @@ def verify(dshape, begin, end, strides, vshape, test_ref=True): for target, dev in tvm.testing.enabled_targets(): intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(x_data, v_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res) + tvm.testing.assert_allclose(op_res.numpy(), ref_res) verify((3, 4, 16), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2)) verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2)) diff --git a/tests/python/relay/test_op_level5.py b/tests/python/relay/test_op_level5.py index 64ec7e3345a1..e27520339f36 100644 --- a/tests/python/relay/test_op_level5.py +++ b/tests/python/relay/test_op_level5.py @@ -66,7 +66,7 @@ def verify_resize(dshape, scale, method, layout, coord_trans): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-3, atol=1e-4) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4) for method in ["nearest_neighbor", "bilinear"]: for coord_trans in ["asymmetric"]: # TOPI testing function only support asymmetric @@ -121,7 +121,7 @@ def verify_resize(dshape, scale, method, layout): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4, atol=1e-6) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6) for method in ["trilinear", "nearest_neighbor"]: for layout in ["NDHWC", "NCDHW"]: @@ -155,7 +155,7 @@ def verify_crop_and_resize( for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(image_data, boxes, box_indices) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-3, atol=1e-04) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-04) boxes_nhwc = np.array([[0.1, 0.2, 0.8, 0.7], [0.2, 0, 1, 0.6]]).astype("float32") indices_nhwc = np.array([1, 0]).astype("int32") @@ -255,10 +255,10 @@ def verify_multibox_prior( for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res2 = intrp2.evaluate(func)(data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) sizes = (0.3, 1.5, 0.7) ratios = (1.3, 2.4) @@ 
-315,9 +315,9 @@ def verify_get_valid_counts(dshape, score_threshold, id_index, score_index): intrp = relay.create_executor("debug", device=dev, target=target) out = intrp.evaluate(func)(np_data) - tvm.testing.assert_allclose(out[0].asnumpy(), np_out1, rtol=1e-3, atol=1e-04) - tvm.testing.assert_allclose(out[1].asnumpy(), np_out2, rtol=1e-3, atol=1e-04) - tvm.testing.assert_allclose(out[2].asnumpy(), np_out3, rtol=1e-3, atol=1e-04) + tvm.testing.assert_allclose(out[0].numpy(), np_out1, rtol=1e-3, atol=1e-04) + tvm.testing.assert_allclose(out[1].numpy(), np_out2, rtol=1e-3, atol=1e-04) + tvm.testing.assert_allclose(out[2].numpy(), np_out3, rtol=1e-3, atol=1e-04) verify_get_valid_counts((1, 2500, 6), 0, 0, 1) verify_get_valid_counts((1, 2500, 5), -1, -1, 0) @@ -386,14 +386,14 @@ def verify_nms( for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(x0_data, x1_data, x2_data, x3_data) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res2 = intrp2.evaluate(func)(x0_data, x1_data, x2_data, x3_data) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) op_indices_res1 = intrp1.evaluate(func_indices)(x0_data, x1_data, x2_data, x3_data) - tvm.testing.assert_allclose(op_indices_res1[0].asnumpy(), ref_indices_res, rtol=1e-5) + tvm.testing.assert_allclose(op_indices_res1[0].numpy(), ref_indices_res, rtol=1e-5) op_indices_res2 = intrp2.evaluate(func_indices)(x0_data, x1_data, x2_data, x3_data) - tvm.testing.assert_allclose(op_indices_res2[0].asnumpy(), ref_indices_res, rtol=1e-5) + tvm.testing.assert_allclose(op_indices_res2[0].numpy(), ref_indices_res, rtol=1e-5) np_data = np.array( [ @@ -577,10 +577,10 @@ def test_default_value(): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(np_cls_prob, np_loc_preds, np_anchors) - tvm.testing.assert_allclose(op_res1.asnumpy(), expected_np_out, rtol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), expected_np_out, rtol=1e-5) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res2 = intrp2.evaluate(func)(np_cls_prob, np_loc_preds, np_anchors) - tvm.testing.assert_allclose(op_res2.asnumpy(), expected_np_out, rtol=1e-5) + tvm.testing.assert_allclose(op_res2.numpy(), expected_np_out, rtol=1e-5) def test_threshold(): num_anchors = 5 @@ -671,10 +671,10 @@ def verify_roi_align( print("test on", target) intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(np_data, np_rois) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-4) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res2 = intrp2.evaluate(func)(np_data, np_rois) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-4) def verify_roi_align_nchw( data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio, mode @@ -766,10 +766,10 @@ def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale): for target, dev in tvm.testing.enabled_targets(): intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = 
intrp1.evaluate(func)(np_data, np_rois) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-4) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res2 = intrp2.evaluate(func)(np_data, np_rois) - tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-4) verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0) verify_roi_pool((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5) @@ -794,10 +794,10 @@ def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs): dev = tvm.device(target, 0) intrp1 = relay.create_executor("graph", device=dev, target=target) op_res1 = intrp1.evaluate(func)(np_cls_prob, np_bbox_pred, np_im_info) - tvm.testing.assert_allclose(op_res1.asnumpy(), np_out, rtol=1e-4) + tvm.testing.assert_allclose(op_res1.numpy(), np_out, rtol=1e-4) intrp2 = relay.create_executor("debug", device=dev, target=target) op_res2 = intrp2.evaluate(func)(np_cls_prob, np_bbox_pred, np_im_info) - tvm.testing.assert_allclose(op_res2.asnumpy(), np_out, rtol=1e-4) + tvm.testing.assert_allclose(op_res2.numpy(), np_out, rtol=1e-4) attrs = { "scales": (0.5,), @@ -888,7 +888,7 @@ def verify_yolo_reorg(shape, stride): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_yolo_reorg((1, 100, 20, 20), 10) verify_yolo_reorg((1, 4, 6, 6), 2) @@ -1023,7 +1023,7 @@ def test_run(batch, in_channel, size, out_channel, deformable_groups, groups, la for kind in ["graph", "debug"]: intrp1 = relay.create_executor(kind, device=dev, target=target) op_res1 = intrp1.evaluate(func)(data, offset, kernel) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) test_run(1, 4, 16, 4, 1, 1, "NCHW") test_run(1, 4, 16, 4, 1, 1, "NHWC") @@ -1068,7 +1068,7 @@ def verify_depth_to_space(dshape, block_size, layout, mode): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4) for layout in ["NHWC", "NCHW"]: for mode in ["DCR", "CDR"]: @@ -1112,7 +1112,7 @@ def verify_space_to_depth(dshape, block_size, layout): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4) for layout in ["NHWC", "NCHW"]: verify_space_to_depth((1, 4, 4, 4), 2, layout) @@ -1168,7 +1168,7 @@ def run_test_dilation2d( continue intrp = relay.create_executor("graph", device=dev, target=target) op_res = intrp.evaluate(func)(indata, kernel) - tvm.testing.assert_allclose(op_res.asnumpy(), out, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), out, rtol=1e-5, atol=1e-5) def _convert_data(indata, kernel, out, layout=None): indata = np.asarray(indata) @@ -1270,7 +1270,7 @@ def verify_affine_grid(num_batch, target_shape): for kind in ["graph", "debug"]: intrp1 = relay.create_executor(kind, device=dev, 
target=target) op_res1 = intrp1.evaluate(func)(data_np) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) verify_affine_grid(1, (16, 32)) verify_affine_grid(4, (16, 32)) @@ -1297,7 +1297,7 @@ def verify_grid_sample(data_shape, grid_shape): for kind in ["graph", "debug"]: intrp1 = relay.create_executor(kind, device=dev, target=target) op_res1 = intrp1.evaluate(func)(data_np, grid_np) - tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) verify_grid_sample((4, 4, 16, 32), (4, 2, 8, 8)) verify_grid_sample((4, 4, 16, 32), (4, 2, 32, 32)) @@ -1324,7 +1324,7 @@ def verify_space_to_batch_nd(dshape, block_shape, paddings): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4) verify_space_to_batch_nd([3, 3, 2, 1], [3], [[0, 0]]) verify_space_to_batch_nd([2, 2, 4, 1], [2, 2], [[0, 0], [2, 0]]) @@ -1351,7 +1351,7 @@ def verify_batch_to_space_nd(dshape, block_shape, crops): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4) verify_batch_to_space_nd([4, 1, 1, 3], [2, 2], [[0, 0], [0, 0]]) verify_batch_to_space_nd([8, 1, 3, 1], [2, 2], [[0, 0], [2, 0]]) @@ -1385,7 +1385,7 @@ def verify_all_class_non_max_suppression( for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, device=dev, target=target) selected_indices, num_detections = intrp.evaluate(func)(boxes_np, scores_np) - tvm_res = selected_indices.asnumpy()[: num_detections.asnumpy()[0]] + tvm_res = selected_indices.numpy()[: num_detections.numpy()[0]] np.testing.assert_equal(tvm_res, expected_indices) boxes = np.array( diff --git a/tests/python/relay/test_op_level6.py b/tests/python/relay/test_op_level6.py index f0c66247329d..1838233e3a3a 100644 --- a/tests/python/relay/test_op_level6.py +++ b/tests/python/relay/test_op_level6.py @@ -48,7 +48,7 @@ def verify_sort(shape, axis, is_ascend, is_dyn=False): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) for is_dyn in [False, True]: verify_sort((2, 3, 4), axis=0, is_ascend=False, is_dyn=is_dyn) @@ -82,7 +82,7 @@ def verify_argsort(shape, axis, is_ascend, dtype, is_dyn=False): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(x_data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.astype(dtype), rtol=1e-5) + tvm.testing.assert_allclose(op_res.numpy(), ref_res.astype(dtype), rtol=1e-5) for is_dyn in [False, True]: for dtype in ["int32", "int64", "float32", "float64"]: @@ -127,12 +127,12 @@ def verify_topk(k, axis, ret_type, is_ascend, dtype): intrp = relay.create_executor(kind, device=dev, target=target) op_res = intrp.evaluate(func)(np_data) if ret_type == "both": - tvm.testing.assert_allclose(op_res[0].asnumpy(), np_values) - 
tvm.testing.assert_allclose(op_res[1].asnumpy(), np_indices) + tvm.testing.assert_allclose(op_res[0].numpy(), np_values) + tvm.testing.assert_allclose(op_res[1].numpy(), np_indices) elif ret_type == "values": - tvm.testing.assert_allclose(op_res.asnumpy(), np_values) + tvm.testing.assert_allclose(op_res.numpy(), np_values) else: - tvm.testing.assert_allclose(op_res.asnumpy(), np_indices) + tvm.testing.assert_allclose(op_res.numpy(), np_indices) np.random.seed(0) for k in [0, 1, 5]: diff --git a/tests/python/relay/test_op_qnn_add.py b/tests/python/relay/test_op_qnn_add.py index b37ddc2c227c..d3a3b8ffca5f 100644 --- a/tests/python/relay/test_op_qnn_add.py +++ b/tests/python/relay/test_op_qnn_add.py @@ -65,7 +65,7 @@ def test_tflite_same_io_qnn_params(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) def test_tflite_different_io_qnn_params(): @@ -113,7 +113,7 @@ def test_tflite_different_io_qnn_params(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) def test_saturation(): @@ -145,7 +145,7 @@ def test_saturation(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) # Same params, different scale z = relay.qnn.op.add( @@ -171,7 +171,7 @@ def test_saturation(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) # Same io params, different output scale z = relay.qnn.op.add( @@ -197,7 +197,7 @@ def test_saturation(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) # All params different z = relay.qnn.op.add( @@ -223,7 +223,7 @@ def test_saturation(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) if __name__ == "__main__": diff --git a/tests/python/relay/test_op_qnn_concatenate.py b/tests/python/relay/test_op_qnn_concatenate.py index 453875301af9..12571aad0822 100644 --- a/tests/python/relay/test_op_qnn_concatenate.py +++ b/tests/python/relay/test_op_qnn_concatenate.py @@ -53,7 +53,7 @@ def test_same_io_qnn_params(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) def test_different_io_qnn_params(): @@ -88,7 +88,7 @@ def test_different_io_qnn_params(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) def 
test_few_same_io_qnn_params(): @@ -123,7 +123,7 @@ def test_few_same_io_qnn_params(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) def test_same_i_qnn_params(): @@ -158,7 +158,7 @@ def test_same_i_qnn_params(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) def test_call_input(): @@ -185,7 +185,7 @@ def test_call_input(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data) - np.testing.assert_equal(op_res.asnumpy(), x_data) + np.testing.assert_equal(op_res.numpy(), x_data) if __name__ == "__main__": diff --git a/tests/python/relay/test_op_qnn_conv2_transpose.py b/tests/python/relay/test_op_qnn_conv2_transpose.py index e4e02279efd6..ac92692c727f 100644 --- a/tests/python/relay/test_op_qnn_conv2_transpose.py +++ b/tests/python/relay/test_op_qnn_conv2_transpose.py @@ -195,7 +195,7 @@ def get_output(func, golden_inputs): mod.set_input("data", golden_data) mod.set_input(**params) mod.run() - res = mod.get_output(0).asnumpy() + res = mod.get_output(0).numpy() return res golden_inputs = get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype) diff --git a/tests/python/relay/test_op_qnn_conv2d.py b/tests/python/relay/test_op_qnn_conv2d.py index 928450312147..3a81e6e7b47a 100644 --- a/tests/python/relay/test_op_qnn_conv2d.py +++ b/tests/python/relay/test_op_qnn_conv2d.py @@ -202,7 +202,7 @@ def get_output(func, golden_inputs): mod.set_input("data", golden_data) mod.set_input(**params) mod.run() - res = mod.get_output(0).asnumpy() + res = mod.get_output(0).numpy() return res golden_inputs = get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype) @@ -726,7 +726,7 @@ def test_tflite_large_irregular(): mod.set_input("data", golden_data) mod.set_input(**params) mod.run() - qnn_output = mod.get_output(0).asnumpy() + qnn_output = mod.get_output(0).numpy() golden_output = np.full((1, 1001, 1, 1), 0).astype("uint8") np.testing.assert_equal(qnn_output, golden_output) @@ -771,7 +771,7 @@ def test_tflite_output_multiplier_greater_than_one(): mod.set_input("data", golden_data) mod.set_input(**params) mod.run() - qnn_output = mod.get_output(0).asnumpy() + qnn_output = mod.get_output(0).numpy() golden_output = np.array((17, 17, 0, 0, 2, 2, 16, 36, 2, 2, 0, 0)).reshape(2, 3, 1, 2) np.testing.assert_equal(qnn_output, golden_output) @@ -834,7 +834,7 @@ def test_tflite_anistropic_strides(): mod.set_input("data", golden_data) mod.set_input(**params) mod.run() - qnn_output = mod.get_output(0).asnumpy() + qnn_output = mod.get_output(0).numpy() golden_output = np.array((124, -92, 164, -132)).reshape(1, 1, 2, 2) np.testing.assert_equal(qnn_output, golden_output) diff --git a/tests/python/relay/test_op_qnn_dense.py b/tests/python/relay/test_op_qnn_dense.py index c47ac6b35ec7..3609d8f8edb1 100644 --- a/tests/python/relay/test_op_qnn_dense.py +++ b/tests/python/relay/test_op_qnn_dense.py @@ -218,7 +218,7 @@ def qnn_dense_driver(test_configuration): mod.set_input(bias_name, test_configuration[bias_name]) mod.set_input(**params) mod.run() - res = mod.get_output(0).asnumpy() + res = mod.get_output(0).numpy() np.testing.assert_equal(res, test_configuration["output"]) assert res.dtype == 
expected_out_dtype diff --git a/tests/python/relay/test_op_qnn_dequantize.py b/tests/python/relay/test_op_qnn_dequantize.py index ab398bbc1316..fd5c23fe35b1 100644 --- a/tests/python/relay/test_op_qnn_dequantize.py +++ b/tests/python/relay/test_op_qnn_dequantize.py @@ -39,7 +39,7 @@ def dequantize_test_driver(in_dtype, quant_args, in_data, verify_output_data, ax rt_mod.set_input(input_data=in_data) rt_mod.set_input(**params) rt_mod.run() - res = rt_mod.get_output(0).asnumpy() + res = rt_mod.get_output(0).numpy() np.testing.assert_equal(res, verify_output_data) assert res.dtype == np.float32 diff --git a/tests/python/relay/test_op_qnn_mul.py b/tests/python/relay/test_op_qnn_mul.py index 8ff3ab5c3df2..c4cd3244c8fe 100644 --- a/tests/python/relay/test_op_qnn_mul.py +++ b/tests/python/relay/test_op_qnn_mul.py @@ -83,7 +83,7 @@ def test_tflite_same_io_qnn_params(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), np.uint8(golden)) + np.testing.assert_equal(op_res.numpy(), np.uint8(golden)) def test_tflite_different_io_qnn_params(): @@ -136,7 +136,7 @@ def test_tflite_different_io_qnn_params(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), np.uint8(golden)) + np.testing.assert_equal(op_res.numpy(), np.uint8(golden)) def test_saturation(): @@ -174,7 +174,7 @@ def test_saturation(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), np.uint8(golden)) + np.testing.assert_equal(op_res.numpy(), np.uint8(golden)) # Same params, different scale @@ -208,7 +208,7 @@ def test_saturation(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), np.uint8(golden)) + np.testing.assert_equal(op_res.numpy(), np.uint8(golden)) # All params different @@ -243,7 +243,7 @@ def test_saturation(): intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), np.uint8(golden)) + np.testing.assert_equal(op_res.numpy(), np.uint8(golden)) if __name__ == "__main__": diff --git a/tests/python/relay/test_op_qnn_quantize.py b/tests/python/relay/test_op_qnn_quantize.py index 2ae688ef4784..345e8b815da1 100644 --- a/tests/python/relay/test_op_qnn_quantize.py +++ b/tests/python/relay/test_op_qnn_quantize.py @@ -43,7 +43,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data, verify_ rt_mod.set_input(input_data=in_data) rt_mod.set_input(**params) rt_mod.run() - res = rt_mod.get_output(0).asnumpy() + res = rt_mod.get_output(0).numpy() np.testing.assert_equal(res, verify_output_data) assert res.dtype == out_dtype diff --git a/tests/python/relay/test_op_qnn_requantize.py b/tests/python/relay/test_op_qnn_requantize.py index 5e61fad7676d..ad9805e74929 100644 --- a/tests/python/relay/test_op_qnn_requantize.py +++ b/tests/python/relay/test_op_qnn_requantize.py @@ -32,7 +32,7 @@ def verify(mod, goldens): rt_mod.set_input("quantized_data", golden_data) rt_mod.set_input(**params) rt_mod.run() - res = rt_mod.get_output(0).asnumpy() + res = rt_mod.get_output(0).numpy() np.testing.assert_equal(res, golden_output) diff --git a/tests/python/relay/test_op_qnn_simulated_dequantize.py 
b/tests/python/relay/test_op_qnn_simulated_dequantize.py index 3aecd935b62b..64c70be9d7a7 100644 --- a/tests/python/relay/test_op_qnn_simulated_dequantize.py +++ b/tests/python/relay/test_op_qnn_simulated_dequantize.py @@ -43,7 +43,7 @@ def dequantize_test_driver(in_dtype, quant_args, axis, in_data): rt_mod.set_input(input_data=in_data) rt_mod.set_input(**params) rt_mod.run() - res = rt_mod.get_output(0).asnumpy() + res = rt_mod.get_output(0).numpy() return res @@ -81,7 +81,7 @@ def verify_simulated_dequantize_simple(dtype): dtype = relay.var("dtype", shape=[]) vm = build_simulated_dequantize(input_data, scale, zp, dtype) sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np) - np.testing.assert_allclose(sim_dq_out.asnumpy(), dq_out, rtol=1e-5) + np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5) def test_simulated_dequantize(): @@ -112,7 +112,7 @@ def test_dynamic_channels(): dtype = relay.var("dtype", shape=[]) vm = build_simulated_dequantize(input_data, scale, zp, dtype, axis=0) sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np) - np.testing.assert_allclose(sim_dq_out.asnumpy(), dq_out, rtol=1e-5) + np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5) # Now get the perchannel quantize output and compare without recompiling. scale_np = np.array([0.5, 0.25]).astype("float32") @@ -128,7 +128,7 @@ def test_dynamic_channels(): ) # Run the simulated quantize without recompiling and confirm results match. sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np) - np.testing.assert_allclose(sim_dq_out.asnumpy(), dq_out, rtol=1e-5) + np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5) def test_dynamic_dtype(): @@ -153,7 +153,7 @@ def test_dynamic_dtype(): dtype = relay.var("dtype", shape=[]) vm = build_simulated_dequantize(input_data, scale, zp, dtype) sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np) - np.testing.assert_allclose(sim_dq_out.asnumpy(), dq_out, rtol=1e-5) + np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5) # Now test int8 to float32 compilation. data = np.random.uniform(low=0, high=255, size=[2, 5]).astype("int8") @@ -168,7 +168,7 @@ def test_dynamic_dtype(): # Run the simulated quantize without recompiling and confirm results match. 
dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int8"]) sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np) - np.testing.assert_allclose(sim_dq_out.asnumpy(), dq_out, rtol=1e-5) + np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5) if __name__ == "__main__": diff --git a/tests/python/relay/test_op_qnn_simulated_quantize.py b/tests/python/relay/test_op_qnn_simulated_quantize.py index fd9d13168e01..14014f2e4605 100644 --- a/tests/python/relay/test_op_qnn_simulated_quantize.py +++ b/tests/python/relay/test_op_qnn_simulated_quantize.py @@ -51,7 +51,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data): rt_mod.set_input(input_data=in_data) rt_mod.set_input(**params) rt_mod.run() - res = rt_mod.get_output(0).asnumpy() + res = rt_mod.get_output(0).numpy() return res @@ -89,7 +89,7 @@ def verify_simulated_quantize_simple(dtype): dtype = relay.var("dtype", shape=[]) vm = build_simulated_quantize(input_data, scale, zp, dtype) sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np) - allclose_with_rounding(sim_q_out.asnumpy(), q_out) + allclose_with_rounding(sim_q_out.numpy(), q_out) def test_simulated_quantize(): @@ -120,7 +120,7 @@ def test_dynamic_channels(): dtype = relay.var("dtype", shape=[]) vm = build_simulated_quantize(input_data, scale, zp, dtype, axis=0) sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np) - allclose_with_rounding(sim_q_out.asnumpy(), q_out) + allclose_with_rounding(sim_q_out.numpy(), q_out) # Now get the perchannel quantize output and compare without recompiling. scale_np = np.array([0.5, 0.25]).astype("float32") @@ -137,7 +137,7 @@ def test_dynamic_channels(): ) # Run the simulated quantize without recompiling and confirm results match. sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np) - allclose_with_rounding(sim_q_out.asnumpy(), q_out) + allclose_with_rounding(sim_q_out.numpy(), q_out) def test_dynamic_dtype(): @@ -162,7 +162,7 @@ def test_dynamic_dtype(): dtype = relay.var("dtype", shape=[]) vm = build_simulated_quantize(input_data, scale, zp, dtype) sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np) - allclose_with_rounding(sim_q_out.asnumpy(), q_out) + allclose_with_rounding(sim_q_out.numpy(), q_out) # Now test float32 to int32 compilation. # Get the reference quantize output. @@ -176,7 +176,7 @@ def test_dynamic_dtype(): # Run the simulated quantize without recompiling and confirm results match. 
dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int32"]) sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np) - allclose_with_rounding(sim_q_out.asnumpy(), q_out) + allclose_with_rounding(sim_q_out.numpy(), q_out) if __name__ == "__main__": diff --git a/tests/python/relay/test_op_qnn_subtract.py b/tests/python/relay/test_op_qnn_subtract.py index fb55cdc94844..4f9a36757b81 100644 --- a/tests/python/relay/test_op_qnn_subtract.py +++ b/tests/python/relay/test_op_qnn_subtract.py @@ -54,7 +54,7 @@ def qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dty golden_output = golden_outputs[i] intrp = relay.create_executor("graph", device=tvm.cpu(0), target="llvm") op_res = intrp.evaluate(func)(x_data, y_data) - np.testing.assert_equal(op_res.asnumpy(), golden_output) + np.testing.assert_equal(op_res.numpy(), golden_output) def test_tflite_same_io_qnn_params(): diff --git a/tests/python/relay/test_param_dict.py b/tests/python/relay/test_param_dict.py index 2272883fc39c..5471460c71f2 100644 --- a/tests/python/relay/test_param_dict.py +++ b/tests/python/relay/test_param_dict.py @@ -35,8 +35,8 @@ def test_save_load(): assert isinstance(param_bytes, bytearray) param2 = relay.load_param_dict(param_bytes) assert len(param2) == 2 - np.testing.assert_equal(param2["x"].asnumpy(), x) - np.testing.assert_equal(param2["y"].asnumpy(), y) + np.testing.assert_equal(param2["x"].numpy(), x) + np.testing.assert_equal(param2["y"].numpy(), y) def test_ndarray_reflection(): @@ -48,9 +48,9 @@ def test_ndarray_reflection(): # Serialize then deserialize `param_dict`. deser_param_dict = relay.load_param_dict(runtime.save_param_dict(param_dict)) # Make sure the data matches the original data and `x` and `y` contain the same data. - np.testing.assert_equal(deser_param_dict["x"].asnumpy(), tvm_array.asnumpy()) + np.testing.assert_equal(deser_param_dict["x"].numpy(), tvm_array.numpy()) # Make sure `x` and `y` contain the same data. 
- np.testing.assert_equal(deser_param_dict["x"].asnumpy(), deser_param_dict["y"].asnumpy()) + np.testing.assert_equal(deser_param_dict["x"].numpy(), deser_param_dict["y"].numpy()) def test_bigendian_rpc_param(): @@ -80,7 +80,7 @@ def verify_graph_executor(remote, target, shape, dtype): mod.load_params(runtime.save_param_dict(params)) mod.run() out = mod.get_output(0, tvm.nd.empty(shape, dtype=dtype, device=dev)) - tvm.testing.assert_allclose(x_in + 1, out.asnumpy()) + tvm.testing.assert_allclose(x_in + 1, out.numpy()) print("Test RPC connection to PowerPC...") remote = rpc.connect(host, port) diff --git a/tests/python/relay/test_pass_alter_op_layout.py b/tests/python/relay/test_pass_alter_op_layout.py index aeaf1f89c388..3031c55379ae 100644 --- a/tests/python/relay/test_pass_alter_op_layout.py +++ b/tests/python/relay/test_pass_alter_op_layout.py @@ -75,7 +75,7 @@ def expected(): def test_alter_return_none(): - """Test doing nothing by returning 'None' """ + """Test doing nothing by returning 'None'""" def before(): x = relay.var("x", shape=(1, 64, 56, 56)) @@ -316,7 +316,7 @@ def expected(): def test_alter_layout_broadcast_op(): - """Test boradcast operators """ + """Test boradcast operators""" def before(): x = relay.var("x", shape=(1, 64, 56, 56)) @@ -553,7 +553,7 @@ def expected(): def test_alter_layout_concatenate(): - """ NCHW, NHWC and corner case concatenate layout transform.""" + """NCHW, NHWC and corner case concatenate layout transform.""" def alter_conv2d(attrs, inputs, tinfos, out_type): data, weight = inputs @@ -635,7 +635,7 @@ def expected_nhwc(): def test_alter_layout_nchw_upsamping_op(): - """Test upsamping operators """ + """Test upsamping operators""" def before(): x = relay.var("x", shape=(1, 32, 28, 28)) @@ -674,7 +674,7 @@ def expected(): def test_alter_layout_nchw_dyn_upsamping_op(): - """Test upsamping operators """ + """Test upsamping operators""" def before(): x = relay.var("x", shape=(1, 32, 28, 28)) @@ -766,7 +766,7 @@ def expected(): result_before = ex_before.evaluate()(np_data, np_weight) result_new = ex_new.evaluate()(np_data, np_weight) tvm.testing.assert_allclose( - result_before.asnumpy(), result_new.asnumpy(), rtol=1e-5, atol=1e-5 + result_before.numpy(), result_new.numpy(), rtol=1e-5, atol=1e-5 ) @@ -855,7 +855,7 @@ def expected(): def test_alter_layout_pad(): - """ Check NCHW, NHWC and corner case for pad layout conversion""" + """Check NCHW, NHWC and corner case for pad layout conversion""" def alter_conv2d(attrs, inputs, tinfos, out_type): data, weight = inputs @@ -951,7 +951,7 @@ def expected(): def test_alter_layout_pool(): - """ Check NCHW, NHWC pool layout conversion""" + """Check NCHW, NHWC pool layout conversion""" def alter_conv2d(attrs, inputs, tinfos, out_type): data, weight = inputs @@ -1019,7 +1019,7 @@ def expected_nhwc(): def test_alter_layout_sum(): - """ Check NCHW, NHWC sum layout conversion""" + """Check NCHW, NHWC sum layout conversion""" def alter_conv2d(attrs, inputs, tinfos, out_type): data, weight = inputs @@ -1088,7 +1088,7 @@ def expected_nhwc(): def test_alter_layout_nhwc_arm(): - """ Check that AlterOplayout does not alter NHWC data layout. """ + """Check that AlterOplayout does not alter NHWC data layout.""" def alter_conv2d(attrs, inputs, tinfos, out_type): from tvm import topi @@ -1125,7 +1125,7 @@ def expected_nhwc(): def test_alter_layout_nhwc_int8_aarch64(): - """ Check that AlterOplayout does not alter NHWC data layout. 
""" + """Check that AlterOplayout does not alter NHWC data layout.""" from tvm import autotvm expected_workload_shape = (20, 42, 4, 16) diff --git a/tests/python/relay/test_pass_annotate_target.py b/tests/python/relay/test_pass_annotate_target.py index c756d74ff0be..098fb5c64e82 100644 --- a/tests/python/relay/test_pass_annotate_target.py +++ b/tests/python/relay/test_pass_annotate_target.py @@ -58,7 +58,7 @@ def check_vm_result(): exe = runtime.vm.Executable.load_exec(code, lib) vm = runtime.vm.VirtualMachine(exe, device) out = vm.run(**map_inputs) - tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol) def check_graph_executor_result(): with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): @@ -73,7 +73,7 @@ def check_graph_executor_result(): out = tvm.nd.empty(out_shape, device=device) out = rt_mod.get_output(0, out) - tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol) check_vm_result() check_graph_executor_result() @@ -148,7 +148,7 @@ def test_run(): ref_res = ref_ex.evaluate()(i_data, w1_data) check_result( - mod, {"data": i_data, "weight1": w1_data}, (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5 + mod, {"data": i_data, "weight1": w1_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5 ) test_annotate() @@ -174,7 +174,7 @@ def test_extern_dnnl_mobilenet(): ref_ex = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu(0)) ref_res = ref_ex.evaluate()(i_data, **params) - check_result(mod, {"data": i_data}, (1, 1000), ref_res.asnumpy(), tol=1e-5, params=params) + check_result(mod, {"data": i_data}, (1, 1000), ref_res.numpy(), tol=1e-5, params=params) def test_multiple_ends(): diff --git a/tests/python/relay/test_pass_annotation.py b/tests/python/relay/test_pass_annotation.py index abf795cd46cc..7288b6421de1 100644 --- a/tests/python/relay/test_pass_annotation.py +++ b/tests/python/relay/test_pass_annotation.py @@ -44,7 +44,7 @@ def check_graph_executor( mod = graph_executor.create(graph, lib, contexts) mod.set_input(**new_params) mod.run() - res = mod.get_output(0).asnumpy() + res = mod.get_output(0).numpy() tvm.testing.assert_allclose(res, ref_res, rtol=1e-5, atol=1e-5) @@ -56,7 +56,7 @@ def check_vm_runtime(target, ref_res, device, func, params, config, opt_level, e dev = [tvm.cpu(0), tvm.device(device)] vm = tvm.runtime.vm.VirtualMachine(exe, dev) res = vm.invoke("main", **params) - tvm.testing.assert_allclose(res.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(res.numpy(), ref_res, rtol=1e-5, atol=1e-5) def run_opt_pass(expr, passes): @@ -186,7 +186,7 @@ def check_annotated_graph(annotated_func, expected_func): def test_conv_network(): - R"""The network is as following: + r"""The network is as following: data1 data2 | | conv2d conv2d @@ -389,7 +389,7 @@ def get_func(): return func def test_fuse_log_add(device, tgt): - """ Only log and add are fused.""" + """Only log and add are fused.""" fallback_device = tvm.device("cpu") target = {"cpu": "llvm", device: tgt} cpu_dev = fallback_device @@ -530,7 +530,7 @@ def test_fallback_all_operators(device, tgt): def run_unpropagatable_graph(dev, tgt): - R"""The network is as following: + r"""The network is as following: a b c d \ / \ / add mul diff --git a/tests/python/relay/test_pass_auto_quantize.py b/tests/python/relay/test_pass_auto_quantize.py index c123b182c0b6..51b9f5f24d1d 100644 --- 
a/tests/python/relay/test_pass_auto_quantize.py +++ b/tests/python/relay/test_pass_auto_quantize.py @@ -190,9 +190,7 @@ def _eval_mod(mod): partitioned_mod_result = _eval_mod(partitioned_mod) unpartitioned_mod_result = _eval_mod(unpartitioned_mod) - tvm.testing.assert_allclose( - unpartitioned_mod_result.asnumpy(), partitioned_mod_result.asnumpy() - ) + tvm.testing.assert_allclose(unpartitioned_mod_result.numpy(), partitioned_mod_result.numpy()) def test_add_partition(): @@ -366,7 +364,7 @@ def visit_call(self, call): opf.visit(qnn_mod["main"]) assert len(opf.ops) > 0, 'Broken case, can\'t find any "left_shift" operators.' for left_shift_op in opf.ops: - shift_amount = left_shift_op.args[1].data.asnumpy() + shift_amount = left_shift_op.args[1].data.numpy() assert shift_amount >= 0, "Shift amount must be non-negative." diff --git a/tests/python/relay/test_pass_defunctionalization.py b/tests/python/relay/test_pass_defunctionalization.py index 4097eb770e31..57dbb82c2d0d 100644 --- a/tests/python/relay/test_pass_defunctionalization.py +++ b/tests/python/relay/test_pass_defunctionalization.py @@ -105,7 +105,7 @@ def to_list(mod, l): ret = [] while True: if val.tag == cons.tag: - ret.append(val.fields[0].asnumpy()) + ret.append(val.fields[0].numpy()) val = val.fields[1] else: assert val.tag == nil.tag @@ -154,7 +154,7 @@ def @main(%l: Tensor[(5, 5), float32]) -> Tensor[(5, 5), float32] { out = ex.evaluate()(input) defunc_out = defunc_ex.evaluate()(input) - np.testing.assert_equal(out.asnumpy(), defunc_out.asnumpy()) + np.testing.assert_equal(out.numpy(), defunc_out.numpy()) def test_global_recursion(): @@ -226,7 +226,7 @@ def @main(%l: List[int32]) -> int32 { out = ex.evaluate(mod["main"])(to_adt_list(mod, input)) defunc_out = defunc_ex.evaluate()(to_adt_list(defunc_mod, input)) - tvm.testing.assert_allclose(out.asnumpy(), defunc_out.asnumpy()) + tvm.testing.assert_allclose(out.numpy(), defunc_out.numpy()) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_dynamic_to_static.py b/tests/python/relay/test_pass_dynamic_to_static.py index b9d3a8ef357c..9f7f3deebeb8 100644 --- a/tests/python/relay/test_pass_dynamic_to_static.py +++ b/tests/python/relay/test_pass_dynamic_to_static.py @@ -42,7 +42,7 @@ def verify_func(func, data, ref_res, rtol=1e-5, atol=1e-7): mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(*data) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol, atol=atol) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol) @tvm.testing.uses_gpu @@ -184,12 +184,12 @@ def verify_topk(k, axis, ret_type, is_ascend, dtype): intrp = relay.create_executor(kind, mod=mod, device=dev, target=target) op_res = intrp.evaluate()(np_data) if ret_type == "both": - tvm.testing.assert_allclose(op_res[0].asnumpy(), np_values) - tvm.testing.assert_allclose(op_res[1].asnumpy(), np_indices) + tvm.testing.assert_allclose(op_res[0].numpy(), np_values) + tvm.testing.assert_allclose(op_res[1].numpy(), np_indices) elif ret_type == "values": - tvm.testing.assert_allclose(op_res.asnumpy(), np_values) + tvm.testing.assert_allclose(op_res.numpy(), np_values) else: - tvm.testing.assert_allclose(op_res.asnumpy(), np_indices) + tvm.testing.assert_allclose(op_res.numpy(), np_indices) np.random.seed(0) for k in [0, 1, 5]: diff --git a/tests/python/relay/test_pass_fold_explicit_padding.py b/tests/python/relay/test_pass_fold_explicit_padding.py index a3f82dd6d270..58ba58aa06d3 100644 --- 
a/tests/python/relay/test_pass_fold_explicit_padding.py +++ b/tests/python/relay/test_pass_fold_explicit_padding.py @@ -77,7 +77,7 @@ def validate(ndim, pad_width, pad_value, pad_mode, orig_padding, layout): result1 = ex1.evaluate()(x_np, w_np) result2 = ex2.evaluate()(x_np, w_np) - tvm.testing.assert_allclose(result1.asnumpy(), result2.asnumpy(), rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(result1.numpy(), result2.numpy(), rtol=1e-5, atol=1e-5) for orig_pad in [[0, 0], [2, 0], [0, 2]]: for i_pad in [[0, 0], [1, 1], [1, 0]]: diff --git a/tests/python/relay/test_pass_fuse_ops.py b/tests/python/relay/test_pass_fuse_ops.py index 9b7471f9a5ed..931f453f9a6d 100644 --- a/tests/python/relay/test_pass_fuse_ops.py +++ b/tests/python/relay/test_pass_fuse_ops.py @@ -781,7 +781,7 @@ def test_fuse_dynamic_squeeze_slice_take(): np_result = np.squeeze(input_data[0][:, input_data[1][0], :], axis=0) - assert np.allclose(result.asnumpy(), np_result) + assert np.allclose(result.numpy(), np_result) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_gradient.py b/tests/python/relay/test_pass_gradient.py index 0604ed51272c..6228c5fc157b 100644 --- a/tests/python/relay/test_pass_gradient.py +++ b/tests/python/relay/test_pass_gradient.py @@ -48,8 +48,8 @@ def test_fo_id(): ex = create_executor() x = rand(dtype, *shape) forward, (grad,) = ex.evaluate(back_func)(x) - tvm.testing.assert_allclose(forward.asnumpy(), x.asnumpy()) - tvm.testing.assert_allclose(grad.asnumpy(), np.ones_like(x.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), x.numpy()) + tvm.testing.assert_allclose(grad.numpy(), np.ones_like(x.numpy())) def test_id(): @@ -64,8 +64,8 @@ def test_id(): ex = create_executor() x = rand(dtype, *shape) forward, (grad,) = ex.evaluate(back_func)(x) - tvm.testing.assert_allclose(forward.asnumpy(), x.asnumpy()) - tvm.testing.assert_allclose(grad.asnumpy(), np.ones_like(x.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), x.numpy()) + tvm.testing.assert_allclose(grad.numpy(), np.ones_like(x.numpy())) def test_relu(): @@ -92,8 +92,8 @@ def test_add(): ex = create_executor() x = rand(dtype, *shape) forward, (grad,) = ex.evaluate(back_func)(x) - tvm.testing.assert_allclose(forward.asnumpy(), 2 * x.asnumpy()) - tvm.testing.assert_allclose(grad.asnumpy(), 2 * np.ones_like(x.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), 2 * x.numpy()) + tvm.testing.assert_allclose(grad.numpy(), 2 * np.ones_like(x.numpy())) def test_check_grad(): @@ -121,8 +121,8 @@ def test_temp_add(): ex = create_executor() x = rand(dtype, *shape) forward, (grad,) = ex.evaluate(back_func)(x) - tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy()) - tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), 4 * x.numpy()) + tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy())) def test_sub(): @@ -137,8 +137,8 @@ def test_sub(): ex = create_executor() x = rand(dtype, *shape) forward, (grad,) = ex.evaluate(back_func)(x) - tvm.testing.assert_allclose(forward.asnumpy(), np.zeros_like(x.asnumpy())) - tvm.testing.assert_allclose(grad.asnumpy(), np.zeros_like(x.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), np.zeros_like(x.numpy())) + tvm.testing.assert_allclose(grad.numpy(), np.zeros_like(x.numpy())) def test_broadcast_add(): @@ -147,8 +147,8 @@ def test_broadcast_add(): dtype = "float32" x_nd = rand(dtype, *shape1) y_nd = rand(dtype, *shape2) - x_np = x_nd.asnumpy() - y_np = y_nd.asnumpy() + x_np = 
x_nd.numpy() + y_np = y_nd.numpy() expected_forward = x_np + y_np t1 = relay.TensorType(shape1, dtype) t2 = relay.TensorType(shape2, dtype) @@ -165,12 +165,12 @@ def test_broadcast_add(): ) ex = create_executor() forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd) - tvm.testing.assert_allclose(forward.asnumpy(), expected_forward) + tvm.testing.assert_allclose(forward.numpy(), expected_forward) tvm.testing.assert_allclose( - grad_x.asnumpy(), np.ones_like(expected_forward).sum(axis=2, keepdims=True) + grad_x.numpy(), np.ones_like(expected_forward).sum(axis=2, keepdims=True) ) tvm.testing.assert_allclose( - grad_y.asnumpy(), + grad_y.numpy(), np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0), ) @@ -181,8 +181,8 @@ def test_broadcast_subtract(): dtype = "float32" x_nd = rand(dtype, *shape1) y_nd = rand(dtype, *shape2) - x_np = x_nd.asnumpy() - y_np = y_nd.asnumpy() + x_np = x_nd.numpy() + y_np = y_nd.numpy() expected_forward = x_np - y_np t1 = relay.TensorType(shape1, dtype) t2 = relay.TensorType(shape2, dtype) @@ -199,12 +199,12 @@ def test_broadcast_subtract(): ) ex = create_executor() forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd) - tvm.testing.assert_allclose(forward.asnumpy(), expected_forward) + tvm.testing.assert_allclose(forward.numpy(), expected_forward) tvm.testing.assert_allclose( - grad_x.asnumpy(), np.ones_like(expected_forward).sum(axis=2, keepdims=True) + grad_x.numpy(), np.ones_like(expected_forward).sum(axis=2, keepdims=True) ) tvm.testing.assert_allclose( - grad_y.asnumpy(), + grad_y.numpy(), -np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0), ) @@ -243,16 +243,16 @@ def _test_tuple(mode): x_nd = rand(dtype, *shape) y_nd = rand(dtype, *shape) z_nd = rand(dtype, *shape) - x_np = x_nd.asnumpy() - y_np = y_nd.asnumpy() - z_np = z_nd.asnumpy() + x_np = x_nd.numpy() + y_np = y_nd.numpy() + z_np = z_nd.numpy() expected_forward = x_np + y_np - z_np ex = create_executor() forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd, z_nd) - tvm.testing.assert_allclose(forward.asnumpy(), expected_forward) - tvm.testing.assert_allclose(grad_x.asnumpy(), np.ones_like(grad_x.asnumpy())) - tvm.testing.assert_allclose(grad_y.asnumpy(), np.ones_like(grad_y.asnumpy())) - tvm.testing.assert_allclose(grad_z.asnumpy(), -1 * np.ones_like(grad_z.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), expected_forward) + tvm.testing.assert_allclose(grad_x.numpy(), np.ones_like(grad_x.numpy())) + tvm.testing.assert_allclose(grad_y.numpy(), np.ones_like(grad_y.numpy())) + tvm.testing.assert_allclose(grad_z.numpy(), -1 * np.ones_like(grad_z.numpy())) def _test_tuple_argument(mode): @@ -269,13 +269,13 @@ def _test_tuple_argument(mode): func = run_infer_type(func) back_func = run_infer_type(gradient(func, mode=mode)) xs = [rand(dtype, *shape) for _ in range(fields)] - xs_np = np.array([x.asnumpy() for x in xs]) + xs_np = np.array([x.numpy() for x in xs]) expected_forward = np.sum(xs_np, axis=0) ex = create_executor() forward, grad = ex.evaluate(back_func)(tuple(xs)) - tvm.testing.assert_allclose(forward.asnumpy(), expected_forward) + tvm.testing.assert_allclose(forward.numpy(), expected_forward) for field in grad[0]: - tvm.testing.assert_allclose(field.asnumpy(), np.ones_like(field.asnumpy())) + tvm.testing.assert_allclose(field.numpy(), np.ones_like(field.numpy())) def test_tuple(): @@ -317,8 +317,8 @@ def test_pow(): i_nd = rand(dtype, *shape) ex = create_executor(mod=mod) forward, (grad_i,) = 
ex.evaluate(back_func)(i_nd) - tvm.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy()) - tvm.testing.assert_allclose(grad_i.asnumpy(), 8 * np.ones_like(grad_i.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), 8 * i_nd.numpy()) + tvm.testing.assert_allclose(grad_i.numpy(), 8 * np.ones_like(grad_i.numpy())) def test_ref(): @@ -338,8 +338,8 @@ def test_ref(): x_nd = rand(dtype, *shape) ex = create_executor() forward, (grad_x,) = ex.evaluate(back_func)(x_nd) - tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy()) - tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), 2 * x_nd.numpy()) + tvm.testing.assert_allclose(grad_x.numpy(), 2 * np.ones_like(grad_x.numpy())) def test_square_second_order(): @@ -360,8 +360,8 @@ def test_square_second_order(): x_nd = rand(dtype, *shape) ex = create_executor() forward, (grad_x,) = ex.evaluate(back_back_func)(x_nd) - tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy()) - tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), 2 * x_nd.numpy()) + tvm.testing.assert_allclose(grad_x.numpy(), 2 * np.ones_like(grad_x.numpy())) def test_if(): @@ -393,9 +393,9 @@ def test_grad_tuple(): ex = create_executor() x = rand(dtype, *shape) (forward_four, forward_two), (grad,) = ex.evaluate(back_func)(x) - tvm.testing.assert_allclose(forward_four.asnumpy(), 4 * x.asnumpy()) - tvm.testing.assert_allclose(forward_two.asnumpy(), 2 * x.asnumpy()) - tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy())) + tvm.testing.assert_allclose(forward_four.numpy(), 4 * x.numpy()) + tvm.testing.assert_allclose(forward_two.numpy(), 2 * x.numpy()) + tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy())) def test_concat(): @@ -449,8 +449,8 @@ def test_global_function(): ex = create_executor(mod=m) x = rand(dtype, *shape) forward, (grad,) = ex.evaluate(back_func)(x) - tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy()) - tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy())) + tvm.testing.assert_allclose(forward.numpy(), 4 * x.numpy()) + tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy())) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_lazy_gradient_init.py b/tests/python/relay/test_pass_lazy_gradient_init.py index 9c85ac0a2242..f37856669306 100644 --- a/tests/python/relay/test_pass_lazy_gradient_init.py +++ b/tests/python/relay/test_pass_lazy_gradient_init.py @@ -68,7 +68,7 @@ def test_add(): ex = create_executor(mod=mod) x = rand(dtype, *shape) y = ex.evaluate(y)(x) - assert_allclose(y.asnumpy(), x.asnumpy() + x.asnumpy()) + assert_allclose(y.numpy(), x.numpy() + x.numpy()) def test_add_tuple(): @@ -95,7 +95,7 @@ def test_add_tuple(): ex = create_executor(mod=mod) x = (rand(dtype, *shape), rand(dtype, *shape)) y = ex.evaluate(y)(x) - assert_allclose(y.asnumpy(), x[0].asnumpy() + x[1].asnumpy()) + assert_allclose(y.numpy(), x[0].numpy() + x[1].numpy()) def test_mult(): @@ -120,7 +120,7 @@ def test_mult(): ex = create_executor(mod=mod) x = rand(dtype, *shape) y = ex.evaluate(y)(x) - assert_allclose(y.asnumpy(), x.asnumpy() * x.asnumpy()) + assert_allclose(y.numpy(), x.numpy() * x.numpy()) def test_ret_tuple(): @@ -146,8 +146,8 @@ def test_ret_tuple(): ex = create_executor(mod=mod) x = rand(dtype, *shape) y = ex.evaluate(func)(x) - assert_allclose(y[0].asnumpy(), x.asnumpy()) - 
assert_allclose(y[1].asnumpy(), x.asnumpy() * 2.0) + assert_allclose(y[0].numpy(), x.numpy()) + assert_allclose(y[1].numpy(), x.numpy() * 2.0) def test_add_broadcast(): @@ -170,8 +170,8 @@ def test_add_broadcast(): mod = transform.LazyGradientInit()(mod) func = mod["main"] - x1_np = rand(dtype, *shape1).asnumpy() - x2_np = rand(dtype, *shape2).asnumpy() + x1_np = rand(dtype, *shape1).numpy() + x2_np = rand(dtype, *shape2).numpy() expected_forward = x1_np + x2_np expected_forward_type = relay.TensorType(expected_forward.shape, dtype) @@ -180,7 +180,7 @@ def test_add_broadcast(): ex = create_executor(mod=mod) forward = ex.evaluate(func)(x1_np, x2_np) - assert_allclose(forward.asnumpy(), expected_forward) + assert_allclose(forward.numpy(), expected_forward) def test_reverse_ad_identity(): @@ -211,8 +211,8 @@ def test_reverse_ad_identity(): ex = create_executor(mod=mod) x = rand(dtype, *shape) (forward), (grad,) = ex.evaluate(back_func)(x) - assert_allclose(forward.asnumpy(), x.asnumpy()) - assert_allclose(grad.asnumpy(), np.ones_like(x.asnumpy())) + assert_allclose(forward.numpy(), x.numpy()) + assert_allclose(grad.numpy(), np.ones_like(x.numpy())) def test_multivar_reverse_ad(): @@ -246,9 +246,9 @@ def test_multivar_reverse_ad(): (forward), (grad_x, grad_y,) = ex.evaluate( back_func )(x, y) - assert_allclose(forward.asnumpy(), x.asnumpy() * y.asnumpy()) - assert_allclose(grad_x.asnumpy(), y.asnumpy()) - assert_allclose(grad_y.asnumpy(), x.asnumpy()) + assert_allclose(forward.numpy(), x.numpy() * y.numpy()) + assert_allclose(grad_x.numpy(), y.numpy()) + assert_allclose(grad_y.numpy(), x.numpy()) def test_partial_eval(): @@ -311,9 +311,9 @@ def test_after_partial_eval(): (forward), (grad_x, grad_y,) = ex.evaluate( back_func )(x, y) - assert_allclose(forward.asnumpy(), x.asnumpy() * y.asnumpy()) - assert_allclose(grad_x.asnumpy(), y.asnumpy()) - assert_allclose(grad_y.asnumpy(), x.asnumpy()) + assert_allclose(forward.numpy(), x.numpy() * y.numpy()) + assert_allclose(grad_x.numpy(), y.numpy()) + assert_allclose(grad_y.numpy(), x.numpy()) def test_before_partial_eval(): @@ -349,9 +349,9 @@ def test_before_partial_eval(): (forward), (grad_x, grad_y,) = ex.evaluate( back_func )(x, y) - assert_allclose(forward.asnumpy(), x.asnumpy() * y.asnumpy()) - assert_allclose(grad_x.asnumpy(), y.asnumpy()) - assert_allclose(grad_y.asnumpy(), x.asnumpy()) + assert_allclose(forward.numpy(), x.numpy() * y.numpy()) + assert_allclose(grad_x.numpy(), y.numpy()) + assert_allclose(grad_y.numpy(), x.numpy()) def test_zeros(): @@ -375,7 +375,7 @@ def test_zeros(): ex = create_executor(mod=mod) x = rand(dtype, *shape) y = ex.evaluate(y)(x) - assert_allclose(y.asnumpy(), x.asnumpy()) + assert_allclose(y.numpy(), x.numpy()) def test_ones(): @@ -399,7 +399,7 @@ def test_ones(): ex = create_executor(mod=mod) x = rand(dtype, *shape) y = ex.evaluate(y)(x) - assert_allclose(y.asnumpy(), x.asnumpy() + np.ones_like(x.asnumpy())) + assert_allclose(y.numpy(), x.numpy() + np.ones_like(x.numpy())) def test_zeros_like(): @@ -423,7 +423,7 @@ def test_zeros_like(): ex = create_executor(mod=mod) x = rand(dtype, *shape) y = ex.evaluate(y)(x) - assert_allclose(y.asnumpy(), x.asnumpy()) + assert_allclose(y.numpy(), x.numpy()) def test_ones_like(): @@ -447,7 +447,7 @@ def test_ones_like(): ex = create_executor(mod=mod) x = rand(dtype, *shape) y = ex.evaluate(y)(x) - assert_allclose(y.asnumpy(), x.asnumpy() + np.ones_like(x.asnumpy())) + assert_allclose(y.numpy(), x.numpy() + np.ones_like(x.numpy())) if __name__ == "__main__": diff --git 
a/tests/python/relay/test_pass_manager.py b/tests/python/relay/test_pass_manager.py index 5a29d1acd171..edd46168a286 100644 --- a/tests/python/relay/test_pass_manager.py +++ b/tests/python/relay/test_pass_manager.py @@ -177,14 +177,14 @@ def test_pass_run(): # Execute the add function. x_nd = get_rand(shape, dtype) y_nd = get_rand(shape, dtype) - ref_res = x_nd.asnumpy() + y_nd.asnumpy() + ref_res = x_nd.numpy() + y_nd.numpy() for target, dev in tvm.testing.enabled_targets(): exe1 = relay.create_executor("graph", device=dev, target=target) exe2 = relay.create_executor("debug", device=dev, target=target) res1 = exe1.evaluate(new_add)(x_nd, y_nd) - tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5) res2 = exe2.evaluate(new_add)(x_nd, y_nd) - tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5) test_pass_registration() test_pass_registration_no_decorator @@ -274,14 +274,14 @@ def test_pass_run(): # Execute the add function. x_nd = get_rand(shape, dtype) - ref_res = np.log(x_nd.asnumpy() * 2) + ref_res = np.log(x_nd.numpy() * 2) for target, dev in tvm.testing.enabled_targets(): exe1 = relay.create_executor("graph", device=dev, target=target) exe2 = relay.create_executor("debug", device=dev, target=target) res1 = exe1.evaluate(new_log)(x_nd) - tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5) res2 = exe2.evaluate(new_log)(x_nd) - tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5) test_pass_registration() test_pass_registration_no_decorator() @@ -436,25 +436,25 @@ def test_multiple_passes(): # Execute the updated subtract function. x_nd = get_rand(shape, dtype) y_nd = get_rand(shape, dtype) - ref_res = np.subtract(x_nd.asnumpy() * 2, y_nd.asnumpy() * 2) + ref_res = np.subtract(x_nd.numpy() * 2, y_nd.numpy() * 2) for target, dev in tvm.testing.enabled_targets(): exe1 = relay.create_executor("graph", device=dev, target=target) exe2 = relay.create_executor("debug", device=dev, target=target) res1 = exe1.evaluate(new_sub)(x_nd, y_nd) - tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5) res2 = exe2.evaluate(new_sub)(x_nd, y_nd) - tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5) # Execute the updated abs function. 
x_nd = get_rand((5, 10), dtype) - ref_res = np.abs(x_nd.asnumpy() * 2) + ref_res = np.abs(x_nd.numpy() * 2) for target, dev in tvm.testing.enabled_targets(): exe1 = relay.create_executor("graph", device=dev, target=target) exe2 = relay.create_executor("debug", device=dev, target=target) res1 = exe1.evaluate(new_abs)(x_nd) - tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5) res2 = exe2.evaluate(new_abs)(x_nd) - tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5) + tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5) test_pass_registration() test_no_pass() diff --git a/tests/python/relay/test_pass_partial_eval.py b/tests/python/relay/test_pass_partial_eval.py index 57286670f06d..129ac047cd89 100644 --- a/tests/python/relay/test_pass_partial_eval.py +++ b/tests/python/relay/test_pass_partial_eval.py @@ -34,7 +34,7 @@ def check_eval(expr, expected_result, mod=None, rtol=1e-07): intrp = create_executor(mod=mod, device=dev, target="llvm") result = intrp.evaluate(expr) - np.testing.assert_allclose(result.asnumpy(), expected_result, rtol=rtol) + np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol) def run_opt_pass(expr, passes): @@ -147,8 +147,8 @@ def test_if_ref(): ex = create_executor() f_res = ex.evaluate(f)(const(True)) pe_f_res = ex.evaluate(pe_f)(const(True)) - np.testing.assert_allclose(f_res.asnumpy(), 2 * np.ones_like(f_res.asnumpy())) - np.testing.assert_allclose(pe_f_res.asnumpy(), 2 * np.ones_like(pe_f_res.asnumpy())) + np.testing.assert_allclose(f_res.numpy(), 2 * np.ones_like(f_res.numpy())) + np.testing.assert_allclose(pe_f_res.numpy(), 2 * np.ones_like(pe_f_res.numpy())) def test_function_invalidate(): @@ -171,8 +171,8 @@ def test_function_invalidate(): ex = create_executor() f_res = ex.evaluate(f)(const(True)) pe_f_res = ex.evaluate(pe_f)(const(True)) - np.testing.assert_allclose(f_res.asnumpy(), np.ones_like(f_res.asnumpy())) - np.testing.assert_allclose(pe_f_res.asnumpy(), np.ones_like(pe_f_res.asnumpy())) + np.testing.assert_allclose(f_res.numpy(), np.ones_like(f_res.numpy())) + np.testing.assert_allclose(pe_f_res.numpy(), np.ones_like(pe_f_res.numpy())) def test_head_cons(): diff --git a/tests/python/relay/test_pass_partition_graph.py b/tests/python/relay/test_pass_partition_graph.py index 01a1e48f832a..4db8bd5e7b5b 100644 --- a/tests/python/relay/test_pass_partition_graph.py +++ b/tests/python/relay/test_pass_partition_graph.py @@ -206,7 +206,7 @@ def check_vm_result(): outs = outs if isinstance(outs, runtime.container.ADT) else [outs] results = result if isinstance(result, list) else [result] for out, ref in zip(outs, results): - tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), ref, rtol=tol, atol=tol) def check_graph_executor_result(): compile_engine.get().clear() @@ -226,7 +226,7 @@ def check_graph_executor_result(): for idx, shape in enumerate(out_shapes): out = tvm.nd.empty(shape, device=device) out = rt_mod.get_output(idx, out) - tvm.testing.assert_allclose(out.asnumpy(), results[idx], rtol=tol, atol=tol) + tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=tol, atol=tol) check_vm_result() check_graph_executor_result() @@ -459,7 +459,7 @@ def get_func(): ref_ex = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()) ref_res = ref_ex.evaluate()(i_data, w1_data) check_result( - mod, {"data": i_data, "weight1": w1_data}, (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5 + mod, 
{"data": i_data, "weight1": w1_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5 ) @@ -480,7 +480,7 @@ def test_extern_dnnl_mobilenet(): ref_res = ref_ex.evaluate()(i_data, **params) compile_engine.get().clear() - check_result(mod, {"data": i_data}, (1, 1000), ref_res.asnumpy(), tol=1e-5, params=params) + check_result(mod, {"data": i_data}, (1, 1000), ref_res.numpy(), tol=1e-5, params=params) def test_function_lifting(): @@ -923,7 +923,7 @@ def test_exec(mod, params, ref_mod, ref_params, out_shape): mod = get_partitoned_mod(mod, params, dnnl_patterns) - check_result(mod, {"data": i_data}, out_shape, ref_res.asnumpy(), tol=1e-5, params=params) + check_result(mod, {"data": i_data}, out_shape, ref_res.numpy(), tol=1e-5, params=params) test_partition() test_partition_mobilenet() @@ -1356,7 +1356,7 @@ def Optimize(mod): assert isinstance(t0.body, relay.Constant) expected = np.empty([2, 2]) expected.fill(2) - tvm.testing.assert_allclose(t0.body.data.asnumpy(), expected, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(t0.body.data.numpy(), expected, rtol=1e-5, atol=1e-5) if __name__ == "__main__": diff --git a/tests/python/relay/test_pass_to_a_normal_form.py b/tests/python/relay/test_pass_to_a_normal_form.py index e7aee5fae00b..61e5b8ea9407 100644 --- a/tests/python/relay/test_pass_to_a_normal_form.py +++ b/tests/python/relay/test_pass_to_a_normal_form.py @@ -40,7 +40,7 @@ def check_eval(expr, expected_result, mod=None, rtol=1e-07): intrp = create_executor(mod=mod, device=dev, target="llvm") result = intrp.evaluate(expr) - np.testing.assert_allclose(result.asnumpy(), expected_result, rtol=rtol) + np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol) def test_explicit_bound(): diff --git a/tests/python/relay/test_pass_to_basic_block_normal_form.py b/tests/python/relay/test_pass_to_basic_block_normal_form.py index 2085c8a2799c..d345d465c53e 100644 --- a/tests/python/relay/test_pass_to_basic_block_normal_form.py +++ b/tests/python/relay/test_pass_to_basic_block_normal_form.py @@ -42,7 +42,7 @@ def check_eval(expr, expected_result, mod=None, rtol=1e-07): intrp = create_executor(mod=mod, device=dev, target="llvm") result = intrp.evaluate(expr) - np.testing.assert_allclose(result.asnumpy(), expected_result, rtol=rtol) + np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol) def test_no_explicit_bind(): diff --git a/tests/python/relay/test_pass_to_cps.py b/tests/python/relay/test_pass_to_cps.py index 023bcb224d2b..0cde1d9ae492 100644 --- a/tests/python/relay/test_pass_to_cps.py +++ b/tests/python/relay/test_pass_to_cps.py @@ -61,7 +61,7 @@ def test_recursion(): ex = create_executor(mod=mod) i_nd = rand(dtype, *shape) forward = ex.evaluate()(i_nd) - tvm.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy()) + tvm.testing.assert_allclose(forward.numpy(), 8 * i_nd.numpy()) # This serve as an integration test. 
diff --git a/tests/python/relay/test_pass_to_graph_normal_form.py b/tests/python/relay/test_pass_to_graph_normal_form.py index f4c1a32b2566..4f5084d83f9c 100644 --- a/tests/python/relay/test_pass_to_graph_normal_form.py +++ b/tests/python/relay/test_pass_to_graph_normal_form.py @@ -37,7 +37,7 @@ def check_eval(expr, args, expected_result, mod=None, rtol=1e-07): intrp = create_executor(mod=mod, device=dev, target="llvm") result = intrp.evaluate(expr)(*args) - np.testing.assert_allclose(result.asnumpy(), expected_result, rtol=rtol) + np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol) def test_implicit_share(): diff --git a/tests/python/relay/test_prng.py b/tests/python/relay/test_prng.py index f622bfca5625..01515a93546b 100644 --- a/tests/python/relay/test_prng.py +++ b/tests/python/relay/test_prng.py @@ -36,11 +36,11 @@ def test_threefry_repeatability(target, dev): ).evaluate()() assert ( - out1.asnumpy() == out2.asnumpy() + out1.numpy() == out2.numpy() ).all(), "Generate on same seed should have the same output random numbers" assert ( - out_key1.asnumpy() == out_key2.asnumpy() + out_key1.numpy() == out_key2.numpy() ).all(), "Generate on same seed should have the same next keys" @@ -58,7 +58,7 @@ def test_threefry_split(target, dev): ).evaluate()() assert ( - out1.asnumpy() != out2.asnumpy() + out1.numpy() != out2.numpy() ).any(), "Generate after split should not have the same output" @@ -75,7 +75,7 @@ def test_threefry_sequential_generate(target, dev): ).evaluate()() assert ( - out1.asnumpy() != out2.asnumpy() + out1.numpy() != out2.numpy() ).any(), "Sequential generates should not have the same output" diff --git a/tests/python/relay/test_py_converter.py b/tests/python/relay/test_py_converter.py index dda7471bcd52..bd5635e8cf09 100644 --- a/tests/python/relay/test_py_converter.py +++ b/tests/python/relay/test_py_converter.py @@ -44,7 +44,7 @@ def init_box_adt(mod): # assert that the candidate is a NDArray with value val def assert_tensor_value(candidate, val): assert isinstance(candidate, tvm.nd.NDArray) - assert np.array_equal(candidate.asnumpy(), np.array(val)) + assert np.array_equal(candidate.numpy(), np.array(val)) # assert that the candidate is an ADT with the indicated number of fields @@ -605,7 +605,7 @@ def reference(x, gamma, beta, moving_mean, moving_var): # there will be a change in accuracy so we need to check # approximate equality assert isinstance(call_val, tvm.nd.NDArray) - tvm.testing.assert_allclose(call_val.asnumpy(), ref_res, atol=eps, rtol=eps) + tvm.testing.assert_allclose(call_val.numpy(), ref_res, atol=eps, rtol=eps) verify_batch_norm([(10, 20), (20,), (20,), (20,), (20,)]) verify_batch_norm([(20, 10), (10,), (10,), (10,), (10,)]) diff --git a/tests/python/relay/test_simplify_fc_transpose.py b/tests/python/relay/test_simplify_fc_transpose.py index fa5f332e6cd5..284950471c58 100644 --- a/tests/python/relay/test_simplify_fc_transpose.py +++ b/tests/python/relay/test_simplify_fc_transpose.py @@ -42,7 +42,7 @@ def run_func(func, params, x): m.run() # get outputs tvm_output = m.get_output(0) - return tvm_output.asnumpy() + return tvm_output.numpy() def test_simplify_fc_transpose(): diff --git a/tests/python/relay/test_sparse_conv2d_convert.py b/tests/python/relay/test_sparse_conv2d_convert.py index 671693cc5827..0af78fc033ac 100644 --- a/tests/python/relay/test_sparse_conv2d_convert.py +++ b/tests/python/relay/test_sparse_conv2d_convert.py @@ -43,7 +43,7 @@ def run_func(func, params, x): m.run() # get outputs tvm_output = m.get_output(0) - return 
tvm_output.asnumpy() + return tvm_output.numpy() def test_bsr_sparse_conv2d_nchw(): diff --git a/tests/python/relay/test_sparse_dense_convert.py b/tests/python/relay/test_sparse_dense_convert.py index 1efa813ebfb0..3ff31db2e995 100644 --- a/tests/python/relay/test_sparse_dense_convert.py +++ b/tests/python/relay/test_sparse_dense_convert.py @@ -64,7 +64,7 @@ def run_func(func, params, x): m.run() # get outputs tvm_output = m.get_output(0) - return tvm_output.asnumpy() + return tvm_output.numpy() def test_bsr_sparse_dense(): diff --git a/tests/python/relay/test_tensor_array.py b/tests/python/relay/test_tensor_array.py index 3b950b45f1e2..e93831bef95f 100644 --- a/tests/python/relay/test_tensor_array.py +++ b/tests/python/relay/test_tensor_array.py @@ -29,7 +29,7 @@ def vmobj_to_list(mod, o, dtype="float32"): _, tensor_nil, _, _, _, _, _, _, _ = mod.get_type(f"tensor_{dtype}_t") if isinstance(o, tvm.nd.NDArray): - return [o.asnumpy().tolist()] + return [o.numpy().tolist()] elif isinstance(o, tvm.runtime.container.ADT): if len(o) == 0: if tensor_nil.tag == o.tag: @@ -51,7 +51,7 @@ def vmobj_to_list(mod, o, dtype="float32"): elif "tensor_nil" in o.constructor.name_hint: return [0] elif "tensor" in o.constructor.name_hint: - return [o.fields[0].asnumpy()] + return [o.fields[0].numpy()] else: raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint) else: diff --git a/tests/python/relay/test_vm.py b/tests/python/relay/test_vm.py index 9f861a2e7b54..151e5ecc160b 100644 --- a/tests/python/relay/test_vm.py +++ b/tests/python/relay/test_vm.py @@ -48,7 +48,7 @@ def check_result(args, expected_result, mod=None): for target, dev in tvm.testing.enabled_targets(): vm = relay.create_executor("vm", device=dev, target=target, mod=mod) rts_result = vm.evaluate()(*args) - tvm.testing.assert_allclose(expected_result, rts_result.asnumpy()) + tvm.testing.assert_allclose(expected_result, rts_result.numpy()) def veval(f, *args, device=tvm.cpu(), target="llvm"): @@ -65,7 +65,7 @@ def veval(f, *args, device=tvm.cpu(), target="llvm"): def vmobj_to_list(o): if isinstance(o, tvm.nd.NDArray): - return [o.asnumpy().tolist()] + return [o.numpy().tolist()] elif isinstance(o, tvm.runtime.container.ADT): result = [] for f in o: @@ -88,7 +88,7 @@ def test_split(): for tgt, dev in tvm.testing.enabled_targets(): res = veval(f, x_data, device=dev, target=tgt) for i in range(3): - tvm.testing.assert_allclose(res[i].asnumpy(), ref_res[i]) + tvm.testing.assert_allclose(res[i].numpy(), ref_res[i]) @tvm.testing.uses_gpu @@ -103,7 +103,7 @@ def test_split_no_fuse(): ).astype("float32") for tgt, dev in tvm.testing.enabled_targets(): res = veval(f, x_data, device=dev, target=tgt) - tvm.testing.assert_allclose(res.asnumpy(), np.split(x_data, 3, axis=0)[0]) + tvm.testing.assert_allclose(res.numpy(), np.split(x_data, 3, axis=0)[0]) @tvm.testing.uses_gpu @@ -246,7 +246,7 @@ def test_count_loop(): mod["main"] = relay.Function([iarg], sum_up(iarg)) for tgt, dev in tvm.testing.enabled_targets(): result = veval(mod, i_data, device=dev, target=tgt) - tvm.testing.assert_allclose(result.asnumpy(), i_data) + tvm.testing.assert_allclose(result.numpy(), i_data) check_result([i_data], i_data, mod=mod) @@ -393,7 +393,7 @@ def test_compose(): x_data = np.array(np.random.rand()).astype("float32") for tgt, dev in tvm.testing.enabled_targets(): result = veval(mod, [x_data], device=dev, target=tgt) - tvm.testing.assert_allclose(result.asnumpy(), x_data + 2.0) + tvm.testing.assert_allclose(result.numpy(), x_data + 2.0) @tvm.testing.uses_gpu @@ 
-414,7 +414,7 @@ def test_list_hd(): for tgt, dev in tvm.testing.enabled_targets(): result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(result.asnumpy(), 3) + tvm.testing.assert_allclose(result.numpy(), 3) @pytest.mark.xfail @@ -473,7 +473,7 @@ def test_list_nth(): mod["main"] = f for tgt, dev in tvm.testing.enabled_targets(): result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(result.asnumpy(), expected[i]) + tvm.testing.assert_allclose(result.numpy(), expected[i]) @tvm.testing.uses_gpu @@ -523,7 +523,7 @@ def test_list_length(): mod["main"] = f for tgt, dev in tvm.testing.enabled_targets(): result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(result.asnumpy(), 10) + tvm.testing.assert_allclose(result.numpy(), 10) @tvm.testing.uses_gpu @@ -599,7 +599,7 @@ def test_list_sum(): mod["main"] = f for tgt, dev in tvm.testing.enabled_targets(): result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(result.asnumpy(), 6) + tvm.testing.assert_allclose(result.numpy(), 6) @tvm.testing.uses_gpu @@ -635,7 +635,7 @@ def test_closure(): main = clo(relay.const(2.0)) for tgt, dev in tvm.testing.enabled_targets(): res = veval(main, device=dev, target=tgt) - tvm.testing.assert_allclose(res.asnumpy(), 3.0) + tvm.testing.assert_allclose(res.numpy(), 3.0) @tvm.testing.uses_gpu @@ -801,7 +801,7 @@ def test_vm_reshape_tuple(x_shape=(1, 4, 2), y_shape=(1, 2, 10)): for tgt, dev in tvm.testing.enabled_targets(): res = veval(f, (x_data, y_data), device=dev, target=tgt) - tvm.testing.assert_allclose(res.asnumpy(), np.reshape(x_data, (1, -1))) + tvm.testing.assert_allclose(res.numpy(), np.reshape(x_data, (1, -1))) def test_constant_shape_with_external_codegen(): @@ -869,7 +869,7 @@ def test_vm_rpc(): # Invoke its "main" function. out = vm_factory.invoke("main", input_tensor) # Check the result. - np.testing.assert_allclose(out.asnumpy(), np_input + np_input) + np.testing.assert_allclose(out.numpy(), np_input + np_input) # delete tensors before the server shuts down so we don't throw errors. 
del input_tensor @@ -893,7 +893,7 @@ def test_get_output_single(): vm_factory.invoke_stateful("main", inp) outputs = vm_factory.get_outputs() assert len(outputs) == 1 - np.testing.assert_allclose(outputs[0].asnumpy(), inp + inp) + np.testing.assert_allclose(outputs[0].numpy(), inp + inp) def test_get_output_multiple(): @@ -911,8 +911,8 @@ def test_get_output_multiple(): vm_factory.invoke_stateful("main", inp) outputs = vm_factory.get_outputs() assert len(outputs) == 2 - np.testing.assert_allclose(outputs[0].asnumpy(), inp + inp) - np.testing.assert_allclose(outputs[1].asnumpy(), inp) + np.testing.assert_allclose(outputs[0].numpy(), inp + inp) + np.testing.assert_allclose(outputs[1].numpy(), inp) if __name__ == "__main__": diff --git a/tests/python/relay/test_vm_serialization.py b/tests/python/relay/test_vm_serialization.py index 36f97f815e6e..ef7d9111b84c 100644 --- a/tests/python/relay/test_vm_serialization.py +++ b/tests/python/relay/test_vm_serialization.py @@ -56,7 +56,7 @@ def run_network(mod, params, dtype="float32"): def get_vm_output(mod, data, params, target, device, dtype="float32"): ex = relay.create_executor("vm", mod=mod, device=device) result = ex.evaluate()(data, **params) - return result.asnumpy().astype(dtype) + return result.numpy().astype(dtype) data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape] data = np.random.uniform(size=data_shape).astype(dtype) @@ -67,7 +67,7 @@ def get_vm_output(mod, data, params, target, device, dtype="float32"): vm_out = get_serialized_output( mod, tvm.nd.array(data.astype(dtype)), params=params, target=target, device=dev ) - tvm.testing.assert_allclose(vm_out.asnumpy().astype(dtype), tvm_out, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(vm_out.numpy().astype(dtype), tvm_out, rtol=1e-5, atol=1e-5) def test_serializer(): @@ -143,7 +143,7 @@ def test_save_load(): des_vm = _vm.VirtualMachine(des_exec, tvm.cpu()) res = des_vm.run(x_data) - tvm.testing.assert_allclose(res.asnumpy(), x_data + x_data) + tvm.testing.assert_allclose(res.numpy(), x_data + x_data) def test_const(): @@ -152,7 +152,7 @@ def test_const(): f = relay.Function([x], x + c) x_data = np.random.rand(10, 10).astype("float32") res = get_serialized_output(f, x_data) - tvm.testing.assert_allclose(res.asnumpy(), x_data + 1) + tvm.testing.assert_allclose(res.numpy(), x_data + 1) def test_if(): @@ -166,11 +166,11 @@ def test_if(): # same res = get_serialized_output(f, x_data, x_data) - tvm.testing.assert_allclose(res.asnumpy(), x_data) + tvm.testing.assert_allclose(res.numpy(), x_data) # diff res = get_serialized_output(f, x_data, y_data) - tvm.testing.assert_allclose(res.asnumpy(), y_data) + tvm.testing.assert_allclose(res.numpy(), y_data) def test_loop(): @@ -196,7 +196,7 @@ def test_loop(): mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg)) result = get_serialized_output(mod, i_data, accum_data) - tvm.testing.assert_allclose(result.asnumpy(), sum(range(1, loop_bound + 1))) + tvm.testing.assert_allclose(result.numpy(), sum(range(1, loop_bound + 1))) def test_tuple(): @@ -207,7 +207,7 @@ def test_tuple(): j_data = np.random.rand(10).astype("float32") result = get_serialized_output(f, (i_data, j_data)) - tvm.testing.assert_allclose(result.asnumpy(), j_data) + tvm.testing.assert_allclose(result.numpy(), j_data) def test_adt_list(): @@ -226,9 +226,9 @@ def test_adt_list(): assert len(result[1]) == 2 assert len(result[1][1]) == 2 res = [] - res.append(result[0].asnumpy().tolist()) - res.append(result[1][0].asnumpy().tolist()) - 
res.append(result[1][1][0].asnumpy().tolist()) + res.append(result[0].numpy().tolist()) + res.append(result[1][0].numpy().tolist()) + res.append(result[1][1][0].numpy().tolist()) tvm.testing.assert_allclose(res, np.array([3, 2, 1])) @@ -263,7 +263,7 @@ def test_adt_compose(): x_data = np.array(np.random.rand()).astype("float32") result = get_serialized_output(mod, x_data) - tvm.testing.assert_allclose(result.asnumpy(), x_data + 2.0) + tvm.testing.assert_allclose(result.numpy(), x_data + 2.0) def test_closure(): @@ -275,7 +275,7 @@ def test_closure(): main = clo(relay.const(2.0)) res = get_serialized_output(main) - tvm.testing.assert_allclose(res.asnumpy(), 3.0) + tvm.testing.assert_allclose(res.numpy(), 3.0) def test_synthetic(): @@ -298,7 +298,7 @@ def test_vm_shape_of(): args.append(np.array((1, -1), dtype="int64")) main = relay.Function([x, newshape_var], relay.reshape(relu_x, newshape=newshape_var)) - res = get_serialized_output(main, *args).asnumpy() + res = get_serialized_output(main, *args).numpy() tvm.testing.assert_allclose(res.flatten(), data.flatten()) @@ -313,7 +313,7 @@ def test_dynamic_bcast(): res_np = np.add(x_data, y_data) for target, dev in testing.enabled_targets(): res = get_serialized_output(mod, *(x_data, y_data), target=target, device=dev) - tvm.testing.assert_allclose(res.asnumpy(), res_np) + tvm.testing.assert_allclose(res.numpy(), res_np) if __name__ == "__main__": diff --git a/tests/python/topi/python/test_fifo_buffer.py b/tests/python/topi/python/test_fifo_buffer.py index c2a4f8e7dd84..6668acdbc14f 100644 --- a/tests/python/topi/python/test_fifo_buffer.py +++ b/tests/python/topi/python/test_fifo_buffer.py @@ -58,7 +58,7 @@ def check_device(target, dev): out_tvm = tvm.nd.empty(shape=buffer_shape, device=dev, dtype=dtype) f = tvm.build(s, [data, buffer, out], target, name="fifo") f(data_tvm, buffer_tvm, out_tvm) - tvm.testing.assert_allclose(out_tvm.asnumpy(), out_np) + tvm.testing.assert_allclose(out_tvm.numpy(), out_np) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -177,9 +177,7 @@ def check_device(target, dev): conv2d(input_window_tvm, kernel_tvm, output_window_ref_tvm) # Incrementally updating the output window should be equivalent to computing it from # scratch using the input window - tvm.testing.assert_allclose( - output_window_tvm.asnumpy(), output_window_ref_tvm.asnumpy() - ) + tvm.testing.assert_allclose(output_window_tvm.numpy(), output_window_ref_tvm.numpy()) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) diff --git a/tests/python/topi/python/test_topi_argwhere.py b/tests/python/topi/python/test_topi_argwhere.py index 2b75dada3f1e..8592f57b74a4 100644 --- a/tests/python/topi/python/test_topi_argwhere.py +++ b/tests/python/topi/python/test_topi_argwhere.py @@ -57,7 +57,7 @@ def check_device(target): args.append(tvm.nd.empty(out.shape, device=dev, dtype=condition.dtype)) func(*args) np.set_printoptions(threshold=np.inf) - tvm.testing.assert_allclose(args[-1].asnumpy(), np.array(np_out)) + tvm.testing.assert_allclose(args[-1].numpy(), np.array(np_out)) for target, _ in tvm.testing.enabled_targets(): check_device(target) diff --git a/tests/python/topi/python/test_topi_batch_matmul.py b/tests/python/topi/python/test_topi_batch_matmul.py index e6edd53ba115..8c8ad37287dc 100644 --- a/tests/python/topi/python/test_topi_batch_matmul.py +++ b/tests/python/topi/python/test_topi_batch_matmul.py @@ -82,7 +82,7 @@ def check_device(target, dev): c = tvm.nd.array(np.zeros(get_const_tuple(out_shape), 
dtype=dtype), dev) f = tvm.build(s, [x, y, out], target, name="dense") f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): if dynamic and (target == "cuda" or target == "nvptx"): @@ -125,7 +125,7 @@ def check_device(device): c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out_dtype), dev) f = tvm.build(s, [x, y, out], device, name="batch_matmul_int8") f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for device in ["cuda"]: check_device(device) diff --git a/tests/python/topi/python/test_topi_batch_matmul_tensorcore.py b/tests/python/topi/python/test_topi_batch_matmul_tensorcore.py index 9712aa561d51..31a7e85113ab 100644 --- a/tests/python/topi/python/test_topi_batch_matmul_tensorcore.py +++ b/tests/python/topi/python/test_topi_batch_matmul_tensorcore.py @@ -58,7 +58,7 @@ def check_device(device): c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=dtype), dev) f = tvm.build(s, [x, y, out], device, name="dense") f(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-3) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3) check_device("cuda") diff --git a/tests/python/topi/python/test_topi_batch_to_space_nd.py b/tests/python/topi/python/test_topi_batch_to_space_nd.py index 7a8a813a196b..6ee99dacc61a 100644 --- a/tests/python/topi/python/test_topi_batch_to_space_nd.py +++ b/tests/python/topi/python/test_topi_batch_to_space_nd.py @@ -50,7 +50,7 @@ def check_device(target, dev): b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev) f = tvm.build(s, [A, B], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) diff --git a/tests/python/topi/python/test_topi_bitserial_conv2d.py b/tests/python/topi/python/test_topi_bitserial_conv2d.py index 4834b9069f9c..6b6e8a2c3fa7 100644 --- a/tests/python/topi/python/test_topi_bitserial_conv2d.py +++ b/tests/python/topi/python/test_topi_bitserial_conv2d.py @@ -78,7 +78,7 @@ def get_ref_data(): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) func = tvm.build(s, [A, W, B], "llvm") func(a, w, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) def verify_bitserial_conv2d_nhwc( @@ -132,7 +132,7 @@ def get_ref_data(): func = tvm.build(s, [A, W, B], "llvm") func(a, w, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) def test_bitserial_conv2d(): diff --git a/tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py b/tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py index 3aa9bd96774e..fbfb06f50cb4 100644 --- a/tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py +++ b/tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py @@ -98,7 +98,7 @@ def get_ref_data(): func = tvm.build(s, [A, W, B], device) func(a, w, b) - np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + np.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) def test_bitserial_conv2d(): diff --git a/tests/python/topi/python/test_topi_bitserial_dense.py b/tests/python/topi/python/test_topi_bitserial_dense.py index 1e68fddcede9..581de8ff98e5 100644 --- 
a/tests/python/topi/python/test_topi_bitserial_dense.py +++ b/tests/python/topi/python/test_topi_bitserial_dense.py @@ -74,7 +74,7 @@ def get_ref_data(a_shape, b_shape, input_dtype): c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev) func = tvm.build(s, [A, B, C], target) func(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) def test_bitserial_dense(): diff --git a/tests/python/topi/python/test_topi_bnn.py b/tests/python/topi/python/test_topi_bnn.py index 710489e41d45..57c24d0242a6 100644 --- a/tests/python/topi/python/test_topi_bnn.py +++ b/tests/python/topi/python/test_topi_bnn.py @@ -63,7 +63,7 @@ def get_ref_data(): f1(a, bnn_a) f2(b, bnn_b) f3(bnn_a, bnn_b, bnn_c) - tvm.testing.assert_allclose(bnn_c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(bnn_c.numpy(), c_np, rtol=1e-5) def test_binary_dense(): diff --git a/tests/python/topi/python/test_topi_broadcast.py b/tests/python/topi/python/test_topi_broadcast.py index 1abd2cfc5e50..d77ed2eae2e4 100644 --- a/tests/python/topi/python/test_topi_broadcast.py +++ b/tests/python/topi/python/test_topi_broadcast.py @@ -42,7 +42,7 @@ def check_target(target): data_nd = tvm.nd.array(data_npy, dev) out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), dev) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_target(target) @@ -103,7 +103,7 @@ def check_target(target): out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev) foo(lhs_nd, rhs_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy, rtol=1e-4, atol=1e-4) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4) for target, dev in tvm.testing.enabled_targets(): check_target(target) @@ -316,7 +316,7 @@ def check_target(target, dev): out_npy = f_numpy(indata) out_nd = tvm.nd.array(np.empty(data_npy.shape).astype(B.dtype), dev) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -354,7 +354,7 @@ def check_target(target, dev): out_npy = f_numpy(data_npy) out_nd = tvm.nd.array(np.empty(data_npy.shape).astype(B.dtype), dev) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -393,7 +393,7 @@ def check_target(target, dev): out_npy = f_numpy(lhs, rhs) out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev) foo(lhs_nd, rhs_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy, rtol=1e-4, atol=1e-4) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_clip.py b/tests/python/topi/python/test_topi_clip.py index b8d5321d40f4..21546e8b575d 100644 --- a/tests/python/topi/python/test_topi_clip.py +++ b/tests/python/topi/python/test_topi_clip.py @@ -48,7 +48,7 @@ def check_target(target, dev): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev) f = tvm.build(s, [A, B], target, name="clip") f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, 
rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_conv1d.py b/tests/python/topi/python/test_topi_conv1d.py index 4b1d71282484..f5284ca36fee 100644 --- a/tests/python/topi/python/test_topi_conv1d.py +++ b/tests/python/topi/python/test_topi_conv1d.py @@ -92,7 +92,7 @@ def check_target(target, dev): func = tvm.build(s, [A, W, B], target) func(a, w, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_conv1d_transpose_ncw.py b/tests/python/topi/python/test_topi_conv1d_transpose_ncw.py index bfb60a9168d9..81d3b3fd7f3f 100644 --- a/tests/python/topi/python/test_topi_conv1d_transpose_ncw.py +++ b/tests/python/topi/python/test_topi_conv1d_transpose_ncw.py @@ -71,8 +71,8 @@ def check_target(target, dev): func2 = tvm.build(s2, [A, W, C], target) func1(a, w, b) func2(a, w, c) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_conv2d_NCHWc.py b/tests/python/topi/python/test_topi_conv2d_NCHWc.py index 09bd17ab2a72..2298816d373a 100644 --- a/tests/python/topi/python/test_topi_conv2d_NCHWc.py +++ b/tests/python/topi/python/test_topi_conv2d_NCHWc.py @@ -160,7 +160,7 @@ def check_device(device): % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-3) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3) # test llvm only for now since conv2d_NCHWc implement is missing in other backend. 
for device in ["llvm"]: diff --git a/tests/python/topi/python/test_topi_conv2d_hwcn.py b/tests/python/topi/python/test_topi_conv2d_hwcn.py index 81fc09d1ea95..ab0cf741960d 100644 --- a/tests/python/topi/python/test_topi_conv2d_hwcn.py +++ b/tests/python/topi/python/test_topi_conv2d_hwcn.py @@ -85,9 +85,9 @@ def check_target(target): func1(a, w, conv_out) func2(a, w, b, bias_out) func3(a, w, b, relu_out) - tvm.testing.assert_allclose(conv_out.asnumpy(), c1_np, rtol=1e-5) - tvm.testing.assert_allclose(bias_out.asnumpy(), c2_np, rtol=1e-5) - tvm.testing.assert_allclose(relu_out.asnumpy(), c3_np, rtol=1e-5) + tvm.testing.assert_allclose(conv_out.numpy(), c1_np, rtol=1e-5) + tvm.testing.assert_allclose(bias_out.numpy(), c2_np, rtol=1e-5) + tvm.testing.assert_allclose(relu_out.numpy(), c3_np, rtol=1e-5) for target in ["cuda", "opencl", "metal", "rocm", "vulkan", "nvptx"]: check_target(target) diff --git a/tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py b/tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py index 1b35fe80adef..5448a54fae6b 100644 --- a/tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py +++ b/tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py @@ -131,7 +131,7 @@ def check_target(target): func(a, w, c) rtol = 1e-3 - tvm.testing.assert_allclose(c.asnumpy().transpose((2, 0, 1, 3)), c_np, rtol=rtol) + tvm.testing.assert_allclose(c.numpy().transpose((2, 0, 1, 3)), c_np, rtol=rtol) check_target("cuda") diff --git a/tests/python/topi/python/test_topi_conv2d_int8.py b/tests/python/topi/python/test_topi_conv2d_int8.py index 07f7895f47f7..66d280e85c75 100644 --- a/tests/python/topi/python/test_topi_conv2d_int8.py +++ b/tests/python/topi/python/test_topi_conv2d_int8.py @@ -220,7 +220,7 @@ def check_target(target): % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) check_target("llvm") @@ -333,7 +333,7 @@ def check_target(target): % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for target in ["cuda"]: check_target(target) @@ -452,7 +452,7 @@ def check_target(target): % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) verify_workload_padding() diff --git a/tests/python/topi/python/test_topi_conv2d_nchw.py b/tests/python/topi/python/test_topi_conv2d_nchw.py index 5aff6e807633..ab2ba51aa7b1 100644 --- a/tests/python/topi/python/test_topi_conv2d_nchw.py +++ b/tests/python/topi/python/test_topi_conv2d_nchw.py @@ -140,7 +140,7 @@ def check_target(target): % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-4) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4) for target, dev in tvm.testing.enabled_targets(): with autotvm.tophub.context(target): # load tophub pre-tuned parameters diff --git a/tests/python/topi/python/test_topi_conv2d_nhwc.py b/tests/python/topi/python/test_topi_conv2d_nhwc.py index 98a9387e8777..cdb7c0e8d4aa 100644 --- a/tests/python/topi/python/test_topi_conv2d_nhwc.py +++ b/tests/python/topi/python/test_topi_conv2d_nhwc.py @@ -73,7 +73,7 @@ 
def check_device(device): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) func = tvm.build(s, [A, W, B], device) func(a, w, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) for device in ["llvm", "cuda"]: check_device(device) diff --git a/tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py b/tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py index a191f2eb9d37..8b20961a8cdf 100644 --- a/tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py +++ b/tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py @@ -66,7 +66,7 @@ def check_device(device): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) func = tvm.build(s, [A, W, B], device) func(a, w, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) # for device in ['llvm -mcpu=skylake-avx512']: for device in ["llvm"]: diff --git a/tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py b/tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py index 5761dccf48fc..14a9dca12522 100644 --- a/tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py +++ b/tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py @@ -126,7 +126,7 @@ def check_device(device): func(a, w, c) rtol = 1e-3 - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=rtol) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol) check_device(devices) diff --git a/tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py b/tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py index cb1fd3d233fa..3a4b99a00dce 100644 --- a/tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py +++ b/tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py @@ -132,7 +132,7 @@ def check_device(device): ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=2e-3) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=2e-3) check_device(devices) diff --git a/tests/python/topi/python/test_topi_conv2d_transpose_nchw.py b/tests/python/topi/python/test_topi_conv2d_transpose_nchw.py index 95159a05d54f..05fd4639155a 100644 --- a/tests/python/topi/python/test_topi_conv2d_transpose_nchw.py +++ b/tests/python/topi/python/test_topi_conv2d_transpose_nchw.py @@ -83,8 +83,8 @@ def check(fcompute, fschedule, target, dev): func2 = tvm.build(s2, [A, W, C], target) func1(a, w, b) func2(a, w, c) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) def check_generic(target, dev): print("Running generic on target: %s" % target) diff --git a/tests/python/topi/python/test_topi_conv2d_winograd.py b/tests/python/topi/python/test_topi_conv2d_winograd.py index c91447f1096f..82368f118f32 100644 --- a/tests/python/topi/python/test_topi_conv2d_winograd.py +++ b/tests/python/topi/python/test_topi_conv2d_winograd.py @@ -122,7 +122,7 @@ def check_device(device): func(a, w, c) rtol = 1e-3 - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=rtol) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol) for device in devices: check_device(device) @@ -218,7 +218,7 @@ def get_ref_data(): func(a, w, c) rtol = 1e-3 - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=rtol) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol) def test_conv2d_nhwc(): diff --git 
a/tests/python/topi/python/test_topi_conv3d_ncdhw.py b/tests/python/topi/python/test_topi_conv3d_ncdhw.py index e28aea5bc4f6..056ef7fc146a 100644 --- a/tests/python/topi/python/test_topi_conv3d_ncdhw.py +++ b/tests/python/topi/python/test_topi_conv3d_ncdhw.py @@ -116,7 +116,7 @@ def check_target(target, dev): % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-4) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4) for target, dev in tvm.testing.enabled_targets(): with autotvm.tophub.context(target): # load tophub pre-tuned parameters diff --git a/tests/python/topi/python/test_topi_conv3d_ndhwc.py b/tests/python/topi/python/test_topi_conv3d_ndhwc.py index e5791c3bb482..a9437645b7bb 100644 --- a/tests/python/topi/python/test_topi_conv3d_ndhwc.py +++ b/tests/python/topi/python/test_topi_conv3d_ndhwc.py @@ -76,7 +76,7 @@ def check_target(target, dev): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) func = tvm.build(s, [A, W, B], target) func(a, w, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py b/tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py index 8340ff27fddf..c8c54e4c3e1f 100644 --- a/tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py +++ b/tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py @@ -122,7 +122,7 @@ def check_device(device): func(a, w, c) rtol = 1e-3 - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=rtol) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol) check_device(devices) diff --git a/tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py b/tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py index fadbc8d48403..87b8d1f84637 100644 --- a/tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py +++ b/tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py @@ -87,8 +87,8 @@ def check_target(target, dev): func2 = tvm.build(s2, [A, W, C], target) func1(a, w, b) func2(a, w, c) - tvm.testing.assert_allclose(b.asnumpy(), b_np, atol=1e-4, rtol=1e-4) - tvm.testing.assert_allclose(c.asnumpy(), c_np, atol=1e-4, rtol=1e-4) + tvm.testing.assert_allclose(b.numpy(), b_np, atol=1e-4, rtol=1e-4) + tvm.testing.assert_allclose(c.numpy(), c_np, atol=1e-4, rtol=1e-4) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_conv3d_winograd.py b/tests/python/topi/python/test_topi_conv3d_winograd.py index 650ead3ff009..0b9d579287c3 100644 --- a/tests/python/topi/python/test_topi_conv3d_winograd.py +++ b/tests/python/topi/python/test_topi_conv3d_winograd.py @@ -138,7 +138,7 @@ def check_device(device): ), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-4) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4) for device in ["cuda"]: with autotvm.tophub.context(device): # load tophub pre-tuned parameters diff --git a/tests/python/topi/python/test_topi_correlation.py b/tests/python/topi/python/test_topi_correlation.py index 33df5a1a00f4..e6323065d9be 100644 --- a/tests/python/topi/python/test_topi_correlation.py +++ b/tests/python/topi/python/test_topi_correlation.py @@ -80,7 +80,7 @@ def check_device(target, dev): func = tvm.build(s, [A, B, C], target) func(a, b, c) - 
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) diff --git a/tests/python/topi/python/test_topi_deformable_conv2d.py b/tests/python/topi/python/test_topi_deformable_conv2d.py index 20df09f30d12..70cc9a690cdc 100644 --- a/tests/python/topi/python/test_topi_deformable_conv2d.py +++ b/tests/python/topi/python/test_topi_deformable_conv2d.py @@ -110,7 +110,7 @@ def check_device(device): func = tvm.build(s, [A, Offset, W, C], device) func(a, offset, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for device in ["llvm", "cuda"]: check_device(device) @@ -190,7 +190,7 @@ def check_device(device): func = tvm.build(s, [A, Offset, W, C], device) func(a, offset, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for device in ["llvm"]: check_device(device) diff --git a/tests/python/topi/python/test_topi_dense.py b/tests/python/topi/python/test_topi_dense.py index fa966b6f00e5..07301fad822c 100644 --- a/tests/python/topi/python/test_topi_dense.py +++ b/tests/python/topi/python/test_topi_dense.py @@ -77,7 +77,7 @@ def check_device(device, dev): d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev) f = tvm.build(s, [A, B, C, D], device, name="dense") f(a, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-5) + tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-5) for device, dev in tvm.testing.enabled_targets(): check_device(device, dev) @@ -122,7 +122,7 @@ def check_device(device): d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=out_dtype), dev) f = tvm.build(s, [A, B, C, D], device, name="dense") f(a, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-5) + tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-5) for device in ["cuda"]: check_device(device) diff --git a/tests/python/topi/python/test_topi_dense_tensorcore.py b/tests/python/topi/python/test_topi_dense_tensorcore.py index 4ea3202569cb..a3657af2c1ca 100644 --- a/tests/python/topi/python/test_topi_dense_tensorcore.py +++ b/tests/python/topi/python/test_topi_dense_tensorcore.py @@ -65,7 +65,7 @@ def check_device(device): d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev) f = tvm.build(s, [A, B, C, D], device, name="dense") f(a, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-3) + tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-3) check_device("cuda") diff --git a/tests/python/topi/python/test_topi_depth_to_space.py b/tests/python/topi/python/test_topi_depth_to_space.py index 93731868e922..dae5d21b363b 100644 --- a/tests/python/topi/python/test_topi_depth_to_space.py +++ b/tests/python/topi/python/test_topi_depth_to_space.py @@ -59,7 +59,7 @@ def check_device(device, dev): b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev) f = tvm.build(s, [A, B], device) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3) for device, dev in tvm.testing.enabled_targets(): check_device(device, dev) diff --git a/tests/python/topi/python/test_topi_depthwise_conv2d.py b/tests/python/topi/python/test_topi_depthwise_conv2d.py index 91ee1ee02fe4..fe80bb76d63e 100644 --- a/tests/python/topi/python/test_topi_depthwise_conv2d.py +++ 
b/tests/python/topi/python/test_topi_depthwise_conv2d.py @@ -248,10 +248,10 @@ def verify_workload_padding(): timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1) tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean tvm.testing.assert_allclose( - depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5 + depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5 ) - tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5) - tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5) + tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5) + tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): with autotvm.tophub.context(target): # load tophub pre-tuned parameters @@ -374,11 +374,9 @@ def get_ref_data(): timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1) tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean relu_scipy = np.maximum(scale_shift_scipy, 0) - tvm.testing.assert_allclose( - depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5 - ) - tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5) - tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5) + tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5) + tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5) + tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): with autotvm.tophub.context(target): # load tophub pre-tuned parameters @@ -509,10 +507,8 @@ def get_ref_data(): f1(input_tvm, filter_tvm, depthwise_conv2d_tvm) # launch kernel 2 (depthwise_conv2d + relu) f2(input_tvm, filter_tvm, relu_tvm) - tvm.testing.assert_allclose( - depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5 - ) - tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5) + tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5) + tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5) # test llvm only for now since depthwise_conv2d_NCHWc implement is missing in other backend. 
for target in ["llvm"]: diff --git a/tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py b/tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py index d8c8f0e195c4..b0a263172010 100644 --- a/tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py +++ b/tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py @@ -123,7 +123,7 @@ def get_ref_data(): # launch the kernel timer = f.time_evaluator(f.entry_name, dev, number=1) tcost = timer(filter_tvm, out_grad_tvm, in_grad_tvm).mean - tvm.testing.assert_allclose(in_grad_np, in_grad_tvm.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(in_grad_np, in_grad_tvm.numpy(), rtol=1e-5) check_device("opencl") check_device("cuda") diff --git a/tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py b/tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py index daf7b5c82d41..8e30ed6840e3 100644 --- a/tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py +++ b/tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py @@ -107,7 +107,7 @@ def get_ref_data(): # launch the kernel timer = f.time_evaluator(f.entry_name, dev, number=1) tcost = timer(input_tvm, out_grad_tvm, weight_grad_tvm).mean - tvm.testing.assert_allclose(weight_grad_np, weight_grad_tvm.asnumpy(), rtol=1e-4) + tvm.testing.assert_allclose(weight_grad_np, weight_grad_tvm.numpy(), rtol=1e-4) check_device("opencl") check_device("cuda") diff --git a/tests/python/topi/python/test_topi_dilate.py b/tests/python/topi/python/test_topi_dilate.py index c09bcc0deaa6..4a89926919e9 100644 --- a/tests/python/topi/python/test_topi_dilate.py +++ b/tests/python/topi/python/test_topi_dilate.py @@ -43,7 +43,7 @@ def _test_dilate(input_size, strides, dilation_value=None): output_tvm = tvm.nd.array(np.zeros(shape=output_size).astype(Output.dtype), device=dev) f = tvm.build(schedule, [Input, Output], target) f(input_tvm, output_tvm) - tvm.testing.assert_allclose(output_tvm.asnumpy(), output_np, rtol=1e-5) + tvm.testing.assert_allclose(output_tvm.numpy(), output_np, rtol=1e-5) _test_dilate((32,), (2,)) _test_dilate((32, 32), (2, 2)) diff --git a/tests/python/topi/python/test_topi_einsum.py b/tests/python/topi/python/test_topi_einsum.py index 35de9306deaf..994d5438e661 100644 --- a/tests/python/topi/python/test_topi_einsum.py +++ b/tests/python/topi/python/test_topi_einsum.py @@ -38,7 +38,7 @@ def with_tvm(lam, *args): s = te.create_schedule([out.op]) m = tvm.build(s, pls + [out], "llvm") m(*(vals_nd + [out_nd])) - return out_nd.asnumpy() + return out_nd.numpy() def verify_einsum(subscripts, shapes): diff --git a/tests/python/topi/python/test_topi_group_conv2d.py b/tests/python/topi/python/test_topi_group_conv2d.py index 7f4803b9a8cd..e5a2fe7f28ab 100644 --- a/tests/python/topi/python/test_topi_group_conv2d.py +++ b/tests/python/topi/python/test_topi_group_conv2d.py @@ -147,7 +147,7 @@ def check_target(target): ), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for target in ["llvm", "cuda"]: check_target(target) @@ -273,7 +273,7 @@ def check_target(target): ), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for target in ["cuda"]: check_target(target) @@ -386,7 +386,7 @@ def check_target(target): ), ) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) for target in ["llvm"]: 
check_target(target) diff --git a/tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py b/tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py index e69a3094ac4d..dba2e4e05817 100644 --- a/tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py +++ b/tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py @@ -143,7 +143,7 @@ def check_device(device): ) # print(tvm.lower(s, [A, W, C], simple_mode=True)) func(a, w, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-3) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3) # for device in ["llvm"]: for device in ["llvm -mcpu=skylake-avx512"]: diff --git a/tests/python/topi/python/test_topi_image.py b/tests/python/topi/python/test_topi_image.py index 7ca46a375906..2730783907fd 100644 --- a/tests/python/topi/python/test_topi_image.py +++ b/tests/python/topi/python/test_topi_image.py @@ -75,7 +75,7 @@ def check_target(target, dev): f = tvm.build(s, [A, B], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -158,7 +158,7 @@ def check_target(target, dev): f = tvm.build(s, [A, B], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -232,7 +232,7 @@ def check_target(target, dev): f = tvm.build(s, [images, boxes, box_ind, out], target, name="crop_and_resize") f(tvm_images, tvm_boxes, tvm_indices, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), baseline_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(tvm_out.numpy(), baseline_np, rtol=1e-3, atol=1e-3) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -277,7 +277,7 @@ def check_target(target, dev): f = tvm.build(s, [data, out], target) f(tvm_data, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), out_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(tvm_out.numpy(), out_np, rtol=1e-5, atol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -314,7 +314,7 @@ def check_target(target, dev): f = tvm.build(s, [data, grid, out], target) f(tvm_data, tvm_grid, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), out_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(tvm_out.numpy(), out_np, rtol=1e-5, atol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_lrn.py b/tests/python/topi/python/test_topi_lrn.py index 203680b14781..f9fb7dbd4ec4 100644 --- a/tests/python/topi/python/test_topi_lrn.py +++ b/tests/python/topi/python/test_topi_lrn.py @@ -55,7 +55,7 @@ def check_device(device): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev) f = tvm.build(s, [A, B], device) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan", "nvptx"]: check_device(device) diff --git a/tests/python/topi/python/test_topi_math.py b/tests/python/topi/python/test_topi_math.py index e2d978190877..c7f80033bdf3 100644 --- a/tests/python/topi/python/test_topi_math.py +++ b/tests/python/topi/python/test_topi_math.py @@ -66,7 +66,7 @@ def check_target(target, dev): a = tvm.nd.array(a_np, dev) b = 
tvm.nd.array(np.zeros_like(b_np), dev) foo(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -102,7 +102,7 @@ def check_target(target, dev): a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros_like(b_np), dev) foo(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -132,7 +132,7 @@ def check_target(target, dev): a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros_like(b_np), dev) foo(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -185,7 +185,7 @@ def verify(from_dtype, to_dtype, low=-100, high=100): a = tvm.nd.array(a_np, dev) b = tvm.nd.empty(shape=shape, dtype=to_dtype, device=dev) foo(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np) + tvm.testing.assert_allclose(b.numpy(), b_np) verify("int32", "float32") verify("int32", "float64") @@ -216,7 +216,7 @@ def check_target(target): a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros_like(b_np), dev) func(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5) check_target("llvm") check_target("llvm -device=arm-cpu") diff --git a/tests/python/topi/python/test_topi_matmul.py b/tests/python/topi/python/test_topi_matmul.py index b0d71b315add..e5a21a3ad3b7 100644 --- a/tests/python/topi/python/test_topi_matmul.py +++ b/tests/python/topi/python/test_topi_matmul.py @@ -38,7 +38,7 @@ def with_tvm(lam, *args): s = te.create_schedule([out.op]) m = tvm.build(s, pls + [out], "llvm") m(*(vals_nd + [out_nd])) - return out_nd.asnumpy() + return out_nd.numpy() def verify_matmul(sa, sb, transp_a, transp_b): diff --git a/tests/python/topi/python/test_topi_pooling.py b/tests/python/topi/python/test_topi_pooling.py index 6d4bd71642b6..57877e3d202c 100644 --- a/tests/python/topi/python/test_topi_pooling.py +++ b/tests/python/topi/python/test_topi_pooling.py @@ -116,7 +116,7 @@ def check_target(target, dev): pool_grad = tvm.nd.array(np.zeros(get_const_tuple(PoolGrad.shape), dtype=dtype), dev) f = tvm.build(s, [A, OutGrad, PoolGrad], target) f(a, out_grad, pool_grad) - tvm.testing.assert_allclose(pool_grad.asnumpy(), pool_grad_np, rtol=1e-5) + tvm.testing.assert_allclose(pool_grad.numpy(), pool_grad_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -174,7 +174,7 @@ def check_target(target, dev): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) f = tvm.build(s, [A, B], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -218,7 +218,7 @@ def check_target(target, dev): b = tvm.nd.array(np.zeros(get_const_tuple(oshape), dtype=out.dtype), dev) f = tvm.build(s, [data, out], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np_out, rtol=4e-5, atol=1e-6) + tvm.testing.assert_allclose(b.numpy(), np_out, rtol=4e-5, atol=1e-6) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -329,7 +329,7 @@ def 
check_target(target, dev): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev) f = tvm.build(s, [A, B], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), ref_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), ref_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_prng.py b/tests/python/topi/python/test_topi_prng.py index 9189fa792898..1be32e6ea1d1 100644 --- a/tests/python/topi/python/test_topi_prng.py +++ b/tests/python/topi/python/test_topi_prng.py @@ -29,7 +29,7 @@ def threefry_split(target, dev, gen): left = tvm.nd.array(np.zeros(gen.shape, dtype="uint64")) right = tvm.nd.array(np.zeros(gen.shape, dtype="uint64")) f(tvm.nd.array(gen), left, right) - return left.asnumpy(), right.asnumpy() + return left.numpy(), right.numpy() def threefry_generate(target, dev, gen, size): @@ -40,7 +40,7 @@ def threefry_generate(target, dev, gen, size): out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64")) rands = tvm.nd.array(np.zeros(size, dtype="uint64")) f(tvm.nd.array(gen), out_gen, rands) - return out_gen.asnumpy(), rands.asnumpy() + return out_gen.numpy(), rands.numpy() def uniform(target, dev, gen, low, high, size, dtype): @@ -63,7 +63,7 @@ def uniform(target, dev, gen, low, high, size, dtype): @tvm.testing.parametrize_targets def test_threefry_split(target, dev): # test that results of split do not equal eachother or the input - gen = tvm.relay.random.threefry_key(0).data.asnumpy() + gen = tvm.relay.random.threefry_key(0).data.numpy() a, b = threefry_split(target, dev, gen) assert (a != b).any() and ( a != gen @@ -101,7 +101,7 @@ def test_threefry_split(target, dev): @tvm.testing.parametrize_targets def test_threefry_generate(target, dev): - gen = tvm.relay.random.threefry_key(0).data.asnumpy() + gen = tvm.relay.random.threefry_key(0).data.numpy() # check that we can generate some data a, rands = threefry_generate(target, dev, gen, (2048,)) diff --git a/tests/python/topi/python/test_topi_qnn.py b/tests/python/topi/python/test_topi_qnn.py index 995cfd2df666..12c868029b27 100644 --- a/tests/python/topi/python/test_topi_qnn.py +++ b/tests/python/topi/python/test_topi_qnn.py @@ -72,7 +72,7 @@ def check_target(target, dev): func(a, d, s, z, q) # Check correctness against the true qnn output. - mismatch = q.asnumpy() != real_q_out.asnumpy().astype("float32") + mismatch = q.numpy() != real_q_out.numpy().astype("float32") # Allow some rounding errors due to GPU fp32 arithmetic. assert np.sum(mismatch) <= 3 @@ -139,9 +139,7 @@ def check_target(target, dev): func(a, d, s, z, dq) # Check correctness against the true qnn output. 
- tvm.testing.assert_allclose( - dq.asnumpy(), real_dq_out.asnumpy().astype("float32"), rtol=1e-5 - ) + tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype("float32"), rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_reduce.py b/tests/python/topi/python/test_topi_reduce.py index c6de8d7c7f4d..07656032f878 100644 --- a/tests/python/topi/python/test_topi_reduce.py +++ b/tests/python/topi/python/test_topi_reduce.py @@ -103,7 +103,7 @@ def check_device(device, dev): for _ in range(1): foo(data_tvm, out_tvm) if type == "argmax" or type == "argmin": - out_tvm_indices = out_tvm.asnumpy() + out_tvm_indices = out_tvm.numpy() if keepdims: out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis) if axis is None: @@ -117,7 +117,7 @@ def check_device(device, dev): elif type == "argmin": tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1e-3, 1e-3) else: - tvm.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1e-3, 1e-3) + tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3) for device, dev in tvm.testing.enabled_targets(): check_device(device, dev) @@ -174,7 +174,7 @@ def test_complex_reduce(): data_tvm = tvm.nd.array(in_npy, device=dev) out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=dtype) foo(data_tvm, out_tvm) - tvm.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1e-3, 1e-3) + tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3) if __name__ == "__main__": diff --git a/tests/python/topi/python/test_topi_relu.py b/tests/python/topi/python/test_topi_relu.py index 947a6ca007c8..3dc6e7de8069 100644 --- a/tests/python/topi/python/test_topi_relu.py +++ b/tests/python/topi/python/test_topi_relu.py @@ -46,7 +46,7 @@ def check_target(target, dev): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) foo = tvm.build(s, [A, B], target, name="relu") foo(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -64,7 +64,7 @@ def verify_leaky_relu(m, alpha): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) foo = tvm.build(s, [A, B], "llvm", name="leaky_relu") foo(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) def verify_prelu(x, w, axis, weight_reshape): @@ -87,7 +87,7 @@ def _prelu_numpy(x, W): foo = tvm.build(s, [X, W, B], "llvm", name="prelu") foo(x_tvm, w_tvm, b) out_np = _prelu_numpy(x_np, w_np) - tvm.testing.assert_allclose(b.asnumpy(), out_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), out_np, rtol=1e-5) @tvm.testing.uses_gpu diff --git a/tests/python/topi/python/test_topi_reorg.py b/tests/python/topi/python/test_topi_reorg.py index e26a05287e05..f41b4b740bec 100644 --- a/tests/python/topi/python/test_topi_reorg.py +++ b/tests/python/topi/python/test_topi_reorg.py @@ -60,7 +60,7 @@ def check_device(device): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) func = tvm.build(s, [A, B], device) func(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) for device in ["llvm", "cuda"]: check_device(device) diff --git a/tests/python/topi/python/test_topi_softmax.py b/tests/python/topi/python/test_topi_softmax.py index 84fa0d24e434..8af038a1f7ce 100644 --- 
a/tests/python/topi/python/test_topi_softmax.py +++ b/tests/python/topi/python/test_topi_softmax.py @@ -44,7 +44,7 @@ def check_target(A, B, a_np, b_np, target, dev, name): b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) f = tvm.build(s, [A, B], target, name=name) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) def verify_softmax(m, n, dtype="float32"): diff --git a/tests/python/topi/python/test_topi_sort.py b/tests/python/topi/python/test_topi_sort.py index c52dc8d3929a..65b2ae590308 100644 --- a/tests/python/topi/python/test_topi_sort.py +++ b/tests/python/topi/python/test_topi_sort.py @@ -73,7 +73,7 @@ def check_target(target): tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev) f = tvm.build(s, [data, out], target) f(tvm_data, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), np_sort, rtol=1e0) + tvm.testing.assert_allclose(tvm_out.numpy(), np_sort, rtol=1e0) for target in ["llvm", "cuda", "opencl", "vulkan", "nvptx"]: check_target(target) @@ -113,7 +113,7 @@ def check_target(target): tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev) f = tvm.build(s, [data, out], target) f(tvm_data, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), np_indices.astype(data_dtype), rtol=1e0) + tvm.testing.assert_allclose(tvm_out.numpy(), np_indices.astype(data_dtype), rtol=1e0) for target in ["llvm", "cuda", "opencl", "vulkan", "nvptx"]: check_target(target) @@ -160,12 +160,12 @@ def check_target(target): f = tvm.build(s, [data] + outs, target) f(tvm_data, *tvm_res) if ret_type == "both": - tvm.testing.assert_allclose(tvm_res[0].asnumpy(), np_values) - tvm.testing.assert_allclose(tvm_res[1].asnumpy(), np_indices) + tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values) + tvm.testing.assert_allclose(tvm_res[1].numpy(), np_indices) elif ret_type == "values": - tvm.testing.assert_allclose(tvm_res[0].asnumpy(), np_values) + tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values) else: - tvm.testing.assert_allclose(tvm_res[0].asnumpy(), np_indices) + tvm.testing.assert_allclose(tvm_res[0].numpy(), np_indices) for target in ["llvm", "cuda", "opencl", "vulkan", "nvptx"]: check_target(target) diff --git a/tests/python/topi/python/test_topi_space_to_batch_nd.py b/tests/python/topi/python/test_topi_space_to_batch_nd.py index 21654dd9f084..039f91aa059e 100644 --- a/tests/python/topi/python/test_topi_space_to_batch_nd.py +++ b/tests/python/topi/python/test_topi_space_to_batch_nd.py @@ -50,7 +50,7 @@ def check_target(target, dev): b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev) f = tvm.build(s, [A, B], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_space_to_depth.py b/tests/python/topi/python/test_topi_space_to_depth.py index 7fd49dc363cb..ddd7daf4237d 100644 --- a/tests/python/topi/python/test_topi_space_to_depth.py +++ b/tests/python/topi/python/test_topi_space_to_depth.py @@ -57,7 +57,7 @@ def check_device(device, dev): b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev) f = tvm.build(s, [A, B], device) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3, atol=1e-3) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3) for device, dev in tvm.testing.enabled_targets(): check_device(device, dev) 
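The hunks above and below all apply the same mechanical substitution: calls to tvm.nd.NDArray.asnumpy() are replaced with the equivalent numpy() method before the result is compared against a NumPy reference. The following is a minimal sketch of the before/after pattern, assuming a stock llvm-enabled TVM build; the add-one kernel is illustrative only and is not taken from any of the patched tests.

import numpy as np
import tvm
import tvm.testing
from tvm import te

# Build a trivial "add one" kernel, the same shape of check used throughout this patch.
n = 16
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute((n,), lambda i: A[i] + 1.0, name="B")
f = tvm.build(te.create_schedule(B.op), [A, B], "llvm")

dev = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=n).astype("float32"), dev)
b = tvm.nd.array(np.zeros(n, dtype="float32"), dev)
f(a, b)

# Old spelling removed by this patch:  b.asnumpy()
# New spelling used throughout:        b.numpy()
tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1.0, rtol=1e-5)

If a given TVM build still exposes asnumpy() as an alias, both spellings return the same NumPy array; the tests here simply standardize on numpy().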
diff --git a/tests/python/topi/python/test_topi_sparse.py b/tests/python/topi/python/test_topi_sparse.py index a2aa8fdd9805..003b89f7122a 100644 --- a/tests/python/topi/python/test_topi_sparse.py +++ b/tests/python/topi/python/test_topi_sparse.py @@ -75,7 +75,7 @@ def check_device(device): assert a.indptr.dtype == A.indptr.dtype f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmv") f(_nr, a.data, a.indices, a.indptr, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4) + tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4) for device in ["llvm"]: check_device(device) @@ -119,7 +119,7 @@ def check_device(device): f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmm") f(_nr, a.data, a.indices, a.indptr, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-2, atol=1e-2) + tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-2, atol=1e-2) for device in ["llvm"]: check_device(device) @@ -163,7 +163,7 @@ def check_device(device): d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev) f = tvm.build(s, [A.data, A.indices, A.indptr, B, C, D], device, name="dense") f(a.data, a.indices, a.indptr, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4) + tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4) check_device("llvm") @@ -206,7 +206,7 @@ def check_device(device): d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev) f = tvm.build(s, [A, B.data, B.indices, B.indptr, C, D], device, name="dense") f(a, b.data, b.indices, b.indptr, c, d) - tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-4, atol=1e-4) + tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4) check_device("llvm") @@ -269,7 +269,7 @@ def test_sparse_dense_csr(): tvm.nd.array(W_sp_np.indptr), Y_tvm, ) - tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-4, rtol=1e-4) + tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4) def test_sparse_dense_csr_reverse(): @@ -294,7 +294,7 @@ def test_sparse_dense_csr_reverse(): tvm.nd.array(W_sp_np.indptr), Y_tvm, ) - tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-4, rtol=1e-4) + tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4) def test_sparse_transpose_csr(): @@ -327,7 +327,7 @@ def test_sparse_transpose_csr(): ) X_T_out = sp.csr_matrix( - (X_T_data_tvm.asnumpy(), X_T_indices_tvm.asnumpy(), X_T_indptr_tvm.asnumpy()), shape=(N, N) + (X_T_data_tvm.numpy(), X_T_indices_tvm.numpy(), X_T_indptr_tvm.numpy()), shape=(N, N) ).todense() tvm.testing.assert_allclose(X_np_T, X_T_out, atol=1e-4, rtol=1e-4) @@ -383,7 +383,7 @@ def verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, use_relu, device, targ tvm.nd.array(W_sp_np.indptr, device=device), Y_tvm, ) - tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-4, rtol=1e-4) + tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4) @tvm.testing.parametrize_targets("llvm", "cuda") @@ -415,7 +415,7 @@ def test_sparse_dense_bsr_reverse(): tvm.nd.array(W_sp_np.indptr), Y_tvm, ) - tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-4, rtol=1e-4) + tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4) @tvm.testing.uses_gpu @@ -457,7 +457,7 @@ def check_device(device): tvm.nd.array(W_sp_np.indptr, device=dev), Y_tvm, ) - tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-5, rtol=1e-5) + tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-5, rtol=1e-5) for 
device in ["llvm", "cuda"]: check_device(device) @@ -495,7 +495,7 @@ def test_sparse_dense_padded_gpu(target, dev): tvm.nd.array(W_sp_np_padded.indptr, device=dev), Y_tvm, ) - tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np, atol=1e-5, rtol=1e-5) + tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-5, rtol=1e-5) @tvm.testing.parametrize_targets("cuda", "rocm") @@ -549,7 +549,7 @@ def test_sparse_add_csr(): tvm.nd.array(Y_sp_np.indptr.astype(indices_dtype)), Z_tvm, ) - tvm.testing.assert_allclose(Z_tvm.asnumpy(), Z_np, atol=1e-4, rtol=1e-4) + tvm.testing.assert_allclose(Z_tvm.numpy(), Z_np, atol=1e-4, rtol=1e-4) def verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, BS_C, density, layout): @@ -593,7 +593,7 @@ def check_device(device): tvm.nd.array(W_sp_np.indptr, dev), Y_tvm, ) - tvm.testing.assert_allclose(Y_tvm.asnumpy(), Y_np.astype("float32"), atol=1e-4, rtol=1e-4) + tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np.astype("float32"), atol=1e-4, rtol=1e-4) check_device("llvm") diff --git a/tests/python/topi/python/test_topi_tensor.py b/tests/python/topi/python/test_topi_tensor.py index 2d4eed3f5ded..167e7f944eb4 100644 --- a/tests/python/topi/python/test_topi_tensor.py +++ b/tests/python/topi/python/test_topi_tensor.py @@ -52,7 +52,7 @@ def check_target(target): tvm_nd = [tvm.nd.array(nd, dev) for nd in np_nd] + [out] f(*tvm_nd) np_out = np.sum(np.array(np_nd), axis=0) - tvm.testing.assert_allclose(out.asnumpy(), np_out, rtol=1e-5) + tvm.testing.assert_allclose(out.numpy(), np_out, rtol=1e-5) for target in ["llvm"]: check_target(target) @@ -80,11 +80,11 @@ def check_target(target): out = tvm.nd.array(np.zeros(shape, dtype=dtype), dev) f = tvm.build(s1, [A, B], target, name="full_like") f(tvm.nd.array(np.zeros(shape, dtype), dev), out) - tvm.testing.assert_allclose(out.asnumpy(), np_nd, rtol=1e-5) + tvm.testing.assert_allclose(out.numpy(), np_nd, rtol=1e-5) f = tvm.build(s2, [C], target, name="full") f(out) - tvm.testing.assert_allclose(out.asnumpy(), np_nd, rtol=1e-5) + tvm.testing.assert_allclose(out.numpy(), np_nd, rtol=1e-5) for target in ["llvm"]: check_target(target) @@ -108,7 +108,7 @@ def check_targeta(targeta): np_A = tvm.nd.empty((n, m), A.dtype, dev).copyfrom(np.random.uniform(size=(n, m))) np_B = tvm.nd.empty((n, m), B.dtype, dev) fun(np_A, np_B) - tvm.testing.assert_allclose(np_B.asnumpy(), np_A.asnumpy() + 1, rtol=1e-5) + tvm.testing.assert_allclose(np_B.numpy(), np_A.numpy() + 1, rtol=1e-5) for targeta in ["cuda"]: check_targeta(targeta) diff --git a/tests/python/topi/python/test_topi_transform.py b/tests/python/topi/python/test_topi_transform.py index 94cdc613ce9c..20172f07fd9e 100644 --- a/tests/python/topi/python/test_topi_transform.py +++ b/tests/python/topi/python/test_topi_transform.py @@ -41,7 +41,7 @@ def check_device(target, dev): data_nd = tvm.nd.array(data_npy, dev) out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), dev) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -64,7 +64,7 @@ def check_device(target, dev): data_nd = tvm.nd.array(data_npy, dev) out_nd = tvm.nd.array(np.empty(in_shape).astype(B.dtype), dev) foo(data_nd, out_nd) - np.testing.assert_equal(out_nd.asnumpy(), out_npy) + np.testing.assert_equal(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -84,7 +84,7 @@ def check_device(target, dev): data_nd = tvm.nd.array(data_npy, 
dev) out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=B.dtype) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -104,7 +104,7 @@ def check_device(target, dev): data_nd = tvm.nd.array(data_npy, dev) out_nd = tvm.nd.empty(dst_shape, device=dev, dtype=B.dtype) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -126,7 +126,7 @@ def check_device(target, dev): out_nd_shape = out_npy.shape out_nd = tvm.nd.empty(out_nd_shape, device=dev, dtype=B.dtype) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -161,7 +161,7 @@ def check_device(target, dev): data_nds = [tvm.nd.array(data_npy, dev) for data_npy in data_npys] out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=out_tensor.dtype) foo(*(data_nds + [out_nd])) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -184,7 +184,7 @@ def check_device(target, dev): data_nds = [tvm.nd.array(data_npy, dev) for data_npy in data_npys] out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=out_tensor.dtype) foo(*(data_nds + [out_nd])) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -208,7 +208,7 @@ def check_device(target, dev): ] foo(*([data_nd] + out_nds)) for out_nd, out_npy in zip(out_nds, out_npys): - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -240,7 +240,7 @@ def check_device(target): tvm_shape_like = tvm.nd.array(np.zeros(out_shape).astype(B.dtype), dev) out = tvm.nd.array(np.zeros(out_shape).astype(A.dtype), dev) f(tvm_input, tvm_shape_like, out) - tvm.testing.assert_allclose(out.asnumpy(), input) + tvm.testing.assert_allclose(out.numpy(), input) for target in ["llvm"]: check_device(target) @@ -265,7 +265,7 @@ def check_device(target): data_nd = tvm.nd.array(x_np, dev) out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target in ["llvm", "cuda", "opencl", "sdaccel", "aocl_sw_emu"]: check_device(target) @@ -290,7 +290,7 @@ def check_device(target, dev): seq_lengths_nd = tvm.nd.array(seq_lengths, dev) out_nd = tvm.nd.empty(in_data.shape, device=dev, dtype=A.dtype) foo(data_nd, seq_lengths_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), ref_res) + tvm.testing.assert_allclose(out_nd.numpy(), ref_res) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -392,7 +392,7 @@ def check_device(target): indices_nd = tvm.nd.array(indices_src, dev) out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=src_dtype) foo(data_nd, indices_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npys) + tvm.testing.assert_allclose(out_nd.numpy(), out_npys) for target in 
["llvm", "opencl", "sdaccel", "aocl_sw_emu"]: check_device(target) @@ -418,7 +418,7 @@ def check_device(target): data_nd = tvm.nd.array(x_np, dev) out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]: check_device(target) @@ -450,7 +450,7 @@ def check_device(target): end_nd = tvm.nd.array(np.array(end).astype("int64"), dev) strides_nd = tvm.nd.array(np.array(strides).astype("int64"), dev) foo(data_nd, begin_nd, end_nd, strides_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]: check_device(target) @@ -496,7 +496,7 @@ def check_device(target): foo(data_nd, v_nd, b_nd, e_nd, s_nd, out_nd) else: foo(data_nd, v_nd, b_nd, e_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]: check_device(target) @@ -522,7 +522,7 @@ def check_device(target, dev): indices_nd = tvm.nd.array(indices, dev) out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=data.dtype.name) func(data_nd, indices_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npys) + tvm.testing.assert_allclose(out_nd.numpy(), out_npys) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -551,7 +551,7 @@ def check_device(target, dev): indices_nd = tvm.nd.array(indices_src, dev) out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=src_dtype) func(data_nd, indices_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npys) + tvm.testing.assert_allclose(out_nd.numpy(), out_npys) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -578,7 +578,7 @@ def check_device(target, dev): f = tvm.build(s, [A], target, name="arange") a_nd = tvm.nd.empty(a_np.shape, dtype="float32", device=dev) f(a_nd) - tvm.testing.assert_allclose(a_nd.asnumpy(), a_np) + tvm.testing.assert_allclose(a_nd.numpy(), a_np) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -598,7 +598,7 @@ def check_device(target, dev): data_nd = tvm.nd.array(data_npy, dev) out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -618,7 +618,7 @@ def check_device(target, dev): data_nd = tvm.nd.array(data_npy, dev) out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev) foo(data_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -645,7 +645,7 @@ def check_device(target, dev): y_nd = tvm.nd.array(y_npy, dev) out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev) f(cond_nd, x_nd, y_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -669,7 +669,7 @@ def check_device(target, dev): indices_nd = tvm.nd.array(indices_npy, dev) out_nd = 
tvm.nd.array(np.empty(out_npy.shape).astype(one_hot_result.dtype), dev) fn(indices_nd, out_nd) - out_topi = out_nd.asnumpy() + out_topi = out_nd.numpy() tvm.testing.assert_allclose(out_topi, out_npy) for target, dev in tvm.testing.enabled_targets(): @@ -699,7 +699,7 @@ def check_device(target, dev): datay_nd = tvm.nd.array(y_data, dev) out_nd = tvm.nd.empty(dst_shape, device=dev, dtype=Z.dtype) foo(datax_nd, datay_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -742,7 +742,7 @@ def check_device(target, dev): default_value_nd = tvm.nd.array(default_value_data, dev) foo(sparse_indices_nd, sparse_values_nd, default_value_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), np.array(xpected)) + tvm.testing.assert_allclose(out_nd.numpy(), np.array(xpected)) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -766,7 +766,7 @@ def check_device(target, dev): diagonal_nd = tvm.nd.array(diagonal_npy, dev) out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(matrix_set_diag_result.dtype), dev) fn(input_nd, diagonal_nd, out_nd) - out_topi = out_nd.asnumpy() + out_topi = out_nd.numpy() tvm.testing.assert_allclose(out_topi, out_npy) for target, dev in tvm.testing.enabled_targets(): @@ -803,7 +803,7 @@ def check_device(target, dev): nd_list.append(tvm.nd.empty(out.shape, device=dev, dtype=data.dtype)) func(*nd_list) - tvm.testing.assert_allclose(nd_list[-1].asnumpy(), np.array(np_out)) + tvm.testing.assert_allclose(nd_list[-1].numpy(), np.array(np_out)) for target, dev in tvm.testing.enabled_targets(): check_device(target, dev) @@ -931,7 +931,7 @@ def test_squeeze(): a = tvm.nd.array(np.array((1, 2)).astype("float32"), device=dev) c = tvm.nd.empty((1,), dtype="float32", device=dev) func(a, c) - assert c.asnumpy()[0] == 2 + assert c.numpy()[0] == 2 @tvm.testing.uses_gpu @@ -1074,7 +1074,7 @@ def check_device(target, dev): s = tvm.topi.testing.get_injective_schedule(target)(B) f = tvm.build(s, [A, B], target, name="layout_transform") f(tvm_input, tvm_output) - tvm.testing.assert_allclose(tvm_output.asnumpy(), output) + tvm.testing.assert_allclose(tvm_output.numpy(), output) for backend, dev in tvm.testing.enabled_targets(): check_device(backend, dev) @@ -1098,7 +1098,7 @@ def check_device(target, dev): s = tvm.topi.testing.get_injective_schedule(target)(B) f = tvm.build(s, [A, B], target, name="shape") f(tvm_input, tvm_output) - tvm.testing.assert_allclose(tvm_output.asnumpy(), output) + tvm.testing.assert_allclose(tvm_output.numpy(), output) for backend, dev in tvm.testing.enabled_targets(): check_device(backend, dev) @@ -1127,7 +1127,7 @@ def check_device(target, dev): s = tvm.topi.testing.get_injective_schedule(target)(C) f = tvm.build(s, [A, B, C], target, name="SequenceMask") f(tvm_A, tvm_B, tvm_C) - tvm.testing.assert_allclose(tvm_C.asnumpy(), C_gt_data) + tvm.testing.assert_allclose(tvm_C.numpy(), C_gt_data) for backend, dev in tvm.testing.enabled_targets(): check_device(backend, dev) @@ -1151,7 +1151,7 @@ def check_device(target, dev): s = tvm.topi.testing.get_injective_schedule(target)(B) f = tvm.build(s, [A, B], target, name="ndarray_size") f(tvm_input, tvm_output) - tvm.testing.assert_allclose(tvm_output.asnumpy(), output) + tvm.testing.assert_allclose(tvm_output.numpy(), output) for backend, dev in tvm.testing.enabled_targets(): check_device(backend, dev) diff --git 
a/tests/python/topi/python/test_topi_unique.py b/tests/python/topi/python/test_topi_unique.py index a97b95029862..032b4db73918 100644 --- a/tests/python/topi/python/test_topi_unique.py +++ b/tests/python/topi/python/test_topi_unique.py @@ -69,11 +69,9 @@ def check_unique(data, is_sorted=False): func = tvm.build(s, [te_input, *outs]) func(tvm_data, tvm_unique, tvm_indices, tvm_num_unique) - assert tvm_num_unique.asnumpy()[0] == np_num_unique - np.testing.assert_allclose( - tvm_unique.asnumpy()[:num_unique], np_unique, atol=1e-5, rtol=1e-5 - ) - np.testing.assert_allclose(tvm_indices.asnumpy(), np_indices, atol=1e-5, rtol=1e-5) + assert tvm_num_unique.numpy()[0] == np_num_unique + np.testing.assert_allclose(tvm_unique.numpy()[:num_unique], np_unique, atol=1e-5, rtol=1e-5) + np.testing.assert_allclose(tvm_indices.numpy(), np_indices, atol=1e-5, rtol=1e-5) # with counts tvm_counts = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev) @@ -86,14 +84,10 @@ def check_unique(data, is_sorted=False): np_unique, np_indices, _, np_num_unique = calc_numpy_unique(data, is_sorted) num_unique = np_num_unique[0] - assert tvm_num_unique.asnumpy()[0] == np_num_unique - np.testing.assert_allclose( - tvm_unique.asnumpy()[:num_unique], np_unique, atol=1e-5, rtol=1e-5 - ) - np.testing.assert_allclose(tvm_indices.asnumpy(), np_indices, atol=1e-5, rtol=1e-5) - np.testing.assert_allclose( - tvm_counts.asnumpy()[:num_unique], np_counts, atol=1e-5, rtol=1e-5 - ) + assert tvm_num_unique.numpy()[0] == np_num_unique + np.testing.assert_allclose(tvm_unique.numpy()[:num_unique], np_unique, atol=1e-5, rtol=1e-5) + np.testing.assert_allclose(tvm_indices.numpy(), np_indices, atol=1e-5, rtol=1e-5) + np.testing.assert_allclose(tvm_counts.numpy()[:num_unique], np_counts, atol=1e-5, rtol=1e-5) for in_dtype in ["int32", "int64"]: for is_sorted in [True, False]: diff --git a/tests/python/topi/python/test_topi_upsampling.py b/tests/python/topi/python/test_topi_upsampling.py index 590043760820..0ab0e64af4c7 100644 --- a/tests/python/topi/python/test_topi_upsampling.py +++ b/tests/python/topi/python/test_topi_upsampling.py @@ -93,7 +93,7 @@ def check_target(target, dev): f = tvm.build(s, [A, B], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) @@ -237,7 +237,7 @@ def check_target(target, dev): f = tvm.build(s, [A, B], target) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5) for target, dev in tvm.testing.enabled_targets(): check_target(target, dev) diff --git a/tests/python/topi/python/test_topi_vision.py b/tests/python/topi/python/test_topi_vision.py index d26026265864..234107d6686e 100644 --- a/tests/python/topi/python/test_topi_vision.py +++ b/tests/python/topi/python/test_topi_vision.py @@ -113,9 +113,9 @@ def check_device(target): f = tvm.build(s, [data, outs[0], outs[1], outs[2]], target) f(tvm_input_data, tvm_out1, tvm_out2, tvm_out3) - tvm.testing.assert_allclose(tvm_out1.asnumpy(), np_out1, rtol=1e-3) - tvm.testing.assert_allclose(tvm_out2.asnumpy(), np_out2, rtol=1e-3) - tvm.testing.assert_allclose(tvm_out3.asnumpy(), np_out3, rtol=1e-3) + tvm.testing.assert_allclose(tvm_out1.numpy(), np_out1, rtol=1e-3) + tvm.testing.assert_allclose(tvm_out2.numpy(), np_out2, rtol=1e-3) + tvm.testing.assert_allclose(tvm_out3.numpy(), np_out3, rtol=1e-3) 
for target in ["llvm", "cuda", "opencl", "vulkan"]: check_device(target) @@ -195,12 +195,12 @@ def check_device(target): tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev) f = tvm.build(s, [data, valid_count, indices, out], target) f(tvm_data, tvm_valid_count, tvm_indices, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), np_result, rtol=1e-4) + tvm.testing.assert_allclose(tvm_out.numpy(), np_result, rtol=1e-4) tvm_indices_out = tvm.nd.array(np.zeros(indices_dshape, dtype="int32"), dev) f = tvm.build(indices_s, [data, valid_count, indices, indices_out[0]], target) f(tvm_data, tvm_valid_count, tvm_indices, tvm_indices_out) - tvm.testing.assert_allclose(tvm_indices_out.asnumpy(), np_indices_result, rtol=1e-4) + tvm.testing.assert_allclose(tvm_indices_out.numpy(), np_indices_result, rtol=1e-4) for target in ["llvm", "cuda", "opencl", "nvptx"]: check_device(target) @@ -360,7 +360,7 @@ def check_device(target): tvm_out = tvm.nd.array(np.zeros(oshape, dtype=dtype), dev) f = tvm.build(s, [data, out], target) f(tvm_input_data, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), np_out, rtol=1e-3) + tvm.testing.assert_allclose(tvm_out.numpy(), np_out, rtol=1e-3) for target in ["llvm", "opencl", "cuda"]: check_device(target) @@ -417,7 +417,7 @@ def check_device(target): tvm_out = tvm.nd.array(np.zeros((batch_size, num_anchors, 6)).astype(out.dtype), dev) f = tvm.build(s, [cls_prob, loc_preds, anchors, out], target) f(tvm_cls_prob, tvm_loc_preds, tvm_anchors, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), expected_np_out, rtol=1e-4) + tvm.testing.assert_allclose(tvm_out.numpy(), expected_np_out, rtol=1e-4) for target in ["llvm", "opencl", "cuda"]: check_device(target) @@ -472,7 +472,7 @@ def check_device(target): tvm_b = tvm.nd.array(np.zeros(get_const_tuple(b.shape), dtype=b.dtype), device=dev) f = tvm.build(s, [a, rois, b], target) f(tvm_a, tvm_rois, tvm_b) - tvm_val = tvm_b.asnumpy() + tvm_val = tvm_b.numpy() tvm.testing.assert_allclose(tvm_val, b_np, rtol=1e-3, atol=1e-4) for target in ["llvm", "cuda", "opencl"]: @@ -530,7 +530,7 @@ def check_device(target): tvm_b = tvm.nd.array(np.zeros(get_const_tuple(b.shape), dtype=b.dtype), device=dev) f = tvm.build(s, [a, rois, b], target) f(tvm_a, tvm_rois, tvm_b) - tvm.testing.assert_allclose(tvm_b.asnumpy(), b_np, rtol=1e-4) + tvm.testing.assert_allclose(tvm_b.numpy(), b_np, rtol=1e-4) for target in ["cuda", "llvm"]: check_device(target) @@ -563,7 +563,7 @@ def check_device(target): tvm_im_info = tvm.nd.array(np_im_info, device=dev) tvm_out = tvm.nd.empty(device=dev, shape=out.shape, dtype=out.dtype) f(tvm_cls_prob, tvm_bbox_pred, tvm_im_info, tvm_out) - tvm.testing.assert_allclose(tvm_out.asnumpy(), np_out, rtol=1e-4) + tvm.testing.assert_allclose(tvm_out.numpy(), np_out, rtol=1e-4) for target in ["llvm", "cuda"]: check_device(target) @@ -663,7 +663,7 @@ def check_device(target): f = tvm.build(s, [boxes, scores, out[0], out[1]], target) f(tvm_boxes, tvm_scores, selected_indices, num_detections) - tvm_res = selected_indices.asnumpy()[: num_detections.asnumpy()[0]] + tvm_res = selected_indices.numpy()[: num_detections.numpy()[0]] np.testing.assert_equal(tvm_res, expected_indices) for target in ["llvm", "cuda", "opencl", "vulkan"]: diff --git a/tests/python/unittest/test_auto_scheduler_layout_rewrite.py b/tests/python/unittest/test_auto_scheduler_layout_rewrite.py index 91430599124e..c9291965613b 100644 --- a/tests/python/unittest/test_auto_scheduler_layout_rewrite.py +++ 
b/tests/python/unittest/test_auto_scheduler_layout_rewrite.py @@ -130,8 +130,8 @@ def test_correctness_layout_rewrite_rewrite_for_preTransformed(): func_ref(*args_ref) dev.sync() - tvm.testing.assert_allclose(args[0].asnumpy(), args_ref[0].asnumpy(), atol=1e-3, rtol=1e-3) - tvm.testing.assert_allclose(args[2].asnumpy(), args_ref[2].asnumpy(), atol=1e-3, rtol=1e-3) + tvm.testing.assert_allclose(args[0].numpy(), args_ref[0].numpy(), atol=1e-3, rtol=1e-3) + tvm.testing.assert_allclose(args[2].numpy(), args_ref[2].numpy(), atol=1e-3, rtol=1e-3) del measure_ctx @@ -177,9 +177,9 @@ def test_correctness_layout_rewrite_insert_transform_stage(): func_ref(*args_ref) dev.sync() - tvm.testing.assert_allclose(args[0].asnumpy(), args_ref[0].asnumpy(), atol=1e-3, rtol=1e-3) - tvm.testing.assert_allclose(args[1].asnumpy(), args_ref[1].asnumpy(), atol=1e-3, rtol=1e-3) - tvm.testing.assert_allclose(args[2].asnumpy(), args_ref[2].asnumpy(), atol=1e-3, rtol=1e-3) + tvm.testing.assert_allclose(args[0].numpy(), args_ref[0].numpy(), atol=1e-3, rtol=1e-3) + tvm.testing.assert_allclose(args[1].numpy(), args_ref[1].numpy(), atol=1e-3, rtol=1e-3) + tvm.testing.assert_allclose(args[2].numpy(), args_ref[2].numpy(), atol=1e-3, rtol=1e-3) del measure_ctx diff --git a/tests/python/unittest/test_auto_scheduler_search_policy.py b/tests/python/unittest/test_auto_scheduler_search_policy.py index 32245d9bba81..d114ce4f9d16 100644 --- a/tests/python/unittest/test_auto_scheduler_search_policy.py +++ b/tests/python/unittest/test_auto_scheduler_search_policy.py @@ -100,11 +100,11 @@ def search_common( tvm_arrays = [tvm.nd.array(x, ctx) for x in np_arrays] mod(*tvm_arrays) - actual = [x.asnumpy() for x in tvm_arrays] + actual = [x.numpy() for x in tvm_arrays] tvm_arrays = [tvm.nd.array(x) for x in np_arrays] mod_ref(*tvm_arrays) - expected = [x.asnumpy() for x in tvm_arrays] + expected = [x.numpy() for x in tvm_arrays] for x, y in zip(actual, expected): tvm.testing.assert_allclose(x, y, rtol=1e-5) diff --git a/tests/python/unittest/test_crt.py b/tests/python/unittest/test_crt.py index c6902429c0cd..3ba508a40a77 100644 --- a/tests/python/unittest/test_crt.py +++ b/tests/python/unittest/test_crt.py @@ -94,15 +94,15 @@ def test_compile_runtime(): with _make_add_sess(workspace) as sess: A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device) - assert (A_data.asnumpy() == np.array([2, 3])).all() + assert (A_data.numpy() == np.array([2, 3])).all() B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device) - assert (B_data.asnumpy() == np.array([4])).all() + assert (B_data.numpy() == np.array([4])).all() C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device) - assert (C_data.asnumpy() == np.array([0, 0])).all() + assert (C_data.numpy() == np.array([0, 0])).all() system_lib = sess.get_system_lib() system_lib.get_function("add")(A_data, B_data, C_data) - assert (C_data.asnumpy() == np.array([6, 7])).all() + assert (C_data.numpy() == np.array([6, 7])).all() @tvm.testing.requires_micro @@ -161,14 +161,14 @@ def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8]) { factory.get_graph_json(), sess.get_system_lib(), sess.device ) A_data = tvm.nd.array(np.array([2, 3], dtype="uint8"), device=sess.device) - assert (A_data.asnumpy() == np.array([2, 3])).all() + assert (A_data.numpy() == np.array([2, 3])).all() B_data = tvm.nd.array(np.array([4, 7], dtype="uint8"), device=sess.device) - assert (B_data.asnumpy() == np.array([4, 7])).all() + assert (B_data.numpy() == np.array([4, 7])).all() 
graph_mod.run(a=A_data, b=B_data) out = graph_mod.get_output(0) - assert (out.asnumpy() == np.array([6, 10])).all() + assert (out.numpy() == np.array([6, 10])).all() @tvm.testing.requires_micro @@ -180,11 +180,11 @@ def test_std_math_functions(): with _make_add_sess(workspace) as sess: A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device) - assert (A_data.asnumpy() == np.array([2, 3])).all() + assert (A_data.numpy() == np.array([2, 3])).all() B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device) - assert (B_data.asnumpy() == np.array([4])).all() + assert (B_data.numpy() == np.array([4])).all() C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device) - assert (C_data.asnumpy() == np.array([0, 0])).all() + assert (C_data.numpy() == np.array([0, 0])).all() system_lib = sess.get_system_lib() system_lib.get_function("add")(A_data, B_data, C_data) @@ -200,7 +200,7 @@ def test_std_math_functions(): lib = sess.get_system_lib() func = lib["myexpf"] func(A_data, B_data) - np.testing.assert_allclose(B_data.asnumpy(), np.array([7.389056, 20.085537])) + np.testing.assert_allclose(B_data.numpy(), np.array([7.389056, 20.085537])) @tvm.testing.requires_micro diff --git a/tests/python/unittest/test_custom_datatypes.py b/tests/python/unittest/test_custom_datatypes.py index 494973e8b573..5f962ef7f74f 100644 --- a/tests/python/unittest/test_custom_datatypes.py +++ b/tests/python/unittest/test_custom_datatypes.py @@ -104,7 +104,7 @@ def compare(module, input, src_dtype, dst_dtype, rtol, atol, params={}, target=" # currently this only works for comparing single output maybe_correct_converted = convert_ndarray(src_dtype, maybe_correct) np.testing.assert_allclose( - maybe_correct_converted.asnumpy(), correct.asnumpy(), rtol=rtol, atol=atol + maybe_correct_converted.numpy(), correct.numpy(), rtol=rtol, atol=atol ) diff --git a/tests/python/unittest/test_link_params.py b/tests/python/unittest/test_link_params.py index d377e208058e..51799bba61fd 100644 --- a/tests/python/unittest/test_link_params.py +++ b/tests/python/unittest/test_link_params.py @@ -130,9 +130,9 @@ def _verify_linked_param(dtype, lib, mod, graph, name): arr_data = (_get_ctypes_dtype(dtype) * np.prod(gen_param.shape)).from_address(param_ptr.value) arr = np.ndarray(shape=gen_param.shape, dtype=gen_param.dtype, buffer=arr_data, order="C") if "int" in gen_param.dtype: - np.testing.assert_equal(gen_param.asnumpy(), arr) + np.testing.assert_equal(gen_param.numpy(), arr) else: - np.testing.assert_allclose(gen_param.asnumpy(), arr) + np.testing.assert_allclose(gen_param.numpy(), arr) return dtype == gen_param.dtype @@ -226,9 +226,9 @@ def _run_unlinked(lib): unlinked_output = _run_unlinked(lib) if "int" in dtype: - np.testing.assert_equal(unlinked_output.asnumpy(), linked_output.asnumpy()) + np.testing.assert_equal(unlinked_output.numpy(), linked_output.numpy()) else: - np.testing.assert_allclose(unlinked_output.asnumpy(), linked_output.asnumpy()) + np.testing.assert_allclose(unlinked_output.numpy(), linked_output.numpy()) def _get_c_datatype(dtype): @@ -278,7 +278,7 @@ def test_c_link_params(): lib.lib.save(temp_dir.relpath("test.c"), "c") c_dtype = _get_c_datatype(dtype) src_lines = src.split("\n") - param = lib.params["p0"].asnumpy().reshape(np.prod(KERNEL_SHAPE)) + param = lib.params["p0"].numpy().reshape(np.prod(KERNEL_SHAPE)) param_def = f"static const {c_dtype} __tvm_param__p0[{np.prod(param.shape)}] = {{" for i, line in enumerate(src_lines): if line == param_def: @@ -341,9 +341,9 @@ def 
_run_unlinked(lib_mod): unlinked_output = _run_unlinked(lib_mod) if "int" in dtype: - np.testing.assert_equal(unlinked_output.asnumpy(), linked_output.asnumpy()) + np.testing.assert_equal(unlinked_output.numpy(), linked_output.numpy()) else: - np.testing.assert_allclose(unlinked_output.asnumpy(), linked_output.asnumpy()) + np.testing.assert_allclose(unlinked_output.numpy(), linked_output.numpy()) @tvm.testing.requires_micro @@ -389,7 +389,7 @@ def test_crt_link_params(): # NOTE: not setting params here. graph_rt.set_input("rand_input", rand_input) graph_rt.run() - linked_output = graph_rt.get_output(0).asnumpy() + linked_output = graph_rt.get_output(0).numpy() with tvm.transform.PassContext(opt_level=3): lib = tvm.relay.build(mod, "llvm --system-lib", params=param_init) @@ -399,7 +399,7 @@ def _run_unlinked(lib): graph_rt = tvm.contrib.graph_executor.create(graph_json, mod, tvm.cpu(0)) graph_rt.set_input("rand_input", rand_input, **lowered_params) graph_rt.run() - return graph_rt.get_output(0).asnumpy() + return graph_rt.get_output(0).numpy() unlinked_output = _run_unlinked(lib) diff --git a/tests/python/unittest/test_lower_build.py b/tests/python/unittest/test_lower_build.py index 21f41321887f..4505a7bed244 100644 --- a/tests/python/unittest/test_lower_build.py +++ b/tests/python/unittest/test_lower_build.py @@ -29,9 +29,9 @@ def _check_module_with_numpy(mod, shape=(128, 128, 128)): a = tvm.nd.array(np.random.rand(m, k).astype("float32")) b = tvm.nd.array(np.random.rand(n, k).astype("float32")) c = tvm.nd.array(np.zeros((m, n), dtype="float32")) - c_np = np.dot(a.asnumpy(), b.asnumpy().transpose()) + c_np = np.dot(a.numpy(), b.numpy().transpose()) mod(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) # pylint: disable=no-self-argument, missing-class-docstring, missing-function-docstring diff --git a/tests/python/unittest/test_runtime_container.py b/tests/python/unittest/test_runtime_container.py index 4607892a5a4c..9d4255c86b5e 100644 --- a/tests/python/unittest/test_runtime_container.py +++ b/tests/python/unittest/test_runtime_container.py @@ -52,7 +52,7 @@ def test_tuple_object(): value_tuple = _container.tuple_object([nd.array(np.array(11)), nd.array(np.array(12))]) # pass an ADT object to evaluate out = f(value_tuple) - tvm.testing.assert_allclose(out.asnumpy(), np.array(11)) + tvm.testing.assert_allclose(out.numpy(), np.array(11)) def test_string(): diff --git a/tests/python/unittest/test_runtime_extension.py b/tests/python/unittest/test_runtime_extension.py index 14f128c97b7b..be4ca55ebf8b 100644 --- a/tests/python/unittest/test_runtime_extension.py +++ b/tests/python/unittest/test_runtime_extension.py @@ -47,7 +47,7 @@ def test_dltensor_compatible(): a = tvm.nd.array(np.zeros(10, dtype=dtype)) aview = MyTensorView(a) f(aview) - np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0])) + np.testing.assert_equal(a.numpy(), np.arange(a.shape[0])) if __name__ == "__main__": diff --git a/tests/python/unittest/test_runtime_graph.py b/tests/python/unittest/test_runtime_graph.py index 44f20878b800..1259e77afbf8 100644 --- a/tests/python/unittest/test_runtime_graph.py +++ b/tests/python/unittest/test_runtime_graph.py @@ -63,7 +63,7 @@ def check_verify(): a = np.random.uniform(size=(n,)).astype(A.dtype) mod.run(x=a) out = mod.get_output(0, tvm.nd.empty((n,))) - np.testing.assert_equal(out.asnumpy(), a + 1) + np.testing.assert_equal(out.numpy(), a + 1) def check_remote(): mlib = tvm.build(s, [A, B], "llvm", 
name="myadd") @@ -80,7 +80,7 @@ def check_remote(): mod.run(x=tvm.nd.array(a, dev)) out = tvm.nd.empty((n,), device=dev) out = mod.get_output(0, out) - np.testing.assert_equal(out.asnumpy(), a + 1) + np.testing.assert_equal(out.numpy(), a + 1) def check_sharing(): x = relay.var("x", shape=(1, 10)) @@ -104,14 +104,14 @@ def check_sharing(): for mod in mods: mod.run(y=a) out = mod.get_output(0, tvm.nd.empty((1, 10))) - np.testing.assert_equal(out.asnumpy(), x_in + a) + np.testing.assert_equal(out.numpy(), x_in + a) # Explicitly delete the shared module and verify correctness. del mod_shared for mod in mods: mod.run(y=a) out = mod.get_output(0, tvm.nd.empty((1, 10))) - np.testing.assert_equal(out.asnumpy(), x_in + a) + np.testing.assert_equal(out.numpy(), x_in + a) del mod check_verify() diff --git a/tests/python/unittest/test_runtime_graph_cuda_graph.py b/tests/python/unittest/test_runtime_graph_cuda_graph.py index fb0c736090e5..0282161c60f8 100644 --- a/tests/python/unittest/test_runtime_graph_cuda_graph.py +++ b/tests/python/unittest/test_runtime_graph_cuda_graph.py @@ -83,7 +83,7 @@ def check_verify(): a = np.random.uniform(size=(n,)).astype(A.dtype) mod.run(x=a) # The first run captured a CUDA graph out = mod.get_output(0, tvm.nd.empty((n,))) - np.testing.assert_equal(out.asnumpy(), a + 1) + np.testing.assert_equal(out.numpy(), a + 1) # capture / run CUDA graph manually mod.capture_cuda_graph() @@ -91,7 +91,7 @@ def check_verify(): mod.set_input(x=a) mod.run_cuda_graph() out = mod.get_output(0, tvm.nd.empty((n,))) - np.testing.assert_equal(out.asnumpy(), a + 1) + np.testing.assert_equal(out.numpy(), a + 1) check_verify() diff --git a/tests/python/unittest/test_runtime_graph_debug.py b/tests/python/unittest/test_runtime_graph_debug.py index b7df33c2a66e..192e0dad702f 100644 --- a/tests/python/unittest/test_runtime_graph_debug.py +++ b/tests/python/unittest/test_runtime_graph_debug.py @@ -154,7 +154,7 @@ def split_debug_line(i): # verify the output is correct out = mod.get_output(0, tvm.nd.empty((n,))) - np.testing.assert_equal(out.asnumpy(), a + 1) + np.testing.assert_equal(out.numpy(), a + 1) mod.exit() # verify dump root delete after cleanup @@ -179,7 +179,7 @@ def check_remote(): mod.run(x=tvm.nd.array(a, dev)) out = tvm.nd.empty((n,), device=dev) out = mod.get_output(0, out) - np.testing.assert_equal(out.asnumpy(), a + 1) + np.testing.assert_equal(out.numpy(), a + 1) check_verify() check_remote() diff --git a/tests/python/unittest/test_runtime_heterogeneous.py b/tests/python/unittest/test_runtime_heterogeneous.py index 5388dee2fa58..167f61d748c2 100644 --- a/tests/python/unittest/test_runtime_heterogeneous.py +++ b/tests/python/unittest/test_runtime_heterogeneous.py @@ -181,7 +181,7 @@ def check_device(device, target_device): mod.set_input(**params) mod.run() out = mod.get_output(0, tvm.nd.empty(shape)) - np.testing.assert_equal(out.asnumpy(), (tensor_a + tensor_b) - tensor_c) + np.testing.assert_equal(out.numpy(), (tensor_a + tensor_b) - tensor_c) dev_tar = {"cuda": "cuda", "opencl": "opencl"} for device, target in dev_tar.items(): @@ -414,7 +414,7 @@ def check_verify(): mod.set_input(**params) mod.run() out = mod.get_output(0, tvm.nd.empty(shape)) - np.testing.assert_equal(out.asnumpy(), tensor_a + tensor_b - tensor_c + tensor_d) + np.testing.assert_equal(out.numpy(), tensor_a + tensor_b - tensor_c + tensor_d) def check_load_module(): temp = utils.tempdir() @@ -428,7 +428,7 @@ def check_load_module(): mod.set_input(**params) mod.run() out = mod.get_output(0, tvm.nd.empty(shape)) - 
np.testing.assert_equal(out.asnumpy(), tensor_a + tensor_b - tensor_c + tensor_d) + np.testing.assert_equal(out.numpy(), tensor_a + tensor_b - tensor_c + tensor_d) check_verify() check_load_module() diff --git a/tests/python/unittest/test_runtime_module_based_interface.py b/tests/python/unittest/test_runtime_module_based_interface.py index ff1e7ff42f9c..9bb05dfed65f 100644 --- a/tests/python/unittest/test_runtime_module_based_interface.py +++ b/tests/python/unittest/test_runtime_module_based_interface.py @@ -41,7 +41,7 @@ def verify(data): module.set_input("data", data) module.set_input(**graph_params) module.run() - out = module.get_output(0).asnumpy() + out = module.get_output(0).numpy() return out @@ -59,7 +59,7 @@ def test_legacy_compatibility(): module.set_input("data", data) module.set_input(**graph_params) module.run() - out = module.get_output(0).asnumpy() + out = module.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) @@ -79,14 +79,14 @@ def test_cpu(): get_output = gmod["get_output"] set_input("data", tvm.nd.array(data)) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper gmod = graph_executor.GraphModule(complied_graph_lib["default"](dev)) gmod.set_input("data", data) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) @@ -106,14 +106,14 @@ def test_gpu(): get_output = gmod["get_output"] set_input("data", tvm.nd.array(data)) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper gmod = graph_executor.GraphModule(complied_graph_lib["default"](dev)) gmod.set_input("data", data) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) @@ -154,14 +154,14 @@ def setup_gmod(): data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32") set_input("data", tvm.nd.array(data)) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper gmod = graph_executor.GraphModule(setup_gmod()) gmod.set_input("data", data) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) def verify_gpu_export(obj_format): @@ -200,14 +200,14 @@ def setup_gmod(): get_output = gmod["get_output"] set_input("data", tvm.nd.array(data)) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper gmod = graph_executor.GraphModule(setup_gmod()) gmod.set_input("data", data) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) def verify_rpc_cpu_export(obj_format): @@ -244,14 +244,14 @@ def verify_rpc_cpu_export(obj_format): get_output = gmod["get_output"] set_input("data", tvm.nd.array(data, device=dev)) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper gmod = graph_executor.GraphModule(loaded_lib["default"](dev)) gmod.set_input("data", data) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, 
verify(data), atol=1e-5) def verify_rpc_gpu_export(obj_format): @@ -289,14 +289,14 @@ def verify_rpc_gpu_export(obj_format): get_output = gmod["get_output"] set_input("data", tvm.nd.array(data, device=dev)) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper gmod = graph_executor.GraphModule(loaded_lib["default"](dev)) gmod.set_input("data", data) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) for obj_format in [".so", ".tar"]: @@ -343,7 +343,7 @@ def verify_cpu_remove_package_params(obj_format): set_input("data", tvm.nd.array(data)) load_params(loaded_params) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper @@ -352,7 +352,7 @@ def verify_cpu_remove_package_params(obj_format): gmod.set_input("data", data) gmod.load_params(loaded_params) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) def verify_gpu_remove_package_params(obj_format): @@ -390,7 +390,7 @@ def verify_gpu_remove_package_params(obj_format): set_input("data", tvm.nd.array(data)) load_params(loaded_params) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper @@ -399,7 +399,7 @@ def verify_gpu_remove_package_params(obj_format): gmod.set_input("data", data) gmod.load_params(loaded_params) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) def verify_rpc_cpu_remove_package_params(obj_format): @@ -443,7 +443,7 @@ def verify_rpc_cpu_remove_package_params(obj_format): set_input("data", tvm.nd.array(data, device=dev)) load_params(loaded_params) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper @@ -452,7 +452,7 @@ def verify_rpc_cpu_remove_package_params(obj_format): gmod.set_input("data", data) gmod.load_params(loaded_params) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) def verify_rpc_gpu_remove_package_params(obj_format): @@ -496,7 +496,7 @@ def verify_rpc_gpu_remove_package_params(obj_format): set_input("data", tvm.nd.array(data, device=dev)) load_params(loaded_params) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # graph executor wrapper @@ -505,7 +505,7 @@ def verify_rpc_gpu_remove_package_params(obj_format): gmod.set_input("data", data) gmod.load_params(loaded_params) gmod.run() - out = gmod.get_output(0).asnumpy() + out = gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) for obj_format in [".so", ".tar"]: @@ -536,7 +536,7 @@ def test_debug_graph_executor(): get_output = gmod["get_output"] set_input("data", tvm.nd.array(data)) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # debug graph executor wrapper @@ -548,7 +548,7 @@ def test_debug_graph_executor(): ) debug_g_mod.set_input("data", data) debug_g_mod.run() - out = debug_g_mod.get_output(0).asnumpy() + out = 
debug_g_mod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) @@ -570,14 +570,14 @@ def test_cuda_graph_executor(): get_output = gmod["get_output"] set_input("data", tvm.nd.array(data)) run() - out = get_output(0).asnumpy() + out = get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) # cuda graph executor wrapper cu_gmod = cuda_graph_executor.GraphModuleCudaGraph(gmod) cu_gmod.set_input("data", data) cu_gmod.run() - out = cu_gmod.get_output(0).asnumpy() + out = cu_gmod.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) diff --git a/tests/python/unittest/test_runtime_module_load.py b/tests/python/unittest/test_runtime_module_load.py index c34b2f292d33..523065465172 100644 --- a/tests/python/unittest/test_runtime_module_load.py +++ b/tests/python/unittest/test_runtime_module_load.py @@ -38,7 +38,7 @@ ff = tvm.runtime.load_module(path_dso) a = tvm.nd.array(np.zeros(10, dtype=dtype)) ff(a) -np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0])) +np.testing.assert_equal(a.numpy(), np.arange(a.shape[0])) print("Finish runtime checking...") """ @@ -79,10 +79,10 @@ def save_object(names): f2 = tvm.runtime.load_module(path_ll) a = tvm.nd.array(np.zeros(10, dtype=dtype)) f1(a) - np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0])) + np.testing.assert_equal(a.numpy(), np.arange(a.shape[0])) a = tvm.nd.array(np.zeros(10, dtype=dtype)) f2(a) - np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0])) + np.testing.assert_equal(a.numpy(), np.arange(a.shape[0])) path_runtime_py = temp.relpath("runtime.py") with open(path_runtime_py, "w") as fo: @@ -126,11 +126,11 @@ def check_device(device): a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) f1(a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) if sys.platform != "win32": f2 = tvm.runtime.system_lib() f2[name](a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) def check_stackvm(device): dev = tvm.device(device, 0) @@ -146,7 +146,7 @@ def check_stackvm(device): a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) f(a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) for device in ["cuda", "vulkan", "opencl", "metal"]: check_device(device) @@ -183,9 +183,9 @@ def check_llvm(): a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), dev) fadd1(a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) fadd2(a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) def check_system_lib(): dev = tvm.cpu(0) @@ -208,9 +208,9 @@ def check_system_lib(): a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), dev) mm["myadd1"](a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) mm["myadd2"](a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) if sys.platform != "win32": check_system_lib() diff --git a/tests/python/unittest/test_runtime_rpc.py b/tests/python/unittest/test_runtime_rpc.py index 
1fb8446a1d4b..182645117f36 100644 --- a/tests/python/unittest/test_runtime_rpc.py +++ b/tests/python/unittest/test_runtime_rpc.py @@ -77,7 +77,7 @@ def verify_rpc(remote, target, shape, dtype): remote.upload(path_dso) f = remote.load_module("dev_lib.o") f(a, b) - tvm.testing.assert_allclose(a.asnumpy() + 1, b.asnumpy()) + tvm.testing.assert_allclose(a.numpy() + 1, b.numpy()) print("Test RPC connection to PowerPC...") remote = rpc.connect(host, port) @@ -119,7 +119,7 @@ def test_rpc_array(): remote = rpc.connect("127.0.0.1", server.port) r_cpu = tvm.nd.array(x, remote.cpu(0)) assert str(r_cpu.device).startswith("remote") - np.testing.assert_equal(r_cpu.asnumpy(), x) + np.testing.assert_equal(r_cpu.numpy(), x) fremote = remote.get_function("rpc.test.remote_array_func") fremote(r_cpu) @@ -134,8 +134,8 @@ def test_rpc_large_array(): b_np = np.ones((720, 192)).astype("float32") a = tvm.nd.array(a_np, dev) b = tvm.nd.array(b_np, dev) - np.testing.assert_equal(a.asnumpy(), a_np) - np.testing.assert_equal(b.asnumpy(), b_np) + np.testing.assert_equal(a.numpy(), a_np) + np.testing.assert_equal(b.numpy(), b_np) @tvm.testing.requires_rpc @@ -224,7 +224,7 @@ def check_remote(remote): time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10) cost = time_f(a, b).mean print("%g secs/op" % cost) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) # Download the file from the remote path_tar = temp.relpath("dev_lib.tar") @@ -237,7 +237,7 @@ def check_remote(remote): a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), tvm.cpu(0)) b = tvm.nd.array(np.zeros(102, dtype=A.dtype), tvm.cpu(0)) fupdated(a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) def check_minrpc(): if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None: @@ -260,7 +260,7 @@ def check_minrpc(): b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev) time_f = f1.time_evaluator("myadd", remote.cpu(0), number=1) cost = time_f(a, b).mean - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) # change to not executable os.chmod(path_minrpc, stat.S_IRUSR) @@ -301,7 +301,7 @@ def check_remote_link_cl(remote): a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev) fhost(a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) # Option 2: export library as a tar ball then handled by remote compiler path_tar = temp.relpath("myadd.tar") f.export_library(path_tar) @@ -310,7 +310,7 @@ def check_remote_link_cl(remote): a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev) fhost(a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) check_remote(rpc.LocalSession()) check_remote(client) @@ -348,7 +348,7 @@ def check_multi_hop(): assert bytes(fecho(bytearray(b"123"))) == b"123" nd = tvm.nd.array([1, 2, 3], device=client.cpu(0)) - assert nd.asnumpy()[1] == 2 + assert nd.numpy()[1] == 2 def check_error_handling(): with pytest.raises(tvm.error.RPCError): diff --git a/tests/python/unittest/test_runtime_trace.py b/tests/python/unittest/test_runtime_trace.py index 951e88d7efdd..b1a7a5338fb5 100644 --- a/tests/python/unittest/test_runtime_trace.py +++ b/tests/python/unittest/test_runtime_trace.py @@ -52,9 +52,9 @@ def 
check_assign(dtype): znd = tvm.nd.array(np.zeros((n, n, n), dtype=z.dtype)) f(xnd, ynd, znd) - assert np.array_equal(xnd.asnumpy(), np.ones((n, n, n))) - assert np.array_equal(ynd.asnumpy(), np.ones((n, n, n))) - assert np.array_equal(znd.asnumpy(), np.ones((n, n, n))) + assert np.array_equal(xnd.numpy(), np.ones((n, n, n))) + assert np.array_equal(ynd.numpy(), np.ones((n, n, n))) + assert np.array_equal(znd.numpy(), np.ones((n, n, n))) for t in ["float64", "float32", "int64", "int32"]: check_assign(t) @@ -80,7 +80,7 @@ def check_expr_sum(dtype): ynd = tvm.nd.array(np.array(np.ones((n, n, n), dtype=b.dtype))) znd = tvm.nd.array(np.zeros((n, n, n), dtype=c.dtype)) f(xnd, ynd, znd) - assert np.array_equal(znd.asnumpy(), xnd.asnumpy() + ynd.asnumpy()) + assert np.array_equal(znd.numpy(), xnd.numpy() + ynd.numpy()) for t in ["float64", "float32", "int64", "int32"]: check_expr_sum(t) @@ -114,7 +114,7 @@ def check_expr_sum(dtype): c_nd = tvm.nd.array(np.zeros((n, n, n), dtype=c.dtype)) f(a_nd, b_nd, d_nd, e_nd, c_nd) assert np.array_equal( - c_nd.asnumpy(), a_nd.asnumpy() + b_nd.asnumpy() + d_nd.asnumpy() + e_nd.asnumpy() + c_nd.numpy(), a_nd.numpy() + b_nd.numpy() + d_nd.numpy() + e_nd.numpy() ) for t in ["float64", "float32", "int64", "int32"]: @@ -143,7 +143,7 @@ def check_expr_sum_custom(dtype): ynd = tvm.nd.array(npb) znd = tvm.nd.array(np.zeros((n, n), dtype=c.dtype)) f(xnd, ynd, znd) - assert np.array_equal(znd.asnumpy(), npa + npb) + assert np.array_equal(znd.numpy(), npa + npb) for t in ["float64", "float32", "int64", "int32"]: check_expr_sum_custom(t) @@ -172,8 +172,8 @@ def check_assign(dtype): f(xnd, ynd, znd) check_array_first = np.array([13, 13, 13, 13]) check_array_second = np.array([14, 14, 14, 14]) - assert np.array_equal(ynd.asnumpy(), check_array_first) - assert np.array_equal(znd.asnumpy(), check_array_second) + assert np.array_equal(ynd.numpy(), check_array_first) + assert np.array_equal(znd.numpy(), check_array_second) for t in ["int64", "int32"]: check_assign(t) @@ -204,8 +204,8 @@ def check_assign(dtype): f(xnd, ynd, znd) check_array_first = np.array([13.0, 13.0, 13.0, 13.0]) check_array_second = np.array([14.0, 14.0, 14.0, 14.0]) - assert np.array_equal(ynd.asnumpy(), check_array_first) - assert np.array_equal(znd.asnumpy(), check_array_second) + assert np.array_equal(ynd.numpy(), check_array_first) + assert np.array_equal(znd.numpy(), check_array_second) for t in ["float64", "float32"]: check_assign(t) diff --git a/tests/python/unittest/test_target_codegen_blob.py b/tests/python/unittest/test_target_codegen_blob.py index 2a309893e663..62dcf924b43b 100644 --- a/tests/python/unittest/test_target_codegen_blob.py +++ b/tests/python/unittest/test_target_codegen_blob.py @@ -42,7 +42,7 @@ def verify(data): module = graph_executor.GraphModule(lib["default"](dev)) module.set_input("data", data) module.run() - out = module.get_output(0).asnumpy() + out = module.get_output(0).numpy() return out synthetic_mod, synthetic_params = relay.testing.synthetic.get_workload(input_shape=input_shape) @@ -61,7 +61,7 @@ def verify(data): module = graph_executor.GraphModule(loaded_lib["default"](dev)) module.set_input("data", data) module.run() - out = module.get_output(0).asnumpy() + out = module.get_output(0).numpy() tvm.testing.assert_allclose(out, verify(data), atol=1e-5) @@ -92,7 +92,7 @@ def test_cuda_lib(): a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), dev) m["add"](a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 
1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) if __name__ == "__main__": diff --git a/tests/python/unittest/test_target_codegen_bool.py b/tests/python/unittest/test_target_codegen_bool.py index 527741a5ef0d..bd6cf27ccaa4 100644 --- a/tests/python/unittest/test_target_codegen_bool.py +++ b/tests/python/unittest/test_target_codegen_bool.py @@ -46,8 +46,8 @@ def check_llvm(): d = tvm.nd.array(np.zeros(n, dtype=D.dtype), dev) f(a, b, d) np.testing.assert_equal( - d.asnumpy(), - np.logical_and(a.asnumpy() > b.asnumpy(), a.asnumpy() > 1).astype("float32"), + d.numpy(), + np.logical_and(a.numpy() > b.numpy(), a.numpy() > 1).astype("float32"), ) def check_device(device): @@ -66,8 +66,8 @@ def check_device(device): d = tvm.nd.array(np.zeros(n, dtype=D.dtype), dev) f(a, b, d) np.testing.assert_equal( - d.asnumpy(), - np.logical_and(a.asnumpy() > b.asnumpy(), a.asnumpy() > 1).astype("float32"), + d.numpy(), + np.logical_and(a.numpy() > b.numpy(), a.numpy() > 1).astype("float32"), ) check_llvm() diff --git a/tests/python/unittest/test_target_codegen_c_host.py b/tests/python/unittest/test_target_codegen_c_host.py index a61bb39bd211..95cd967dd207 100644 --- a/tests/python/unittest/test_target_codegen_c_host.py +++ b/tests/python/unittest/test_target_codegen_c_host.py @@ -43,7 +43,7 @@ def check_c(): b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_c() @@ -88,7 +88,7 @@ def check_c(): b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_c() @@ -114,7 +114,7 @@ def check_c(): a = tvm.nd.array(np.random.randint(-(2 ** 30), 2 ** 30, size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) fadd(a, b) - tvm.testing.assert_allclose(b.asnumpy(), (2 + a.asnumpy()).view("float32")) + tvm.testing.assert_allclose(b.numpy(), (2 + a.numpy()).view("float32")) check_c() @@ -138,7 +138,7 @@ def check_c(): a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) fceil(a, b) - tvm.testing.assert_allclose(b.asnumpy(), (np.ceil(a.asnumpy()).view("float32"))) + tvm.testing.assert_allclose(b.numpy(), (np.ceil(a.numpy()).view("float32"))) check_c() @@ -162,7 +162,7 @@ def check_c(): a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) ffloor(a, b) - tvm.testing.assert_allclose(b.asnumpy(), (np.floor(a.asnumpy()).view("float32"))) + tvm.testing.assert_allclose(b.numpy(), (np.floor(a.numpy()).view("float32"))) check_c() @@ -186,7 +186,7 @@ def check_c(): a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) fround(a, b) - tvm.testing.assert_allclose(b.asnumpy(), (np.round(a.asnumpy()).view("float32"))) + tvm.testing.assert_allclose(b.numpy(), (np.round(a.numpy()).view("float32"))) check_c() diff --git a/tests/python/unittest/test_target_codegen_cross_llvm.py b/tests/python/unittest/test_target_codegen_cross_llvm.py index feb1d43d5ce2..8758ae2a04e8 100644 --- a/tests/python/unittest/test_target_codegen_cross_llvm.py +++ b/tests/python/unittest/test_target_codegen_cross_llvm.py @@ -84,7 +84,7 @@ def build_arm(): b = 
tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) farm(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) print("Verification finish on remote..") build_i386() diff --git a/tests/python/unittest/test_target_codegen_cuda.py b/tests/python/unittest/test_target_codegen_cuda.py index 846bdcb54bd1..fc138bb43f1a 100644 --- a/tests/python/unittest/test_target_codegen_cuda.py +++ b/tests/python/unittest/test_target_codegen_cuda.py @@ -50,7 +50,7 @@ def check_cuda(dtype, n, lanes): a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes))) c = tvm.nd.empty((n,), B.dtype, dev) fun(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) check_cuda("float32", 64, 2) check_cuda("float32", 64, 3) @@ -106,7 +106,7 @@ def check_cuda(n, lanes): c = tvm.nd.empty((n,), B.dtype, dev) fun(a, c) c = tvm.nd.empty((n, lanes), "uint16", dev).copyfrom(c) - tvm.testing.assert_allclose(c.asnumpy(), np_float2np_bf16(np_a + 1)) + tvm.testing.assert_allclose(c.numpy(), np_float2np_bf16(np_a + 1)) check_cuda(64, 2) check_cuda(64, 4) @@ -144,7 +144,7 @@ def check_cuda(dtype, n, lanes): c = tvm.nd.empty((n,), C.dtype, dev).copyfrom(np_c) d = tvm.nd.empty((n,), D.dtype, dev) fun(a, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), np_d) + tvm.testing.assert_allclose(d.numpy(), np_d) check_cuda("int8", 64, 4) @@ -167,7 +167,7 @@ def check_cuda(dtype, n, lanes): a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np_a) b = tvm.nd.empty((n,), B.dtype, dev) fun(a, b) - tvm.testing.assert_allclose(a.asnumpy(), b.asnumpy()) + tvm.testing.assert_allclose(a.numpy(), b.numpy()) check_cuda("int8", 64, 2) check_cuda("int8", 64, 3) @@ -191,7 +191,7 @@ def check_cuda(n, value, lanes): np_a = np.full((n, lanes), value, dtype=dtype) a = tvm.nd.empty(np_a.shape, dtype, dev) fun(a) - np.testing.assert_equal(a.asnumpy(), np_a) + np.testing.assert_equal(a.numpy(), np_a) check_cuda(64, 0xAB, 4) check_cuda(64, 0, 4) @@ -219,7 +219,7 @@ def check_cuda(n, value, lanes): np_a = np.full((n, lanes), value, dtype="int8") a = tvm.nd.empty((n, lanes), dtype, dev) fun(a) - np.testing.assert_equal(a.asnumpy(), np_a) + np.testing.assert_equal(a.numpy(), np_a) check_cuda(64, 1, 8) check_cuda(64, 7, 8) @@ -302,7 +302,7 @@ def _transform(f, *_): ref = a_ + np.array((list(range(4))) * 16, dtype="int32") nda, ndb, ndc = [tvm.nd.array(i, tvm.cuda(0)) for i in [a_, b_, c_]] module(nda, ndb, ndc) - tvm.testing.assert_allclose(ndc.asnumpy(), ref) + tvm.testing.assert_allclose(ndc.numpy(), ref) @tvm.testing.parametrize_targets("cuda", "rocm") @@ -331,7 +331,7 @@ def verify(nthd): a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev) func(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-3) + tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-3) verify(16) verify(32) @@ -369,7 +369,7 @@ def verify(nthdx, nthdy): a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev) func(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np.sum(a.asnumpy(), axis=(1, 2)), rtol=1e-3) + tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=(1, 2)), rtol=1e-3) verify(16, 16) verify(32, 32) @@ -446,7 +446,7 @@ def test_cuda_const_float_to_half(): a = 
tvm.nd.array(a_np, dev) c = tvm.nd.array(c_np, dev) func(a, c) - np.testing.assert_equal(c.asnumpy(), a_np > b.value) + np.testing.assert_equal(c.numpy(), a_np > b.value) @tvm.testing.requires_gpu @@ -473,7 +473,7 @@ def check(device, dtype, m=32, n=32): b_nd = tvm.nd.array(b_np, dev) g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), dev) func(a_nd, b_nd, g_nd) - tvm.testing.assert_allclose(g_nd.asnumpy(), g_np, rtol=1e-3) + tvm.testing.assert_allclose(g_nd.numpy(), g_np, rtol=1e-3) check("cuda", "float32") check("rocm", "float32") @@ -504,7 +504,7 @@ def check(device, dtype, m=32, n=32): a_nd = tvm.nd.array(a_np, dev) b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev) func(a_nd, b_nd) - tvm.testing.assert_allclose(b_nd.asnumpy(), b_np, rtol=1e-3) + tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3) check("cuda", "float32") check("rocm", "float32") @@ -534,7 +534,7 @@ def test_cuda_floordiv_with_vectorization(): a_nd = tvm.nd.array(a_np, dev) b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev) func(a_nd, b_nd) - tvm.testing.assert_allclose(b_nd.asnumpy(), b_np, rtol=1e-3) + tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3) @tvm.testing.requires_gpu @@ -560,7 +560,7 @@ def test_cuda_floormod_with_vectorization(): a_nd = tvm.nd.array(a_np, dev) b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev) func(a_nd, b_nd) - tvm.testing.assert_allclose(b_nd.asnumpy(), b_np, rtol=1e-3) + tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3) @tvm.testing.requires_gpu @@ -594,7 +594,7 @@ def check(t0, t1, factor): b_nd = tvm.nd.array(b_np, dev) c_nd = tvm.nd.array(np.zeros(c_np.shape, dtype=c_np.dtype), dev) func(a_nd, b_nd, c_nd) - tvm.testing.assert_allclose(c_nd.asnumpy(), c_np, rtol=1e-3) + tvm.testing.assert_allclose(c_nd.numpy(), c_np, rtol=1e-3) def skip(t0, t1): if t0 == t1: @@ -690,7 +690,7 @@ def run_test(tvm_intrin, np_func, dtype): a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), dev) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np_func(a.asnumpy()), atol=1e-3, rtol=1e-3) + tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-3, rtol=1e-3) for func in test_funcs: run_test(*func, "float32") @@ -716,7 +716,7 @@ def run_test(tvm_intrin, np_func): a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), dev) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np_func(a.asnumpy()), atol=1e-3, rtol=1e-3) + tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-3, rtol=1e-3) for func in test_funcs: run_test(*func) @@ -742,8 +742,8 @@ def run_test(dtype): a = tvm.nd.array(np.random.randint(0, 100000, size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(shape=(n,)).astype(B.dtype), dev) f(a, b) - ref = np.vectorize(ref_popcount)(a.asnumpy()) - tvm.testing.assert_allclose(b.asnumpy(), ref) + ref = np.vectorize(ref_popcount)(a.numpy()) + tvm.testing.assert_allclose(b.numpy(), ref) run_test("uint32") run_test("uint64") @@ -782,7 +782,7 @@ def check_cuda(dtype, n, l, padding, lanes): ref = np.pad( np_a_reshape, ((0, 0), (padding, padding), (0, 0)), mode="constant", constant_values=0 ) - tvm.testing.assert_allclose(b.asnumpy(), ref) + tvm.testing.assert_allclose(b.numpy(), ref) check_cuda("int8", 64, 16, 3, 2) check_cuda("uint8", 64, 16, 3, 2) @@ -838,7 +838,7 @@ def post_visit(stmt): b = tvm.nd.array(np.random.uniform(size=(512, 512)).astype("float32"), dev) 
c = tvm.nd.array(np.zeros((512, 512), dtype="float32"), dev) mod(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()), rtol=1e-5) @tvm.testing.requires_gpu @@ -984,7 +984,7 @@ def test_unrolled_vectorization(): c_tvm = tvm.nd.empty((N, N), device=dev) func_tvm = tvm.build(s, [A, B, C], target=target) func_tvm(a_tvm, b_tvm, c_tvm) - c_np = c_tvm.asnumpy() + c_np = c_tvm.numpy() tvm.testing.assert_allclose(c_np, N * np.ones((N, N))) diff --git a/tests/python/unittest/test_target_codegen_device.py b/tests/python/unittest/test_target_codegen_device.py index 4ce7a021981d..99b504219c14 100644 --- a/tests/python/unittest/test_target_codegen_device.py +++ b/tests/python/unittest/test_target_codegen_device.py @@ -42,7 +42,7 @@ def check_target(device): # launch the kernel. a = tvm.nd.empty((n,), dtype=A.dtype, device=dev) f(a) - assert a.asnumpy()[0] == value + 3 + assert a.numpy()[0] == value + 3 check_target("cuda") check_target("vulkan") @@ -79,7 +79,7 @@ def check_target(device, host="stackvm"): b = tvm.nd.array(np.random.uniform(size=()).astype(B.dtype), dev) d = tvm.nd.array(np.zeros(n, dtype=D.dtype), dev) f(a, b, d) - tvm.testing.assert_allclose(d.asnumpy(), a.asnumpy() + b.asnumpy() + 1) + tvm.testing.assert_allclose(d.numpy(), a.numpy() + b.numpy() + 1) check_target("cuda", host="llvm") check_target("nvptx", host="llvm") diff --git a/tests/python/unittest/test_target_codegen_extern.py b/tests/python/unittest/test_target_codegen_extern.py index 2aefee97d649..38fac332e9de 100644 --- a/tests/python/unittest/test_target_codegen_extern.py +++ b/tests/python/unittest/test_target_codegen_extern.py @@ -74,7 +74,7 @@ def check_target(target): a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) check_target("llvm") check_target("opencl") @@ -109,7 +109,7 @@ def check_target(target): c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy()) check_target("stackvm") check_target("llvm") @@ -142,11 +142,11 @@ def check_target(target): @tvm.register_func def my_extern_array_func2(aa, bb): assert aa.shape == a.shape - tvm.testing.assert_allclose(aa.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(aa.numpy(), a.numpy() + 1) aa.copyto(bb) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) check_target("llvm") diff --git a/tests/python/unittest/test_target_codegen_llvm.py b/tests/python/unittest/test_target_codegen_llvm.py index 96b67ea26c64..10cbcd68f362 100644 --- a/tests/python/unittest/test_target_codegen_llvm.py +++ b/tests/python/unittest/test_target_codegen_llvm.py @@ -108,7 +108,7 @@ def check_llvm(): # launch the kernel. 
a = tvm.nd.empty((), dtype=A.dtype, device=dev) f(a) - assert a.asnumpy() == value + 3 + assert a.numpy() == value + 3 check_llvm() @@ -137,7 +137,7 @@ def check_llvm(): a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), np.sqrt(a.asnumpy() + 1) * 2 + 2, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), np.sqrt(a.numpy() + 1) * 2 + 2, rtol=1e-5) check_llvm() @@ -160,7 +160,7 @@ def check_llvm(nn, base): a = tvm.nd.array(np.random.uniform(size=(n + base)).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy()[::-1][:n]) + tvm.testing.assert_allclose(c.numpy(), a.numpy()[::-1][:n]) check_llvm(4, 0) check_llvm(128, 8) @@ -189,7 +189,7 @@ def check_llvm(n, lanes): a = tvm.nd.empty((n,), A.dtype).copyfrom(np.random.uniform(size=(n, lanes))) c = tvm.nd.empty((n,), C.dtype, dev) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) check_llvm(64, 2) check_llvm(512, 2) @@ -213,7 +213,7 @@ def check_llvm(nn, base, stride): a = tvm.nd.array(np.random.uniform(size=(n + base, stride)).astype(A.dtype), dev) c = tvm.nd.array(np.zeros((n, stride), dtype=C.dtype), dev) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy()[base:] + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy()[base:] + 1) check_llvm(64, 0, 2) check_llvm(4, 0, 1) @@ -240,7 +240,7 @@ def check_llvm(): a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1 + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1 + 1) check_llvm() @@ -272,9 +272,9 @@ def check_llvm(): b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd1(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) fadd2(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_llvm() @@ -292,9 +292,9 @@ def check_llvm(n, offset): a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) c = tvm.nd.empty((n,), A.dtype, dev) f(a, c) - c_np = a.asnumpy() + c_np = a.numpy() c_np[:offset] = 0 - tvm.testing.assert_allclose(c.asnumpy(), c_np) + tvm.testing.assert_allclose(c.numpy(), c_np) check_llvm(64, 8) @@ -312,8 +312,8 @@ def check_llvm(n): a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev) c = tvm.nd.empty((n,), C.dtype, dev) f(a, c) - c_np = a.asnumpy() == 1 - tvm.testing.assert_allclose(c.asnumpy(), c_np) + c_np = a.numpy() == 1 + tvm.testing.assert_allclose(c.numpy(), c_np) check_llvm(64) @@ -335,8 +335,8 @@ def check_llvm(n): sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev) d = tvm.nd.empty((), D.dtype, dev) f(a, sc, d) - d_np = np.sum(a.asnumpy()) * sc.asnumpy() + 1 - tvm.testing.assert_allclose(d.asnumpy(), d_np) + d_np = np.sum(a.numpy()) * sc.numpy() + 1 + tvm.testing.assert_allclose(d.numpy(), d_np) check_llvm(64) @@ -359,8 +359,8 @@ def check_llvm(n): sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev) d = tvm.nd.empty((), D.dtype, dev) f(a, sc, d) - d_np = np.sum(a.asnumpy()) * sc.asnumpy() + 1 - tvm.testing.assert_allclose(d.asnumpy(), d_np) 
+ d_np = np.sum(a.numpy()) * sc.numpy() + 1 + tvm.testing.assert_allclose(d.numpy(), d_np) check_llvm(64) @@ -461,8 +461,8 @@ def clipb(x): # Run the function and convert the results to numpy f(A_arr, B_arr, D_arr, M_arr) - D_arr = D_arr.asnumpy() - M_arr = M_arr.asnumpy() + D_arr = D_arr.numpy() + M_arr = M_arr.numpy() # This helper just prints additional info on failure def _show_info(): @@ -555,7 +555,7 @@ def check_llvm_reciprocal(n): a = tvm.nd.array(np.full((n,), 100, "float32")) b = tvm.nd.empty((n,), "float32") f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np.zeros((n,), "float32")) + tvm.testing.assert_allclose(b.numpy(), np.zeros((n,), "float32")) check_llvm_reciprocal(4) check_llvm_reciprocal(8) @@ -571,7 +571,7 @@ def check_llvm_sigmoid(n): a = tvm.nd.array(np.full((n,), -1000, "float32")) b = tvm.nd.empty((n,), "float32") f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np.zeros((n,), "float32")) + tvm.testing.assert_allclose(b.numpy(), np.zeros((n,), "float32")) check_llvm_sigmoid(4) check_llvm_sigmoid(8) @@ -684,7 +684,7 @@ def _transform(f, *_): b_ = tvm.nd.array(np.arange(8, 0, -1, dtype="int32")) c_ = tvm.nd.array(np.zeros((8,), dtype="int32")) module(a_, b_, c_) - tvm.testing.assert_allclose(c_.asnumpy(), (a_.asnumpy() * 2).astype("int32")) + tvm.testing.assert_allclose(c_.numpy(), (a_.numpy() * 2).astype("int32")) def np_float2np_bf16(arr): @@ -710,7 +710,7 @@ def np_bf162np_float(arr): def np_bf16_cast_and_cast_back(arr): - """ Convert a numpy array of float to bf16 and cast back""" + """Convert a numpy array of float to bf16 and cast back""" return np_bf162np_float(np_float2np_bf16(arr)) @@ -735,7 +735,7 @@ def dotest(do_vectorize): b_ = np_float2tvm_bf16(npb) c_ = tvm.nd.empty((32,), "uint16") module(a_, b_, c_) - tvm.testing.assert_allclose(np_bf162np_float(c_.asnumpy()), res) + tvm.testing.assert_allclose(np_bf162np_float(c_.numpy()), res) dotest(True) dotest(False) @@ -815,7 +815,7 @@ def do_atomic_add(A): f(a) ref = np.zeros((size,)).astype(A.dtype) ref[0] = size - tvm.testing.assert_allclose(a.asnumpy(), ref, rtol=1e-5) + tvm.testing.assert_allclose(a.numpy(), ref, rtol=1e-5) if __name__ == "__main__": diff --git a/tests/python/unittest/test_target_codegen_rocm.py b/tests/python/unittest/test_target_codegen_rocm.py index 9eb0b5cf938d..894c8ecd0ac9 100644 --- a/tests/python/unittest/test_target_codegen_rocm.py +++ b/tests/python/unittest/test_target_codegen_rocm.py @@ -50,7 +50,7 @@ def test_rocm_cross_thread_reduction(): a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev) frocm(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-4) + tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-4) @tvm.testing.requires_rocm @@ -97,9 +97,9 @@ def check_rocm(dtype, n): dev = tvm.rocm(0) a_np = np.random.uniform(size=(n,)).astype(A.dtype) a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(a_np) - b_np = a.asnumpy() + b_np = a.numpy() tvm.testing.assert_allclose(a_np, b_np) - tvm.testing.assert_allclose(a_np, a.asnumpy()) + tvm.testing.assert_allclose(a_np, a.numpy()) for _ in range(100): dtype = np.random.choice(["float32", "float16", "int8", "int32"]) @@ -124,7 +124,7 @@ def check_rocm(dtype, n, lanes): a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes))) c = tvm.nd.empty((n,), B.dtype, dev) fun(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) 
check_rocm("float32", 64, 2) check_rocm("float16", 64, 2) diff --git a/tests/python/unittest/test_target_codegen_static_init.py b/tests/python/unittest/test_target_codegen_static_init.py index b0c19dfcffeb..4d993e5d6b7b 100644 --- a/tests/python/unittest/test_target_codegen_static_init.py +++ b/tests/python/unittest/test_target_codegen_static_init.py @@ -39,7 +39,7 @@ def test_static_callback(): a = tvm.nd.array(np.zeros(10, dtype=dtype)) f(a) f(a) - np.testing.assert_equal(a.asnumpy(), np.ones(a.shape[0])) + np.testing.assert_equal(a.numpy(), np.ones(a.shape[0])) def test_static_init(): diff --git a/tests/python/unittest/test_target_codegen_vm_basic.py b/tests/python/unittest/test_target_codegen_vm_basic.py index 9bbee76e2736..5667521dc659 100644 --- a/tests/python/unittest/test_target_codegen_vm_basic.py +++ b/tests/python/unittest/test_target_codegen_vm_basic.py @@ -71,7 +71,7 @@ def test_stack_vm_loop(): def check(f): f(a) - np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0])) + np.testing.assert_equal(a.numpy(), np.arange(a.shape[0])) run_jit(mod, check) @@ -97,7 +97,7 @@ def check(f): f(a) y = np.arange(a.shape[0]) * 2 y[5:] -= 1 - np.testing.assert_equal(a.asnumpy(), y) + np.testing.assert_equal(a.numpy(), y) run_jit(mod, check) @@ -117,7 +117,7 @@ def test_vm_parallel(): def check(f): a = tvm.nd.array(np.zeros(10, dtype=dtype)) f(a) - np.testing.assert_equal(a.asnumpy(), np.ones(a.shape[0])) + np.testing.assert_equal(a.numpy(), np.ones(a.shape[0])) run_jit(mod, check) diff --git a/tests/python/unittest/test_target_codegen_vulkan.py b/tests/python/unittest/test_target_codegen_vulkan.py index 56181db677d8..2770ae5878d0 100644 --- a/tests/python/unittest/test_target_codegen_vulkan.py +++ b/tests/python/unittest/test_target_codegen_vulkan.py @@ -28,7 +28,7 @@ def check_mod(mod, x_np, res_np): target = "vulkan" dev = tvm.device(target, 0) ex = relay.create_executor("vm", mod=mod, device=dev, target=target) - res = ex.evaluate()(x_np).asnumpy() + res = ex.evaluate()(x_np).numpy() tvm.testing.assert_allclose(res, res_np, atol=1e-5) @@ -79,9 +79,9 @@ def check_vulkan(dtype, n): dev = tvm.vulkan(0) a_np = np.random.uniform(size=(n,)).astype(A.dtype) a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(a_np) - b_np = a.asnumpy() + b_np = a.numpy() tvm.testing.assert_allclose(a_np, b_np) - tvm.testing.assert_allclose(a_np, a.asnumpy()) + tvm.testing.assert_allclose(a_np, a.numpy()) for _ in range(100): dtype = np.random.choice(["float32", "float16", "int8", "int32"]) @@ -106,7 +106,7 @@ def check_vulkan(dtype, n, lanes): a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes))) c = tvm.nd.empty((n,), B.dtype, dev) fun(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) check_vulkan("float32", 64, 2) check_vulkan("float16", 64, 2) @@ -158,7 +158,7 @@ def build_f(f_ref): f(a, b, c) for ((_, ref), c) in zip(fs, cs): - tvm.testing.assert_allclose(c.asnumpy(), ref(a.asnumpy(), b.asnumpy())) + tvm.testing.assert_allclose(c.numpy(), ref(a.numpy(), b.numpy())) ts = [threading.Thread(target=worker) for _ in range(np.random.randint(1, 10))] for t in ts: @@ -214,7 +214,7 @@ def do_copy(A, B, n): b = tvm.nd.array(b_np, dev) func(a, b) ref = a_np.astype(np.int32) - tvm.testing.assert_allclose(b.asnumpy(), ref) + tvm.testing.assert_allclose(b.numpy(), ref) @tvm.testing.requires_vulkan @@ -289,7 +289,7 @@ def test_scalar_params(num_int_params): b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) f_add(*scalars, a, b) 
- tvm.testing.assert_allclose(a.asnumpy() + sum(scalars), b.asnumpy()) + tvm.testing.assert_allclose(a.numpy() + sum(scalars), b.numpy()) # f_add has 3+num_int_params scalar parameters. The other three # are length_n, stride1, and stride2. @@ -349,12 +349,12 @@ def do_compute(A, B, n): a = tvm.nd.array(np.array([5], dtype=A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev) func(a, b) - tvm.testing.assert_allclose(b.asnumpy(), [55]) + tvm.testing.assert_allclose(b.numpy(), [55]) a = tvm.nd.array(np.array([-5], dtype=A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev) func(a, b) - tvm.testing.assert_allclose(b.asnumpy(), [210]) + tvm.testing.assert_allclose(b.numpy(), [210]) if __name__ == "__main__": diff --git a/tests/python/unittest/test_te_autodiff.py b/tests/python/unittest/test_te_autodiff.py index 7471e8a1eee4..a5995ff0337f 100644 --- a/tests/python/unittest/test_te_autodiff.py +++ b/tests/python/unittest/test_te_autodiff.py @@ -63,7 +63,7 @@ def check_device(device, host="llvm"): grad_data = [tvm.nd.empty(get_const_tuple(i.shape), g.dtype) for i, g in zip(inputs, grads)] mgrad(*grad_data, *input_data, *arg_vals) - g_res = [g.asnumpy() for g in grad_data] + g_res = [g.numpy() for g in grad_data] if desired_grads: assert isinstance(desired_grads, list) @@ -74,10 +74,10 @@ def check_device(device, host="llvm"): def forward(*in_data): out_data = tvm.nd.empty(out_shape, out.dtype) mout(out_data, *[tvm.nd.array(d) for d in list(in_data)]) - return out_data.asnumpy().sum() + return out_data.numpy().sum() tvm.testing.check_numerical_grads( - forward, [d.asnumpy() for d in input_data + arg_vals], g_res + forward, [d.numpy() for d in input_data + arg_vals], g_res ) check_device("cpu") diff --git a/tests/python/unittest/test_te_hybrid_script.py b/tests/python/unittest/test_te_hybrid_script.py index f3091c7b71a6..30b96546f991 100644 --- a/tests/python/unittest/test_te_hybrid_script.py +++ b/tests/python/unittest/test_te_hybrid_script.py @@ -78,7 +78,7 @@ def tvm_val_2_py_val(val): module(*nd_args) for nd, np in zip(out_tensors, ref_data): - tvm.testing.assert_allclose(nd.asnumpy(), np, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(nd.numpy(), np, rtol=1e-5, atol=1e-5) module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))] module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs @@ -413,7 +413,7 @@ def intrin_real(a): tvm_b = tvm.nd.array(numpy.zeros((8,), dtype="float32")) b = intrin_real(a) func(tvm_a, tvm_b) - tvm.testing.assert_allclose(b, tvm_b.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(b, tvm_b.numpy(), rtol=1e-5) @script def intrin_int(a): @@ -431,7 +431,7 @@ def intrin_int(a): tvm_b = tvm.nd.array(numpy.array([0]).astype("int32")) b = intrin_int(a) func(tvm_a, tvm_b) - assert tvm_b.asnumpy()[0] == b[0] + assert tvm_b.numpy()[0] == b[0] # test non caconical loops @@ -536,7 +536,7 @@ def upstream(a): tvm_d = tvm.nd.array(numpy.zeros((20,)).astype("float32")) func(tvm_a, tvm_b, tvm_d) - tvm.testing.assert_allclose(tvm_d.asnumpy(), ref, 1e-5, 1e-5) + tvm.testing.assert_allclose(tvm_d.numpy(), ref, 1e-5, 1e-5) def test_downstream(): @@ -563,7 +563,7 @@ def downstream(a): tvm_a = tvm.nd.array(a) tvm_c = tvm.nd.array(numpy.zeros((20,)).astype("float32")) module(tvm_a, tvm_c) - tvm.testing.assert_allclose(tvm_c.asnumpy(), ref, 1e-5, 1e-5) + tvm.testing.assert_allclose(tvm_c.numpy(), ref, 1e-5, 1e-5) def test_const_param(): @@ -590,7 +590,7 @@ def add_something(a, b): module(nd_a, nd_c) ref = add_something(np_a, 11) - 
tvm.testing.assert_allclose(nd_c.asnumpy(), ref, 1e-5, 1e-5) + tvm.testing.assert_allclose(nd_c.numpy(), ref, 1e-5, 1e-5) def test_value_index(): @@ -624,7 +624,7 @@ def kernel_b(b, a): res = tvm.nd.array(numpy.zeros((4, 4)).astype("int32")) module(tvm.nd.array(np_a), res) - tvm.testing.assert_allclose(res.asnumpy(), ref) + tvm.testing.assert_allclose(res.numpy(), ref) def test_func_call(): @@ -833,7 +833,7 @@ def sum_array(inputs): out_ref += arr out_nd = tvm.nd.array(numpy.zeros((10,), "float32")) mod(*input_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_ref) + tvm.testing.assert_allclose(out_nd.numpy(), out_ref) if __name__ == "__main__": diff --git a/tests/python/unittest/test_te_schedule_ops.py b/tests/python/unittest/test_te_schedule_ops.py index 255e0cdb1f21..bc4bc4f56e19 100644 --- a/tests/python/unittest/test_te_schedule_ops.py +++ b/tests/python/unittest/test_te_schedule_ops.py @@ -496,7 +496,7 @@ def test_reduction_and_dummy_fuse_split(): args = [tvm.nd.empty((), "int32")] + [tvm.nd.array(np.ones((n,), dtype="int32"))] f(*args) - assert args[0].asnumpy() == n + assert args[0].numpy() == n n = 10 X = te.placeholder(shape=(n,), dtype="int32", name="X") @@ -510,7 +510,7 @@ def test_reduction_and_dummy_fuse_split(): tvm.nd.array(np.ones((n,), dtype="int32")) ] f(*args) - assert np.all(args[0].asnumpy() == n) + assert np.all(args[0].numpy() == n) def test_schedule_compute_inline(): diff --git a/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py b/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py index 0f97e4921cf5..83584ad56400 100644 --- a/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py +++ b/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py @@ -112,7 +112,7 @@ def tensor_core_matmul(warp_tile_m=16, m=64, n=32, l=96): print("gemm m=%d n=%d k=%d: %f ms" % (m, n, l, evaluator(a, b, c).mean * 1e3)) c_np = np.dot(a_np, b_np) - np.testing.assert_allclose(c_np, c.asnumpy(), rtol=1e-3) + np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3) def tensor_core_batch_matmul(warp_tile_m=16, m=64, n=32, l=96, batch=2): @@ -211,7 +211,7 @@ def tensor_core_batch_matmul(warp_tile_m=16, m=64, n=32, l=96, batch=2): for bs in range(batch): c_np[bs, :, :] = np.dot(a_np[bs, :, :], b_np[bs, :, :]) - np.testing.assert_allclose(c_np, c.asnumpy(), rtol=1e-3) + np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3) @tvm.testing.requires_tensorcore diff --git a/tests/python/unittest/test_te_schedule_tensor_core.py b/tests/python/unittest/test_te_schedule_tensor_core.py index e0cf58392d21..d86b05ad83f1 100644 --- a/tests/python/unittest/test_te_schedule_tensor_core.py +++ b/tests/python/unittest/test_te_schedule_tensor_core.py @@ -270,7 +270,7 @@ def test_tensor_core_batch_matmal(): func(a, b, c) a_np = a_np.transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n) b_np = b_np.transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n) - c_np = c.asnumpy().transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n) + c_np = c.numpy().transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n) np.testing.assert_allclose( c_np, np.matmul(a_np.astype(C.dtype), b_np.astype(C.dtype)), rtol=1e-4, atol=1e-4 ) @@ -448,9 +448,7 @@ def test_tensor_core_batch_conv(): kernel_h, kernel_w, in_channels, out_channels ) c_np = ( - c.asnumpy() - .transpose((0, 4, 1, 2, 3, 5)) - .reshape(batch_size, height, width, out_channels) + c.numpy().transpose((0, 4, 1, 2, 3, 5)).reshape(batch_size, height, width, out_channels) ) c_std = 
conv2d_nhwc_python( a_np.astype(Conv.dtype), w_np.astype(Conv.dtype), (stride_h, stride_w), (pad_h, pad_w) diff --git a/tests/python/unittest/test_te_tensor.py b/tests/python/unittest/test_te_tensor.py index 9f1400c41a15..ed4a21397885 100644 --- a/tests/python/unittest/test_te_tensor.py +++ b/tests/python/unittest/test_te_tensor.py @@ -355,7 +355,7 @@ def test_tensor_scalar_mixed(): @tvm.register_func("tvm.test_tensor_scalar_scale") def my_scale(tensor, scalar, out): - out_np = tensor.asnumpy() * scalar.asnumpy() + out_np = tensor.numpy() * scalar.numpy() tvm.nd.array(out_np).copyto(out) A = te.placeholder(a.shape, name="A") @@ -375,7 +375,7 @@ def my_scale(tensor, scalar, out): tb = tvm.nd.array(b) tc = tvm.nd.array(c) f(ta, tb, tc) - tvm.testing.assert_allclose(a * b, tc.asnumpy()) + tvm.testing.assert_allclose(a * b, tc.numpy()) def test_tensor_scalar(): @@ -400,7 +400,7 @@ def mycopy(x, y): ta = tvm.nd.array(a) tb = tvm.nd.array(b) f(ta, tb) - tvm.testing.assert_allclose(ta.asnumpy(), tb.asnumpy()) + tvm.testing.assert_allclose(ta.numpy(), tb.numpy()) if __name__ == "__main__": diff --git a/tests/python/unittest/test_te_tensor_overload.py b/tests/python/unittest/test_te_tensor_overload.py index 33dc19a19be9..6ee2bae3525d 100644 --- a/tests/python/unittest/test_te_tensor_overload.py +++ b/tests/python/unittest/test_te_tensor_overload.py @@ -84,7 +84,7 @@ def test_combination(): c = tvm.nd.array(np.random.uniform(size=(n, m)).astype(C.dtype), dev) d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev) foo(x, a, b, c, d) - tvm.testing.assert_allclose(d.asnumpy(), k + a.asnumpy() - b.asnumpy() * c.asnumpy() + x) + tvm.testing.assert_allclose(d.numpy(), k + a.numpy() - b.numpy() * c.numpy() + x) def verify_tensor_scalar_bop(shape, typ="add"): @@ -129,7 +129,7 @@ def check_device(device): a_nd = tvm.nd.array(a_npy, dev) b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), dev) foo(a_nd, b_nd, k_, *shape) - tvm.testing.assert_allclose(b_nd.asnumpy(), b_npy, rtol=1e-5) + tvm.testing.assert_allclose(b_nd.numpy(), b_npy, rtol=1e-5) for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]: check_device(device) @@ -178,7 +178,7 @@ def check_device(device): out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev) for _ in range(1): foo(lhs_nd, rhs_nd, out_nd) - tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy, rtol=1e-4, atol=1e-4) + tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4) for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]: check_device(device) @@ -237,7 +237,7 @@ def check_device(device): b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), dev) c_nd = tvm.nd.array(np.empty(c_npy.shape).astype(C.dtype), dev) foo(a_nd, w_nd, b_nd, c_nd) - tvm.testing.assert_allclose(c_nd.asnumpy(), c_npy, rtol=1e-4, atol=1e-4) + tvm.testing.assert_allclose(c_nd.numpy(), c_npy, rtol=1e-4, atol=1e-4) for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]: check_device(device) diff --git a/tests/python/unittest/test_tir_buffer.py b/tests/python/unittest/test_tir_buffer.py index de03cddfb50c..83377e443764 100644 --- a/tests/python/unittest/test_tir_buffer.py +++ b/tests/python/unittest/test_tir_buffer.py @@ -154,7 +154,7 @@ def check(): b = tvm.nd.array(np.random.uniform(size=(2, 1, 1)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), dev) fadd(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) 
check() @@ -183,7 +183,7 @@ def check_stride(): b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), dev) fadd(a, b, c, 4, 1) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) def check_no_stride(): fadd = tvm.build( @@ -194,7 +194,7 @@ def check_no_stride(): b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), dev) fadd(a, b, c, 4, 1) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) def check_auto_bind(): # Let build bind buffers @@ -204,7 +204,7 @@ def check_auto_bind(): b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), dev) c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), dev) fadd(a, b, c, 4, 1) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_stride() check_no_stride() diff --git a/tests/python/unittest/test_tir_intrin.py b/tests/python/unittest/test_tir_intrin.py index 79b2819212b7..a8d57b3da780 100644 --- a/tests/python/unittest/test_tir_intrin.py +++ b/tests/python/unittest/test_tir_intrin.py @@ -44,7 +44,7 @@ def test_nearbyint(): # This is the default rounding mode with libc as well. # However one can set a different rounding mode and in that # case numpy result might differ. - tvm.testing.assert_allclose(a_rounded.asnumpy(), np.rint(a.asnumpy())) + tvm.testing.assert_allclose(a_rounded.numpy(), np.rint(a.numpy())) def test_round_intrinsics_on_int(): @@ -86,7 +86,7 @@ def run_test(tvm_intrin, np_func): a = tvm.nd.array(np.random.uniform(0.1, 0.5, size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) f(a, b) - tvm.testing.assert_allclose(b.asnumpy(), np_func(a.asnumpy()), atol=1e-5, rtol=1e-5) + tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-5, rtol=1e-5) for func in test_funcs: run_test(*func) @@ -115,9 +115,7 @@ def run_test(tvm_intrin, np_func): b = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(B.dtype), dev) c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) f(a, b, c) - tvm.testing.assert_allclose( - c.asnumpy(), np_func(a.asnumpy(), b.asnumpy()), atol=1e-5, rtol=1e-5 - ) + tvm.testing.assert_allclose(c.numpy(), np_func(a.numpy(), b.numpy()), atol=1e-5, rtol=1e-5) for func in test_funcs: run_test(*func) @@ -138,9 +136,7 @@ def test_ldexp(): b = tvm.nd.array(np.random.randint(0, 5, size=n).astype(B.dtype), dev) c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) f(a, b, c) - tvm.testing.assert_allclose( - c.asnumpy(), np.ldexp(a.asnumpy(), b.asnumpy()), atol=1e-5, rtol=1e-5 - ) + tvm.testing.assert_allclose(c.numpy(), np.ldexp(a.numpy(), b.numpy()), atol=1e-5, rtol=1e-5) def test_clz(): @@ -182,7 +178,7 @@ def clz_np(x, dtype): b = tvm.nd.array(np.zeros((n,)).astype("int32"), dev) f(a, b) ref = clz_np(a_np, dtype) - np.testing.assert_equal(b.asnumpy(), ref) + np.testing.assert_equal(b.numpy(), ref) @tvm.script.tir diff --git a/tests/python/unittest/test_tir_ir_builder.py b/tests/python/unittest/test_tir_ir_builder.py index 0b05c1093bc6..355d3abed559 100644 --- a/tests/python/unittest/test_tir_ir_builder.py +++ b/tests/python/unittest/test_tir_ir_builder.py @@ -114,7 +114,7 @@ def check_target(target): b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = 
tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_target("llvm") @@ -167,7 +167,7 @@ def check_target(target): b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_target("opencl") check_target("cuda") @@ -223,7 +223,7 @@ def check_target(target, ir): c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) func(a, b, c) ref = num_iter * (a_np + b_np) - tvm.testing.assert_allclose(c.asnumpy(), ref, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(c.numpy(), ref, rtol=1e-5, atol=1e-5) check_target("llvm", test_ir) @@ -287,7 +287,7 @@ def check_target(target, ir): c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) func(c) ref = np.array([collatz_ref(i) for i in range(n)]) - tvm.testing.assert_allclose(c.asnumpy(), ref) + tvm.testing.assert_allclose(c.numpy(), ref) check_target("llvm", collatz_ir_cpu) @@ -400,7 +400,7 @@ def check_target(target, ir): dev = tvm.device(target, 0) c = tvm.nd.array(np.zeros(shape, dtype=C.dtype), dev) func(c) - tvm.testing.assert_allclose(c.asnumpy(), ref, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(c.numpy(), ref, rtol=1e-5, atol=1e-5) check_target("llvm", mandel_ir_cpu) check_target("npvtx", mandel_ir_gpu) @@ -489,7 +489,7 @@ def check_target(target, ir): c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) func(a, b, c) ref = np.searchsorted(a_np, b_np) - tvm.testing.assert_allclose(c.asnumpy(), ref) + tvm.testing.assert_allclose(c.numpy(), ref) check_target("llvm", searchsorted_ir_cpu) check_target("cuda", searchsorted_ir_gpu) diff --git a/tests/python/unittest/test_tir_transform_instrument_bound_checkers.py b/tests/python/unittest/test_tir_transform_instrument_bound_checkers.py index c035fd063dba..2c9997f6fe78 100644 --- a/tests/python/unittest/test_tir_transform_instrument_bound_checkers.py +++ b/tests/python/unittest/test_tir_transform_instrument_bound_checkers.py @@ -114,7 +114,7 @@ def test_in_bounds_vectorize_llvm(): a = tvm.nd.empty((n,), A.dtype).copyfrom(np.random.uniform(size=(n, lanes))) c = tvm.nd.empty((n,), C.dtype, dev) f(a, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) @tvm.testing.requires_llvm @@ -507,8 +507,8 @@ def test_out_of_bounds_tensors_with_zero_shape_op_with_not_zero_shape_llvm(): sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev) d = tvm.nd.empty((), D.dtype, dev) f(a, sc, d) - d_np = np.sum(a.asnumpy()) * sc.asnumpy() + 1 - tvm.testing.assert_allclose(d.asnumpy(), d_np) + d_np = np.sum(a.numpy()) * sc.numpy() + 1 + tvm.testing.assert_allclose(d.numpy(), d_np) if __name__ == "__main__": diff --git a/tests/python/unittest/test_tir_transform_loop_partition.py b/tests/python/unittest/test_tir_transform_loop_partition.py index f5a5e4ca6563..9e8848083908 100644 --- a/tests/python/unittest/test_tir_transform_loop_partition.py +++ b/tests/python/unittest/test_tir_transform_loop_partition.py @@ -507,8 +507,8 @@ def test_double_splitting_with_indivisible_factors(): dev, ) func(a, c, d) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy(), rtol=1e-5) - tvm.testing.assert_allclose(d.asnumpy(), a.asnumpy(), rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), a.numpy(), rtol=1e-5) + 
tvm.testing.assert_allclose(d.numpy(), a.numpy(), rtol=1e-5) def test_simple_rfactor(): diff --git a/tests/python/unittest/test_tir_transform_lower_intrin.py b/tests/python/unittest/test_tir_transform_lower_intrin.py index fb3790e281fd..0764daac461a 100644 --- a/tests/python/unittest/test_tir_transform_lower_intrin.py +++ b/tests/python/unittest/test_tir_transform_lower_intrin.py @@ -55,7 +55,7 @@ def make_binds(i): c = tvm.nd.array(np.zeros(len(data), dtype=expr.dtype)) f(a, b, c) cref = np.array([fref(x, y) for x, y in data]) - np.testing.assert_equal(c.asnumpy(), cref) + np.testing.assert_equal(c.numpy(), cref) def get_ref_data(): diff --git a/tests/python/unittest/test_tir_transform_lower_tvm_builtin.py b/tests/python/unittest/test_tir_transform_lower_tvm_builtin.py index d6b427a50fae..63772dea65d7 100644 --- a/tests/python/unittest/test_tir_transform_lower_tvm_builtin.py +++ b/tests/python/unittest/test_tir_transform_lower_tvm_builtin.py @@ -22,7 +22,7 @@ @tvm.register_func("tvm.test_matmul") def my_matmul(a, b, c): - c.copyfrom(np.dot(a.asnumpy(), b.asnumpy())) + c.copyfrom(np.dot(a.numpy(), b.numpy())) def check_packed_func(target="llvm"): @@ -169,7 +169,7 @@ def build_tir(): f = tvm.build(mod, None, "llvm") a = tvm.nd.array(np.zeros(2, dtype="float32")) f(a) - tvm.testing.assert_allclose(a.asnumpy(), expected_value) + tvm.testing.assert_allclose(a.numpy(), expected_value) if __name__ == "__main__": diff --git a/tests/python/unittest/test_tir_transform_lower_warp_memory.py b/tests/python/unittest/test_tir_transform_lower_warp_memory.py index 2a8407823f69..ef474c15cfbb 100644 --- a/tests/python/unittest/test_tir_transform_lower_warp_memory.py +++ b/tests/python/unittest/test_tir_transform_lower_warp_memory.py @@ -131,7 +131,7 @@ def check_cuda(dtype): A_nd = tvm.nd.array(A_np, dev) B_nd = tvm.nd.array(np.zeros(B_np.shape, dtype=B_np.dtype), dev) func(A_nd, B_nd) - tvm.testing.assert_allclose(B_nd.asnumpy(), B_np, rtol=1e-3) + tvm.testing.assert_allclose(B_nd.numpy(), B_np, rtol=1e-3) check_cuda("float32") check_cuda("float16") @@ -188,7 +188,7 @@ def check_cuda(dtype): A_nd = tvm.nd.array(A_np, dev) B_nd = tvm.nd.array(np.zeros(B_np.shape, dtype=B_np.dtype), dev) func(A_nd, B_nd) - tvm.testing.assert_allclose(B_nd.asnumpy(), B_np, rtol=1e-3) + tvm.testing.assert_allclose(B_nd.numpy(), B_np, rtol=1e-3) check_cuda("float32") check_cuda("float16") @@ -236,7 +236,7 @@ def check_cuda(dtype): B_nd = tvm.nd.array(AB_np, dev) C_nd = tvm.nd.array(np.zeros(C_np.shape, dtype=C_np.dtype), dev) func(A_nd, B_nd, C_nd) - tvm.testing.assert_allclose(C_nd.asnumpy(), C_np, rtol=1e-3) + tvm.testing.assert_allclose(C_nd.numpy(), C_np, rtol=1e-3) check_cuda("float32") check_cuda("float16") @@ -268,7 +268,7 @@ def check(device, m): B_nd = tvm.nd.array(B_np, dev) func(A_nd, B_nd) B_np = A_np + 1 - tvm.testing.assert_allclose(B_nd.asnumpy(), B_np) + tvm.testing.assert_allclose(B_nd.numpy(), B_np) for device in ["cuda", "rocm"]: if not tvm.testing.device_enabled(device): diff --git a/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py b/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py index 8664c86d6262..e0fa45218aae 100644 --- a/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py +++ b/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py @@ -153,7 +153,7 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding): func(data_tvm, weight_tvm, bias_tvm, out_tvm) # Check results -np.testing.assert_allclose(out_np, out_tvm.asnumpy(), rtol=1e-3) +np.testing.assert_allclose(out_np, out_tvm.numpy(), rtol=1e-3) # 
Evaluate execution time evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500) diff --git a/tutorials/auto_scheduler/tune_sparse_x86.py b/tutorials/auto_scheduler/tune_sparse_x86.py index d8e6404e31f6..55ee76ef6c4f 100644 --- a/tutorials/auto_scheduler/tune_sparse_x86.py +++ b/tutorials/auto_scheduler/tune_sparse_x86.py @@ -271,7 +271,7 @@ def apply_func(search_policy, state, stage_id): func(X_tvm, W_data_tvm, W_indices_tvm, W_indptr_tvm, B_tvm, Y_tvm) # Check results -tvm.testing.assert_allclose(Y_np, Y_tvm.asnumpy(), atol=1e-4, rtol=1e-4) +tvm.testing.assert_allclose(Y_np, Y_tvm.numpy(), atol=1e-4, rtol=1e-4) # Evaluate execution time. evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500) diff --git a/tutorials/autotvm/tune_conv2d_cuda.py b/tutorials/autotvm/tune_conv2d_cuda.py index c46180de6d89..4e80a74413aa 100644 --- a/tutorials/autotvm/tune_conv2d_cuda.py +++ b/tutorials/autotvm/tune_conv2d_cuda.py @@ -236,7 +236,7 @@ def conv2d_no_batching(N, H, W, CO, CI, KH, KW, stride, padding): c_tvm = tvm.nd.empty(c_np.shape, device=dev) func(a_tvm, w_tvm, c_tvm) -tvm.testing.assert_allclose(c_np, c_tvm.asnumpy(), rtol=1e-2) +tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-2) # Evaluate running time. Here we choose a large repeat number (400) to reduce the noise # and the overhead of kernel launch. You can also use nvprof to validate the result. diff --git a/tutorials/dev/bring_your_own_datatypes.py b/tutorials/dev/bring_your_own_datatypes.py index f9dc8bcdc948..06d96e14d28c 100644 --- a/tutorials/dev/bring_your_own_datatypes.py +++ b/tutorials/dev/bring_your_own_datatypes.py @@ -264,7 +264,7 @@ def get_cat_image(): ex = tvm.relay.create_executor("graph", mod=module) input = get_cat_image() -result = ex.evaluate()(input, **params).asnumpy() +result = ex.evaluate()(input, **params).numpy() # print first 10 elements print(result.flatten()[:10]) @@ -402,7 +402,7 @@ def convert_ndarray(dst_dtype, array): # Vectorization is not implemented with custom datatypes. 
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}): result_myfloat = ex.evaluate(expr)(input, **params) - result_myfloat = convert_ndarray(src_dtype, result_myfloat).asnumpy() + result_myfloat = convert_ndarray(src_dtype, result_myfloat).numpy() # print first 10 elements print(result_myfloat.flatten()[:10]) diff --git a/tutorials/frontend/build_gcn.py b/tutorials/frontend/build_gcn.py index e73dc2dca287..2dcc7ba49b80 100644 --- a/tutorials/frontend/build_gcn.py +++ b/tutorials/frontend/build_gcn.py @@ -343,7 +343,7 @@ def prepare_params(g, data): # Run the TVM model, test for accuracy and verify with DGL # -------------------------------------------------------- m.run() -logits_tvm = m.get_output(0).asnumpy() +logits_tvm = m.get_output(0).numpy() print("Print the first five outputs from TVM execution\n", logits_tvm[:5]) labels = data.labels diff --git a/tutorials/frontend/deploy_model_on_android.py b/tutorials/frontend/deploy_model_on_android.py index 864e81328c99..f435befb8250 100644 --- a/tutorials/frontend/deploy_model_on_android.py +++ b/tutorials/frontend/deploy_model_on_android.py @@ -328,7 +328,7 @@ def transform_image(image): out = module.get_output(0) # get top1 result -top1 = np.argmax(out.asnumpy()) +top1 = np.argmax(out.numpy()) print("TVM prediction top-1: {}".format(synset[top1])) print("Evaluate inference time cost...") diff --git a/tutorials/frontend/deploy_model_on_rasp.py b/tutorials/frontend/deploy_model_on_rasp.py index a59665f62f1c..58e9c55de2c7 100644 --- a/tutorials/frontend/deploy_model_on_rasp.py +++ b/tutorials/frontend/deploy_model_on_rasp.py @@ -226,5 +226,5 @@ def transform_image(image): # get output out = module.get_output(0) # get top1 result -top1 = np.argmax(out.asnumpy()) +top1 = np.argmax(out.numpy()) print("TVM prediction top-1: {}".format(synset[top1])) diff --git a/tutorials/frontend/deploy_object_detection_pytorch.py b/tutorials/frontend/deploy_object_detection_pytorch.py index 34a589d8055f..8e8f3947eb5a 100644 --- a/tutorials/frontend/deploy_object_detection_pytorch.py +++ b/tutorials/frontend/deploy_object_detection_pytorch.py @@ -142,9 +142,9 @@ def forward(self, inp): # Get boxes with score larger than 0.9 # ------------------------------------ score_threshold = 0.9 -boxes = tvm_res[0].asnumpy().tolist() +boxes = tvm_res[0].numpy().tolist() valid_boxes = [] -for i, score in enumerate(tvm_res[1].asnumpy().tolist()): +for i, score in enumerate(tvm_res[1].numpy().tolist()): if score > score_threshold: valid_boxes.append(boxes[i]) else: diff --git a/tutorials/frontend/deploy_prequantized.py b/tutorials/frontend/deploy_prequantized.py index 308027a4a193..a59655222278 100644 --- a/tutorials/frontend/deploy_prequantized.py +++ b/tutorials/frontend/deploy_prequantized.py @@ -94,7 +94,7 @@ def run_tvm_model(mod, params, input_name, inp, target="llvm"): runtime.set_input(input_name, inp) runtime.run() - return runtime.get_output(0).asnumpy(), runtime + return runtime.get_output(0).numpy(), runtime ################################################################################# diff --git a/tutorials/frontend/deploy_prequantized_tflite.py b/tutorials/frontend/deploy_prequantized_tflite.py index e0f9a6b2ebde..e3934e9b250f 100644 --- a/tutorials/frontend/deploy_prequantized_tflite.py +++ b/tutorials/frontend/deploy_prequantized_tflite.py @@ -135,7 +135,7 @@ def get_real_image(im_height, im_width): ############################################################################### # Lets run TFLite pre-quantized model inference and get the 
TFLite prediction. def run_tflite_model(tflite_model_buf, input_data): - """ Generic function to execute TFLite """ + """Generic function to execute TFLite""" try: from tensorflow import lite as interpreter_wrapper except ImportError: @@ -173,7 +173,7 @@ def run_tvm(lib): rt_mod = graph_executor.GraphModule(lib["default"](tvm.cpu(0))) rt_mod.set_input("input", data) rt_mod.run() - tvm_res = rt_mod.get_output(0).asnumpy() + tvm_res = rt_mod.get_output(0).numpy() tvm_pred = np.squeeze(tvm_res).argsort()[-5:][::-1] return tvm_pred, rt_mod diff --git a/tutorials/frontend/deploy_sparse.py b/tutorials/frontend/deploy_sparse.py index eb9b4ee23a1a..d3375c40fe72 100644 --- a/tutorials/frontend/deploy_sparse.py +++ b/tutorials/frontend/deploy_sparse.py @@ -281,7 +281,7 @@ def random_sparse_bert_params(func, params, density, BS_R, BS_C): def deepcopy(param_dic): ret = {} for k, v in param_dic.items(): - ret[k] = tvm.nd.array(v.asnumpy()) + ret[k] = tvm.nd.array(v.numpy()) return ret new_params = deepcopy(params) diff --git a/tutorials/frontend/deploy_ssd_gluoncv.py b/tutorials/frontend/deploy_ssd_gluoncv.py index 40b40ce1f441..ebe18670c6a3 100644 --- a/tutorials/frontend/deploy_ssd_gluoncv.py +++ b/tutorials/frontend/deploy_ssd_gluoncv.py @@ -123,9 +123,9 @@ def run(lib, dev): ax = utils.viz.plot_bbox( img, - bounding_boxs.asnumpy()[0], - scores.asnumpy()[0], - class_IDs.asnumpy()[0], + bounding_boxs.numpy()[0], + scores.numpy()[0], + class_IDs.numpy()[0], class_names=block.classes, ) plt.show() diff --git a/tutorials/frontend/from_caffe2.py b/tutorials/frontend/from_caffe2.py index 1c00f92cfb4f..263f98c9454f 100644 --- a/tutorials/frontend/from_caffe2.py +++ b/tutorials/frontend/from_caffe2.py @@ -117,7 +117,7 @@ def transform_image(image): m.run() # get outputs tvm_out = m.get_output(0) -top1_tvm = np.argmax(tvm_out.asnumpy()[0]) +top1_tvm = np.argmax(tvm_out.numpy()[0]) ##################################################################### # Look up synset name diff --git a/tutorials/frontend/from_coreml.py b/tutorials/frontend/from_coreml.py index ea8817d3a0a8..98d1969f3639 100644 --- a/tutorials/frontend/from_coreml.py +++ b/tutorials/frontend/from_coreml.py @@ -92,7 +92,7 @@ m.run() # get outputs tvm_output = m.get_output(0) -top1 = np.argmax(tvm_output.asnumpy()[0]) +top1 = np.argmax(tvm_output.numpy()[0]) ##################################################################### # Look up synset name diff --git a/tutorials/frontend/from_darknet.py b/tutorials/frontend/from_darknet.py index b29ed3d962c7..232058641ab0 100644 --- a/tutorials/frontend/from_darknet.py +++ b/tutorials/frontend/from_darknet.py @@ -137,10 +137,10 @@ layer_out = {} layer_out["type"] = "Region" # Get the region layer attributes (n, out_c, out_h, out_w, classes, coords, background) - layer_attr = m.get_output(2).asnumpy() - layer_out["biases"] = m.get_output(1).asnumpy() + layer_attr = m.get_output(2).numpy() + layer_out["biases"] = m.get_output(1).numpy() out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3]) - layer_out["output"] = m.get_output(0).asnumpy().reshape(out_shape) + layer_out["output"] = m.get_output(0).numpy().reshape(out_shape) layer_out["classes"] = layer_attr[4] layer_out["coords"] = layer_attr[5] layer_out["background"] = layer_attr[6] @@ -151,11 +151,11 @@ layer_out = {} layer_out["type"] = "Yolo" # Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total) - layer_attr = m.get_output(i * 4 + 3).asnumpy() - layer_out["biases"] = m.get_output(i * 4 + 
2).asnumpy() - layer_out["mask"] = m.get_output(i * 4 + 1).asnumpy() + layer_attr = m.get_output(i * 4 + 3).numpy() + layer_out["biases"] = m.get_output(i * 4 + 2).numpy() + layer_out["mask"] = m.get_output(i * 4 + 1).numpy() out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3]) - layer_out["output"] = m.get_output(i * 4).asnumpy().reshape(out_shape) + layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape) layer_out["classes"] = layer_attr[4] tvm_out.append(layer_out) @@ -164,11 +164,11 @@ layer_out = {} layer_out["type"] = "Yolo" # Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total) - layer_attr = m.get_output(i * 4 + 3).asnumpy() - layer_out["biases"] = m.get_output(i * 4 + 2).asnumpy() - layer_out["mask"] = m.get_output(i * 4 + 1).asnumpy() + layer_attr = m.get_output(i * 4 + 3).numpy() + layer_out["biases"] = m.get_output(i * 4 + 2).numpy() + layer_out["mask"] = m.get_output(i * 4 + 1).numpy() out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3]) - layer_out["output"] = m.get_output(i * 4).asnumpy().reshape(out_shape) + layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape) layer_out["classes"] = layer_attr[4] tvm_out.append(layer_out) thresh = 0.560 diff --git a/tutorials/frontend/from_keras.py b/tutorials/frontend/from_keras.py index 8625465dd2f5..1c48aff799d4 100644 --- a/tutorials/frontend/from_keras.py +++ b/tutorials/frontend/from_keras.py @@ -105,7 +105,7 @@ # --------------- dtype = "float32" tvm_out = executor.evaluate()(tvm.nd.array(data.astype(dtype)), **params) -top1_tvm = np.argmax(tvm_out.asnumpy()[0]) +top1_tvm = np.argmax(tvm_out.numpy()[0]) ##################################################################### # Look up synset name diff --git a/tutorials/frontend/from_mxnet.py b/tutorials/frontend/from_mxnet.py index da1bf4e120b4..0ce610a2cdd6 100644 --- a/tutorials/frontend/from_mxnet.py +++ b/tutorials/frontend/from_mxnet.py @@ -115,7 +115,7 @@ def transform_image(image): m.run() # get outputs tvm_output = m.get_output(0) -top1 = np.argmax(tvm_output.asnumpy()[0]) +top1 = np.argmax(tvm_output.numpy()[0]) print("TVM prediction top-1:", top1, synset[top1]) ###################################################################### diff --git a/tutorials/frontend/from_onnx.py b/tutorials/frontend/from_onnx.py index 1b969bcd4e53..4eba297935f0 100644 --- a/tutorials/frontend/from_onnx.py +++ b/tutorials/frontend/from_onnx.py @@ -98,7 +98,7 @@ # Execute on TVM # --------------------------------------------- dtype = "float32" -tvm_output = intrp.evaluate()(tvm.nd.array(x.astype(dtype)), **params).asnumpy() +tvm_output = intrp.evaluate()(tvm.nd.array(x.astype(dtype)), **params).numpy() ###################################################################### # Display results diff --git a/tutorials/frontend/from_pytorch.py b/tutorials/frontend/from_pytorch.py index 5f515e656bc8..e8d0b4998f9e 100644 --- a/tutorials/frontend/from_pytorch.py +++ b/tutorials/frontend/from_pytorch.py @@ -155,7 +155,7 @@ class_id_to_key = [x.strip() for x in class_id_to_key] # Get top-1 result for TVM -top1_tvm = np.argmax(tvm_output.asnumpy()[0]) +top1_tvm = np.argmax(tvm_output.numpy()[0]) tvm_class_key = class_id_to_key[top1_tvm] # Convert input to PyTorch variable and get PyTorch result for comparison diff --git a/tutorials/frontend/from_tensorflow.py b/tutorials/frontend/from_tensorflow.py index 468caf5292e9..fc87c07fb569 100644 --- a/tutorials/frontend/from_tensorflow.py +++ 
b/tutorials/frontend/from_tensorflow.py @@ -165,7 +165,7 @@ # Process the output # ------------------ # Process the model output to human readable text for InceptionV1. -predictions = tvm_output.asnumpy() +predictions = tvm_output.numpy() predictions = np.squeeze(predictions) # Creates node ID --> English string lookup. diff --git a/tutorials/frontend/from_tflite.py b/tutorials/frontend/from_tflite.py index a85cfcea913c..b72040236654 100644 --- a/tutorials/frontend/from_tflite.py +++ b/tutorials/frontend/from_tflite.py @@ -160,7 +160,7 @@ def extract(path): module.run() # Get output -tvm_output = module.get_output(0).asnumpy() +tvm_output = module.get_output(0).numpy() ###################################################################### # Display results diff --git a/tutorials/frontend/using_external_lib.py b/tutorials/frontend/using_external_lib.py index 232f618bb28a..8b6957d1dbf6 100644 --- a/tutorials/frontend/using_external_lib.py +++ b/tutorials/frontend/using_external_lib.py @@ -84,7 +84,7 @@ module.run() out_shape = (batch_size, out_channels, 224, 224) out = module.get_output(0, tvm.nd.empty(out_shape)) -out_cuda = out.asnumpy() +out_cuda = out.numpy() ###################################################################### # The generated pseudo code should look something like below. # Note how bias add, batch normalization, and ReLU activation are fused into the convolution kernel. @@ -505,7 +505,7 @@ module.run() out_shape = (batch_size, out_channels, 224, 224) out = module.get_output(0, tvm.nd.empty(out_shape)) -out_cudnn = out.asnumpy() +out_cudnn = out.numpy() ###################################################################### # Note that if you use cuDNN, Relay cannot fuse convolution with layers following it. diff --git a/tutorials/get_started/auto_tuning_with_python.py b/tutorials/get_started/auto_tuning_with_python.py index a1d569b7d783..8160442cdefd 100644 --- a/tutorials/get_started/auto_tuning_with_python.py +++ b/tutorials/get_started/auto_tuning_with_python.py @@ -187,7 +187,7 @@ module.set_input(input_name, img_data) module.run() output_shape = (1, 1000) -tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).asnumpy() +tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy() ################################################################################ # Collect Basic Performance Data @@ -419,7 +419,7 @@ module.set_input(input_name, img_data) module.run() output_shape = (1, 1000) -tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).asnumpy() +tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy() scores = softmax(tvm_output) scores = np.squeeze(scores) diff --git a/tutorials/get_started/autotvm_matmul.py b/tutorials/get_started/autotvm_matmul.py index b63f69361d9f..234315b53ff9 100644 --- a/tutorials/get_started/autotvm_matmul.py +++ b/tutorials/get_started/autotvm_matmul.py @@ -361,7 +361,7 @@ def matmul(N, L, M, dtype): c_tvm = tvm.nd.empty(c_np.shape) func(tvm.nd.array(a_np), tvm.nd.array(b_np), c_tvm) -tvm.testing.assert_allclose(c_np, c_tvm.asnumpy(), rtol=1e-4) +tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-4) ################################################################################ # Final Notes and Summary diff --git a/tutorials/get_started/cross_compilation_and_rpc.py b/tutorials/get_started/cross_compilation_and_rpc.py index 3c23c4956262..25208369f74d 100644 --- a/tutorials/get_started/cross_compilation_and_rpc.py +++ b/tutorials/get_started/cross_compilation_and_rpc.py @@ -185,7 +185,7 @@ 
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) # the function will run on the remote device func(a, b) -np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) +np.testing.assert_equal(b.numpy(), a.numpy() + 1) ###################################################################### # When you want to evaluate the performance of the kernel on the remote @@ -249,7 +249,7 @@ def run_opencl(): a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) func(a, b) - np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) + np.testing.assert_equal(b.numpy(), a.numpy() + 1) print("OpenCL test passed!") diff --git a/tutorials/get_started/relay_quick_start.py b/tutorials/get_started/relay_quick_start.py index 9bd3065bdd1c..9f58b1602d7f 100644 --- a/tutorials/get_started/relay_quick_start.py +++ b/tutorials/get_started/relay_quick_start.py @@ -116,7 +116,7 @@ # run module.run() # get output -out = module.get_output(0, tvm.nd.empty(out_shape)).asnumpy() +out = module.get_output(0, tvm.nd.empty(out_shape)).numpy() # Print first 10 elements of output print(out.flatten()[0:10]) @@ -145,7 +145,7 @@ module = graph_executor.GraphModule(loaded_lib["default"](dev)) module.run(data=input_data) -out_deploy = module.get_output(0).asnumpy() +out_deploy = module.get_output(0).numpy() # Print first 10 elements of output print(out_deploy.flatten()[0:10]) diff --git a/tutorials/get_started/tensor_expr_get_started.py b/tutorials/get_started/tensor_expr_get_started.py index 4f8f04126846..ee13d9e475f6 100644 --- a/tutorials/get_started/tensor_expr_get_started.py +++ b/tutorials/get_started/tensor_expr_get_started.py @@ -151,7 +151,7 @@ b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) +tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # To get a comparison of how fast this version is compared to numpy, create a @@ -220,7 +220,7 @@ def evaluate_addition(func, target, optimization, log): fadd_parallel = tvm.build(s, [A, B, C], tgt, name="myadd_parallel") fadd_parallel(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) +tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) evaluate_addition(fadd_parallel, tgt, "parallel", log=log) @@ -361,7 +361,7 @@ def evaluate_addition(func, target, optimization, log): # - We first create a GPU device. # - Then tvm.nd.array copies the data to the GPU. # - ``fadd`` runs the actual computation - # - ``asnumpy()`` copies the GPU array back to the CPU (so we can verify correctness). + # - ``numpy()`` copies the GPU array back to the CPU (so we can verify correctness). # # Note that copying the data to and from the memory on the GPU is a required step. 
@@ -372,7 +372,7 @@ def evaluate_addition(func, target, optimization, log): b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # Inspect the Generated GPU Code @@ -449,7 +449,7 @@ def evaluate_addition(func, target, optimization, log): fadd1.import_module(fadd1_dev) fadd1(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) +tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # Pack Everything into One Library @@ -462,7 +462,7 @@ def evaluate_addition(func, target, optimization, log): fadd.export_library(temp.relpath("myadd_pack.so")) fadd2 = tvm.runtime.load_module(temp.relpath("myadd_pack.so")) fadd2(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) +tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # .. note:: Runtime API and Thread-Safety @@ -494,7 +494,7 @@ def evaluate_addition(func, target, optimization, log): b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd_cl(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) + tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # .. note:: TE Scheduling Primitives @@ -593,7 +593,7 @@ def evaluate_addition(func, target, optimization, log): ) print("Numpy running time: %f" % (np_running_time / np_repeat)) -answer = numpy.dot(a.asnumpy(), b.asnumpy()) +answer = numpy.dot(a.numpy(), b.numpy()) ################################################################################ # Now we write a basic matrix multiplication using TVM TE and verify that it @@ -613,7 +613,7 @@ def evaluate_addition(func, target, optimization, log): c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) +tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) def evaluate_operation(s, vars, target, name, optimization, log): @@ -622,7 +622,7 @@ def evaluate_operation(s, vars, target, name, optimization, log): c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) - tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) + tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) mean_time = evaluator(a, b, c).mean diff --git a/tutorials/get_started/tune_matmul_x86.py b/tutorials/get_started/tune_matmul_x86.py index 931f877595f5..8156d0e106ff 100644 --- a/tutorials/get_started/tune_matmul_x86.py +++ b/tutorials/get_started/tune_matmul_x86.py @@ -158,7 +158,7 @@ def matmul_add(N, L, M, dtype): func(a_tvm, b_tvm, c_tvm, out_tvm) # Check results -np.testing.assert_allclose(out_np, out_tvm.asnumpy(), rtol=1e-3) +np.testing.assert_allclose(out_np, out_tvm.numpy(), rtol=1e-3) # Evaluate execution time. 
evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500) diff --git a/tutorials/language/extern_op.py b/tutorials/language/extern_op.py index 277af712d90b..fb9b2eaf8d13 100644 --- a/tutorials/language/extern_op.py +++ b/tutorials/language/extern_op.py @@ -84,7 +84,7 @@ d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev) bb = 10.0 f(a, b, d, bb) -tvm.testing.assert_allclose(d.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()) + 10, rtol=1e-5) +tvm.testing.assert_allclose(d.numpy(), np.dot(a.numpy(), b.numpy()) + 10, rtol=1e-5) ###################################################################### # Extern Contrib Wrappers @@ -113,7 +113,7 @@ @tvm.register_func("tvm.contrib.my_tvm_addone") def my_tvm_addone(x, y): print("my_tvm_addone signatures: %s, %s" % (type(x), type(y))) - tvm.nd.array(x.asnumpy() + 1).copyto(y) + tvm.nd.array(x.numpy() + 1).copyto(y) A = te.placeholder((n,), name="A") @@ -128,7 +128,7 @@ def my_tvm_addone(x, y): a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), dev) f(a, b) -tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1, rtol=1e-5) +tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1, rtol=1e-5) ###################################################################### # Summary diff --git a/tutorials/language/reduction.py b/tutorials/language/reduction.py index 206848cebbb4..164f36dafc79 100644 --- a/tutorials/language/reduction.py +++ b/tutorials/language/reduction.py @@ -141,7 +141,7 @@ a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev) fcuda(a, b) -tvm.testing.assert_allclose(b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-4) +tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-4) ###################################################################### # Describe Convolution via 2D Reduction diff --git a/tutorials/language/scan.py b/tutorials/language/scan.py index 88769211a1fd..ba8b5a9f8e06 100644 --- a/tutorials/language/scan.py +++ b/tutorials/language/scan.py @@ -90,7 +90,7 @@ a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros((m, n), dtype=s_scan.dtype), dev) fscan(a, b) -tvm.testing.assert_allclose(b.asnumpy(), np.cumsum(a_np, axis=0)) +tvm.testing.assert_allclose(b.numpy(), np.cumsum(a_np, axis=0)) ###################################################################### # Multi-Stage Scan Cell diff --git a/tutorials/language/tensorize.py b/tutorials/language/tensorize.py index a1575fe832c3..40e68074adc8 100644 --- a/tutorials/language/tensorize.py +++ b/tutorials/language/tensorize.py @@ -191,7 +191,7 @@ def gemv_impl(): b = np.random.uniform(size=get_const_tuple(B.shape)).astype(dtype) c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=dtype), dev) func(tvm.nd.array(a, dev), tvm.nd.array(b, dev), c) -tvm.testing.assert_allclose(c.asnumpy(), np.dot(a, b.T), rtol=1e-3) +tvm.testing.assert_allclose(c.numpy(), np.dot(a, b.T), rtol=1e-3) ###################################################################### # Reduce-update for Tensorize @@ -302,7 +302,7 @@ def _reduce_update(): b = np.random.uniform(size=get_const_tuple(B.shape)).astype(dtype) c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=dtype), dev) func(tvm.nd.array(a, dev), tvm.nd.array(b, dev), c) -tvm.testing.assert_allclose(c.asnumpy(), np.dot(a, b.T), rtol=1e-3) +tvm.testing.assert_allclose(c.numpy(), np.dot(a, b.T), rtol=1e-3) 
###################################################################### # Summary diff --git a/tutorials/micro/micro_tflite.py b/tutorials/micro/micro_tflite.py index 4169a56e6570..b2896306b7b2 100644 --- a/tutorials/micro/micro_tflite.py +++ b/tutorials/micro/micro_tflite.py @@ -276,5 +276,5 @@ graph_mod.set_input(input_tensor, tvm.nd.array(np.array([0.5], dtype="float32"))) graph_mod.run() - tvm_output = graph_mod.get_output(0).asnumpy() + tvm_output = graph_mod.get_output(0).numpy() print("result is: " + str(tvm_output)) diff --git a/tutorials/optimize/opt_gemm.py b/tutorials/optimize/opt_gemm.py index 72a8b0a0701e..7af772784cd6 100644 --- a/tutorials/optimize/opt_gemm.py +++ b/tutorials/optimize/opt_gemm.py @@ -95,7 +95,7 @@ ) print("Numpy running time: %f" % (np_runing_time / np_repeat)) -answer = numpy.dot(a.asnumpy(), b.asnumpy()) +answer = numpy.dot(a.numpy(), b.numpy()) # Algorithm k = te.reduce_axis((0, K), "k") @@ -110,7 +110,7 @@ c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) +tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=1) print("Baseline: %f" % evaluator(a, b, c).mean) @@ -145,7 +145,7 @@ c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) +tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) # By simply tiling the loop 32x32, and hoisting ko, ki outside the blocking loops, # we can see big speedup compared with the baseline. @@ -181,7 +181,7 @@ c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) +tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt2: %f" % evaluator(a, b, c).mean) @@ -214,7 +214,7 @@ c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) +tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt3: %f" % evaluator(a, b, c).mean) @@ -270,7 +270,7 @@ c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) +tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt4: %f" % evaluator(a, b, c).mean) @@ -316,7 +316,7 @@ c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) +tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) print("Opt5: %f" % evaluator(a, b, c).mean) @@ -359,7 +359,7 @@ c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) -tvm.testing.assert_allclose(c.asnumpy(), answer, rtol=1e-5) +tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=50) opt6_time = evaluator(a, b, c).mean diff --git a/tutorials/topi/intro_topi.py b/tutorials/topi/intro_topi.py index 5ddb87872df1..8138e4718cd9 100644 --- a/tutorials/topi/intro_topi.py +++ b/tutorials/topi/intro_topi.py @@ -107,7 +107,7 @@ b_nd = tvm.nd.array(b_np, dev) g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), dev) func(a_nd, b_nd, g_nd) 
-tvm.testing.assert_allclose(g_nd.asnumpy(), g_np, rtol=1e-5) +tvm.testing.assert_allclose(g_nd.numpy(), g_np, rtol=1e-5) ###################################################################### # TOPI also provides common neural nets operations such as _softmax_ with optimized schedule diff --git a/vta/tests/python/integration/test_benchmark_gemm.py b/vta/tests/python/integration/test_benchmark_gemm.py index f69766ceaaad..e5e0c61fab1c 100644 --- a/vta/tests/python/integration/test_benchmark_gemm.py +++ b/vta/tests/python/integration/test_benchmark_gemm.py @@ -99,7 +99,7 @@ def verify(s): print("Execution statistics:") for k, v in stats.items(): print("\t{:<16}: {:>16}".format(k, v)) - res_unpack = res_arr.asnumpy().reshape( + res_unpack = res_arr.numpy().reshape( batch_size // env.BATCH, channel // env.BLOCK_OUT, env.BATCH, env.BLOCK_OUT ) return cost diff --git a/vta/tests/python/integration/test_benchmark_topi_conv2d.py b/vta/tests/python/integration/test_benchmark_topi_conv2d.py index daf9b4a7f022..340a725f4c13 100644 --- a/vta/tests/python/integration/test_benchmark_topi_conv2d.py +++ b/vta/tests/python/integration/test_benchmark_topi_conv2d.py @@ -264,7 +264,7 @@ def get_ref_data(): # Check correctness correct = False if check_correctness: - res_orig = res_arr.asnumpy() + res_orig = res_arr.numpy() if data_pack: res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape( wl.batch, wl.out_filter, fout_height, fout_width diff --git a/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py b/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py index d2516faac00b..a1996c54596d 100644 --- a/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py +++ b/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py @@ -259,7 +259,7 @@ def get_ref_data(): # Check correctness correct = False if check_correctness: - res_orig = res_arr.asnumpy() + res_orig = res_arr.numpy() if data_pack: res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape( wl.batch, wl.out_filter, fout_height, fout_width diff --git a/vta/tests/python/integration/test_benchmark_topi_dense.py b/vta/tests/python/integration/test_benchmark_topi_dense.py index ceeed1ce8ddb..663e82704a53 100644 --- a/vta/tests/python/integration/test_benchmark_topi_dense.py +++ b/vta/tests/python/integration/test_benchmark_topi_dense.py @@ -170,7 +170,7 @@ def get_ref_data(): # Check correctness correct = False if check_correctness: - res_orig = res_arr.asnumpy() + res_orig = res_arr.numpy() if data_pack: res_orig = res_orig.reshape(batch_size, out_feat) res_ref = res_ref >> 8 diff --git a/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py b/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py index b7c7b0aa0a8d..34cc5e24d774 100644 --- a/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py +++ b/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py @@ -258,7 +258,7 @@ def get_ref_data(): # Check correctness correct = False if check_correctness: - res_orig = res_arr.asnumpy() + res_orig = res_arr.numpy() if data_pack: res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape( wl.batch, wl.out_filter, fout_height, fout_width diff --git a/vta/tests/python/unittest/test_vta_insn.py b/vta/tests/python/unittest/test_vta_insn.py index 2817ef01b5fa..bb0896318a3f 100644 --- a/vta/tests/python/unittest/test_vta_insn.py +++ b/vta/tests/python/unittest/test_vta_insn.py @@ -70,7 +70,7 @@ def _run(env, remote): f(x_nd, y_nd) - np.testing.assert_equal(y_np, 
y_nd.asnumpy()) + np.testing.assert_equal(y_np, y_nd.numpy()) if env.TARGET in ["sim", "tsim"]: sim_stats = simulator.stats() @@ -149,7 +149,7 @@ def check_padded_load(pad_before, pad_after, test_name=None): f(x_nd, y_nd) - np.testing.assert_equal(y_np, y_nd.asnumpy()) + np.testing.assert_equal(y_np, y_nd.numpy()) if env.TARGET in ["sim", "tsim"]: sim_stats = simulator.stats() @@ -240,7 +240,7 @@ def verify(s, name=None): f(x_nd, w_nd, y_nd) - np.testing.assert_equal(y_np, y_nd.asnumpy()) + np.testing.assert_equal(y_np, y_nd.numpy()) if env.TARGET in ["sim", "tsim"]: sim_stats = simulator.stats() @@ -398,7 +398,7 @@ def check_alu(tvm_op, np_op=None, use_imm=False, test_name=None): b_nd = tvm.nd.array(b_np, dev) f(a_nd, b_nd, res_nd) - np.testing.assert_equal(res_np, res_nd.asnumpy()) + np.testing.assert_equal(res_np, res_nd.numpy()) if env.TARGET in ["sim", "tsim"]: sim_stats = simulator.stats() @@ -470,7 +470,7 @@ def _run(env, remote): f(a_nd, res_nd) - np.testing.assert_equal(res_np, res_nd.asnumpy()) + np.testing.assert_equal(res_np, res_nd.numpy()) if env.TARGET in ["sim", "tsim"]: sim_stats = simulator.stats() @@ -533,7 +533,7 @@ def _run(env, remote): f(a_nd, res_nd) - np.testing.assert_equal(res_np, res_nd.asnumpy()) + np.testing.assert_equal(res_np, res_nd.numpy()) if env.TARGET in ["sim", "tsim"]: sim_stats = simulator.stats() @@ -550,7 +550,7 @@ def _run(env, remote): dev = remote.ext_dev(0) x_np = np.random.randint(1, 10, size=(n, n, env.BATCH, env.BLOCK_OUT)).astype("int8") x_nd = tvm.nd.array(x_np, dev) - np.testing.assert_equal(x_np, x_nd.asnumpy()) + np.testing.assert_equal(x_np, x_nd.numpy()) vta.testing.run(_run) diff --git a/vta/tutorials/frontend/deploy_classification.py b/vta/tutorials/frontend/deploy_classification.py index b72301d60c0c..493db87d46d5 100644 --- a/vta/tutorials/frontend/deploy_classification.py +++ b/vta/tutorials/frontend/deploy_classification.py @@ -286,7 +286,7 @@ # Get classification results tvm_output = m.get_output(0, tvm.nd.empty((env.BATCH, 1000), "float32", remote.cpu(0))) for b in range(env.BATCH): - top_categories = np.argsort(tvm_output.asnumpy()[b]) + top_categories = np.argsort(tvm_output.numpy()[b]) # Report top-5 classification results print("\n{} prediction for sample {}".format(model, b)) print("\t#1:", synset[top_categories[-1]]) diff --git a/vta/tutorials/frontend/legacy/deploy_detection.py b/vta/tutorials/frontend/legacy/deploy_detection.py index 696d0508b956..1d78786848e7 100644 --- a/vta/tutorials/frontend/legacy/deploy_detection.py +++ b/vta/tutorials/frontend/legacy/deploy_detection.py @@ -301,11 +301,11 @@ layer_out = {} layer_out["type"] = "Yolo" # Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total) - layer_attr = m.get_output(i * 4 + 3).asnumpy() - layer_out["biases"] = m.get_output(i * 4 + 2).asnumpy() - layer_out["mask"] = m.get_output(i * 4 + 1).asnumpy() + layer_attr = m.get_output(i * 4 + 3).numpy() + layer_out["biases"] = m.get_output(i * 4 + 2).numpy() + layer_out["mask"] = m.get_output(i * 4 + 1).numpy() out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3]) - layer_out["output"] = m.get_output(i * 4).asnumpy().reshape(out_shape) + layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape) layer_out["classes"] = layer_attr[4] tvm_out.append(layer_out) thresh = 0.560 diff --git a/vta/tutorials/matrix_multiply.py b/vta/tutorials/matrix_multiply.py index 593ac3c5a0ee..d802a878a859 100644 --- a/vta/tutorials/matrix_multiply.py +++ 
b/vta/tutorials/matrix_multiply.py @@ -410,7 +410,7 @@ # - We first create a remote context (for remote execution on the Pynq). # - Then :code:`tvm.nd.array` formats the data accordingly. # - :code:`f()` runs the actual computation. -# - :code:`asnumpy()` copies the result array back in a format that can be +# - :code:`numpy()` copies the result array back in a format that can be # interpreted. # @@ -446,7 +446,7 @@ # Compute reference result with numpy C_ref = np.dot(A_orig.astype(env.acc_dtype), B_orig.T.astype(env.acc_dtype)).astype(C.dtype) C_ref = C_ref.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3)) -np.testing.assert_equal(C_ref, C_nd.asnumpy()) +np.testing.assert_equal(C_ref, C_nd.numpy()) # Print stats if env.TARGET in ["sim", "tsim"]: diff --git a/vta/tutorials/optimize/convolution_opt.py b/vta/tutorials/optimize/convolution_opt.py index 185b71fdc210..8eff3d4236ef 100644 --- a/vta/tutorials/optimize/convolution_opt.py +++ b/vta/tutorials/optimize/convolution_opt.py @@ -434,7 +434,7 @@ fout_width, ) ).transpose((0, 2, 4, 5, 1, 3)) -tvm.testing.assert_allclose(res_ref, res_nd.asnumpy()) +tvm.testing.assert_allclose(res_ref, res_nd.numpy()) # Print stats if env.TARGET in ["sim", "tsim"]: diff --git a/vta/tutorials/optimize/matrix_multiply_opt.py b/vta/tutorials/optimize/matrix_multiply_opt.py index c9d1c137fbff..2b89f159dd13 100644 --- a/vta/tutorials/optimize/matrix_multiply_opt.py +++ b/vta/tutorials/optimize/matrix_multiply_opt.py @@ -351,7 +351,7 @@ res_ref = res_ref.reshape( batch_size // env.BATCH, env.BATCH, out_channels // env.BLOCK_OUT, env.BLOCK_OUT ).transpose((0, 2, 1, 3)) -np.testing.assert_equal(res_ref, res_nd.asnumpy()) +np.testing.assert_equal(res_ref, res_nd.numpy()) # Print stats if env.TARGET in ["sim", "tsim"]: diff --git a/vta/tutorials/vta_get_started.py b/vta/tutorials/vta_get_started.py index f64cae11cccc..fb82d41a8d69 100644 --- a/vta/tutorials/vta_get_started.py +++ b/vta/tutorials/vta_get_started.py @@ -349,7 +349,7 @@ # - We first create a remote context (for remote execution on the Pynq). # - Then :code:`tvm.nd.array` formats the data accordingly. # - :code:`f()` runs the actual computation. -# - :code:`asnumpy()` copies the result array back in a format that can be +# - :code:`numpy()` copies the result array back in a format that can be # interpreted. # @@ -381,7 +381,7 @@ # Compute reference result with numpy C_ref = (A_orig.astype(env.acc_dtype) + B_orig.astype(env.acc_dtype)).astype(C.dtype) C_ref = C_ref.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3)) -np.testing.assert_equal(C_ref, C_nd.asnumpy()) +np.testing.assert_equal(C_ref, C_nd.numpy()) print("Successful vector add test!") ######################################################################
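
Note: every hunk in this patch applies the same mechanical rename of the NDArray conversion method, from asnumpy() to numpy(). Below is a minimal, hedged sketch of the pattern, not part of the patch itself; it assumes a TVM build in which tvm.runtime.NDArray already exposes numpy() (older releases provide only the deprecated asnumpy()).

import numpy as np
import tvm

# Host -> device: create a TVM NDArray from host data (default CPU device).
a = tvm.nd.array(np.arange(4, dtype="float32"))

# Device -> host: copy back for verification.
# Before this patch:  host = a.asnumpy()
# After this patch:
host = a.numpy()

np.testing.assert_equal(host, np.arange(4, dtype="float32"))

The new name matches the .numpy() convention used by frameworks such as PyTorch; asnumpy() is expected to remain available as a deprecated alias during the transition, but new code should call numpy().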