From b4a04eab02d53856264177662c1ae824d86d6ff4 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Sun, 26 Nov 2023 10:21:26 +0000 Subject: [PATCH 01/14] add tensor apply --- paddle/fluid/pybind/eager_method.cc | 31 +++++++++++ .../base/dygraph/tensor_patch_methods.py | 53 +++++++++++++++++++ test/legacy_test/test_apply.py | 43 +++++++++++++++ test/legacy_test/test_inplace.py | 29 ++++++++++ 4 files changed, 156 insertions(+) create mode 100644 test/legacy_test/test_apply.py diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc index 7a9172f376539f..47fa901c9f79e7 100644 --- a/paddle/fluid/pybind/eager_method.cc +++ b/paddle/fluid/pybind/eager_method.cc @@ -2166,6 +2166,29 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self, EAGER_CATCH_AND_THROW_RETURN_NULL } +static PyObject* tensor_apply(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + EAGER_TRY + PyObject* apply_func = PyTuple_GET_ITEM(args, 0); + PyTensorHook func = PyTensorHook(apply_func); + paddle::Tensor out = func(self->tensor); + return ToPyObject(out); + EAGER_CATCH_AND_THROW_RETURN_NULL +} + +static PyObject* tensor_apply_(TensorObject* self, + PyObject* args, + PyObject* kwargs) { + EAGER_TRY + PyObject* apply_func = PyTuple_GET_ITEM(args, 0); + PyTensorHook func = PyTensorHook(apply_func); + paddle::Tensor out = func(self->tensor); + self->tensor.set_impl(out.impl()); + RETURN_PY_NONE + EAGER_CATCH_AND_THROW_RETURN_NULL +} + static PyObject* tensor_register_grad_hook(TensorObject* self, PyObject* args, PyObject* kwargs) { @@ -3587,6 +3610,14 @@ PyMethodDef variable_methods[] = { // NOLINT (PyCFunction)(void (*)())tensor__setitem_dygraph, METH_VARARGS | METH_KEYWORDS, nullptr}, + {"_apply", + (PyCFunction)(void (*)())tensor_apply, + METH_VARARGS | METH_KEYWORDS, + nullptr}, + {"_apply_", + (PyCFunction)(void (*)())tensor_apply_, + METH_VARARGS | METH_KEYWORDS, + nullptr}, {"_register_grad_hook", (PyCFunction)(void (*)())tensor_register_grad_hook, METH_VARARGS | METH_KEYWORDS, diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index af36e3219fc219..a517fc0b3f69ba 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -368,6 +368,57 @@ def gradient(self): return (np.array(self.grad), np.array(self.grad.rows())) return np.array(self.grad) + @framework.dygraph_only + def apply_(self, func): + """ + Inplace apply the python function to the tensor. + + Returns: + None + + Examples: + .. code-block:: python + + >>> import paddle + + >>> x = paddle.to_tensor(5.) + >>> f = lambda x: 3*x+2 + >>> x.apply_(f) + >>> print(x) # 17 + + """ + if self.stop_gradient is True: + raise RuntimeError( + "Cannot apply function on a tensor that stop gradient." + ) + self._apply_(func) + + @framework.dygraph_only + def apply(self, func): + """ + Apply the python function to the tensor. + + Returns: + None + + Examples: + .. code-block:: python + + >>> import paddle + + >>> x = paddle.to_tensor(5.) + >>> f = lambda x: 3*x+2 + >>> y = x.apply(f) + >>> print(y) # 17 + >>> print(x) # 5 + + """ + if self.stop_gradient is True: + raise RuntimeError( + "Cannot apply function on a tensor that stop gradient." 
+ ) + return self._apply(func) + @framework.dygraph_only def register_hook(self, hook): """ @@ -1105,6 +1156,8 @@ def coalesce(self, name=None): ("clear_grad", clear_grad), ("inplace_version", inplace_version), ("gradient", gradient), + ("apply_", apply_), + ("apply", apply), ("register_hook", register_hook), ("__str__", __str__), ("__repr__", __str__), diff --git a/test/legacy_test/test_apply.py b/test/legacy_test/test_apply.py new file mode 100644 index 00000000000000..7d6bcf847205b8 --- /dev/null +++ b/test/legacy_test/test_apply.py @@ -0,0 +1,43 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np + +import paddle + + +class TestTensorApplyAPI(unittest.TestCase): + def setUp(self): + self.x = paddle.to_tensor([1, 2, 3, 4, 5]) + + def test_dygraph(self): + def f(x): + return 3 * x + 2 + + y = self.x.apply(f) + np.testing.assert_allclose(f(self.x).numpy(), y.numpy(), rtol=1e-05) + + def test_error(self): + self.x.stop_gradient = False + + def f(x): + x.apply_(x) + + self.assertRaises(RuntimeError, f, self.x) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py index 34b50d48d76ac4..2d7c2747b82864 100644 --- a/test/legacy_test/test_inplace.py +++ b/test/legacy_test/test_inplace.py @@ -1632,5 +1632,34 @@ def non_inplace_api_processing(self, var): return paddle.index_fill(var, self.index, self.axis, self.value) +class TestDygraphTensorApplyInplace(unittest.TestCase): + def setUp(self): + self.init_data() + self.set_np_compare_func() + + def init_data(self): + self.input_var_numpy = np.random.uniform(-5, 5, [10, 20, 1]) + self.dtype = "float32" + + def set_np_compare_func(self): + self.np_compare = np.array_equal + + def non_inplace_api_processing(self, var, f): + return var.apply(f) + + def inplace_api_processing(self, var, f): + return var.apply_(f) + + def test_inplace_api(self): + var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) + f = lambda x: 3 * x + 2 + non_inplace_var = self.non_inplace_api_processing(var, f) + inplace_var = self.inplace_api_processing(var, f) + self.assertTrue(id(var) == id(inplace_var)) + np.testing.assert_array_equal( + non_inplace_var.numpy(), inplace_var.numpy() + ) + + if __name__ == '__main__': unittest.main() From 594fecb0513f4cecd571ece54970edf5f2d7934f Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Sun, 26 Nov 2023 14:54:01 +0000 Subject: [PATCH 02/14] fix --- test/legacy_test/test_apply.py | 18 +++++++++--------- test/legacy_test/test_inplace.py | 4 +++- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/test/legacy_test/test_apply.py b/test/legacy_test/test_apply.py index 7d6bcf847205b8..34a19348d88b48 100644 --- a/test/legacy_test/test_apply.py +++ b/test/legacy_test/test_apply.py @@ -21,22 +21,22 @@ class TestTensorApplyAPI(unittest.TestCase): def setUp(self): - self.x = paddle.to_tensor([1, 2, 3, 4, 5]) + self.x = 
paddle.to_tensor([1, 2, 3, 4, 5], stop_gradient=True) + self.function = lambda x: 3 * x + 2 def test_dygraph(self): - def f(x): - return 3 * x + 2 - - y = self.x.apply(f) - np.testing.assert_allclose(f(self.x).numpy(), y.numpy(), rtol=1e-05) + y = self.x.apply(self.function) + np.testing.assert_allclose( + self.function(self.x).numpy(), y.numpy(), rtol=1e-05 + ) def test_error(self): self.x.stop_gradient = False - def f(x): - x.apply_(x) + def fn(x): + x.apply_(self.function) - self.assertRaises(RuntimeError, f, self.x) + self.assertRaises(RuntimeError, fn, self.x) if __name__ == "__main__": diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py index 2d7c2747b82864..c316e847da1f6f 100644 --- a/test/legacy_test/test_inplace.py +++ b/test/legacy_test/test_inplace.py @@ -1651,7 +1651,9 @@ def inplace_api_processing(self, var, f): return var.apply_(f) def test_inplace_api(self): - var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) + var = paddle.to_tensor(self.input_var_numpy, stop_gradient=True).astype( + self.dtype + ) f = lambda x: 3 * x + 2 non_inplace_var = self.non_inplace_api_processing(var, f) inplace_var = self.inplace_api_processing(var, f) From 93473ee34b5c5af5413798d41d57cee5a85207d0 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Mon, 27 Nov 2023 03:30:20 +0000 Subject: [PATCH 03/14] fix 2023-11-27 --- python/paddle/base/dygraph/tensor_patch_methods.py | 8 ++++---- test/legacy_test/test_inplace.py | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index a517fc0b3f69ba..6fec6da6c54388 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -387,9 +387,9 @@ def apply_(self, func): >>> print(x) # 17 """ - if self.stop_gradient is True: + if not self.stop_gradient: raise RuntimeError( - "Cannot apply function on a tensor that stop gradient." + "Cannot apply function on a tensor that required gradient." ) self._apply_(func) @@ -413,9 +413,9 @@ def apply(self, func): >>> print(x) # 5 """ - if self.stop_gradient is True: + if not self.stop_gradient: raise RuntimeError( - "Cannot apply function on a tensor that stop gradient." + "Cannot apply function on a tensor that required gradient." 
) return self._apply(func) diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py index c316e847da1f6f..05b95bd9341794 100644 --- a/test/legacy_test/test_inplace.py +++ b/test/legacy_test/test_inplace.py @@ -1648,7 +1648,8 @@ def non_inplace_api_processing(self, var, f): return var.apply(f) def inplace_api_processing(self, var, f): - return var.apply_(f) + var.apply_(f) + return var def test_inplace_api(self): var = paddle.to_tensor(self.input_var_numpy, stop_gradient=True).astype( From 64fddc47258cb7563360a2d89172eb8455fb3ceb Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 28 Nov 2023 07:16:03 +0000 Subject: [PATCH 04/14] fix --- paddle/fluid/pybind/eager_method.cc | 2 +- python/paddle/base/dygraph/tensor_patch_methods.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc index 47fa901c9f79e7..4b29358c366489 100644 --- a/paddle/fluid/pybind/eager_method.cc +++ b/paddle/fluid/pybind/eager_method.cc @@ -2185,7 +2185,7 @@ static PyObject* tensor_apply_(TensorObject* self, PyTensorHook func = PyTensorHook(apply_func); paddle::Tensor out = func(self->tensor); self->tensor.set_impl(out.impl()); - RETURN_PY_NONE + return ToPyObject(self->tensor); EAGER_CATCH_AND_THROW_RETURN_NULL } diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index 6fec6da6c54388..c4d80bb18b2ff8 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -391,7 +391,7 @@ def apply_(self, func): raise RuntimeError( "Cannot apply function on a tensor that required gradient." ) - self._apply_(func) + return self._apply_(func) @framework.dygraph_only def apply(self, func): From 038ff03e3b91ae71703783cad3a88f6a01d66635 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 28 Nov 2023 07:49:50 +0000 Subject: [PATCH 05/14] fix V2 --- paddle/fluid/pybind/eager_method.cc | 3 ++- test/legacy_test/test_inplace.py | 3 +-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc index bc447d5385ec58..a0730f4390adf1 100644 --- a/paddle/fluid/pybind/eager_method.cc +++ b/paddle/fluid/pybind/eager_method.cc @@ -1773,7 +1773,8 @@ static PyObject* tensor_apply_(TensorObject* self, PyTensorHook func = PyTensorHook(apply_func); paddle::Tensor out = func(self->tensor); self->tensor.set_impl(out.impl()); - return ToPyObject(self->tensor); + Py_INCREF(self); + return reinterpret_cast(self); EAGER_CATCH_AND_THROW_RETURN_NULL } diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py index 05b95bd9341794..c316e847da1f6f 100644 --- a/test/legacy_test/test_inplace.py +++ b/test/legacy_test/test_inplace.py @@ -1648,8 +1648,7 @@ def non_inplace_api_processing(self, var, f): return var.apply(f) def inplace_api_processing(self, var, f): - var.apply_(f) - return var + return var.apply_(f) def test_inplace_api(self): var = paddle.to_tensor(self.input_var_numpy, stop_gradient=True).astype( From 8ea22e46d562080cfc73c4f8d1772a2901527d9a Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 12 Dec 2023 14:11:17 +0000 Subject: [PATCH 06/14] add apply in Variable --- python/paddle/base/dygraph/tensor_patch_methods.py | 10 ++++++---- python/paddle/base/framework.py | 10 ++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git 
a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index 05dba6ca61171b..cb4cf948aac808 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -384,7 +384,8 @@ def apply_(self, func): >>> x = paddle.to_tensor(5.) >>> f = lambda x: 3*x+2 >>> x.apply_(f) - >>> print(x) # 17 + >>> print(x) + 17 """ if not self.stop_gradient: @@ -393,7 +394,6 @@ def apply_(self, func): ) return self._apply_(func) - @framework.dygraph_only def apply(self, func): """ Apply the python function to the tensor. @@ -409,8 +409,10 @@ def apply(self, func): >>> x = paddle.to_tensor(5.) >>> f = lambda x: 3*x+2 >>> y = x.apply(f) - >>> print(y) # 17 - >>> print(x) # 5 + >>> print(y) + 17 + >>> print(x) + 5 """ if not self.stop_gradient: diff --git a/python/paddle/base/framework.py b/python/paddle/base/framework.py index 697eb77d8ae40a..77ff46b7747f13 100644 --- a/python/paddle/base/framework.py +++ b/python/paddle/base/framework.py @@ -1795,6 +1795,16 @@ def forward_hook_wrapper(x): skip_vars_in_backward_input=[self], ) + def apply(self, func): + if not self.stop_gradient: + raise RuntimeError( + "Cannot apply function on a tensor that required gradient." + ) + try: + return func(self) + except: + raise ValueError(f"The PyFunc {func.__name__} could not be applied") + def __str__(self): return self._to_readable_code() From 565993320bf5312a2594a4ab3a07320b1263a862 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 12 Dec 2023 19:46:37 +0000 Subject: [PATCH 07/14] add apply in newir --- paddle/fluid/pybind/pir.cc | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/pybind/pir.cc b/paddle/fluid/pybind/pir.cc index 51517e0ddaea05..d445eda3686449 100644 --- a/paddle/fluid/pybind/pir.cc +++ b/paddle/fluid/pybind/pir.cc @@ -23,8 +23,6 @@ #include #include "paddle/fluid/framework/ir/pass.h" -#include "paddle/fluid/pybind/pybind_variant_caster.h" - #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/ir_adaptor/translator/program_translator.h" #include "paddle/fluid/ir_adaptor/translator/translate.h" @@ -46,6 +44,8 @@ #include "paddle/fluid/pir/transforms/inplace_pass.h" #include "paddle/fluid/pir/transforms/replace_fetch_with_shadow_output_pass.h" #include "paddle/fluid/pybind/control_flow_api.h" +#include "paddle/fluid/pybind/eager_utils.h" +#include "paddle/fluid/pybind/pybind_variant_caster.h" #include "paddle/phi/core/enforce.h" #include "paddle/pir/core/attribute.h" #include "paddle/pir/core/block.h" @@ -535,6 +535,33 @@ const phi::DDim &GetValueDims(Value value) { } } +pir::OpResult apply(Value self, py::object func) { + py::gil_scoped_acquire gil; + PyObject *py_func = func.release().ptr(); + Py_INCREF(py_func); + PyObject *res = nullptr; + try { + py::object obj = py::cast(self); + PyObject *tmp_self = obj.release().ptr(); + Py_INCREF(tmp_self); + res = PyObject_CallFunctionObjArgs(py_func, tmp_self, nullptr); + Py_DECREF(tmp_self); + } catch (std::exception &e) { + PADDLE_THROW(phi::errors::Unavailable( + "Hook function of Tensor raises an exception: %s.", e.what())); + } catch (...) 
{ + PADDLE_THROW(phi::errors::Fatal( + "Hook function of Tensor raises an unknown exception.")); + } + if (res == Py_None) { + return self.dyn_cast(); + } + auto out = CastPyArg2Value(res, "", 0); + Py_DECREF(py_func); + Py_DECREF(res); + return out.dyn_cast(); +} + #define OVERRIDE_OPERATOR(operator, api, other_type) \ value.def(#operator, [](Value self, other_type other) { \ return paddle::dialect::api(self, other); \ @@ -692,7 +719,8 @@ void BindValue(py::module *m) { .def("__eq__", &Value::operator==) .def("__hash__", [](Value self) { return std::hash{}(self); }) .def("__str__", &Value2String) - .def("__repr__", &Value2String); + .def("__repr__", &Value2String) + .def("apply", &apply); // For basaic operators OVERRIDE_OPERATOR_FOR_EACH(__add__, add, 1.0, other, true); OVERRIDE_OPERATOR_FOR_EACH(__sub__, subtract, 1.0, -1.0 * other, true); From bd99a1dab0a698e7177f23b504519fee3e9c28c3 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 19 Dec 2023 21:46:01 +0800 Subject: [PATCH 08/14] add test --- test/legacy_test/test_apply.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/test/legacy_test/test_apply.py b/test/legacy_test/test_apply.py index 34a19348d88b48..53b6d1c6b17326 100644 --- a/test/legacy_test/test_apply.py +++ b/test/legacy_test/test_apply.py @@ -38,6 +38,34 @@ def fn(x): self.assertRaises(RuntimeError, fn, self.x) + def test_to_pir(self): + def fn(x): + y = x.apply(self.function) + return y + + with paddle.jit.api.sot_mode_guard(False): + paddle.disable_static() + jit_g = paddle.jit.to_static(fn) + out = jit_g(self.x) + + np.testing.assert_allclose( + self.function(self.x).numpy(), out.numpy(), rtol=1e-05 + ) + + def test_to_legacy_ir(self): + def fn(x): + y = x.apply(self.function) + return y + + with paddle.jit.api.sot_mode_guard(False): + with paddle.pir_utils.IrGuard(): + paddle.disable_static() + jit_g = paddle.jit.to_static(fn) + out = jit_g(self.x) + np.testing.assert_allclose( + self.function(self.x).numpy(), out.numpy(), rtol=1e-05 + ) + if __name__ == "__main__": unittest.main() From a4ccbaa2327940861429da2cf1f19ab1e475e854 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Tue, 19 Dec 2023 22:41:44 +0800 Subject: [PATCH 09/14] fix --- paddle/fluid/pybind/pir.cc | 11 +++++--- test/legacy_test/test_apply.py | 49 ++++++++++++++++++++-------------- 2 files changed, 37 insertions(+), 23 deletions(-) diff --git a/paddle/fluid/pybind/pir.cc b/paddle/fluid/pybind/pir.cc index 7d36befdb1acf0..6f7a2fbe9bdf90 100644 --- a/paddle/fluid/pybind/pir.cc +++ b/paddle/fluid/pybind/pir.cc @@ -581,6 +581,11 @@ const phi::DDim &GetValueDims(Value value) { pir::OpResult apply(Value self, py::object func) { py::gil_scoped_acquire gil; + auto stop_gradient = self.attribute(kAttrStopGradients); + if (stop_gradient && !stop_gradient.data()) { + PADDLE_THROW(phi::errors::Unavailable( + "Cannot apply function on a tensor that required gradient.")); + } PyObject *py_func = func.release().ptr(); Py_INCREF(py_func); PyObject *res = nullptr; @@ -592,10 +597,10 @@ pir::OpResult apply(Value self, py::object func) { Py_DECREF(tmp_self); } catch (std::exception &e) { PADDLE_THROW(phi::errors::Unavailable( - "Hook function of Tensor raises an exception: %s.", e.what())); + "Apply function of Tensor raises an exception: %s.", e.what())); } catch (...) 
{ PADDLE_THROW(phi::errors::Fatal( - "Hook function of Tensor raises an unknown exception.")); + "Apply function of Tensor raises an unknown exception.")); } if (res == Py_None) { return self.dyn_cast(); @@ -816,7 +821,7 @@ void BindValue(py::module *m) { [](Value self) { return paddle::dialect::scale(self, -1.0, 0.0, true); }) - .def("apply", &apply); + .def("apply", &apply) .def("is_same", &Value::operator==) .def("hash", [](Value self) { return std::hash{}(self); }) .def("__repr__", &Value2String); diff --git a/test/legacy_test/test_apply.py b/test/legacy_test/test_apply.py index 53b6d1c6b17326..2ce4adb7a07600 100644 --- a/test/legacy_test/test_apply.py +++ b/test/legacy_test/test_apply.py @@ -33,37 +33,46 @@ def test_dygraph(self): def test_error(self): self.x.stop_gradient = False - def fn(x): + def fn_inplace(x): x.apply_(self.function) - self.assertRaises(RuntimeError, fn, self.x) + def fn_outplace(x, func): + x.apply(func) - def test_to_pir(self): - def fn(x): - y = x.apply(self.function) + self.assertRaises(RuntimeError, fn_inplace, self.x) + self.assertRaises(RuntimeError, fn_outplace, self.x, self.function) + with paddle.jit.api.sot_mode_guard(False): + self.assertRaises( + RuntimeError, + paddle.jit.to_static(fn_outplace), + self.x, + self.function, + ) + with paddle.pir_utils.IrGuard(): + self.assertRaises( + RuntimeError, + paddle.jit.to_static(fn_outplace), + self.x, + self.function, + ) + + def test_to_static(self): + def fn(x, func): + y = x.apply(func) return y with paddle.jit.api.sot_mode_guard(False): paddle.disable_static() jit_g = paddle.jit.to_static(fn) - out = jit_g(self.x) - - np.testing.assert_allclose( - self.function(self.x).numpy(), out.numpy(), rtol=1e-05 - ) - - def test_to_legacy_ir(self): - def fn(x): - y = x.apply(self.function) - return y - - with paddle.jit.api.sot_mode_guard(False): + out_legacy_ir = jit_g(self.x, self.function) with paddle.pir_utils.IrGuard(): - paddle.disable_static() jit_g = paddle.jit.to_static(fn) - out = jit_g(self.x) + out_pir = jit_g(self.x, self.function) + np.testing.assert_allclose( + self.function(self.x).numpy(), out_legacy_ir.numpy(), rtol=1e-05 + ) np.testing.assert_allclose( - self.function(self.x).numpy(), out.numpy(), rtol=1e-05 + self.function(self.x).numpy(), out_pir.numpy(), rtol=1e-05 ) From 7708eed7ee9a0f5a311f4d2159eab7dde81241a6 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Wed, 20 Dec 2023 03:29:15 +0800 Subject: [PATCH 10/14] fix2 --- test/legacy_test/test_apply.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/legacy_test/test_apply.py b/test/legacy_test/test_apply.py index 2ce4adb7a07600..3f91cfcdb18031 100644 --- a/test/legacy_test/test_apply.py +++ b/test/legacy_test/test_apply.py @@ -49,6 +49,7 @@ def fn_outplace(x, func): self.function, ) with paddle.pir_utils.IrGuard(): + paddle.disable_static() self.assertRaises( RuntimeError, paddle.jit.to_static(fn_outplace), @@ -62,10 +63,10 @@ def fn(x, func): return y with paddle.jit.api.sot_mode_guard(False): - paddle.disable_static() jit_g = paddle.jit.to_static(fn) out_legacy_ir = jit_g(self.x, self.function) with paddle.pir_utils.IrGuard(): + paddle.disable_static() jit_g = paddle.jit.to_static(fn) out_pir = jit_g(self.x, self.function) np.testing.assert_allclose( From 1d95b726c6a3c0edcef4a749ad33718ebbd6a311 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Wed, 20 Dec 2023 13:09:12 +0800 Subject: [PATCH 11/14] fix example code --- python/paddle/base/dygraph/tensor_patch_methods.py | 9 
++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index beab3b9ed8d6f4..de1ad789cf7a16 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -400,7 +400,8 @@ def apply_(self, func): >>> f = lambda x: 3*x+2 >>> x.apply_(f) >>> print(x) - 17 + Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 17.) """ if not self.stop_gradient: @@ -425,9 +426,11 @@ def apply(self, func): >>> f = lambda x: 3*x+2 >>> y = x.apply(f) >>> print(y) - 17 + Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 17.) >>> print(x) - 5 + Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, + 5.) """ if not self.stop_gradient: From fca838982bdd8374bdb5b86efed31017b8377192 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Wed, 20 Dec 2023 21:03:36 +0800 Subject: [PATCH 12/14] change shape --- .../base/dygraph/tensor_patch_methods.py | 84 +++++++++++++++++-- test/legacy_test/test_apply.py | 13 +++ 2 files changed, 89 insertions(+), 8 deletions(-) diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index de1ad789cf7a16..0cddd28c4f6fcd 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -396,13 +396,48 @@ def apply_(self, func): >>> import paddle - >>> x = paddle.to_tensor(5.) + >>> paddle.seed(0) + >>> x = paddle.rand(shape=[3,3]).to("cpu", "float64") + >>> print(x) + Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, + [[0.39904648, 0.51667917, 0.02493039], + [0.94007939, 0.94585413, 0.79673123], + [0.41501412, 0.82025695, 0.22904338]]) >>> f = lambda x: 3*x+2 >>> x.apply_(f) >>> print(x) - Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, - 17.) + Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, + [[3.19713950, 3.55003738, 2.07479119], + [4.82023811, 4.83756256, 4.39019394], + [3.24504232, 4.46077061, 2.68713021]]) + + + >>> x = paddle.rand(shape=[3,3]).to("cpu", "float16") + >>> x.apply_(f) + >>> print(x) + Tensor(shape=[3, 3], dtype=float16, place=Place(cpu), stop_gradient=True, + [[3.19726562, 3.55078125, 2.07421875], + [4.82031250, 4.83593750, 4.39062500], + [3.24414062, 4.46093750, 2.68750000]]) + + >>> x = paddle.rand(shape=[3,3]).to("cpu", "bfloat16") + >>> x.apply_(f) + >>> print(x) + Tensor(shape=[3, 3], dtype=bfloat16, place=Place(cpu), stop_gradient=True, + [[3.18750000, 3.54687500, 2.06250000], + [4.81250000, 4.81250000, 4.37500000], + [3.23437500, 4.43750000, 2.67187500]]) + + + >>> if paddle.is_compiled_with_cuda(): + >>> x = paddle.rand(shape=[3,3]).to("gpu", "float32") + >>> x.apply_(f) + >>> print(x) + Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [[3.19713950, 3.55003738, 2.07479119], + [4.82023811, 4.83756256, 4.39019394], + [3.24504232, 4.46077061, 2.68713021]]) """ if not self.stop_gradient: raise RuntimeError( @@ -422,15 +457,48 @@ def apply(self, func): >>> import paddle - >>> x = paddle.to_tensor(5.) + >>> paddle.seed(0) + >>> x = paddle.rand(shape=[3,3]).to("cpu", "float64") >>> f = lambda x: 3*x+2 >>> y = x.apply(f) >>> print(y) - Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, - 17.) 
+ Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, + [[3.19713950, 3.55003738, 2.07479119], + [4.82023811, 4.83756256, 4.39019394], + [3.24504232, 4.46077061, 2.68713021]]) >>> print(x) - Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True, - 5.) + Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, + [[0.39904648, 0.51667917, 0.02493039], + [0.94007939, 0.94585413, 0.79673123], + [0.41501412, 0.82025695, 0.22904338]]) + + + >>> x = paddle.rand(shape=[3,3]).to("cpu", "float16") + >>> y = x.apply(f) + >>> print(y) + Tensor(shape=[3, 3], dtype=float16, place=Place(cpu), stop_gradient=True, + [[3.19726562, 3.55078125, 2.07421875], + [4.82031250, 4.83593750, 4.39062500], + [3.24414062, 4.46093750, 2.68750000]]) + + + >>> x = paddle.rand(shape=[3,3]).to("cpu", "bfloat16") + >>> y = x.apply(f) + >>> print(y) + Tensor(shape=[3, 3], dtype=bfloat16, place=Place(cpu), stop_gradient=True, + [[3.18750000, 3.54687500, 2.06250000], + [4.81250000, 4.81250000, 4.37500000], + [3.23437500, 4.43750000, 2.67187500]]) + + + >>> if paddle.is_compiled_with_cuda(): + >>> x = paddle.rand(shape=[3,3]).to("gpu", "float32") + >>> y = x.apply(f) + >>> print(y) + Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [[3.19713950, 3.55003738, 2.07479119], + [4.82023811, 4.83756256, 4.39019394], + [3.24504232, 4.46077061, 2.68713021]]) """ if not self.stop_gradient: diff --git a/test/legacy_test/test_apply.py b/test/legacy_test/test_apply.py index 3f91cfcdb18031..28499d154c7e13 100644 --- a/test/legacy_test/test_apply.py +++ b/test/legacy_test/test_apply.py @@ -24,6 +24,19 @@ def setUp(self): self.x = paddle.to_tensor([1, 2, 3, 4, 5], stop_gradient=True) self.function = lambda x: 3 * x + 2 + def test_dtype(self): + for dtype in ["float64", "float16", "bfloat16"]: + self.x.to(dtype) + self.test_dygraph() + + @unittest.skipIf( + not paddle.is_compiled_with_cuda(), + "only support cuda", + ) + def test_on_gpu(self): + self.x.to("gpu") + self.test_dygraph() + def test_dygraph(self): y = self.x.apply(self.function) np.testing.assert_allclose( From 13e298369838654c4cf888efaa19b5f341f27d09 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Thu, 21 Dec 2023 15:36:45 +0800 Subject: [PATCH 13/14] fix docs --- .../base/dygraph/tensor_patch_methods.py | 66 ++++++++++--------- test/legacy_test/test_apply.py | 11 ++++ 2 files changed, 47 insertions(+), 30 deletions(-) diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index 0cddd28c4f6fcd..a7882ce0a5b79d 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -397,31 +397,32 @@ def apply_(self, func): >>> import paddle >>> paddle.seed(0) - >>> x = paddle.rand(shape=[3,3]).to("cpu", "float64") - >>> print(x) - Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, - [[0.39904648, 0.51667917, 0.02493039], - [0.94007939, 0.94585413, 0.79673123], - [0.41501412, 0.82025695, 0.22904338]]) + >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], + >>> [0.94007939, 0.94585413, 0.79673123], + >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "float64") >>> f = lambda x: 3*x+2 >>> x.apply_(f) >>> print(x) Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, - [[3.19713950, 3.55003738, 2.07479119], - [4.82023811, 4.83756256, 4.39019394], - [3.24504232, 4.46077061, 2.68713021]]) + [[3.19713944, 3.55003750, 
2.07479117], + [4.82023817, 4.83756238, 4.39019370], + [3.24504235, 4.46077085, 2.68713014]]) - >>> x = paddle.rand(shape=[3,3]).to("cpu", "float16") + >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], + >>> [0.94007939, 0.94585413, 0.79673123], + >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "float16") >>> x.apply_(f) >>> print(x) Tensor(shape=[3, 3], dtype=float16, place=Place(cpu), stop_gradient=True, - [[3.19726562, 3.55078125, 2.07421875], - [4.82031250, 4.83593750, 4.39062500], - [3.24414062, 4.46093750, 2.68750000]]) + [[3.19531250, 3.54882812, 2.07421875], + [4.81640625, 4.83593750, 4.39062500], + [3.24414062, 4.46093750, 2.68554688]]) - >>> x = paddle.rand(shape=[3,3]).to("cpu", "bfloat16") + >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], + >>> [0.94007939, 0.94585413, 0.79673123], + >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "bfloat16") >>> x.apply_(f) >>> print(x) Tensor(shape=[3, 3], dtype=bfloat16, place=Place(cpu), stop_gradient=True, @@ -431,7 +432,9 @@ def apply_(self, func): >>> if paddle.is_compiled_with_cuda(): - >>> x = paddle.rand(shape=[3,3]).to("gpu", "float32") + >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], + >>> [0.94007939, 0.94585413, 0.79673123], + >>> [0.41501412, 0.82025695, 0.22904338]]).to("gpu", "float32") >>> x.apply_(f) >>> print(x) Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True, @@ -458,31 +461,32 @@ def apply(self, func): >>> import paddle >>> paddle.seed(0) - >>> x = paddle.rand(shape=[3,3]).to("cpu", "float64") + >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], + >>> [0.94007939, 0.94585413, 0.79673123], + >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "float64") >>> f = lambda x: 3*x+2 >>> y = x.apply(f) >>> print(y) Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, - [[3.19713950, 3.55003738, 2.07479119], - [4.82023811, 4.83756256, 4.39019394], - [3.24504232, 4.46077061, 2.68713021]]) - >>> print(x) - Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, - [[0.39904648, 0.51667917, 0.02493039], - [0.94007939, 0.94585413, 0.79673123], - [0.41501412, 0.82025695, 0.22904338]]) + [[3.19713944, 3.55003750, 2.07479117], + [4.82023817, 4.83756238, 4.39019370], + [3.24504235, 4.46077085, 2.68713014]]) - >>> x = paddle.rand(shape=[3,3]).to("cpu", "float16") + >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], + >>> [0.94007939, 0.94585413, 0.79673123], + >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "float16") >>> y = x.apply(f) >>> print(y) Tensor(shape=[3, 3], dtype=float16, place=Place(cpu), stop_gradient=True, - [[3.19726562, 3.55078125, 2.07421875], - [4.82031250, 4.83593750, 4.39062500], - [3.24414062, 4.46093750, 2.68750000]]) + [[3.19531250, 3.54882812, 2.07421875], + [4.81640625, 4.83593750, 4.39062500], + [3.24414062, 4.46093750, 2.68554688]]) - >>> x = paddle.rand(shape=[3,3]).to("cpu", "bfloat16") + >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], + >>> [0.94007939, 0.94585413, 0.79673123], + >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "bfloat16") >>> y = x.apply(f) >>> print(y) Tensor(shape=[3, 3], dtype=bfloat16, place=Place(cpu), stop_gradient=True, @@ -492,7 +496,9 @@ def apply(self, func): >>> if paddle.is_compiled_with_cuda(): - >>> x = paddle.rand(shape=[3,3]).to("gpu", "float32") + >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], + >>> [0.94007939, 0.94585413, 0.79673123], + >>> [0.41501412, 0.82025695, 
0.22904338]]).to("gpu", "float32") >>> y = x.apply(f) >>> print(y) Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True, diff --git a/test/legacy_test/test_apply.py b/test/legacy_test/test_apply.py index 28499d154c7e13..2c11bd26e932cd 100644 --- a/test/legacy_test/test_apply.py +++ b/test/legacy_test/test_apply.py @@ -52,6 +52,9 @@ def fn_inplace(x): def fn_outplace(x, func): x.apply(func) + def function(x, y, z): + return x + y + z + self.assertRaises(RuntimeError, fn_inplace, self.x) self.assertRaises(RuntimeError, fn_outplace, self.x, self.function) with paddle.jit.api.sot_mode_guard(False): @@ -61,6 +64,14 @@ def fn_outplace(x, func): self.x, self.function, ) + self.x.stop_gradient = True + self.assertRaises( + ValueError, + paddle.jit.to_static(fn_outplace), + self.x, + function, + ) + self.x.stop_gradient = False with paddle.pir_utils.IrGuard(): paddle.disable_static() self.assertRaises( From e5b8f61b227aef0a751e2221871f5819f833cef8 Mon Sep 17 00:00:00 2001 From: yangguohao <1901212980@pku.edu.cn> Date: Thu, 21 Dec 2023 21:38:13 +0800 Subject: [PATCH 14/14] fix docs --- .../base/dygraph/tensor_patch_methods.py | 92 ++++++------------- 1 file changed, 30 insertions(+), 62 deletions(-) diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py index a7882ce0a5b79d..c016746b32a57f 100644 --- a/python/paddle/base/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -396,51 +396,35 @@ def apply_(self, func): >>> import paddle - >>> paddle.seed(0) - >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], - >>> [0.94007939, 0.94585413, 0.79673123], - >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "float64") + >>> x = paddle.to_tensor([[0.3, 0.5, 0.1], + >>> [0.9, 0.9, 0.7], + >>> [0.4, 0.8, 0.2]]).to("cpu", "float64") >>> f = lambda x: 3*x+2 >>> x.apply_(f) >>> print(x) Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, - [[3.19713944, 3.55003750, 2.07479117], - [4.82023817, 4.83756238, 4.39019370], - [3.24504235, 4.46077085, 2.68713014]]) + [[2.90000004, 3.50000000, 2.30000000], + [4.69999993, 4.69999993, 4.09999996], + [3.20000002, 4.40000004, 2.60000001]]) - >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], - >>> [0.94007939, 0.94585413, 0.79673123], - >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "float16") + >>> x = paddle.to_tensor([[0.3, 0.5, 0.1], + >>> [0.9, 0.9, 0.7], + >>> [0.4, 0.8, 0.2]]).to("cpu", "float16") >>> x.apply_(f) - >>> print(x) - Tensor(shape=[3, 3], dtype=float16, place=Place(cpu), stop_gradient=True, - [[3.19531250, 3.54882812, 2.07421875], - [4.81640625, 4.83593750, 4.39062500], - [3.24414062, 4.46093750, 2.68554688]]) - >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], - >>> [0.94007939, 0.94585413, 0.79673123], - >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "bfloat16") + >>> x = paddle.to_tensor([[0.3, 0.5, 0.1], + >>> [0.9, 0.9, 0.7], + >>> [0.4, 0.8, 0.2]]).to("cpu", "bfloat16") >>> x.apply_(f) - >>> print(x) - Tensor(shape=[3, 3], dtype=bfloat16, place=Place(cpu), stop_gradient=True, - [[3.18750000, 3.54687500, 2.06250000], - [4.81250000, 4.81250000, 4.37500000], - [3.23437500, 4.43750000, 2.67187500]]) >>> if paddle.is_compiled_with_cuda(): - >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], - >>> [0.94007939, 0.94585413, 0.79673123], - >>> [0.41501412, 0.82025695, 0.22904338]]).to("gpu", "float32") + >>> x = paddle.to_tensor([[0.3, 0.5, 0.1], + 
>>> [0.9, 0.9, 0.7], + >>> [0.4, 0.8, 0.2]]).to("gpu", "float32") >>> x.apply_(f) - >>> print(x) - Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [[3.19713950, 3.55003738, 2.07479119], - [4.82023811, 4.83756256, 4.39019394], - [3.24504232, 4.46077061, 2.68713021]]) """ if not self.stop_gradient: raise RuntimeError( @@ -460,51 +444,35 @@ def apply(self, func): >>> import paddle - >>> paddle.seed(0) - >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], - >>> [0.94007939, 0.94585413, 0.79673123], - >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "float64") + >>> x = paddle.to_tensor([[0.3, 0.5, 0.1], + >>> [0.9, 0.9, 0.7], + >>> [0.4, 0.8, 0.2]]).to("cpu", "float64") >>> f = lambda x: 3*x+2 >>> y = x.apply(f) >>> print(y) Tensor(shape=[3, 3], dtype=float64, place=Place(cpu), stop_gradient=True, - [[3.19713944, 3.55003750, 2.07479117], - [4.82023817, 4.83756238, 4.39019370], - [3.24504235, 4.46077085, 2.68713014]]) + [[2.90000004, 3.50000000, 2.30000000], + [4.69999993, 4.69999993, 4.09999996], + [3.20000002, 4.40000004, 2.60000001]]) - >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], - >>> [0.94007939, 0.94585413, 0.79673123], - >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "float16") + >>> x = paddle.to_tensor([[0.3, 0.5, 0.1], + >>> [0.9, 0.9, 0.7], + >>> [0.4, 0.8, 0.2]]).to("cpu", "float16") >>> y = x.apply(f) - >>> print(y) - Tensor(shape=[3, 3], dtype=float16, place=Place(cpu), stop_gradient=True, - [[3.19531250, 3.54882812, 2.07421875], - [4.81640625, 4.83593750, 4.39062500], - [3.24414062, 4.46093750, 2.68554688]]) - >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], - >>> [0.94007939, 0.94585413, 0.79673123], - >>> [0.41501412, 0.82025695, 0.22904338]]).to("cpu", "bfloat16") + >>> x = paddle.to_tensor([[0.3, 0.5, 0.1], + >>> [0.9, 0.9, 0.7], + >>> [0.4, 0.8, 0.2]]).to("cpu", "bfloat16") >>> y = x.apply(f) - >>> print(y) - Tensor(shape=[3, 3], dtype=bfloat16, place=Place(cpu), stop_gradient=True, - [[3.18750000, 3.54687500, 2.06250000], - [4.81250000, 4.81250000, 4.37500000], - [3.23437500, 4.43750000, 2.67187500]]) >>> if paddle.is_compiled_with_cuda(): - >>> x = paddle.to_tensor([[0.39904648, 0.51667917, 0.02493039], - >>> [0.94007939, 0.94585413, 0.79673123], - >>> [0.41501412, 0.82025695, 0.22904338]]).to("gpu", "float32") + >>> x = paddle.to_tensor([[0.3, 0.5, 0.1], + >>> [0.9, 0.9, 0.7], + >>> [0.4, 0.8, 0.2]]).to("gpu", "float32") >>> y = x.apply(f) - >>> print(y) - Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [[3.19713950, 3.55003738, 2.07479119], - [4.82023811, 4.83756256, 4.39019394], - [3.24504232, 4.46077061, 2.68713021]]) """ if not self.stop_gradient:
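
Taken together, the series above wires up three entry points: the C++ `_apply`/`_apply_` methods on the eager tensor in eager_method.cc, the Python-level `Tensor.apply`/`Tensor.apply_` patch methods, and an `apply` on the legacy-IR `Variable` and the PIR `Value`, so the same user code also works under `paddle.jit.to_static`. Below is a minimal usage sketch, assuming a Paddle build that includes these commits; the tensor values and the callable `f` mirror the docstring examples in the patch, and no API beyond what the patch adds is used:

    import paddle

    # Both methods require stop_gradient=True; a tensor that needs
    # gradients is rejected with a RuntimeError by design.
    x = paddle.to_tensor([[0.3, 0.5, 0.1],
                          [0.9, 0.9, 0.7],
                          [0.4, 0.8, 0.2]], stop_gradient=True)
    f = lambda t: 3 * t + 2

    y = x.apply(f)    # out-of-place: returns a new tensor, x is unchanged
    x.apply_(f)       # in-place: x now holds f(x); after PATCH 05 the call
                      # also returns x itself rather than None

    # Tensors that require gradients are rejected by both methods.
    g = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
    try:
        g.apply(f)
    except RuntimeError as err:
        print(err)    # "Cannot apply function on a tensor that required gradient."

Under `paddle.jit.to_static`, the same `x.apply(f)` call dispatches to `Variable.apply` (legacy IR) or to the pybind `Value.apply` (PIR), both of which invoke the Python callable on the symbolic value after the same stop-gradient check, as exercised by `test_to_static` in test_apply.py.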