From 7ff7c227df26e9474d82880eba928b81cc1a7e32 Mon Sep 17 00:00:00 2001
From: yoyoIcy
Date: Sat, 9 Sep 2023 17:25:47 +0800
Subject: [PATCH 1/2] [Doctest] fix No.335, test=docs_preview

---
 .../base/dygraph/tensor_patch_methods.py | 368 +++++++++---------
 1 file changed, 194 insertions(+), 174 deletions(-)

diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py
index 2db290e1e1d4e..89250f1cea419 100644
--- a/python/paddle/base/dygraph/tensor_patch_methods.py
+++ b/python/paddle/base/dygraph/tensor_patch_methods.py
@@ -100,15 +100,14 @@ def _to_static_var(self, to_parameter=False, **kwargs):
        Examples:
            .. code-block:: python

                import paddle.base as base
                from paddle.base.dygraph.base import to_variable
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with base.dygraph.guard():
                    tensor = to_variable(data)
                    static_var = tensor._to_static_var()

                >>> import paddle.base as base
                >>> from paddle.base.dygraph.base import to_variable
                >>> import numpy as np

                >>> data = np.ones([3, 1024], dtype='float32')
                >>> with base.dygraph.guard():
                ...     tensor = to_variable(data)
                ...     static_var = tensor._to_static_var()
        """
        # Note: getattr(self, attr, None) will call x.grad=x.gradient(), but gradient() only available in dygraph.
@@ -172,20 +171,19 @@ def set_value(self, value):
        Examples:
            .. code-block:: python

                import paddle.base as base
                from paddle.base.dygraph.base import to_variable
                from paddle.nn import Linear
                import numpy as np

                data = np.ones([3, 1024], dtype='float32')
                with base.dygraph.guard():
                    linear = Linear(1024, 4)
                    t = to_variable(data)
                    linear(t)  # call with default weight
                    custom_weight = np.random.randn(1024, 4).astype("float32")
                    linear.weight.set_value(custom_weight)  # change existing weight
                    out = linear(t)  # call with different weight

                >>> import paddle.base as base
                >>> from paddle.base.dygraph.base import to_variable
                >>> from paddle.nn import Linear
                >>> import numpy as np

                >>> data = np.ones([3, 1024], dtype='float32')
                >>> with base.dygraph.guard():
                ...     linear = Linear(1024, 4)
                ...     t = to_variable(data)
                ...     linear(t)  # call with the default weight
                ...     custom_weight = np.random.randn(1024, 4).astype("float32")
                ...     linear.weight.set_value(custom_weight)  # change the existing weight
                ...     out = linear(t)  # call with the new weight
        """
        base_tensor = core.eager.Tensor
        assert isinstance(
@@ -252,32 +250,32 @@ def backward(self, grad_tensor=None, retain_graph=False):
        Examples:
            .. code-block:: python

                import paddle
                x = paddle.to_tensor(5., stop_gradient=False)
                for i in range(5):
                    y = paddle.pow(x, 4.0)
                    y.backward()
                    print("{}: {}".format(i, x.grad))
                # 0: [500.]
                # 1: [1000.]
                # 2: [1500.]
                # 3: [2000.]
                # 4: [2500.]

                x.clear_grad()
                print("{}".format(x.grad))
                # 0.

                grad_tensor=paddle.to_tensor(2.)
                for i in range(5):
                    y = paddle.pow(x, 4.0)
                    y.backward(grad_tensor)
                    print("{}: {}".format(i, x.grad))
                # 0: [1000.]
                # 1: [2000.]
                # 2: [3000.]
                # 3: [4000.]
                # 4: [5000.]

                >>> import paddle
                >>> x = paddle.to_tensor(5., stop_gradient=False)
                >>> for i in range(5):
                ...     y = paddle.pow(x, 4.0)
                ...     y.backward()
                ...     print("{}: {}".format(i, x.grad))
                0: [500.]
                1: [1000.]
                2: [1500.]
                3: [2000.]
                4: [2500.]

                >>> x.clear_grad()
                >>> print("{}".format(x.grad))
                0.

                >>> grad_tensor = paddle.to_tensor(2.)
                >>> for i in range(5):
                ...     y = paddle.pow(x, 4.0)
                ...     y.backward(grad_tensor)
                ...     print("{}: {}".format(i, x.grad))
print("{}: {}".format(i, x.grad)) + 0: [1000.] + 1: [2000.] + 2: [3000.] + 3: [4000.] + 4: [5000.] """ if framework.in_dygraph_mode(): @@ -334,13 +332,13 @@ def gradient(self): Examples: .. code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor(5., stop_gradient=False) - y = paddle.pow(x, 4.0) - y.backward() - print("grad of x: {}".format(x.gradient())) - # [500.] + >>> x = paddle.to_tensor(5., stop_gradient=False) + >>> y = paddle.pow(x, 4.0) + >>> y.backward() + >>> print("grad of x: {}".format(x.gradient())) + [500.] """ if self.grad is None: @@ -372,41 +370,44 @@ def register_hook(self, hook): Examples: .. code-block:: python - import paddle - - # hook function return None - def print_hook_fn(grad): - print(grad) - - # hook function return Tensor - def double_hook_fn(grad): - grad = grad * 2 - return grad - - x = paddle.to_tensor([0., 1., 2., 3.], stop_gradient=False) - y = paddle.to_tensor([4., 5., 6., 7.], stop_gradient=False) - z = paddle.to_tensor([1., 2., 3., 4.]) - - # one Tensor can register multiple hooks - h = x.register_hook(print_hook_fn) - x.register_hook(double_hook_fn) - - w = x + y - # register hook by lambda function - w.register_hook(lambda grad: grad * 2) - - o = z.matmul(w) - o.backward() - # print_hook_fn print content in backward - # Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=False, - # [2., 4., 6., 8.]) - - print("w.grad:", w.grad) # w.grad: [1. 2. 3. 4.] - print("x.grad:", x.grad) # x.grad: [ 4. 8. 12. 16.] - print("y.grad:", y.grad) # y.grad: [2. 4. 6. 8.] - - # remove hook - h.remove() + >>> import paddle + + >>> # hook function return None + >>> def print_hook_fn(grad): + ... print(grad) + ... + >>> # hook function return Tensor + >>> def double_hook_fn(grad): + ... grad = grad * 2 + ... return grad + ... + >>> x = paddle.to_tensor([0., 1., 2., 3.], stop_gradient=False) + >>> y = paddle.to_tensor([4., 5., 6., 7.], stop_gradient=False) + >>> z = paddle.to_tensor([1., 2., 3., 4.]) + + >>> # one Tensor can register multiple hooks + >>> h = x.register_hook(print_hook_fn) + >>> x.register_hook(double_hook_fn) + + >>> w = x + y + >>> # register hook by lambda function + >>> w.register_hook(lambda grad: grad * 2) + + >>> o = z.matmul(w) + >>> o.backward() + >>> # print_hook_fn print content in backward + Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=False, + [2., 4., 6., 8.]) + + >>> print("w.grad:", w.grad) + w.grad: [1. 2. 3. 4.] + >>> print("x.grad:", x.grad) + x.grad: [ 4. 8. 12. 16.] + >>> print("y.grad:", y.grad) + y.grad: [2. 4. 6. 8.] + + >>> # remove hook + >>> h.remove() """ if self.stop_gradient is True: raise RuntimeError( @@ -519,13 +520,13 @@ def grad(self): Examples: .. code-block:: python - import paddle + >>> import paddle - x = paddle.to_tensor(5., stop_gradient=False) - y = paddle.pow(x, 4.0) - y.backward() - print("grad of x: {}".format(x.grad)) - # Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=False, 500.) + >>> x = paddle.to_tensor(5., stop_gradient=False) + >>> y = paddle.pow(x, 4.0) + >>> y.backward() + >>> print("grad of x: {}".format(x.grad)) + grad of x: Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=False, 500.) """ msg = ( @@ -564,27 +565,37 @@ def item(self, *args): Examples: .. 
                import paddle

                x = paddle.to_tensor(1)
                print(x.item())             # 1
                print(type(x.item()))       # <class 'int'>

                x = paddle.to_tensor(1.0)
                print(x.item())             # 1.0
                print(type(x.item()))       # <class 'float'>

                x = paddle.to_tensor(True)
                print(x.item())             # True
                print(type(x.item()))       # <class 'bool'>

                x = paddle.to_tensor(1+1j)
                print(x.item())             # (1+1j)
                print(type(x.item()))       # <class 'complex'>

                x = paddle.to_tensor([[1.1, 2.2, 3.3]])
                print(x.item(2))            # 3.3
                print(x.item(0, 2))         # 3.3

                >>> import paddle

                >>> x = paddle.to_tensor(1)
                >>> print(x.item())
                1
                >>> print(type(x.item()))
                <class 'int'>

                >>> x = paddle.to_tensor(1.0)
                >>> print(x.item())
                1.0
                >>> print(type(x.item()))
                <class 'float'>

                >>> x = paddle.to_tensor(True)
                >>> print(x.item())
                True
                >>> print(type(x.item()))
                <class 'bool'>

                >>> x = paddle.to_tensor(1+1j)
                >>> print(x.item())
                (1+1j)
                >>> print(type(x.item()))
                <class 'complex'>

                >>> x = paddle.to_tensor([[1.1, 2.2, 3.3]])
                >>> print(x.item(2))
                3.299999952316284
                >>> print(x.item(0, 2))
                3.299999952316284
        """
        scalar = self._getitem_from_offset(*args)
@@ -601,14 +612,16 @@ def inplace_version(self):
        **Notes: This is a read-only property**

        Examples:
            .. code-block:: python

                import paddle
                var = paddle.ones(shape=[4, 2, 3], dtype="float32")
                print(var.inplace_version)  # 0

                var[1] = 2.2
                print(var.inplace_version)  # 1

                >>> import paddle
                >>> var = paddle.ones(shape=[4, 2, 3], dtype="float32")
                >>> print(var.inplace_version)
                0

                >>> var[1] = 2.2
                >>> print(var.inplace_version)
                1
        """
        return self._inplace_version()
@@ -622,13 +635,13 @@ def __str__(self):
        Examples:
            .. code-block:: python

                import paddle
                x = paddle.rand([2, 5])
                print(x)

                # Tensor(shape=[2, 5], dtype=float32, place=CPUPlace,
                #        [[0.30574632, 0.55739117, 0.30902600, 0.39413780, 0.44830436],
                #         [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])

                >>> import paddle
                >>> x = paddle.rand([2, 5])
                >>> print(x)
                Tensor(shape=[2, 5], dtype=float32, place=CPUPlace,
                       [[0.30574632, 0.55739117, 0.30902600, 0.39413780, 0.44830436],
                        [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
        """
        from paddle.tensor.to_string import tensor_to_string
@@ -641,19 +654,18 @@ def __deepcopy__(self, memo):
        Examples:
            .. code-block:: python

                import paddle
                import copy
                x = paddle.to_tensor(2.)
                y = copy.deepcopy(x)

                print(x)
                # Tensor(shape=[], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        2.)

                print(y)
                # Tensor(shape=[], dtype=float32, place=CPUPlace, stop_gradient=True,
                #        2.)

                >>> import paddle
                >>> import copy
                >>> x = paddle.to_tensor(2.)
                >>> y = copy.deepcopy(x)

                >>> print(x)
                Tensor(shape=[], dtype=float32, place=CPUPlace, stop_gradient=True,
                       2.)

                >>> print(y)
                Tensor(shape=[], dtype=float32, place=CPUPlace, stop_gradient=True,
                       2.)
        """
        if not self.is_leaf:
            raise RuntimeError(
@@ -694,13 +706,15 @@ def __array__(self, dtype=None):
        Examples:
            .. code-block:: python

                import paddle
                import numpy as np
                x = paddle.randn([2, 2])
                x_array = np.array(x)

                print(type(x_array))  # <class 'numpy.ndarray'>
                print(x_array.shape)  # (2, 2)

                >>> import paddle
                >>> import numpy as np
                >>> x = paddle.randn([2, 2])
                >>> x_array = np.array(x)

                >>> print(type(x_array))
                <class 'numpy.ndarray'>
                >>> print(x_array.shape)
                (2, 2)
        """
        array = self.numpy(False)
        if dtype:
@@ -821,11 +835,12 @@ def _uva(self, device_id=0):
        Examples:
            .. code-block:: python

                # required: gpu
                import paddle
                x = paddle.to_tensor([1, 2, 3], place=paddle.CPUPlace())
                x._uva()
                print(x)

                >>> # doctest: +REQUIRES(env:GPU)
                >>> import paddle
                >>> paddle.device.set_device('gpu')
                >>> x = paddle.to_tensor([1, 2, 3], place=paddle.CPUPlace())
                >>> x._uva()
                >>> print(x)
        '''
        self._tensor_uva(device_id)
@@ -881,13 +896,14 @@ def values(self):
        Examples:
            .. code-block:: python

                import paddle
                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
                values = [1, 2, 3, 4, 5]
                dense_shape = [3, 4]
                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
                print(sparse_x.values())
                # [1, 2, 3, 4, 5]

                >>> import paddle
                >>> indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
                >>> values = [1, 2, 3, 4, 5]
                >>> dense_shape = [3, 4]
                >>> sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int32'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
                >>> print(sparse_x.values())
                Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
                       [1., 2., 3., 4., 5.])
        """
        return _C_ops.sparse_values(self)
@@ -904,15 +920,17 @@ def to_dense(self):
        Examples:
            .. code-block:: python

                import paddle
                indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
                values = [1, 2, 3, 4, 5]
                dense_shape = [3, 4]
                sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
                dense_x = sparse_x.to_dense()
                # [[0., 1., 0., 2.],
                #  [0., 0., 3., 0.],
                #  [4., 5., 0., 0.]]

                >>> import paddle
                >>> indices = [[0, 0, 1, 2, 2], [1, 3, 2, 0, 1]]
                >>> values = [1, 2, 3, 4, 5]
                >>> dense_shape = [3, 4]
                >>> sparse_x = paddle.sparse.sparse_coo_tensor(paddle.to_tensor(indices, dtype='int64'), paddle.to_tensor(values, dtype='float32'), shape=dense_shape)
                >>> dense_x = sparse_x.to_dense()
                >>> print(dense_x)
                Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
                       [[0., 1., 0., 2.],
                        [0., 0., 3., 0.],
                        [4., 5., 0., 0.]])
        """
        return _C_ops.sparse_to_dense(self)
@@ -930,13 +948,15 @@ def to_sparse_coo(self, sparse_dim):
        Examples:
            .. code-block:: python

                import paddle
                dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
                dense_x = paddle.to_tensor(dense_x, dtype='float32')
                sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
                # indices=[[0, 0, 1, 1],
                #          [1, 3, 2, 3]],
                # values=[1., 2., 3., 4.]

                >>> import paddle
                >>> dense_x = [[0, 1, 0, 2], [0, 0, 3, 4]]
                >>> dense_x = paddle.to_tensor(dense_x, dtype='float32')
                >>> sparse_x = dense_x.to_sparse_coo(sparse_dim=2)
                >>> print(sparse_x)
                Tensor(shape=[2, 4], dtype=paddle.float32, place=Place(cpu), stop_gradient=True,
                       indices=[[0, 0, 1, 1],
                                [1, 3, 2, 3]],
                       values=[1., 2., 3., 4.])
        """
        return _C_ops.sparse_to_sparse_coo(self, sparse_dim)

From bc2a81e8fa7651e286844552861cd85d17540735 Mon Sep 17 00:00:00 2001
From: yoyoIcy
Date: Sat, 9 Sep 2023 22:38:28 +0800
Subject: [PATCH 2/2] [Doctest] fix No.335, test=docs_preview

---
 .../base/dygraph/tensor_patch_methods.py | 60 +++++++++++--------
 1 file changed, 35 insertions(+), 25 deletions(-)

diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py
index 89250f1cea419..83bb82ee70d17 100644
--- a/python/paddle/base/dygraph/tensor_patch_methods.py
+++ b/python/paddle/base/dygraph/tensor_patch_methods.py
@@ -256,27 +256,37 @@ def backward(self, grad_tensor=None, retain_graph=False):
                ...     y = paddle.pow(x, 4.0)
                ...     y.backward()
                ...     print("{}: {}".format(i, x.grad))
                0: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   500.)
                1: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   1000.)
                2: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   1500.)
                3: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   2000.)
                4: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   2500.)

                >>> x.clear_grad()
                >>> print("{}".format(x.grad))
                Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                0.)

                >>> grad_tensor = paddle.to_tensor(2.)
                >>> for i in range(5):
                ...     y = paddle.pow(x, 4.0)
                ...     y.backward(grad_tensor)
                ...     print("{}: {}".format(i, x.grad))
                0: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   1000.)
                1: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   2000.)
                2: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   3000.)
                3: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   4000.)
                4: Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=False,
                   5000.)
        """
        if framework.in_dygraph_mode():
            if in_profiler_mode():
@@ -338,7 +348,7 @@ def gradient(self):
                >>> y = paddle.pow(x, 4.0)
                >>> y.backward()
                >>> print("grad of x: {}".format(x.gradient()))
                grad of x: 500.0
        """
        if self.grad is None:
@@ -396,15 +406,17 @@ def register_hook(self, hook):
                >>> o = z.matmul(w)
                >>> # print_hook_fn prints the gradient during backward
                >>> o.backward()
                Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=False,
                       [2., 4., 6., 8.])

                >>> print("w.grad:", w.grad)
                w.grad: None
                >>> print("x.grad:", x.grad)
                x.grad: Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=False,
                        [4. , 8. , 12., 16.])
                >>> print("y.grad:", y.grad)
                y.grad: Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=False,
                        [2., 4., 6., 8.])

                >>> # remove the hook
                >>> h.remove()
        """
        if self.stop_gradient is True:
@@ -636,12 +648,12 @@ def __str__(self):
            .. code-block:: python

                >>> import paddle
                >>> paddle.seed(2023)
                >>> x = paddle.rand([2, 5])
                >>> print(x)
                Tensor(shape=[2, 5], dtype=float32, place=Place(cpu), stop_gradient=True,
                       [[0.86583614, 0.52014720, 0.25960937, 0.90525323, 0.42400089],
                        [0.40641287, 0.97020894, 0.74437362, 0.51785129, 0.73292869]])
        """
        from paddle.tensor.to_string import tensor_to_string
@@ -658,13 +670,11 @@ def __deepcopy__(self, memo):
                >>> import copy
                >>> x = paddle.to_tensor(2.)
                >>> y = copy.deepcopy(x)
                >>> print(x)
                Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                2.)
                >>> print(y)
                Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
                2.)
        """
        if not self.is_leaf:
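---

Reviewer note: the two commits convert comment-style outputs into `>>>` examples whose expected output must match what each statement actually prints. The snippet below is not part of the patch; it is a minimal, self-contained sketch of how one of the converted examples behaves under Python's standard `doctest` module, assuming a CPU build of Paddle is installed. The wrapper function `backward_example` is hypothetical, introduced only for illustration (Paddle's CI uses Xdoctest, which additionally understands directives such as the `# doctest: +REQUIRES(env:GPU)` seen in the `_uva` hunk).

.. code-block:: python

    # Minimal sketch (not part of the patch): verify a '>>>' example with the
    # standard library's doctest runner. Assumes Paddle with the CPU backend;
    # `backward_example` is a hypothetical wrapper, not a Paddle API.
    import doctest


    def backward_example():
        """
        >>> import paddle
        >>> x = paddle.to_tensor(5., stop_gradient=False)
        >>> y = paddle.pow(x, 4.0)
        >>> y.backward()
        >>> float(x.grad)  # d/dx of x**4 at x=5 is 4 * 5**3 = 500
        500.0
        """


    if __name__ == "__main__":
        # Executes every '>>>' statement in the docstring and compares what it
        # prints against the expected-output line directly beneath it.
        doctest.run_docstring_examples(backward_example, globals(), verbose=True)

Converting `float(x.grad)` rather than printing the full `Tensor(...)` repr keeps the expected output independent of the device string (`Place(cpu)` vs `Place(gpu:0)`), which is one reason the patch pins places and seeds where full reprs are asserted.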