diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 50c1142c7bfb62..7084acb81145b8 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -264,7 +264,6 @@
 from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS
 from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS
 from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS
-from .fluid.dygraph.base import no_grad_ as no_grad #DEFINE_ALIAS
 from .fluid.layers import crop_tensor as crop #DEFINE_ALIAS
 
 from . import jit
diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py
index db1a705167cb92..5f0d8e089822c9 100644
--- a/python/paddle/fluid/dygraph/base.py
+++ b/python/paddle/fluid/dygraph/base.py
@@ -272,8 +272,6 @@ class no_grad_:
         import numpy as np
         import paddle
 
-        paddle.disable_static()
-
         # use as generator
 
         data = np.array([[2, 3], [4, 5]]).astype('float32')
diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py
index 3d06b4ab911ac4..5ba4446970fef5 100644
--- a/python/paddle/framework/__init__.py
+++ b/python/paddle/framework/__init__.py
@@ -38,7 +38,7 @@
 from ..fluid.core import VarBase #DEFINE_ALIAS
 from paddle.fluid import core #DEFINE_ALIAS
-from ..fluid.dygraph.base import no_grad #DEFINE_ALIAS
+from ..fluid.dygraph.base import no_grad_ as no_grad #DEFINE_ALIAS
 from ..fluid.dygraph.base import to_variable #DEFINE_ALIAS
 from ..fluid.dygraph.base import grad #DEFINE_ALIAS
 from .io import save
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 351afc97a2a88d..fdeed0ae49dfd6 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -418,12 +418,11 @@ class MSELoss(fluid.dygraph.layers.Layer):
             input_data = np.array([1.5]).astype("float32")
             label_data = np.array([1.7]).astype("float32")
 
-            paddle.disable_static()
             mse_loss = paddle.nn.loss.MSELoss()
             input = paddle.to_tensor(input_data)
             label = paddle.to_tensor(label_data)
             output = mse_loss(input, label)
-            print(output.numpy())
+            print(output)
             # [0.04000002]
 
     """
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 56933cf73ef98d..1cdd19ec216ec5 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -273,18 +273,15 @@ def _elementwise_op(helper):
 
 def add(x, y, name=None):
     """
-Examples:
+    Examples:
 
     .. code-block:: python
 
         import paddle
-
-        paddle.disable_static()
         x = paddle.to_tensor([2, 3, 4], 'float64')
         y = paddle.to_tensor([1, 5, 2], 'float64')
         z = paddle.add(x, y)
-        np_z = z.numpy()
-        print(np_z) # [3., 8., 6. ]
+        print(z) # [3., 8., 6. ]
 
     """
     op_type = 'elementwise_add'
@@ -1358,9 +1355,6 @@ def addcmul(input, tensor1, tensor2, value=1.0, name=None):
 
 def clip(x, min=None, max=None, name=None):
     """
-    :alias_main: paddle.clip
-    :alias: paddle.clip,paddle.tensor.clip,paddle.tensor.math.clip
-
     **clip layer**
 
     This operator clip all elements in input into the range [ min, max ] and return
@@ -1387,15 +1381,13 @@ def clip(x, min=None, max=None, name=None):
     .. code-block:: python
 
         import paddle
-
-        paddle.disable_static()
         x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32')
         out1 = paddle.clip(x1, min=3.5, max=5.0)
         out2 = paddle.clip(x1, min=2.5)
-        print(out1.numpy())
+        print(out1)
         # [[3.5, 3.5]
         # [4.5, 5.0]]
-        print(out2.numpy())
+        print(out2)
         # [[2.5, 3.5]
         # [[4.5, 6.4]