[Cleanup][B-2] clean some paddle.base.dygraph.to_variable for test #61904

Merged
merged 11 commits on Feb 23, 2024
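This cleanup replaces the legacy dygraph helper `paddle.base.dygraph.to_variable` with the public `paddle.to_tensor` API in the test files below. A minimal sketch of the swap, assuming a NumPy input array (the variable names are illustrative, not taken from the diff):

```python
import numpy as np
import paddle

np_x = np.random.random([2, 3]).astype("float32")

# before: legacy dygraph helper targeted by this cleanup
# x = paddle.base.dygraph.to_variable(np_x)

# after: public API
x = paddle.to_tensor(np_x)
x.stop_gradient = False  # set explicitly where a test needs gradients
```

Both calls build a dygraph Tensor from the NumPy array, so in these tests the replacement is a one-for-one swap.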
33 changes: 17 additions & 16 deletions test/dygraph_to_static/seq2seq_dygraph_model.py
@@ -18,7 +18,6 @@
import paddle
from paddle import base
from paddle.base import ParamAttr
from paddle.base.dygraph.base import to_variable
from paddle.nn import Embedding, Layer

INF = 1.0 * 1e5
@@ -211,10 +210,10 @@ def forward(self, inputs):

# NOTE: modify model code about `enc_hidden` and `enc_cell` to transform dygraph code successfully.
# Because nested list can't be transformed now.
enc_hidden_0 = to_variable(
enc_hidden_0 = paddle.to_tensor(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
enc_cell_0 = to_variable(
enc_cell_0 = paddle.to_tensor(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
zero = paddle.zeros(shape=[1], dtype="int64")
@@ -312,10 +311,10 @@ def beam_search(self, inputs):
self.batch_size = src.shape[0]

src_emb = self.src_embeder(self._transpose_batch_time(src))
enc_hidden_0 = to_variable(
enc_hidden_0 = paddle.to_tensor(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
enc_cell_0 = to_variable(
enc_cell_0 = paddle.to_tensor(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
zero = paddle.zeros(shape=[1], dtype="int64")
@@ -367,20 +366,20 @@ def beam_search(self, inputs):

# beam search
batch_beam_shape = (self.batch_size, self.beam_size)
vocab_size_tensor = to_variable(
vocab_size_tensor = paddle.to_tensor(
np.full((1), self.tar_vocab_size)
).astype("int64")
start_token_tensor = to_variable(
start_token_tensor = paddle.to_tensor(
np.full(batch_beam_shape, self.beam_start_token, dtype='int64')
)
end_token_tensor = to_variable(
end_token_tensor = paddle.to_tensor(
np.full(batch_beam_shape, self.beam_end_token, dtype='int64')
)
step_input = self.tar_embeder(start_token_tensor)
beam_finished = to_variable(
beam_finished = paddle.to_tensor(
np.full(batch_beam_shape, 0, dtype='float32')
)
beam_state_log_probs = to_variable(
beam_state_log_probs = paddle.to_tensor(
np.array(
[[0.0] + [-self.kinf] * (self.beam_size - 1)], dtype="float32"
)
@@ -395,15 +394,17 @@

batch_pos = paddle.expand(
paddle.unsqueeze(
to_variable(np.arange(0, self.batch_size, 1, dtype="int64")),
paddle.to_tensor(
np.arange(0, self.batch_size, 1, dtype="int64")
),
[1],
),
[-1, self.beam_size],
)
predicted_ids = []
parent_ids = []

for step_idx in range(paddle.to_tensor(self.beam_max_step_num)):
if paddle.sum(1 - beam_finished) == 0:
break
step_input = self._merge_batch_beams(step_input)
@@ -437,7 +438,7 @@ def beam_search(self, inputs):
)
noend_array = [-self.kinf] * self.tar_vocab_size
noend_array[self.beam_end_token] = 0
noend_mask_tensor = to_variable(
noend_mask_tensor = paddle.to_tensor(
np.array(noend_array, dtype='float32')
)

@@ -726,11 +727,11 @@ def forward(self, inputs):

# NOTE: modify model code about `enc_hidden` and `enc_cell` to transform dygraph code successfully.
# Because nested list can't be transformed now.
enc_hidden_0 = to_variable(
enc_hidden_0 = paddle.to_tensor(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
enc_hidden_0.stop_gradient = True
enc_cell_0 = to_variable(
enc_cell_0 = paddle.to_tensor(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
enc_hidden_0.stop_gradient = True
@@ -789,7 +790,7 @@ def forward(self, inputs):
enc_outputs = self._transpose_batch_time(enc_outputs)

# train
input_feed = to_variable(
input_feed = paddle.to_tensor(
np.zeros((self.batch_size, self.hidden_size), dtype='float32')
)
# NOTE: set stop_gradient here, otherwise grad var is null
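The NOTE in this file explains why `enc_hidden` and `enc_cell` are created as separate zero tensors rather than a nested list: the dygraph-to-static transform cannot handle nested state lists yet. A small sketch of that pattern with the updated API (batch and hidden sizes are illustrative):

```python
import numpy as np
import paddle

batch_size, hidden_size = 4, 8  # illustrative sizes

# separate zero-initialized hidden/cell states instead of a nested list
enc_hidden_0 = paddle.to_tensor(
    np.zeros((batch_size, hidden_size), dtype="float32")
)
enc_cell_0 = paddle.to_tensor(
    np.zeros((batch_size, hidden_size), dtype="float32")
)
# the model marks these initial states as non-trainable
enc_hidden_0.stop_gradient = True
enc_cell_0.stop_gradient = True
```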
6 changes: 3 additions & 3 deletions test/dygraph_to_static/simnet_dygraph_model.py
@@ -1,4 +1,4 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -298,14 +298,14 @@ class FC(paddle.nn.Layer):

Examples:
.. code-block:: python
from paddle.base.dygraph.base import to_variable
import paddle
import paddle.base as base
from paddle.base.dygraph import FC
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with base.dygraph.guard():
fc = FC("fc", 64, num_flatten_dims=2)
data = to_variable(data)
data = paddle.to_tensor(data)
conv = fc(data)
"""

24 changes: 2 additions & 22 deletions test/dygraph_to_static/test_basic_api_transformation.py
@@ -21,27 +21,15 @@
)

import paddle
from paddle import base, to_tensor
from paddle.base import dygraph
from paddle.base.dygraph import to_variable
from paddle import to_tensor
from paddle.jit.api import to_static

SEED = 2020
np.random.seed(SEED)


def dyfunc_to_variable(x):
res = base.dygraph.to_variable(x, name=None, zero_copy=None)
return res


def dyfunc_to_variable_2(x):
res = dygraph.to_variable(value=np.zeros(shape=(1), dtype=np.int32))
return res


def dyfunc_to_variable_3(x):
res = to_variable(x, name=None, zero_copy=None)
res = paddle.to_tensor(x)
return res


@@ -73,15 +61,7 @@ def setUp(self):
dyfunc_bool_to_tensor,
dyfunc_int_to_tensor,
dyfunc_float_to_tensor,
dyfunc_to_variable,
dyfunc_to_variable_2,
dyfunc_to_variable_3,
]
self.place = (
paddle.CUDAPlace(0)
if paddle.is_compiled_with_cuda()
else paddle.CPUPlace()
)

def get_dygraph_output(self):
res = self.dygraph_func(self.input).numpy()
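With the redundant `dyfunc_to_variable*` variants removed, the remaining function body reduces to a plain `paddle.to_tensor(x)` call. A hypothetical sanity check in the spirit of this suite, comparing dygraph execution against the `to_static`-converted function (the function name and harness are assumptions, not code from the file):

```python
import numpy as np
import paddle
from paddle.jit.api import to_static


def dyfunc_to_tensor(x):  # hypothetical stand-in for the simplified test function
    return paddle.to_tensor(x)


x = np.ones([1]).astype("int32")
dygraph_res = dyfunc_to_tensor(x).numpy()             # eager execution
static_res = to_static(dyfunc_to_tensor)(x).numpy()   # dygraph-to-static execution
np.testing.assert_allclose(dygraph_res, static_res)
```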
14 changes: 7 additions & 7 deletions test/legacy_test/test_activation_op.py
@@ -370,7 +370,7 @@ def test_out_name(self):
def test_dygraph(self):
with base.dygraph.guard():
np_x = np.array([0.1])
x = base.dygraph.to_variable(np_x)
x = paddle.to_tensor(np_x)
z = eval("paddle.%s(x).numpy()" % self.op_type)
z_expected = eval("np.%s(np_x)" % self.op_type)
np.testing.assert_allclose(z, z_expected, rtol=1e-05)
@@ -963,7 +963,7 @@ def test_out_name(self):
def test_dygraph(self):
with base.dygraph.guard():
np_x = np.array([0.1])
x = base.dygraph.to_variable(np_x)
x = paddle.to_tensor(np_x)
z = paddle.atan(x).numpy()
z_expected = np.arctan(np_x)
self.assertEqual(z, z_expected)
@@ -1036,7 +1036,7 @@ class TestSinhAPI(unittest.TestCase):
def test_dygraph(self):
with base.dygraph.guard():
np_x = np.array([0.1])
x = base.dygraph.to_variable(np_x)
x = paddle.to_tensor(np_x)
z = paddle.sinh(x).numpy()
z_expected = np.sinh(np_x)
np.testing.assert_allclose(z, z_expected, rtol=1e-05)
@@ -1075,7 +1075,7 @@ def test_backward(self):
input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
"float32"
)
var = base.dygraph.to_variable(input_x)
var = paddle.to_tensor(input_x)
var.stop_gradient = False
loss = paddle.sinh(var)
loss.backward()
@@ -1168,7 +1168,7 @@ class TestCoshAPI(unittest.TestCase):
def test_dygraph(self):
with base.dygraph.guard():
np_x = np.array([0.1])
x = base.dygraph.to_variable(np_x)
x = paddle.to_tensor(np_x)
z = paddle.cosh(x).numpy()
z_expected = np.cosh(np_x)
np.testing.assert_allclose(z, z_expected, rtol=1e-05)
@@ -1206,7 +1206,7 @@ def test_backward(self):
input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
"float32"
)
var = base.dygraph.to_variable(input_x)
var = paddle.to_tensor(input_x)
var.stop_gradient = False
loss = paddle.cosh(var)
loss.backward()
@@ -4067,7 +4067,7 @@ def test_api(self):
# dygraph
with base.dygraph.guard():
np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
data_x = base.dygraph.to_variable(np_x)
data_x = paddle.to_tensor(np_x)
z = paddle.log1p(data_x)
np_z = z.numpy()
z_expected = np.array(np.log1p(np_x))
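The activation-op tests above all follow the same dygraph pattern: build a tensor from NumPy, run the paddle op, and compare against the NumPy reference. A condensed sketch of that pattern with the updated API (shown here with `sinh`, matching one of the cases in the diff):

```python
import numpy as np
import paddle

np_x = np.array([0.1])
x = paddle.to_tensor(np_x)
z = paddle.sinh(x).numpy()
z_expected = np.sinh(np_x)
np.testing.assert_allclose(z, z_expected, rtol=1e-05)
```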
4 changes: 2 additions & 2 deletions test/legacy_test/test_adam_op.py
@@ -725,7 +725,7 @@ def test_pir_adam_op(self):
def test_adam_op_dygraph(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = base.dygraph.to_variable(value)
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)

adam = paddle.optimizer.Adam(
@@ -773,7 +773,7 @@ def test_adam_op_with_state_dict(self):
def test_adam_with_grad_clip(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = base.dygraph.to_variable(value)
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
adam = paddle.optimizer.Adam(
2 changes: 1 addition & 1 deletion test/legacy_test/test_adaptive_avg_pool1d.py
@@ -87,7 +87,7 @@ def setUp(self):
def check_adaptive_avg_dygraph_results(self, place):
with base.dygraph.guard(place):
input_np = np.random.random([2, 3, 32]).astype("float32")
input = base.dygraph.to_variable(input_np)
input = paddle.to_tensor(input_np)
result = F.adaptive_avg_pool1d(input, output_size=16)
result_np = avg_pool1D_forward_naive(
input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True
2 changes: 1 addition & 1 deletion test/legacy_test/test_adaptive_max_pool1d.py
@@ -78,7 +78,7 @@ def setUp(self):
def check_adaptive_max_dygraph_results(self, place):
with base.dygraph.guard(place):
input_np = np.random.random([2, 3, 32]).astype("float32")
input = base.dygraph.to_variable(input_np)
input = paddle.to_tensor(input_np)
result = F.adaptive_max_pool1d(input, output_size=16)

result_np = max_pool1D_forward_naive(
6 changes: 3 additions & 3 deletions test/legacy_test/test_addmm_op.py
@@ -324,9 +324,9 @@ def test_api_with_dygraph(self):
np_y = np.random.random((6, 30)).astype(np.float32)

with base.dygraph.guard():
input = base.dygraph.to_variable(np_input)
x = base.dygraph.to_variable(np_x)
y = base.dygraph.to_variable(np_y)
input = paddle.to_tensor(np_input)
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
out = paddle.tensor.addmm(input, x, y)
np.testing.assert_allclose(
np_input + np.dot(np_x, np_y), out.numpy(), rtol=1e-5, atol=1e-8
6 changes: 4 additions & 2 deletions test/legacy_test/test_affine_grid_function.py
@@ -122,10 +122,12 @@ def test_static_api(self):
def paddle_dygraph_layer(self):
paddle.disable_static()
theta_var = (
dg.to_variable(self.theta) if not self.invalid_theta else "invalid"
paddle.to_tensor(self.theta)
if not self.invalid_theta
else "invalid"
)
output_shape = (
dg.to_variable(self.output_shape)
paddle.to_tensor(self.output_shape)
if self.variable_output_shape
else self.output_shape
)
6 changes: 3 additions & 3 deletions test/legacy_test/test_array_read_write_op.py
@@ -106,9 +106,9 @@ def test_read_write(self):
self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)

with base.dygraph.guard(place):
tensor1 = base.dygraph.to_variable(tensor)
tensor2 = base.dygraph.to_variable(tensor)
tensor3 = base.dygraph.to_variable(tensor)
tensor1 = paddle.to_tensor(tensor)
tensor2 = paddle.to_tensor(tensor)
tensor3 = paddle.to_tensor(tensor)
x_dygraph = [tensor1, tensor2, tensor3]
for each_x in x_dygraph:
each_x.stop_gradient = False