[Cleanup][B-8] Replace to_variable (#61581)

---------

Co-authored-by: Nyakku Shigure <sigure.qaq@gmail.com>
zade23 and SigureMo authored Feb 5, 2024
1 parent 4cd7b13 commit 4bded6d
Showing 10 changed files with 52 additions and 56 deletions.
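
Every file below applies the same one-line migration: the deprecated dygraph helper to_variable (used as dg.to_variable, base.dygraph.to_variable, or via from paddle.base.dygraph.base import to_variable) is replaced with the public paddle.to_tensor API. A minimal sketch of the pattern, assuming Paddle 2.x dynamic-graph mode; the array, its shape, and the mean/backward steps are illustrative and not taken from the commit:

    import numpy as np
    import paddle

    x_np = np.random.random([3, 4]).astype("float32")

    # Old, deprecated conversion removed by this commit:
    #   x = paddle.base.dygraph.to_variable(x_np)

    # New public API used throughout these tests:
    x = paddle.to_tensor(x_np, dtype=paddle.float32)
    x.stop_gradient = False
    y = paddle.mean(x)
    y.backward()  # gradients still flow back to x, as with the old helper

Both helpers return a dygraph Tensor in recent Paddle releases, so the surrounding test logic stays unchanged.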
6 changes: 3 additions & 3 deletions test/legacy_test/test_functional_conv1d_transpose.py
@@ -35,12 +35,12 @@ def setUp(self):
 
     def dygraph_case(self):
         with dg.guard():
-            x = dg.to_variable(self.input, dtype=paddle.float32)
-            w = dg.to_variable(self.filter, dtype=paddle.float32)
+            x = paddle.to_tensor(self.input, dtype=paddle.float32)
+            w = paddle.to_tensor(self.filter, dtype=paddle.float32)
             b = (
                 None
                 if self.bias is None
-                else dg.to_variable(self.bias, dtype=paddle.float32)
+                else paddle.to_tensor(self.bias, dtype=paddle.float32)
             )
             y = F.conv1d_transpose(
                 x,
12 changes: 6 additions & 6 deletions test/legacy_test/test_functional_conv2d.py
@@ -156,9 +156,9 @@ def static_graph_case_2(self):
 
     def dygraph_case(self):
         with dg.guard(self.place):
-            x = dg.to_variable(self.input)
-            weight = dg.to_variable(self.weight)
-            bias = None if self.no_bias else dg.to_variable(self.bias)
+            x = paddle.to_tensor(self.input)
+            weight = paddle.to_tensor(self.weight)
+            bias = None if self.no_bias else paddle.to_tensor(self.bias)
             y = F.conv2d(
                 x,
                 weight,
@@ -534,12 +534,12 @@ def static_graph_case(self):
 
     def dygraph_case(self):
         with dg.guard():
-            x = dg.to_variable(self.input, dtype=paddle.float32)
-            w = dg.to_variable(self.filter, dtype=paddle.float32)
+            x = paddle.to_tensor(self.input, dtype=paddle.float32)
+            w = paddle.to_tensor(self.filter, dtype=paddle.float32)
             b = (
                 None
                 if self.bias is None
-                else dg.to_variable(self.bias, dtype=paddle.float32)
+                else paddle.to_tensor(self.bias, dtype=paddle.float32)
             )
             y = F.conv2d(
                 x,
12 changes: 6 additions & 6 deletions test/legacy_test/test_functional_conv2d_transpose.py
@@ -154,9 +154,9 @@ def static_graph_case_2(self):
 
     def dygraph_case(self):
         with dg.guard(self.place):
-            x = dg.to_variable(self.input)
-            weight = dg.to_variable(self.weight)
-            bias = None if self.no_bias else dg.to_variable(self.bias)
+            x = paddle.to_tensor(self.input)
+            weight = paddle.to_tensor(self.weight)
+            bias = None if self.no_bias else paddle.to_tensor(self.bias)
             y = F.conv2d_transpose(
                 x,
                 weight,
@@ -542,12 +542,12 @@ def static_graph_case(self):
 
     def dygraph_case(self):
         with dg.guard():
-            x = dg.to_variable(self.input, dtype=paddle.float32)
-            w = dg.to_variable(self.filter, dtype=paddle.float32)
+            x = paddle.to_tensor(self.input, dtype=paddle.float32)
+            w = paddle.to_tensor(self.filter, dtype=paddle.float32)
             b = (
                 None
                 if self.bias is None
-                else dg.to_variable(self.bias, dtype=paddle.float32)
+                else paddle.to_tensor(self.bias, dtype=paddle.float32)
             )
             y = F.conv2d_transpose(
                 x,
12 changes: 6 additions & 6 deletions test/legacy_test/test_functional_conv3d.py
@@ -156,9 +156,9 @@ def static_graph_case_2(self):
 
     def dygraph_case(self):
         with dg.guard(self.place):
-            x = dg.to_variable(self.input)
-            weight = dg.to_variable(self.weight)
-            bias = None if self.no_bias else dg.to_variable(self.bias)
+            x = paddle.to_tensor(self.input)
+            weight = paddle.to_tensor(self.weight)
+            bias = None if self.no_bias else paddle.to_tensor(self.bias)
             y = F.conv3d(
                 x,
                 weight,
@@ -509,12 +509,12 @@ def static_graph_case(self):
 
     def dygraph_case(self):
         with dg.guard():
-            x = dg.to_variable(self.input, dtype=paddle.float32)
-            w = dg.to_variable(self.filter, dtype=paddle.float32)
+            x = paddle.to_tensor(self.input, dtype=paddle.float32)
+            w = paddle.to_tensor(self.filter, dtype=paddle.float32)
             b = (
                 None
                 if self.bias is None
-                else dg.to_variable(self.bias, dtype=paddle.float32)
+                else paddle.to_tensor(self.bias, dtype=paddle.float32)
             )
             y = F.conv3d(
                 x,
12 changes: 6 additions & 6 deletions test/legacy_test/test_functional_conv3d_transpose.py
@@ -157,9 +157,9 @@ def static_graph_case_2(self):
 
     def dygraph_case(self):
         with dg.guard(self.place):
-            x = dg.to_variable(self.input)
-            weight = dg.to_variable(self.weight)
-            bias = None if self.no_bias else dg.to_variable(self.bias)
+            x = paddle.to_tensor(self.input)
+            weight = paddle.to_tensor(self.weight)
+            bias = None if self.no_bias else paddle.to_tensor(self.bias)
             y = F.conv3d_transpose(
                 x,
                 weight,
@@ -567,12 +567,12 @@ def static_graph_case(self):
 
     def dygraph_case(self):
         with dg.guard():
-            x = dg.to_variable(self.input, dtype=paddle.float32)
-            w = dg.to_variable(self.filter, dtype=paddle.float32)
+            x = paddle.to_tensor(self.input, dtype=paddle.float32)
+            w = paddle.to_tensor(self.filter, dtype=paddle.float32)
             b = (
                 None
                 if self.bias is None
-                else dg.to_variable(self.bias, dtype=paddle.float32)
+                else paddle.to_tensor(self.bias, dtype=paddle.float32)
            )
             y = F.conv3d_transpose(
                 x,
4 changes: 2 additions & 2 deletions test/legacy_test/test_gather_nd_op.py
@@ -695,8 +695,8 @@ def test_imperative(self):
         paddle.disable_static()
         input_1 = np.array([[1, 2], [3, 4], [5, 6]])
         index_1 = np.array([[1]])
-        input = base.dygraph.to_variable(input_1)
-        index = base.dygraph.to_variable(index_1)
+        input = paddle.to_tensor(input_1)
+        index = paddle.to_tensor(index_1)
         output = paddle.gather(input, index)
         output_np = output.numpy()
         expected_output = np.array([[3, 4]])
4 changes: 2 additions & 2 deletions test/legacy_test/test_gelu_op.py
@@ -45,7 +45,7 @@ def _test_case1_cpu(self, approximate):
 
         place = base.CPUPlace()
         with dg.guard(place) as g:
-            x_var = dg.to_variable(x)
+            x_var = paddle.to_tensor(x)
             y_var = F.gelu(x_var, approximate)
             y_test = y_var.numpy()
         np.testing.assert_allclose(y_ref, y_test, rtol=1e-05, atol=1e-08)
@@ -56,7 +56,7 @@ def _test_case1_gpu(self, approximate):
 
         place = base.CUDAPlace(0)
         with dg.guard(place) as g:
-            x_var = dg.to_variable(x)
+            x_var = paddle.to_tensor(x)
             y_var = F.gelu(x_var, approximate)
             y_test = y_var.numpy()
         np.testing.assert_allclose(y_ref, y_test, rtol=1e-05, atol=1e-08)
14 changes: 7 additions & 7 deletions test/legacy_test/test_grad_clip_minimize.py
@@ -16,8 +16,8 @@
 
 import numpy as np
 
+import paddle
 from paddle import base
-from paddle.base.dygraph.base import to_variable
 from paddle.nn import ClipGradByGlobalNorm, ClipGradByNorm, ClipGradByValue
 
 
@@ -64,8 +64,8 @@ def get_dygrap_global_norm_result(self):
         gloabl_norm_clip = ClipGradByGlobalNorm(self.max_global_norm)
         p_g_var = []
         for p, g in self.para_and_grad:
-            new_p = to_variable(p)
-            new_g = to_variable(g)
+            new_p = paddle.to_tensor(p)
+            new_g = paddle.to_tensor(g)
             p_g_var.append((new_p, new_g))
 
         new_p_g_var = gloabl_norm_clip(p_g_var)
@@ -136,8 +136,8 @@ def get_dygrap_norm_result(self):
         norm_clip = ClipGradByNorm(self.max_norm)
         p_g_var = []
         for p, g in self.para_and_grad:
-            new_p = to_variable(p)
-            new_g = to_variable(g)
+            new_p = paddle.to_tensor(p)
+            new_g = paddle.to_tensor(g)
             p_g_var.append((new_p, new_g))
 
         new_p_g_var = norm_clip(p_g_var)
@@ -204,8 +204,8 @@ def get_dygrap_clip_result(self):
         value_clip = ClipGradByValue(max=self.max_value, min=self.min_value)
         p_g_var = []
         for p, g in self.para_and_grad:
-            new_p = to_variable(p)
-            new_g = to_variable(g)
+            new_p = paddle.to_tensor(p)
+            new_g = paddle.to_tensor(g)
             p_g_var.append((new_p, new_g))
 
         new_p_g_var = value_clip(p_g_var)
20 changes: 8 additions & 12 deletions test/legacy_test/test_gradient_clip.py
@@ -700,7 +700,7 @@ def test_gradient_clip(self):
         with base.dygraph.guard():
             linear = paddle.nn.Linear(5, 5)
             inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
-            out = linear(base.dygraph.to_variable(inputs))
+            out = linear(paddle.to_tensor(inputs))
             loss = paddle.mean(out)
             loss.backward()
             sgd_optimizer = paddle.optimizer.SGD(
@@ -722,12 +722,8 @@ def setUp(self):
 
     def check_clip_result(self, loss, optimizer):
         # if grad is None
-        x = base.dygraph.to_variable(
-            np.array([2, 3]).astype("float32"), name="x"
-        )
-        y = base.dygraph.to_variable(
-            np.array([3, 4]).astype("float32"), name="y"
-        )
+        x = paddle.to_tensor(np.array([2, 3]).astype("float32"))
+        y = paddle.to_tensor(np.array([3, 4]).astype("float32"))
         assert len(self.clip1([(x, x), (x, y), (x, None)])) == 2
         # get params and grads from network
         opt, params_grads = optimizer.minimize(loss)
@@ -762,10 +758,10 @@ def setUp(self):
 
     def check_clip_result(self, loss, optimizer):
         # if grad is None
-        x = base.dygraph.to_variable(np.array([2, 3]).astype("float32"))
+        x = paddle.to_tensor(np.array([2, 3]).astype("float32"))
         assert len(self.clip([(x, None)])) == 0
         # get params and grads from network
-        self.clip([(base.dygraph.to_variable(np.array([2, 3])), None)])
+        self.clip([(paddle.to_tensor(np.array([2, 3])), None)])
         opt, params_grads = optimizer.minimize(loss)
         _, grads = zip(*params_grads)
         params_grads = self.clip(params_grads)
@@ -791,7 +787,7 @@ def setUp(self):
 
     def check_clip_result(self, loss, optimizer):
         # if grad is None
-        x = base.dygraph.to_variable(np.array([2, 3]).astype("float32"))
+        x = paddle.to_tensor(np.array([2, 3]).astype("float32"))
         assert len(self.clip([(x, None)])) == 0
         # get params and grads from network
         opt, params_grads = optimizer.minimize(loss)
@@ -839,7 +835,7 @@ def test_gradient_clip(self):
                 'float32'
             )
             with paddle.amp.auto_cast(level='O2'):
-                out = model(base.dygraph.to_variable(inputs))
+                out = model(paddle.to_tensor(inputs))
                 loss = paddle.mean(out)
             scaled = scaler.scale(loss)
             scaled.backward()
@@ -884,7 +880,7 @@ def test_gradient_clip(self):
         with base.dygraph.guard():
             inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
             linear = paddle.nn.Linear(5, 5)
-            out = linear(base.dygraph.to_variable(inputs))
+            out = linear(paddle.to_tensor(inputs))
             loss = paddle.mean(out)
             loss.backward()
             # before clip
12 changes: 6 additions & 6 deletions test/legacy_test/test_group_norm_op.py
@@ -589,12 +589,12 @@ def test_dygraph_api(self):
         input = np.random.random(self.shape).astype(self.dtype)
 
         with base.dygraph.guard():
-            tensor_1 = base.dygraph.to_variable(input)
+            tensor_1 = paddle.to_tensor(input)
             tensor_1.stop_gradient = False
             groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
-            tensor_eager_1 = base.dygraph.to_variable(input)
+            tensor_eager_1 = paddle.to_tensor(input)
             tensor_eager_1.stop_gradient = False
             groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret2 = groupNorm_eager(tensor_eager_1)
@@ -609,12 +609,12 @@ def test_dygraph_api(self):
         input = np.random.random(self.shape).astype(self.dtype)
 
         with base.dygraph.guard():
-            tensor_1 = base.dygraph.to_variable(input)
+            tensor_1 = paddle.to_tensor(input)
             tensor_1.stop_gradient = False
             groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
-            tensor_eager_1 = base.dygraph.to_variable(input)
+            tensor_eager_1 = paddle.to_tensor(input)
             tensor_eager_1.stop_gradient = False
             groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret2 = groupNorm_eager(tensor_eager_1)
@@ -635,12 +635,12 @@ def test_dygraph_api(self):
         input = np.random.random(self.shape).astype(self.dtype)
 
         with base.dygraph.guard():
-            tensor_1 = base.dygraph.to_variable(input)
+            tensor_1 = paddle.to_tensor(input)
             tensor_1.stop_gradient = False
             groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret1 = groupNorm(tensor_1)
             ret1.backward()
-            tensor_eager_1 = base.dygraph.to_variable(input)
+            tensor_eager_1 = paddle.to_tensor(input)
             tensor_eager_1.stop_gradient = False
             groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
             ret2 = groupNorm_eager(tensor_eager_1)
