Skip to content

Commit

Permalink
bugfix
Browse files — browse the repository at this point in the history
  • Loading branch information
jayggh authored and GuoGuanghao committed Dec 8, 2022
1 parent 5477510 commit 15ac0eb
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions tests/test_ops/test_fused_bias_leakyrelu.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def setup_class(cls):
cls.input_tensor = torch.randn((2, 2, 2, 2),
requires_grad=True).cuda()
cls.bias = torch.zeros(2, requires_grad=True).cuda()
else:
elif IS_NPU_AVAILABLE:
cls.input_tensor = torch.randn((2, 2, 2, 2),
requires_grad=True).npu()
cls.bias = torch.zeros(2, requires_grad=True).npu()
Expand All @@ -47,7 +47,7 @@ def test_gradient(self, device):
self.input_tensor,
delta=1e-4,
pt_atol=1e-3)
else:
elif IS_NPU_AVAILABLE:
gradcheck(
FusedBiasLeakyReLU(2).npu(),
self.input_tensor,
Expand All @@ -60,7 +60,7 @@ def test_gradient(self, device):
self.input_tensor,
eps=1e-4,
atol=1e-3)
else:
elif IS_NPU_AVAILABLE:
gradcheck(
FusedBiasLeakyReLU(2).npu(),
self.input_tensor,
Expand All @@ -86,7 +86,7 @@ def test_gradgradient(self, device):
self.input_tensor,
eps=1e-4,
atol=1e-3)
else:
elif IS_NPU_AVAILABLE:
gradcheck(
FusedBiasLeakyReLU(2).npu(),
self.input_tensor,
Expand Down

0 comments on commit 15ac0eb

Please sign in to comment.