
Commit

Use xmlrunner.XMLTestRunner accordingly in tests/L0/run_test.py (NVIDIA#1451)

* Use xmlrunner.XMLTestRunner accordingly

TODO:
- [x] Remove `subTest` because it's not compatible with the current way of running L0 tests

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* use `torch.testing` more to enable xmlrunner

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* Remove `subTest` for xmlrunner

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* removing subTest

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* Do not depend on an env var

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* fix syntax errors

* open with `"wb"`

* xml file per dir

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* Remove commented-out code

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* Refactor `TestTransformer`: define member methods (#5)

* setUpClass to define `test_` methods

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* manually define

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* add a missing test

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* remove print

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

* remove ext

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>

Signed-off-by: Masaki Kozuki <mkozuki@nvidia.com>
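
The bullets above ("use xmlrunner.XMLTestRunner accordingly", "open with `"wb"`", "xml file per dir", "not depend on an env var") describe the runner side of this change. A minimal sketch of that flow is shown below; it is not the actual `tests/L0/run_test.py`, and the directory list and report file names are illustrative assumptions.

```python
# Hypothetical sketch of a per-directory xmlrunner loop (not the real run_test.py).
import unittest

import xmlrunner  # provided by the unittest-xml-reporting package

TEST_DIRS = ["run_amp", "run_mlp", "run_optimizers"]  # illustrative subset of tests/L0


def main() -> None:
    errcode = 0
    for test_dir in TEST_DIRS:
        suite = unittest.TestLoader().discover(test_dir)
        # One XML report per test directory; xmlrunner expects a binary-mode stream,
        # hence opening with "wb".
        with open(f"TEST-{test_dir}.xml", "wb") as output:
            result = xmlrunner.XMLTestRunner(output=output, verbosity=2).run(suite)
        if not result.wasSuccessful():
            errcode = 1
    raise SystemExit(errcode)


if __name__ == "__main__":
    main()
```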
crcrpar authored and hubertlu-tw committed Dec 28, 2022
1 parent 14ce259 commit 7c3f9d1
Showing 1 changed file with 10 additions and 77 deletions: tests/L0/run_mlp/test_mlp.py
@@ -18,7 +18,7 @@


 # note(crcrpar): On Ampere, this test should be run without TF32 enabled.
-class TestMLP(unittest.TestCase):
+class TestMLP(common_utils.TestCase):
     def test_creation(self):
         MLP(mlp_sizes)

@@ -85,83 +85,13 @@ def test_mlp(self, use_activation: str, bias: bool):
         ref_input = test_input.clone().detach().requires_grad_()
         mlp_out = mlp(test_input)
         ref_out = ref_mlp(ref_input)
-        torch.testing.assert_close(mlp_out, ref_out)
+        self.assertEqual(mlp_out, ref_out, msg=msg)

         # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
         mlp_out.mean().mul(10.0).backward()
         ref_out.mean().mul(10.0).backward()
-        torch.testing.assert_close(test_input.grad, ref_input.grad)
-        torch.testing.assert_close(mlp.biases[0].grad, ref_mlp[0].bias.grad)
-
-    @skipFlakyTest
-    def test_no_bias(self):
-        for use_activation in ["none", "relu", "sigmoid"]:
-            with self.subTest(use_activation=use_activation):
-                mlp = MLP(mlp_sizes, bias=False, activation=use_activation).cuda()
-
-                mlp_layers = []
-                for i in range(mlp.num_layers):
-                    linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1], bias=False)
-                    mlp.weights[i].data.copy_(linear.weight)
-                    mlp_layers.append(linear)
-                    if use_activation == "relu":
-                        mlp_layers.append(nn.ReLU(inplace=True))
-                    if use_activation == "sigmoid":
-                        mlp_layers.append(nn.Sigmoid())
-
-                ref_mlp = nn.Sequential(*mlp_layers).cuda()
-
-                test_input = (
-                    torch.empty(batch_size, mlp_sizes[0], device="cuda")
-                    .uniform_(-1.0, 1.0)
-                    .requires_grad_()
-                )
-                ref_input = test_input.clone().detach().requires_grad_()
-                mlp_out = mlp(test_input)
-                ref_out = ref_mlp(ref_input)
-                torch.testing.assert_close(mlp_out, ref_out)
-
-                # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
-                mlp_out.mean().mul(10.0).backward()
-                ref_out.mean().mul(10.0).backward()
-                torch.testing.assert_close(test_input.grad, ref_input.grad)
-                torch.testing.assert_close(mlp.weights[0].grad, ref_mlp[0].weight.grad)
-
-    @skipFlakyTest
-    def test_with_bias(self):
-        for use_activation in ["none", "relu", "sigmoid"]:
-            with self.subTest(use_activation=use_activation):
-                mlp = MLP(mlp_sizes, bias=True, activation=use_activation).cuda()
-
-                mlp_layers = []
-                for i in range(mlp.num_layers):
-                    linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1], bias=True)
-                    mlp.weights[i].data.copy_(linear.weight)
-                    mlp.biases[i].data.copy_(linear.bias)
-                    mlp_layers.append(linear)
-                    if use_activation == "relu":
-                        mlp_layers.append(nn.ReLU(inplace=True))
-                    if use_activation == "sigmoid":
-                        mlp_layers.append(nn.Sigmoid())
-
-                ref_mlp = nn.Sequential(*mlp_layers).cuda()
-
-                test_input = (
-                    torch.empty(batch_size, mlp_sizes[0], device="cuda")
-                    .uniform_(-1.0, 1.0)
-                    .requires_grad_()
-                )
-                ref_input = test_input.clone().detach().requires_grad_()
-                mlp_out = mlp(test_input)
-                ref_out = ref_mlp(ref_input)
-                torch.testing.assert_close(mlp_out, ref_out)
-
-                # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
-                mlp_out.mean().mul(10.0).backward()
-                ref_out.mean().mul(10.0).backward()
-                torch.testing.assert_close(test_input.grad, ref_input.grad)
-                torch.testing.assert_close(mlp.weights[0].grad, ref_mlp[0].weight.grad)
-                torch.testing.assert_close(mlp.biases[0].grad, ref_mlp[0].bias.grad)
+        self.assertEqual(test_input.grad, ref_input.grad, msg=msg)
+        self.assertEqual(mlp.weights[0].grad, ref_mlp[0].weight.grad, msg=msg)

     @skipFlakyTest
     def test_no_grad(self):
@@ -182,11 +112,11 @@ def test_no_grad(self):
         ref_input = test_input.clone().detach()
         mlp_out = mlp(test_input)
         ref_out = ref_mlp(ref_input)
-        torch.testing.assert_close(mlp_out, ref_out)
+        self.assertEqual(mlp_out, ref_out)

         # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
         ref_out.mean().mul(10.0).backward()
-        torch.testing.assert_close(mlp.weights[0].grad, ref_mlp[0].weight.grad)
+        self.assertEqual(mlp.weights[0].grad, ref_mlp[0].weight.grad)

     def test_performance_half(self):
         mlp = MLP(mlp_sizes).cuda().half()
@@ -255,6 +185,9 @@ def test_performance_half(self):
         )


+instantiate_device_type_tests(TestMLP, globals(), only_for=("cuda",))
+
+
 if __name__ == "__main__":
-    unittest.main()
+    common_utils.run_tests()
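
For readers unfamiliar with `torch.testing._internal.common_utils`: its `TestCase.assertEqual` accepts tensors and compares them with dtype-aware tolerances, much like `torch.testing.assert_close`, and `common_utils.run_tests()` is the stock entry point. The sketch below is not part of the commit and assumes a PyTorch recent enough to ship `common_utils.parametrize`; it shows how a `subTest` loop can instead become separate parametrized tests. The decorators on the real `test_mlp` sit outside the hunks shown above, so treating it as `parametrize`-based is an assumption.

```python
# Illustrative only: the common_utils testing pattern this diff moves toward.
import torch
from torch.testing._internal import common_utils


class TestExample(common_utils.TestCase):
    # Each parameter value becomes its own test method, so an XML-producing runner
    # reports it as a separate case.
    @common_utils.parametrize("use_activation", ["none", "relu", "sigmoid"])
    def test_tensor_comparison(self, use_activation):
        msg = f"activation={use_activation}"
        a = torch.randn(8, 4)
        b = a + 1e-8  # perturbation well within the default float32 tolerances
        # Unlike plain unittest.TestCase.assertEqual, this handles tensors with tolerances.
        self.assertEqual(a, b, msg=msg)


common_utils.instantiate_parametrized_tests(TestExample)


if __name__ == "__main__":
    common_utils.run_tests()
```

This also suggests why the `subTest` loops could be dropped: the commit notes they are not compatible with how the L0 tests are now run, and parametrized test methods give the runner one result entry per case instead.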
