From 7c3f9d13786fa38f9c7577e8647d079557d0775d Mon Sep 17 00:00:00 2001
From: Masaki Kozuki
Date: Mon, 15 Aug 2022 12:02:33 -0700
Subject: [PATCH] Use `xmlrunner.XMLTestRunner` accordingly in `tests/L0/run_test.py` (#1451)

* Use xmlrunner.XMLTestRunner accordingly

TODO:
- [x] Remove `subTest` because it's not compatible with the current way of running L0 tests

Signed-off-by: Masaki Kozuki

* use `torch.testing` more to enable xmlrunner

Signed-off-by: Masaki Kozuki

* Remove `subTest` for xmlrunner

Signed-off-by: Masaki Kozuki

* removing subTest

Signed-off-by: Masaki Kozuki

* not depend on an env var

Signed-off-by: Masaki Kozuki

* fix syntax errors

* open with `"wb"`

* xml file per dir

Signed-off-by: Masaki Kozuki

* remove comment-out

Signed-off-by: Masaki Kozuki

* Refactor `TestTransformer`: define member methods (#5)

* setUpClass to define `test_` methods

Signed-off-by: Masaki Kozuki

* manually define

Signed-off-by: Masaki Kozuki

Signed-off-by: Masaki Kozuki

* add a missing test

Signed-off-by: Masaki Kozuki

* remove print

Signed-off-by: Masaki Kozuki

* remove ext

Signed-off-by: Masaki Kozuki

Signed-off-by: Masaki Kozuki
---
 tests/L0/run_mlp/test_mlp.py | 87 +++++-------------------------------
 1 file changed, 10 insertions(+), 77 deletions(-)

diff --git a/tests/L0/run_mlp/test_mlp.py b/tests/L0/run_mlp/test_mlp.py
index 0b51e58e4..a5cfb0c3b 100644
--- a/tests/L0/run_mlp/test_mlp.py
+++ b/tests/L0/run_mlp/test_mlp.py
@@ -18,7 +18,7 @@
 
 
 # note(crcrpar): On Ampere, this test should be run without TF32 enabled.
-class TestMLP(unittest.TestCase):
+class TestMLP(common_utils.TestCase):
     def test_creation(self):
         MLP(mlp_sizes)
 
@@ -85,83 +85,13 @@ def test_mlp(self, use_activation: str, bias: bool):
         ref_input = test_input.clone().detach().requires_grad_()
         mlp_out = mlp(test_input)
         ref_out = ref_mlp(ref_input)
-        torch.testing.assert_close(mlp_out, ref_out)
+        self.assertEqual(mlp_out, ref_out, msg=msg)
 
         # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
         mlp_out.mean().mul(10.0).backward()
         ref_out.mean().mul(10.0).backward()
-        torch.testing.assert_close(test_input.grad, ref_input.grad)
-        torch.testing.assert_close(mlp.biases[0].grad, ref_mlp[0].bias.grad)
-
-    @skipFlakyTest
-    def test_no_bias(self):
-        for use_activation in ["none", "relu", "sigmoid"]:
-            with self.subTest(use_activation=use_activation):
-                mlp = MLP(mlp_sizes, bias=False, activation=use_activation).cuda()
-
-                mlp_layers = []
-                for i in range(mlp.num_layers):
-                    linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1], bias=False)
-                    mlp.weights[i].data.copy_(linear.weight)
-                    mlp_layers.append(linear)
-                    if use_activation == "relu":
-                        mlp_layers.append(nn.ReLU(inplace=True))
-                    if use_activation == "sigmoid":
-                        mlp_layers.append(nn.Sigmoid())
-
-                ref_mlp = nn.Sequential(*mlp_layers).cuda()
-
-                test_input = (
-                    torch.empty(batch_size, mlp_sizes[0], device="cuda")
-                    .uniform_(-1.0, 1.0)
-                    .requires_grad_()
-                )
-                ref_input = test_input.clone().detach().requires_grad_()
-                mlp_out = mlp(test_input)
-                ref_out = ref_mlp(ref_input)
-                torch.testing.assert_close(mlp_out, ref_out)
-
-                # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
-                mlp_out.mean().mul(10.0).backward()
-                ref_out.mean().mul(10.0).backward()
-                torch.testing.assert_close(test_input.grad, ref_input.grad)
-                torch.testing.assert_close(mlp.weights[0].grad, ref_mlp[0].weight.grad)
-
-    @skipFlakyTest
-    def test_with_bias(self):
-        for use_activation in ["none", "relu", "sigmoid"]:
-            with self.subTest(use_activation=use_activation):
-                mlp = MLP(mlp_sizes, bias=True, activation=use_activation).cuda()
-
-                mlp_layers = []
-                for i in range(mlp.num_layers):
-                    linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1], bias=True)
-                    mlp.weights[i].data.copy_(linear.weight)
-                    mlp.biases[i].data.copy_(linear.bias)
-                    mlp_layers.append(linear)
-                    if use_activation == "relu":
-                        mlp_layers.append(nn.ReLU(inplace=True))
-                    if use_activation == "sigmoid":
-                        mlp_layers.append(nn.Sigmoid())
-
-                ref_mlp = nn.Sequential(*mlp_layers).cuda()
-
-                test_input = (
-                    torch.empty(batch_size, mlp_sizes[0], device="cuda")
-                    .uniform_(-1.0, 1.0)
-                    .requires_grad_()
-                )
-                ref_input = test_input.clone().detach().requires_grad_()
-                mlp_out = mlp(test_input)
-                ref_out = ref_mlp(ref_input)
-                torch.testing.assert_close(mlp_out, ref_out)
-
-                # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
-                mlp_out.mean().mul(10.0).backward()
-                ref_out.mean().mul(10.0).backward()
-                torch.testing.assert_close(test_input.grad, ref_input.grad)
-                torch.testing.assert_close(mlp.weights[0].grad, ref_mlp[0].weight.grad)
-                torch.testing.assert_close(mlp.biases[0].grad, ref_mlp[0].bias.grad)
+        self.assertEqual(test_input.grad, ref_input.grad, msg=msg)
+        self.assertEqual(mlp.weights[0].grad, ref_mlp[0].weight.grad, msg=msg)
 
     @skipFlakyTest
     def test_no_grad(self):
@@ -182,11 +112,11 @@ def test_no_grad(self):
         ref_input = test_input.clone().detach()
         mlp_out = mlp(test_input)
         ref_out = ref_mlp(ref_input)
-        torch.testing.assert_close(mlp_out, ref_out)
+        self.assertEqual(mlp_out, ref_out)
 
         # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
         ref_out.mean().mul(10.0).backward()
-        torch.testing.assert_close(mlp.weights[0].grad, ref_mlp[0].weight.grad)
+        self.assertEqual(mlp.weights[0].grad, ref_mlp[0].weight.grad)
 
     def test_performance_half(self):
         mlp = MLP(mlp_sizes).cuda().half()
@@ -255,6 +185,9 @@ def test_performance_half(self):
         )
 
 
+instantiate_device_type_tests(TestMLP, globals(), only_for=("cuda",))
+
+
 if __name__ == "__main__":
-    unittest.main()
+    common_utils.run_tests()
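Note: the diff above touches only tests/L0/run_mlp/test_mlp.py; the runner-side change the subject line refers to (tests/L0/run_test.py) is not shown in this patch. As a rough sketch of how the commit message's "xml file per dir" and open with `"wb"` notes typically combine with `xmlrunner.XMLTestRunner` (the directory names and the `run_dir` helper below are illustrative assumptions, not apex's actual runner):

    import unittest

    import xmlrunner  # provided by the unittest-xml-reporting package

    # Hypothetical L0 subdirectories; the real list lives in tests/L0/run_test.py.
    TEST_DIRS = ["run_amp", "run_fp16util", "run_mlp"]

    def run_dir(test_dir: str) -> bool:
        # Discover the unittest suite under one test directory.
        suite = unittest.TestLoader().discover(test_dir)
        # One XML report per directory; when given a file object, xmlrunner
        # writes bytes, so the stream must be opened with "wb".
        with open(f"{test_dir}.xml", "wb") as output:
            runner = xmlrunner.XMLTestRunner(output=output, verbosity=2)
            result = runner.run(suite)
        return result.wasSuccessful()

    if __name__ == "__main__":
        failed = [d for d in TEST_DIRS if not run_dir(d)]
        if failed:
            raise RuntimeError(f"failed test directories: {failed}")

Writing one report per directory keeps each suite's results in a separate JUnit-style XML file that CI can pick up individually, which is why the per-test `subTest` calls (invisible to the XML report) were removed in favor of separate test methods and parametrization.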