diff --git a/tests/test_losses/test_gen_auxiliary_loss.py b/tests/test_losses/test_gen_auxiliary_loss.py
index 96fccf04e..9c9aa219d 100644
--- a/tests/test_losses/test_gen_auxiliary_loss.py
+++ b/tests/test_losses/test_gen_auxiliary_loss.py
@@ -35,7 +35,10 @@ def test_path_regularizer_cpu(self):
         with pytest.raises(AssertionError):
             _ = pl(1., 2, outputs_dict=output_dict)
 
-    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
+    @pytest.mark.skipif(
+        not torch.cuda.is_available()
+        or not hasattr(torch.backends.cudnn, 'allow_tf32'),
+        reason='requires cuda')
     def test_path_regularizer_cuda(self):
         gen = self.gen.cuda()
 
diff --git a/tests/test_ops/test_conv_gradfix.py b/tests/test_ops/test_conv_gradfix.py
index aaaa49764..12de521a7 100644
--- a/tests/test_ops/test_conv_gradfix.py
+++ b/tests/test_ops/test_conv_gradfix.py
@@ -16,7 +16,10 @@ def setup_class(cls):
         cls.input = torch.randn((1, 3, 32, 32))
         cls.weight = nn.Parameter(torch.randn(1, 3, 3, 3))
 
-    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
+    @pytest.mark.skipif(
+        not torch.cuda.is_available()
+        or not hasattr(torch.backends.cudnn, 'allow_tf32'),
+        reason='requires cuda')
     def test_conv2d_cuda(self):
         x = self.input.cuda()
         weight = self.weight.cuda()
@@ -32,7 +35,10 @@ def setup_class(cls):
         cls.input = torch.randn((1, 3, 32, 32))
         cls.weight = nn.Parameter(torch.randn(3, 1, 3, 3))
 
-    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
+    @pytest.mark.skipif(
+        not torch.cuda.is_available()
+        or not hasattr(torch.backends.cudnn, 'allow_tf32'),
+        reason='requires cuda')
     def test_conv2d_transposed_cuda(self):
         x = self.input.cuda()
         weight = self.weight.cuda()