From 19f36b03525ecd2d4c3660d705413f1ab6abdc83 Mon Sep 17 00:00:00 2001
From: Stephan Seitz <stephan.seitz@fau.de>
Date: Tue, 17 Dec 2019 17:46:28 +0100
Subject: [PATCH] Remove unnecessary xfails

---
 tests/backends/test_torch_native_compilation.py | 2 +-
 tests/test_tensorflow_jit.py                    | 5 +++--
 tests/test_tfmad.py                             | 8 ++++++--
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/tests/backends/test_torch_native_compilation.py b/tests/backends/test_torch_native_compilation.py
index e979c5d..c4db892 100644
--- a/tests/backends/test_torch_native_compilation.py
+++ b/tests/backends/test_torch_native_compilation.py
@@ -150,7 +150,7 @@ def test_torch_native_compilation_gpu():
     assert 'call_backward' in dir(torch_extension)
 
 
-@pytest.mark.parametrize('target', (pytest.param('gpu', marks=pytest.mark.xfail), 'cpu'))
+@pytest.mark.parametrize('target', (pytest.param('gpu', marks=pytest.mark.skipif('CI' in os.environ, reason="GPU too old on GITLAB CI")), 'cpu'))
 def test_execute_torch(target):
     import pycuda.autoinit
     module_name = "Ololol" + target
diff --git a/tests/test_tensorflow_jit.py b/tests/test_tensorflow_jit.py
index 2b62374..5bbb089 100644
--- a/tests/test_tensorflow_jit.py
+++ b/tests/test_tensorflow_jit.py
@@ -8,18 +8,19 @@
 
 """
 
+import os
 from os.path import exists
 
 import pytest
+import sympy
 
 import pystencils
 import pystencils_autodiff
-import sympy
 from pystencils_autodiff import create_backward_assignments
 from pystencils_autodiff.backends.astnodes import TensorflowModule
 
 
-@pytest.mark.xfail(reason="cannot link against cudart on GITLAB CI", strict=False)
+@pytest.mark.skipif('CI' in os.environ, reason="GPU too old on GITLAB CI")
 def test_tensorflow_jit_gpu():
 
     pytest.importorskip('tensorflow')
diff --git a/tests/test_tfmad.py b/tests/test_tfmad.py
index 7c2055c..e5789eb 100644
--- a/tests/test_tfmad.py
+++ b/tests/test_tfmad.py
@@ -231,7 +231,10 @@ def test_tfmad_gradient_check_torch_native(with_offsets, with_cuda):
         [dict[f] for f in auto_diff.forward_input_fields]), atol=1e-4, raise_exception=True)
 
 
-@pytest.mark.parametrize('with_cuda', (False, pytest.param(True, marks=pytest.mark.xfail)))
+@pytest.mark.parametrize('with_cuda',
+                         (False, pytest.param(True,
+                                              marks=pytest.mark.skipif('CI' in os.environ,
+                                                                       reason="GPU too old on GITLAB CI"))))
 def test_tfmad_gradient_check_two_outputs(with_cuda):
     torch = pytest.importorskip('torch')
     import torch
@@ -283,7 +286,8 @@ def test_tfmad_gradient_check_two_outputs(with_cuda):
 
 
 @pytest.mark.parametrize('gradient_check', (False, 'with_gradient_check'))
-@pytest.mark.parametrize('with_cuda', (False, pytest.param('with_cuda', marks=pytest.mark.xfail)))
+@pytest.mark.parametrize('with_cuda', (False, pytest.param('with_cuda',
+                                                           marks=pytest.mark.skipif('CI' in os.environ, reason="GPU too old on GITLAB CI"))))
 @pytest.mark.parametrize('with_offsets', (False, 'with_offsets'))
 # @pytest.mark.xfail(reason="", strict=False)
 def test_tfmad_gradient_check_tensorflow_native(with_offsets, with_cuda, gradient_check):
-- 
GitLab