diff --git a/.travis.yml b/.travis.yml
index 215d2cf192216b573e514b62a6cad9c510cd2b0a..bd5987b873f292c98119113fffed83867cfca901 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,13 +2,11 @@
 # Read more under http://docs.travis-ci.com/user/build-configuration/
 # THIS SCRIPT IS SUPPOSED TO BE AN EXAMPLE. MODIFY IT ACCORDING TO YOUR NEEDS!
 
-dist: xenial
+dist: bionic
 sudo: false
 language: python
 virtualenv:
   system_site_packages: false
-before_install:
-  - sudo apt-get install -y ninja-build build-essential nvidia-cuda-toolkit
 addons:
   apt:
     update: true
@@ -28,19 +26,40 @@ matrix:
       env: DISTRIB="conda" PYTHON_VERSION="3.6" COVERAGE="false" LINT="false"
       before_install:
         - sudo apt-get install -y ninja-build build-essential nvidia-cuda-toolkit
-    - name: "Documentation and Lint"
+    - name: "Lint and documentation test"
       env: DISTRIB="ubuntu" TOX_PYTHON_VERSION="py36" COVERAGE="false" LINT="true"
+    - name: "Python 3.7.2 on macOS"
+      os: osx
+      osx_image: xcode10.2  # Python 3.7.2 running on macOS 10.14.3
+      language: shell       # 'language: python' is an error on Travis CI macOS
+      before_install:
+       - brew update && brew upgrade python
+       - brew install ninja
+       - alias python=python3
+       - alias pip="python3 -m pip"
+       - shopt -s expand_aliases
+      before_cache:
+        - brew cleanup
+    - name: "Python 3.7.3 on Windows"
+      os: windows           # Windows 10.0.17134 N/A Build 17134
+      language: shell       # 'language: python' is an error on Travis CI Windows
+      before_install:
+       - choco install python
+       - python -m pip install --upgrade pip
+      env: PATH=/c/Python37:/c/Python37/Scripts:$PATH
 install:
   - source tests/travis_install.sh
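+  # Editable install of the package itself; tensorflow/torch are optional and must not fail the build if unavailable.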
+  - pip3 install -e .
+  - pip3 install tensorflow torch || echo "failed to install optional ML frameworks (tensorflow, torch)"
 before_script:
-  - git config --global user.email "you@example.com"
-  - git config --global user.name "Your Name"
+  - git config --global user.email "stephan.seitz@fau.de"
+  - git config --global user.name "Stephan Seitz"
 script:
   - export NO_GPU_EXECUTION=1
-  - pip install -e .
-  - pip install tensorflow torch
-  - if [[ "$LINT" == "false" ]]; then python setup.py test; fi
-  - if [[ "$LINT" == "true" ]]; then flake8 src;python setup.py doctest; fi
+  - if [[ "$LINT" == "true" ]]; then flake8 src;python setup.py doctest; exit 0; fi
+  - python setup.py test
+
 after_success:
   - if [[ "$COVERAGE" == "true" ]]; then coveralls || echo "failed"; codecov; fi
 after_script:
@@ -49,3 +67,5 @@ cache:
   pip: true
   directories:
     - $HOME/miniconda
+    - /c/Python37
+    - $HOME/Library/Caches/Homebrew
diff --git a/setup.cfg b/setup.cfg
index 984b9c6820c44b0fdd4ac1b59c31de590ba361ea..bb14d8e5b87504ebb4827484bf3f385acfa61733 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -42,7 +42,6 @@ test_requires =
     ansi2html
     pytest-cov
     tensorflow
-    torch
 # Require a specific Python version, e.g. Python 2.7 or >= 3.4
 # python_requires = >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
 
@@ -63,7 +62,6 @@ testing =
     ansi2html
     pytest-cov
     tensorflow
-    torch
 
 [options.entry_points]
 # Add here console scripts like:
diff --git a/src/pystencils_autodiff/backends/_pytorch.py b/src/pystencils_autodiff/backends/_pytorch.py
index 22242763a6830c571af6d155faf6c2f76e29e598..659a437db1b1babcda44be57379df2b3f81c24f1 100644
--- a/src/pystencils_autodiff/backends/_pytorch.py
+++ b/src/pystencils_autodiff/backends/_pytorch.py
@@ -1,7 +1,11 @@
 import uuid
 
 import numpy as np
-import torch
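+# torch is an optional dependency; swallow the ImportError so this module can be imported without it.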
+try:
+    import torch
+except ImportError:
+    pass
 
 try:
     import pycuda.autoinit
diff --git a/src/pystencils_autodiff/backends/_torch_native.py b/src/pystencils_autodiff/backends/_torch_native.py
index 1276e734f3dfca708714614dd4c8ff26cd5fcccf..fcd1ac9cd45b47f3ba7753622fd652040eb3daa3 100644
--- a/src/pystencils_autodiff/backends/_torch_native.py
+++ b/src/pystencils_autodiff/backends/_torch_native.py
@@ -4,7 +4,6 @@ from itertools import chain
 from os.path import dirname, isdir, isfile, join
 
 import jinja2
-import torch
 from appdirs import user_cache_dir
 
 import pystencils
@@ -17,6 +16,11 @@ from pystencils.cpu.kernelcreation import create_kernel
 from pystencils.gpucuda.kernelcreation import create_cuda_kernel
 from pystencils_autodiff.backends._pytorch import numpy_dtype_to_torch
 
+try:
+    import torch
+except ImportError:
+    pass
+
 
 def _read_file(file):
     with open(file, 'r') as f:
diff --git a/tests/backends/test_torch_native_compilation.py b/tests/backends/test_torch_native_compilation.py
index f552192d02eea20a2e7eea7a60b3aec5b882983a..0ba80745995e7851747dfe4da46b8ce283fa3390 100644
--- a/tests/backends/test_torch_native_compilation.py
+++ b/tests/backends/test_torch_native_compilation.py
@@ -4,6 +4,7 @@
 #
 
 import os
+import shutil
 from os.path import dirname, isfile, join
 
 # TODO: from pystencils.backends.cudabackend import generate_cuda
@@ -11,7 +12,6 @@ import appdirs
 import jinja2
 import numpy as np
 import pytest
-import torch
 
 import pystencils
 import pystencils_autodiff
@@ -20,6 +20,11 @@ from pystencils.backends.cbackend import generate_c
 from pystencils.gpucuda.kernelcreation import create_cuda_kernel
 from pystencils_autodiff.backends._torch_native import create_autograd_function, generate_torch
 
+torch = pytest.importorskip('torch')
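+# Building torch extensions requires ninja; skip the whole module when it is not on PATH.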
+pytestmark = pytest.mark.skipif(shutil.which('ninja') is None,
+                                reason='torch compilation requires ninja')
+
 PROJECT_ROOT = dirname
 
 
@@ -50,23 +54,26 @@ def test_jit():
 def test_torch_native_compilation():
     x, y = pystencils.fields('x, y: float32[2d]')
 
-    assignments = pystencils.AssignmentCollection({
-        y.center(): x.center()**2
-    }, {})
+    assignments = pystencils.AssignmentCollection({y.center(): x.center()**2},
+                                                  {})
     autodiff = pystencils_autodiff.AutoDiffOp(assignments)
     backward_assignments = autodiff.backward_assignments
 
     print(assignments)
     print(backward_assignments)
 
-    template_string = read_file(join(dirname(__file__),
-                                     '../../src/pystencils_autodiff/backends/torch_native_cuda.tmpl.cpp'))
+    template_string = read_file(
+        join(
+            dirname(__file__),
+            '../../src/pystencils_autodiff/backends/torch_native_cuda.tmpl.cpp'
+        ))
     template = jinja2.Template(template_string)
 
     print(template_string)
 
     forward_kernel = create_cuda_kernel(assignments.all_assignments).body
-    backward_kernel = create_cuda_kernel(backward_assignments.all_assignments).body
+    backward_kernel = create_cuda_kernel(
+        backward_assignments.all_assignments).body
 
     forward_code = generate_c(forward_kernel)
     backward_code = generate_c(backward_kernel)
@@ -74,20 +81,31 @@ def test_torch_native_compilation():
     output = template.render(
         forward_tensors=[f.name for f in autodiff.forward_fields],
         forward_input_tensors=[f.name for f in autodiff.forward_input_fields],
-        forward_output_tensors=[f.name for f in autodiff.forward_output_fields],
-        backward_tensors=[f.name for f in autodiff.backward_fields + autodiff.forward_input_fields],
-        backward_input_tensors=[f.name for f in autodiff.backward_input_fields],
-        backward_output_tensors=[f.name for f in autodiff.backward_output_fields],
+        forward_output_tensors=[
+            f.name for f in autodiff.forward_output_fields
+        ],
+        backward_tensors=[
+            f.name
+            for f in autodiff.backward_fields + autodiff.forward_input_fields
+        ],
+        backward_input_tensors=[
+            f.name for f in autodiff.backward_input_fields
+        ],
+        backward_output_tensors=[
+            f.name for f in autodiff.backward_output_fields
+        ],
         forward_kernel=forward_code,
         backward_kernel=backward_code,
         dimensions=range(2),
         kernel_name="square",
-        dtype="float"
-    )
+        dtype="float")
     print(output)
 
-    template_string = read_file(join(dirname(__file__),
-                                     '../../src/pystencils_autodiff/backends/torch_native_cuda.tmpl.cu'))
+    template_string = read_file(
+        join(
+            dirname(__file__),
+            '../../src/pystencils_autodiff/backends/torch_native_cuda.tmpl.cu')
+    )
     template = jinja2.Template(template_string)
 
     print(template_string)
@@ -96,7 +114,9 @@ def test_torch_native_compilation():
         forward_tensors=[f for f in autodiff.forward_fields],
         forward_input_tensors=[f for f in autodiff.forward_input_fields],
         forward_output_tensors=[f for f in autodiff.forward_output_fields],
-        backward_tensors=[f for f in autodiff.backward_fields + autodiff.forward_input_fields],
+        backward_tensors=[
+            f for f in autodiff.backward_fields + autodiff.forward_input_fields
+        ],
         backward_input_tensors=[f for f in autodiff.backward_input_fields],
         backward_output_tensors=[f for f in autodiff.backward_output_fields],
         forward_kernel=forward_code,
@@ -106,12 +126,14 @@ def test_torch_native_compilation():
         forward_blocks=str({1, 1, 1}),
         forward_threads=str({1, 1, 1}),
         kernel_name="square",
-        dimensions=range(2)
-    )
+        dimensions=range(2))
     print(output)
 
-    template_string = read_file(join(dirname(__file__),
-                                     '../../src/pystencils_autodiff/backends/torch_native_cpu.tmpl.cpp'))
+    template_string = read_file(
+        join(
+            dirname(__file__),
+            '../../src/pystencils_autodiff/backends/torch_native_cpu.tmpl.cpp')
+    )
     template = jinja2.Template(template_string)
 
     print(template_string)
@@ -119,16 +141,24 @@ def test_torch_native_compilation():
     output = template.render(
         forward_tensors=[f.name for f in autodiff.forward_fields],
         forward_input_tensors=[f.name for f in autodiff.forward_input_fields],
-        forward_output_tensors=[f.name for f in autodiff.forward_output_fields],
-        backward_tensors=[f.name for f in autodiff.backward_fields + autodiff.forward_input_fields],
-        backward_input_tensors=[f.name for f in autodiff.backward_input_fields],
-        backward_output_tensors=[f.name for f in autodiff.backward_output_fields],
+        forward_output_tensors=[
+            f.name for f in autodiff.forward_output_fields
+        ],
+        backward_tensors=[
+            f.name
+            for f in autodiff.backward_fields + autodiff.forward_input_fields
+        ],
+        backward_input_tensors=[
+            f.name for f in autodiff.backward_input_fields
+        ],
+        backward_output_tensors=[
+            f.name for f in autodiff.backward_output_fields
+        ],
         forward_kernel=forward_code,
         backward_kernel=backward_code,
         kernel_name="square",
         dtype="float",
-        dimensions=range(2)
-    )
+        dimensions=range(2))
     print(output)
 
 
@@ -136,33 +166,36 @@ def test_torch_native_compilation():
 def test_generate_torch_gpu():
     x, y = pystencils.fields('x, y: float32[2d]')
 
-    assignments = pystencils.AssignmentCollection({
-        y.center(): x.center()**2
-    }, {})
+    assignments = pystencils.AssignmentCollection({y.center(): x.center()**2},
+                                                  {})
     autodiff = pystencils_autodiff.AutoDiffOp(assignments)
 
-    op_cuda = generate_torch(appdirs.user_cache_dir('pystencils'), autodiff, is_cuda=True, dtype=np.float32)
+    op_cuda = generate_torch(appdirs.user_cache_dir('pystencils'),
+                             autodiff,
+                             is_cuda=True,
+                             dtype=np.float32)
     assert op_cuda is not None
 
 
 def test_generate_torch_cpu():
     x, y = pystencils.fields('x, y: float32[2d]')
 
-    assignments = pystencils.AssignmentCollection({
-        y.center(): x.center()**2
-    }, {})
+    assignments = pystencils.AssignmentCollection({y.center(): x.center()**2},
+                                                  {})
     autodiff = pystencils_autodiff.AutoDiffOp(assignments)
 
-    op_cpp = generate_torch(appdirs.user_cache_dir('pystencils'), autodiff, is_cuda=False, dtype=np.float32)
+    op_cpp = generate_torch(appdirs.user_cache_dir('pystencils'),
+                            autodiff,
+                            is_cuda=False,
+                            dtype=np.float32)
     assert op_cpp is not None
 
 
 def test_execute_torch():
     x, y = pystencils.fields('x, y: float64[32,32]')
 
-    assignments = pystencils.AssignmentCollection({
-        y.center(): 5 + x.center()
-    }, {})
+    assignments = pystencils.AssignmentCollection({y.center(): 5 + x.center()},
+                                                  {})
     autodiff = pystencils_autodiff.AutoDiffOp(assignments)
 
     x_tensor = pystencils_autodiff.torch_tensor_from_field(x, 1, cuda=False)
@@ -174,13 +207,13 @@ def test_execute_torch():
     assert op_cpp is not None
 
 
-@pytest.mark.skipif('NO_GPU_EXECUTION' in os.environ, reason='Skip GPU execution tests')
+@pytest.mark.skipif('NO_GPU_EXECUTION' in os.environ,
+                    reason='Skip GPU execution tests')
 def test_execute_torch_gpu():
     x, y = pystencils.fields('x, y: float64[32,32]')
 
-    assignments = pystencils.AssignmentCollection({
-        y.center(): 5 + x.center()
-    }, {})
+    assignments = pystencils.AssignmentCollection({y.center(): 5 + x.center()},
+                                                  {})
     autodiff = pystencils_autodiff.AutoDiffOp(assignments)
 
     x_tensor = pystencils_autodiff.torch_tensor_from_field(x, 3, cuda=True)
@@ -193,4 +226,3 @@ def test_execute_torch_gpu():
     rtn = op_cuda.forward()
     print(y_tensor)
     print(rtn)
-
diff --git a/tests/test_autodiff.py b/tests/test_autodiff.py
index 4d917e98c08654422f596bfa1f2eed16b4db3bec..3c530eb7451bd3b145bd97c329fb01f0fdb6d367 100644
--- a/tests/test_autodiff.py
+++ b/tests/test_autodiff.py
@@ -49,4 +49,3 @@ def test_simple_2d_check_raw_assignments():
     for diff_mode in DiffModes:
         pystencils_autodiff.create_backward_assignments(
             forward_assignments, diff_mode=diff_mode)
-
diff --git a/tests/test_tfmad.py b/tests/test_tfmad.py
index 8b1820945601e21951226a83c8590e088247611d..87efa0c974ccb342144decc7c53efa5e6cbb38d3 100644
--- a/tests/test_tfmad.py
+++ b/tests/test_tfmad.py
@@ -4,8 +4,6 @@ import os
 import numpy as np
 import pytest
 import sympy as sp
-import tensorflow as tf
-import torch
 
 import pystencils as ps
 import pystencils_autodiff
@@ -46,8 +44,8 @@ def test_tfmad_two_stencils():
     print(assignment_collection)
 
     print('Backward')
-    auto_diff = pystencils_autodiff.AutoDiffOp(
-        assignment_collection, diff_mode='transposed-forward')
+    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
+                                               diff_mode='transposed-forward')
     backward = auto_diff.backward_assignments
     print(backward)
     print('Forward output fields (to check order)')
@@ -56,13 +54,15 @@ def test_tfmad_two_stencils():
     print(auto_diff)
 
 
-@pytest.mark.skipif("NO_TENSORFLOW_TEST" in os.environ, reason="Requires Tensorflow")
 @pytest.mark.skipif("TRAVIS" in os.environ, reason="Temporary skip")
 def test_tfmad_gradient_check():
+    tf = pytest.importorskip('tensorflow')
+
     a, b, out = ps.fields("a, b, out: double[21,13]")
     print(a.shape)
 
-    cont = ps.fd.Diff(a, 0) - ps.fd.Diff(a, 1) - ps.fd.Diff(b, 0) + ps.fd.Diff(b, 1)
+    cont = ps.fd.Diff(a, 0) - ps.fd.Diff(a, 1) - ps.fd.Diff(b, 0) + ps.fd.Diff(
+        b, 1)
     discretize = ps.fd.Discretization2ndOrder(dx=1)
     discretization = discretize(cont)
 
@@ -72,8 +72,8 @@ def test_tfmad_gradient_check():
     print(assignment_collection)
 
     print('Backward')
-    auto_diff = pystencils_autodiff.AutoDiffOp(
-        assignment_collection, diff_mode='transposed-forward')
+    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
+                                               diff_mode='transposed-forward')
     backward = auto_diff.backward_assignments
     print(backward)
     print('Forward output fields (to check order)')
@@ -87,7 +87,11 @@ def test_tfmad_gradient_check():
         sess.run(tf.global_variables_initializer())
 
         gradient_error = compute_gradient_error_without_border(
-            [a_tensor, b_tensor], [a.shape, b.shape], out_tensor, out.shape, num_border_pixels=2, ndim=2)
+            [a_tensor, b_tensor], [a.shape, b.shape],
+            out_tensor,
+            out.shape,
+            num_border_pixels=2,
+            ndim=2)
         print('error: %s' % gradient_error.max_error)
 
         assert any(e < 1e-4 for e in gradient_error.values())
@@ -100,30 +104,31 @@ def check_tfmad_vector_input_data(args):
 
     # create arrays
     c_arr = np.zeros(domain_shape)
-    v_arr = np.zeros(domain_shape + (ndim,))
+    v_arr = np.zeros(domain_shape + (ndim, ))
 
     # create fields
-    c, v, c_next = ps.fields("c, v(2), c_next: % s[ % i, % i]" % ("float" if dtype == np.float32 else "double",
-                                                                  domain_shape[0],
-                                                                  domain_shape[1]),
+    c, v, c_next = ps.fields("c, v(2), c_next: % s[ % i, % i]" %
+                             ("float" if dtype == np.float32 else "double",
+                              domain_shape[0], domain_shape[1]),
                              c=c_arr,
                              v=v_arr,
                              c_next=c_arr)
 
     # write down advection diffusion pde
     # the equation is represented by a single term and an implicit "=0" is assumed.
-    adv_diff_pde = ps.fd.transient(
-        c) - ps.fd.diffusion(c, sp.Symbol("D")) + ps.fd.advection(c, v)
+    adv_diff_pde = ps.fd.transient(c) - ps.fd.diffusion(
+        c, sp.Symbol("D")) + ps.fd.advection(c, v)
 
     discretize = ps.fd.Discretization2ndOrder(args.dx, args.dt)
     discretization = discretize(adv_diff_pde)
-    discretization = discretization.subs(
-        sp.Symbol("D"), args.diffusion_coefficient)
+    discretization = discretization.subs(sp.Symbol("D"),
+                                         args.diffusion_coefficient)
     forward_assignments = ps.AssignmentCollection(
         [ps.Assignment(c_next.center(), discretization)], [])
 
     autodiff = pystencils_autodiff.AutoDiffOp(
-        forward_assignments, diff_mode='transposed-forward')  # , constant_fields=[v]
+        forward_assignments,
+        diff_mode='transposed-forward')  # , constant_fields=[v]
 
     print('Forward assignments:')
     print(autodiff.forward_assignments)
@@ -133,32 +138,31 @@ def check_tfmad_vector_input_data(args):
 
 def test_tfmad_vector_input_data():
     parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '--domain_shape', default=(100, 30), nargs=2, type=int, help="")
-    parser.add_argument(
-        '--dx', default=1, type=float, help="")
-    parser.add_argument(
-        '--dt', default=0.01, type=float, help="")
-    parser.add_argument(
-        '--diffusion_coefficient', default=1, type=float, help="")
-    parser.add_argument(
-        '--num_total_time_steps', default=100, type=int)
-    parser.add_argument(
-        '--num_time_steps_for_op', default=1, type=int)
-    parser.add_argument(
-        '--learning_rate', default=1e-2, type=float)
-    parser.add_argument(
-        '--dtype', default=np.float64, type=np.dtype)
-    parser.add_argument(
-        '--num_optimization_steps', default=2000, type=int)
+    parser.add_argument('--domain_shape',
+                        default=(100, 30),
+                        nargs=2,
+                        type=int,
+                        help="")
+    parser.add_argument('--dx', default=1, type=float, help="")
+    parser.add_argument('--dt', default=0.01, type=float, help="")
+    parser.add_argument('--diffusion_coefficient',
+                        default=1,
+                        type=float,
+                        help="")
+    parser.add_argument('--num_total_time_steps', default=100, type=int)
+    parser.add_argument('--num_time_steps_for_op', default=1, type=int)
+    parser.add_argument('--learning_rate', default=1e-2, type=float)
+    parser.add_argument('--dtype', default=np.float64, type=np.dtype)
+    parser.add_argument('--num_optimization_steps', default=2000, type=int)
     parser.add_argument('vargs', nargs='*')
 
     args = parser.parse_args()
     check_tfmad_vector_input_data(args)
 
 
-@pytest.mark.skipif("NO_TORCH_TEST" in os.environ, reason="Requires PyTorch")
 def test_tfmad_gradient_check_torch():
+    torch = pytest.importorskip('torch')
+
     a, b, out = ps.fields("a, b, out: float[21,13]")
 
     cont = ps.fd.Diff(a, 0) - ps.fd.Diff(a, 1) - \
@@ -172,8 +176,8 @@ def test_tfmad_gradient_check_torch():
     print(assignment_collection)
 
     print('Backward')
-    auto_diff = pystencils_autodiff.AutoDiffOp(
-        assignment_collection, diff_mode='transposed-forward')
+    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
+                                               diff_mode='transposed-forward')
     backward = auto_diff.backward_assignments
     print(backward)
     print('Forward output fields (to check order)')
@@ -182,7 +186,11 @@ def test_tfmad_gradient_check_torch():
     a_tensor = torch.zeros(*a.shape, dtype=torch.float64, requires_grad=True)
     b_tensor = torch.zeros(*b.shape, dtype=torch.float64, requires_grad=True)
 
-    function = auto_diff.create_tensorflow_op({a: a_tensor, b: b_tensor}, backend='torch')
+    function = auto_diff.create_tensorflow_op({
+        a: a_tensor,
+        b: b_tensor
+    },
+                                              backend='torch')
 
     torch.autograd.gradcheck(function.apply, [a_tensor, b_tensor])
 
@@ -230,20 +238,21 @@ def get_curl(input_field: ps.Field, curl_field: ps.Field):
 def test_tfmad_two_outputs():
 
     domain_shape = (20, 30)
-    vector_shape = domain_shape + (2,)
+    vector_shape = domain_shape + (2, )
 
-    curl_input_for_u = ps.Field.create_fixed_size(
-        field_name='curl_input', shape=domain_shape, index_dimensions=0)
-    u_field = ps.Field.create_fixed_size(
-        field_name='curl', shape=vector_shape, index_dimensions=1)
+    curl_input_for_u = ps.Field.create_fixed_size(field_name='curl_input',
+                                                  shape=domain_shape,
+                                                  index_dimensions=0)
+    u_field = ps.Field.create_fixed_size(field_name='curl',
+                                         shape=vector_shape,
+                                         index_dimensions=1)
 
-    curl_op = pystencils_autodiff.AutoDiffOp(get_curl(
-        curl_input_for_u, u_field), diff_mode="transposed-forward")
+    curl_op = pystencils_autodiff.AutoDiffOp(get_curl(curl_input_for_u,
+                                                      u_field),
+                                             diff_mode="transposed-forward")
 
     print('Forward')
     print(curl_op.forward_assignments)
 
     print('Backward')
     print(curl_op.backward_assignments)
-
-
diff --git a/tests/travis_install.sh b/tests/travis_install.sh
index 88388647ec2647dbf4f803fc6a8cfe486326c585..d37350e625281e087fce8aaa9654ad98d2a53b73 100644
--- a/tests/travis_install.sh
+++ b/tests/travis_install.sh
@@ -38,13 +38,16 @@ if [[ "$DISTRIB" == "conda" ]]; then
     # (prefer local venv, since the miniconda folder is cached)
     conda create -p ./.venv --yes python=${PYTHON_VERSION} pip virtualenv
     source activate ./.venv
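+    # Make "pip3" resolve to the activated conda venv's interpreter so the shared commands below install into it.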
+    alias pip3='python -m pip'
+    shopt -s expand_aliases
 fi
 
 # for all
-pip install -U pip setuptools
-pip install tox
-pip install codecov
-pip install sphinx
+pip3 install -U pip wheel setuptools
+pip3 install tox
+pip3 install codecov
+pip3 install sphinx
 
 if [[ -z "$PYSTENCIL_FROM_PIP" ]]; then
     pip install git+https://github.com/mabau/pystencils.git
@@ -54,7 +56,7 @@ fi
 pip install flake8
 
 if [[ "$COVERAGE" == "true" ]]; then
-    pip install -U pytest-cov pytest-virtualenv coverage coveralls flake8
+    pip3 install -U pytest-cov pytest-virtualenv coverage coveralls flake8
 fi