diff --git a/src/pystencils_autodiff/_autodiff.py b/src/pystencils_autodiff/_autodiff.py
index 4fe67dd5615b128ad58bccb7bea51825b4efa304..5e11401c2c49d19a3b713b33f717b32724cd8f7d 100644
--- a/src/pystencils_autodiff/_autodiff.py
+++ b/src/pystencils_autodiff/_autodiff.py
@@ -246,12 +246,13 @@ Backward:
             self._forward_assignments = forward_assignments
             self._forward_read_accesses = None
             self._forward_write_accesses = None
-            self._forward_input_fields = list(forward_assignments.free_fields)
-            self._forward_output_fields = list(forward_assignments.bound_fields)
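+            # free_fields/bound_fields are sets; sort by name for a deterministic field order.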
+            self._forward_input_fields = sorted(forward_assignments.free_fields, key=str)
+            self._forward_output_fields = sorted(forward_assignments.bound_fields, key=str)
             self._backward_assignments = backward_assignments
             self._backward_field_map = None
-            self._backward_input_fields = list(backward_assignments.free_fields)
-            self._backward_output_fields = list(backward_assignments.bound_fields)
+            self._backward_input_fields = sorted(backward_assignments.free_fields, key=str)
+            self._backward_output_fields = sorted(backward_assignments.bound_fields, key=str)
         else:
             # if no_caching:
             if diff_mode == 'transposed':
@@ -270,12 +271,12 @@ Backward:
                     self._forward_assignments = forward_assignments
                     self._forward_read_accesses = None
                     self._forward_write_accesses = None
-                    self._forward_input_fields = list(forward_assignments.free_fields)
-                    self._forward_output_fields = list(forward_assignments.bound_fields)
+                    self._forward_input_fields = sorted(forward_assignments.free_fields, key=str)
+                    self._forward_output_fields = sorted(forward_assignments.bound_fields, key=str)
                     self._backward_assignments = backward_assignments
                     self._backward_field_map = None
-                    self._backward_input_fields = list(backward_assignments.free_fields)
-                    self._backward_output_fields = list(backward_assignments.bound_fields)
+                    self._backward_input_fields = sorted(backward_assignments.free_fields, key=str)
+                    self._backward_output_fields = sorted(backward_assignments.bound_fields, key=str)
                     self._backward_field_map = None
             else:
                 raise NotImplementedError()
diff --git a/src/pystencils_autodiff/field_tensor_conversion.py b/src/pystencils_autodiff/field_tensor_conversion.py
index 9e0b054bda31259e37266433c73d27218c0b11c9..642a4cd072c268949e517f6439673591906fd1fe 100644
--- a/src/pystencils_autodiff/field_tensor_conversion.py
+++ b/src/pystencils_autodiff/field_tensor_conversion.py
@@ -64,7 +64,8 @@ def create_field_from_array_like(field_name, maybe_array, annotations=None):
         index_dimensions = 0
         field_type = FieldType.GENERIC
 
-    if 'tensorflow.python.' in str(type(maybe_array)) and 'Tensor' in str(type(maybe_array)):
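+    # Match any tensorflow Tensor type, not only those under 'tensorflow.python.'.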
+    if 'tensorflow' in str(type(maybe_array)) and 'Tensor' in str(type(maybe_array)):
         try:
             # This fails on eager execution
             return Field.create_fixed_size(maybe_array.name or field_name,
@@ -96,7 +97,11 @@ def coerce_to_field(field_name, array_like):
 
 def is_array_like(a):
     import pycuda.gpuarray
-    return (hasattr(a, '__array__') or isinstance(a, pycuda.gpuarray.GPUArray)) and not isinstance(a, sympy.Matrix)
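+    # Recognize TensorFlow and torch tensors explicitly, in case they do not expose __array__.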
+    return (hasattr(a, '__array__')
+            or isinstance(a, pycuda.gpuarray.GPUArray)
+            or ('tensorflow' in str(type(a)) and 'Tensor' in str(type(a)))
+            or 'torch.Tensor' in str(type(a))) and not isinstance(a, sympy.Matrix)
 
 
 def tf_constant_from_field(field, init_val=0):
diff --git a/tests/test_tfmad.py b/tests/test_tfmad.py
index b763a81a5b914d0b72f7db1690d057872abf79a5..74a9aa8a049b0a5cf426c37b0bbda99bd9ad6474 100644
--- a/tests/test_tfmad.py
+++ b/tests/test_tfmad.py
@@ -2,6 +2,7 @@ import os
 
 import numpy as np
 import pytest
+import sympy
 
 import pystencils as ps
 import pystencils_autodiff
@@ -230,6 +231,59 @@ def test_tfmad_gradient_check_torch_native(with_offsets, with_cuda):
         [dict[f] for f in auto_diff.forward_input_fields]), atol=1e-4, raise_exception=True)
 
 
+@pytest.mark.parametrize('with_cuda', (False, 'with_cuda'))
+def test_tfmad_gradient_check_two_outputs(with_cuda):
+    torch = pytest.importorskip('torch')
+
+    a, b, out1, out2, out3 = ps.fields("a, b, out1, out2, out3: float64[21,13]")
+
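+    # out3 reads b at an offset, so its adjoint also exercises the 'zeros' boundary handling.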
+    assignment_collection = ps.AssignmentCollection({
+        out1.center: a.center + b.center,
+        out2.center: a.center - b.center,
+        out3.center: sympy.exp(b[-1, 0])
+    })
+    print('Forward')
+    print(assignment_collection)
+
+    print('Backward')
+    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
+                                               boundary_handling='zeros',
+                                               diff_mode='transposed-forward')
+    print(auto_diff.backward_fields)
+    backward = auto_diff.backward_assignments
+    print(backward)
+    print('Forward input fields (to check order)')
+    print(auto_diff.forward_input_fields)
+
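+    # gradcheck is only reliable in double precision; only the inputs a and b are perturbed.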
+    a_tensor = torch.zeros(*a.shape, dtype=torch.float64, requires_grad=True).contiguous()
+    b_tensor = torch.zeros(*b.shape, dtype=torch.float64, requires_grad=True).contiguous()
+    out1_tensor = torch.zeros(*a.shape, dtype=torch.float64, requires_grad=True).contiguous()
+    out2_tensor = torch.zeros(*b.shape, dtype=torch.float64, requires_grad=True).contiguous()
+    out3_tensor = torch.zeros(*b.shape, dtype=torch.float64, requires_grad=True).contiguous()
+
+    if with_cuda:
+        a_tensor = a_tensor.cuda()
+        b_tensor = b_tensor.cuda()
+        out1_tensor = out1_tensor.cuda()
+        out2_tensor = out2_tensor.cuda()
+        out3_tensor = out3_tensor.cuda()
+
+    function = auto_diff.create_tensorflow_op(use_cuda=with_cuda, backend='torch_native')
+
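+    # Map each field to its tensor so inputs can be looked up in forward_input_fields order.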
+    field_to_tensor = {
+        a: a_tensor,
+        b: b_tensor,
+        out1: out1_tensor,
+        out2: out2_tensor,
+        out3: out3_tensor,
+    }
+    torch.autograd.gradcheck(function.apply, tuple(
+        [field_to_tensor[f] for f in auto_diff.forward_input_fields]), atol=1e-4, raise_exception=True)
+
+
 @pytest.mark.parametrize('gradient_check', (False, 'with_gradient_check'))
 @pytest.mark.parametrize('with_cuda', (False, pytest.param('with_cuda', marks=pytest.mark.xfail)))
 @pytest.mark.parametrize('with_offsets', (False, 'with_offsets'))
@@ -266,27 +320,20 @@ def test_tfmad_gradient_check_tensorflow_native(with_offsets, with_cuda, gradien
 
     # out_tensor = auto_diff.create_tensorflow_op(use_cuda=with_cuda, backend='tensorflow_native')
     # print(out_tensor)
-    with tf.Graph().as_default():
-        a_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
-        b_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
-        op = auto_diff.create_tensorflow_op(use_cuda=with_cuda, backend='tensorflow_native')
-        out_tensor = op(a=a_tensor, b=b_tensor)
-
-        with tf.compat.v1.Session() as sess:
-            sess.run(tf.compat.v1.global_variables_initializer())
-
-            if gradient_check:
-                gradient_error = compute_gradient_error_without_border(
-                    [a_tensor, b_tensor], [a.shape, b.shape],
-                    out_tensor,
-                    out.shape,
-                    num_border_pixels=0,
-                    ndim=2,
-                    debug=True)
-                print('error: %s' % gradient_error.max_error)
-                print('avg error: %s' % gradient_error.avg_error)
-
-                assert any(e < 1e-4 for e in gradient_error.values())
+
+    a_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
+    b_tensor = tf.Variable(np.zeros(b.shape, b.dtype.numpy_dtype))
+    op = auto_diff.create_tensorflow_op(use_cuda=with_cuda, backend='tensorflow_native')
+
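+    # tf.test.compute_gradient returns a (theoretical, numerical) pair of Jacobian lists, one per input.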
+    if gradient_check:
+        theoretical, numerical = tf.test.compute_gradient(
+            op,
+            [a_tensor, b_tensor],
+            delta=0.001
+        )
+        assert np.allclose(theoretical[0], numerical[0])
+        assert np.allclose(theoretical[1], numerical[1])
 
 
 def get_curl(input_field: ps.Field, curl_field: ps.Field):