diff --git a/src/pystencils_autodiff/backends/_torch_native.py b/src/pystencils_autodiff/backends/_torch_native.py
index 2fd3c9eae02900dda019b80c985bdf3ac87e77fb..1fab51c5e80c1934fa4ba9b98137b5a5c1b99bf6 100644
--- a/src/pystencils_autodiff/backends/_torch_native.py
+++ b/src/pystencils_autodiff/backends/_torch_native.py
@@ -139,8 +139,7 @@ def create_autograd_function(autodiff_obj, inputfield_to_tensor_dict, forward_lo
             os.mkdir(cache_dir)
         # TODO: create function and stuff
 
-        compiled_operation = generate_torch(cache_dir, autodiff_obj, is_cuda,
-                                            dtype)
+        compiled_operation = generate_torch(cache_dir, autodiff_obj, is_cuda, dtype)
         field_to_tensor_dict = inputfield_to_tensor_dict
         # Allocate output tensor for forward and backward pass
         for field in chain(autodiff_obj.forward_output_fields, autodiff_obj.backward_output_fields):
@@ -164,7 +163,7 @@ def create_autograd_function(autodiff_obj, inputfield_to_tensor_dict, forward_lo
         cls.saved = None
         cls.forward = forward
         cls.backward = backward
-        return cls
+        return cls()
     else:
         op = pystencils_autodiff.backends._pytorch.create_autograd_function(autodiff_obj,
                                                                             inputfield_to_tensor_dict,
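
The functional change in this file is the return value of create_autograd_function: the dynamically built wrapper class is now instantiated before being handed back, so callers receive an object with bound forward/backward methods instead of the bare class. A minimal sketch of that pattern, using a plain class and a clone standing in for the kernel compiled by generate_torch (make_op and its body are illustrative, not part of the library):

import torch

def make_op(inputfield_to_tensor_dict):
    # Tensors are captured at construction time, mirroring how the
    # patched code allocates output tensors before building the class.
    tensors = dict(inputfield_to_tensor_dict)

    def forward(self):
        # Stand-in for the compiled forward kernel.
        return {name: t.clone() for name, t in tensors.items()}

    def backward(self):
        # Stand-in for the compiled backward kernel.
        return {name: torch.ones_like(t) for name, t in tensors.items()}

    cls = type("CompiledOp", (object,), {})
    cls.forward = forward
    cls.backward = backward
    # Returning cls() instead of cls: forward/backward become bound
    # methods, which is what lets callers invoke op.forward() directly.
    return cls()
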
diff --git a/tests/backends/test_torch_native_compilation.py b/tests/backends/test_torch_native_compilation.py
index f23fd21db6c9b61b2f52cdb16f197dd1769fbbc9..6a7a053a5f8e1c5eeb2c22104968f073ad866b56 100644
--- a/tests/backends/test_torch_native_compilation.py
+++ b/tests/backends/test_torch_native_compilation.py
@@ -168,7 +168,7 @@ def test_execute_torch():
     y_tensor = pystencils_autodiff.torch_tensor_from_field(y, 1, cuda=False)
 
     op_cpp = create_autograd_function(autodiff, {x: x_tensor, y: y_tensor})
-    foo = op_cpp.forward(x_tensor)
+    foo = op_cpp.forward()
     print(foo)
     assert op_cpp is not None
 
@@ -189,7 +189,7 @@ def test_execute_torch_gpu():
 
     op_cuda = create_autograd_function(autodiff, {x: x_tensor, y: y_tensor})
     assert op_cuda is not None
-    rtn = op_cuda.forward(y_tensor, x_tensor)
+    rtn = op_cuda.forward()
     print(y_tensor)
     print(rtn)
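
With the op returned as an instance and its input tensors pre-bound at construction, the call sites shrink to the form the updated tests use; assuming the make_op sketch above:

x_tensor = torch.zeros(16, 16)
op = make_op({"x": x_tensor})
rtn = op.forward()  # no tensors passed, matching op_cpp.forward() above
print(rtn)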