From bb88ec515e0729fa8bd160ae5f0822f158735b1a Mon Sep 17 00:00:00 2001
From: Stephan Seitz <stephan.seitz@fau.de>
Date: Wed, 7 Aug 2019 15:42:09 +0200
Subject: [PATCH] Instantiate function class

---
 src/pystencils_autodiff/backends/_torch_native.py | 5 ++---
 tests/backends/test_torch_native_compilation.py   | 4 ++--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/pystencils_autodiff/backends/_torch_native.py b/src/pystencils_autodiff/backends/_torch_native.py
index 2fd3c9e..1fab51c 100644
--- a/src/pystencils_autodiff/backends/_torch_native.py
+++ b/src/pystencils_autodiff/backends/_torch_native.py
@@ -139,8 +139,7 @@ def create_autograd_function(autodiff_obj, inputfield_to_tensor_dict, forward_lo
         os.mkdir(cache_dir)

     # TODO: create function and stuff
-    compiled_operation = generate_torch(cache_dir, autodiff_obj, is_cuda,
-                                        dtype)
+    compiled_operation = generate_torch(cache_dir, autodiff_obj, is_cuda, dtype)
     field_to_tensor_dict = inputfield_to_tensor_dict
     # Allocate output tensor for forward and backward pass
     for field in chain(autodiff_obj.forward_output_fields, autodiff_obj.backward_output_fields):
@@ -164,7 +163,7 @@ def create_autograd_function(autodiff_obj, inputfield_to_tensor_dict, forward_lo
         cls.saved = None
         cls.forward = forward
         cls.backward = backward
-        return cls
+        return cls()
     else:
         op = pystencils_autodiff.backends._pytorch.create_autograd_function(autodiff_obj,
                                                                             inputfield_to_tensor_dict,
diff --git a/tests/backends/test_torch_native_compilation.py b/tests/backends/test_torch_native_compilation.py
index f23fd21..6a7a053 100644
--- a/tests/backends/test_torch_native_compilation.py
+++ b/tests/backends/test_torch_native_compilation.py
@@ -168,7 +168,7 @@ def test_execute_torch():
     y_tensor = pystencils_autodiff.torch_tensor_from_field(y, 1, cuda=False)

     op_cpp = create_autograd_function(autodiff, {x: x_tensor, y: y_tensor})
-    foo = op_cpp.forward(x_tensor)
+    foo = op_cpp.forward()
     print(foo)
     assert op_cpp is not None

@@ -189,7 +189,7 @@ def test_execute_torch_gpu():
     op_cuda = create_autograd_function(autodiff, {x: x_tensor, y: y_tensor})
     assert op_cuda is not None

-    rtn = op_cuda.forward(y_tensor, x_tensor)
+    rtn = op_cuda.forward()
     print(y_tensor)
     print(rtn)
--
GitLab