From 830e5c0732f8cbee3d9465b27d600bf31579d9e9 Mon Sep 17 00:00:00 2001
From: Stephan Seitz <stephan.seitz@fau.de>
Date: Tue, 6 Aug 2019 18:33:41 +0200
Subject: [PATCH] Autoformat and minor fixes

---
 src/pystencils_autodiff/autodiff.py            |  2 +-
 .../backends/_torch_native.py                  |  6 ++--
 .../backends/test_torch_native_compilation.py  | 23 ++++++++-------
 tests/lbm/_layout_fixer.py                     |  2 +-
 tests/lbm/autodiff.py                          | 28 +++++++++----------
 tests/lbm/backends/_tensorflow.py              |  4 +--
 tests/lbm/backends/_torch_native.py            | 19 +++++++------
 7 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/src/pystencils_autodiff/autodiff.py b/src/pystencils_autodiff/autodiff.py
index e167b27..6aea11c 100644
--- a/src/pystencils_autodiff/autodiff.py
+++ b/src/pystencils_autodiff/autodiff.py
@@ -99,7 +99,7 @@ Backward:
         raise NotImplementedError()
 
     def __hash__(self):
-        return hash((self.forward_assignments, self.backward_assignments))
+        return hash((str(self.forward_assignments), str(self.backward_assignments)))
 
     def __repr__(self):
         return self._REPR_TEMPLATE.render(forward_assignments=str(self.forward_assignments),
diff --git a/src/pystencils_autodiff/backends/_torch_native.py b/src/pystencils_autodiff/backends/_torch_native.py
index 7fc8bdf..60599e4 100644
--- a/src/pystencils_autodiff/backends/_torch_native.py
+++ b/src/pystencils_autodiff/backends/_torch_native.py
@@ -1,21 +1,21 @@
 import os
-import types
 import uuid
+from itertools import chain
 from os.path import dirname, isdir, isfile, join
 
 import jinja2
 import torch
 from appdirs import user_cache_dir
 
+import pystencils
 import pystencils_autodiff
 import pystencils_autodiff.backends._pytorch
 from pystencils.astnodes import FieldShapeSymbol
-from pystencils_autodiff.backends._pytorch import numpy_dtype_to_torch
 from pystencils.backends.cbackend import generate_c
 from pystencils.backends.cuda_backend import CudaSympyPrinter, generate_cuda
 from pystencils.cpu.kernelcreation import create_kernel
 from pystencils.gpucuda.kernelcreation import create_cuda_kernel
-from itertools import chain
+from pystencils_autodiff.backends._pytorch import numpy_dtype_to_torch
 
 
 def _read_file(file):
diff --git a/tests/backends/test_torch_native_compilation.py b/tests/backends/test_torch_native_compilation.py
index f950f9d..00bad34 100644
--- a/tests/backends/test_torch_native_compilation.py
+++ b/tests/backends/test_torch_native_compilation.py
@@ -13,12 +13,11 @@
 import numpy as np
 import torch
 
 import pystencils
-import pystencils.autodiff
-from pystencils.autodiff.backends._torch_native import (
-    create_autograd_function, generate_torch)
+import pystencils_autodiff
 # from pystencils.cpu.kernelcreation import create_kernel
 from pystencils.backends.cbackend import generate_c
 from pystencils.gpucuda.kernelcreation import create_cuda_kernel
+from pystencils_autodiff.backends._torch_native import create_autograd_function, generate_torch
 
 PROJECT_ROOT = dirname
@@ -43,7 +42,7 @@ def test_jit():
     from torch.utils.cpp_extension import load
 
     lltm_cuda = load(
-        join(dirname(__file__), 'lltm_cuda'), [cpp_file, cuda_file], verbose=True, extra_cuda_cflags=["-ccbin=g++-6"])
+        join(dirname(__file__), 'lltm_cuda'), [cpp_file, cuda_file], verbose=True, extra_cuda_cflags=[])
     assert lltm_cuda is not None
 
     print('hallo')
@@ -54,7 +53,7 @@ def test_torch_native_compilation():
     assignments = pystencils.AssignmentCollection({
         y.center(): x.center()**2
     }, {})
-    autodiff = pystencils.autodiff.AutoDiffOp(assignments)
+    autodiff = pystencils_autodiff.AutoDiffOp(assignments)
     backward_assignments = autodiff.backward_assignments
 
     print(assignments)
@@ -136,7 +135,7 @@ def test_generate_torch():
     assignments = pystencils.AssignmentCollection({
         y.center(): x.center()**2
     }, {})
-    autodiff = pystencils.autodiff.AutoDiffOp(assignments)
+    autodiff = pystencils_autodiff.AutoDiffOp(assignments)
 
     op_cuda = generate_torch(appdirs.user_cache_dir('pystencils'), autodiff, is_cuda=True, dtype=np.float32)
     assert op_cuda is not None
@@ -150,10 +149,10 @@ def test_execute_torch():
     assignments = pystencils.AssignmentCollection({
         y.center(): 5 + x.center()
     }, {})
-    autodiff = pystencils.autodiff.AutoDiffOp(assignments)
+    autodiff = pystencils_autodiff.AutoDiffOp(assignments)
 
-    x_tensor = pystencils.autodiff.torch_tensor_from_field(x, 1, cuda=False)
-    y_tensor = pystencils.autodiff.torch_tensor_from_field(y, 1, cuda=False)
+    x_tensor = pystencils_autodiff.torch_tensor_from_field(x, 1, cuda=False)
+    y_tensor = pystencils_autodiff.torch_tensor_from_field(y, 1, cuda=False)
 
     op_cpp = create_autograd_function(autodiff, {x: x_tensor, y: y_tensor})
     foo = op_cpp.forward(x_tensor)
@@ -167,10 +166,10 @@ def test_execute_torch_gpu():
     assignments = pystencils.AssignmentCollection({
         y.center(): 5 + x.center()
     }, {})
-    autodiff = pystencils.autodiff.AutoDiffOp(assignments)
+    autodiff = pystencils_autodiff.AutoDiffOp(assignments)
 
-    x_tensor = pystencils.autodiff.torch_tensor_from_field(x, 3, cuda=True)
-    y_tensor = pystencils.autodiff.torch_tensor_from_field(y, 4, cuda=True)
+    x_tensor = pystencils_autodiff.torch_tensor_from_field(x, 3, cuda=True)
+    y_tensor = pystencils_autodiff.torch_tensor_from_field(y, 4, cuda=True)
 
     assert y_tensor.is_cuda
     assert torch.cuda.is_available()
diff --git a/tests/lbm/_layout_fixer.py b/tests/lbm/_layout_fixer.py
index c0e8afc..2ff9781 100644
--- a/tests/lbm/_layout_fixer.py
+++ b/tests/lbm/_layout_fixer.py
@@ -1,6 +1,6 @@
 import numpy as np
 import pystencils as ps
-from pystencils.autodiff.backends import AVAILABLE_BACKENDS
+from pystencils_autodiff.backends import AVAILABLE_BACKENDS
 
 
 def fix_layout(array, target_field, backend):
diff --git a/tests/lbm/autodiff.py b/tests/lbm/autodiff.py
index 7f79844..aa2497e 100644
--- a/tests/lbm/autodiff.py
+++ b/tests/lbm/autodiff.py
@@ -5,10 +5,10 @@
 import numpy as np
 import sympy as sp
 
 import pystencils as ps
-import pystencils.autodiff
-import pystencils.autodiff._assignment_transforms
-import pystencils.autodiff._layout_fixer
-from pystencils.autodiff.backends import AVAILABLE_BACKENDS
+import pystencils_autodiff
+import pystencils_autodiff._assignment_transforms
+import pystencils_autodiff._layout_fixer
+from pystencils_autodiff.backends import AVAILABLE_BACKENDS
 
 """Mode of backward differentiation (see https://autodiff-workshop.github.io/slides/Hueckelheim_nips_autodiff_CNN_PDE.pdf)"""
@@ -212,9 +212,9 @@ class AutoDiffOp(object):
         write_field_accesses = [a.lhs for a in forward_assignments]
 
         # for every field create a corresponding diff field
-        diff_read_fields = {f: pystencils.autodiff.AdjointField(f, diff_fields_prefix)
+        diff_read_fields = {f: pystencils_autodiff.AdjointField(f, diff_fields_prefix)
                             for f in read_fields}
-        diff_write_fields = {f: pystencils.autodiff.AdjointField(f, diff_fields_prefix)
+        diff_write_fields = {f: pystencils_autodiff.AdjointField(f, diff_fields_prefix)
                              for f in write_fields}
 
         assert all(isinstance(w, ps.Field.Access)
@@ -452,7 +452,7 @@ class AutoDiffOp(object):
 
         def forward_function(**kwargs):
            for field in self.forward_input_fields:
-                kwargs[field.name] = pystencils.autodiff._layout_fixer.fix_layout(
+                kwargs[field.name] = pystencils_autodiff._layout_fixer.fix_layout(
                     kwargs[field.name], field, backend)
                 # TODO: check dangerous function `as_strided`
             for s in self._additional_symbols:
@@ -471,7 +471,7 @@ class AutoDiffOp(object):
 
         def backward_function(**kwargs):
             for field in self.backward_input_fields + self.forward_input_fields:
-                kwargs[field.name] = pystencils.autodiff._layout_fixer.fix_layout(
+                kwargs[field.name] = pystencils_autodiff._layout_fixer.fix_layout(
                     kwargs[field.name], field, backend)
             for s in self._additional_symbols:
                 kwargs[s.name] = getattr(self, s.name)
@@ -487,16 +487,16 @@ class AutoDiffOp(object):
             backward_loop = backward_function
 
         if backend == 'tensorflow':
-            import pystencils.autodiff.backends._tensorflow
-            op = pystencils.autodiff.backends._tensorflow.tensorflowop_from_autodiffop(
+            import pystencils_autodiff.backends._tensorflow
+            op = pystencils_autodiff.backends._tensorflow.tensorflowop_from_autodiffop(
                 self, inputfield_tensor_dict, forward_loop, backward_loop)
         elif backend == 'torch':
-            import pystencils.autodiff.backends._pytorch
-            op = pystencils.autodiff.backends._pytorch.create_autograd_function(
+            import pystencils_autodiff.backends._pytorch
+            op = pystencils_autodiff.backends._pytorch.create_autograd_function(
                 self, inputfield_tensor_dict, forward_loop, backward_loop)
         elif backend == 'torch_native':
-            import pystencils.autodiff.backends._torch_native
-            op = pystencils.autodiff.backends._torch_native.create_autograd_function(
+            import pystencils_autodiff.backends._torch_native
+            op = pystencils_autodiff.backends._torch_native.create_autograd_function(
                 self, inputfield_tensor_dict, None, None)
         else:
             raise NotImplementedError()
diff --git a/tests/lbm/backends/_tensorflow.py b/tests/lbm/backends/_tensorflow.py
index dafac95..a4df191 100644
--- a/tests/lbm/backends/_tensorflow.py
+++ b/tests/lbm/backends/_tensorflow.py
@@ -1,5 +1,5 @@
 import tensorflow as tf
-import pystencils.autodiff
+import pystencils_autodiff
 import numpy as np
 
 from pystencils.utils import DotDict
@@ -38,7 +38,7 @@ def _py_func(func, inp, Tout, stateful=False, name=None, grad=None):
 
         return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
 
 
-def tensorflowop_from_autodiffop(autodiffop: pystencils.autodiff.AutoDiffOp, inputfield_tensor_dict, forward_function, backward_function):
+def tensorflowop_from_autodiffop(autodiffop: pystencils_autodiff.AutoDiffOp, inputfield_tensor_dict, forward_function, backward_function):
     def helper_forward(*args):
         kwargs = dict()
diff --git a/tests/lbm/backends/_torch_native.py b/tests/lbm/backends/_torch_native.py
index 7158c0c..bb21e45 100644
--- a/tests/lbm/backends/_torch_native.py
+++ b/tests/lbm/backends/_torch_native.py
@@ -1,21 +1,22 @@
 import os
 import types
 import uuid
+from itertools import chain
 from os.path import dirname, isdir, isfile, join
 
 import jinja2
 import torch
 from appdirs import user_cache_dir
 
-import pystencils.autodiff
-import pystencils.autodiff.backends._pytorch
+import pystencils
+import pystencils_autodiff
+import pystencils_autodiff.backends._pytorch
 from pystencils.astnodes import FieldShapeSymbol
-from pystencils.autodiff.backends._pytorch import numpy_dtype_to_torch
 from pystencils.backends.cbackend import generate_c
 from pystencils.backends.cuda_backend import CudaSympyPrinter, generate_cuda
 from pystencils.cpu.kernelcreation import create_kernel
 from pystencils.gpucuda.kernelcreation import create_cuda_kernel
-from itertools import chain
+from pystencils_autodiff.backends._pytorch import numpy_dtype_to_torch
 
 
 def _read_file(file):
@@ -29,7 +30,7 @@ def _write_file(filename, content):
 
 
 def generate_torch(destination_folder,
-                   autodiff: pystencils.autodiff.AutoDiffOp,
+                   autodiff: pystencils_autodiff.AutoDiffOp,
                    is_cuda,
                    dtype,
                    forward_ast=None,
@@ -74,8 +75,8 @@ def generate_torch(destination_folder,
         block_and_thread_numbers = backward_ast.indexing.call_parameters(backward_shape)
         backward_block = ', '.join(printer.doprint(i) for i in block_and_thread_numbers['block'])
         backward_grid = ', '.join(printer.doprint(i) for i in block_and_thread_numbers['grid'])
-        cuda_globals = pystencils.backends.cuda_backend.get_global_declarations(forward_ast) | \
-            pystencils.backends.cuda_backend.get_global_declarations(backward_ast)
+        cuda_globals = pystencils.backends.cbackend.get_global_declarations(forward_ast) | \
+            pystencils.backends.cbackend.get_global_declarations(backward_ast)
         cuda_globals = [generate_cuda(g) for g in cuda_globals]
     else:
         backward_block = forward_block = "INVALID"
@@ -131,7 +132,7 @@ def create_autograd_function(autodiff_obj, inputfield_to_tensor_dict, forward_lo
     is_cuda = all(t.is_cuda for t in inputfield_to_tensor_dict.values())
     assert all(t.is_cuda for t in inputfield_to_tensor_dict.values()) or \
         all(not t.is_cuda for t in inputfield_to_tensor_dict.values()), "All tensor should be on GPU or all on CPU"
-    dtype = pystencils.autodiff.backends._pytorch.torch_dtype_to_numpy(
+    dtype = pystencils_autodiff.backends._pytorch.torch_dtype_to_numpy(
         list(inputfield_to_tensor_dict.values())[0].dtype)
 
     cache_dir = user_cache_dir('pystencils')
@@ -166,7 +167,7 @@ def create_autograd_function(autodiff_obj, inputfield_to_tensor_dict, forward_lo
         cls.backward = backward
         return cls
     else:
-        op = pystencils.autodiff.backends._pytorch.create_autograd_function(autodiff_obj,
+        op = pystencils_autodiff.backends._pytorch.create_autograd_function(autodiff_obj,
                                                                             inputfield_to_tensor_dict,
                                                                             forward_loop,
                                                                             backward_loop,
-- 
GitLab
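
For orientation, a minimal usage sketch of the renamed package, assembled from the tests touched by this patch (test_execute_torch). The field declaration via pystencils.fields, the field shape, and the variable names are assumptions for illustration only, not part of the repository:

    # Hedged sketch: mirrors test_execute_torch() after the rename from
    # pystencils.autodiff to pystencils_autodiff; the field setup is assumed.
    import pystencils
    import pystencils_autodiff
    from pystencils_autodiff.backends._torch_native import create_autograd_function

    # Assumed: two 2D float32 fields declared with pystencils' fields() helper.
    x, y = pystencils.fields('x, y: float32[32,32]')

    # Forward kernel y <- 5 + x; the backward assignments are derived automatically.
    assignments = pystencils.AssignmentCollection({y.center(): 5 + x.center()}, {})
    autodiff = pystencils_autodiff.AutoDiffOp(assignments)

    # Torch tensors matching the fields' layout, on CPU as in the test above.
    x_tensor = pystencils_autodiff.torch_tensor_from_field(x, 1, cuda=False)
    y_tensor = pystencils_autodiff.torch_tensor_from_field(y, 1, cuda=False)

    # Compile a torch autograd function for the op and run its forward pass.
    op = create_autograd_function(autodiff, {x: x_tensor, y: y_tensor})
    result = op.forward(x_tensor)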