From 2101c5709d9d77f2ec54c2929d2b4786b04766f6 Mon Sep 17 00:00:00 2001
From: Stephan Seitz <stephan.seitz@fau.de>
Date: Tue, 17 Sep 2019 19:44:54 +0200
Subject: [PATCH] Cache in 'object_cache', make tests less hard coded

---
 src/pystencils_autodiff/backends/astnodes.py      |  7 +-
 src/pystencils_autodiff/tensorflow_jit.py         |  3 +-
 .../backends/test_torch_native_compilation.py     | 16 ++---
 tests/test_native_tensorflow_compilation.py       | 72 +++++++++----------
 4 files changed, 45 insertions(+), 53 deletions(-)

diff --git a/src/pystencils_autodiff/backends/astnodes.py b/src/pystencils_autodiff/backends/astnodes.py
index 7ca379b..3d56e61 100644
--- a/src/pystencils_autodiff/backends/astnodes.py
+++ b/src/pystencils_autodiff/backends/astnodes.py
@@ -14,6 +14,7 @@ from os.path import dirname, exists, join
 import pystencils
 from pystencils.astnodes import FieldPointerSymbol, FieldShapeSymbol, FieldStrideSymbol
+from pystencils.cpu.cpujit import get_cache_config

 from pystencils_autodiff._file_io import read_template_from_file, write_file
 from pystencils_autodiff.backends.python_bindings import (
     PybindFunctionWrapping, PybindPythonBindings, TensorflowFunctionWrapping,
@@ -97,7 +98,7 @@ class TorchModule(JinjaCppFile):
         file_extension = '.cu' if self.is_cuda else '.cpp'
         source_code = str(self)
         hash = _hash(source_code.encode()).hexdigest()
-        file_name = join(pystencils.cache.cache_dir, f'{hash}{file_extension}')
+        file_name = join(get_cache_config()['object_cache'], f'{hash}{file_extension}')

         if not exists(file_name):
             write_file(file_name, source_code)
@@ -150,13 +151,13 @@ setup_pybind11(cfg)

         assert not self.is_cuda

+        cache_dir = get_cache_config()['object_cache']
         source_code = self.CPP_IMPORT_PREFIX + str(self)
-        file_name = join(pystencils.cache.cache_dir, f'{self.module_name}.cpp')
+        file_name = join(cache_dir, f'{self.module_name}.cpp')

         if not exists(file_name):
             write_file(file_name, source_code)

         # TODO: propagate extra headers
-        cache_dir = pystencils.cache.cache_dir
         if cache_dir not in sys.path:
             sys.path.append(cache_dir)

diff --git a/src/pystencils_autodiff/tensorflow_jit.py b/src/pystencils_autodiff/tensorflow_jit.py
index 90e46c7..bd7b2f6 100644
--- a/src/pystencils_autodiff/tensorflow_jit.py
+++ b/src/pystencils_autodiff/tensorflow_jit.py
@@ -176,7 +176,8 @@ def compile_sources_and_load(host_sources,
             source_code = source

         file_extension = '.cu' if is_cuda else '.cpp'
-        file_name = join(pystencils.cache.cache_dir, f'{_hash(source_code.encode()).hexdigest()}{file_extension}')
+        file_name = join(get_cache_config()['object_cache'],
+                         f'{_hash(source_code.encode()).hexdigest()}{file_extension}')

         if not exists(file_name):
             write_file(file_name, source_code)

diff --git a/tests/backends/test_torch_native_compilation.py b/tests/backends/test_torch_native_compilation.py
index c3916ff..2d01db7 100644
--- a/tests/backends/test_torch_native_compilation.py
+++ b/tests/backends/test_torch_native_compilation.py
@@ -5,7 +5,6 @@
 import os
 import subprocess
-import tempfile
 from os.path import dirname, isfile, join

 import numpy as np
@@ -14,13 +13,14 @@
 import sympy

 import pystencils
 from pystencils_autodiff import create_backward_assignments
-from pystencils_autodiff._file_io import write_file
+from pystencils_autodiff._file_io import write_cached_content, write_file
 from pystencils_autodiff.backends.astnodes import PybindModule, TorchModule

 torch = pytest.importorskip('torch')
 pytestmark = pytest.mark.skipif(subprocess.call(['ninja', '--v']) != 0,
                                 reason='torch compilation requires ninja')
+

 PROJECT_ROOT = dirname
@@ -66,10 +66,8 @@ def test_torch_native_compilation_cpu():
     module = TorchModule(module_name, [forward_ast, backward_ast])
     print(module)

-    temp_file = tempfile.NamedTemporaryFile(suffix='.cu' if target == 'gpu' else '.cpp')
-    print(temp_file.name)
-    write_file(temp_file.name, str(module))
-    torch_extension = load(module_name, [temp_file.name])
+    temp_file = write_cached_content(str(module), '.cpp')
+    torch_extension = load(module_name, [temp_file])
     assert torch_extension is not None
     assert 'call_forward' in dir(torch_extension)
     assert 'call_backward' in dir(torch_extension)
@@ -135,10 +133,8 @@ def test_torch_native_compilation_gpu():
     module = TorchModule(module_name, [forward_ast, backward_ast])
     print(module)

-    temp_file = tempfile.NamedTemporaryFile(suffix='.cu' if target == 'gpu' else '.cpp')
-    print(temp_file.name)
-    write_file(temp_file.name, str(module))
-    torch_extension = load(module_name, [temp_file.name])
+    temp_file = write_cached_content(str(module), suffix='.cu')
+    torch_extension = load(module_name, [temp_file])
     assert torch_extension is not None
     assert 'call_forward' in dir(torch_extension)
     assert 'call_backward' in dir(torch_extension)

diff --git a/tests/test_native_tensorflow_compilation.py b/tests/test_native_tensorflow_compilation.py
index d3fb6db..bb7e8c6 100644
--- a/tests/test_native_tensorflow_compilation.py
+++ b/tests/test_native_tensorflow_compilation.py
@@ -10,7 +10,6 @@
 import os
 import subprocess
-import tempfile
 from os.path import join
 from sysconfig import get_paths

@@ -21,7 +20,7 @@
 import pystencils
 from pystencils.cpu.cpujit import get_compiler_config
 from pystencils.include import get_pystencils_include_path
 from pystencils_autodiff import create_backward_assignments
-from pystencils_autodiff._file_io import write_file
+from pystencils_autodiff._file_io import write_cached_content, write_file
 from pystencils_autodiff.backends.astnodes import TensorflowModule
 from pystencils_autodiff.tensorflow_jit import _compile_env
@@ -71,17 +70,14 @@ def test_native_tensorflow_compilation_cpu():
     module = TensorflowModule(module_name, [forward_ast, backward_ast])
     print(module)

-    temp_file = tempfile.NamedTemporaryFile(suffix='.cu' if target == 'gpu' else '.cpp')
-    print(temp_file.name)
-    write_file(temp_file.name, str(module))
-    write_file('/tmp/foo.cpp', str(module))
+    # temp_file = write_cached_content(str(module), '.cpp')

-    command = ['c++', '-fPIC', temp_file.name, '-O2', '-shared',
-               '-o', 'foo.so'] + compile_flags + link_flags + extra_flags
-    print(command)
-    subprocess.check_call(command, env=_compile_env)
+    # command = ['c++', '-fPIC', temp_file, '-O2', '-shared',
+    #            '-o', 'foo.so'] + compile_flags + link_flags + extra_flags
+    # print(command)
+    # subprocess.check_call(command, env=_compile_env)

-    lib = tf.load_op_library(join(os.getcwd(), 'foo.so'))
+    lib = module.compile()

     assert 'call_forward' in dir(lib)
     assert 'call_backward' in dir(lib)
@@ -115,36 +111,34 @@ def test_native_tensorflow_compilation_gpu():
     module = TensorflowModule(module_name, [forward_ast, backward_ast])
     print(str(module))

-    temp_file = tempfile.NamedTemporaryFile(suffix='.cu' if target == 'gpu' else '.cpp')
-    print(temp_file.name)
-    write_file(temp_file.name, str(module))
-    if 'tensorflow_host_compiler' not in get_compiler_config():
-        get_compiler_config()['tensorflow_host_compiler'] = get_compiler_config()['command']
+    # temp_file = write_cached_content(str(module), '.cu')
+    # if 'tensorflow_host_compiler' not in get_compiler_config():
+    #     get_compiler_config()['tensorflow_host_compiler'] = get_compiler_config()['command']

-    # on my machine g++-6 and clang-7 are working
+    # # on my machine g++-6 and clang-7 are working
+    # # '-ccbin',
+    # # 'g++-6',
+    # command = ['nvcc',
+    #            temp_file.name,
+    #            '--expt-relaxed-constexpr',
     # '-ccbin',
-    # 'g++-6',
-    command = ['nvcc',
-               temp_file.name,
-               '--expt-relaxed-constexpr',
-               '-ccbin',
-               get_compiler_config()['tensorflow_host_compiler'],
-               '-std=c++14',
-               '-x',
-               'cu',
-               '-Xcompiler',
-               '-fPIC',
-               '-c',
-               '-o',
-               'foo_gpu.o'] + compile_flags + extra_flags
-
-    subprocess.check_call(command)
-
-    command = ['c++', '-fPIC', 'foo_gpu.o',
-               '-shared', '-o', 'foo_gpu.so'] + link_flags
-
-    subprocess.check_call(command)
-    lib = tf.load_op_library(join(os.getcwd(), 'foo_gpu.so'))
+    #            get_compiler_config()['tensorflow_host_compiler'],
+    #            '-std=c++14',
+    #            '-x',
+    #            'cu',
+    #            '-Xcompiler',
+    #            '-fPIC',
+    #            '-c',
+    #            '-o',
+    #            'foo_gpu.o'] + compile_flags + extra_flags
+
+    # subprocess.check_call(command)
+
+    # command = ['c++', '-fPIC', 'foo_gpu.o',
+    #            '-shared', '-o', 'foo_gpu.so'] + link_flags
+
+    # subprocess.check_call(command)
+    lib = module.compile()

     assert 'call_forward2' in dir(lib)
 #
-- 
GitLab
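
Note on the helper used by the updated tests: write_cached_content is imported from
pystencils_autodiff._file_io, but its definition is not part of this patch. Judging from its call
sites above (write_cached_content(str(module), '.cpp') and write_cached_content(str(module),
suffix='.cu')) and from the caching scheme the patch introduces in astnodes.py and
tensorflow_jit.py, it presumably writes the given source into pystencils' 'object_cache' directory
under a content-hash file name and returns that path. A minimal sketch under those assumptions
(names and details are guesses, not the actual implementation):

    from hashlib import sha256
    from os.path import exists, join

    from pystencils.cpu.cpujit import get_cache_config


    def write_cached_content(content, suffix):
        # Hash the source so identical content always maps to the same file
        # inside pystencils' object cache and is only written once.
        file_name = join(get_cache_config()['object_cache'],
                         sha256(content.encode()).hexdigest() + suffix)
        if not exists(file_name):
            with open(file_name, 'w') as f:
                f.write(content)
        return file_name

With such a helper, the tests hand the returned cache path either to torch's extension loader
(load) or leave compilation entirely to module.compile(), instead of routing the generated source
through a throw-away tempfile.NamedTemporaryFile and hard-coded output names like 'foo.so'.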