Commit 2101c570 authored by Stephan Seitz

Cache in 'object_cache', make tests less hard coded

parent f8071a9e
Pipeline #18104 failed
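
In short: generated source files are now written into the 'object_cache' directory from pystencils' cache configuration instead of under pystencils.cache.cache_dir, and the tests reuse that location through a write_cached_content helper rather than hand-rolled temporary files. A minimal sketch of the new path construction (the file name below is illustrative only, not taken from the diff):

from os.path import join

from pystencils.cpu.cpujit import get_cache_config

# Generated sources now land in pystencils' object cache.
cache_dir = get_cache_config()['object_cache']
file_name = join(cache_dir, 'generated_module.cpp')  # illustrative name only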
@@ -14,6 +14,7 @@ from os.path import dirname, exists, join
 import pystencils
 from pystencils.astnodes import FieldPointerSymbol, FieldShapeSymbol, FieldStrideSymbol
+from pystencils.cpu.cpujit import get_cache_config
 from pystencils_autodiff._file_io import read_template_from_file, write_file
 from pystencils_autodiff.backends.python_bindings import (
     PybindFunctionWrapping, PybindPythonBindings, TensorflowFunctionWrapping,
@@ -97,7 +98,7 @@ class TorchModule(JinjaCppFile):
         file_extension = '.cu' if self.is_cuda else '.cpp'
         source_code = str(self)
         hash = _hash(source_code.encode()).hexdigest()
-        file_name = join(pystencils.cache.cache_dir, f'{hash}{file_extension}')
+        file_name = join(get_cache_config()['object_cache'], f'{hash}{file_extension}')
         if not exists(file_name):
             write_file(file_name, source_code)
@@ -150,13 +151,13 @@ setup_pybind11(cfg)
         assert not self.is_cuda
+        cache_dir = get_cache_config()['object_cache']
         source_code = self.CPP_IMPORT_PREFIX + str(self)
-        file_name = join(pystencils.cache.cache_dir, f'{self.module_name}.cpp')
+        file_name = join(cache_dir, f'{self.module_name}.cpp')
         if not exists(file_name):
             write_file(file_name, source_code)
         # TODO: propagate extra headers
-        cache_dir = pystencils.cache.cache_dir
         if cache_dir not in sys.path:
             sys.path.append(cache_dir)
@@ -176,7 +176,8 @@ def compile_sources_and_load(host_sources,
         source_code = source
     file_extension = '.cu' if is_cuda else '.cpp'
-    file_name = join(pystencils.cache.cache_dir, f'{_hash(source_code.encode()).hexdigest()}{file_extension}')
+    file_name = join(get_cache_config()['object_cache'],
+                     f'{_hash(source_code.encode()).hexdigest()}{file_extension}')
     if not exists(file_name):
         write_file(file_name, source_code)
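
The test changes below import a write_cached_content helper from pystencils_autodiff._file_io. Its implementation is not part of the shown hunks; judging from the call sites (write_cached_content(str(module), '.cpp') and write_cached_content(str(module), suffix='.cu')), it presumably mirrors the hash-and-cache pattern used by TorchModule above. A hypothetical sketch under that assumption:

# Hypothetical sketch of a write_cached_content-style helper; the real helper
# lives in pystencils_autodiff._file_io and may differ in detail.
from hashlib import sha256
from os.path import exists, join

from pystencils.cpu.cpujit import get_cache_config


def write_cached_content(content, suffix):
    """Write 'content' into the object cache under a content-hash file name
    and return the resulting path."""
    file_name = join(get_cache_config()['object_cache'],
                     sha256(content.encode()).hexdigest() + suffix)
    if not exists(file_name):
        with open(file_name, 'w') as f:
            f.write(content)
    return file_name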
@@ -5,7 +5,6 @@
 import os
 import subprocess
-import tempfile
 from os.path import dirname, isfile, join

 import numpy as np
@@ -14,13 +13,14 @@ import sympy
 import pystencils
 from pystencils_autodiff import create_backward_assignments
-from pystencils_autodiff._file_io import write_file
+from pystencils_autodiff._file_io import write_cached_content, write_file
 from pystencils_autodiff.backends.astnodes import PybindModule, TorchModule

 torch = pytest.importorskip('torch')
 pytestmark = pytest.mark.skipif(subprocess.call(['ninja', '--v']) != 0,
                                 reason='torch compilation requires ninja')

 PROJECT_ROOT = dirname
@@ -66,10 +66,8 @@ def test_torch_native_compilation_cpu():
     module = TorchModule(module_name, [forward_ast, backward_ast])
     print(module)

-    temp_file = tempfile.NamedTemporaryFile(suffix='.cu' if target == 'gpu' else '.cpp')
-    print(temp_file.name)
-    write_file(temp_file.name, str(module))
-    torch_extension = load(module_name, [temp_file.name])
+    temp_file = write_cached_content(str(module), '.cpp')
+    torch_extension = load(module_name, [temp_file])
     assert torch_extension is not None
     assert 'call_forward' in dir(torch_extension)
     assert 'call_backward' in dir(torch_extension)
@@ -135,10 +133,8 @@ def test_torch_native_compilation_gpu():
     module = TorchModule(module_name, [forward_ast, backward_ast])
     print(module)

-    temp_file = tempfile.NamedTemporaryFile(suffix='.cu' if target == 'gpu' else '.cpp')
-    print(temp_file.name)
-    write_file(temp_file.name, str(module))
-    torch_extension = load(module_name, [temp_file.name])
+    temp_file = write_cached_content(str(module), suffix='.cu')
+    torch_extension = load(module_name, [temp_file])
     assert torch_extension is not None
     assert 'call_forward' in dir(torch_extension)
     assert 'call_backward' in dir(torch_extension)
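
For context, the torch tests now write the generated module into the object cache and hand that file to torch's C++ extension loader. A minimal end-to-end sketch, assuming that load is torch.utils.cpp_extension.load (its import lies outside the shown hunks) and using a trivial kernel in place of the tests' actual assignments:

import sympy
import pystencils
from torch.utils.cpp_extension import load

from pystencils_autodiff import create_backward_assignments
from pystencils_autodiff._file_io import write_cached_content
from pystencils_autodiff.backends.astnodes import TorchModule

# A trivial forward kernel standing in for the tests' real assignments.
z, x, y = pystencils.fields('z, x, y: [20, 40]')
forward_assignments = pystencils.AssignmentCollection({
    z[0, 0]: x[0, 0] * sympy.log(x[0, 0] * y[0, 0])
})
backward_assignments = create_backward_assignments(forward_assignments)

forward_ast = pystencils.create_kernel(forward_assignments, target='cpu')
forward_ast.function_name = 'forward'
backward_ast = pystencils.create_kernel(backward_assignments, target='cpu')
backward_ast.function_name = 'backward'

module = TorchModule('demo_module', [forward_ast, backward_ast])
source_file = write_cached_content(str(module), '.cpp')  # path inside 'object_cache'
torch_extension = load('demo_module', [source_file])     # ninja-based JIT build
assert 'call_forward' in dir(torch_extension)
assert 'call_backward' in dir(torch_extension)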
@@ -10,7 +10,6 @@
 import os
 import subprocess
-import tempfile
 from os.path import join
 from sysconfig import get_paths
@@ -21,7 +20,7 @@ import pystencils
 from pystencils.cpu.cpujit import get_compiler_config
 from pystencils.include import get_pystencils_include_path
 from pystencils_autodiff import create_backward_assignments
-from pystencils_autodiff._file_io import write_file
+from pystencils_autodiff._file_io import write_cached_content, write_file
 from pystencils_autodiff.backends.astnodes import TensorflowModule
 from pystencils_autodiff.tensorflow_jit import _compile_env
@@ -71,17 +70,14 @@ def test_native_tensorflow_compilation_cpu():
     module = TensorflowModule(module_name, [forward_ast, backward_ast])
     print(module)

-    temp_file = tempfile.NamedTemporaryFile(suffix='.cu' if target == 'gpu' else '.cpp')
-    print(temp_file.name)
-    write_file(temp_file.name, str(module))
-    write_file('/tmp/foo.cpp', str(module))
-    command = ['c++', '-fPIC', temp_file.name, '-O2', '-shared',
-               '-o', 'foo.so'] + compile_flags + link_flags + extra_flags
-    print(command)
-    subprocess.check_call(command, env=_compile_env)
-    lib = tf.load_op_library(join(os.getcwd(), 'foo.so'))
+    # temp_file = write_cached_content(str(module), '.cpp')
+    # command = ['c++', '-fPIC', temp_file, '-O2', '-shared',
+    #            '-o', 'foo.so'] + compile_flags + link_flags + extra_flags
+    # print(command)
+    # subprocess.check_call(command, env=_compile_env)
+    lib = module.compile()

     assert 'call_forward' in dir(lib)
     assert 'call_backward' in dir(lib)
@@ -115,36 +111,34 @@ def test_native_tensorflow_compilation_gpu():
     module = TensorflowModule(module_name, [forward_ast, backward_ast])
     print(str(module))

-    temp_file = tempfile.NamedTemporaryFile(suffix='.cu' if target == 'gpu' else '.cpp')
-    print(temp_file.name)
-    write_file(temp_file.name, str(module))
-    if 'tensorflow_host_compiler' not in get_compiler_config():
-        get_compiler_config()['tensorflow_host_compiler'] = get_compiler_config()['command']
-    # on my machine g++-6 and clang-7 are working
-    # '-ccbin',
-    # 'g++-6',
-    command = ['nvcc',
-               temp_file.name,
-               '--expt-relaxed-constexpr',
-               '-ccbin',
-               get_compiler_config()['tensorflow_host_compiler'],
-               '-std=c++14',
-               '-x',
-               'cu',
-               '-Xcompiler',
-               '-fPIC',
-               '-c',
-               '-o',
-               'foo_gpu.o'] + compile_flags + extra_flags
-    subprocess.check_call(command)
-    command = ['c++', '-fPIC', 'foo_gpu.o',
-               '-shared', '-o', 'foo_gpu.so'] + link_flags
-    subprocess.check_call(command)
-    lib = tf.load_op_library(join(os.getcwd(), 'foo_gpu.so'))
+    # temp_file = write_cached_content(str(module), '.cu')
+    # if 'tensorflow_host_compiler' not in get_compiler_config():
+    #     get_compiler_config()['tensorflow_host_compiler'] = get_compiler_config()['command']
+    # # on my machine g++-6 and clang-7 are working
+    # # '-ccbin',
+    # # 'g++-6',
+    # command = ['nvcc',
+    #            temp_file.name,
+    #            '--expt-relaxed-constexpr',
+    #            '-ccbin',
+    #            get_compiler_config()['tensorflow_host_compiler'],
+    #            '-std=c++14',
+    #            '-x',
+    #            'cu',
+    #            '-Xcompiler',
+    #            '-fPIC',
+    #            '-c',
+    #            '-o',
+    #            'foo_gpu.o'] + compile_flags + extra_flags
+    # subprocess.check_call(command)
+    # command = ['c++', '-fPIC', 'foo_gpu.o',
+    #            '-shared', '-o', 'foo_gpu.so'] + link_flags
+    # subprocess.check_call(command)
+    lib = module.compile()

     assert 'call_forward2' in dir(lib)
     #
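
The TensorFlow tests no longer shell out to c++ and nvcc by hand; they call module.compile() and receive a loaded op library. The internals of compile() are not part of this diff; pieced together from the commands the tests used to run, it presumably performs roughly the following steps. This is a hypothetical outline under that assumption, not the library's actual code:

# Hypothetical outline of what TensorflowModule.compile() is assumed to do,
# reconstructed from the command lines this diff comments out; the real
# implementation in pystencils_autodiff may differ.
import subprocess

import tensorflow as tf

from pystencils_autodiff._file_io import write_cached_content


def compile_sketch(module, compile_flags, link_flags):
    # 1. Write the generated C++ source into the object cache.
    source_file = write_cached_content(str(module), '.cpp')
    shared_object = source_file.replace('.cpp', '.so')
    # 2. Build a shared object (the old test invoked c++ directly like this).
    subprocess.check_call(['c++', '-fPIC', source_file, '-O2', '-shared',
                           '-o', shared_object] + compile_flags + link_flags)
    # 3. Load the resulting op library into TensorFlow.
    return tf.load_op_library(shared_object)

Usage would mirror the tests: lib = compile_sketch(module, compile_flags, link_flags), followed by assert 'call_forward' in dir(lib).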