Commit e462d7f4 authored by Markus Holzer

Extended datahandling test cases

parent 24ef1d2f
@@ -86,6 +86,13 @@ class DataHandling(ABC):
         Args:
             description (str): String description of the fields to add
             dtype: data type of the array as numpy data type
+            ghost_layers: number of ghost layers - if not specified, the default value given to the
+                          constructor is used
+            layout: memory layout of array, either structure of arrays 'SoA' or array of structures 'AoS'.
+                    This is only important if values_per_cell > 1
+            cpu: allocate field on the CPU
+            gpu: allocate field on the GPU; if None, a GPU field is allocated if default_target is 'gpu'
+            alignment: either False for no alignment, or the number of bytes to align to
         Returns:
             Fields representing the just created arrays
         """
@@ -200,6 +207,10 @@ class DataHandling(ABC):
            directly passed to the kernel function and override possible parameters from the DataHandling
         """

+    @abstractmethod
+    def get_kernel_kwargs(self, kernel_function, **kwargs):
+        """Returns the input arguments required to call the given compiled kernel function"""
+
     @abstractmethod
     def swap(self, name1, name2, gpu=False):
         """Swaps data of two arrays"""
...
@@ -266,10 +266,10 @@ class SerialDataHandling(DataHandling):
         return name in self.gpu_arrays

     def synchronization_function_cpu(self, names, stencil_name=None, **_):
-        return self.synchronization_function(names, stencil_name, 'cpu')
+        return self.synchronization_function(names, stencil_name, target='cpu')

     def synchronization_function_gpu(self, names, stencil_name=None, **_):
-        return self.synchronization_function(names, stencil_name, 'gpu')
+        return self.synchronization_function(names, stencil_name, target='gpu')

     def synchronization_function(self, names, stencil=None, target=None, **_):
         if target is None:
...
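
For context, a sketch of how these wrappers are typically called (names and sizes are illustrative): synchronization_function returns a callable that exchanges the periodic ghost layers of the named fields.

    dh = ps.create_data_handling(domain_size=(16, 16), periodicity=True)
    dh.add_array('f')
    sync = dh.synchronization_function_cpu(['f'])  # returns a callable
    sync()  # copies periodic ghost layers on the CPU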
@@ -111,6 +111,14 @@ def kernel_execution_jacobi(dh, target):
     test_gpu = target == 'gpu' or target == 'opencl'
     dh.add_array('f', gpu=test_gpu)
     dh.add_array('tmp', gpu=test_gpu)
+
+    if test_gpu:
+        assert dh.is_on_gpu('f')
+        assert dh.is_on_gpu('tmp')
+
+    with pytest.raises(ValueError):
+        dh.add_array('f', gpu=test_gpu)
+
     stencil_2d = [(1, 0), (-1, 0), (0, 1), (0, -1)]
     stencil_3d = [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]
     stencil = stencil_2d if dh.dim == 2 else stencil_3d
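
For reference, a sketch of how a Jacobi update can be assembled from such a stencil and run through the data handling (assuming the pystencils API; this code is illustrative and not part of the commit):

    f, tmp = dh.fields['f'], dh.fields['tmp']
    # average over the stencil neighbors of f, written into tmp
    jacobi = ps.Assignment(tmp.center, sum(f[offset] for offset in stencil) / len(stencil))
    kernel = ps.create_kernel(jacobi).compile()
    dh.run_kernel(kernel)
    dh.swap('f', 'tmp')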
@@ -197,6 +205,7 @@ def test_access_and_gather():
 def test_kernel():
     for domain_shape in [(4, 5), (3, 4, 5)]:
         dh = create_data_handling(domain_size=domain_shape, periodicity=True)
+        assert all(dh.periodicity)

         kernel_execution_jacobi(dh, 'cpu')
         reduction(dh)
...@@ -243,3 +252,61 @@ def test_add_arrays(): ...@@ -243,3 +252,61 @@ def test_add_arrays():
assert y_ == y assert y_ == y
assert x == dh.fields['x'] assert x == dh.fields['x']
assert y == dh.fields['y'] assert y == dh.fields['y']
def test_get_kwarg():
domain_shape = (10, 10)
field_description = 'src, dst'
dh = create_data_handling(domain_size=domain_shape, default_ghost_layers=1)
src, dst = dh.add_arrays(field_description)
dh.fill("src", 1.0, ghost_layers=True)
dh.fill("dst", 0.0, ghost_layers=True)
ur = ps.Assignment(src.center, dst.center)
kernel = ps.create_kernel(ur).compile()
kw = dh.get_kernel_kwargs(kernel)
assert np.all(kw[0]['src'] == dh.cpu_arrays['src'])
assert np.all(kw[0]['dst'] == dh.cpu_arrays['dst'])
def test_add_custom_data():
pytest.importorskip('pycuda')
import pycuda.gpuarray as gpuarray
import pycuda.autoinit # noqa
def cpu_data_create_func():
return np.ones((2, 2), dtype=np.float64)
def gpu_data_create_func():
return gpuarray.zeros((2, 2), dtype=np.float64)
def cpu_to_gpu_transfer_func(gpuarr, cpuarray):
gpuarr.set(cpuarray)
def gpu_to_cpu_transfer_func(gpuarr, cpuarray):
gpuarr.get(cpuarray)
dh = create_data_handling(domain_size=(10, 10))
dh.add_custom_data('custom_data',
cpu_data_create_func,
gpu_data_create_func,
cpu_to_gpu_transfer_func,
gpu_to_cpu_transfer_func)
assert np.all(dh.custom_data_cpu['custom_data'] == 1)
assert np.all(dh.custom_data_gpu['custom_data'].get() == 0)
dh.to_cpu(name='custom_data')
dh.to_gpu(name='custom_data')
assert 'custom_data' in dh.custom_data_names
def test_log():
dh = create_data_handling(domain_size=(10, 10))
dh.log_on_root()
assert dh.is_root
assert dh.world_rank == 0