Commit dadb684e authored by Markus Holzer

Update parallel datahandling test cases

parent 9c219dad
1 merge request: !272 Testing
@@ -18,6 +18,7 @@ test-report
 pystencils/boundaries/createindexlistcython.c
 pystencils/boundaries/createindexlistcython.*.so
 pystencils_tests/tmp
+pystencils_tests/var
 pystencils_tests/kerncraft_inputs/.2d-5pt.c_kerncraft/
 pystencils_tests/kerncraft_inputs/.3d-7pt.c_kerncraft/
 report.xml
...
@@ -39,9 +39,7 @@ def test_access_and_gather():
 def test_gpu():
-    if not hasattr(wlb, 'cuda'):
-        print("Skip GPU tests because walberla was built without CUDA")
-        return
+    pytest.importorskip('waLBerla.cuda')

     block_size = (4, 7, 1)
     num_blocks = (3, 2, 1)
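
Note: pytest.importorskip('waLBerla.cuda') skips the test when the named module cannot be imported and returns the module otherwise, so builds without CUDA now report a skip instead of passing vacuously. A minimal sketch of the pattern (the test name and body here are illustrative, not part of this commit):

import pytest

def test_gpu_sketch():
    # Reported as SKIPPED, not PASSED, when waLBerla.cuda is unavailable.
    wlb_cuda = pytest.importorskip('waLBerla.cuda')
    # ... exercise wlb_cuda here ...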
@@ -59,24 +57,22 @@ def test_gpu():
         np.testing.assert_equal(b['v'], 42)

-def test_kernel():
-    for gpu in (True, False):
-        if gpu and not hasattr(wlb, 'cuda'):
-            print("Skipping CUDA tests because walberla was built without GPU support")
-            continue
-        # 3D
-        blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4), cellsPerBlock=(3, 2, 5), oneBlockPerProcess=False)
-        dh = ParallelDataHandling(blocks)
-        kernel_execution_jacobi(dh, pystencils.Target.GPU)
-        reduction(dh)
-        # 2D
-        blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 1), cellsPerBlock=(3, 2, 1), oneBlockPerProcess=False)
-        dh = ParallelDataHandling(blocks, dim=2)
-        kernel_execution_jacobi(dh, pystencils.Target.GPU)
-        reduction(dh)
+@pytest.mark.parametrize('target', (pystencils.Target.CPU, pystencils.Target.GPU))
+def test_kernel(target):
+    if target == pystencils.Target.GPU:
+        pytest.importorskip('waLBerla.cuda')
+    # 3D
+    blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4), cellsPerBlock=(3, 2, 5), oneBlockPerProcess=False)
+    dh = ParallelDataHandling(blocks, default_target=target)
+    kernel_execution_jacobi(dh, target)
+    reduction(dh)
+    # 2D
+    blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 1), cellsPerBlock=(3, 2, 1), oneBlockPerProcess=False)
+    dh = ParallelDataHandling(blocks, dim=2, default_target=target)
+    kernel_execution_jacobi(dh, target)
+    reduction(dh)

 def test_vtk_output():
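
Note: replacing the in-test for gpu in (True, False) loop with @pytest.mark.parametrize makes pytest collect one test item per target, so the GPU variant can be skipped independently while the CPU variant still runs and is reported on its own. A self-contained sketch of that structure (the final assertion stands in for the real kernel_execution_jacobi/reduction calls):

import pytest
import pystencils

@pytest.mark.parametrize('target', (pystencils.Target.CPU, pystencils.Target.GPU))
def test_kernel_sketch(target):
    # Only the GPU item is skipped on builds without waLBerla CUDA support.
    if target == pystencils.Target.GPU:
        pytest.importorskip('waLBerla.cuda')
    # Stand-in for the actual kernel execution against the chosen target.
    assert isinstance(target, pystencils.Target)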
@@ -90,7 +86,7 @@ def test_block_iteration():
     num_blocks = (2, 2, 2)
     blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False)
     dh = ParallelDataHandling(blocks, default_ghost_layers=2)
-    dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2, gpu=True)
+    dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2)
     for b in dh.iterate():
         b['v'].fill(1)
@@ -113,10 +109,12 @@ def test_block_iteration():

 def test_getter_setter():
+    pytest.importorskip('waLBerla.cuda')
+
     block_size = (2, 2, 2)
     num_blocks = (2, 2, 2)
     blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False)
-    dh = ParallelDataHandling(blocks, default_ghost_layers=2)
+    dh = ParallelDataHandling(blocks, default_ghost_layers=2, default_target=pystencils.Target.GPU)
     dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2, gpu=True)
     assert dh.shape == (4, 4, 4)
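
Note: setting default_target once on the data handling decides where kernels execute and where added arrays live by default, which is why the call sites above no longer pass pystencils.Target.GPU explicitly. A hedged serial sketch of the same idea (Target.CPU keeps the snippet runnable without CUDA; the ParallelDataHandling variant itself requires a waLBerla build):

import numpy as np
import pystencils as ps

# Serial analogue of ParallelDataHandling(blocks, default_ghost_layers=2,
# default_target=pystencils.Target.GPU) from the hunk above.
dh = ps.create_data_handling(domain_size=(4, 4, 4), default_ghost_layers=2,
                             default_target=ps.Target.CPU)
dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2)
assert dh.shape == (4, 4, 4)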
@@ -135,12 +133,14 @@ def test_getter_setter():

 def test_parallel_datahandling_boundary_conditions():
     pytest.importorskip('waLBerla.cuda')
-    dh = create_data_handling(domain_size=(7, 7), periodicity=True, parallel=True, default_target=pystencils.Target.GPU)
-    src = dh.add_array('src')
-    src2 = dh.add_array('src2')
+
+    dh = create_data_handling(domain_size=(7, 7), periodicity=True, parallel=True,
+                              default_target=pystencils.Target.GPU)
+
+    src = dh.add_array('src', values_per_cell=1)
     dh.fill("src", 0.0, ghost_layers=True)
     dh.fill("src", 1.0, ghost_layers=False)
-    src_cpu = dh.add_array('src_cpu', gpu=False)
+    src_cpu = dh.add_array('src_cpu', values_per_cell=1, gpu=False)
     dh.fill("src_cpu", 0.0, ghost_layers=True)
     dh.fill("src_cpu", 1.0, ghost_layers=False)
@@ -170,6 +170,7 @@ def test_parallel_datahandling_boundary_conditions():
     assert dh.custom_data_names == ('boundary_handling_cpuIndexArrays', 'boundary_handling_gpuIndexArrays')
     dh.swap("src", "src2", gpu=True)

 def test_save_data():
     domain_shape = (2, 2)
...