test_datahandling_parallel.py
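    """Tests for ParallelDataHandling on top of waLBerla block forests.

    The scenarios (access/gather, synchronization, GPU transfer, kernel
    execution, reductions, VTK output) are shared with the serial tests and
    imported from pystencils_tests.test_datahandling; this module only sets
    up the distributed block grids. With oneBlockPerProcess=False several
    blocks fit on one process, so the tests also run without MPI.
    """
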
    import numpy as np
    import waLBerla as wlb
    
    from pystencils.datahandling.parallel_datahandling import ParallelDataHandling
    from pystencils_tests.test_datahandling import (
        access_and_gather, kernel_execution_jacobi, reduction, synchronization, vtk_output)
    
    
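    # Build a fully periodic 3x2x1 block grid (4x7x1 cells per block) and run the
    # shared access/gather and ghost-layer synchronization checks; the GPU
    # synchronization path is only exercised when walberla was built with CUDA.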
    def test_access_and_gather():
        block_size = (4, 7, 1)
        num_blocks = (3, 2, 1)
        cells = tuple(a * b for a, b in zip(block_size, num_blocks))
        blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False,
                                            periodic=(1, 1, 1))
        dh = ParallelDataHandling(blocks, default_ghost_layers=2)
        access_and_gather(dh, cells)
        synchronization(dh, test_gpu=False)
        if hasattr(wlb, 'cuda'):
            synchronization(dh, test_gpu=True)
    
    
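    # CPU/GPU round trip: fill a field on the CPU, copy it to the GPU, overwrite
    # the CPU data, copy back and check that the original values reappear.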
    def test_gpu():
        if not hasattr(wlb, 'cuda'):
            print("Skip GPU tests because walberla was built without CUDA")
            return
    
        block_size = (4, 7, 1)
        num_blocks = (3, 2, 1)
        blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False)
        dh = ParallelDataHandling(blocks, default_ghost_layers=2)
        dh.add_array('v', values_per_cell=3, dtype=np.int64, ghost_layers=2, gpu=True)  # gpu=True also allocates a GPU copy of 'v'
    
        for b in dh.iterate():
            b['v'].fill(42)  # fill the CPU version of the field
        dh.all_to_gpu()  # copy every array to its GPU counterpart
        for b in dh.iterate():
            b['v'].fill(0)  # overwrite the CPU data
        dh.to_cpu('v')  # transfer 'v' back from the GPU
        for b in dh.iterate():
            np.testing.assert_equal(b['v'], 42)  # original values must survive the round trip
    
    
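    # Run the shared Jacobi kernel and reduction scenarios on 3D and 2D block
    # grids, once per target (the GPU pass is skipped without CUDA support).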
    def test_kernel():
        for gpu in (True, False):
            if gpu and not hasattr(wlb, 'cuda'):
                print("Skipping CUDA tests because walberla was built without GPU support")
                continue
    
            # 3D
            blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4), cellsPerBlock=(3, 2, 5), oneBlockPerProcess=False)
            dh = ParallelDataHandling(blocks)
            kernel_execution_jacobi(dh, 'gpu' if gpu else 'cpu')  # target follows the loop flag instead of always 'gpu'
            reduction(dh)
    
            # 2D
            blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 1), cellsPerBlock=(3, 2, 1), oneBlockPerProcess=False)
            dh = ParallelDataHandling(blocks, dim=2)
            kernel_execution_jacobi(dh, 'gpu' if gpu else 'cpu')  # same target selection as the 3D case
            reduction(dh)
    
    
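    # Smoke test: write the data handling's fields as VTK output on a 3x2x4 block grid.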
    def test_vtk_output():
        blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4), cellsPerBlock=(3, 2, 5), oneBlockPerProcess=False)
        dh = ParallelDataHandling(blocks)
        vtk_output(dh)