pycodegen / pystencils / Commits / dadb684e

Commit dadb684e, authored 3 years ago by Markus Holzer

Update parallel datahandling test cases

Parent: 9c219dad
Merge request: !272 "Testing"

Showing 2 changed files with 27 additions and 25 deletions:

.gitignore (+1, -0)
pystencils_tests/test_datahandling_parallel.py (+26, -25)
.gitignore (+1, -0)

@@ -18,6 +18,7 @@ test-report
 pystencils/boundaries/createindexlistcython.c
 pystencils/boundaries/createindexlistcython.*.so
 pystencils_tests/tmp
+pystencils_tests/var
 pystencils_tests/kerncraft_inputs/.2d-5pt.c_kerncraft/
 pystencils_tests/kerncraft_inputs/.3d-7pt.c_kerncraft/
 report.xml
pystencils_tests/test_datahandling_parallel.py (+26, -25)
@@ -39,9 +39,7 @@ def test_access_and_gather():

 def test_gpu():
-    if not hasattr(wlb, 'cuda'):
-        print("Skip GPU tests because walberla was built without CUDA")
-        return
+    pytest.importorskip('waLBerla.cuda')
     block_size = (4, 7, 1)
     num_blocks = (3, 2, 1)
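
The change above swaps a hand-rolled guard (print a message and return, which reports the test as passed even when CUDA is absent) for pytest.importorskip, which marks the test as skipped when the optional module cannot be imported. A minimal, self-contained sketch of the idiom; the test body is a placeholder, not pystencils code:

import pytest

def test_needs_walberla_cuda():
    # Skips this test (instead of passing vacuously) when the optional
    # waLBerla.cuda module is not importable; returns the module if it is.
    wlb_cuda = pytest.importorskip('waLBerla.cuda')
    assert wlb_cuda is not None  # placeholder; the real tests exercise the GPU path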
@@ -59,24 +57,22 @@ def test_gpu():
         np.testing.assert_equal(b['v'], 42)


-def test_kernel():
-    for gpu in (True, False):
-        if gpu and not hasattr(wlb, 'cuda'):
-            print("Skipping CUDA tests because walberla was built without GPU support")
-            continue
+@pytest.mark.parametrize('target', (pystencils.Target.CPU, pystencils.Target.GPU))
+def test_kernel(target):
+    if target == pystencils.Target.GPU:
+        pytest.importorskip('waLBerla.cuda')

-        # 3D
-        blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4), cellsPerBlock=(3, 2, 5), oneBlockPerProcess=False)
-        dh = ParallelDataHandling(blocks)
-        kernel_execution_jacobi(dh, pystencils.Target.GPU)
-        reduction(dh)
+    # 3D
+    blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4), cellsPerBlock=(3, 2, 5), oneBlockPerProcess=False)
+    dh = ParallelDataHandling(blocks, default_target=target)
+    kernel_execution_jacobi(dh, target)
+    reduction(dh)

-        # 2D
-        blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 1), cellsPerBlock=(3, 2, 1), oneBlockPerProcess=False)
-        dh = ParallelDataHandling(blocks, dim=2)
-        kernel_execution_jacobi(dh, pystencils.Target.GPU)
-        reduction(dh)
+    # 2D
+    blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 1), cellsPerBlock=(3, 2, 1), oneBlockPerProcess=False)
+    dh = ParallelDataHandling(blocks, dim=2, default_target=target)
+    kernel_execution_jacobi(dh, target)
+    reduction(dh)


 def test_vtk_output():
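
Promoting the gpu flag loop to @pytest.mark.parametrize makes each target a separate test case: the CPU run is reported even when the GPU case is skipped, and failures are attributed per target. A self-contained sketch of the idiom, with plain strings standing in for pystencils.Target.CPU and pystencils.Target.GPU:

import pytest

TARGETS = ('cpu', 'gpu')  # stand-ins for pystencils.Target members

@pytest.mark.parametrize('target', TARGETS)
def test_per_target(target):
    # pytest generates one test per parameter value, so a skip here
    # affects only the 'gpu' case instead of aborting a shared loop body.
    if target == 'gpu':
        pytest.importorskip('waLBerla.cuda')
    assert target in TARGETS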
@@ -90,7 +86,7 @@ def test_block_iteration():
     num_blocks = (2, 2, 2)
     blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False)
     dh = ParallelDataHandling(blocks, default_ghost_layers=2)
-    dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2, gpu=True)
+    dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2)
     for b in dh.iterate():
         b['v'].fill(1)
@@ -113,10 +109,12 @@ def test_block_iteration():

 def test_getter_setter():
+    pytest.importorskip('waLBerla.cuda')
     block_size = (2, 2, 2)
     num_blocks = (2, 2, 2)
     blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False)
-    dh = ParallelDataHandling(blocks, default_ghost_layers=2)
+    dh = ParallelDataHandling(blocks, default_ghost_layers=2, default_target=pystencils.Target.GPU)
     dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2, gpu=True)
     assert dh.shape == (4, 4, 4)
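
This hunk moves target selection to construction time: ParallelDataHandling(blocks, ..., default_target=pystencils.Target.GPU) fixes the default once instead of deciding per call. A hedged sketch of the same pattern using the serial data handling so it runs without waLBerla; the serial setup is an illustrative assumption, not part of this diff:

from pystencils import Target, create_data_handling

# Serial stand-in for ParallelDataHandling: the target fixed at
# construction is what kernels run through dh will default to.
dh = create_data_handling(domain_size=(4, 4), default_target=Target.CPU)
f = dh.add_array('f', values_per_cell=1)
dh.fill(f.name, 0.0)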
@@ -135,12 +133,14 @@ def test_getter_setter():

 def test_parallel_datahandling_boundary_conditions():
     pytest.importorskip('waLBerla.cuda')
-    dh = create_data_handling(domain_size=(7, 7), periodicity=True, parallel=True, default_target=pystencils.Target.GPU)
-    src = dh.add_array('src')
+    dh = create_data_handling(domain_size=(7, 7), periodicity=True, parallel=True,
+                              default_target=pystencils.Target.GPU)
+    src = dh.add_array('src', values_per_cell=1)
     src2 = dh.add_array('src2')
     dh.fill("src", 0.0, ghost_layers=True)
     dh.fill("src", 1.0, ghost_layers=False)
-    src_cpu = dh.add_array('src_cpu', gpu=False)
+    src_cpu = dh.add_array('src_cpu', values_per_cell=1, gpu=False)
     dh.fill("src_cpu", 0.0, ghost_layers=True)
     dh.fill("src_cpu", 1.0, ghost_layers=False)
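
The recurring small change in this file is that add_array now always states values_per_cell=1 explicitly, declaring a scalar field per cell. A minimal sketch of the call pattern, again with a serial data handling as an assumed stand-in so it runs without waLBerla; the call signatures mirror the diff:

from pystencils import create_data_handling

dh = create_data_handling(domain_size=(7, 7), periodicity=True)
src = dh.add_array('src', values_per_cell=1)  # explicit scalar field, as in the updated tests
dh.fill('src', 0.0, ghost_layers=True)   # initialise including ghost layers
dh.fill('src', 1.0, ghost_layers=False)  # then set the interior only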
@@ -170,6 +170,7 @@ def test_parallel_datahandling_boundary_conditions():
     assert dh.custom_data_names == ('boundary_handling_cpuIndexArrays', 'boundary_handling_gpuIndexArrays')
     dh.swap("src", "src2", gpu=True)


 def test_save_data():
     domain_shape = (2, 2)