Commit ed0a6927, authored 5 years ago by Stephan Seitz

Save initial draft GraphDataHandling

Parent: 85484500
Changes: 2 changed files with 245 additions and 0 deletions

  pystencils/datahandling/graph_datahandling.py   +177 −0
  pystencils_tests/test_graph_datahandling.py      +68 −0
pystencils/datahandling/graph_datahandling.py  (new file, mode 100644)  +177 −0
# -*- coding: utf-8 -*-
#
# Copyright © 2019 Stephan Seitz <stephan.seitz@fau.de>
#
# Distributed under terms of the GPLv3 license.
"""
"""
from enum import Enum

import numpy as np

import pystencils.datahandling
import pystencils.kernel_wrapper
from pystencils.field import FieldType


class DataTransferKind(str, Enum):
    UNKNOWN = None
    HOST_ALLOC = 'HOST_ALLOC'
    DEVICE_ALLOC = 'DEVICE_ALLOC'
    HOST_TO_DEVICE = 'HOST_TO_DEVICE'
    DEVICE_TO_HOST = 'DEVICE_TO_HOST'
    HOST_COMMUNICATION = 'HOST_COMMUNICATION'
    DEVICE_COMMUNICATION = 'DEVICE_COMMUNICATION'
    HOST_SWAP = 'HOST_SWAP'
    DEVICE_SWAP = 'DEVICE_SWAP'
    HOST_GATHER = 'HOST_GATHER'
    DEVICE_GATHER = 'DEVICE_GATHER'

    def is_alloc(self):
        return self in [self.HOST_ALLOC, self.DEVICE_ALLOC]

    def is_transfer(self):
        # Fixed: the draft referenced a non-existent member ``self.SWAP``.
        return self in [self.HOST_TO_DEVICE, self.DEVICE_TO_HOST, self.HOST_SWAP, self.DEVICE_SWAP]


class DataTransfer:
    def __init__(self, field: pystencils.Field, kind: DataTransferKind):
        self.field = field
        self.kind = kind


class Swap(DataTransfer):
    def __init__(self, source, destination, gpu):
        self.kind = DataTransferKind.DEVICE_SWAP if gpu else DataTransferKind.HOST_SWAP
        self.field = source
        self.destination = destination


class Communication(DataTransfer):
    def __init__(self, field, stencil, gpu):
        self.kind = DataTransferKind.DEVICE_COMMUNICATION if gpu else DataTransferKind.HOST_COMMUNICATION
        self.field = field
        self.stencil = stencil


class KernelCall:
    def __init__(self, kernel: pystencils.kernel_wrapper.KernelWrapper, kwargs):
        self.kernel = kernel
        self.kwargs = kwargs

    def __str__(self):
        return "Call " + str(self.kernel.ast.function_name)


class GraphDataHandling(pystencils.datahandling.SerialDataHandling):
    """Data handling that records all operations performed on it (allocations,
    transfers, swaps, communications and kernel calls) in ``self.call_queue``."""

    class TimeLoop(pystencils.TimeLoop):
        def __init__(self, parent, *args, **kwargs):
            self.parent = parent
            super().__init__(*args, **kwargs)

        def add_pre_run_function(self, f):
            self._pre_run_functions.append(f)

        def add_post_run_function(self, f):
            self._post_run_functions.append(f)

        def add_single_step_function(self, f):
            self._single_step_functions.append(f)

        def add_call(self, functor, argument_list):
            if hasattr(functor, 'kernel'):
                functor = functor.kernel
            if not isinstance(argument_list, list):
                argument_list = [argument_list]

            for argument_dict in argument_list:
                self._call_data.append((functor, argument_dict))

    def __init__(self, *args, **kwargs):
        self.call_queue = []
        super().__init__(*args, **kwargs)

    def add_array(self, name, values_per_cell=1, dtype=np.float64, latex_name=None, ghost_layers=None,
                  layout=None, cpu=True, gpu=None, alignment=False, field_type=FieldType.GENERIC):
        super().add_array(name, values_per_cell, dtype, latex_name, ghost_layers,
                          layout, cpu, gpu, alignment, field_type)
        if cpu:
            self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.HOST_ALLOC))
        if gpu:
            self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.DEVICE_ALLOC))

    def add_custom_data(self, name, cpu_creation_function,
                        gpu_creation_function=None, cpu_to_gpu_transfer_func=None, gpu_to_cpu_transfer_func=None):
        self.call_queue.append('custom data')  # TODO: represent custom data creation explicitly
        super().add_custom_data(name, cpu_creation_function, gpu_creation_function,
                                cpu_to_gpu_transfer_func, gpu_to_cpu_transfer_func)

    def gather_array(self, name, slice_obj=None, ghost_layers=False, **kwargs):
        self.call_queue.append('gather_array')
        super().gather_array(name, slice_obj, ghost_layers, **kwargs)

    def swap(self, name1, name2, gpu=None):
        self.call_queue.append(Swap(self._fields[name1], self._fields[name2], gpu))
        super().swap(name1, name2, gpu)

    def run_kernel(self, kernel_function, **kwargs):
        self.call_queue.append(KernelCall(kernel_function, kwargs))
        # Deliberately skip calling super(): kernel calls are only recorded, not executed.

    def to_cpu(self, name):
        # Fixed: copying to the CPU is a device-to-host transfer
        # (the draft recorded HOST_TO_DEVICE here).
        self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.DEVICE_TO_HOST))

    def to_gpu(self, name):
        if name in self._custom_data_transfer_functions:
            self.call_queue.append('Custom Transfer Function')
        else:
            # Fixed: copying to the GPU is a host-to-device transfer
            # (the draft recorded DEVICE_TO_HOST here).
            self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.HOST_TO_DEVICE))

    def synchronization_function(self, names, stencil=None, target=None, **_):
        for name in names:
            gpu = target == 'gpu'  # Fixed: the draft compared against 'cpu'.
            self.call_queue.append(Communication(self._fields[name], stencil, gpu))

    def __str__(self):
        # Fixed: join the entries of the queue, not the characters of its repr.
        return '\n'.join(str(c) for c in self.call_queue)

    def create_timeloop(self, *args, **kwargs):
        return self.TimeLoop(self, *args, **kwargs)

    def fill(self, array_name: str, val, value_idx,
             slice_obj=None, ghost_layers=False, inner_ghost_layers=False) -> None:
        # Fixed: the draft called the list ``self.call_queue(...)`` and passed
        # ``self`` twice to ``super().fill``.
        self.call_queue.append('Fill ' + array_name)
        super().fill(array_name, val, value_idx, slice_obj, ghost_layers, inner_ghost_layers)

    # TODO
    # def reduce_float_sequence(self, sequence, operation, all_reduce=False) -> np.array:
    #     return np.array(sequence)

    # def reduce_int_sequence(self, sequence, operation, all_reduce=False) -> np.array:
    #     return np.array(sequence)

    # def create_vtk_writer(self, file_name, data_names, ghost_layers=False):
    #     pass

    # def create_vtk_writer_for_flag_array(self, file_name, data_name, masks_to_name, ghost_layers=False):
    #     pass
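
A minimal usage sketch (not part of this commit) of how GraphDataHandling records operations; the field names 'src' and 'dst' and the domain size are hypothetical, and with the default CPU target only host-side entries end up in the queue:

from pystencils.datahandling.graph_datahandling import GraphDataHandling

dh = GraphDataHandling((16, 16))  # behaves like SerialDataHandling, but also records
dh.add_array('src')               # queues a DataTransfer with kind HOST_ALLOC
dh.add_array('dst')               # queues a DataTransfer with kind HOST_ALLOC
dh.swap('src', 'dst')             # queues a Swap (HOST_SWAP, since gpu is falsy here)
print(dh)                         # one recorded operation per line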
pystencils_tests/test_graph_datahandling.py  (new file, mode 100644)  +68 −0
# -*- coding: utf-8 -*-
#
# Copyright © 2019 Stephan Seitz <stephan.seitz@fau.de>
#
# Distributed under terms of the GPLv3 license.
"""
"""
import pytest

pytest.importorskip('lbmpy')  # Moved before the lbmpy imports so the skip takes effect.

from lbmpy.boundaries import UBB, NoSlip
from lbmpy.lbstep import LatticeBoltzmannStep
from pystencils.datahandling.graph_datahandling import GraphDataHandling
from pystencils.slicing import slice_from_direction


def create_lid_driven_cavity(domain_size=None, lid_velocity=0.005, lbm_kernel=None,
                             parallel=False, data_handling=None, **kwargs):
    """Creates a lid driven cavity scenario.

    Args:
        domain_size: tuple specifying the number of cells in each dimension
        lid_velocity: x velocity of lid in lattice coordinates.
        lbm_kernel: an LBM kernel function; if None, one is created automatically
        parallel: True for distributed memory parallelization with walberla
        data_handling: see documentation of :func:`create_fully_periodic_flow`
        kwargs: other parameters are passed on to the method, see :mod:`lbmpy.creationfunctions`
    Returns:
        instance of :class:`Scenario`
    """
    assert domain_size is not None or data_handling is not None
    if data_handling is None:
        optimization = kwargs.get('optimization', None)
        target = optimization.get('target', None) if optimization else None
        data_handling = GraphDataHandling(domain_size,
                                          periodicity=False,
                                          default_ghost_layers=1,
                                          default_target=target)
    step = LatticeBoltzmannStep(data_handling=data_handling, lbm_kernel=lbm_kernel, name="ldc", **kwargs)

    my_ubb = UBB(velocity=[lid_velocity, 0, 0][:step.method.dim])
    step.boundary_handling.set_boundary(my_ubb, slice_from_direction('N', step.dim))
    for direction in ('W', 'E', 'S') if step.dim == 2 else ('W', 'E', 'S', 'T', 'B'):
        step.boundary_handling.set_boundary(NoSlip(), slice_from_direction(direction, step.dim))

    return step


def ldc_setup(**kwargs):
    ldc = create_lid_driven_cavity(relaxation_rate=1.7, **kwargs)
    ldc.run(50)
    return ldc.density_slice()


def test_graph_datahandling():
    print("--- LDC 2D test ---")

    opt_params = {'target': 'gpu', 'gpu_indexing_params': {'block_size': (8, 4, 2)}}
    lbm_step: LatticeBoltzmannStep = ldc_setup(domain_size=(10, 15), parallel=False,
                                               optimization=opt_params)
    print(lbm_step._data_handling)
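
Assuming pytest is available, the new test can be run on its own; the importorskip call makes it skip cleanly when lbmpy is not installed, and the -s flag shows the printed call queue:

pytest -s pystencils_tests/test_graph_datahandling.py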