Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision
  • Sparse
  • WallLaw
  • improved_comm
  • master
  • release/0.2.1
  • release/0.2.10
  • release/0.2.11
  • release/0.2.12
  • release/0.2.13
  • release/0.2.14
  • release/0.2.15
  • release/0.2.2
  • release/0.2.3
  • release/0.2.4
  • release/0.2.5
  • release/0.2.6
  • release/0.2.7
  • release/0.2.8
  • release/0.2.9
  • release/0.3.0
  • release/0.3.1
  • release/0.3.2
  • release/0.3.3
  • release/0.3.4
  • release/0.4.0
  • release/0.4.1
  • release/0.4.2
  • release/0.4.3
  • release/0.4.4
  • release/1.0
  • release/1.0.1
  • release/1.1
  • release/1.1.1
  • release/1.2
  • release/1.3
  • release/1.3.1
  • release/1.3.2
  • release/1.3.3
  • release/1.3.4
  • release/1.3.5
  • release/1.3.6
  • release/1.3.7
42 results

Target

Select target project
  • ravi.k.ayyala/lbmpy
  • brendan-waters/lbmpy
  • anirudh.jonnalagadda/lbmpy
  • jbadwaik/lbmpy
  • alexander.reinauer/lbmpy
  • itischler/lbmpy
  • he66coqe/lbmpy
  • ev81oxyl/lbmpy
  • Bindgen/lbmpy
  • da15siwa/lbmpy
  • holzer/lbmpy
  • RudolfWeeber/lbmpy
  • pycodegen/lbmpy
13 results
Select Git revision
  • GetterSetterAPI
  • HRR
  • HydroPressure
  • InplaceConfig
  • Outflow
  • PhaseField
  • Sparse
  • UBBVelocity
  • UpdateAPISparse
  • WallLaw
  • WetNodeBoundaries
  • csebug
  • feature/sparse
  • feature/try
  • improved_comm
  • install_requires
  • master
  • phaseField
  • relaxationrates
  • test_martin
  • release/0.2.1
  • release/0.2.10
  • release/0.2.11
  • release/0.2.12
  • release/0.2.13
  • release/0.2.14
  • release/0.2.15
  • release/0.2.2
  • release/0.2.3
  • release/0.2.4
  • release/0.2.5
  • release/0.2.6
  • release/0.2.7
  • release/0.2.8
  • release/0.2.9
  • release/0.3.0
  • release/0.3.1
  • release/0.3.2
  • release/0.3.3
  • release/0.3.4
  • release/0.4.0
  • release/0.4.1
  • release/0.4.2
  • release/0.4.3
  • release/0.4.4
  • release/1.0
  • release/1.0.1
  • release/1.1
  • release/1.1.1
  • release/1.2
  • release/1.3
  • release/1.3.1
  • release/1.3.2
  • release/1.3.3
  • release/1.3.4
  • release/1.3.5
  • release/1.3.6
57 results
Show changes
Showing
with 3743 additions and 72 deletions
...@@ -6,21 +6,23 @@ import sympy as sp ...@@ -6,21 +6,23 @@ import sympy as sp
from lbmpy.moments import polynomial_to_exponent_representation from lbmpy.moments import polynomial_to_exponent_representation
from pystencils.cache import disk_cache, memorycache from pystencils.cache import disk_cache, memorycache
from pystencils.sympyextensions import complete_the_squares_in_exp from pystencils.sympyextensions import complete_the_squares_in_exp, scalar_product
@memorycache() @memorycache()
def moment_generating_function(generating_function, symbols, symbols_in_result): def moment_generating_function(generating_function, symbols, symbols_in_result, velocity=None):
r""" r"""
Computes the moment generating function of a probability distribution. It is defined as: Computes the moment generating function of a probability distribution. It is defined as:
.. math :: .. math ::
F[f(\mathbf{x})](\mathbf{t}) = \int e^{<\mathbf{x}, \mathbf{t}>} f(x)\; dx F[f(\mathbf{x})](t) = \int e^{<\mathbf{x}, t>} f(\mathbf{x})\; dx
Args: Args:
generating_function: sympy expression generating_function: sympy expression
symbols: a sequence of symbols forming the vector x symbols: a sequence of symbols forming the vector :math:`\mathbf{x}`
symbols_in_result: a sequence forming the vector t symbols_in_result: a sequence forming the vector t
velocity: if the generating function generates central moments, the velocity needs to be substracted. Thus the
velocity symbols need to be passed. All generating functions need to have the same parameters.
Returns: Returns:
transformation result F: an expression that depends now on symbols_in_result transformation result F: an expression that depends now on symbols_in_result
...@@ -55,9 +57,27 @@ def moment_generating_function(generating_function, symbols, symbols_in_result): ...@@ -55,9 +57,27 @@ def moment_generating_function(generating_function, symbols, symbols_in_result):
return sp.simplify(result) return sp.simplify(result)
def cumulant_generating_function(func, symbols, symbols_in_result): def central_moment_generating_function(func, symbols, symbols_in_result, velocity=sp.symbols("u_:3")):
r"""
Computes central moment generating func, which is defined as:
.. math ::
K( \mathbf{\Xi} ) = \exp ( - \mathbf{\Xi} \cdot \mathbf{u} ) M( \mathbf{\Xi} ).
For parameter description see :func:`moment_generating_function`.
""" """
Computes cumulant generating func, which is the logarithm of the moment generating func. argument = - scalar_product(symbols_in_result, velocity)
return sp.exp(argument) * moment_generating_function(func, symbols, symbols_in_result)
def cumulant_generating_function(func, symbols, symbols_in_result, velocity=None):
r"""
Computes cumulant generating func, which is the logarithm of the moment generating func:
.. math ::
C(\mathbf{\Xi}) = \log M(\mathbf{\Xi})
For parameter description see :func:`moment_generating_function`. For parameter description see :func:`moment_generating_function`.
""" """
return sp.ln(moment_generating_function(func, symbols, symbols_in_result)) return sp.ln(moment_generating_function(func, symbols, symbols_in_result))
...@@ -93,16 +113,16 @@ def multi_differentiation(generating_function, index, symbols): ...@@ -93,16 +113,16 @@ def multi_differentiation(generating_function, index, symbols):
@memorycache(maxsize=512) @memorycache(maxsize=512)
def __continuous_moment_or_cumulant(func, moment, symbols, generating_function): def __continuous_moment_or_cumulant(func, moment, symbols, generating_function, velocity=sp.symbols("u_:3")):
if type(moment) is tuple and not symbols: if type(moment) is tuple and not symbols:
symbols = sp.symbols("xvar yvar zvar") symbols = sp.symbols("xvar yvar zvar")
dim = len(moment) if type(moment) is tuple else len(symbols) dim = len(moment) if type(moment) is tuple else len(symbols)
# not using sp.Dummy here - since it prohibits caching # not using sp.Dummy here - since it prohibits caching
t = tuple([sp.Symbol("tmpvar_%d" % i, ) for i in range(dim)]) t = sp.symbols(f"tmpvar_:{dim}")
symbols = symbols[:dim] symbols = symbols[:dim]
generating_function = generating_function(func, symbols, t) generating_function = generating_function(func, symbols, t, velocity=velocity)
if type(moment) is tuple: if type(moment) is tuple:
return multi_differentiation(generating_function, moment, t) return multi_differentiation(generating_function, moment, t)
...@@ -128,6 +148,18 @@ def continuous_moment(func, moment, symbols=None): ...@@ -128,6 +148,18 @@ def continuous_moment(func, moment, symbols=None):
return __continuous_moment_or_cumulant(func, moment, symbols, moment_generating_function) return __continuous_moment_or_cumulant(func, moment, symbols, moment_generating_function)
def continuous_central_moment(func, moment, symbols=None, velocity=sp.symbols("u_:3")):
"""Computes central moment of given function.
Args:
func: function to compute moments of
moment: tuple or polynomial describing the moment
symbols: if moment is given as polynomial, pass the moment symbols, i.e. the dof of the polynomial
"""
return __continuous_moment_or_cumulant(func, moment, symbols, central_moment_generating_function,
velocity=velocity)
def continuous_cumulant(func, moment, symbols=None): def continuous_cumulant(func, moment, symbols=None):
"""Computes cumulant of continuous function. """Computes cumulant of continuous function.
......
r"""
Creating LBM kernels and Parameter Specifications
-------------------------------------------------
Kernel functions are created in up to five steps, represented by the
python functions `create_lb_method`, *create_lb_collision_rule/create_lb_update_rule*, `create_lb_ast` and
`create_lb_function`. Each of those functions is configured with three data classes.
One dataclass defines the lattice Boltzmann method itself. This class is called `LBMConfig`. It defines, for example,
which collision space or LB stencil should be used.
The second one determines optimisations that are specific to the LBM. Optimisations like the
common subexpression elimination. Most of these optimisations act on the assignment level.
This means they only manipulate the assignments. The config class is called `LBMOptimisation`.
The third data class determines hardware optimisation. This means that contrary to the `LBMOptimisation` class,
it acts on the level of the abstract syntax tree. Thus, it is independent of the assignments and the LBM
and belongs to pystencils, not lbmpy. This can be found in the pystencils module as
'pystencils.kernelcreation.CreateKernelConfig'. With this class, for example, the target (CPU, GPU etc.)
of the generated code is specified.
1. *Method*:
the method defines the collision process. Currently, there are two big categories:
moment and cumulant based methods. A method defines how each moment or cumulant is relaxed by
storing the equilibrium value and the relaxation rate for each moment/cumulant.
2. *Collision/Update Rule*:
Methods can generate a "collision rule" which is an equation collection that define the
post collision values as a function of the pre-collision values. On these equation collection
simplifications are applied to reduce the number of floating point operations.
At this stage an entropic optimisation step can also be added to determine one relaxation rate by an
entropy condition.
Then a streaming rule is added which transforms the collision rule into an update rule.
The streaming step depends on the pdf storage (source/destination, AABB pattern, EsoTwist).
Currently only the simple source/destination pattern is supported.
3. *AST*:
The abstract syntax tree describes the structure of the kernel, including loops and conditionals.
The ast can be modified, e.g., to add OpenMP pragmas, reorder loops or apply other optimisations.
4. *Function*:
This step compiles the AST into an executable function, either for CPU or GPUs. This function
behaves like a normal Python function and runs one LBM time step.
Each stage (apart from *Function*) also adds its result to the given `LBMConfig` object. The `LBMConfig`
thus coalesces all information defining the LBM kernel.
The function :func:`create_lb_function` runs the whole pipeline, the other functions in this module
execute this pipeline only up to a certain step. Each function optionally also takes the result of the previous step.
For example, to modify the AST one can run::
ast = create_lb_ast(...)
# modify ast here
func = create_lb_function(ast=ast, ...)
"""
import copy
from dataclasses import dataclass, field, replace
from typing import Union, List, Tuple, Any, Type, Iterable
from warnings import warn, filterwarnings
from ._compat import IS_PYSTENCILS_2
import sympy as sp
from lbmpy.enums import Stencil, Method, ForceModel, CollisionSpace, SubgridScaleModel
import lbmpy.forcemodels as forcemodels
from lbmpy.fieldaccess import CollideOnlyInplaceAccessor, PdfFieldAccessor, PeriodicTwoFieldsAccessor
from lbmpy.fluctuatinglb import add_fluctuations_to_collision_rule
from lbmpy.partially_saturated_cells import (replace_by_psm_collision_rule, PSMConfig,
add_psm_solid_collision_to_collision_rule)
from lbmpy.non_newtonian_models import add_cassons_model, CassonsParameters
from lbmpy.methods import (create_mrt_orthogonal, create_mrt_raw, create_central_moment,
create_srt, create_trt, create_trt_kbc)
from lbmpy.methods.creationfunctions import CollisionSpaceInfo
from lbmpy.methods.creationfunctions import (
create_with_monomial_cumulants, create_cumulant, create_with_default_polynomial_cumulants)
from lbmpy.methods.momentbased.entropic import add_entropy_condition, add_iterative_entropy_condition
from lbmpy.relaxationrates import relaxation_rate_from_magic_number
from lbmpy.simplificationfactory import create_simplification_strategy
from lbmpy.stencils import LBStencil
from lbmpy.turbulence_models import add_sgs_model
from lbmpy.updatekernels import create_lbm_kernel, create_stream_pull_with_output_kernel
from lbmpy.advanced_streaming.utility import Timestep, get_accessor
from .forcemodels import AbstractForceModel
import pystencils
from pystencils import CreateKernelConfig, create_kernel
from pystencils.cache import disk_cache_no_fallback
from pystencils.field import Field
from pystencils.simp import sympy_cse, SimplificationStrategy
# needed for the docstring
from lbmpy.methods.abstractlbmethod import LbmCollisionRule, AbstractLbMethod
from lbmpy.methods.cumulantbased import CumulantBasedLbMethod
if IS_PYSTENCILS_2:
from pystencils import Kernel as KernelFunction
else:
from pystencils.astnodes import KernelFunction
# Filter out JobLib warnings. They are not useful for use:
# https://github.com/joblib/joblib/issues/683
filterwarnings("ignore", message="Persisting input arguments took")
@dataclass
class LBMConfig:
    """
    **Below all parameters for the LBMConfig are explained**
    """
    stencil: LBStencil = LBStencil(Stencil.D2Q9)
    """
    All stencils are defined in :class:`lbmpy.enums.Stencil`. From that :class:`lbmpy.stencils.LBStencil`
    class will be created
    """
    method: Method = Method.SRT
    """
    Name of lattice Boltzmann method. Defined by :class:`lbmpy.enums.Method`.
    This determines the selection and relaxation pattern of moments/cumulants, i.e. which moment/cumulant basis is
    chosen, and which of the basis vectors are relaxed together
    """
    relaxation_rates: Iterable = None
    """
    Sequence of relaxation rates, number depends on selected method. If you specify more rates than
    method needs, the additional rates are ignored.
    If no relaxation rates are specified, the parameter `relaxation_rate` will be consulted.
    """
    relaxation_rate: Union[int, float, Type[sp.Symbol]] = None
    """
    The method's primary relaxation rate. In most cases, this is the relaxation rate governing shear viscosity.
    For SRT, this is the only relaxation rate.
    For TRT, the second relaxation rate is then determined via magic number.
    In the case of raw moment, central moment, and cumulant-based MRT methods, all other relaxation rates will be
    set to unity.
    If neither `relaxation_rate` nor `relaxation_rates` is specified, the behaviour is as if
    `relaxation_rate=sp.Symbol('omega')` was set.
    """
    compressible: bool = False
    """
    Affects the selection of equilibrium moments. Both options approximate the *incompressible*
    Navier-Stokes equations. When set to `False` the approximation is better; the standard LBM derivation
    is compressible.
    """
    zero_centered: bool = True
    """
    Governs the storage format of populations. If `False`, the discrete particle distribution vector is stored in its
    absolute form. If `True`, instead, only the distribution's deviation from its rest state (typically given by the
    lattice weights) is stored.
    """
    delta_equilibrium: bool = None
    """
    Determines whether or not the (continuous or discrete, see `continuous_equilibrium`) Maxwellian equilibrium is
    expressed in its absolute form, or only by its deviation from the rest state (typically given by the reference
    density and zero velocity). This parameter is only effective if `zero_centered` is set to `True`. Then, if
    `delta_equilibrium` is `False`, the rest state must be reintroduced to the populations during collision. Otherwise,
    if `delta_equilibrium` is `True`, the collision equations can be derived using only the deviations from the rest
    state.
    If `None` is passed to `delta_equilibrium`, its value will be chosen automatically, depending on the value of
    `zero_centered` and the chosen `method`.
    """
    equilibrium_order: int = 2
    """
    Order in velocity, at which the equilibrium moment approximation is
    truncated. Order 2 is sufficient to approximate Navier-Stokes. This parameter has no effect on cumulant-based
    methods, whose equilibrium terms have no contributions above order one.
    """
    c_s_sq: sp.Expr = sp.Rational(1, 3)
    """
    The squared lattice speed of sound used to derive the LB method. It is very uncommon to use a value different
    to 1 / 3.
    """
    weighted: bool = True
    """
    Affects only orthogonal MRT methods. If set to True a weighted Gram-Schmidt procedure is used to orthogonalise
    the moments.
    """
    nested_moments: List[List] = None
    """
    A list of lists of modes, grouped by common relaxation times. This is usually used in
    conjunction with `lbmpy.methods.default_moment_sets.mrt_orthogonal_modes_literature`.
    If this argument is not provided, Gram-Schmidt orthogonalisation of the default modes is performed.
    """
    force_model: Union[AbstractForceModel, ForceModel] = None
    """
    Force model to determine how forcing terms enter the collision rule.
    Possibilities are defined in :class:`lbmpy.enums.ForceModel`
    """
    force: Union[Tuple, Field] = (0, 0, 0)
    """
    Either constant force or a symbolic expression depending on field value
    """
    continuous_equilibrium: bool = True
    """
    Way to compute equilibrium moments/cumulants, if False the standard discretised LBM equilibrium is used,
    otherwise the equilibrium moments are computed from the continuous Maxwellian. This makes only a
    difference if sparse stencils are used e.g. D2Q9 and D3Q27 are not affected, D3Q19 and D3Q15 are affected.
    """
    maxwellian_moments: bool = None
    """
    Deprecated and due for removal by version 0.5; use `continuous_equilibrium` instead.
    """
    # Note: no trailing comma here. A trailing comma after `None` would turn the
    # default into the one-element tuple `(None,)`, which is truthy and breaks
    # every `initial_velocity is None` check downstream.
    initial_velocity: Tuple = None
    """
    Initial velocity in domain, can either be a tuple (x,y,z) velocity to set a constant
    velocity everywhere, or a numpy array with the same size of the domain, with a last coordinate of shape dim to set
    velocities on cell level
    """
    galilean_correction: bool = False
    """
    Special correction for D3Q27 cumulant LBMs. For Details see
    :mod:`lbmpy.methods.cumulantbased.galilean_correction`
    """
    fourth_order_correction: Union[float, bool] = False
    """
    Special correction for rendering D3Q27 cumulant LBMs fourth-order accurate in diffusion. For Details see
    :mod:`lbmpy.methods.cumulantbased.fourth_order_correction`. If set to `True`, the fourth-order correction is
    employed without limiters (or more precisely with a very high limiter, practically disabling the limiters). If this
    variable is set to a number, the latter is used for the limiters (uniformly for omega_3, omega_4 and omega_5).
    """
    collision_space_info: CollisionSpaceInfo = None
    """
    Information about the LB method's collision space (see :class:`lbmpy.methods.creationfunctions.CollisionSpaceInfo`)
    including the classes defining how populations are transformed to these spaces.
    If left at `None`, it will be inferred according to the value of `method`.
    If an instance of the :class:`lbmpy.enums.CollisionSpace` enum is passed, a
    :class:`lbmpy.method.CollisionSpaceInfo` instance with the space's default setup is created.
    Otherwise, the selected collision space must be in accord with the chosen :class:`lbmpy.enum.Method`.
    """
    entropic: bool = False
    """
    In case there are two distinct relaxation rate in a method, one of them (usually the one, not
    determining the viscosity) can be automatically chosen w.r.t an entropy condition. For details see
    :mod:`lbmpy.methods.momentbased.entropic`
    """
    entropic_newton_iterations: int = None
    """
    For moment methods the entropy optimum can be calculated in closed form.
    For cumulant methods this is not possible, in that case it is computed using Newton iterations.
    This parameter can be used to force Newton iterations and specify how many should be done
    """
    omega_output_field: Field = None
    """
    A pystencils Field can be passed here, where the calculated free relaxation rate of
    an entropic or subgrid-scale method is written to
    """
    eddy_viscosity_field: Field = None
    """
    A pystencils Field can be passed here, where the eddy-viscosity of a subgrid-scale model is written.
    """
    subgrid_scale_model: Union[SubgridScaleModel, tuple[SubgridScaleModel, float], tuple[SubgridScaleModel, int]] = None
    """
    Choose a subgrid-scale model (SGS) for large-eddy simulations. ``omega_output_field`` can be set to
    write out adapted relaxation rates. Either provide just the SGS and use the default model constants or provide a
    tuple of the SGS and its corresponding model constant.
    """
    cassons: CassonsParameters = False
    """
    Adds the Cassons model according to https://doi.org/10.1007/s10955-005-8415-x
    The parameters are set with the ``CassonsParameters`` dataclass.
    """
    fluctuating: dict = False
    """
    Enables fluctuating lattice Boltzmann by randomizing collision process.
    Pass dictionary with parameters to ``lbmpy.fluctuatinglb.add_fluctuations_to_collision_rule``.
    Can only be used for weighted MRT collision operators.
    """
    temperature: Any = None
    """
    Temperature for fluctuating lattice Boltzmann methods.
    """
    psm_config: PSMConfig = None
    """
    If a PSM config is specified, (1 - fractionField) is added to the relaxation rates of the collision
    and to the potential force term, and a solid collision is built and added to the main assignments.
    """
    output: dict = field(default_factory=dict)
    """
    A dictionary mapping macroscopic quantities e.g. the strings 'density' and 'velocity' to pystencils
    fields. In each timestep the corresponding quantities are written to the given fields. Possible input would be:
    {'density': density_field, 'velocity': velocity_field}
    """
    velocity_input: Field = None
    """
    Symbolic field where the velocities are read from. If `None` is given the velocity is calculated inplace from
    with first order moments.
    """
    density_input: Field = None
    """
    Symbolic field where the density is read from. If `None` is given the density is calculated inplace from
    with zeroth order moment.
    """
    conserved_moments: bool = True
    """
    If lower order moments are conserved or not. If velocity or density input is set the lower order moments are not
    conserved anymore.
    """
    kernel_type: Union[str, Type[PdfFieldAccessor]] = 'default_stream_collide'
    """
    Supported values: ``'default_stream_collide'`` (default), ``'collide_only'``, ``'stream_pull_only'``.
    With ``'default_stream_collide'``, streaming pattern and even/odd time-step (for in-place patterns) can be specified
    by the ``streaming_pattern`` and ``timestep`` arguments. For backwards compatibility, ``kernel_type`` also accepts
    ``'stream_pull_collide'``, ``'collide_stream_push'``, ``'esotwist_even'``, ``'esotwist_odd'``, ``'aa_even'``
    and ``'aa_odd'`` for selection of the streaming pattern.
    """
    streaming_pattern: str = 'pull'
    """
    The streaming pattern to be used with a ``'default_stream_collide'`` kernel. Accepted values are
    ``'pull'``, ``'push'``, ``'aa'`` and ``'esotwist'``.
    """
    timestep: Timestep = Timestep.BOTH
    """
    Timestep modulus for the streaming pattern. For two-fields patterns, this argument is irrelevant and
    by default set to ``Timestep.BOTH``. For in-place patterns, ``Timestep.EVEN`` or ``Timestep.ODD`` must be specified.
    """
    field_name: str = 'src'
    """
    Name of the PDF field.
    """
    temporary_field_name: str = 'dst'
    """
    Name of the temporary PDF field.
    """
    lb_method: Type[AbstractLbMethod] = None
    """
    Instance of `lbmpy.methods.abstractlbmethod.AbstractLbMethod`. If this parameter is `None`, the lb_method is derived
    via `create_lb_method`.
    """
    collision_rule: LbmCollisionRule = None
    """
    Instance of :class:`lbmpy.methods.LbmCollisionRule`. If this parameter is `None`,
    the collision rule is derived via *create_lb_collision_rule*.
    """
    update_rule: LbmCollisionRule = None
    """
    Instance of :class:`lbmpy.methods.LbmCollisionRule`. If this parameter is `None`,
    the update rule is derived via *create_lb_update_rule*.
    """
    ast: KernelFunction = None
    """
    Instance of *pystencils.KernelFunction*. If this parameter is `None`,
    the ast is derived via `create_lb_ast`.
    """

    def __post_init__(self):
        """Validate the configuration and infer every parameter left at its default."""
        if isinstance(self.method, str):
            new_method = Method[self.method.upper()]
            warn(f'Method "{self.method}" as str is deprecated. Use {new_method} instead')
            self.method = new_method

        if self.maxwellian_moments is not None:
            warn("Argument 'maxwellian_moments' is deprecated and will be removed by version 0.5."
                 "Use `continuous_equilibrium` instead.")
            self.continuous_equilibrium = self.maxwellian_moments

        if not isinstance(self.stencil, LBStencil):
            self.stencil = LBStencil(self.stencil)

        if self.relaxation_rates is None:
            # Fall back to regularized method
            if self.relaxation_rate is None:
                self.relaxation_rate = sp.Symbol("omega")

            # if only a single relaxation rate is defined,
            # it is internally treated as a list with one element and just sets the relaxation_rates parameter
            if self.relaxation_rate is not None:
                if self.method in [Method.TRT, Method.TRT_KBC_N1, Method.TRT_KBC_N2,
                                   Method.TRT_KBC_N3, Method.TRT_KBC_N4]:
                    self.relaxation_rates = [self.relaxation_rate,
                                             relaxation_rate_from_magic_number(self.relaxation_rate)]
                else:
                    self.relaxation_rates = [self.relaxation_rate]

        # Incompressible cumulant method is not available
        if not self.compressible and self.method in (Method.MONOMIAL_CUMULANT, Method.CUMULANT):
            raise ValueError("Incompressible cumulant-based methods are not supported (yet).")

        if self.zero_centered and self.entropic:
            raise ValueError("Entropic methods can only be created with `zero_centered=False`.")

        # Check or infer delta-equilibrium
        if self.delta_equilibrium is not None:
            if self.delta_equilibrium:
                # Must be zero-centered
                if not self.zero_centered:
                    raise ValueError("`delta_equilibrium=True` requires `zero_centered=True`!")
                # Must not be a cumulant-method
                if self.method in (Method.MONOMIAL_CUMULANT, Method.CUMULANT):
                    raise ValueError("Cannot create a cumulant-based method from a delta-equilibrium!")
        else:
            if self.zero_centered:
                if self.method in (Method.CENTRAL_MOMENT, Method.MONOMIAL_CUMULANT, Method.CUMULANT):
                    self.delta_equilibrium = False
                else:
                    self.delta_equilibrium = True
            else:
                self.delta_equilibrium = False

        # Check or infer collision space
        if isinstance(self.collision_space_info, CollisionSpace):
            self.collision_space_info = CollisionSpaceInfo(self.collision_space_info)

        if self.collision_space_info is not None:
            if (self.entropic or self.fluctuating) \
                    and self.collision_space_info.collision_space != CollisionSpace.POPULATIONS:
                # Workaround until entropic method supports relaxation in subexpressions
                # and the problem with RNGs in the assignment collection has been solved
                raise ValueError("Entropic and Fluctuating methods are only available in population space.")
            elif not self.collision_space_info.collision_space.compatible(self.method):
                raise ValueError("Given method is not compatible with given collision space.")
        else:
            if self.method in {Method.SRT, Method.TRT,
                               Method.TRT_KBC_N1, Method.TRT_KBC_N2, Method.TRT_KBC_N3, Method.TRT_KBC_N4}:
                self.collision_space_info = CollisionSpaceInfo(CollisionSpace.POPULATIONS)
            elif self.entropic or self.fluctuating:
                self.collision_space_info = CollisionSpaceInfo(CollisionSpace.POPULATIONS)
            elif self.method in {Method.MRT_RAW, Method.MRT}:
                self.collision_space_info = CollisionSpaceInfo(CollisionSpace.RAW_MOMENTS)
            elif self.method in {Method.CENTRAL_MOMENT}:
                self.collision_space_info = CollisionSpaceInfo(CollisionSpace.CENTRAL_MOMENTS)
            elif self.method in {Method.MONOMIAL_CUMULANT, Method.CUMULANT}:
                self.collision_space_info = CollisionSpaceInfo(CollisionSpace.CUMULANTS)
            else:
                raise Exception(f"No default collision space is given for method {self.method}."
                                "This is a bug; please report it to the developers.")

        # for backwards compatibility
        kernel_type_to_streaming_pattern = {
            'stream_pull_collide': ('pull', Timestep.BOTH),
            'collide_stream_push': ('push', Timestep.BOTH),
            'aa_even': ('aa', Timestep.EVEN),
            'aa_odd': ('aa', Timestep.ODD),
            'esotwist_even': ('esotwist', Timestep.EVEN),
            'esotwist_odd': ('esotwist', Timestep.ODD)
        }

        if self.kernel_type in kernel_type_to_streaming_pattern:
            self.streaming_pattern, self.timestep = kernel_type_to_streaming_pattern[self.kernel_type]
            self.kernel_type = 'default_stream_collide'

        if isinstance(self.force, Field):
            self.force = tuple(self.force(i) for i in range(self.stencil.D))

        # A default force model is only needed if at least one force component is non-zero
        force_not_zero = any(f_i != 0 for f_i in self.force)

        if self.force_model is None and force_not_zero:
            if self.method == Method.CUMULANT:
                self.force_model = forcemodels.CentralMoment(self.force[:self.stencil.D])
            else:
                self.force_model = forcemodels.Guo(self.force[:self.stencil.D])

        force_model_dict = {
            'simple': forcemodels.Simple,
            'luo': forcemodels.Luo,
            'guo': forcemodels.Guo,
            'schiller': forcemodels.Guo,
            'buick': forcemodels.Buick,
            'silva': forcemodels.Buick,
            'edm': forcemodels.EDM,
            'kupershtokh': forcemodels.EDM,
            'he': forcemodels.He,
            'shanchen': forcemodels.ShanChen,
            'centralmoment': forcemodels.CentralMoment
        }

        if self.psm_config is not None and self.psm_config.fraction_field is not None:
            self.force = [(1.0 - self.psm_config.fraction_field_symbol) * f for f in self.force]

        if isinstance(self.force_model, str):
            new_force_model = ForceModel[self.force_model.upper()]
            warn(f'ForceModel "{self.force_model}" as str is deprecated. Use {new_force_model} instead or '
                 f'provide a class of type AbstractForceModel', category=DeprecationWarning)
            force_model_class = force_model_dict[new_force_model.name.lower()]
            self.force_model = force_model_class(force=self.force[:self.stencil.D])
        elif isinstance(self.force_model, ForceModel):
            force_model_class = force_model_dict[self.force_model.name.lower()]
            self.force_model = force_model_class(force=self.force[:self.stencil.D])

        if self.density_input or self.velocity_input:
            self.conserved_moments = False
@dataclass
class LBMOptimisation:
    """
    **Below all parameters for the LBMOptimisation are explained**

    These options control LBM-specific simplifications that act on the assignment
    level, before any hardware-specific code generation takes place.
    """
    cse_pdfs: bool = False
    """
    Run common subexpression elimination for opposing stencil directions.
    """
    cse_global: bool = False
    """
    Run common subexpression elimination after all other simplifications have been executed.
    """
    simplification: Union[str, bool, SimplificationStrategy] = 'auto'
    """
    Simplifications applied during the derivation of the collision rule. If ``True`` or ``'auto'``,
    a default simplification strategy is selected according to the type of the method;
    see :func:`lbmpy.simplificationfactory.create_simplification_strategy`.
    If ``False``, no simplification is applied.
    Otherwise, the given simplification strategy will be applied.
    """
    pre_simplification: bool = True
    """
    Simplifications applied during the derivation of the collision rule for cumulant LBMs.
    For details see :mod:`lbmpy.moment_transforms`.
    """
    split: bool = False
    """
    Split innermost loop, to handle only two directions per loop. This reduces the number of parallel
    load/store streams and thus speeds up the kernel on most architectures.
    """
    field_size: Any = None
    """
    Create kernel for fixed field size.
    """
    field_layout: str = 'fzyx'
    """
    ``'c'`` or ``'numpy'`` for standard numpy layout, ``'reverse_numpy'`` or ``'f'`` for Fortran
    layout. This does not apply when pdf_arr was given; then the same layout as pdf_arr is used.
    """
    symbolic_field: pystencils.field.Field = None
    """
    Pystencils field for the source (pdf field that is read).
    """
    symbolic_temporary_field: pystencils.field.Field = None
    """
    Pystencils field for the temporary (pdf field that is written in stream, or stream-collide).
    """
    builtin_periodicity: Tuple[bool] = (False, False, False)
    """
    Instead of handling periodicity by copying ghost layers, the periodicity
    is built into the kernel. This parameter specifies if the domain is periodic in (x,y,z) direction. Even if the
    periodicity is built into the kernel, the fields have one ghost layer to be consistent with other functions.
    """
def create_lb_function(ast=None, lbm_config=None, lbm_optimisation=None, config=None, optimization=None, **kwargs):
    """Compile the LB update kernel into a callable Python function.

    If no AST is supplied (neither via the ``ast`` argument nor via
    ``lbm_config.ast``), the full creation pipeline is run first. The compiled
    kernel carries the ``method`` and ``update_rule`` of the AST it stems from.
    """
    lbm_config, lbm_optimisation, config = update_with_default_parameters(
        kwargs, optimization, lbm_config, lbm_optimisation, config)

    # an AST already stored on the config takes precedence over the argument
    ast = lbm_config.ast if lbm_config.ast is not None else ast
    if ast is None:
        ast = create_lb_ast(lbm_config.update_rule, lbm_config=lbm_config,
                            lbm_optimisation=lbm_optimisation, config=config)

    kernel = ast.compile()
    kernel.method = ast.method
    kernel.update_rule = ast.update_rule
    return kernel
def create_lb_ast(update_rule=None, lbm_config=None, lbm_optimisation=None, config=None, optimization=None, **kwargs):
    """Creates a pystencils AST for the LB method"""
    lbm_config, lbm_optimisation, config = update_with_default_parameters(kwargs, optimization,
                                                                          lbm_config, lbm_optimisation, config)

    # Prefer an update rule already stored on the configuration; derive one otherwise.
    if lbm_config.update_rule is not None:
        update_rule = lbm_config.update_rule
    if update_rule is None:
        update_rule = create_lb_update_rule(lbm_config.collision_rule, lbm_config=lbm_config,
                                            lbm_optimisation=lbm_optimisation, config=config)

    # LBM kernels always operate with exactly one ghost layer.
    kernel_ast = create_kernel(update_rule, config=replace(config, ghost_layers=1))

    # Attach metadata for downstream consumers, and cache the AST on the config.
    kernel_ast.method = update_rule.method
    kernel_ast.update_rule = update_rule
    lbm_config.ast = kernel_ast
    return kernel_ast
@disk_cache_no_fallback
def create_lb_update_rule(collision_rule=None, lbm_config=None, lbm_optimisation=None, config=None,
                          optimization=None, **kwargs):
    """Creates an update rule (list of Assignments) for a LB method that describe a full sweep"""
    lbm_config, lbm_optimisation, config = update_with_default_parameters(kwargs, optimization,
                                                                          lbm_config, lbm_optimisation, config)

    # A collision rule cached on the configuration takes precedence; derive one if none is given.
    if lbm_config.collision_rule is not None:
        collision_rule = lbm_config.collision_rule
    if collision_rule is None:
        collision_rule = create_lb_collision_rule(lbm_config.lb_method, lbm_config=lbm_config,
                                                  lbm_optimisation=lbm_optimisation,
                                                  config=config)

    lb_method = collision_rule.method

    # Field dtype used when no symbolic field is supplied; the lookup differs between
    # pystencils 1.x and 2.x config APIs.
    if IS_PYSTENCILS_2:
        fallback_field_data_type = config.get_option("default_dtype")
    else:
        fallback_field_data_type = config.data_type[lbm_config.field_name].numpy_dtype

    q = collision_rule.method.stencil.Q

    # Source PDF field: user-supplied symbolic field, fixed-size field (+2 for ghost layers),
    # or a generic field with one index dimension of length Q.
    if lbm_optimisation.symbolic_field is not None:
        src_field = lbm_optimisation.symbolic_field
    elif lbm_optimisation.field_size:
        field_size = tuple([s + 2 for s in lbm_optimisation.field_size] + [q])
        src_field = Field.create_fixed_size(lbm_config.field_name, field_size, index_dimensions=1,
                                            layout=lbm_optimisation.field_layout, dtype=fallback_field_data_type)
    else:
        src_field = Field.create_generic(lbm_config.field_name, spatial_dimensions=collision_rule.method.dim,
                                         index_shape=(q,), layout=lbm_optimisation.field_layout,
                                         dtype=fallback_field_data_type)

    # Destination field defaults to a same-shape temporary of the source field.
    if lbm_optimisation.symbolic_temporary_field is not None:
        dst_field = lbm_optimisation.symbolic_temporary_field
    else:
        dst_field = src_field.new_field_with_different_name(lbm_config.temporary_field_name)

    kernel_type = lbm_config.kernel_type
    if kernel_type == 'stream_pull_only':
        update_rule = create_stream_pull_with_output_kernel(lb_method, src_field, dst_field, lbm_config.output)
    else:
        # Select the PDF accessor that realises the requested streaming pattern.
        if kernel_type == 'default_stream_collide':
            if lbm_config.streaming_pattern == 'pull' and any(lbm_optimisation.builtin_periodicity):
                accessor = PeriodicTwoFieldsAccessor(lbm_optimisation.builtin_periodicity, ghost_layers=1)
            else:
                accessor = get_accessor(lbm_config.streaming_pattern, lbm_config.timestep)
        elif kernel_type == 'collide_only':
            accessor = CollideOnlyInplaceAccessor
        elif isinstance(kernel_type, PdfFieldAccessor):
            # A custom accessor instance may be passed directly as kernel_type.
            accessor = kernel_type
        else:
            raise ValueError("Invalid value of parameter 'kernel_type'", lbm_config.kernel_type)
        update_rule = create_lbm_kernel(collision_rule, src_field, dst_field, accessor)

    # Cache the result on the configuration object.
    lbm_config.update_rule = update_rule
    return update_rule
@disk_cache_no_fallback
def create_lb_collision_rule(lb_method=None, lbm_config=None, lbm_optimisation=None, config=None,
                             optimization=None, **kwargs):
    """Creates a collision rule (list of Assignments) for a LB method describing the collision operator (no stream)"""
    lbm_config, lbm_optimisation, config = update_with_default_parameters(kwargs, optimization,
                                                                          lbm_config, lbm_optimisation, config)

    # A method cached on the configuration takes precedence; derive one if none is given.
    if lbm_config.lb_method is not None:
        lb_method = lbm_config.lb_method
    if lb_method is None:
        lb_method = create_lb_method(lbm_config)

    cqc = lb_method.conserved_quantity_computation
    rho_in = lbm_config.density_input
    u_in = lbm_config.velocity_input

    # External density/velocity inputs given as fields are converted to symbolic accesses.
    if u_in is not None and isinstance(u_in, Field):
        u_in = u_in.center_vector
    if rho_in is not None and isinstance(rho_in, Field):
        rho_in = rho_in.center

    pre_simplification = lbm_optimisation.pre_simplification
    if rho_in is not None or u_in is not None:
        # Replace the PDF-derived conserved quantities by the externally supplied values.
        cqe = cqc.equilibrium_input_equations_from_pdfs(lb_method.pre_collision_pdf_symbols)
        cqe_main_assignments = cqe.main_assignments_dict

        if rho_in is not None:
            if u_in is None:
                raise ValueError("When setting 'density_input' parameter, "
                                 "'velocity_input' has to be specified as well.")
            cqe_main_assignments[cqc.density_symbol] = rho_in
            cqe_main_assignments[cqc.density_deviation_symbol] = rho_in - cqc.background_density

        if u_in is not None:
            for u_sym, u in zip(cqc.velocity_symbols, u_in):
                cqe_main_assignments[u_sym] = u

        cqe.set_main_assignments_from_dict(cqe_main_assignments)
        cqe = cqe.new_without_unused_subexpressions()

        collision_rule = lb_method.get_collision_rule(conserved_quantity_equations=cqe,
                                                      pre_simplification=pre_simplification)
    else:
        collision_rule = lb_method.get_collision_rule(pre_simplification=pre_simplification)

    # --- optional modifications of the collision rule, applied in a fixed order ---

    if lbm_config.galilean_correction:
        from lbmpy.methods.cumulantbased import add_galilean_correction
        collision_rule = add_galilean_correction(collision_rule)

    if lbm_config.fourth_order_correction:
        from lbmpy.methods.cumulantbased import add_fourth_order_correction
        # must provide a second relaxation rate in implementation; defaults to 1
        if len(lbm_config.relaxation_rates) == 1:
            lbm_config.relaxation_rates.append(1)
        # fourth_order_correction may be True (default limiter) or a numeric limiter value
        cumulant_limiter = 1e6 if lbm_config.fourth_order_correction is True else lbm_config.fourth_order_correction
        collision_rule = add_fourth_order_correction(collision_rule=collision_rule,
                                                     shear_relaxation_rate=lbm_config.relaxation_rates[0],
                                                     bulk_relaxation_rate=lbm_config.relaxation_rates[1],
                                                     limiter=cumulant_limiter)

    if lbm_config.psm_config is not None:
        if lbm_config.psm_config.fraction_field is None or lbm_config.psm_config.object_velocity_field is None:
            raise ValueError("Specify a fraction and object velocity field in the PSM Config")
        collision_rule = replace_by_psm_collision_rule(collision_rule, lbm_config.psm_config)

    # Entropic stabilisation, subgrid-scale models and the Cassons model are mutually exclusive.
    if lbm_config.entropic:
        if lbm_config.subgrid_scale_model or lbm_config.cassons:
            raise ValueError("Choose either entropic, subgrid-scale or cassons")
        if lbm_config.entropic_newton_iterations:
            # entropic_newton_iterations may be True (3 iterations) or an explicit count
            if isinstance(lbm_config.entropic_newton_iterations, bool):
                iterations = 3
            else:
                iterations = lbm_config.entropic_newton_iterations
            collision_rule = add_iterative_entropy_condition(collision_rule, newton_iterations=iterations,
                                                             omega_output_field=lbm_config.omega_output_field)
        else:
            collision_rule = add_entropy_condition(collision_rule, omega_output_field=lbm_config.omega_output_field)
    elif lbm_config.subgrid_scale_model:
        if lbm_config.cassons:
            raise ValueError("Cassons model can not be combined with a subgrid-scale model")

        # subgrid_scale_model may be a bare model enum or a (model, constant) tuple
        model_constant = None
        sgs_model = lbm_config.subgrid_scale_model

        if isinstance(lbm_config.subgrid_scale_model, tuple):
            sgs_model = lbm_config.subgrid_scale_model[0]
            model_constant = lbm_config.subgrid_scale_model[1]

        collision_rule = add_sgs_model(collision_rule=collision_rule, subgrid_scale_model=sgs_model,
                                       model_constant=model_constant, omega_output_field=lbm_config.omega_output_field,
                                       eddy_viscosity_field=lbm_config.eddy_viscosity_field)

        if 'split_groups' in collision_rule.simplification_hints:
            collision_rule.simplification_hints['split_groups'][0].append(sp.Symbol("sgs_omega"))

    elif lbm_config.cassons:
        collision_rule = add_cassons_model(collision_rule, parameter=lbm_config.cassons,
                                           omega_output_field=lbm_config.omega_output_field)

    # Append additional macroscopic output assignments (density, velocity, ...), if requested.
    if lbm_config.output:
        output_eqs = cqc.output_equations_from_pdfs(lb_method.pre_collision_pdf_symbols, lbm_config.output)
        collision_rule = collision_rule.new_merged(output_eqs)

    # Select the simplification strategy: default, user-supplied callable, or none.
    if lbm_optimisation.simplification is True or lbm_optimisation.simplification == 'auto':
        simplification = create_simplification_strategy(lb_method, split_inner_loop=lbm_optimisation.split)
    elif callable(lbm_optimisation.simplification):
        simplification = lbm_optimisation.simplification
    else:
        simplification = SimplificationStrategy()
    collision_rule = simplification(collision_rule)

    if isinstance(collision_rule.method, CumulantBasedLbMethod):
        from lbmpy.methods.cumulantbased.cumulant_simplifications import check_for_logarithms
        check_for_logarithms(collision_rule)

    if lbm_config.fluctuating:
        add_fluctuations_to_collision_rule(collision_rule, **lbm_config.fluctuating)

    # Optional common-subexpression elimination passes.
    if lbm_optimisation.cse_pdfs:
        from lbmpy.methods.momentbased.momentbasedsimplifications import cse_in_opposing_directions
        collision_rule = cse_in_opposing_directions(collision_rule)
    if lbm_optimisation.cse_global:
        collision_rule = sympy_cse(collision_rule)

    # Cache the result on the configuration object.
    lbm_config.collision_rule = collision_rule
    return collision_rule
def create_lb_method(lbm_config=None, **params):
    """Creates a LB method, defined by moments/cumulants for collision space, equilibrium and relaxation rates."""
    lbm_config, _, _ = update_with_default_parameters(params, lbm_config=lbm_config)

    relaxation_rates = lbm_config.relaxation_rates
    dim = lbm_config.stencil.D

    # A force given as a pystencils field is converted to a tuple of its index components.
    if isinstance(lbm_config.force, Field):
        lbm_config.force = tuple(lbm_config.force(i) for i in range(dim))

    if lbm_config.psm_config is None:
        fraction_field = None
    else:
        fraction_field = lbm_config.psm_config.fraction_field_symbol

    # Parameters accepted by all moment-based method factories.
    common_params = {
        'compressible': lbm_config.compressible,
        'zero_centered': lbm_config.zero_centered,
        'delta_equilibrium': lbm_config.delta_equilibrium,
        'equilibrium_order': lbm_config.equilibrium_order,
        'force_model': lbm_config.force_model,
        'continuous_equilibrium': lbm_config.continuous_equilibrium,
        'c_s_sq': lbm_config.c_s_sq,
        'collision_space_info': lbm_config.collision_space_info,
        'fraction_field': fraction_field,
    }

    # Cumulant-based factories accept only this subset of parameters.
    cumulant_params = {
        'zero_centered': lbm_config.zero_centered,
        'force_model': lbm_config.force_model,
        'c_s_sq': lbm_config.c_s_sq,
        'collision_space_info': lbm_config.collision_space_info,
        'fraction_field': fraction_field,
    }

    if lbm_config.method == Method.SRT:
        assert len(relaxation_rates) >= 1, "Not enough relaxation rates"
        method = create_srt(lbm_config.stencil, relaxation_rates[0], **common_params)
    elif lbm_config.method == Method.TRT:
        assert len(relaxation_rates) >= 2, "Not enough relaxation rates"
        method = create_trt(lbm_config.stencil, relaxation_rates[0], relaxation_rates[1], **common_params)
    elif lbm_config.method == Method.MRT:
        method = create_mrt_orthogonal(lbm_config.stencil, relaxation_rates, weighted=lbm_config.weighted,
                                       nested_moments=lbm_config.nested_moments,
                                       conserved_moments=lbm_config.conserved_moments, **common_params)
    elif lbm_config.method == Method.CENTRAL_MOMENT:
        method = create_central_moment(lbm_config.stencil, relaxation_rates,
                                       nested_moments=lbm_config.nested_moments,
                                       conserved_moments=lbm_config.conserved_moments, **common_params)
    elif lbm_config.method == Method.MRT_RAW:
        method = create_mrt_raw(lbm_config.stencil, relaxation_rates,
                                conserved_moments=lbm_config.conserved_moments, **common_params)
    elif lbm_config.method in (Method.TRT_KBC_N1, Method.TRT_KBC_N2, Method.TRT_KBC_N3, Method.TRT_KBC_N4):
        # KBC-type TRT methods exist only for D2Q9 and D3Q27 stencils.
        if lbm_config.stencil.D == 2 and lbm_config.stencil.Q == 9:
            dim = 2
        elif lbm_config.stencil.D == 3 and lbm_config.stencil.Q == 27:
            dim = 3
        else:
            raise NotImplementedError("KBC type TRT methods can only be constructed for D2Q9 and D3Q27 stencils")
        method_nr = lbm_config.method.name[-1]  # trailing digit of the enum name selects the KBC variant
        method = create_trt_kbc(dim, relaxation_rates[0], relaxation_rates[1], 'KBC-N' + method_nr, **common_params)
    elif lbm_config.method == Method.CUMULANT:
        if lbm_config.fourth_order_correction:
            # Bug fix: the stencil must be rejected whenever it is NOT D3Q27. The previous
            # 'and' condition let non-D3Q27 three-dimensional stencils (e.g. D3Q19) pass.
            if lbm_config.stencil.D != 3 or lbm_config.stencil.Q != 27:
                raise ValueError("Fourth-order correction can only be applied to D3Q27 cumulant methods.")
            assert len(relaxation_rates) <= 2, "Optimal parametrisation for fourth-order cumulants needs either one " \
                                              "or two relaxation rates, associated with the shear (and bulk) " \
                                              "viscosity. All other relaxation rates are automatically chosen " \
                                              "optimally"

            # define method in terms of symbolic relaxation rates and assign optimal values later
            from lbmpy.methods.cumulantbased.fourth_order_correction import FOURTH_ORDER_RELAXATION_RATE_SYMBOLS
            relaxation_rates = FOURTH_ORDER_RELAXATION_RATE_SYMBOLS

        if lbm_config.nested_moments is not None:
            method = create_cumulant(lbm_config.stencil, relaxation_rates, lbm_config.nested_moments,
                                     conserved_moments=lbm_config.conserved_moments, **cumulant_params)
        else:
            method = create_with_default_polynomial_cumulants(lbm_config.stencil, relaxation_rates, **cumulant_params)
    elif lbm_config.method == Method.MONOMIAL_CUMULANT:
        method = create_with_monomial_cumulants(lbm_config.stencil, relaxation_rates,
                                                conserved_moments=lbm_config.conserved_moments, **cumulant_params)
    else:
        raise ValueError("Failed to create LB method. Please use lbmpy.enums.Method for the creation")

    # >>Entropic methods can only be created for methods with two relaxation rates One free relaxation rate
    # determining the viscosity and one to be determined by the entropy condition<<
    # Thus we fix the conserved quantities to one of the relaxation rates because zero would be recognised as
    # a third relaxation rate here.
    if lbm_config.entropic:
        method.set_conserved_moments_relaxation_rate(relaxation_rates[0])

    lbm_config.lb_method = method
    return method
def create_psm_update_rule(lbm_config, lbm_optimisation):
    """Creates an update rule for the partially saturated method (PSM) that wraps the solid-collision
    terms of each potential particle in an explicit conditional (pystencils 1.x only)."""
    if IS_PYSTENCILS_2:
        raise NotImplementedError(
            "`create_psm_update_rule` is not yet available when using pystencils 2.0. "
            "To instead derive a (potentially less efficient) PSM kernel without branches, "
            "use `create_lb_update_rule` with a `PsmConfig` object instead."
        )

    from pystencils.astnodes import Conditional, Block
    from pystencils.node_collection import NodeCollection

    if lbm_config.psm_config is None:
        raise ValueError("Specify a PSM Config in the LBM Config, when creating a psm update rule")

    # First derive the pure-fluid update rule (no particles per cell).
    config_without_particles = copy.deepcopy(lbm_config)
    config_without_particles.psm_config.max_particles_per_cell = 0
    lb_update_rule = create_lb_update_rule(
        lbm_config=config_without_particles, lbm_optimisation=lbm_optimisation)
    node_collection = lb_update_rule.all_assignments

    if lbm_config.psm_config.individual_fraction_field is None:
        # Without per-particle fraction fields, at most one particle per cell is supported.
        assert lbm_config.psm_config.max_particles_per_cell == 1
        fraction_field = lbm_config.psm_config.fraction_field
    else:
        fraction_field = lbm_config.psm_config.individual_fraction_field

    # Append one solid-collision branch per potential particle, guarded by its fraction value.
    for p in range(lbm_config.psm_config.max_particles_per_cell):
        psm_solid_collision = add_psm_solid_collision_to_collision_rule(lb_update_rule, lbm_config, p)
        psm_update_rule = create_lb_update_rule(
            collision_rule=psm_solid_collision, lbm_config=lbm_config, lbm_optimisation=lbm_optimisation)
        node_collection.append(
            Conditional(
                fraction_field.center(p) > 0.0,
                Block(psm_update_rule.all_assignments),
            )
        )

    return NodeCollection(node_collection)
# ----------------------------------------------------------------------------------------------------------------------
def update_with_default_parameters(params, opt_params=None, lbm_config=None, lbm_optimisation=None, config=None):
    """Route legacy keyword/'optimization' dicts into the three configuration objects.

    Args:
        params: keyword arguments destined for :class:`LBMConfig`
        opt_params: legacy optimisation dict; entries are split between
                    pystencils' ``CreateKernelConfig`` and :class:`LBMOptimisation`
        lbm_config: optional pre-existing LBM configuration, updated in-place-style via ``replace``
        lbm_optimisation: optional pre-existing optimisation configuration
        config: optional pre-existing pystencils kernel configuration

    Returns:
        tuple ``(lbm_config, lbm_optimisation, config)``

    Raises:
        KeyError: if a supplied keyword is not a field of the targeted configuration class
    """
    # Keys of the legacy 'optimization' dict that belong to pystencils' CreateKernelConfig
    pystencils_config_params = ['target', 'backend', 'cpu_openmp', 'double_precision', 'gpu_indexing',
                                'gpu_indexing_params', 'cpu_vectorize_info']
    if opt_params is not None:
        config_params = {k: v for k, v in opt_params.items() if k in pystencils_config_params}
    else:
        config_params = {}

    # Translate the deprecated boolean 'double_precision' into the modern 'data_type' option.
    if 'double_precision' in config_params:
        if config_params['double_precision']:
            config_params['data_type'] = 'float64'
        else:
            config_params['data_type'] = 'float32'
        del config_params['double_precision']

    if not config:
        config = CreateKernelConfig(**config_params)
    else:
        for k, v in config_params.items():
            if not hasattr(config, k):
                # Bug fix: report the offending keyword (k), not its value (v).
                raise KeyError(f'{k} is not a valid kwarg. Please look in CreateKernelConfig for valid settings')
        config = replace(config, **config_params)

    lbm_opt_params = ['cse_pdfs', 'cse_global', 'simplification', 'pre_simplification', 'split', 'field_size',
                      'field_layout', 'symbolic_field', 'symbolic_temporary_field', 'builtin_periodicity']
    if opt_params is not None:
        opt_params_dict = {k: v for k, v in opt_params.items() if k in lbm_opt_params}
    else:
        opt_params_dict = {}

    if not lbm_optimisation:
        lbm_optimisation = LBMOptimisation(**opt_params_dict)
    else:
        for k, v in opt_params_dict.items():
            if not hasattr(lbm_optimisation, k):
                # Bug fix: report the offending keyword (k), not its value (v).
                raise KeyError(f'{k} is not a valid kwarg. Please look in LBMOptimisation for valid settings')
        lbm_optimisation = replace(lbm_optimisation, **opt_params_dict)

    if params is None:
        params = {}

    if not lbm_config:
        lbm_config = LBMConfig(**params)
    else:
        for k, v in params.items():
            if not hasattr(lbm_config, k):
                # Bug fix: report the offending keyword (k), not its value (v).
                raise KeyError(f'{k} is not a valid kwarg. Please look in LBMConfig for valid settings')
        lbm_config = replace(lbm_config, **params)

    return lbm_config, lbm_optimisation, config
...@@ -102,7 +102,7 @@ def __cumulant_raw_moment_transform(index, dependent_var_dict, outer_function, d ...@@ -102,7 +102,7 @@ def __cumulant_raw_moment_transform(index, dependent_var_dict, outer_function, d
@memorycache(maxsize=16) @memorycache(maxsize=16)
def __get_discrete_cumulant_generating_function(func, stencil, wave_numbers): def __get_discrete_cumulant_generating_function(func, stencil, wave_numbers):
assert len(stencil) == len(func) assert stencil.Q == len(func)
laplace_transformation = sum([factor * sp.exp(scalar_product(wave_numbers, e)) for factor, e in zip(func, stencil)]) laplace_transformation = sum([factor * sp.exp(scalar_product(wave_numbers, e)) for factor, e in zip(func, stencil)])
return sp.ln(laplace_transformation) return sp.ln(laplace_transformation)
...@@ -121,10 +121,10 @@ def discrete_cumulant(func, cumulant, stencil): ...@@ -121,10 +121,10 @@ def discrete_cumulant(func, cumulant, stencil):
(similar to moment description) (similar to moment description)
stencil: sequence of directions stencil: sequence of directions
""" """
assert len(stencil) == len(func) assert stencil.Q == len(func)
dim = len(stencil[0]) dim = len(stencil[0])
wave_numbers = tuple([sp.Symbol("Xi_%d" % (i,)) for i in range(dim)]) wave_numbers = sp.symbols(f"Xi_:{dim}")
generating_function = __get_discrete_cumulant_generating_function(func, stencil, wave_numbers) generating_function = __get_discrete_cumulant_generating_function(func, stencil, wave_numbers)
if type(cumulant) is tuple: if type(cumulant) is tuple:
...@@ -157,9 +157,9 @@ def cumulants_from_pdfs(stencil, cumulant_indices=None, pdf_symbols=None): ...@@ -157,9 +157,9 @@ def cumulants_from_pdfs(stencil, cumulant_indices=None, pdf_symbols=None):
dim = len(stencil[0]) dim = len(stencil[0])
if cumulant_indices is None: if cumulant_indices is None:
cumulant_indices = moments_up_to_component_order(2, dim=dim) cumulant_indices = moments_up_to_component_order(2, dim=dim)
assert len(stencil) == len(cumulant_indices), "Stencil has to have same length as cumulant_indices sequence" assert stencil.Q == len(cumulant_indices), "Stencil has to have same length as cumulant_indices sequence"
if pdf_symbols is None: if pdf_symbols is None:
pdf_symbols = __get_indexed_symbols(pdf_symbols, "f", range(len(stencil))) pdf_symbols = __get_indexed_symbols(pdf_symbols, "f", range(stencil.Q))
return {idx: discrete_cumulant(tuple(pdf_symbols), idx, stencil) for idx in cumulant_indices} return {idx: discrete_cumulant(tuple(pdf_symbols), idx, stencil) for idx in cumulant_indices}
......
import numpy as np
import sympy as sp
from ._compat import IS_PYSTENCILS_2
if IS_PYSTENCILS_2:
raise ImportError("`lbmpy.custom_code_nodes` is only available when running with pystencils 1.x")
from pystencils.typing import TypedSymbol, create_type
from pystencils.backends.cbackend import CustomCodeNode
class NeighbourOffsetArrays(CustomCodeNode):
    """Custom code node emitting one constant C array per spatial coordinate, containing
    the neighbour offsets of all stencil directions."""

    @staticmethod
    def neighbour_offset(dir_idx, stencil):
        # For a compile-time constant index the offset tuple is known directly.
        if isinstance(sp.sympify(dir_idx), sp.Integer):
            return stencil[dir_idx]
        # Otherwise, index the generated per-axis offset arrays with the symbolic index.
        offset_syms = NeighbourOffsetArrays._offset_symbols(len(stencil[0]))
        return tuple(sp.IndexedBase(sym, shape=(1,))[dir_idx] for sym in offset_syms)

    @staticmethod
    def _offset_symbols(dim):
        # One int32 symbol per coordinate axis, e.g. 'neighbour_offset_x'.
        return [TypedSymbol(f"neighbour_offset_{axis}", create_type('int32'))
                for axis in ['x', 'y', 'z'][:dim]]

    def __init__(self, stencil, offsets_dtype=np.int32):
        offsets_dtype = create_type(offsets_dtype)
        dim = len(stencil[0])
        array_symbols = NeighbourOffsetArrays._offset_symbols(dim)

        pieces = ["\n"]
        for coord, symbol in enumerate(array_symbols):
            pieces.append(_array_pattern(offsets_dtype, symbol.name,
                                         (direction[coord] for direction in stencil)))

        super(NeighbourOffsetArrays, self).__init__("".join(pieces), symbols_read=set(),
                                                    symbols_defined=set(array_symbols))
class MirroredStencilDirections(CustomCodeNode):
    """Custom code node emitting a constant C array that maps each stencil direction index
    to the index of the direction mirrored at the given axis."""

    @staticmethod
    def mirror_stencil(direction, mirror_axis):
        """Return *direction* with its *mirror_axis* component negated."""
        # Bug fix: mirror_axis must be a valid index, i.e. strictly smaller than the number
        # of components. The previous '<=' check allowed mirror_axis == len(direction),
        # which would raise an IndexError below instead of a clear assertion error.
        assert mirror_axis < len(direction), f"only {len(direction)} axes available for mirroring"
        direction = list(direction)
        direction[mirror_axis] = -direction[mirror_axis]
        return tuple(direction)

    @staticmethod
    def _mirrored_symbol(mirror_axis, _stencil):
        # Name of the lookup array, e.g. 'y_axis_mirrored_stencil_dir'.
        axis = ['x', 'y', 'z']
        return TypedSymbol(f"{axis[mirror_axis]}_axis_mirrored_stencil_dir", create_type('int32'))

    def __init__(self, stencil, mirror_axis, dtype=np.int32):
        offsets_dtype = create_type(dtype)
        mirrored_stencil_symbol = MirroredStencilDirections._mirrored_symbol(mirror_axis, stencil)
        # For each direction, look up the stencil index of its mirrored counterpart.
        mirrored_directions = [stencil.index(MirroredStencilDirections.mirror_stencil(direction, mirror_axis))
                               for direction in stencil]
        code = "\n"
        code += _array_pattern(offsets_dtype, mirrored_stencil_symbol.name, mirrored_directions)
        super(MirroredStencilDirections, self).__init__(code, symbols_read=set(),
                                                        symbols_defined={mirrored_stencil_symbol})
class LbmWeightInfo(CustomCodeNode):
    """Custom code node emitting the lattice weights of an LB method as a constant C array."""

    def __init__(self, lb_method, data_type='double'):
        self.weights_symbol = TypedSymbol("weights", data_type)
        c_type = self.weights_symbol.dtype.c_name
        # Each weight is rendered with 17 significant digits and an explicit cast.
        literals = ", ".join(f"(({c_type})({str(w.evalf(17))}))" for w in lb_method.weights)
        code = f"const {c_type} {self.weights_symbol.name} [] = {{{literals}}};\n"
        super(LbmWeightInfo, self).__init__(code, symbols_read=set(),
                                            symbols_defined={self.weights_symbol})

    def weight_of_direction(self, dir_idx, lb_method=None):
        # Constant index: fold the weight value directly into the expression.
        if isinstance(sp.sympify(dir_idx), sp.Integer):
            return lb_method.weights[dir_idx].evalf(17)
        # Symbolic index: defer to a run-time lookup in the generated weights array.
        return sp.IndexedBase(self.weights_symbol, shape=(1,))[dir_idx]
class TranslationArraysNode(CustomCodeNode):
    """Custom code node emitting several constant C arrays used to translate PDF accesses."""

    def __init__(self, array_content, symbols_defined):
        # Each entry of array_content is a (dtype, name, values) tuple for _array_pattern.
        code = "".join(_array_pattern(*content) for content in array_content)
        super(TranslationArraysNode, self).__init__(code, symbols_read=set(),
                                                    symbols_defined=symbols_defined)

    def __str__(self):
        return "Variable PDF Access Translation Arrays"

    def __repr__(self):
        return "Variable PDF Access Translation Arrays"
def _array_pattern(dtype, name, content):
    """Render a C declaration of a constant array *name* of type *dtype* initialised with *content*."""
    values = ','.join(str(c) for c in content)
    return f"const {str(dtype)} {name} [] = {{ {values} }}; \n"
import json
import six
import inspect
from pystencils.runhelper.db import PystencilsJsonEncoder
from pystencils.simp import SimplificationStrategy
from lbmpy import LBStencil, Method, CollisionSpace, SubgridScaleModel
from lbmpy.creationfunctions import LBMConfig, LBMOptimisation
from lbmpy.methods import CollisionSpaceInfo
from lbmpy.forcemodels import AbstractForceModel
from lbmpy.non_newtonian_models import CassonsParameters
class LbmpyJsonEncoder(PystencilsJsonEncoder):
    """JSON encoder that knows how to serialize lbmpy configuration objects and enums."""

    def default(self, obj):
        # Configuration-like objects are serialized via their attribute dictionary.
        if isinstance(obj, (LBMConfig, LBMOptimisation, CollisionSpaceInfo, CassonsParameters)):
            return obj.__dict__
        # Stencils and enum members are represented by their name.
        if isinstance(obj, (LBStencil, Method, CollisionSpace, SubgridScaleModel)):
            return obj.name
        # Force models and plain classes are represented by their class name.
        if isinstance(obj, AbstractForceModel):
            return obj.__class__.__name__
        if isinstance(obj, SimplificationStrategy):
            return obj.__str__()
        if inspect.isclass(obj):
            return obj.__name__
        # Everything else falls through to the pystencils encoder.
        return PystencilsJsonEncoder.default(self, obj)
class LbmpyJsonSerializer(object):
    """(De)serializes objects to/from UTF-8 encoded JSON bytes using :class:`LbmpyJsonEncoder`."""

    @classmethod
    def serialize(cls, data):
        """Serialize *data* to UTF-8 encoded JSON bytes.

        Byte payloads are decoded to text before dumping. The previous implementation
        gated this on ``six.PY3``; this codebase is Python-3-only (it uses f-strings
        throughout), so the Python-2 branch was dead code and has been removed.
        """
        if isinstance(data, bytes):
            data = data.decode('utf-8')
        return json.dumps(data, cls=LbmpyJsonEncoder, ensure_ascii=False).encode('utf-8')

    @classmethod
    def deserialize(cls, data):
        """Deserialize UTF-8 encoded JSON bytes back into Python objects.

        The previous ``six.PY3`` branch and its ``else`` branch were byte-identical,
        so the conditional was dead code and has been removed.
        """
        return json.loads(data.decode('utf-8'))
from enum import Enum, auto
class Stencil(Enum):
    """
    The Stencil enumeration represents all possible lattice Boltzmann stencils that are available in lbmpy.
    It should be passed to :class:`lbmpy.stencils.LBStencil`. This class then creates a stencil representation
    containing the concrete neighbour directions as a tuple of tuples.
    The number of spatial dimensions *d* and the number of discrete velocities *q* are stated in the DdQq notation
    """
    D2Q9 = auto()
    """
    A two dimensional stencil using 9 discrete velocities.
    """
    D2V17 = auto()
    """
    A two dimensional stencil using 17 discrete velocities. (long range stencil).
    """
    D2V37 = auto()
    """
    A two dimensional stencil using 37 discrete velocities. (long range stencil).
    """
    D3Q7 = auto()
    """
    A three dimensional stencil using 7 discrete velocities.
    """
    D3Q15 = auto()
    """
    A three dimensional stencil using 15 discrete velocities.
    """
    D3Q19 = auto()
    """
    A three dimensional stencil using 19 discrete velocities.
    """
    D3Q27 = auto()
    """
    A three dimensional stencil using 27 discrete velocities.
    """
class Method(Enum):
    """
    The Method enumeration represents all possible lattice Boltzmann collision operators that are available in lbmpy.
    It should be passed to :class:`lbmpy.creationfunctions.LBMConfig`. The LBM configuration *dataclass* then derives
    the respective collision equations when passed to the creations functions in the `lbmpy.creationfunctions`
    module of lbmpy.

    Note here, when using a specific enumeration to derive a particular LBM collision operator,
    different parameters of the :class:`lbmpy.creationfunctions.LBMConfig` might become necessary.
    For example, it does not make sense to define *relaxation_rates* for a single relaxation rate method, which
    is essential for multiple relaxation rate methods. Important specific parameters are listed below to the enum value.
    A specific creation function is stated for each case which explains these parameters in detail.
    """
    SRT = auto()
    """
    See :func:`lbmpy.methods.create_srt`,
    Single relaxation time method
    """
    TRT = auto()
    """
    See :func:`lbmpy.methods.create_trt`,
    Two relaxation time, the first relaxation rate is for even moments and determines the
    viscosity (as in SRT). The second relaxation rate is used for relaxing odd moments and controls the
    bulk viscosity. For details in the TRT collision operator see :cite:`TRT`
    """
    MRT_RAW = auto()
    """
    See :func:`lbmpy.methods.create_mrt_raw`,
    Non-orthogonal MRT where all relaxation rates can be specified independently, i.e. there are as many relaxation
    rates as stencil entries. Look at the generated method in Jupyter to see which moment <-> relaxation rate
    mapping is used. Originally defined in :cite:`raw_moments`
    """
    MRT = auto()
    """
    See :func:`lbmpy.methods.create_mrt_orthogonal`
    Orthogonal multi relaxation time model, relaxation rates are used in this order for *shear modes*, *bulk modes*,
    *third-order modes*, *fourth-order modes*, etc. Requires also a parameter *weighted* that should be `True` if the
    moments should be orthogonal w.r.t. weighted scalar product using the lattice weights. If `False`, the normal
    scalar product is used. For custom definition of the method, a *nested_moments* can be passed.
    For example: [ [1, x, y], [x*y, x**2, y**2], ... ] that groups all moments together that should be relaxed
    at the same rate. Literature values of this list can be obtained through
    :func:`lbmpy.methods.creationfunctions.mrt_orthogonal_modes_literature`.
    WMRT collision operators are reported to be numerically more stable and more accurate,
    whilst also having a lower computational cost :cite:`FAKHARI201722`
    """
    CENTRAL_MOMENT = auto()
    """
    See :func:`lbmpy.methods.create_central_moment`
    Creates moment based LB method where the collision takes place in the central moment space. By default,
    a raw-moment set is used where the bulk and the shear viscosity are separated. An original derivation can be
    found in :cite:`Geier2006`
    """
    TRT_KBC_N1 = auto()
    """
    See :func:`lbmpy.methods.create_trt_kbc`
    Particular two-relaxation rate method. This is not the entropic method yet, only the relaxation pattern.
    To get the entropic method also *entropic* needs to be set to `True`.
    There are four KBC methods available in lbmpy. The naming is according to :cite:`karlin2015entropic`
    """
    TRT_KBC_N2 = auto()
    """
    See :func:`lbmpy.methods.create_trt_kbc`
    Particular two-relaxation rate method. This is not the entropic method yet, only the relaxation pattern.
    To get the entropic method also *entropic* needs to be set to `True`.
    There are four KBC methods available in lbmpy. The naming is according to :cite:`karlin2015entropic`
    """
    TRT_KBC_N3 = auto()
    """
    See :func:`lbmpy.methods.create_trt_kbc`
    Particular two-relaxation rate method. This is not the entropic method yet, only the relaxation pattern.
    To get the entropic method also *entropic* needs to be set to `True`.
    There are four KBC methods available in lbmpy. The naming is according to :cite:`karlin2015entropic`
    """
    TRT_KBC_N4 = auto()
    """
    See :func:`lbmpy.methods.create_trt_kbc`
    Particular two-relaxation rate method. This is not the entropic method yet, only the relaxation pattern.
    To get the entropic method also *entropic* needs to be set to `True`.
    There are four KBC methods available in lbmpy. The naming is according to :cite:`karlin2015entropic`
    """
    CUMULANT = auto()
    """
    See :func:`lbmpy.methods.create_with_default_polynomial_cumulants`
    Cumulant-based LB method which relaxes groups of polynomial cumulants chosen to optimize rotational invariance.
    For details on the method see :cite:`geier2015`
    """
    MONOMIAL_CUMULANT = auto()
    """
    See :func:`lbmpy.methods.create_with_monomial_cumulants`
    Cumulant-based LB method which relaxes monomial cumulants.
    For details on the method see :cite:`geier2015` and :cite:`Coreixas2019`
    """
class CollisionSpace(Enum):
    """
    The CollisionSpace enumeration lists all possible spaces for collision to occur in.
    """
    POPULATIONS = auto()
    """
    Population space, meaning post-collision populations are obtained directly by relaxation of linear combinations of
    pre-collision populations. Default for `lbmpy.enums.Method.SRT` and `lbmpy.enums.Method.TRT`.
    Results in the creation of an instance of :class:`lbmpy.methods.momentbased.MomentBasedLbMethod`.
    """
    RAW_MOMENTS = auto()
    """
    Raw moment space, meaning relaxation is applied to a set of linearly independent, polynomial raw moments of the
    discrete population vector. Default for `lbmpy.enums.Method.MRT`.
    Results in the creation of an instance of :class:`lbmpy.methods.momentbased.MomentBasedLbMethod`.
    """
    CENTRAL_MOMENTS = auto()
    """
    Central moment space, meaning relaxation is applied to a set of linearly independent, polynomial central moments
    of the discrete population vector. Default for `lbmpy.enums.Method.CENTRAL_MOMENT`.
    Results in the creation of an instance of :class:`lbmpy.methods.momentbased.CentralMomentBasedLbMethod`.
    """
    CUMULANTS = auto()
    """
    Cumulant space, meaning relaxation is applied to a set of linearly independent, polynomial cumulants of the
    discrete population vector. Default for `lbmpy.enums.Method.CUMULANT` and `lbmpy.enums.Method.MONOMIAL_CUMULANT`.
    Results in the creation of an instance of :class:`lbmpy.methods.cumulantbased.CumulantBasedLbMethod`.
    """

    def compatible(self, method: Method):
        """Determines if the given `lbmpy.enums.Method` is compatible with this collision space."""
        # Each collision space accepts only the method families listed here; anything else is rejected.
        compat_dict = {
            CollisionSpace.POPULATIONS: {Method.SRT, Method.TRT, Method.MRT_RAW, Method.MRT,
                                         Method.TRT_KBC_N1, Method.TRT_KBC_N2, Method.TRT_KBC_N3, Method.TRT_KBC_N4},
            CollisionSpace.RAW_MOMENTS: {Method.SRT, Method.TRT, Method.MRT_RAW, Method.MRT},
            CollisionSpace.CENTRAL_MOMENTS: {Method.CENTRAL_MOMENT},
            CollisionSpace.CUMULANTS: {Method.MONOMIAL_CUMULANT, Method.CUMULANT}
        }
        return method in compat_dict[self]
class ForceModel(Enum):
    """
    The ForceModel enumeration defines which force model is used to introduce forcing terms in the collision operator
    of the lattice Boltzmann method. A short summary of the underlying theory is given in `lbmpy.forcemodels`.
    More precise definitions are given in Chapters 6 and 10 of :cite:`lbm_book`.
    """
    SIMPLE = auto()
    """
    See :class:`lbmpy.forcemodels.Simple`
    """
    LUO = auto()
    """
    See :class:`lbmpy.forcemodels.Luo`
    """
    GUO = auto()
    """
    See :class:`lbmpy.forcemodels.Guo`
    """
    BUICK = auto()
    """
    See :class:`lbmpy.forcemodels.Buick`
    """
    SILVA = auto()
    """
    See :class:`lbmpy.forcemodels.Buick` (NOTE(review): deliberately references the Buick documentation;
    presumably the Silva model shares that implementation — confirm against `lbmpy.forcemodels`).
    """
    EDM = auto()
    """
    See :class:`lbmpy.forcemodels.EDM`
    """
    KUPERSHTOKH = auto()
    """
    See :class:`lbmpy.forcemodels.EDM` (NOTE(review): deliberately references the EDM documentation;
    presumably the Kupershtokh model shares that implementation — confirm against `lbmpy.forcemodels`).
    """
    HE = auto()
    """
    See :class:`lbmpy.forcemodels.He`
    """
    SHANCHEN = auto()
    """
    See :class:`lbmpy.forcemodels.ShanChen`
    """
    CENTRALMOMENT = auto()
    """
    See :class:`lbmpy.forcemodels.CentralMoment`
    """
class SubgridScaleModel(Enum):
    """
    The SubgridScaleModel enumeration defines which subgrid-scale model (SGS) is used to perform
    Large-Eddy-Simulations (LES).
    """
    SMAGORINSKY = auto()
    """
    See :func:`lbmpy.turbulence_models.add_smagorinsky_model`
    """
    QR = auto()
    """
    See :func:`lbmpy.turbulence_models.add_qr_model`
    """
r"""
This module contains various classes encapsulating equilibrium distributions used in the lattice Boltzmann
method. These include both the continuous and the discretized variants of the Maxwellian equilibrium of
hydrodynamics. Furthermore, a lightweight wrapper class for custom discrete equilibria is provided.
Custom equilibria may also be implemented by manually overriding the abstract base class
:class:`lbmpy.equilibrium.AbstractEquilibrium`.
"""
from .abstract_equilibrium import AbstractEquilibrium
from .continuous_hydro_maxwellian import ContinuousHydrodynamicMaxwellian, default_background_distribution
from .generic_discrete_equilibrium import GenericDiscreteEquilibrium, discrete_equilibrium_from_matching_moments
from .discrete_hydro_maxwellian import DiscreteHydrodynamicMaxwellian
__all__ = [
"AbstractEquilibrium",
"ContinuousHydrodynamicMaxwellian", "default_background_distribution",
"GenericDiscreteEquilibrium", "discrete_equilibrium_from_matching_moments",
"DiscreteHydrodynamicMaxwellian"
]
from abc import ABC, abstractmethod
import sympy as sp
from pystencils.cache import sharedmethodcache
from lbmpy.moments import polynomial_to_exponent_representation
class AbstractEquilibrium(ABC):
    """
    Abstract base class for the description of equilibrium distribution functions used in lattice
    Boltzmann methods.

    **Equilibrium Representation:**

    This class provides the common interface for describing equilibrium distribution functions,
    which is then used by the various method classes in the derivation of collision equations.
    An equilibrium distribution is defined by either its continuous equation (see :attr:`continuous_equation`)
    or a set of discrete populations
    (see :attr:`discrete_populations` and :class:`lbmpy.equilibrium.GenericDiscreteEquilibrium`).
    The distribution function may be given either in its regular, absolute form; or only as its
    deviation from the rest state, represented by the background distribution (see :attr:`background_distribution`).

    **Computation of Statistical Modes:**

    The major computational task of an equilibrium class is the computation of the distribution's
    statistical modes. For discrete distributions, the subclass :class:`lbmpy.equilibrium.GenericDiscreteEquilibrium`
    provides a generic implementation for their computation. For continuous distributions, computation
    of raw moments, central moments, and cumulants is more complicated, but may also be simplified using special
    tricks.
    As the computation of statistical modes is a time-consuming process, the abstract base class provides caching
    functionality to avoid recomputing quantities that are already known.

    **Instructions to Override:**

    If you wish to model a simple custom discrete distribution, just using the class
    :class:`lbmpy.equilibrium.GenericDiscreteEquilibrium` might already be sufficient.
    If, however, you need to implement more specific functionality, custom properties,
    a background distribution, etc., or if you wish to model a continuous distribution,
    you will have to set up a custom subclass of :class:`AbstractEquilibrium`.

    A subclass must implement all abstract properties according to their docstrings.
    For computation of statistical modes, a large part of the infrastructure is already given in the abstract base
    class. The public interface for computing e.g. raw moments reduces the computation of polynomial moments to their
    contained monomials (for details on how moments are represented in *lbmpy*, see :mod:`lbmpy.moments`). The values
    of both polynomial and monomial moments, once computed, will be cached per instance of the equilibrium class.
    To take full advantage of the caching functionality, you will have to override only :func:`_monomial_raw_moment`
    and its central moment and cumulant counterparts. These methods will be called only once for each monomial quantity
    when it is required for the first time. Afterward, the cached value will be used.
    """
    def __init__(self, dim=3):
        self._dim = dim
    @property
    def dim(self):
        """This distribution's spatial dimensionality."""
        return self._dim
    # -------------- Abstract Properties, to be overridden in subclass ----------------------------------------------
    @property
    @abstractmethod
    def deviation_only(self):
        """Whether or not this equilibrium distribution is represented only by its deviation
        from the background distribution."""
        raise NotImplementedError("'deviation_only' must be provided by subclass.")
    @property
    @abstractmethod
    def continuous_equation(self):
        """Returns the continuous equation defining this equilibrium distribution,
        or `None` if no such equation is available."""
        raise NotImplementedError("'continuous_equation' must be provided by subclass.")
    @property
    @abstractmethod
    def discrete_populations(self):
        """Returns the discrete populations of this equilibrium distribution as a tuple,
        or `None` if none are available."""
        raise NotImplementedError("'discrete_populations' must be provided by subclass.")
    @property
    @abstractmethod
    def background_distribution(self):
        """Returns this equilibrium distribution's background distribution, which is
        the distribution the discrete populations are centered around in the case of
        zero-centered storage. If no background distribution is available, `None` must be
        returned."""
        raise NotImplementedError("'background_distribution' must be provided by subclass.")
    @property
    @abstractmethod
    def zeroth_order_moment_symbol(self):
        """Returns a symbol referring to the zeroth-order moment of this distribution,
        which is the area under its curve."""
        raise NotImplementedError("'zeroth_order_moment' must be provided by subclass.")
    @property
    @abstractmethod
    def first_order_moment_symbols(self):
        """Returns a vector of symbols referring to the first-order moment of this distribution,
        which is its mean value."""
        raise NotImplementedError("'first_order_moments' must be provided by subclass.")
    # -------------- Statistical Modes Interface --------------------------------------------------------------------
    @sharedmethodcache("_moment_cache")
    def moment(self, exponent_tuple_or_polynomial):
        """Returns this equilibrium distribution's moment specified by ``exponent_tuple_or_polynomial``.

        Polynomial moments are decomposed into their monomial constituents; each monomial value is
        computed (and cached) via :func:`_monomial_raw_moment`.

        Args:
            exponent_tuple_or_polynomial: Moment specification, see :mod:`lbmpy.moments`.
        """
        monomials = []
        if isinstance(exponent_tuple_or_polynomial, tuple):
            monomials = [(1, exponent_tuple_or_polynomial)]
        else:
            monomials = polynomial_to_exponent_representation(exponent_tuple_or_polynomial, dim=self._dim)
        moment_value = sp.Integer(0)
        for coeff, moment in monomials:
            moment_value += coeff * self._cached_monomial_raw_moment(moment)
        return moment_value.expand()
    def moments(self, exponent_tuples_or_polynomials):
        """Returns a tuple of this equilibrium distribution's moments specified by
        ``exponent_tuples_or_polynomials``.

        Args:
            exponent_tuples_or_polynomials: Sequence of moment specifications, see :mod:`lbmpy.moments`.
        """
        return tuple(self.moment(m) for m in exponent_tuples_or_polynomials)
    @sharedmethodcache("_central_moment_cache")
    def central_moment(self, exponent_tuple_or_polynomial, frame_of_reference):
        """Returns this equilibrium distribution's central moment specified by
        ``exponent_tuple_or_polynomial``, computed according to the given ``frame_of_reference``.

        Args:
            exponent_tuple_or_polynomial: Moment specification, see :mod:`lbmpy.moments`.
            frame_of_reference: The frame of reference with respect to which the central moment should be computed.
        """
        monomials = []
        if isinstance(exponent_tuple_or_polynomial, tuple):
            monomials = [(1, exponent_tuple_or_polynomial)]
        else:
            monomials = polynomial_to_exponent_representation(exponent_tuple_or_polynomial, dim=self._dim)
        moment_value = sp.Integer(0)
        for coeff, moment in monomials:
            moment_value += coeff * self._cached_monomial_central_moment(moment, frame_of_reference)
        return moment_value.expand()
    def central_moments(self, exponent_tuples_or_polynomials, frame_of_reference):
        """Returns a tuple of this equilibrium distribution's central moments specified by
        ``exponent_tuples_or_polynomials``, computed according to the given ``frame_of_reference``.

        Args:
            exponent_tuples_or_polynomials: Sequence of moment specifications, see :mod:`lbmpy.moments`.
            frame_of_reference: The frame of reference with respect to which the central moment should be computed.
        """
        return tuple(self.central_moment(m, frame_of_reference) for m in exponent_tuples_or_polynomials)
    @sharedmethodcache("_cumulant_cache")
    def cumulant(self, exponent_tuple_or_polynomial, rescale=True):
        """Returns this equilibrium distribution's cumulant specified by ``exponent_tuple_or_polynomial``.

        Args:
            exponent_tuple_or_polynomial: Moment specification, see :mod:`lbmpy.moments`.
            rescale: If ``True``, the cumulant value should be multiplied by the zeroth-order moment.
        """
        monomials = []
        if isinstance(exponent_tuple_or_polynomial, tuple):
            monomials = [(1, exponent_tuple_or_polynomial)]
        else:
            monomials = polynomial_to_exponent_representation(exponent_tuple_or_polynomial, dim=self._dim)
        cumulant_value = sp.Integer(0)
        for coeff, moment in monomials:
            cumulant_value += coeff * self._cached_monomial_cumulant(moment, rescale=rescale)
        return cumulant_value.expand()
    def cumulants(self, exponent_tuples_or_polynomials, rescale=True):
        """Returns a tuple of this equilibrium distribution's cumulants specified by
        ``exponent_tuples_or_polynomials``.

        Args:
            exponent_tuples_or_polynomials: Sequence of moment specifications, see :mod:`lbmpy.moments`.
            rescale: If ``True``, the cumulant value should be multiplied by the zeroth-order moment.
        """
        return tuple(self.cumulant(m, rescale) for m in exponent_tuples_or_polynomials)
    # -------------- Monomial moment computation, to be overridden in subclass --------------------------------------
    @abstractmethod
    def _monomial_raw_moment(self, exponents):
        """See :func:`lbmpy.equilibrium.AbstractEquilibrium.moment`."""
        raise NotImplementedError("'_monomial_raw_moment' must be implemented by a subclass.")
    @abstractmethod
    def _monomial_central_moment(self, exponents, frame_of_reference):
        """See :func:`lbmpy.equilibrium.AbstractEquilibrium.central_moment`."""
        raise NotImplementedError("'_monomial_central_moment' must be implemented by a subclass.")
    @abstractmethod
    def _monomial_cumulant(self, exponents, rescale):
        """See :func:`lbmpy.equilibrium.AbstractEquilibrium.cumulant`."""
        raise NotImplementedError("'_monomial_cumulant' must be implemented by a subclass.")
    # -------------- Cached monomial moment computation methods -----------------------------------------------------
    # NOTE: each wrapper shares its cache dictionary with the corresponding public method above
    # (e.g. "_moment_cache"), so monomial and polynomial results are stored side by side per instance.
    @sharedmethodcache("_moment_cache")
    def _cached_monomial_raw_moment(self, exponents):
        return self._monomial_raw_moment(exponents)
    @sharedmethodcache("_central_moment_cache")
    def _cached_monomial_central_moment(self, exponents, frame_of_reference):
        return self._monomial_central_moment(exponents, frame_of_reference)
    @sharedmethodcache("_cumulant_cache")
    def _cached_monomial_cumulant(self, exponents, rescale):
        return self._monomial_cumulant(exponents, rescale)
    # -------------- HTML Representation ----------------------------------------------------------------------------
    def _repr_html_(self):
        # Jupyter rich display: render the continuous equation if available,
        # otherwise list the discrete populations.
        html = f"""
        <table style="border:none; width: 100%">
            <tr>
                <th colspan="2" style="text-align: left">
                    Instance of {self.__class__.__name__}
                </th>
            </tr>
        """
        cont_eq = self.continuous_equation
        if cont_eq is not None:
            html += f"""
            <tr>
                <td> Continuous Equation: </td>
                <td style="text-align: center">
                    ${sp.latex(self.continuous_equation)}$
                </td>
            </tr>
            """
        else:
            pdfs = self.discrete_populations
            if pdfs is not None:
                html += """
                <tr>
                    <td colspan="2" style="text-align: right;"> Discrete Populations: </td>
                </tr>
                """
                for f, eq in zip(sp.symbols(f"f_:{len(pdfs)}"), pdfs):
                    html += f'<tr><td colspan="2" style="text-align: left;"> ${f} = {sp.latex(eq)}$ </td></tr>'
        html += "</table>"
        return html
# end class AbstractEquilibrium
import sympy as sp
from .abstract_equilibrium import AbstractEquilibrium
from lbmpy.moments import contained_moments
from lbmpy.maxwellian_equilibrium import continuous_maxwellian_equilibrium
from lbmpy.continuous_distribution_measures import continuous_moment, continuous_cumulant
from pystencils.sympyextensions import remove_higher_order_terms, simplify_by_equality
def default_background_distribution(dim):
    """Return the rest-state background distribution: a compressible continuous Maxwellian
    at unit density, zero velocity and :math:`c_s^2 = 1/3`, without density deviation."""
    rest_velocity = (0,) * dim
    return ContinuousHydrodynamicMaxwellian(dim=dim, compressible=True, deviation_only=False,
                                            rho=sp.Integer(1), delta_rho=0, u=rest_velocity,
                                            c_s_sq=sp.Rational(1, 3))
class ContinuousHydrodynamicMaxwellian(AbstractEquilibrium):
    r"""
    The standard continuous Maxwellian equilibrium distribution for hydrodynamics.

    This class represents the Maxwellian equilibrium distribution of hydrodynamics in its continuous form
    in :math:`d` dimensions :cite:`lbm_book`:

    .. math::

        \Psi \left( \rho, \mathbf{u}, \mathbf{\xi} \right)
            = \rho \left( \frac{1}{2 \pi c_s^2} \right)^{d/2}
              \exp \left( \frac{- (\mathbf{\xi} - \mathbf{u})^2 }{2 c_s^2} \right)

    Beyond this classic, 'compressible' form of the equilibrium, an alternative form known as the
    incompressible equilibrium of the LBM can be obtained by setting the flag ``compressible=False``.
    The continuous incompressible equilibrium can be expressed as
    (:cite:`HeIncompressible,GruszczynskiCascadedPhaseFieldModel`):

    .. math::

        \Psi^{\mathrm{incomp}} \left( \rho, \mathbf{u}, \mathbf{\xi} \right)
            = \Psi \left( \rho_0, \mathbf{u}, \mathbf{\xi} \right)
              + \Psi \left( \delta\rho, \mathbf{0}, \mathbf{\xi} \right)

    Here, :math:`\rho_0` (typically :math:`\rho_0 = 1`) denotes the background density, and :math:`\delta\rho` is
    the density deviation, such that the total fluid density amounts to :math:`\rho = \rho_0 + \delta\rho`.

    To simplify computations when the zero-centered storage format is used for PDFs, both equilibrium variants can
    also be expressed in a *deviation-only* or *delta-equilibrium* form, which is obtained by subtracting the
    constant background distribution :math:`\Psi (\rho_0, \mathbf{0})`. The delta form expresses the equilibrium
    distribution only by its deviation from the rest state:

    .. math::

        \delta\Psi \left( \rho, \mathbf{u}, \mathbf{\xi} \right)
            &= \Psi \left( \rho, \mathbf{u}, \mathbf{\xi} \right)
               - \Psi \left( \rho_0, \mathbf{0}, \mathbf{\xi} \right) \\
        \delta\Psi^{\mathrm{incomp}} \left( \rho, \mathbf{u}, \mathbf{\xi} \right)
            &= \Psi^{\mathrm{incomp}} \left( \rho, \mathbf{u}, \mathbf{\xi} \right)
               - \Psi \left( \rho_0, \mathbf{0}, \mathbf{\xi} \right)

    Parameters:
        dim: Spatial dimensionality
        compressible: If `False`, the incompressible equilibrium is created
        deviation_only: If `True`, the delta-equilibrium is created
        order: The discretization order in velocity to which computed statistical modes should be truncated
        rho: Symbol or value for the density
        rho_background: Symbol or value for the background density
        delta_rho: Symbol or value for the density deviation
        u: Sequence of symbols for the macroscopic velocity
        v: Sequence of symbols for the particle velocity :math:`\xi`
        c_s_sq: Symbol or value for the squared speed of sound
    """
    def __init__(self, dim=3, compressible=True, deviation_only=False,
                 order=None,
                 rho=sp.Symbol("rho"),
                 rho_background=sp.Integer(1),
                 delta_rho=sp.Symbol("delta_rho"),
                 u=sp.symbols("u_:3"),
                 v=sp.symbols("v_:3"),
                 c_s_sq=sp.Symbol("c_s") ** 2):
        super().__init__(dim=dim)
        self._order = order
        self._compressible = compressible
        self._deviation_only = deviation_only
        self._rho = rho
        self._rho_background = rho_background
        self._delta_rho = delta_rho
        self._u = u[:dim]
        self._v = v[:dim]
        # trick to speed up sympy integration (otherwise it takes multiple minutes, or aborts):
        # use a positive, real symbol to represent c_s_sq -> then replace this symbol afterwards with the real c_s_sq
        # (see maxwellian_equilibrium.py)
        self._c_s_sq = c_s_sq
        self._c_s_sq_helper = sp.Symbol("csq_helper", positive=True, real=True)
        def psi(rho, u):
            return continuous_maxwellian_equilibrium(dim=self._dim,
                                                     rho=rho,
                                                     u=u,
                                                     v=self._v,
                                                     c_s_sq=self._c_s_sq_helper)
        # The incompressible variant carries rho_background in the base term and adds the
        # zero-velocity deviation term; the delta form subtracts the rest state.
        zeroth_moment_arg = self._rho if self._compressible else self._rho_background
        self._base_equation = psi(zeroth_moment_arg, self._u)
        self._corrections = []
        if not self._compressible:
            zeroth_order_correction = psi(self._delta_rho, (sp.Integer(0), ) * self._dim)
            self._corrections.append((sp.Integer(1), zeroth_order_correction))
        if self._deviation_only:
            rest_state = psi(self._rho_background, (sp.Integer(0), ) * self._dim)
            self._corrections.append((sp.Integer(-1), rest_state))
    @property
    def order(self):
        return self._order
    @property
    def deviation_only(self):
        return self._deviation_only
    @property
    def compressible(self):
        return self._compressible
    @property
    def density(self):
        return self._rho
    @property
    def background_density(self):
        return self._rho_background
    @property
    def density_deviation(self):
        return self._delta_rho
    @property
    def velocity(self):
        return self._u
    @property
    def continuous_equation(self):
        eq = self._base_equation + sum(f * e for f, e in self._corrections)
        eq = eq.subs(self._c_s_sq_helper, self._c_s_sq)
        return eq
    @property
    def zeroth_order_moment_symbol(self):
        return self._delta_rho if self._deviation_only else self._rho
    @property
    def first_order_moment_symbols(self):
        return self._u
    @property
    def background_distribution(self):
        return ContinuousHydrodynamicMaxwellian(dim=self.dim, compressible=True, deviation_only=False,
                                                order=self._order, rho=self._rho_background,
                                                rho_background=self._rho_background,
                                                delta_rho=0, u=(0,) * self.dim, v=self._v,
                                                c_s_sq=self._c_s_sq)
    @property
    def discrete_populations(self):
        return None
    def central_moment(self, exponent_tuple_or_polynomial, velocity=None):
        """Central moment with the frame of reference defaulting to the macroscopic velocity."""
        if velocity is None:
            velocity = self._u
        return super().central_moment(exponent_tuple_or_polynomial, velocity)
    def central_moments(self, exponent_tuples_or_polynomials, velocity=None):
        """Central moments with the frame of reference defaulting to the macroscopic velocity."""
        if velocity is None:
            velocity = self._u
        return super().central_moments(exponent_tuples_or_polynomials, velocity)
    def cumulant(self, exponent_tuple_or_polynomial, rescale=True):
        if not self._compressible or self._deviation_only:
            raise Exception("Cumulants can only be computed for the compressible, "
                            "non-deviation maxwellian equilibrium!")
        return super().cumulant(exponent_tuple_or_polynomial, rescale=rescale)
    # ------------------ Overridden Moment Computation ------------------------------------------
    def _monomial_raw_moment(self, exponents):
        moment_value = continuous_moment(self._base_equation, exponents, self._v)
        for coeff, corr in self._corrections:
            moment_value += coeff * continuous_moment(corr, exponents, self._v)
        moment_value = self._correct_order_and_cssq(moment_value)
        moment_value = self._simplify_moment(moment_value)
        return moment_value
    def _monomial_central_moment(self, cm_exponents, velocity):
        # Setting up the central moment-generating function using SymPy integration
        # will take unfeasibly long at times
        # So we compute the central moments by binomial expansion in raw moments
        cm_order = sum(cm_exponents)
        contained_raw_moments = contained_moments(cm_exponents, exclude_original=False)
        moment_value = sp.Integer(0)
        for rm_exponents in contained_raw_moments:
            rm_order = sum(rm_exponents)
            factor = (-1)**(cm_order - rm_order)
            factor *= sp.Mul(*(u**(c - i) * sp.binomial(c, i)
                               for u, c, i in zip(velocity, cm_exponents, rm_exponents)))
            rm_value = self._cached_monomial_raw_moment(rm_exponents)
            moment_value += factor * rm_value
        moment_value = self._correct_order_and_cssq(moment_value)
        moment_value = self._simplify_moment(moment_value)
        return moment_value
    def _monomial_cumulant(self, c_exponents, rescale):
        # this implementation works only for the compressible, non-deviation equilibrium
        cumulant_value = continuous_cumulant(self._base_equation, c_exponents, self._v)
        cumulant_value = self._correct_order_and_cssq(cumulant_value)
        if rescale:
            cumulant_value = self._rho * cumulant_value
        return cumulant_value
    def _correct_order_and_cssq(self, term):
        # replace the positive/real helper symbol by the true c_s_sq, then truncate in velocity
        term = term.subs(self._c_s_sq_helper, self._c_s_sq)
        term = term.expand()
        if self._order is not None:
            return remove_higher_order_terms(term, order=self._order, symbols=self._u)
        else:
            return term
    def _simplify_moment(self, moment_value):
        # Simplify the moment expression using rho = rho_background + delta_rho, but only
        # when that identity can actually occur in the expression.
        if (self.deviation_only or not self.compressible) \
                and isinstance(self.density, sp.Symbol) and isinstance(self.density_deviation, sp.Symbol):
            moment_value = simplify_by_equality(moment_value, self.density,
                                                self.density_deviation, self.background_density)
        return moment_value
    # ------------------ Utility ----------------------------------------------------------------
    def __repr__(self):
        #   fixed: previous version was missing the '=' and the ', ' separator after deviation_only,
        #   producing e.g. "deviation_only:Falseorder=2)"
        return f"ContinuousHydrodynamicMaxwellian({self.dim}D, " \
               f"compressible={self.compressible}, deviation_only={self.deviation_only}, " \
               f"order={self.order})"
    def _repr_html_(self):
        def stylized_bool(b):
            return "&#10003;" if b else "&#10007;"
        html = f"""
        <table style="border:none; width: 100%">
            <tr>
                <th colspan="3" style="text-align: left">
                    Continuous Hydrodynamic Maxwellian Equilibrium
                </th>
                <td rowspan="2" style="width: 50%; text-align: center">
                    $f ({sp.latex(self._rho)}, {sp.latex(self._u)}, {sp.latex(self._v)})
                    = {sp.latex(self.continuous_equation)}$
                </td>
            </tr>
            <tr>
                <td>Compressible: {stylized_bool(self._compressible)}</td>
                <td>Deviation Only: {stylized_bool(self._deviation_only)}</td>
                <td>Order: {"&#8734;" if self._order is None else self._order}</td>
            </tr>
        </table>
        """
        return html
# end class ContinuousHydrodynamicMaxwellian
import sympy as sp
from pystencils.sympyextensions import simplify_by_equality
from lbmpy.maxwellian_equilibrium import discrete_maxwellian_equilibrium
from .generic_discrete_equilibrium import GenericDiscreteEquilibrium
class DiscreteHydrodynamicMaxwellian(GenericDiscreteEquilibrium):
    r"""
    The textbook discretization of the Maxwellian equilibrium distribution of hydrodynamics.

    This class represents the default discretization of the Maxwellian in velocity space,
    computed from the distribution's expansion in Hermite polynomials (cf. :cite:`lbm_book`).
    In :math:`d` dimensions, its populations :math:`f_i` on a given stencil
    :math:`(\mathbf{c}_i)_{i=0,\dots,q-1}` are given by

    .. math::

        f_i (\rho, \mathbf{u})
            = w_i \rho \left(
                1 + \frac{\mathbf{c}_i \cdot \mathbf{u}}{c_s^2}
                + \frac{(\mathbf{c}_i \cdot \mathbf{u})^2}{2 c_s^4}
                - \frac{\mathbf{u} \cdot \mathbf{u}}{2 c_s^2}
              \right).

    Here :math:`w_i` denote the Hermite integration weights, also called lattice weights.
    The incompressible variant of this distribution :cite:`HeIncompressible` can be written as

    .. math::

        f_i^{\mathrm{incomp}} (\rho, \mathbf{u})
            = w_i \rho + w_i \rho_0 \left(
                \frac{\mathbf{c}_i \cdot \mathbf{u}}{c_s^2}
                + \frac{(\mathbf{c}_i \cdot \mathbf{u})^2}{2 c_s^4}
                - \frac{\mathbf{u} \cdot \mathbf{u}}{2 c_s^2}
              \right).

    Again, for usage with zero-centered PDF storage, both distributions may be expressed in a delta-form
    by subtracting their values at the background rest state at :math:`\rho = \rho_0`,
    :math:`\mathbf{u} = \mathbf{0}`, which are exactly the lattice weights:

    .. math::

        \delta f_i &= f_i - w_i \\
        \delta f_i^{\mathrm{incomp}} &= f_i^{\mathrm{incomp}} - w_i \\

    Parameters:
        stencil: Discrete velocity set for the discretization, see :class:`lbmpy.stencils.LBStencil`
        compressible: If `False`, the incompressible equilibrium is created
        deviation_only: If `True`, the delta-equilibrium is created
        order: The discretization order in velocity to which computed statistical modes should be truncated
        rho: Symbol or value for the density
        delta_rho: Symbol or value for the density deviation
        u: Sequence of symbols for the macroscopic velocity
        c_s_sq: Symbol or value for the squared speed of sound
    """
    def __init__(self, stencil, compressible=True, deviation_only=False,
                 order=2,
                 rho=sp.Symbol("rho"),
                 delta_rho=sp.Symbol("delta_rho"),
                 u=sp.symbols("u_:3"),
                 c_s_sq=sp.Symbol("c_s") ** 2):
        dim = stencil.D
        # order=None is interpreted as "no truncation"; the underlying expansion is of order 4
        if order is None:
            order = 4
        self._order = order
        self._compressible = compressible
        self._deviation_only = deviation_only
        self._rho = rho
        self._rho_background = sp.Integer(1)
        self._delta_rho = delta_rho
        self._u = u[:dim]
        self._c_s_sq = c_s_sq
        pdfs = discrete_maxwellian_equilibrium(stencil, rho=rho, u=u,
                                               order=order, c_s_sq=c_s_sq,
                                               compressible=compressible)
        if deviation_only:
            # subtract the rest state (= lattice weights) and simplify with rho = 1 + delta_rho
            shift = discrete_maxwellian_equilibrium(stencil, rho=self._rho_background, u=(0,) * dim,
                                                    order=0, c_s_sq=c_s_sq, compressible=False)
            pdfs = tuple(simplify_by_equality(f - s, rho, delta_rho, self._rho_background) for f, s in zip(pdfs, shift))
        zeroth_order_moment = delta_rho if deviation_only else rho
        super().__init__(stencil, pdfs, zeroth_order_moment, u, deviation_only)
    @property
    def order(self):
        return self._order
    @property
    def deviation_only(self):
        return self._deviation_only
    @property
    def compressible(self):
        return self._compressible
    @property
    def density(self):
        return self._rho
    @property
    def background_density(self):
        return self._rho_background
    @property
    def density_deviation(self):
        return self._delta_rho
    @property
    def velocity(self):
        return self._u
    @property
    def background_distribution(self):
        """Returns the discrete Maxwellian background distribution, which amounts exactly to the
        lattice weights."""
        return DiscreteHydrodynamicMaxwellian(self._stencil, compressible=True, deviation_only=False,
                                              order=self._order, rho=self._rho_background,
                                              delta_rho=0, u=(0,) * self.dim, c_s_sq=self._c_s_sq)
    def central_moment(self, exponent_tuple_or_polynomial, velocity=None):
        """Central moment with the frame of reference defaulting to the macroscopic velocity."""
        if velocity is None:
            velocity = self._u
        return super().central_moment(exponent_tuple_or_polynomial, velocity)
    def central_moments(self, exponent_tuples_or_polynomials, velocity=None):
        """Central moments with the frame of reference defaulting to the macroscopic velocity."""
        if velocity is None:
            velocity = self._u
        return super().central_moments(exponent_tuples_or_polynomials, velocity)
    def cumulant(self, exponent_tuple_or_polynomial, rescale=True):
        if not self._compressible or self._deviation_only:
            raise Exception("Cumulants can only be computed for the compressible, "
                            "non-deviation maxwellian equilibrium!")
        return super().cumulant(exponent_tuple_or_polynomial, rescale=rescale)
    # ------------------ Utility ----------------------------------------------------------------
    def __repr__(self):
        #   fixed: previous version was missing the '=' and the ', ' separator after deviation_only,
        #   producing e.g. "deviation_only:Falseorder=2)"
        return f"DiscreteHydrodynamicMaxwellian({self.stencil}, " \
               f"compressible={self.compressible}, deviation_only={self.deviation_only}, " \
               f"order={self.order})"
    def _repr_html_(self):
        def stylized_bool(b):
            return "&#10003;" if b else "&#10007;"
        html = f"""
        <div style="max-height: 150pt; overflow-y: auto;">
        <table style="border:none; width: 100%">
            <tr>
                <th colspan="3" style="text-align: left">
                    Discrete Hydrodynamic Maxwellian Equilibrium
                </th>
                <td>Compressible: {stylized_bool(self._compressible)}</td>
                <td>Deviation Only: {stylized_bool(self._deviation_only)}</td>
                <td>Order: {"&#8734;" if self._order is None else self._order}</td>
            </tr>
        """
        pdfs = self.discrete_populations
        for f, eq in zip(sp.symbols(f"f_:{len(pdfs)}"), pdfs):
            html += f'<tr><td colspan="6" style="text-align: left;"> ${f} = {sp.latex(eq)}$ </td></tr>'
        html += "</table></div>"
        return html
# end class DiscreteHydrodynamicMaxwellian
import sympy as sp
from .abstract_equilibrium import AbstractEquilibrium
from lbmpy.moments import discrete_moment, moment_matrix
from lbmpy.cumulants import discrete_cumulant
def discrete_equilibrium_from_matching_moments(stencil, moment_constraints,
                                               zeroth_order_moment_symbol,
                                               first_order_moment_symbols,
                                               deviation_only=False):
    """Construct a discrete equilibrium whose populations reproduce the given moment values exactly.

    Args:
        stencil: Discrete velocity set, see :class:`lbmpy.stencils.LBStencil`.
        moment_constraints: Mapping of exactly ``stencil.Q`` moments to their desired equilibrium values.
        zeroth_order_moment_symbol: Symbol of the distribution's zeroth-order moment.
        first_order_moment_symbols: Symbols of the distribution's first-order moment vector.
        deviation_only: Forwarded to :class:`GenericDiscreteEquilibrium`.

    Raises:
        ValueError: If the moment matrix of the given moments cannot be inverted.
    """
    assert len(moment_constraints) == stencil.Q
    mm = moment_matrix(tuple(moment_constraints.keys()), stencil)
    try:
        mm_inv = mm.inv()
    except sp.matrices.inverse.NonInvertibleMatrixError as e:
        raise ValueError("Could not construct equilibrium from given moment constraints.") from e
    pdfs = (mm_inv * sp.Matrix(list(moment_constraints.values()))).expand()
    return GenericDiscreteEquilibrium(stencil, pdfs, zeroth_order_moment_symbol,
                                      first_order_moment_symbols, deviation_only=deviation_only)
class GenericDiscreteEquilibrium(AbstractEquilibrium):
    """
    Class for encapsulating arbitrary discrete equilibria, given by their equilibrium populations.

    This class takes both a stencil and a sequence of populations modelling a discrete distribution function
    and provides basic functionality for computing and caching that distribution's statistical modes.

    Parameters:
        stencil: Discrete velocity set, see :class:`lbmpy.stencils.LBStencil`.
        equilibrium_pdfs: List of q populations, describing the particle distribution on the discrete velocity
                          set given by the stencil.
        zeroth_order_moment_symbol: Symbol corresponding to the distribution's zeroth-order moment, the area under
                                    its curve (see :attr:`zeroth_order_moment_symbol`).
        first_order_moment_symbols: Sequence of symbols corresponding to the distribution's first-order moment, the
                                    vector of its mean values (see :attr:`first_order_moment_symbols`).
        deviation_only: Set to `True` if the given populations model only the deviation from a rest state, to be
                        used in conjunction with the zero-centered storage format.
    """
    def __init__(self, stencil, equilibrium_pdfs,
                 zeroth_order_moment_symbol,
                 first_order_moment_symbols,
                 deviation_only=False):
        super().__init__(dim=stencil.D)
        if len(equilibrium_pdfs) != stencil.Q:
            #   fixed: the two implicitly concatenated literals were missing a separating space,
            #   producing "Wrong number of PDFs.On the ..."
            raise ValueError(f"Wrong number of PDFs. "
                             f"On the {stencil} stencil, exactly {stencil.Q} populations must be passed!")
        self._stencil = stencil
        self._pdfs = tuple(equilibrium_pdfs)
        self._zeroth_order_moment_symbol = zeroth_order_moment_symbol
        self._first_order_moment_symbols = first_order_moment_symbols
        self._deviation_only = deviation_only
    @property
    def stencil(self):
        """The discrete velocity set this equilibrium is defined on."""
        return self._stencil
    @property
    def deviation_only(self):
        return self._deviation_only
    @property
    def continuous_equation(self):
        """Always returns `None`."""
        return None
    @property
    def discrete_populations(self):
        return self._pdfs
    @property
    def background_distribution(self):
        """Always returns `None`. To specify a background distribution, override this class."""
        return None
    @property
    def zeroth_order_moment_symbol(self):
        return self._zeroth_order_moment_symbol
    @property
    def first_order_moment_symbols(self):
        return self._first_order_moment_symbols
    # Moment Computation
    def _monomial_raw_moment(self, exponents):
        """Raw moment as a discrete sum over the populations, see :func:`lbmpy.moments.discrete_moment`."""
        return discrete_moment(self._pdfs, exponents, self._stencil)
    def _monomial_central_moment(self, exponents, frame_of_reference):
        """Central moment obtained by shifting the stencil velocities by the frame of reference."""
        return discrete_moment(self._pdfs, exponents, self._stencil, shift_velocity=frame_of_reference)
    def _monomial_cumulant(self, exponents, rescale):
        """Discrete cumulant, optionally rescaled by the zeroth-order moment."""
        value = discrete_cumulant(self._pdfs, exponents, self._stencil)
        if rescale:
            value = self.zeroth_order_moment_symbol * value
        return value
...@@ -4,13 +4,19 @@ import sympy as sp ...@@ -4,13 +4,19 @@ import sympy as sp
from pystencils import Field from pystencils import Field
# ------------------------------------------------ Interface ----------------------------------------------------------- # ------------------------------------------------ Interface -----------------------------------------------------------
from pystencils.astnodes import LoopOverCoordinate
from pystencils.stencil import inverse_direction from pystencils.stencil import inverse_direction
from lbmpy.enums import Stencil
from lbmpy.stencils import LBStencil
from ._compat import get_loop_counter_symbol
__all__ = ['PdfFieldAccessor', 'CollideOnlyInplaceAccessor', 'StreamPullTwoFieldsAccessor', __all__ = ['PdfFieldAccessor', 'CollideOnlyInplaceAccessor', 'StreamPullTwoFieldsAccessor',
'AAEvenTimeStepAccessor', 'AAOddTimeStepAccessor', 'AAEvenTimeStepAccessor', 'AAOddTimeStepAccessor',
'PeriodicTwoFieldsAccessor', 'StreamPushTwoFieldsAccessor', 'PeriodicTwoFieldsAccessor', 'StreamPushTwoFieldsAccessor',
'EsoTwistEvenTimeStepAccessor', 'EsoTwistOddTimeStepAccessor', 'EsoTwistEvenTimeStepAccessor', 'EsoTwistOddTimeStepAccessor',
'EsoPullEvenTimeStepAccessor', 'EsoPullOddTimeStepAccessor',
'EsoPushEvenTimeStepAccessor', 'EsoPushOddTimeStepAccessor',
'visualize_pdf_field_accessor', 'visualize_field_mapping'] 'visualize_pdf_field_accessor', 'visualize_field_mapping']
...@@ -51,11 +57,11 @@ class CollideOnlyInplaceAccessor(PdfFieldAccessor): ...@@ -51,11 +57,11 @@ class CollideOnlyInplaceAccessor(PdfFieldAccessor):
@staticmethod @staticmethod
def read(field, stencil): def read(field, stencil):
return [field(i) for i in range(len(stencil))] return [field(i) for i in range(stencil.Q)]
@staticmethod @staticmethod
def write(field, stencil): def write(field, stencil):
return [field(i) for i in range(len(stencil))] return [field(i) for i in range(stencil.Q)]
class StreamPullTwoFieldsAccessor(PdfFieldAccessor): class StreamPullTwoFieldsAccessor(PdfFieldAccessor):
...@@ -67,7 +73,7 @@ class StreamPullTwoFieldsAccessor(PdfFieldAccessor): ...@@ -67,7 +73,7 @@ class StreamPullTwoFieldsAccessor(PdfFieldAccessor):
@staticmethod @staticmethod
def write(field, stencil): def write(field, stencil):
return [field(i) for i in range(len(stencil))] return [field(i) for i in range(stencil.Q)]
class StreamPushTwoFieldsAccessor(PdfFieldAccessor): class StreamPushTwoFieldsAccessor(PdfFieldAccessor):
...@@ -75,7 +81,7 @@ class StreamPushTwoFieldsAccessor(PdfFieldAccessor): ...@@ -75,7 +81,7 @@ class StreamPushTwoFieldsAccessor(PdfFieldAccessor):
@staticmethod @staticmethod
def read(field, stencil): def read(field, stencil):
return [field(i) for i in range(len(stencil))] return [field(i) for i in range(stencil.Q)]
@staticmethod @staticmethod
def write(field, stencil): def write(field, stencil):
...@@ -109,7 +115,7 @@ class PeriodicTwoFieldsAccessor(PdfFieldAccessor): ...@@ -109,7 +115,7 @@ class PeriodicTwoFieldsAccessor(PdfFieldAccessor):
lower_limit = self._ghostLayers lower_limit = self._ghostLayers
upper_limit = field.spatial_shape[coord_id] - 1 - self._ghostLayers upper_limit = field.spatial_shape[coord_id] - 1 - self._ghostLayers
limit_diff = upper_limit - lower_limit limit_diff = upper_limit - lower_limit
loop_counter = LoopOverCoordinate.get_loop_counter_symbol(coord_id) loop_counter = get_loop_counter_symbol(coord_id)
if dir_element == 0: if dir_element == 0:
periodic_pull_direction.append(0) periodic_pull_direction.append(0)
elif dir_element == 1: elif dir_element == 1:
...@@ -125,7 +131,7 @@ class PeriodicTwoFieldsAccessor(PdfFieldAccessor): ...@@ -125,7 +131,7 @@ class PeriodicTwoFieldsAccessor(PdfFieldAccessor):
@staticmethod @staticmethod
def write(field, stencil): def write(field, stencil):
return [field(i) for i in range(len(stencil))] return [field(i) for i in range(stencil.Q)]
class AAEvenTimeStepAccessor(PdfFieldAccessor): class AAEvenTimeStepAccessor(PdfFieldAccessor):
...@@ -133,11 +139,11 @@ class AAEvenTimeStepAccessor(PdfFieldAccessor): ...@@ -133,11 +139,11 @@ class AAEvenTimeStepAccessor(PdfFieldAccessor):
@staticmethod @staticmethod
def read(field, stencil): def read(field, stencil):
return [field(i) for i in range(len(stencil))] return [field(i) for i in range(stencil.Q)]
@staticmethod @staticmethod
def write(field, stencil): def write(field, stencil):
return [field(stencil.index(inverse_direction(d))) for d in stencil] return [field(stencil.inverse_index(d)) for d in stencil]
class AAOddTimeStepAccessor(PdfFieldAccessor): class AAOddTimeStepAccessor(PdfFieldAccessor):
...@@ -145,67 +151,179 @@ class AAOddTimeStepAccessor(PdfFieldAccessor): ...@@ -145,67 +151,179 @@ class AAOddTimeStepAccessor(PdfFieldAccessor):
@staticmethod @staticmethod
def read(field, stencil): def read(field, stencil):
res = [] return [field[inverse_direction(d)](stencil.inverse_index(d)) for i, d in enumerate(stencil)]
for i, d in enumerate(stencil):
inv_dir = inverse_direction(d)
field_access = field[inv_dir](stencil.index(inv_dir))
res.append(field_access)
return res
@staticmethod @staticmethod
def write(field, stencil): def write(field, stencil):
return [field[d](i) for i, d in enumerate(stencil)] return [field[d](i) for i, d in enumerate(stencil)]
class EsoTwistEvenTimeStepAccessor(PdfFieldAccessor):
is_inplace = True
@staticmethod
def read(field, stencil):
return [field[tuple(max(-e, 0) for e in d)](i) for i, d in enumerate(stencil)]
@staticmethod
def write(field, stencil):
return [field[tuple(max(e, 0) for e in d)](stencil.inverse_index(d)) for d in stencil]
class EsoTwistOddTimeStepAccessor(PdfFieldAccessor): class EsoTwistOddTimeStepAccessor(PdfFieldAccessor):
is_inplace = True is_inplace = True
@staticmethod @staticmethod
def read(field, stencil): def read(field, stencil):
result = [] return [field[tuple(max(e, 0) for e in inverse_direction(d))](stencil.inverse_index(d)) for d in stencil]
for direction in stencil:
inv_dir = inverse_direction(direction) @staticmethod
spatial_offset = tuple(max(e, 0) for e in inv_dir) def write(field, stencil):
result.append(field[spatial_offset](stencil.index(inv_dir))) return [field[tuple(max(e, 0) for e in d)](i) for i, d in enumerate(stencil)]
class EsoPullEvenTimeStepAccessor(PdfFieldAccessor):
is_inplace = True
@staticmethod
def read(field, stencil):
lehmann_stencil = _get_lehmann_stencil(stencil)
center_cell = tuple([0] * stencil.D)
result = [field.center]
for i, d in enumerate(stencil):
if i == 0:
continue
if lehmann_stencil.index(d) % 2 == 0:
result.append(field[inverse_direction(d)](i))
else:
result.append(field[center_cell](i))
return result return result
@staticmethod @staticmethod
def write(field, stencil): def write(field, stencil):
result = [] lehmann_stencil = _get_lehmann_stencil(stencil)
for i, direction in enumerate(stencil): center_cell = tuple([0] * stencil.D)
spatial_offset = tuple(max(e, 0) for e in direction) result = [field.center]
result.append(field[spatial_offset](i)) for i, d in enumerate(stencil):
if i == 0:
continue
if lehmann_stencil.index(d) % 2 == 0:
result.append(field[center_cell](stencil.inverse_index(d)))
else:
result.append(field[d](stencil.inverse_index(d)))
return result return result
class EsoTwistEvenTimeStepAccessor(PdfFieldAccessor): class EsoPullOddTimeStepAccessor(PdfFieldAccessor):
is_inplace = True is_inplace = True
@staticmethod @staticmethod
def read(field, stencil): def read(field, stencil):
result = [] lehmann_stencil = _get_lehmann_stencil(stencil)
for i, direction in enumerate(stencil): center_cell = tuple([0] * stencil.D)
spatial_offset = tuple(max(-e, 0) for e in direction) result = [field.center]
result.append(field[spatial_offset](i)) for i, d in enumerate(stencil):
if i == 0:
continue
if lehmann_stencil.index(d) % 2 == 0:
result.append(field[inverse_direction(d)](stencil.inverse_index(d)))
else:
result.append(field[center_cell](stencil.inverse_index(d)))
return result return result
@staticmethod @staticmethod
def write(field, stencil): def write(field, stencil):
result = [] lehmann_stencil = _get_lehmann_stencil(stencil)
for direction in stencil: center_cell = tuple([0] * stencil.D)
inv_dir = inverse_direction(direction) result = [field.center]
spatial_offset = tuple(max(e, 0) for e in direction) for i, d in enumerate(stencil):
result.append(field[spatial_offset](stencil.index(inv_dir))) if i == 0:
continue
if lehmann_stencil.index(d) % 2 == 0:
result.append(field[center_cell](i))
else:
result.append(field[d](i))
return result
class EsoPushEvenTimeStepAccessor(PdfFieldAccessor):
is_inplace = True
@staticmethod
def read(field, stencil):
lehmann_stencil = _get_lehmann_stencil(stencil)
center_cell = tuple([0] * stencil.D)
result = [field.center]
for i, d in enumerate(stencil):
if i == 0:
continue
if lehmann_stencil.index(d) % 2 == 0:
result.append(field[center_cell](stencil.inverse_index(d)))
else:
result.append(field[inverse_direction(d)](stencil.inverse_index(d)))
return result
@staticmethod
def write(field, stencil):
lehmann_stencil = _get_lehmann_stencil(stencil)
center_cell = tuple([0] * stencil.D)
result = [field.center]
for i, d in enumerate(stencil):
if i == 0:
continue
if lehmann_stencil.index(d) % 2 == 0:
result.append(field[d](i))
else:
result.append(field[center_cell](i))
return result
class EsoPushOddTimeStepAccessor(PdfFieldAccessor):
is_inplace = True
@staticmethod
def read(field, stencil):
lehmann_stencil = _get_lehmann_stencil(stencil)
center_cell = tuple([0] * stencil.D)
result = [field.center]
for i, d in enumerate(stencil):
inv_dir = inverse_direction(d)
if i == 0:
continue
if lehmann_stencil.index(d) % 2 == 0:
result.append(field[center_cell](i))
else:
result.append(field[inv_dir](i))
return result
@staticmethod
def write(field, stencil):
lehmann_stencil = _get_lehmann_stencil(stencil)
center_cell = tuple([0] * stencil.D)
result = [field.center]
for i, d in enumerate(stencil):
if i == 0:
continue
if lehmann_stencil.index(d) % 2 == 0:
result.append(field[d](stencil.inverse_index(d)))
else:
result.append(field[center_cell](stencil.inverse_index(d)))
return result return result
# -------------------------------------------- Visualization ----------------------------------------------------------- # -------------------------------------------- Visualization -----------------------------------------------------------
def visualize_field_mapping(axes, stencil, field_mapping, color='b'): def visualize_field_mapping(axes, stencil, field_mapping, inverted=False, color='b'):
from lbmpy.plot import LbGrid from lbmpy.plot import LbGrid
grid = LbGrid(3, 3) grid = LbGrid(3, 3)
grid.fill_with_default_arrows() grid.fill_with_default_arrows(inverted=inverted)
for field_access, direction in zip(field_mapping, stencil): for field_access, direction in zip(field_mapping, stencil):
field_position = stencil[field_access.index[0]] field_position = stencil[field_access.index[0]]
neighbor = field_access.offsets neighbor = field_access.offsets
...@@ -214,14 +332,18 @@ def visualize_field_mapping(axes, stencil, field_mapping, color='b'): ...@@ -214,14 +332,18 @@ def visualize_field_mapping(axes, stencil, field_mapping, color='b'):
grid.draw(axes) grid.draw(axes)
def visualize_pdf_field_accessor(pdf_field_accessor, figure=None): def visualize_pdf_field_accessor(pdf_field_accessor, title=True, read_plot_params=None, write_plot_params=None,
from lbmpy.stencils import get_stencil figure=None):
if write_plot_params is None:
write_plot_params = {}
if read_plot_params is None:
read_plot_params = {}
if figure is None: if figure is None:
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
figure = plt.gcf() figure = plt.gcf()
stencil = get_stencil('D2Q9') stencil = LBStencil(Stencil.D2Q9)
figure.patch.set_facecolor('white') figure.patch.set_facecolor('white')
...@@ -232,8 +354,36 @@ def visualize_pdf_field_accessor(pdf_field_accessor, figure=None): ...@@ -232,8 +354,36 @@ def visualize_pdf_field_accessor(pdf_field_accessor, figure=None):
ax_left = figure.add_subplot(1, 2, 1) ax_left = figure.add_subplot(1, 2, 1)
ax_right = figure.add_subplot(1, 2, 2) ax_right = figure.add_subplot(1, 2, 2)
visualize_field_mapping(ax_left, stencil, pre_collision_accesses, color='k') if 'color' not in read_plot_params:
visualize_field_mapping(ax_right, stencil, post_collision_accesses, color='r') read_plot_params['color'] = 'k'
if 'color' not in write_plot_params:
write_plot_params['color'] = 'r'
visualize_field_mapping(ax_left, stencil, pre_collision_accesses, **read_plot_params)
visualize_field_mapping(ax_right, stencil, post_collision_accesses, **write_plot_params)
if title:
ax_left.set_title("Read")
ax_right.set_title("Write")
# -------------------------------------------- Helpers -----------------------------------------------------------
ax_left.set_title("Read")
ax_right.set_title("Write") def _get_lehmann_stencil(stencil):
"""
EsoPull and EsoPush streaming is only simple to implement with a specific stencil ordering, that comes from
"High Performance Free Surface LBM on GPUs" by moritz lehmann
Args:
stencil: lattice Boltzmann stencil
"""
if stencil.Q == 9:
return LBStencil(Stencil.D2Q9, ordering="lehmann")
elif stencil.Q == 15:
return LBStencil(Stencil.D3Q15, ordering="lehmann")
elif stencil.Q == 19:
return LBStencil(Stencil.D3Q19, ordering="lehmann")
elif stencil.Q == 27:
return LBStencil(Stencil.D3Q27, ordering="lehmann")
else:
ValueError("EsoPull or EsoPush is only available for D2Q9, D3Q15, D3Q19 and D3Q27 stencil")
import sympy as sp
import pystencils as ps
from pystencils.field import Field
def welford_assignments(field, mean_field, sum_of_squares_field=None, sum_of_cubes_field=None):
    r"""
    Creates the assignments for the kernel creation of Welford's algorithm
    (https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm).
    This algorithm is an online algorithm for the mean and variance calculation of sample data, here implemented for
    the execution on scalar or vector fields, e.g., velocity.
    The calculation of the variance / the third-order central moments is optional and only performed if a field for
    the sum of squares / sum of cubes is given.
    The mean value is directly updated in the mean vector field.
    The variance / covariance must be retrieved in a post-processing step. Let :math `M_{2,n}` denote the value of the
    sum of squares after the first :math `n` samples. According to Welford the biased sample variance
    :math `\sigma_n^2` and the unbiased sample variance :math `s_n^2` are given by
    .. math ::
        \sigma_n^2 = \frac{M_{2,n}}{n}
        s_n^2 = \frac{M_{2,n}}{n-1},
    respectively.
    Likewise, to retrieve the 3rd-order central moment(s) of the (vector) field, the sum of cubes field must be
    divided by :math `n` in a post-processing step.
    """
    # Normalize `field` to a Field.Access and determine the number of components per cell.
    if isinstance(field, Field):
        dim = field.values_per_cell()
        welford_field = field.center
    elif isinstance(field, Field.Access):
        dim = field.field.values_per_cell()
        welford_field = field
    else:
        raise ValueError("Vector field has to be a pystencils Field or Field.Access")
    if isinstance(mean_field, Field):
        welford_mean_field = mean_field.center
    elif isinstance(mean_field, Field.Access):
        welford_mean_field = mean_field
    else:
        raise ValueError("Mean vector field has to be a pystencils Field or Field.Access")
    if sum_of_squares_field is None:
        # sum of products will not be calculated, i.e., the covariance matrix is not retrievable
        welford_sum_of_squares_field = None
    else:
        if isinstance(sum_of_squares_field, Field):
            welford_sum_of_squares_field = sum_of_squares_field.center
        elif isinstance(sum_of_squares_field, Field.Access):
            welford_sum_of_squares_field = sum_of_squares_field
        else:
            raise ValueError("Sum of squares field has to be a pystencils Field or Field.Access")
        # The covariance accumulator is stored row-major: entry (i, j) lives at index i * dim + j.
        assert welford_sum_of_squares_field.field.values_per_cell() == dim ** 2, \
            (f"Sum of squares field does not have the right layout. "
             f"Index dimension: {welford_sum_of_squares_field.field.values_per_cell()}, expected: {dim ** 2}")
    if sum_of_cubes_field is None:
        # sum of cubes will not be calculated, i.e., the 3rd-order central moments are not retrievable
        welford_sum_of_cubes_field = None
    else:
        if isinstance(sum_of_cubes_field, Field):
            welford_sum_of_cubes_field = sum_of_cubes_field.center
        elif isinstance(sum_of_cubes_field, Field.Access):
            welford_sum_of_cubes_field = sum_of_cubes_field
        else:
            raise ValueError("Sum of cubes field has to be a pystencils Field or Field.Access")
        # Third-order accumulator layout: entry (i, j, k) lives at index (i * dim + j) * dim + k.
        assert welford_sum_of_cubes_field.field.values_per_cell() == dim ** 3, \
            (f"Sum of cubes field does not have the right layout. "
             f"Index dimension: {welford_sum_of_cubes_field.field.values_per_cell()}, expected: {dim ** 3}")
    # for the calculation of the third-order moments, the variance must also be calculated
    if welford_sum_of_cubes_field is not None:
        assert welford_sum_of_squares_field is not None
    # actual assignments
    # `counter` is expected to hold the current sample count n at kernel execution time.
    counter = sp.Symbol('counter')
    # delta[i]: deviation of the new sample from the OLD mean (before the mean update below).
    delta = sp.symbols(f"delta_:{dim}")
    main_assignments = list()
    for i in range(dim):
        main_assignments.append(ps.Assignment(delta[i], welford_field.at_index(i) - welford_mean_field.at_index(i)))
        # In-place Welford mean update: mean += delta / n.
        main_assignments.append(ps.Assignment(welford_mean_field.at_index(i),
                                              welford_mean_field.at_index(i) + delta[i] / counter))
    if sum_of_squares_field is not None:
        # delta2[i]: deviation of the new sample from the UPDATED mean; the product
        # delta[i] * delta2[j] is the standard Welford covariance increment.
        delta2 = sp.symbols(f"delta2_:{dim}")
        for i in range(dim):
            main_assignments.append(
                ps.Assignment(delta2[i], welford_field.at_index(i) - welford_mean_field.at_index(i)))
        for i in range(dim):
            for j in range(dim):
                idx = i * dim + j
                main_assignments.append(ps.Assignment(
                    welford_sum_of_squares_field.at_index(idx),
                    welford_sum_of_squares_field.at_index(idx) + delta[i] * delta2[j]))
        if sum_of_cubes_field is not None:
            # NOTE(review): the correction terms below read the sum-of-squares accumulator via
            # call syntax `welford_sum_of_squares_field(...)` while writes use `.at_index(...)` —
            # presumably equivalent Field.Access constructions; confirm against the pystencils API.
            for i in range(dim):
                for j in range(dim):
                    for k in range(dim):
                        idx = (i * dim + j) * dim + k
                        main_assignments.append(ps.Assignment(
                            welford_sum_of_cubes_field.at_index(idx),
                            welford_sum_of_cubes_field.at_index(idx)
                            - delta[k] / counter * welford_sum_of_squares_field(i * dim + j)
                            - delta[j] / counter * welford_sum_of_squares_field(k * dim + i)
                            - delta[i] / counter * welford_sum_of_squares_field(j * dim + k)
                            + delta2[i] * (2 * delta[j] - delta2[j]) * delta[k]
                        ))
    return main_assignments
...@@ -3,30 +3,84 @@ ...@@ -3,30 +3,84 @@
to generate a fluctuating LBM the equilibrium moment values have to be scaled and an additive (random) to generate a fluctuating LBM the equilibrium moment values have to be scaled and an additive (random)
correction term is added to the collision rule correction term is added to the collision rule
""" """
from typing import overload
from ._compat import IS_PYSTENCILS_2
import numpy as np import numpy as np
import sympy as sp import sympy as sp
from lbmpy.moments import MOMENT_SYMBOLS from lbmpy.moments import MOMENT_SYMBOLS, is_shear_moment, get_order
from lbmpy.equilibrium import ContinuousHydrodynamicMaxwellian
from pystencils import Assignment, TypedSymbol from pystencils import Assignment, TypedSymbol
from pystencils.rng import PhiloxFourFloats, random_symbol
from pystencils.simp.assignment_collection import SymbolGen from pystencils.simp.assignment_collection import SymbolGen
if IS_PYSTENCILS_2:
from pystencils.sympyextensions.random import RngBase, Philox
from pystencils.sympyextensions import tcast
else:
from pystencils.rng import PhiloxFourFloats, random_symbol
@overload
def add_fluctuations_to_collision_rule(collision_rule, temperature=None, amplitudes=(),
*,
block_offsets, seed, rng_node, c_s_sq):
"""Fluctuating LB implementation for pystencils 1.3"""
@overload
def add_fluctuations_to_collision_rule(collision_rule, temperature=None, amplitudes=(), def add_fluctuations_to_collision_rule(collision_rule, temperature=None, amplitudes=(),
block_offsets=(0, 0, 0), seed=TypedSymbol("seed", np.uint32), *,
rng_node=PhiloxFourFloats, c_s_sq=sp.Rational(1, 3)): rng: 'RngBase | None' = None, c_s_sq):
"""""" """Fluctuating LB implementation for pystencils 2.0
Args:
collision_rule: The base collision rule
temperature: Expression representing the fluid temperature
amplitudes: If ``temperature`` was not specified, expression representing the fluctuation amplitude
rng: Random number generator instance used to compute the fluctuations.
If `None`, the float32 Philox RNG will be used.
"""
def add_fluctuations_to_collision_rule(collision_rule, temperature=None, amplitudes=(),
c_s_sq=sp.Rational(1, 3), **kwargs):
if not (temperature and not amplitudes) or (temperature and amplitudes): if not (temperature and not amplitudes) or (temperature and amplitudes):
raise ValueError("Fluctuating LBM: Pass either 'temperature' or 'amplitudes'.") raise ValueError("Fluctuating LBM: Pass either 'temperature' or 'amplitudes'.")
method = collision_rule.method method = collision_rule.method
if not amplitudes: if not amplitudes:
amplitudes = fluctuation_amplitude_from_temperature(method, temperature, c_s_sq) amplitudes = fluctuation_amplitude_from_temperature(method, temperature, c_s_sq)
if block_offsets == 'walberla':
block_offsets = tuple(TypedSymbol("block_offset_{}".format(i), np.uint32) for i in range(3))
rng_symbol_gen = random_symbol(collision_rule.subexpressions, seed, if not method.is_weighted_orthogonal:
rng_node=rng_node, dim=method.dim, offsets=block_offsets) raise ValueError("Fluctuations can only be added to weighted-orthogonal methods")
if IS_PYSTENCILS_2:
rng: RngBase = kwargs.get("rng", Philox("fluctuation_rng", np.float32, TypedSymbol("seed", np.uint32)))
ts = TypedSymbol("time_step", np.uint32)
def _rng_symbol_gen():
while True:
rx, rasm = rng.get_random_vector(ts)
collision_rule.subexpressions.insert(0, rasm)
for i in range(rng.vector_size):
yield tcast.as_numeric(rx[i])
rng_symbol_gen = _rng_symbol_gen()
else:
block_offsets = kwargs.get("block_offsets", (0, 0, 0))
rng_node = kwargs.get("rng_node", PhiloxFourFloats)
seed = kwargs.get("seed", TypedSymbol("seed", np.uint32))
if block_offsets == 'walberla':
block_offsets = tuple(TypedSymbol("block_offset_{}".format(i), np.uint32) for i in range(3))
rng_symbol_gen = random_symbol(
collision_rule.subexpressions, seed=seed,
rng_node=rng_node, dim=method.dim, offsets=block_offsets
)
correction = fluctuation_correction(method, rng_symbol_gen, amplitudes) correction = fluctuation_correction(method, rng_symbol_gen, amplitudes)
for i, corr in enumerate(correction): for i, corr in enumerate(correction):
...@@ -38,9 +92,7 @@ def fluctuation_amplitude_from_temperature(method, temperature, c_s_sq=sp.Symbol ...@@ -38,9 +92,7 @@ def fluctuation_amplitude_from_temperature(method, temperature, c_s_sq=sp.Symbol
"""Produces amplitude equations according to (2.60) and (3.54) in Schiller08""" """Produces amplitude equations according to (2.60) and (3.54) in Schiller08"""
normalization_factors = sp.matrix_multiply_elementwise(method.moment_matrix, method.moment_matrix) * \ normalization_factors = sp.matrix_multiply_elementwise(method.moment_matrix, method.moment_matrix) * \
sp.Matrix(method.weights) sp.Matrix(method.weights)
density = method.zeroth_order_equilibrium_moment_symbol density = method._cqc.density_symbol
if method.conserved_quantity_computation.zero_centered_pdfs:
density += 1
mu = temperature * density / c_s_sq mu = temperature * density / c_s_sq
return [sp.sqrt(mu * norm * (1 - (1 - rr) ** 2)) return [sp.sqrt(mu * norm * (1 - (1 - rr) ** 2))
for norm, rr in zip(normalization_factors, method.relaxation_rates)] for norm, rr in zip(normalization_factors, method.relaxation_rates)]
...@@ -57,3 +109,22 @@ def fluctuation_correction(method, rng_generator, amplitudes=SymbolGen("phi")): ...@@ -57,3 +109,22 @@ def fluctuation_correction(method, rng_generator, amplitudes=SymbolGen("phi")):
# corrections are applied in real space hence we need to convert to real space here # corrections are applied in real space hence we need to convert to real space here
return method.moment_matrix.inv() * amplitude_matrix * random_matrix return method.moment_matrix.inv() * amplitude_matrix * random_matrix
class ThermalizedEquilibrium(ContinuousHydrodynamicMaxwellian):
"""TODO: Remove Again!
This class is currently only used in the tutorial notebook `demo_thermalized_lbm.ipynb`
and has been added only temporarily, until the thermalized LBM is updated to our new
equilibrium framework."""
def __init__(self, random_number_symbols, **kwargs):
super().__init__(**kwargs)
self.random_number_symbols = random_number_symbols
def moment(self, exponent_tuple_or_polynomial):
value = super().moment(exponent_tuple_or_polynomial)
if is_shear_moment(exponent_tuple_or_polynomial, dim=self.dim):
value += self.random_number_symbols[0] * 0.001
elif get_order(exponent_tuple_or_polynomial) > 2:
value += self.random_number_symbols[1] * 0.001
return value
r"""
.. module:: forcemodels
:synopsis: Collection of forcing terms for hydrodynamic LBM simulations
Get started:
------------
This module offers different models to introduce a body force in the lattice Boltzmann scheme.
If you don't know which model to choose, use :class:`lbmpy.forcemodels.Guo`.
Detailed information:
---------------------
Force models add a term :math:`C_F` to the collision equation:
.. math ::
f(\mathbf{x} + c_q \Delta t, t + \Delta t) - f(\mathbf{x},t) = \Omega(f, f^{(\mathrm{eq})})
+ \underbrace{F_q}_{\mbox{forcing term}}
The form of this term depends on the concrete force model: the first moment of this forcing term is equal
to the acceleration :math:`\mathbf{a}` for all force models. Here :math:`\mathbf{F}` is the D dimensional force vector,
which defines the force for each spatial direction.
.. math ::
\sum_q \mathbf{c}_q \mathbf{F} = \mathbf{a}
The second order moment is different for the forcing models - if it is zero the model is suited for
incompressible flows. For weakly compressible collision operators a force model with a corrected second order moment
should be chosen.
.. math ::
\sum_q c_{qi} c_{qj} f_q &= F_i u_j + F_j u_i &\qquad \mbox{for Guo, Luo models}
\sum_q c_{qi} c_{qj} f_q &= 0 &\qquad \mbox{for Simple, Buick}
Models with zero second order moment have:
.. math ::
F_q = \frac{w_q}{c_s^2} c_{qi} \; a_i
Models with nonzero second moment have:
.. math ::
F_q = \frac{w_q}{c_s^2} c_{qi} \; a_i + \frac{w_q}{c_s^4} (c_{qi} c_{qj} - c_s^2 \delta_{ij} ) u_j \, a_i
For all force models the computation of the macroscopic velocity has to be adapted (shifted) by adding a term
:math:`S_{macro}` that we call "macroscopic velocity shift"
.. math ::
\mathbf{u} &= \sum_q \mathbf{c}_q f_q + S_{\mathrm{macro}}
S_{\mathrm{macro}} &= \frac{\Delta t}{2 \cdot \rho} \sum_q F_q
Some models also shift the velocity entering the equilibrium distribution.
Comparison
----------
Force models can be distinguished by 2 options:
**Option 1**:
:math:`C_F = 1` and equilibrium velocity is not shifted, or :math:`C_F=(1 - \frac{\omega}{2})`
and equilibrium is shifted.
**Option 2**:
second velocity moment is zero or :math:`F_i u_j + F_j u_i`
===================== ==================== =================
Option2 \\ Option1 no equilibrium shift equilibrium shift
===================== ==================== =================
second moment zero :class:`Simple` :class:`Buick`
second moment nonzero :class:`Luo` :class:`Guo`
===================== ==================== =================
"""
from warnings import warn
import abc
import sympy as sp
from pystencils.sympyextensions import scalar_product
from lbmpy.maxwellian_equilibrium import (
discrete_maxwellian_equilibrium, get_equilibrium_values_of_maxwell_boltzmann_function
)
from lbmpy.moments import (
MOMENT_SYMBOLS, exponent_tuple_sort_key, exponents_to_polynomial_representations,
extract_monomials, moment_sort_key, moment_matrix,
monomial_to_polynomial_transformation_matrix, set_up_shift_matrix)
FORCE_SYMBOLS = sp.symbols("F_x, F_y, F_z")
class AbstractForceModel(abc.ABC):
    r"""
    Abstract base class for all force models. Every force model must implement ``__call__``, which yields a
    q-dimensional vector that is added to the PDFs in population space. For MRT methods, a force model may
    additionally provide ``moment_space_forcing`` to apply the forcing directly in moment space, or
    ``central_moment_space_forcing`` to apply it in central moment space. Doing so saves FLOPs and is often
    easier to get right, since the relaxation matrix frequently has to be taken into account in collision space.

    Args:
        force: force vector of size dim which contains the force for each spatial dimension.
    """

    def __init__(self, force):
        self._force = force

        # Internally only a purely symbolic force vector is used: every non-symbol entry of
        # ``force`` is replaced by the corresponding canonical symbol from FORCE_SYMBOLS.
        # The mapping from those symbols back to the original values is recorded in a
        # substitution dict, which the LB method applies when building the collision rule.
        self._symbolic_force = [
            component if isinstance(component, sp.Symbol) else placeholder
            for component, placeholder in zip(force, FORCE_SYMBOLS)
        ]
        self._subs_dict_force = {
            symbolic: original
            for symbolic, original in zip(self._symbolic_force, force)
            if symbolic != original
        }

        # Capability flags: a subclass providing the respective hook supports forcing
        # directly in moment space / central moment space / symmetric central moment form.
        self.has_moment_space_forcing = hasattr(self, "moment_space_forcing")
        self.has_central_moment_space_forcing = hasattr(self, "central_moment_space_forcing")
        self.has_symmetric_central_moment_forcing = hasattr(self, "symmetric_central_moment_forcing")

    def __call__(self, lb_method):
        r"""
        Return the q-dimensional forcing vector added to the PDFs in population space.

        Must be overridden by every concrete force model and return a sympy Matrix.

        Args:
            lb_method: LB method, see lbmpy.creationfunctions.create_lb_method
        """
        raise NotImplementedError("Force model class has to overwrite __call__")

    def macroscopic_velocity_shift(self, density):
        r"""
        Shift of the macroscopic velocity by :math:`\frac{\Delta t}{2 \cdot \rho}`.

        Args:
            density: Density symbol which is needed for the shift
        """
        return default_velocity_shift(density, self.symbolic_force_vector)

    def macroscopic_momentum_density_shift(self, *_):
        r"""
        Shift of the macroscopic momentum density by :math:`\frac{\Delta t}{2}`.
        """
        return default_momentum_density_shift(self.symbolic_force_vector)

    def equilibrium_velocity_shift(self, density):
        r"""
        Shift of the velocity entering the equilibrium distribution; zero by default.

        Args:
            density: Density symbol which is needed for the shift
        """
        return [0] * len(self.symbolic_force_vector)

    @property
    def symbolic_force_vector(self):
        """Purely symbolic representation of the force vector."""
        return self._symbolic_force

    @property
    def subs_dict_force(self):
        """Substitutions mapping the internal force symbols back to the original entries."""
        return self._subs_dict_force
class Simple(AbstractForceModel):
    r"""
    A simple force model which introduces the following additional force term in the
    collision process :math:`\frac{w_q}{c_s^2} \mathbf{c_{q}} \cdot \mathbf{F}` (often: force = rho * acceleration
    where rho is the zeroth moment to be consistent with the above definition)
    Should only be used with constant forces!
    Shifts the macroscopic velocity by :math:`\frac{\mathbf{F}}{2}`, but does not change the equilibrium velocity.
    """

    def __call__(self, lb_method):
        """Return the q-dimensional population-space forcing vector for ``lb_method``."""
        force = self.symbolic_force_vector
        # Fixed typo in the assertion message ("vectore" -> "vector").
        assert len(force) == lb_method.dim, "Force vector must match with the dimensions of the lb method"
        cs_sq = sp.Rational(1, 3)  # squared speed of sound
        result = [(w_i / cs_sq) * scalar_product(force, direction)
                  for direction, w_i in zip(lb_method.stencil, lb_method.weights)]
        return sp.Matrix(result)

    def moment_space_forcing(self, lb_method):
        """Forcing vector transformed into moment space via the method's moment matrix."""
        return (lb_method.moment_matrix * self(lb_method)).expand()

    def central_moment_space_forcing(self, lb_method):
        """Forcing vector transformed into central moment space via the shift matrix."""
        moments = (lb_method.moment_matrix * sp.Matrix(self(lb_method))).expand()
        return lb_method.shift_matrix * moments

    def symmetric_central_moment_forcing(self, lb_method, central_moments):
        """Pre-/post-collision forcing contributions in symmetric central-moment form."""
        u = lb_method.first_order_equilibrium_moment_symbols
        cm_matrix = moment_matrix(central_moments, lb_method.stencil, shift_velocity=u)
        before = sp.Matrix([0] * lb_method.stencil.Q)
        after = cm_matrix @ sp.Matrix(self(lb_method))
        return before, after
class CentralMoment(AbstractForceModel):
    r"""
    A force model that only shifts the macroscopic and equilibrium velocities and does not introduce a forcing term to
    the collision process. Forcing is then applied through relaxation of the first central moments in the shifted
    frame of reference (cf. https://doi.org/10.1016/j.camwa.2015.05.001).
    """

    def __call__(self, lb_method):
        # No population-space forcing term exists for this model.
        raise ValueError("This force model can only be combined with the Cumulant collision model")

    def symmetric_central_moment_forcing(self, lb_method, *_):
        """Apply half the force to the first-order central moments before and after collision."""
        q = lb_method.stencil.Q
        half = sp.Rational(1, 2)
        before = sp.zeros(q, 1)
        after = sp.zeros(q, 1)
        # First-order moments occupy indices 1 .. dim in the central moment vector.
        for moment_index, force_component in enumerate(self.symbolic_force_vector, start=1):
            before[moment_index] = half * force_component
            after[moment_index] = half * force_component
        return before, after

    def equilibrium_velocity_shift(self, density):
        """Shift the equilibrium velocity by the default velocity shift."""
        return default_velocity_shift(density, self.symbolic_force_vector)
class Luo(AbstractForceModel):
    r"""Force model by Luo :cite:`luo1993lattice`.
    Shifts the macroscopic velocity by :math:`\frac{\mathbf{F}}{2}`, but does not change the equilibrium velocity.
    """

    def __call__(self, lb_method):
        """Return the per-direction force contribution in population space."""
        u = sp.Matrix(lb_method.first_order_equilibrium_moment_symbols)
        force = sp.Matrix(self.symbolic_force_vector)
        cs_sq = sp.Rational(1, 3)  # squared speed of sound
        terms = []
        for c_q, weight in zip(lb_method.stencil, lb_method.weights):
            c_q = sp.Matrix(c_q)
            # (c_q - u)/c_s^2 + c_q (c_q . u)/c_s^4, projected onto the force
            combined = (c_q - u) / cs_sq + (c_q * c_q.dot(u)) / cs_sq ** 2
            terms.append((weight * force.dot(combined)).simplify())
        return sp.Matrix(terms)

    def moment_space_forcing(self, lb_method):
        """Force contribution transformed to raw-moment space."""
        return (lb_method.moment_matrix * self(lb_method)).expand()

    def central_moment_space_forcing(self, lb_method):
        """Force contribution transformed to central-moment space via the shift matrix."""
        raw_moments = lb_method.moment_matrix * self(lb_method)
        return (lb_method.shift_matrix * raw_moments).expand()

    def symmetric_central_moment_forcing(self, lb_method, central_moments):
        """Return (before, after) half-step contributions; here all forcing is applied after collision."""
        shift_velocity = lb_method.first_order_equilibrium_moment_symbols
        transform = moment_matrix(central_moments, lb_method.stencil, shift_velocity=shift_velocity)
        before = sp.zeros(lb_method.stencil.Q, 1)
        after = (transform @ sp.Matrix(self(lb_method))).expand()
        return before, after
class Guo(AbstractForceModel):
    r"""
    Force model by Guo :cite:`guo2002discrete`, generalized to MRT,
    which makes it equivalent to :cite:`schiller2008thermal`, equation 4.67
    Adapts the calculation of the macroscopic velocity as well as the equilibrium velocity
    (both shifted by :math:`\frac{\mathbf{F}}{2}`)!
    """

    def __call__(self, lb_method):
        """Return the per-direction force contribution in population space."""
        if len(set(lb_method.relaxation_rates)) == 1:
            # Single relaxation time: a scalar (1 - omega/2) prefactor on the Luo terms suffices
            omega = lb_method.symbolic_relaxation_matrix[0]
            return (1 - omega / 2) * Luo(self.symbolic_force_vector)(lb_method)
        # General MRT case: back-transform the moment-space forcing
        return (lb_method.moment_matrix.inv() * self.moment_space_forcing(lb_method)).expand()

    def moment_space_forcing(self, lb_method):
        """Raw-moment forcing, premultiplied by the half-step correction (I - S/2)."""
        identity = sp.eye(len(lb_method.stencil))
        prefactor = identity - sp.Rational(1, 2) * lb_method.symbolic_relaxation_matrix
        luo_terms = sp.Matrix(Luo(self.symbolic_force_vector)(lb_method))
        return prefactor * (lb_method.moment_matrix * luo_terms).expand()

    def central_moment_space_forcing(self, lb_method):
        """Central-moment forcing, premultiplied by the half-step correction (I - S/2)."""
        identity = sp.eye(len(lb_method.stencil))
        prefactor = identity - sp.Rational(1, 2) * lb_method.symbolic_relaxation_matrix
        raw_moments = lb_method.moment_matrix * sp.Matrix(Luo(self.symbolic_force_vector)(lb_method))
        return prefactor * (lb_method.shift_matrix * raw_moments).expand()

    def symmetric_central_moment_forcing(self, lb_method, central_moments):
        """Half the Luo central-moment forcing, applied both before and after collision."""
        delegate = Luo(self.symbolic_force_vector)
        _, full_terms = delegate.symmetric_central_moment_forcing(lb_method, central_moments)
        halved = sp.Rational(1, 2) * full_terms
        return halved, halved

    def equilibrium_velocity_shift(self, density):
        """Shift the equilibrium velocity by F / (2 rho)."""
        return default_velocity_shift(density, self.symbolic_force_vector)
class He(AbstractForceModel):
    r"""
    Force model by He :cite:`HeForce`
    Adapts the calculation of the macroscopic velocity as well as the equilibrium velocity
    (both shifted by :math:`\frac{\mathbf{F}}{2}`)!
    Force moments are derived from the continuous maxwellian equilibrium. From the
    moment integrals of the continuous force term

    .. math::

        F (\mathbf{c})
        = \frac{1}{\rho c_s^2}
        \mathbf{F} \cdot ( \mathbf{c} - \mathbf{u} )
        f^{\mathrm{eq}} (\mathbf{c})

    the following analytical expression for the monomial raw moments of the force is found:

    .. math::

        m_{\alpha\beta\gamma}^{F, \mathrm{He}}
        = \frac{1}{\rho c_s^2} \left(
            F_x m^{\mathrm{eq}}_{\alpha+1,\beta,\gamma}
            + F_y m^{\mathrm{eq}}_{\alpha,\beta+1,\gamma}
            + F_z m^{\mathrm{eq}}_{\alpha,\beta,\gamma+1}
            - m^{eq}_{\alpha\beta\gamma} ( \mathbf{F} \cdot \mathbf{u} )
        \right)
    """

    def __init__(self, force):
        super(He, self).__init__(force)

    def forcing_terms(self, lb_method):
        """Population-space forcing: per-direction equilibrium (at rho=1) times (c_q - u).F / c_s^2."""
        u = sp.Matrix(lb_method.first_order_equilibrium_moment_symbols)
        force = sp.Matrix(self.symbolic_force_vector)
        cs_sq = sp.Rational(1, 3)  # squared speed of sound
        # eq. 6.31 in the LB book by Krüger et al. shows that the equilibrium terms are divided by rho.
        # This is done here by evaluating the equilibrium at rho = 1 to be consistent with
        # compressible and incompressible force models
        cqc = lb_method.conserved_quantity_computation
        eq_terms = discrete_maxwellian_equilibrium(lb_method.stencil, rho=sp.Integer(1),
                                                   u=cqc.velocity_symbols, c_s_sq=sp.Rational(1, 3))
        result = []
        for direction, eq in zip(lb_method.stencil, eq_terms):
            direction = sp.Matrix(direction)
            eu_dot_f = (direction - u).dot(force)
            result.append(eq * eu_dot_f / cs_sq)
        return sp.Matrix(result)

    def continuous_force_raw_moments(self, lb_method, moments=None):
        """Force contribution to the given polynomial raw moments.

        Implements the analytical expression from the class docstring: each monomial
        force moment combines the coordinate-shifted equilibrium moments with the
        F.u correction term, and the result is mapped back to polynomial moments.

        Args:
            lb_method: lattice Boltzmann method providing stencil, dim and symbols
            moments: polynomial moments to evaluate; defaults to the method's moments
        """
        rho = lb_method.zeroth_order_equilibrium_moment_symbol
        u = lb_method.first_order_equilibrium_moment_symbols
        dim = lb_method.dim
        c_s_sq = sp.Rational(1, 3)
        force = sp.Matrix(self.symbolic_force_vector)
        moment_polynomials = lb_method.moments if moments is None else moments
        moment_exponents = sorted(extract_monomials(moment_polynomials), key=exponent_tuple_sort_key)
        moment_monomials = exponents_to_polynomial_representations(moment_exponents)
        # The formula needs every monomial moment additionally shifted by +1 in each
        # coordinate direction, so extend the monomial set accordingly
        extended_monomials = set()
        for m in moment_monomials:
            extended_monomials |= {m} | {m * x for x in MOMENT_SYMBOLS[:dim]}
        extended_monomials = sorted(extended_monomials, key=moment_sort_key)
        moment_eq_values = get_equilibrium_values_of_maxwell_boltzmann_function(extended_monomials, dim, rho=rho,
                                                                                u=u, c_s_sq=c_s_sq)
        moment_to_eq_dict = {m: v for m, v in zip(extended_monomials, moment_eq_values)}
        monomial_force_moments = []
        for moment in moment_monomials:
            m_base = moment_to_eq_dict[moment]
            m_shifted = sp.Matrix([moment_to_eq_dict[moment * x] for x in MOMENT_SYMBOLS[:dim]])
            # m^F = 1/(rho c_s^2) * (F . m_shifted - m_base * (F . u)), cf. class docstring
            m_force = (c_s_sq * rho)**(-1) * (force.dot(m_shifted) - m_base * force.dot(u))
            monomial_force_moments.append(m_force.expand())
        # Map monomial force moments back onto the requested polynomial moments
        mono_to_poly_matrix = monomial_to_polynomial_transformation_matrix(moment_exponents, moment_polynomials)
        polynomial_force_moments = mono_to_poly_matrix * sp.Matrix(monomial_force_moments)
        return polynomial_force_moments

    def continuous_force_central_moments(self, lb_method, moments=None):
        """Force contribution to the given polynomial central moments (raw moments shifted by u)."""
        if moments is None:
            moments = lb_method.moments
        raw_moments = self.continuous_force_raw_moments(lb_method, moments=moments)
        u = lb_method.first_order_equilibrium_moment_symbols
        shift_matrix = set_up_shift_matrix(moments, lb_method.stencil, velocity_symbols=u)
        return (shift_matrix * raw_moments).expand()

    def __call__(self, lb_method):
        """Return the per-direction force contribution in population space."""
        if len(set(lb_method.relaxation_rates)) == 1:
            # It's an SRT method! A scalar (1 - omega/2) correction suffices
            rr = lb_method.symbolic_relaxation_matrix[0]
            force_terms = self.forcing_terms(lb_method)
            correction_factor = (1 - rr / 2)
            result = correction_factor * force_terms
        else:
            # MRT: back-transform the corrected moment-space forcing
            force_terms = self.moment_space_forcing(lb_method)
            result = (lb_method.moment_matrix.inv() * force_terms).expand()
        return result

    def moment_space_forcing(self, lb_method):
        """Raw-moment forcing, premultiplied by the half-step correction (I - S/2)."""
        correction_factor = sp.eye(len(lb_method.stencil)) - sp.Rational(1, 2) * lb_method.symbolic_relaxation_matrix
        moments = self.continuous_force_raw_moments(lb_method)
        moments = (correction_factor * moments).expand()
        return moments

    def central_moment_space_forcing(self, lb_method):
        """Central-moment forcing, premultiplied by the half-step correction (I - S/2)."""
        correction_factor = sp.eye(len(lb_method.stencil)) - sp.Rational(1, 2) * lb_method.symbolic_relaxation_matrix
        central_moments = self.continuous_force_central_moments(lb_method)
        central_moments = (correction_factor * central_moments).expand()
        return central_moments

    def symmetric_central_moment_forcing(self, lb_method, central_moments):
        """Half of the continuous central-moment forcing, applied before and after collision."""
        central_moments = exponents_to_polynomial_representations(central_moments)
        force_cms = sp.Rational(1, 2) * self.continuous_force_central_moments(lb_method, moments=central_moments)
        return force_cms, force_cms

    def equilibrium_velocity_shift(self, density):
        """Shift the equilibrium velocity by F / (2 rho)."""
        return default_velocity_shift(density, self.symbolic_force_vector)
class Schiller(Guo):
    r"""
    Force model by Schiller :cite:`schiller2008thermal`, equation 4.67
    Equivalent to the generalized Guo model.
    """

    def __init__(self, force):
        # Deprecated alias kept for backward compatibility: behaves exactly like Guo
        warn("The Schiller force model is deprecated, please use the Guo model, which is equivalent",
             DeprecationWarning)
        super().__init__(force)
class Buick(AbstractForceModel):
    r"""
    This force model :cite:`buick2000gravity` has a force term with zero second moment.
    It is suited for incompressible lattice models. However it should be used with care because such a LB body form
    model is only consistent when applied to the solution of steady - state hydrodynamic problems. More information
    on an analysis of the Buick force model can be found in :cite:`silva2010` and in :cite:`silva2020`
    """

    def __call__(self, lb_method, **kwargs):
        """Return the per-direction force contribution in population space."""
        if len(set(lb_method.relaxation_rates)) == 1:
            # Single relaxation time: a scalar (1 - omega/2) prefactor on the Simple terms suffices
            omega = lb_method.symbolic_relaxation_matrix[0]
            return (1 - omega / 2) * Simple(self.symbolic_force_vector)(lb_method)
        # General MRT case: back-transform the moment-space forcing
        return (lb_method.moment_matrix.inv() * self.moment_space_forcing(lb_method)).expand()

    def moment_space_forcing(self, lb_method):
        """Raw-moment forcing, premultiplied by the half-step correction (I - S/2)."""
        prefactor = sp.eye(len(lb_method.stencil)) - sp.Rational(1, 2) * lb_method.symbolic_relaxation_matrix
        raw_moments = lb_method.moment_matrix * sp.Matrix(Simple(self.symbolic_force_vector)(lb_method))
        return (prefactor * raw_moments).expand()

    def central_moment_space_forcing(self, lb_method):
        """Central-moment forcing, premultiplied by the half-step correction (I - S/2)."""
        prefactor = sp.eye(len(lb_method.stencil)) - sp.Rational(1, 2) * lb_method.symbolic_relaxation_matrix
        raw_moments = lb_method.moment_matrix * sp.Matrix(Simple(self.symbolic_force_vector)(lb_method))
        return (prefactor * (lb_method.shift_matrix * raw_moments)).expand()

    def equilibrium_velocity_shift(self, density):
        """Shift the equilibrium velocity by F / (2 rho)."""
        return default_velocity_shift(density, self.symbolic_force_vector)
class EDM(AbstractForceModel):
    r"""Exact differencing force model as shown in :cite:`lbm_book` in eq. 6.32"""

    def __call__(self, lb_method):
        """Force term = f^eq(rho, u + F/rho_ref) - f^eq(rho, u)."""
        cqc = lb_method.conserved_quantity_computation
        # Incompressible models divide the force shift by the constant reference density 1
        reference_density = cqc.density_symbol if cqc.compressible else 1
        rho = cqc.density_symbol
        delta_rho = cqc.density_deviation_symbol
        rho_0 = cqc.background_density
        u = cqc.velocity_symbols
        equilibrium_terms = lb_method.get_equilibrium_terms()
        equilibrium_terms = equilibrium_terms.subs({delta_rho: rho - rho_0})
        # Fixed: use the public `symbolic_force_vector` accessor like all other force
        # models here, instead of the private attribute `self._force` — the visible
        # property shows the backing field is actually named `_symbolic_force`.
        shifted_u = (u_i + f_i / reference_density
                     for u_i, f_i in zip(u, self.symbolic_force_vector))
        shifted_eq = equilibrium_terms.subs({u_i: su_i for u_i, su_i in zip(u, shifted_u)})
        return shifted_eq - equilibrium_terms

    def moment_space_forcing(self, lb_method):
        """Raw-moment representation of the EDM force term."""
        moments = lb_method.moment_matrix * self(lb_method)
        return moments.expand()

    def central_moment_space_forcing(self, lb_method):
        """Central-moment representation of the EDM force term."""
        moments = lb_method.moment_matrix * self(lb_method)
        central_moments = lb_method.shift_matrix * moments.expand()
        return central_moments.expand()
class ShanChen(AbstractForceModel):
    r"""Shan and Chen force model. The implementation is done according to :cite:`silva2020`.
    For reference compare table 1 which is the Shan and Chen model for an SRT collision operator. These terms are
    transfered to the moment space and then all representations for the different collision operators are derived
    from that.
    """

    def forcing_terms(self, lb_method):
        """Return the (linear, non-linear) per-direction force terms of table 1 in :cite:`silva2020`."""
        q = len(lb_method.stencil)
        cqc = lb_method.conserved_quantity_computation
        rho = cqc.density_symbol if cqc.compressible else 1
        u = cqc.velocity_symbols
        force = sp.Matrix(self.symbolic_force_vector)
        u_dot_f = sp.Matrix(u).dot(force)
        f_dot_f = force.dot(force)
        linear_term = sp.zeros(q, 1)
        non_linear_term = sp.zeros(q, 1)
        for i, (c_q, w_i) in enumerate(zip(lb_method.stencil, lb_method.weights)):
            c_dot_f = sp.Matrix(c_q).dot(force)
            c_dot_u = sp.Matrix(c_q).dot(u)
            linear_term[i] = w_i * (c_dot_f + 3 * c_dot_u * c_dot_f - u_dot_f)
            non_linear_term[i] = (w_i / (2 * rho)) * (3 * c_dot_f ** 2 - f_dot_f)
        return linear_term, non_linear_term

    def __call__(self, lb_method):
        """Return the per-direction force contribution in population space."""
        return (lb_method.moment_matrix.inv() * self.moment_space_forcing(lb_method)).expand()

    def moment_space_forcing(self, lb_method):
        """Raw-moment forcing: (I - S/2)/c_s^2 on the linear and its square on the non-linear term."""
        linear_term, non_linear_term = self.forcing_terms(lb_method)
        q = len(lb_method.stencil)
        inv_cs_sq = 1 / sp.Rational(1, 3)  # 1 / squared speed of sound
        prefactor = inv_cs_sq * (sp.eye(q) - sp.Rational(1, 2) * lb_method.symbolic_relaxation_matrix)
        transform = lb_method.moment_matrix
        combined = prefactor * (transform * linear_term) + prefactor ** 2 * (transform * non_linear_term)
        return combined.expand()

    def central_moment_space_forcing(self, lb_method):
        """Central-moment forcing: as ``moment_space_forcing`` with the additional shift matrix."""
        linear_term, non_linear_term = self.forcing_terms(lb_method)
        q = len(lb_method.stencil)
        inv_cs_sq = 1 / sp.Rational(1, 3)  # 1 / squared speed of sound
        prefactor = inv_cs_sq * (sp.eye(q) - sp.Rational(1, 2) * lb_method.symbolic_relaxation_matrix)
        transform = lb_method.moment_matrix
        shift = lb_method.shift_matrix
        linear_part = prefactor * (shift * (transform * linear_term))
        non_linear_part = prefactor ** 2 * (shift * (transform * non_linear_term))
        return (linear_part + non_linear_part).expand()

    def equilibrium_velocity_shift(self, density):
        """Shift the equilibrium velocity by F / (2 rho)."""
        return default_velocity_shift(density, self.symbolic_force_vector)
# -------------------------------- Helper functions ------------------------------------------------------------------
def default_velocity_shift(density, force):
    """Return the half-force velocity shift F / (2 rho), component-wise."""
    two_rho = 2 * density
    return [force_component / two_rho for force_component in force]
def default_momentum_density_shift(force):
    """Return the half-force momentum-density shift F / 2, component-wise."""
    return [force_component / 2 for force_component in force]
...@@ -231,17 +231,17 @@ def add_black_and_white_image(boundary_handling, image_file, target_slice=None, ...@@ -231,17 +231,17 @@ def add_black_and_white_image(boundary_handling, image_file, target_slice=None,
# binarize # binarize
zoomed_image[zoomed_image <= 254] = 0 zoomed_image[zoomed_image <= 254] = 0
zoomed_image[zoomed_image > 254] = 1 zoomed_image[zoomed_image > 254] = 1
zoomed_image = np.logical_not(zoomed_image.astype(np.bool)) zoomed_image = np.logical_not(zoomed_image.astype(bool))
# resize necessary if aspect ratio should be constant # resize necessary if aspect ratio should be constant
if zoomed_image.shape != target_size: if zoomed_image.shape != target_size:
resized_image = np.zeros(target_size, dtype=np.bool) resized_image = np.zeros(target_size, dtype=bool)
mid = [(ts - s) // 2 for ts, s in zip(target_size, zoomed_image.shape)] mid = [(ts - s) // 2 for ts, s in zip(target_size, zoomed_image.shape)]
resized_image[mid[0]:zoomed_image.shape[0] + mid[0], mid[1]:zoomed_image.shape[1] + mid[1]] = zoomed_image resized_image[mid[0]:zoomed_image.shape[0] + mid[0], mid[1]:zoomed_image.shape[1] + mid[1]] = zoomed_image
zoomed_image = resized_image zoomed_image = resized_image
def callback(*coordinates): def callback(*coordinates):
result = np.zeros_like(coordinates[0], dtype=np.bool) result = np.zeros_like(coordinates[0], dtype=bool)
mask_start = [int(coordinates[i][(0,) * dim] - 0.5) for i in range(dim)] mask_start = [int(coordinates[i][(0,) * dim] - 0.5) for i in range(dim)]
mask_end = [int(coordinates[i][(-1,) * dim] + 1 - 0.5) for i in range(dim)] mask_end = [int(coordinates[i][(-1,) * dim] + 1 - 0.5) for i in range(dim)]
......
File moved
from types import MappingProxyType from types import MappingProxyType
from dataclasses import replace
import numpy as np import numpy as np
from lbmpy.boundaries.boundaryhandling import LatticeBoltzmannBoundaryHandling from lbmpy.boundaries.boundaryhandling import LatticeBoltzmannBoundaryHandling
from lbmpy.creationfunctions import ( from lbmpy.creationfunctions import (create_lb_function, update_with_default_parameters)
create_lb_function, switch_to_symbolic_relaxation_rates_for_omega_adapting_methods, from lbmpy.enums import Stencil
update_with_default_parameters)
from lbmpy.macroscopic_value_kernels import ( from lbmpy.macroscopic_value_kernels import (
create_advanced_velocity_setter_collision_rule, pdf_initialization_assignments) create_advanced_velocity_setter_collision_rule, pdf_initialization_assignments)
from lbmpy.simplificationfactory import create_simplification_strategy from lbmpy.simplificationfactory import create_simplification_strategy
from lbmpy.stencils import get_stencil from lbmpy.stencils import LBStencil
from pystencils import create_data_handling, create_kernel, make_slice
from pystencils import CreateKernelConfig
from pystencils import create_data_handling, create_kernel, make_slice, Target
from pystencils.slicing import SlicedGetter from pystencils.slicing import SlicedGetter
from pystencils.timeloop import TimeLoop from pystencils.timeloop import TimeLoop
from ._compat import IS_PYSTENCILS_2
if not IS_PYSTENCILS_2:
from pystencils import Backend
class LatticeBoltzmannStep: class LatticeBoltzmannStep:
def __init__(self, domain_size=None, lbm_kernel=None, periodicity=False, def __init__(self, domain_size=None, lbm_kernel=None, periodicity=False,
...@@ -22,32 +29,59 @@ class LatticeBoltzmannStep: ...@@ -22,32 +29,59 @@ class LatticeBoltzmannStep:
velocity_data_name=None, density_data_name=None, density_data_index=None, velocity_data_name=None, density_data_name=None, density_data_index=None,
compute_velocity_in_every_step=False, compute_density_in_every_step=False, compute_velocity_in_every_step=False, compute_density_in_every_step=False,
velocity_input_array_name=None, time_step_order='stream_collide', flag_interface=None, velocity_input_array_name=None, time_step_order='stream_collide', flag_interface=None,
alignment_if_vectorized=64, fixed_loop_sizes=True, fixed_relaxation_rates=True, **method_parameters): alignment_if_vectorized=64, fixed_loop_sizes=True,
timeloop_creation_function=TimeLoop,
lbm_config=None, lbm_optimisation=None,
config: CreateKernelConfig | None = None,
**method_parameters):
if optimization is None:
optimization = {}
self._timeloop_creation_function = timeloop_creation_function
# --- Parameter normalization --- # --- Parameter normalization ---
if data_handling is not None: if data_handling is not None:
if domain_size is not None: if domain_size is not None:
raise ValueError("When passing a data_handling, the domain_size parameter can not be specified") raise ValueError("When passing a data_handling, the domain_size parameter can not be specified")
if config is not None:
if IS_PYSTENCILS_2:
target = config.get_target()
else:
target = config.target
else:
target = optimization.get('target', Target.CPU)
if data_handling is None: if data_handling is None:
if domain_size is None: if domain_size is None:
raise ValueError("Specify either domain_size or data_handling") raise ValueError("Specify either domain_size or data_handling")
data_handling = create_data_handling(domain_size, default_ghost_layers=1, data_handling = create_data_handling(domain_size,
periodicity=periodicity, parallel=False) default_ghost_layers=1,
periodicity=periodicity,
default_target=target,
parallel=False)
if lbm_config:
method_parameters['stencil'] = lbm_config.stencil
if 'stencil' not in method_parameters: if 'stencil' not in method_parameters:
method_parameters['stencil'] = 'D2Q9' if data_handling.dim == 2 else 'D3Q27' method_parameters['stencil'] = LBStencil(Stencil.D2Q9) \
if data_handling.dim == 2 else LBStencil(Stencil.D3Q27)
method_parameters, optimization = update_with_default_parameters(method_parameters, optimization) lbm_config, lbm_optimisation, config = update_with_default_parameters(method_parameters, optimization,
field_dtype = np.float64 if optimization['double_precision'] else np.float32 lbm_config, lbm_optimisation, config)
del method_parameters['kernel_type'] # the parallel datahandling understands only numpy datatypes. Strings lead to an errors
if IS_PYSTENCILS_2:
from pystencils import create_type
field_dtype = create_type(config.get_option("default_dtype")).numpy_dtype
else:
field_dtype = config.data_type.default_factory().numpy_dtype
if lbm_kernel: if lbm_kernel:
q = len(lbm_kernel.method.stencil) q = lbm_kernel.method.stencil.Q
else: else:
q = len(get_stencil(method_parameters['stencil'])) q = lbm_config.stencil.Q
target = optimization['target']
self.name = name self.name = name
self._data_handling = data_handling self._data_handling = data_handling
...@@ -56,14 +90,22 @@ class LatticeBoltzmannStep: ...@@ -56,14 +90,22 @@ class LatticeBoltzmannStep:
self.velocity_data_name = name + "_velocity" if velocity_data_name is None else velocity_data_name self.velocity_data_name = name + "_velocity" if velocity_data_name is None else velocity_data_name
self.density_data_name = name + "_density" if density_data_name is None else density_data_name self.density_data_name = name + "_density" if density_data_name is None else density_data_name
self.density_data_index = density_data_index self.density_data_index = density_data_index
self._optimization = optimization
self._gpu = target == 'gpu' if IS_PYSTENCILS_2:
layout = optimization['field_layout'] self._gpu = target.is_gpu()
else:
self._gpu = target == Target.GPU
layout = lbm_optimisation.field_layout
alignment = False alignment = False
if optimization['target'] == 'cpu' and optimization['vectorization']:
alignment = alignment_if_vectorized if IS_PYSTENCILS_2:
if config.get_target().is_vector_cpu() and config.cpu.vectorize.enable:
alignment = alignment_if_vectorized
else:
if config.backend == Backend.C and config.cpu_vectorize_info:
alignment = alignment_if_vectorized
self._data_handling.add_array(self._pdf_arr_name, values_per_cell=q, gpu=self._gpu, layout=layout, self._data_handling.add_array(self._pdf_arr_name, values_per_cell=q, gpu=self._gpu, layout=layout,
latex_name='src', dtype=field_dtype, alignment=alignment) latex_name='src', dtype=field_dtype, alignment=alignment)
...@@ -80,58 +122,71 @@ class LatticeBoltzmannStep: ...@@ -80,58 +122,71 @@ class LatticeBoltzmannStep:
layout=layout, latex_name='ρ', dtype=field_dtype, alignment=alignment) layout=layout, latex_name='ρ', dtype=field_dtype, alignment=alignment)
if compute_velocity_in_every_step: if compute_velocity_in_every_step:
method_parameters['output']['velocity'] = self._data_handling.fields[self.velocity_data_name] lbm_config.output['velocity'] = self._data_handling.fields[self.velocity_data_name]
if compute_density_in_every_step: if compute_density_in_every_step:
density_field = self._data_handling.fields[self.density_data_name] density_field = self._data_handling.fields[self.density_data_name]
if self.density_data_index is not None: if self.density_data_index is not None:
density_field = density_field(density_data_index) density_field = density_field(density_data_index)
method_parameters['output']['density'] = density_field lbm_config.output['density'] = density_field
if velocity_input_array_name is not None: if velocity_input_array_name is not None:
method_parameters['velocity_input'] = self._data_handling.fields[velocity_input_array_name] lbm_config = replace(lbm_config, velocity_input=self._data_handling.fields[velocity_input_array_name])
if method_parameters['omega_output_field'] and isinstance(method_parameters['omega_output_field'], str): if isinstance(lbm_config.omega_output_field, str):
method_parameters['omega_output_field'] = data_handling.add_array(method_parameters['omega_output_field'], lbm_config = replace(lbm_config, omega_output_field=data_handling.add_array(lbm_config.omega_output_field,
dtype=field_dtype, alignment=alignment) dtype=field_dtype,
alignment=alignment,
values_per_cell=1))
self.kernel_params = kernel_params.copy() self.kernel_params = kernel_params.copy()
# --- Kernel creation --- # --- Kernel creation ---
if lbm_kernel is None: if lbm_kernel is None:
switch_to_symbolic_relaxation_rates_for_omega_adapting_methods(method_parameters, self.kernel_params,
force=not fixed_relaxation_rates)
if fixed_loop_sizes: if fixed_loop_sizes:
optimization['symbolic_field'] = data_handling.fields[self._pdf_arr_name] lbm_optimisation = replace(lbm_optimisation, symbolic_field=data_handling.fields[self._pdf_arr_name])
method_parameters['field_name'] = self._pdf_arr_name lbm_config = replace(lbm_config, field_name=self._pdf_arr_name)
method_parameters['temporary_field_name'] = self._tmp_arr_name lbm_config = replace(lbm_config, temporary_field_name=self._tmp_arr_name)
if time_step_order == 'stream_collide': if time_step_order == 'stream_collide':
self._lbmKernels = [create_lb_function(optimization=optimization, self._lbmKernels = [create_lb_function(lbm_config=lbm_config,
**method_parameters)] lbm_optimisation=lbm_optimisation,
config=config)]
elif time_step_order == 'collide_stream': elif time_step_order == 'collide_stream':
self._lbmKernels = [create_lb_function(optimization=optimization, self._lbmKernels = [create_lb_function(lbm_config=lbm_config,
kernel_type='collide_only', lbm_optimisation=lbm_optimisation,
**method_parameters), config=config,
create_lb_function(optimization=optimization, kernel_type='collide_only'),
kernel_type='stream_pull_only', create_lb_function(lbm_config=lbm_config,
** method_parameters)] lbm_optimisation=lbm_optimisation,
config=config,
kernel_type='stream_pull_only')]
else: else:
assert self._data_handling.dim == lbm_kernel.method.dim, \ assert self._data_handling.dim == lbm_kernel.method.dim, \
"Error: %dD Kernel for %d dimensional domain" % (lbm_kernel.method.dim, self._data_handling.dim) f"Error: {lbm_kernel.method.dim}D Kernel for {self._data_handling.dim} dimensional domain"
self._lbmKernels = [lbm_kernel] self._lbmKernels = [lbm_kernel]
self.method = self._lbmKernels[0].method self.method = self._lbmKernels[0].method
self.ast = self._lbmKernels[0].ast self.ast = self._lbmKernels[0].ast
# -- Boundary Handling & Synchronization --- # -- Boundary Handling & Synchronization ---
stencil_name = method_parameters['stencil'] stencil_name = lbm_config.stencil.name
self._sync_src = data_handling.synchronization_function([self._pdf_arr_name], stencil_name, target, self._sync_src = data_handling.synchronization_function([self._pdf_arr_name], stencil_name, target,
stencil_restricted=True) stencil_restricted=True)
self._sync_tmp = data_handling.synchronization_function([self._tmp_arr_name], stencil_name, target, self._sync_tmp = data_handling.synchronization_function([self._tmp_arr_name], stencil_name, target,
stencil_restricted=True) stencil_restricted=True)
self._boundary_handling = LatticeBoltzmannBoundaryHandling(self.method, self._data_handling, self._pdf_arr_name, self._boundary_handling = LatticeBoltzmannBoundaryHandling(
name=name + "_boundary_handling", self.method, self._data_handling, self._pdf_arr_name,
flag_interface=flag_interface, name=name + "_boundary_handling",
target=target, openmp=optimization['openmp']) flag_interface=flag_interface,
target=target,
openmp=config.cpu_openmp,
**({"default_dtype": field_dtype} if IS_PYSTENCILS_2 else dict())
)
self._lbm_config = lbm_config
self._lbm_optimisation = lbm_optimisation
self._config = config
# -- Macroscopic Value Kernels # -- Macroscopic Value Kernels
self._getterKernel, self._setterKernel = self._compile_macroscopic_setter_and_getter() self._getterKernel, self._setterKernel = self._compile_macroscopic_setter_and_getter()
...@@ -184,6 +239,21 @@ class LatticeBoltzmannStep: ...@@ -184,6 +239,21 @@ class LatticeBoltzmannStep:
def pdf_array_name(self): def pdf_array_name(self):
return self._pdf_arr_name return self._pdf_arr_name
@property
def lbm_config(self):
"""LBM configuration of the scenario"""
return self._lbm_config
@property
def lbm_optimisation(self):
"""LBM optimisation parameters"""
return self._lbm_optimisation
@property
def config(self):
"""Configutation of pystencils parameters"""
return self._config
def _get_slice(self, data_name, slice_obj, masked): def _get_slice(self, data_name, slice_obj, masked):
if slice_obj is None: if slice_obj is None:
slice_obj = make_slice[:, :] if self.dim == 2 else make_slice[:, :, 0.5] slice_obj = make_slice[:, :] if self.dim == 2 else make_slice[:, :, 0.5]
...@@ -243,7 +313,7 @@ class LatticeBoltzmannStep: ...@@ -243,7 +313,7 @@ class LatticeBoltzmannStep:
def get_time_loop(self): def get_time_loop(self):
self.pre_run() # make sure GPU arrays are allocated self.pre_run() # make sure GPU arrays are allocated
fixed_loop = TimeLoop(steps=2) fixed_loop = self._timeloop_creation_function(steps=2)
fixed_loop.add_pre_run_function(self.pre_run) fixed_loop.add_pre_run_function(self.pre_run)
fixed_loop.add_post_run_function(self.post_run) fixed_loop.add_post_run_function(self.post_run)
fixed_loop.add_single_step_function(self.time_step) fixed_loop.add_single_step_function(self.time_step)
...@@ -328,7 +398,7 @@ class LatticeBoltzmannStep: ...@@ -328,7 +398,7 @@ class LatticeBoltzmannStep:
tuple (residuum, steps_run) if successful or raises ValueError if not converged tuple (residuum, steps_run) if successful or raises ValueError if not converged
""" """
dh = self.data_handling dh = self.data_handling
gpu = self._optimization['target'] == 'gpu' gpu = self._gpu
def on_first_call(): def on_first_call():
self._velocity_init_vel_backup = 'velocity_init_vel_backup' self._velocity_init_vel_backup = 'velocity_init_vel_backup'
...@@ -337,11 +407,11 @@ class LatticeBoltzmannStep: ...@@ -337,11 +407,11 @@ class LatticeBoltzmannStep:
collision_rule = create_advanced_velocity_setter_collision_rule(self.method, vel_backup_field, collision_rule = create_advanced_velocity_setter_collision_rule(self.method, vel_backup_field,
velocity_relaxation_rate) velocity_relaxation_rate)
optimization = self._optimization.copy() self._lbm_optimisation.symbolic_field = dh.fields[self._pdf_arr_name]
optimization['symbolic_field'] = dh.fields[self._pdf_arr_name]
kernel = create_lb_function(collision_rule=collision_rule, field_name=self._pdf_arr_name, kernel = create_lb_function(collision_rule=collision_rule, field_name=self._pdf_arr_name,
temporary_field_name=self._tmp_arr_name, optimization=optimization) temporary_field_name=self._tmp_arr_name,
lbm_optimisation=self._lbm_optimisation)
self._velocity_init_kernel = kernel self._velocity_init_kernel = kernel
def make_velocity_backup(): def make_velocity_backup():
...@@ -377,7 +447,7 @@ class LatticeBoltzmannStep: ...@@ -377,7 +447,7 @@ class LatticeBoltzmannStep:
self._data_handling.all_to_cpu() self._data_handling.all_to_cpu()
self._data_handling.run_kernel(self._getterKernel, **self.kernel_params) self._data_handling.run_kernel(self._getterKernel, **self.kernel_params)
global_residuum = compute_residuum() global_residuum = compute_residuum()
print("Initialization iteration {}, residuum {}".format(steps_run, global_residuum)) print(f"Initialization iteration {steps_run}, residuum {global_residuum}")
if np.isnan(global_residuum) or global_residuum < convergence_threshold: if np.isnan(global_residuum) or global_residuum < convergence_threshold:
break break
...@@ -385,8 +455,8 @@ class LatticeBoltzmannStep: ...@@ -385,8 +455,8 @@ class LatticeBoltzmannStep:
converged = global_residuum < convergence_threshold converged = global_residuum < convergence_threshold
if not converged: if not converged:
restore_velocity_backup() restore_velocity_backup()
raise ValueError("Iterative initialization did not converge after %d steps.\n" raise ValueError(f"Iterative initialization did not converge after {steps_run} steps.\n"
"Current residuum is %s" % (steps_run, global_residuum)) f"Current residuum is {global_residuum}")
return global_residuum, steps_run return global_residuum, steps_run
...@@ -398,12 +468,19 @@ class LatticeBoltzmannStep: ...@@ -398,12 +468,19 @@ class LatticeBoltzmannStep:
rho_field = rho_field.center if self.density_data_index is None else rho_field(self.density_data_index) rho_field = rho_field.center if self.density_data_index is None else rho_field(self.density_data_index)
vel_field = self._data_handling.fields[self.velocity_data_name] vel_field = self._data_handling.fields[self.velocity_data_name]
if IS_PYSTENCILS_2:
gen_config = CreateKernelConfig(target=Target.CPU)
gen_config.cpu.openmp.enable = self._config.cpu.openmp.get_option("enable")
gen_config.default_dtype = self._config.get_option("default_dtype")
else:
gen_config = CreateKernelConfig(target=Target.CPU, cpu_openmp=self._config.cpu_openmp)
getter_eqs = cqc.output_equations_from_pdfs(pdf_field.center_vector, getter_eqs = cqc.output_equations_from_pdfs(pdf_field.center_vector,
{'density': rho_field, 'velocity': vel_field}) {'density': rho_field, 'velocity': vel_field})
getter_kernel = create_kernel(getter_eqs, target='cpu', cpu_openmp=self._optimization['openmp']).compile() getter_kernel = create_kernel(getter_eqs, config=gen_config).compile()
setter_eqs = pdf_initialization_assignments(lb_method, rho_field, setter_eqs = pdf_initialization_assignments(lb_method, rho_field,
vel_field.center_vector, pdf_field.center_vector) vel_field.center_vector, pdf_field.center_vector)
setter_eqs = create_simplification_strategy(lb_method)(setter_eqs) setter_eqs = create_simplification_strategy(lb_method)(setter_eqs)
setter_kernel = create_kernel(setter_eqs, target='cpu', cpu_openmp=self._optimization['openmp']).compile() setter_kernel = create_kernel(setter_eqs, config=gen_config).compile()
return getter_kernel, setter_kernel return getter_kernel, setter_kernel
from typing import Sequence, Any
from abc import ABC, abstractmethod
import numpy as np
import sympy as sp
from ._compat import IS_PYSTENCILS_2
# Fail fast on import: everything below builds on pystencils 2.x APIs
# (TypedSymbol with Arr types, Assignment-based array declarations).
if not IS_PYSTENCILS_2:
    raise ImportError("`lbmpy.lookup_tables` is only available when running with pystencils 2.x")
from pystencils import Assignment
from pystencils.sympyextensions import TypedSymbol
from pystencils.types.quick import Arr
from pystencils.types import UserTypeSpec, create_type
class LookupTables(ABC):
    """Abstract base for compile-time lookup tables.

    Concrete subclasses describe constant arrays that are emitted into
    generated kernels as symbolic array declarations.
    """

    @abstractmethod
    def get_array_declarations(self) -> list[Assignment]:
        """Return the assignments declaring this table's constant arrays."""
class NeighbourOffsetArrays(LookupTables):
    """Lookup table exposing the stencil's neighbour offsets to a kernel.

    A concrete (integer) direction index is resolved directly against the
    stencil; a symbolic index becomes an access into one int32 array per
    spatial coordinate.
    """

    @staticmethod
    def neighbour_offset(dir_idx, stencil):
        """Return the offset tuple for ``dir_idx`` (symbolic or concrete)."""
        if isinstance(sp.sympify(dir_idx), sp.Integer):
            return stencil[dir_idx]
        offset_symbols = NeighbourOffsetArrays._offset_symbols(stencil)
        return tuple(
            sp.IndexedBase(sym, shape=(1,))[dir_idx] for sym in offset_symbols
        )

    @staticmethod
    def _offset_symbols(stencil):
        """One typed int32 array symbol per spatial coordinate (x, y, z)."""
        num_directions = len(stencil)
        spatial_dims = len(stencil[0])
        entry_type = Arr(create_type("int32"), num_directions)
        return [
            TypedSymbol(f"neighbour_offset_{coord}", entry_type)
            for coord in ["x", "y", "z"][:spatial_dims]
        ]

    def __init__(self, stencil, offsets_dtype: UserTypeSpec = np.int32):
        # TODO: Currently, the requested dtype has no effect
        self._offsets_dtype = create_type(offsets_dtype)
        self._stencil = stencil
        self._dim = len(stencil[0])

    def get_array_declarations(self) -> list[Assignment]:
        """Declare one offset array per coordinate axis."""
        declarations = []
        symbols = NeighbourOffsetArrays._offset_symbols(self._stencil)
        for axis, symbol in enumerate(symbols):
            axis_offsets = tuple(direction[axis] for direction in self._stencil)
            declarations.append(Assignment(symbol, axis_offsets))
        return declarations
class MirroredStencilDirections(LookupTables):
    """Lookup table mapping each stencil direction to the index of its
    mirror image with respect to one coordinate axis."""

    @staticmethod
    def mirror_stencil(direction, mirror_axis):
        """Return ``direction`` with its ``mirror_axis`` component negated.

        Raises:
            AssertionError: if ``mirror_axis`` is not a valid axis index
                for ``direction``.
        """
        # Valid axes are 0 .. len(direction) - 1; the previous `<=` check
        # let mirror_axis == len(direction) slip through to an IndexError.
        assert mirror_axis < len(
            direction
        ), f"only {len(direction)} axes available for mirroring"
        direction = list(direction)
        direction[mirror_axis] = -direction[mirror_axis]
        return tuple(direction)

    @staticmethod
    def _mirrored_symbol(mirror_axis, stencil):
        """Typed int32 array symbol holding the mirrored direction indices."""
        axis = ["x", "y", "z"]
        q = len(stencil)
        return TypedSymbol(
            f"{axis[mirror_axis]}_axis_mirrored_stencil_dir", Arr(create_type("int32"), q)
        )

    def __init__(self, stencil, mirror_axis, dtype=np.int32):
        self._offsets_dtype = create_type(dtype)  # TODO: Currently, this has no effect
        self._mirrored_stencil_symbol = MirroredStencilDirections._mirrored_symbol(
            mirror_axis, stencil
        )
        # For every direction, the index of its mirror image within the stencil.
        self._mirrored_directions = tuple(
            stencil.index(
                MirroredStencilDirections.mirror_stencil(direction, mirror_axis)
            )
            for direction in stencil
        )

    def get_array_declarations(self) -> list[Assignment]:
        """Declare the single array of mirrored direction indices."""
        return [Assignment(self._mirrored_stencil_symbol, self._mirrored_directions)]
class LbmWeightInfo(LookupTables):
    """Lookup table exposing an LB method's lattice weights as a kernel array."""

    def __init__(self, lb_method, data_type="double"):
        self._weights = lb_method.weights
        weights_type = Arr(create_type(data_type), len(self._weights))
        self._weights_array = TypedSymbol("weights", weights_type)

    def weight_of_direction(self, dir_idx, lb_method=None):
        """Weight of direction ``dir_idx``: a numeric value for a concrete
        index, otherwise a symbolic access into the weights array."""
        if not isinstance(sp.sympify(dir_idx), sp.Integer):
            return sp.IndexedBase(self._weights_array, shape=(1,))[dir_idx]
        assert lb_method is not None
        return lb_method.weights[dir_idx].evalf(17)

    def get_array_declarations(self) -> list[Assignment]:
        """Declare the constant weights array."""
        return [Assignment(self._weights_array, tuple(self._weights))]
class TranslationArraysNode(LookupTables):
    """Collection of constant translation arrays for variable PDF accesses."""

    def __init__(self, array_content: Sequence[tuple[TypedSymbol, Sequence[Any]]]):
        self._decls = []
        for symbol, values in array_content:
            self._decls.append(Assignment(symbol, tuple(values)))

    def __str__(self):
        return "Variable PDF Access Translation Arrays"

    __repr__ = __str__

    def get_array_declarations(self) -> list[Assignment]:
        """Declare all configured translation arrays."""
        return self._decls