from lbmpy.stencils import get_stencil
import sympy as sp
from pystencils.stencil import have_same_entries
from lbmpy.moments import MOMENT_SYMBOLS, moment_sort_key, exponent_to_polynomial_representation
def statistical_quantity_symbol(name, exponents):
return sp.Symbol(f'{name}_{"".join(str(i) for i in exponents)}')
def exponent_tuple_sort_key(x):
return moment_sort_key(exponent_to_polynomial_representation(x))
def get_default_polynomial_cumulants_for_stencil(stencil):
"""
Returns the default groups of cumulants to be relaxed with common relaxation rates, as stated in the literature.
The groups are ordered as follows:
- The first group is the density
- The second group contains the momentum modes
- The third group contains the shear modes
- The fourth group is the bulk mode
- The remaining groups do not govern hydrodynamic properties
"""
x, y, z = MOMENT_SYMBOLS
if have_same_entries(stencil, get_stencil("D2Q9")):
# Cumulants of the D2Q9 stencil up to third order are equal to
# the central moments; only the fourth-order cumulant x**2 * y**2
# has a more complicated form. They can be arranged into groups
# for the preservation of rotational invariance as described by
# Martin Geier in his dissertation.
#
# Reference: Martin Geier. Ab initio derivation of the cascaded Lattice Boltzmann
# Automaton. Dissertation. University of Freiburg. 2006.
return [
[sp.sympify(1)], # density is conserved
[x, y], # momentum is relaxed for cumulant forcing
[x * y, x**2 - y**2], # shear
[x**2 + y**2], # bulk
[x**2 * y, x * y**2],
[x**2 * y**2]
]
elif have_same_entries(stencil, get_stencil("D3Q19")):
# D3Q19 cumulants are obtained by pruning the D3Q27 cumulant set as
# described by Coreixas, 2019.
return [
[sp.sympify(1)], # density is conserved
[x, y, z], # momentum might be affected by forcing
[x * y,
x * z,
y * z,
x ** 2 - y ** 2,
x ** 2 - z ** 2], # shear
[x ** 2 + y ** 2 + z ** 2], # bulk
[x * y ** 2 + x * z ** 2,
x ** 2 * y + y * z ** 2,
x ** 2 * z + y ** 2 * z],
[x * y ** 2 - x * z ** 2,
x ** 2 * y - y * z ** 2,
x ** 2 * z - y ** 2 * z],
[x ** 2 * y ** 2 - 2 * x ** 2 * z ** 2 + y ** 2 * z ** 2,
x ** 2 * y ** 2 + x ** 2 * z ** 2 - 2 * y ** 2 * z ** 2],
[x ** 2 * y ** 2 + x ** 2 * z ** 2 + y ** 2 * z ** 2]
]
elif have_same_entries(stencil, get_stencil("D3Q27")):
# Cumulants grouped to preserve rotational invariance as described by Geier et al. (2015)
return [
[sp.sympify(1)], # density is conserved
[x, y, z], # momentum might be affected by forcing
[x * y,
x * z,
y * z,
x ** 2 - y ** 2,
x ** 2 - z ** 2], # shear
[x ** 2 + y ** 2 + z ** 2], # bulk
[x * y ** 2 + x * z ** 2,
x ** 2 * y + y * z ** 2,
x ** 2 * z + y ** 2 * z],
[x * y ** 2 - x * z ** 2,
x ** 2 * y - y * z ** 2,
x ** 2 * z - y ** 2 * z],
[x * y * z],
[x ** 2 * y ** 2 - 2 * x ** 2 * z ** 2 + y ** 2 * z ** 2,
x ** 2 * y ** 2 + x ** 2 * z ** 2 - 2 * y ** 2 * z ** 2],
[x ** 2 * y ** 2 + x ** 2 * z ** 2 + y ** 2 * z ** 2],
[x ** 2 * y * z,
x * y ** 2 * z,
x * y * z ** 2],
[x ** 2 * y ** 2 * z,
x ** 2 * y * z ** 2,
x * y ** 2 * z ** 2],
[x ** 2 * y ** 2 * z ** 2]
]
else:
raise ValueError("No default set of cumulants is available for this stencil. "
"Please specify your own set of polynomial cumulants.")
from pystencils.simp.simplifications import sympy_cse
import sympy as sp
from warnings import warn
from pystencils import Assignment, AssignmentCollection
from pystencils.simp.assignment_collection import SymbolGen
from pystencils.stencil import have_same_entries
from pystencils.cache import disk_cache
from lbmpy.stencils import get_stencil
from lbmpy.methods.abstractlbmethod import AbstractLbMethod, LbmCollisionRule, RelaxationInfo
from lbmpy.methods.conservedquantitycomputation import AbstractConservedQuantityComputation
from lbmpy.moments import (
moments_up_to_order, get_order,
monomial_to_polynomial_transformation_matrix,
moment_sort_key, exponent_to_polynomial_representation, extract_monomials, MOMENT_SYMBOLS)
# Local Imports
from lbmpy.methods.centeredcumulant.centered_cumulants import (
statistical_quantity_symbol, exponent_tuple_sort_key)
from lbmpy.methods.centeredcumulant.cumulant_transform import (
PRE_COLLISION_CUMULANT, POST_COLLISION_CUMULANT,
CentralMomentsToCumulantsByGeneratingFunc)
from lbmpy.methods.momentbased.moment_transforms import (
PRE_COLLISION_CENTRAL_MOMENT, POST_COLLISION_CENTRAL_MOMENT,
FastCentralMomentTransform)
from lbmpy.methods.centeredcumulant.force_model import CenteredCumulantForceModel
from lbmpy.methods.centeredcumulant.galilean_correction import (
contains_corrected_polynomials,
add_galilean_correction,
get_galilean_correction_terms)
# ============================ Cached Transformations ================================================================
@disk_cache
def cached_forward_transform(transform_obj, *args, **kwargs):
return transform_obj.forward_transform(*args, **kwargs)
@disk_cache
def cached_backward_transform(transform_obj, *args, **kwargs):
return transform_obj.backward_transform(*args, **kwargs)
# ============================ Lower Order Central Moment Collision ==================================================
@disk_cache
def relax_lower_order_central_moments(moment_indices, pre_collision_values,
relaxation_rates, equilibrium_values,
post_collision_base=POST_COLLISION_CENTRAL_MOMENT):
post_collision_symbols = [statistical_quantity_symbol(post_collision_base, i) for i in moment_indices]
equilibrium_vec = sp.Matrix(equilibrium_values)
moment_vec = sp.Matrix(pre_collision_values)
relaxation_matrix = sp.diag(*relaxation_rates)
moment_vec = moment_vec + relaxation_matrix * (equilibrium_vec - moment_vec)
main_assignments = [Assignment(s, eq) for s, eq in zip(post_collision_symbols, moment_vec)]
return AssignmentCollection(main_assignments)
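# --------------------------- Illustrative sketch of the relaxation step ---------------------------
# The function above implements kappa_post = kappa + diag(omega) * (kappa_eq - kappa) for the
# lower-order central moments. A standalone sympy sketch of the same formula (symbol names are
# illustrative):
def _example_lower_order_relaxation():
    k_100, k_010, w_1, w_2 = sp.symbols('kappa_100 kappa_010 omega_1 omega_2')
    kappa = sp.Matrix([k_100, k_010])
    kappa_eq = sp.Matrix([0, 0])  # first-order central moments vanish in the co-moving frame
    return kappa + sp.diag(w_1, w_2) * (kappa_eq - kappa)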
# ============================ Polynomial Cumulant Collision =========================================================
@disk_cache
def relax_polynomial_cumulants(monomial_exponents, polynomials, relaxation_rates, equilibrium_values,
pre_simplification,
galilean_correction_terms=None,
pre_collision_base=PRE_COLLISION_CUMULANT,
post_collision_base=POST_COLLISION_CUMULANT,
subexpression_base='sub_col'):
mon_to_poly_matrix = monomial_to_polynomial_transformation_matrix(monomial_exponents, polynomials)
mon_vec = sp.Matrix([statistical_quantity_symbol(pre_collision_base, exp) for exp in monomial_exponents])
equilibrium_vec = sp.Matrix(equilibrium_values)
relaxation_matrix = sp.diag(*relaxation_rates)
subexpressions = []
poly_vec = mon_to_poly_matrix * mon_vec
relaxed_polys = poly_vec + relaxation_matrix * (equilibrium_vec - poly_vec)
if galilean_correction_terms is not None:
relaxed_polys = add_galilean_correction(relaxed_polys, polynomials, galilean_correction_terms)
subexpressions = galilean_correction_terms.all_assignments
relaxed_monos = mon_to_poly_matrix.inv() * relaxed_polys
main_assignments = [Assignment(statistical_quantity_symbol(post_collision_base, exp), v)
for exp, v in zip(monomial_exponents, relaxed_monos)]
symbol_gen = SymbolGen(subexpression_base)
ac = AssignmentCollection(
main_assignments, subexpressions=subexpressions, subexpression_symbol_generator=symbol_gen)
if pre_simplification == 'default_with_cse':
ac = sympy_cse(ac)
return ac
# =============================== LB Method Implementation ===========================================================
class CenteredCumulantBasedLbMethod(AbstractLbMethod):
"""
This class implements cumulant-based lattice Boltzmann methods which relax all the non-conserved quantities
as either monomial or polynomial cumulants. It is mostly inspired by the work presented in :cite:`geier2015`.
Conserved quantities are relaxed in central moment space. This method supports an implicit forcing scheme
through :class:`lbmpy.methods.centeredcumulant.CenteredCumulantForceModel` where forces are applied by
shifting the central-moment frame of reference by :math:`F/2` and then relaxing the first-order central
moments with a relaxation rate of two. This corresponds to the change of sign described in the paper.
Classical forcing schemes can still be applied.
The Galilean correction described in :cite:`geier2015` is also available for the D3Q27 lattice.
This method is implemented modularly: the transformation from populations to central moments to cumulants
is governed by subclasses of :class:`lbmpy.methods.momentbased.moment_transforms.AbstractMomentTransform`,
which can be specified as constructor arguments. This allows selecting the most efficient transformation
for a given setup.
"""
def __init__(self, stencil, cumulant_to_relaxation_info_dict, conserved_quantity_computation, force_model=None,
galilean_correction=False,
central_moment_transform_class=FastCentralMomentTransform,
cumulant_transform_class=CentralMomentsToCumulantsByGeneratingFunc):
assert isinstance(conserved_quantity_computation,
AbstractConservedQuantityComputation)
super(CenteredCumulantBasedLbMethod, self).__init__(stencil)
for m in moments_up_to_order(1, dim=self.dim):
if exponent_to_polynomial_representation(m) not in cumulant_to_relaxation_info_dict.keys():
raise ValueError(f'No relaxation info given for conserved cumulant {m}!')
self._cumulant_to_relaxation_info_dict = cumulant_to_relaxation_info_dict
self._conserved_quantity_computation = conserved_quantity_computation
self._force_model = force_model
self._weights = None
self._galilean_correction = galilean_correction
if galilean_correction:
if not have_same_entries(stencil, get_stencil("D3Q27")):
raise ValueError("Galilean Correction only available for D3Q27 stencil")
if not contains_corrected_polynomials(cumulant_to_relaxation_info_dict):
raise ValueError("For the galilean correction, all three polynomial cumulants"
"(x^2 - y^2), (x^2 - z^2) and (x^2 + y^2 + z^2) must be present!")
self._cumulant_transform_class = cumulant_transform_class
self._central_moment_transform_class = central_moment_transform_class
self.force_model_rr_override = False
if isinstance(self._force_model, CenteredCumulantForceModel) and \
self._force_model.override_momentum_relaxation_rate is not None:
self.set_first_moment_relaxation_rate(self._force_model.override_momentum_relaxation_rate)
self.force_model_rr_override = True
@property
def force_model(self):
return self._force_model
@property
def relaxation_info_dict(self):
return self._cumulant_to_relaxation_info_dict
@property
def zeroth_order_equilibrium_moment_symbol(self, ):
return self._conserved_quantity_computation.zeroth_order_moment_symbol
@property
def first_order_equilibrium_moment_symbols(self, ):
return self._conserved_quantity_computation.first_order_moment_symbols
def set_zeroth_moment_relaxation_rate(self, relaxation_rate):
e = sp.Rational(1, 1)
prev_entry = self._cumulant_to_relaxation_info_dict[e]
new_entry = RelaxationInfo(prev_entry[0], relaxation_rate)
self._cumulant_to_relaxation_info_dict[e] = new_entry
def set_first_moment_relaxation_rate(self, relaxation_rate):
if self.force_model_rr_override:
warn("Overwriting first-order relaxation rates governed by CenteredCumulantForceModel "
"might break your forcing scheme.")
for e in MOMENT_SYMBOLS[:self.dim]:
assert e in self._cumulant_to_relaxation_info_dict, \
"First cumulants are not relaxed separately by this method"
for e in MOMENT_SYMBOLS[:self.dim]:
prev_entry = self._cumulant_to_relaxation_info_dict[e]
new_entry = RelaxationInfo(prev_entry[0], relaxation_rate)
self._cumulant_to_relaxation_info_dict[e] = new_entry
def set_conserved_moments_relaxation_rate(self, relaxation_rate):
self.set_zeroth_moment_relaxation_rate(relaxation_rate)
self.set_first_moment_relaxation_rate(relaxation_rate)
def set_force_model(self, force_model):
self._force_model = force_model
@property
def cumulants(self):
return sorted(self._cumulant_to_relaxation_info_dict.keys(), key=moment_sort_key)
@property
def cumulant_equilibrium_values(self):
return tuple([e.equilibrium_value for e in self._cumulant_to_relaxation_info_dict.values()])
@property
def relaxation_rates(self):
return tuple([e.relaxation_rate for e in self._cumulant_to_relaxation_info_dict.values()])
def _repr_html_(self):
table = """
<table style="border:none; width: 100%">
<tr {nb}>
<th {nb} >Central Moment / Cumulant</th>
<th {nb} >Eq. Value </th>
<th {nb} >Relaxation Rate</th>
</tr>
{content}
</table>
"""
content = ""
for cumulant, (eq_value, rr) in self._cumulant_to_relaxation_info_dict.items():
vals = {
'rr': f"${sp.latex(rr)}$",
'cumulant': f"${sp.latex(cumulant)}$",
'eq_value': f"${sp.latex(eq_value)}$",
'nb': 'style="border:none"',
}
order = get_order(cumulant)
if order <= 1:
vals['cumulant'] += ' (central moment)'
if order == 1 and self.force_model_rr_override:
vals['rr'] += ' (overridden by force model)'
content += """<tr {nb}>
<td {nb}>{cumulant}</td>
<td {nb}>{eq_value}</td>
<td {nb}>{rr}</td>
</tr>\n""".format(**vals)
return table.format(content=content, nb='style="border:none"')
# ----------------------- Overridden Abstract Members --------------------------
@property
def conserved_quantity_computation(self):
"""Returns an instance of class :class:`lbmpy.methods.AbstractConservedQuantityComputation`"""
return self._conserved_quantity_computation
@property
def weights(self):
"""Returns a sequence of weights, one for each lattice direction"""
if self._weights is None:
self._weights = self._compute_weights()
return self._weights
def override_weights(self, weights):
assert len(weights) == len(self.stencil)
self._weights = weights
def get_equilibrium(self, conserved_quantity_equations=None, subexpressions=False, pre_simplification=False,
keep_cqc_subexpressions=True):
"""Returns equation collection, to compute equilibrium values.
The equations have the post collision symbols as left hand sides and are
functions of the conserved quantities
Args:
conserved_quantity_equations: equations to compute conserved quantities.
subexpressions: if set to false all subexpressions of the equilibrium assignments are plugged
into the main assignments
pre_simplification: with or without pre_simplifications for the calculation of the collision
keep_cqc_subexpressions: if equilibrium is returned without subexpressions keep_cqc_subexpressions
determines if also subexpressions to calculate conserved quantities should be
plugged into the main assignments
"""
r_info_dict = {c: RelaxationInfo(info.equilibrium_value, 1)
for c, info in self._cumulant_to_relaxation_info_dict.items()}
ac = self._centered_cumulant_collision_rule(
r_info_dict, conserved_quantity_equations, pre_simplification, include_galilean_correction=False)
if not subexpressions:
if keep_cqc_subexpressions:
bs = self._bound_symbols_cqc(conserved_quantity_equations)
return ac.new_without_subexpressions(subexpressions_to_keep=bs)
else:
return ac.new_without_subexpressions()
else:
return ac
def get_equilibrium_terms(self):
equilibrium = self.get_equilibrium()
return sp.Matrix([eq.rhs for eq in equilibrium.main_assignments])
def get_collision_rule(self, conserved_quantity_equations=None, pre_simplification=False):
"""Returns an LbmCollisionRule i.e. an equation collection with a reference to the method.
This collision rule defines the collision operator."""
return self._centered_cumulant_collision_rule(
self._cumulant_to_relaxation_info_dict, conserved_quantity_equations, pre_simplification, True)
# ------------------------------- Internals --------------------------------------------
def _bound_symbols_cqc(self, conserved_quantity_equations=None):
f = self.pre_collision_pdf_symbols
cqe = conserved_quantity_equations
if cqe is None:
cqe = self._conserved_quantity_computation.equilibrium_input_equations_from_pdfs(f)
return cqe.bound_symbols
def _compute_weights(self):
defaults = self._conserved_quantity_computation.default_values
cqe = AssignmentCollection([Assignment(s, e) for s, e in defaults.items()])
eq_ac = self.get_equilibrium(cqe, subexpressions=False, keep_cqc_subexpressions=False)
weights = []
for eq in eq_ac.main_assignments:
value = eq.rhs.expand()
assert len(value.atoms(sp.Symbol)) == 0, "Failed to compute weights"
weights.append(value)
return weights
def _centered_cumulant_collision_rule(self, cumulant_to_relaxation_info_dict,
conserved_quantity_equations=None,
pre_simplification=False,
include_force_terms=False,
include_galilean_correction=True):
stencil = self.stencil
f = self.pre_collision_pdf_symbols
density = self.zeroth_order_equilibrium_moment_symbol
velocity = self.first_order_equilibrium_moment_symbols
cqe = conserved_quantity_equations
if cqe is None:
cqe = self._conserved_quantity_computation.equilibrium_input_equations_from_pdfs(f)
# 1) Extract Monomial Cumulants for the higher-order polynomials
polynomial_cumulants = cumulant_to_relaxation_info_dict.keys()
polynomial_cumulants = sorted(list(polynomial_cumulants), key=moment_sort_key)
higher_order_polynomials = [p for p in polynomial_cumulants if get_order(p) > 1]
monomial_cumulants = sorted(list(extract_monomials(
higher_order_polynomials, dim=self.dim)), key=exponent_tuple_sort_key)
# 2) Get Forward and Backward Transformations between central moment and cumulant space,
# and find required central moments
k_to_c_transform = self._cumulant_transform_class(stencil, monomial_cumulants, density, velocity)
k_to_c_eqs = cached_forward_transform(k_to_c_transform, simplification=pre_simplification)
c_post_to_k_post_eqs = cached_backward_transform(
k_to_c_transform, simplification=pre_simplification, omit_conserved_moments=True)
central_moments = k_to_c_transform.required_central_moments
assert len(central_moments) == len(stencil), 'Number of required central moments must match stencil size.'
# 3) Get Forward Transformation from PDFs to central moments
pdfs_to_k_transform = self._central_moment_transform_class(
stencil, central_moments, density, velocity, conserved_quantity_equations=cqe)
pdfs_to_k_eqs = cached_forward_transform(pdfs_to_k_transform, f, simplification=pre_simplification)
# 4) Add relaxation rules for lower order moments
lower_order_moments = moments_up_to_order(1, dim=self.dim)
lower_order_moment_symbols = [statistical_quantity_symbol(PRE_COLLISION_CENTRAL_MOMENT, exp)
for exp in lower_order_moments]
lower_order_relaxation_infos = [cumulant_to_relaxation_info_dict[exponent_to_polynomial_representation(e)]
for e in lower_order_moments]
lower_order_relaxation_rates = [info.relaxation_rate for info in lower_order_relaxation_infos]
lower_order_equilibrium = [info.equilibrium_value for info in lower_order_relaxation_infos]
lower_order_moment_collision_eqs = relax_lower_order_central_moments(
lower_order_moments, lower_order_moment_symbols,
lower_order_relaxation_rates, lower_order_equilibrium)
# 5) Add relaxation rules for higher-order, polynomial cumulants
poly_relaxation_infos = [cumulant_to_relaxation_info_dict[c] for c in higher_order_polynomials]
poly_relaxation_rates = [info.relaxation_rate for info in poly_relaxation_infos]
poly_equilibrium = [info.equilibrium_value for info in poly_relaxation_infos]
if self._galilean_correction and include_galilean_correction:
galilean_correction_terms = get_galilean_correction_terms(
cumulant_to_relaxation_info_dict, density, velocity)
else:
galilean_correction_terms = None
cumulant_collision_eqs = relax_polynomial_cumulants(
monomial_cumulants, higher_order_polynomials,
poly_relaxation_rates, poly_equilibrium,
pre_simplification,
galilean_correction_terms=galilean_correction_terms)
# 6) Get backward transformation from central moments to PDFs
d = self.post_collision_pdf_symbols
k_post_to_pdfs_eqs = cached_backward_transform(pdfs_to_k_transform, d, simplification=pre_simplification)
# 7) That's all. Now, put it all together.
all_acs = [] if pdfs_to_k_transform.absorbs_conserved_quantity_equations else [cqe]
all_acs += [pdfs_to_k_eqs, k_to_c_eqs, lower_order_moment_collision_eqs,
cumulant_collision_eqs, c_post_to_k_post_eqs]
subexpressions = [ac.all_assignments for ac in all_acs]
subexpressions += k_post_to_pdfs_eqs.subexpressions
main_assignments = k_post_to_pdfs_eqs.main_assignments
# 8) Maybe add forcing terms if CenteredCumulantForceModel was not used
if self._force_model is not None and \
not isinstance(self._force_model, CenteredCumulantForceModel) and include_force_terms:
force_model_terms = self._force_model(self)
force_term_symbols = sp.symbols("forceTerm_:%d" % (len(force_model_terms, )))
force_subexpressions = [Assignment(sym, force_model_term)
for sym, force_model_term in zip(force_term_symbols, force_model_terms)]
subexpressions += force_subexpressions
main_assignments = [Assignment(eq.lhs, eq.rhs + force_term_symbol)
for eq, force_term_symbol in zip(main_assignments, force_term_symbols)]
# Aaaaaand we're done.
return LbmCollisionRule(self, main_assignments, subexpressions)
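# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example of how an instance of this class is usually obtained and queried. The factory
# import path and the helper name are assumptions; the factory itself appears further down in this
# changeset (create_with_default_polynomial_cumulants).
def _example_centered_cumulant_collision_rule():
    from lbmpy.methods.creationfunctions import create_with_default_polynomial_cumulants
    omega = sp.Symbol('omega')
    method = create_with_default_polynomial_cumulants(get_stencil("D3Q27"), [omega],
                                                      galilean_correction=True)
    # one assignment per post-collision population, with all transforms as subexpressions
    return method.get_collision_rule()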
from lbmpy.forcemodels import default_velocity_shift
# =========================== Centered Cumulant Force Model ==========================================================
class CenteredCumulantForceModel:
"""
A force model to be used with the centered cumulant-based LB method.
It only shifts the macroscopic and equilibrium velocities and does not
introduce an explicit forcing term into the collision process. Forcing is instead
applied through the relaxation of the first-order central moments in the shifted
frame of reference (cf. https://doi.org/10.1016/j.camwa.2015.05.001).
"""
def __init__(self, force):
self._force = force
self.override_momentum_relaxation_rate = 2
def __call__(self, lb_method, **kwargs):
raise Exception('This force model does not provide a forcing term.')
def macroscopic_velocity_shift(self, density):
return default_velocity_shift(density, self._force)
def equilibrium_velocity_shift(self, density):
return default_velocity_shift(density, self._force)
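# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example: the model exposes only the velocity shift used by the conserved-quantity
# computation. Symbol names and the helper name are illustrative.
def _example_velocity_shift():
    import sympy as sp
    rho = sp.Symbol('rho')
    force = sp.symbols('F_:3')
    force_model = CenteredCumulantForceModel(force)
    # both shift methods delegate to default_velocity_shift imported above
    return force_model.macroscopic_velocity_shift(rho)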
import sympy as sp
from pystencils.sympyextensions import is_constant
# Subexpression Insertion
def insert_subexpressions(ac, selection_callback, skip=set()):
i = 0
while i < len(ac.subexpressions):
exp = ac.subexpressions[i]
if exp.lhs not in skip and selection_callback(exp):
ac = ac.new_with_inserted_subexpression(exp.lhs)
else:
i += 1
return ac
def insert_aliases(ac, **kwargs):
return insert_subexpressions(ac, lambda x: isinstance(x.rhs, sp.Symbol), **kwargs)
def insert_zeros(ac, **kwargs):
zero = sp.Integer(0)
return insert_subexpressions(ac, lambda x: x.rhs == zero, **kwargs)
def insert_constants(ac, **kwargs):
return insert_subexpressions(ac, lambda x: is_constant(x.rhs), **kwargs)
def insert_symbol_times_minus_one(ac, **kwargs):
def callback(exp):
rhs = exp.rhs
minus_one = sp.Integer(-1)
return isinstance(rhs, sp.Mul) and \
len(rhs.args) == 2 and \
(rhs.args[0] == minus_one or rhs.args[1] == minus_one)
return insert_subexpressions(ac, callback, **kwargs)
def insert_constant_multiples(ac, **kwargs):
def callback(exp):
rhs = exp.rhs
return isinstance(rhs, sp.Mul) and \
len(rhs.args) == 2 and \
(is_constant(rhs.args[0]) or is_constant(rhs.args[1]))
return insert_subexpressions(ac, callback, **kwargs)
def insert_constant_additions(ac, **kwargs):
def callback(exp):
rhs = exp.rhs
return isinstance(rhs, sp.Add) and \
len(rhs.args) == 2 and \
(is_constant(rhs.args[0]) or is_constant(rhs.args[1]))
return insert_subexpressions(ac, callback, **kwargs)
def insert_squares(ac, **kwargs):
two = sp.Integer(2)
def callback(exp):
rhs = exp.rhs
return isinstance(rhs, sp.Pow) and rhs.args[1] == two
return insert_subexpressions(ac, callback, **kwargs)
def bind_symbols_to_skip(insertion_function, skip):
return lambda ac: insertion_function(ac, skip=skip)
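# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example of the insertion helpers on a tiny assignment collection. It assumes pystencils'
# Assignment and AssignmentCollection as used elsewhere in this changeset; names are illustrative.
def _example_insert_subexpressions():
    from pystencils import Assignment, AssignmentCollection
    a, b, c, x = sp.symbols('a b c x')
    ac = AssignmentCollection([Assignment(x, a + b)],
                              subexpressions=[Assignment(a, c),               # alias
                                              Assignment(b, sp.Integer(4))])  # constant
    ac = insert_aliases(ac)      # plugs a -> c into the main assignment
    ac = insert_constants(ac)    # plugs b -> 4 into the main assignment
    return ac                    # expected: single main assignment x = c + 4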
import itertools
import operator
from collections import OrderedDict
from functools import reduce
from warnings import warn
import sympy as sp
from lbmpy.maxwellian_equilibrium import (
compressible_to_incompressible_moment_value, get_cumulants_of_continuous_maxwellian_equilibrium,
get_moments_of_continuous_maxwellian_equilibrium,
get_moments_of_discrete_maxwellian_equilibrium, get_weights)
from lbmpy.methods.centeredcumulant.centered_cumulants import get_default_polynomial_cumulants_for_stencil
from lbmpy.methods.momentbased.moment_transforms import PdfsToCentralMomentsByShiftMatrix
from lbmpy.methods.centeredcumulant.cumulant_transform import CentralMomentsToCumulantsByGeneratingFunc
from lbmpy.methods.abstractlbmethod import RelaxationInfo
from lbmpy.methods.conservedquantitycomputation import DensityVelocityComputation
from lbmpy.methods.centeredcumulant import CenteredCumulantBasedLbMethod
from lbmpy.methods.momentbased.momentbasedmethod import MomentBasedLbMethod
from lbmpy.moments import (
MOMENT_SYMBOLS, discrete_moment, exponents_to_polynomial_representations,
get_default_moment_set_for_stencil, gram_schmidt, is_even, moments_of_order,
moments_up_to_component_order, sort_moments_into_groups_of_same_order,
is_bulk_moment, is_shear_moment, get_order)
from lbmpy.relaxationrates import relaxation_rate_from_magic_number
from lbmpy.stencils import get_stencil
from pystencils.stencil import have_same_entries
from pystencils.sympyextensions import common_denominator
def create_with_discrete_maxwellian_eq_moments(stencil, moment_to_relaxation_rate_dict, compressible=False,
force_model=None, equilibrium_order=2,
cumulant=False, c_s_sq=sp.Rational(1, 3)):
r"""Creates a moment-based LBM by taking a list of moments with corresponding relaxation rate.
These moments are relaxed against the moments of the discrete Maxwellian distribution.
Args:
stencil: nested tuple defining the discrete velocity space. See `get_stencil`
moment_to_relaxation_rate_dict: dict that has as many entries as the stencil. Each moment, which can be
represented by an exponent tuple or in polynomial form
(see `lbmpy.moments`), is mapped to a relaxation rate.
compressible: incompressible LBM methods split the density into :math:`\rho = \rho_0 + \Delta \rho`
where :math:`\rho_0` is chosen as one, and the first moment of the pdfs is :math:`\Delta \rho` .
This approximates the incompressible Navier-Stokes equations better than the standard
compressible model.
force_model: force model instance, or None if no external forces
equilibrium_order: approximation order of macroscopic velocity :math:`\mathbf{u}` in the equilibrium
cumulant: if True relax cumulants instead of moments
c_s_sq: Speed of sound squared
Returns:
`lbmpy.methods.momentbased.MomentBasedLbMethod` instance
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
mom_to_rr_dict = OrderedDict(moment_to_relaxation_rate_dict)
assert len(mom_to_rr_dict) == len(stencil), \
"The number of moments has to be the same as the number of stencil entries"
density_velocity_computation = DensityVelocityComputation(stencil, compressible, force_model)
if cumulant:
raise ValueError("Cumulant methods should be created with maxwellian_moments=True")
else:
eq_values = get_moments_of_discrete_maxwellian_equilibrium(stencil, tuple(mom_to_rr_dict.keys()),
c_s_sq=c_s_sq, compressible=compressible,
order=equilibrium_order)
rr_dict = OrderedDict([(mom, RelaxationInfo(eq_mom, rr))
for mom, rr, eq_mom in zip(mom_to_rr_dict.keys(), mom_to_rr_dict.values(), eq_values)])
return MomentBasedLbMethod(stencil, rr_dict, density_velocity_computation, force_model)
def create_with_continuous_maxwellian_eq_moments(stencil, moment_to_relaxation_rate_dict, compressible=False,
force_model=None, equilibrium_order=2,
cumulant=False, c_s_sq=sp.Rational(1, 3)):
r"""
Creates a moment-based LBM by taking a list of moments with corresponding relaxation rates. These moments are
relaxed against the moments of the continuous Maxwellian distribution.
For parameter description see :func:`lbmpy.methods.create_with_discrete_maxwellian_eq_moments`.
By using the continuous Maxwellian we automatically get a compressible model.
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
mom_to_rr_dict = OrderedDict(moment_to_relaxation_rate_dict)
assert len(mom_to_rr_dict) == len(stencil), "The number of moments has to be equal to the number of stencil entries"
dim = len(stencil[0])
density_velocity_computation = DensityVelocityComputation(stencil, compressible, force_model)
if cumulant:
eq_values = get_cumulants_of_continuous_maxwellian_equilibrium(tuple(mom_to_rr_dict.keys()), dim, c_s_sq=c_s_sq,
order=equilibrium_order)
else:
eq_values = get_moments_of_continuous_maxwellian_equilibrium(tuple(mom_to_rr_dict.keys()), dim, c_s_sq=c_s_sq,
order=equilibrium_order)
if not compressible:
if not compressible and cumulant:
raise NotImplementedError("Incompressible cumulants not yet supported")
rho = density_velocity_computation.defined_symbols(order=0)[1]
u = density_velocity_computation.defined_symbols(order=1)[1]
eq_values = [compressible_to_incompressible_moment_value(em, rho, u) for em in eq_values]
rr_dict = OrderedDict([(mom, RelaxationInfo(eq_mom, rr))
for mom, rr, eq_mom in zip(mom_to_rr_dict.keys(), mom_to_rr_dict.values(), eq_values)])
if cumulant:
warn("Please use method=cumulant directly to create a cumulant method", DeprecationWarning)
return create_with_default_polynomial_cumulants(stencil, mom_to_rr_dict.values(),
force_model=force_model)
else:
return MomentBasedLbMethod(stencil, rr_dict, density_velocity_computation, force_model)
def create_generic_mrt(stencil, moment_eq_value_relaxation_rate_tuples, compressible=False,
force_model=None, cumulant=False):
r"""
Creates a generic moment-based LB method.
Args:
stencil: sequence of lattice velocities
moment_eq_value_relaxation_rate_tuples: sequence of tuples containing (moment, equilibrium value, relax. rate)
compressible: compressibility, determines calculation of velocity for force models
force_model: see create_with_discrete_maxwellian_eq_moments
cumulant: True for cumulant methods, False for moment-based methods. Deprecated parameter.
"""
density_velocity_computation = DensityVelocityComputation(stencil, compressible, force_model)
rr_dict = OrderedDict()
for moment, eq_value, rr in moment_eq_value_relaxation_rate_tuples:
moment = sp.sympify(moment)
rr_dict[moment] = RelaxationInfo(eq_value, rr)
if cumulant:
warn("Please use method=cumulant directly to create a cumulant method", DeprecationWarning)
return CenteredCumulantBasedLbMethod(stencil, rr_dict, density_velocity_computation, force_model)
else:
return MomentBasedLbMethod(stencil, rr_dict, density_velocity_computation, force_model)
def create_from_equilibrium(stencil, equilibrium, moment_to_relaxation_rate_dict, compressible=False, force_model=None):
r"""
Creates a moment-based LB method using a given equilibrium distribution function
Args:
stencil: see create_with_discrete_maxwellian_eq_moments
equilibrium: list of equilibrium terms, dependent on rho and u, one for each stencil direction
moment_to_relaxation_rate_dict: relaxation rate for each moment, or a symbol/float if all moments should be
relaxed with the same rate
compressible: see create_with_discrete_maxwellian_eq_moments
force_model: see create_with_discrete_maxwellian_eq_moments
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
if any(isinstance(moment_to_relaxation_rate_dict, t) for t in (sp.Symbol, float, int)):
moment_to_relaxation_rate_dict = {m: moment_to_relaxation_rate_dict
for m in get_default_moment_set_for_stencil(stencil)}
mom_to_rr_dict = OrderedDict(moment_to_relaxation_rate_dict)
assert len(mom_to_rr_dict) == len(stencil), "The number of moments has to be equal to the number of stencil entries"
density_velocity_computation = DensityVelocityComputation(stencil, compressible, force_model)
rr_dict = OrderedDict([(mom, RelaxationInfo(discrete_moment(equilibrium, mom, stencil).expand(), rr))
for mom, rr in zip(mom_to_rr_dict.keys(), mom_to_rr_dict.values())])
return MomentBasedLbMethod(stencil, rr_dict, density_velocity_computation, force_model)
# ------------------------------------ SRT / TRT/ MRT Creators ---------------------------------------------------------
def create_srt(stencil, relaxation_rate, maxwellian_moments=False, **kwargs):
r"""Creates a single relaxation time (SRT) lattice Boltzmann model also known as BGK model.
Args:
stencil: nested tuple defining the discrete velocity space. See :func:`lbmpy.stencils.get_stencil`
relaxation_rate: relaxation rate (inverse of the relaxation time)
usually called :math:`\omega` in LBM literature
maxwellian_moments: determines whether the discrete or continuous Maxwellian equilibrium is
used to compute the equilibrium moments
Returns:
:class:`lbmpy.methods.momentbased.MomentBasedLbMethod` instance
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
moments = get_default_moment_set_for_stencil(stencil)
rr_dict = OrderedDict([(m, relaxation_rate) for m in moments])
if maxwellian_moments:
return create_with_continuous_maxwellian_eq_moments(stencil, rr_dict, **kwargs)
else:
return create_with_discrete_maxwellian_eq_moments(stencil, rr_dict, **kwargs)
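# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example: an SRT/BGK method on D2Q9 with a symbolic relaxation rate; the keyword arguments
# are forwarded to create_with_continuous_maxwellian_eq_moments. The helper name is illustrative.
def _example_srt_method():
    omega = sp.Symbol('omega')
    return create_srt("D2Q9", omega, maxwellian_moments=True, compressible=True)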
def create_trt(stencil, relaxation_rate_even_moments, relaxation_rate_odd_moments,
maxwellian_moments=False, **kwargs):
"""
Creates a two relaxation time (TRT) lattice Boltzmann model, where even and odd moments are relaxed differently.
In the SRT model the exact wall position of no-slip boundaries depends on the viscosity; the TRT method does not
have this problem.
Parameters are similar to :func:`lbmpy.methods.create_srt`, but instead of one relaxation rate there are
two relaxation rates: one for even moments (determines viscosity) and one for odd moments.
If unsure how to choose the odd relaxation rate, use the function :func:`lbmpy.methods.create_trt_with_magic_number`
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
moments = get_default_moment_set_for_stencil(stencil)
rr_dict = OrderedDict([(m, relaxation_rate_even_moments if is_even(m) else relaxation_rate_odd_moments)
for m in moments])
if maxwellian_moments:
return create_with_continuous_maxwellian_eq_moments(stencil, rr_dict, **kwargs)
else:
return create_with_discrete_maxwellian_eq_moments(stencil, rr_dict, **kwargs)
def create_trt_with_magic_number(stencil, relaxation_rate, magic_number=sp.Rational(3, 16), **kwargs):
"""
Creates a two relaxation time (TRT) lattice Boltzmann method, where the relaxation rate for odd moments is
determined from the even-moment relaxation rate and a "magic number".
For possible parameters see :func:`lbmpy.methods.create_trt`
"""
rr_odd = relaxation_rate_from_magic_number(relaxation_rate, magic_number)
return create_trt(stencil, relaxation_rate_even_moments=relaxation_rate,
relaxation_rate_odd_moments=rr_odd, **kwargs)
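# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example: with the default magic number Lambda = 3/16 the odd relaxation rate is derived
# from the even one, a choice commonly used to make the bounce-back wall position viscosity-independent.
# The helper name is illustrative.
def _example_trt_method():
    omega = sp.Symbol('omega')
    return create_trt_with_magic_number("D3Q19", omega, compressible=True)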
def create_mrt_raw(stencil, relaxation_rates, maxwellian_moments=False, **kwargs):
"""Creates a MRT method using non-orthogonalized moments."""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
moments = get_default_moment_set_for_stencil(stencil)
rr_dict = OrderedDict(zip(moments, relaxation_rates))
if maxwellian_moments:
return create_with_continuous_maxwellian_eq_moments(stencil, rr_dict, **kwargs)
else:
return create_with_discrete_maxwellian_eq_moments(stencil, rr_dict, **kwargs)
def create_trt_kbc(dim, shear_relaxation_rate, higher_order_relaxation_rate, method_name='KBC-N4',
maxwellian_moments=False, **kwargs):
"""
Creates a method with two relaxation rates: one for lower-order moments, which determines the viscosity, and
one for higher-order moments. In entropic models this second relaxation rate is chosen subject to an entropy
condition. Which moments are relaxed by which rate is determined by method_name.
Args:
dim: 2 or 3, leading to the stencil D2Q9 or D3Q27
shear_relaxation_rate: relaxation rate that determines the viscosity
higher_order_relaxation_rate: relaxation rate for higher-order moments
method_name: string 'KBC-Nx' where x can be a number from 1 to 4; for details see
"Karlin 2015: Entropic multi relaxation lattice Boltzmann models for turbulent flows"
maxwellian_moments: determines whether the discrete or continuous Maxwellian equilibrium is
used to compute the equilibrium moments
"""
def product(iterable):
return reduce(operator.mul, iterable, 1)
the_moment = MOMENT_SYMBOLS[:dim]
rho = [sp.Rational(1, 1)]
velocity = list(the_moment)
shear_tensor_off_diagonal = [product(t) for t in itertools.combinations(the_moment, 2)]
shear_tensor_diagonal = [m_i * m_i for m_i in the_moment]
shear_tensor_trace = sum(shear_tensor_diagonal)
shear_tensor_trace_free_diagonal = [dim * d - shear_tensor_trace for d in shear_tensor_diagonal]
poly_repr = exponents_to_polynomial_representations
energy_transport_tensor = list(poly_repr([a for a in moments_of_order(3, dim, True)
if 3 not in a]))
explicitly_defined = set(rho + velocity + shear_tensor_off_diagonal
+ shear_tensor_diagonal + energy_transport_tensor)
rest = list(set(poly_repr(moments_up_to_component_order(2, dim))) - explicitly_defined)
assert len(rest) + len(explicitly_defined) == 3**dim
# naming according to paper Karlin 2015: Entropic multi relaxation lattice Boltzmann models for turbulent flows
d = shear_tensor_off_diagonal + shear_tensor_trace_free_diagonal[:-1]
t = [shear_tensor_trace]
q = energy_transport_tensor
if method_name == 'KBC-N1':
decomposition = [d, t + q + rest]
elif method_name == 'KBC-N2':
decomposition = [d + t, q + rest]
elif method_name == 'KBC-N3':
decomposition = [d + q, t + rest]
elif method_name == 'KBC-N4':
decomposition = [d + t + q, rest]
else:
raise ValueError("Unknown model. Supported models KBC-Nx where x in (1,2,3,4)")
omega_s, omega_h = shear_relaxation_rate, higher_order_relaxation_rate
shear_part, rest_part = decomposition
relaxation_rates = [omega_s] + \
[omega_s] * len(velocity) + \
[omega_s] * len(shear_part) + \
[omega_h] * len(rest_part)
stencil = get_stencil("D2Q9") if dim == 2 else get_stencil("D3Q27")
all_moments = rho + velocity + shear_part + rest_part
moment_to_rr = OrderedDict((m, rr) for m, rr in zip(all_moments, relaxation_rates))
if maxwellian_moments:
return create_with_continuous_maxwellian_eq_moments(stencil, moment_to_rr, **kwargs)
else:
return create_with_discrete_maxwellian_eq_moments(stencil, moment_to_rr, **kwargs)
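# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example: a KBC-N4 method in 2D (D2Q9) with symbolic shear and higher-order relaxation
# rates. The helper name is illustrative.
def _example_kbc_method():
    omega_s, omega_h = sp.symbols('omega_s omega_h')
    return create_trt_kbc(2, omega_s, omega_h, method_name='KBC-N4', compressible=True)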
def create_mrt_orthogonal(stencil, relaxation_rate_getter, maxwellian_moments=False, weighted=None,
nested_moments=None, **kwargs):
r"""
Returns an orthogonal multi-relaxation time model for the stencils D2Q9, D3Q15, D3Q19 and D3Q27.
These MRT methods are just one specific version - there are many MRT methods possible for all these stencils
which differ by the linear combination of moments and the grouping into equal relaxation times.
To create a generic MRT method use `create_with_discrete_maxwellian_eq_moments`
Args:
stencil: nested tuple defining the discrete velocity space. See `get_stencil`
relaxation_rate_getter: function that takes a list of moments and returns the associated relaxation rate
maxwellian_moments: determines whether the discrete or continuous Maxwellian equilibrium is
used to compute the equilibrium moments
weighted: whether to use weighted or unweighted orthogonality
nested_moments: a list of lists of modes, grouped by common relaxation times. This is usually used in
conjunction with `mrt_orthogonal_modes_literature`. If this argument is not provided,
Gram-Schmidt orthogonalization of the default modes is performed.
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
dim = len(stencil[0])
if weighted is None and not nested_moments:
raise ValueError("Please specify whether you want weighted or unweighted orthogonality using 'weighted='")
elif weighted:
weights = get_weights(stencil, sp.Rational(1, 3))
else:
weights = None
moment_to_relaxation_rate_dict = OrderedDict()
if not nested_moments:
moments = get_default_moment_set_for_stencil(stencil)
x, y, z = MOMENT_SYMBOLS
if dim == 2:
diagonal_viscous_moments = [x ** 2 + y ** 2, x ** 2]
else:
diagonal_viscous_moments = [x ** 2 + y ** 2 + z ** 2, x ** 2, y ** 2 - z ** 2]
for i, d in enumerate(MOMENT_SYMBOLS[:dim]):
moments[moments.index(d**2)] = diagonal_viscous_moments[i]
orthogonal_moments = gram_schmidt(moments, stencil, weights)
orthogonal_moments_scaled = [e * common_denominator(e) for e in orthogonal_moments]
nested_moments = list(sort_moments_into_groups_of_same_order(orthogonal_moments_scaled).values())
# second order moments: separate bulk from shear
second_order_moments = nested_moments[2]
bulk_moment = [m for m in second_order_moments if is_bulk_moment(m, dim)]
shear_moments = [m for m in second_order_moments if is_shear_moment(m, dim)]
assert len(shear_moments) + len(bulk_moment) == len(second_order_moments)
nested_moments[2] = shear_moments
nested_moments.insert(3, bulk_moment)
for moment_list in nested_moments:
rr = relaxation_rate_getter(moment_list)
for m in moment_list:
moment_to_relaxation_rate_dict[m] = rr
if maxwellian_moments:
return create_with_continuous_maxwellian_eq_moments(stencil, moment_to_relaxation_rate_dict, **kwargs)
else:
return create_with_discrete_maxwellian_eq_moments(stencil, moment_to_relaxation_rate_dict, **kwargs)
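# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example of a relaxation_rate_getter: conserved moment groups get rate 0, the group
# containing the shear moments gets the viscosity-defining rate omega, and every remaining group is
# relaxed with 1. Names are illustrative.
def _example_orthogonal_mrt():
    omega = sp.Symbol('omega')

    def rr_getter(moment_group):
        if all(get_order(m) <= 1 for m in moment_group):
            return 0
        elif any(is_shear_moment(m, 3) for m in moment_group):
            return omega
        else:
            return 1

    return create_mrt_orthogonal(get_stencil("D3Q19"), rr_getter, maxwellian_moments=True,
                                 weighted=True, compressible=True)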
def mrt_orthogonal_modes_literature(stencil, is_weighted, is_cumulant=False):
"""
Returns a list of lists of modes, grouped by common relaxation times.
This is for commonly used MRT models found in literature.
Args:
stencil: nested tuple defining the discrete velocity space. See `get_stencil`
is_weighted: whether to use weighted or unweighted orthogonality
is_cumulant: whether a moment-based or cumulant-based model is desired
The MRT schemes described in the references cited in the respective branches below are used.
"""
x, y, z = MOMENT_SYMBOLS
one = sp.Rational(1, 1)
if not is_cumulant:
if have_same_entries(stencil, get_stencil("D2Q9")) and is_weighted:
# Reference:
# Duenweg, B., Schiller, U. D., & Ladd, A. J. (2007). Statistical mechanics of the fluctuating
# lattice Boltzmann equation. Physical Review E, 76(3)
sq = x ** 2 + y ** 2
all_moments = [one, x, y, 3 * sq - 2, 2 * x ** 2 - sq, x * y,
(3 * sq - 4) * x, (3 * sq - 4) * y, 9 * sq ** 2 - 15 * sq + 2]
nested_moments = list(sort_moments_into_groups_of_same_order(all_moments).values())
return nested_moments
elif have_same_entries(stencil, get_stencil("D3Q15")) and is_weighted:
sq = x ** 2 + y ** 2 + z ** 2
nested_moments = [
[one, x, y, z], # [0, 3, 5, 7]
[sq - 1], # [1]
[3 * sq ** 2 - 9 * sq + 4], # [2]
[(3 * sq - 5) * x, (3 * sq - 5) * y, (3 * sq - 5) * z], # [4, 6, 8]
[3 * x ** 2 - sq, y ** 2 - z ** 2, x * y, y * z, x * z], # [9, 10, 11, 12, 13]
[x * y * z]
]
elif have_same_entries(stencil, get_stencil("D3Q19")) and is_weighted:
# This MRT variant mentioned in the dissertation of Ulf Schiller
# "Thermal fluctuations and boundary conditions in the lattice Boltzmann method" (2008), p. 24ff
# There are some typos in the moment matrix on p.27
# The here implemented ordering of the moments is however different from that reference (Eq. 2.61-2.63)
# The moments are weighted-orthogonal (Eq. 2.58)
# Further references:
# Duenweg, B., Schiller, U. D., & Ladd, A. J. (2007). Statistical mechanics of the fluctuating
# lattice Boltzmann equation. Physical Review E, 76(3)
# Chun, B., & Ladd, A. J. (2007). Interpolated boundary condition for lattice Boltzmann simulations of
# flows in narrow gaps. Physical review E, 75(6)
sq = x ** 2 + y ** 2 + z ** 2
nested_moments = [
[one, x, y, z], # [0, 3, 5, 7]
[sq - 1], # [1]
[3 * sq ** 2 - 6 * sq + 1], # [2]
[(3 * sq - 5) * x, (3 * sq - 5) * y, (3 * sq - 5) * z], # [4, 6, 8]
[3 * x ** 2 - sq, y ** 2 - z ** 2, x * y, y * z, x * z], # [9, 11, 13, 14, 15]
[(2 * sq - 3) * (3 * x ** 2 - sq), (2 * sq - 3) * (y ** 2 - z ** 2)], # [10, 12]
[(y ** 2 - z ** 2) * x, (z ** 2 - x ** 2) * y, (x ** 2 - y ** 2) * z] # [16, 17, 18]
]
elif have_same_entries(stencil, get_stencil("D3Q27")) and not is_weighted:
xsq, ysq, zsq = x ** 2, y ** 2, z ** 2
all_moments = [
sp.Rational(1, 1), # 0
x, y, z, # 1, 2, 3
x * y, x * z, y * z, # 4, 5, 6
xsq - ysq, # 7
(xsq + ysq + zsq) - 3 * zsq, # 8
(xsq + ysq + zsq) - 2, # 9
3 * (x * ysq + x * zsq) - 4 * x, # 10
3 * (xsq * y + y * zsq) - 4 * y, # 11
3 * (xsq * z + ysq * z) - 4 * z, # 12
x * ysq - x * zsq, # 13
xsq * y - y * zsq, # 14
xsq * z - ysq * z, # 15
x * y * z, # 16
3 * (xsq * ysq + xsq * zsq + ysq * zsq) - 4 * (xsq + ysq + zsq) + 4, # 17
3 * (xsq * ysq + xsq * zsq - 2 * ysq * zsq) - 2 * (2 * xsq - ysq - zsq), # 18
3 * (xsq * ysq - xsq * zsq) - 2 * (ysq - zsq), # 19
3 * (xsq * y * z) - 2 * (y * z), # 20
3 * (x * ysq * z) - 2 * (x * z), # 21
3 * (x * y * zsq) - 2 * (x * y), # 22
9 * (x * ysq * zsq) - 6 * (x * ysq + x * zsq) + 4 * x, # 23
9 * (xsq * y * zsq) - 6 * (xsq * y + y * zsq) + 4 * y, # 24
9 * (xsq * ysq * z) - 6 * (xsq * z + ysq * z) + 4 * z, # 25
27 * (xsq * ysq * zsq) - 18 * (xsq * ysq + xsq * zsq + ysq * zsq) + 12 * (xsq + ysq + zsq) - 8, # 26
]
nested_moments = list(sort_moments_into_groups_of_same_order(all_moments).values())
else:
raise NotImplementedError("No MRT model is available (yet) for this stencil. "
"Create a custom MRT using 'create_with_discrete_maxwellian_eq_moments'")
else:
nested_moments = get_default_polynomial_cumulants_for_stencil(stencil)
return nested_moments
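# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example combining the literature mode sets with create_mrt_orthogonal via nested_moments.
# For brevity every group is relaxed with the same symbolic rate; names are illustrative.
def _example_literature_mrt():
    omega = sp.Symbol('omega')
    stencil = get_stencil("D3Q19")
    nested = mrt_orthogonal_modes_literature(stencil, is_weighted=True)
    return create_mrt_orthogonal(stencil, lambda group: omega, maxwellian_moments=True,
                                 weighted=True, nested_moments=nested, compressible=True)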
# ----------------------------------------- Cumulant method creators ---------------------------------------------------
def create_centered_cumulant_model(stencil, cumulant_to_rr_dict, force_model=None,
equilibrium_order=None, c_s_sq=sp.Rational(1, 3),
galilean_correction=False,
central_moment_transform_class=PdfsToCentralMomentsByShiftMatrix,
cumulant_transform_class=CentralMomentsToCumulantsByGeneratingFunc):
r"""Creates a cumulant lattice Boltzmann model.
Args:
stencil: nested tuple defining the discrete velocity space. See :func:`lbmpy.stencils.get_stencil`
cumulant_to_rr_dict: dict that has as many entries as the stencil. Each cumulant, which can be
represented by an exponent tuple or in polynomial form
(see `lbmpy.methods.centeredcumulant.centered_cumulants.get_default_polynomial_cumulants_for_stencil`),
is mapped to a relaxation rate.
force_model: force model used for the collision. For the cumulant LB method a good choice is
`lbmpy.methods.centeredcumulant.force_model.CenteredCumulantForceModel`
equilibrium_order: approximation order of macroscopic velocity :math:`\mathbf{u}` in the equilibrium
c_s_sq: Speed of sound squared
galilean_correction: special correction for D3Q27 cumulant collisions. See Appendix H in
:cite:`geier2015`. Implemented in :mod:`lbmpy.methods.centeredcumulant.galilean_correction`
central_moment_transform_class: Class which defines the transformation to the central moment space
(see :mod:`lbmpy.methods.momentbased.moment_transforms`)
cumulant_transform_class: Class which defines the transformation from the central moment space to the
cumulant space (see :mod:`lbmpy.methods.centeredcumulant.cumulant_transform`)
Returns:
:class:`lbmpy.methods.centeredcumulant.CenteredCumulantBasedLbMethod` instance
"""
one = sp.Integer(1)
if isinstance(stencil, str):
stencil = get_stencil(stencil)
assert len(cumulant_to_rr_dict) == len(
stencil), "The number of moments has to be equal to the number of stencil entries"
dim = len(stencil[0])
# CQC
cqc = DensityVelocityComputation(stencil, True, force_model=force_model)
density_symbol = cqc.zeroth_order_moment_symbol
velocity_symbols = cqc.first_order_moment_symbols
# Equilibrium Values
higher_order_polynomials = list(filter(lambda x: get_order(x) > 1, cumulant_to_rr_dict.keys()))
# Lower Order Equilibria
cumulants_to_relaxation_info_dict = {one: RelaxationInfo(density_symbol, cumulant_to_rr_dict[one])}
for d in MOMENT_SYMBOLS[:dim]:
cumulants_to_relaxation_info_dict[d] = RelaxationInfo(0, cumulant_to_rr_dict[d])
# Polynomial Cumulant Equilibria
polynomial_equilibria = get_cumulants_of_continuous_maxwellian_equilibrium(
higher_order_polynomials, dim, rho=density_symbol, u=velocity_symbols, c_s_sq=c_s_sq, order=equilibrium_order)
polynomial_equilibria = [density_symbol * v for v in polynomial_equilibria]
for i, c in enumerate(higher_order_polynomials):
cumulants_to_relaxation_info_dict[c] = RelaxationInfo(polynomial_equilibria[i], cumulant_to_rr_dict[c])
return CenteredCumulantBasedLbMethod(stencil, cumulants_to_relaxation_info_dict, cqc, force_model,
galilean_correction=galilean_correction,
central_moment_transform_class=central_moment_transform_class,
cumulant_transform_class=cumulant_transform_class)
def create_with_polynomial_cumulants(stencil, relaxation_rates, cumulant_groups, **kwargs):
r"""Creates a cumulant lattice Boltzmann model based on a default polynomial set.
Args:
stencil: nested tuple defining the discrete velocity space. See :func:`lbmpy.stencils.get_stencil`
relaxation_rates: relaxation rates for each cumulant group. If None are provided a list of symbolic relaxation
rates is created and used. If only a list with one entry is provided this relaxation rate is
used for determine the viscosity of the simulation. All other cumulants are relaxed with one.
If a cumulant force model is provided the first order cumulants are relaxed with two to ensure
that the force is applied correctly to the momentum groups
cumulant_groups: Nested sequence of polynomial expressions defining the cumulants to be relaxed. All cumulants
within one group are relaxed with the same relaxation rate.
kwargs: See :func:`create_centered_cumulant_model`
Returns:
:class:`lbmpy.methods.centeredcumulant.CenteredCumulantBasedLbMethod` instance
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
cumulant_to_rr_dict = OrderedDict()
rr_iter = iter(relaxation_rates)
for group in cumulant_groups:
if all(get_order(c) <= 1 for c in group):
for cumulant in group:
cumulant_to_rr_dict[cumulant] = 0
else:
try:
rr = next(rr_iter)
for cumulant in group:
cumulant_to_rr_dict[cumulant] = rr
except StopIteration:
raise ValueError('Not enough relaxation rates specified.')
return create_centered_cumulant_model(stencil, cumulant_to_rr_dict, **kwargs)
def create_with_monomial_cumulants(stencil, relaxation_rates, **kwargs):
r"""Creates a cumulant lattice Boltzmann model based on a default polinomial set.
Args:
stencil: nested tuple defining the discrete velocity space. See :func:`lbmpy.stencils.get_stencil`
relaxation_rates: relaxation rates for each cumulant group. If None are provided a list of symbolic relaxation
rates is created and used. If only a list with one entry is provided this relaxation rate is
used for determine the viscosity of the simulation. All other cumulants are relaxed with one.
If a cumulant force model is provided the first order cumulants are relaxed with two to ensure
that the force is applied correctly to the momentum groups
kwargs: See :func:`create_centered_cumulant_model`
Returns:
:class:`lbmpy.methods.centeredcumulant.CenteredCumulantBasedLbMethod` instance
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
dim = len(stencil[0])
# Get monomial moments
cumulants = get_default_moment_set_for_stencil(stencil)
if len(relaxation_rates) == 1:
r_rates = []
for c in cumulants:
order = get_order(c)
if order <= 1:
# Conserved moments
continue
elif is_shear_moment(c, dim):
# Viscosity-governing moments
r_rates.append(relaxation_rates[0])
else:
# The rest
r_rates.append(1)
else:
r_rates = relaxation_rates
cumulant_groups = [(c,) for c in cumulants]
return create_with_polynomial_cumulants(stencil, r_rates, cumulant_groups, **kwargs)
def create_with_default_polynomial_cumulants(stencil, relaxation_rates, **kwargs):
r"""Creates a cumulant lattice Boltzmann model based on a default polynomial set.
Args: See :func:`create_with_polynomial_cumulants`.
Returns:
:class:`lbmpy.methods.centeredcumulant.CenteredCumulantBasedLbMethod` instance
"""
if isinstance(stencil, str):
stencil = get_stencil(stencil)
# Get polynomial groups
cumulant_groups = get_default_polynomial_cumulants_for_stencil(stencil)
if len(relaxation_rates) == 1:
r_rates = [relaxation_rates[0]] # For correct viscosity
r_rates += [1] * (len(cumulant_groups) - 3)
else:
assert len(relaxation_rates) >= len(cumulant_groups) - 2, \
f"Number of relaxation rates must at least match the number of non-conserved cumulant groups. " \
f"For this stencil we have {len(cumulant_groups) - 2} such cumulant groups"
r_rates = relaxation_rates
return create_with_polynomial_cumulants(stencil, r_rates, cumulant_groups, **kwargs)
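# --------------------------- Illustrative usage sketch --------------------------------------------
# A hedged example: with a single relaxation rate, the shear group is relaxed with omega and the
# remaining non-conserved cumulant groups default to 1, as implemented above. The helper name is
# illustrative.
def _example_default_cumulant_method():
    omega = sp.Symbol('omega')
    return create_with_default_polynomial_cumulants(get_stencil("D2Q9"), [omega])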
# ----------------------------------------- Comparison view for notebooks ----------------------------------------------
def compare_moment_based_lb_methods(reference, other, show_deviations_only=False):
import ipy_table
table = []
caption_rows = [len(table)]
table.append(['Shared Moment', 'ref', 'other', 'difference'])
reference_moments = set(reference.moments)
other_moments = set(other.moments)
for moment in reference_moments.intersection(other_moments):
reference_value = reference.relaxation_info_dict[moment].equilibrium_value
other_value = other.relaxation_info_dict[moment].equilibrium_value
diff = sp.simplify(reference_value - other_value)
if show_deviations_only and diff == 0:
pass
else:
table.append(["$%s$" % (sp.latex(moment), ),
"$%s$" % (sp.latex(reference_value), ),
"$%s$" % (sp.latex(other_value), ),
"$%s$" % (sp.latex(diff),)])
only_in_ref = reference_moments - other_moments
if only_in_ref:
caption_rows.append(len(table))
table.append(['Only in Ref', 'value', '', ''])
for moment in only_in_ref:
val = reference.relaxation_info_dict[moment].equilibrium_value
table.append(["$%s$" % (sp.latex(moment),),
"$%s$" % (sp.latex(val),),
" ", " "])
only_in_other = other_moments - reference_moments
if only_in_other:
caption_rows.append(len(table))
table.append(['Only in Other', '', 'value', ''])
for moment in only_in_other:
val = other.relaxation_info_dict[moment].equilibrium_value
table.append(["$%s$" % (sp.latex(moment),),
" ",
"$%s$" % (sp.latex(val),),
" "])
table_display = ipy_table.make_table(table)
for row_idx in caption_rows:
for col in range(4):
ipy_table.set_cell_style(row_idx, col, color='#bbbbbb')
return table_display
import sympy as sp
from lbmpy.maxwellian_equilibrium import get_weights
from lbmpy.methods.abstractlbmethod import AbstractLbMethod, LbmCollisionRule
from lbmpy.methods.conservedquantitycomputation import DensityVelocityComputation
from pystencils import Assignment
class EntropicEquilibriumSRT(AbstractLbMethod):
"""Equilibrium from 'Minimal entropic kinetic models for hydrodynamics'
Ansumali, S. ; Karlin, I. V; Öttinger, H. C, (2003)
"""
def __init__(self, stencil, relaxation_rate, force_model, conserved_quantity_calculation):
super(EntropicEquilibriumSRT, self).__init__(stencil)
self._cqc = conserved_quantity_calculation
self._weights = get_weights(stencil, c_s_sq=sp.Rational(1, 3))
self._relaxationRate = relaxation_rate
self._forceModel = force_model
self.shear_relaxation_rate = relaxation_rate
@property
def conserved_quantity_computation(self):
return self._cqc
@property
def weights(self):
return self._weights
@property
def zeroth_order_equilibrium_moment_symbol(self, ):
return self._cqc.zeroth_order_moment_symbol
@property
def first_order_equilibrium_moment_symbols(self, ):
return self._cqc.first_order_moment_symbols
def get_equilibrium(self, conserved_quantity_equations=None, include_force_terms=False):
return self._get_collision_rule_with_relaxation_rate(1,
conserved_quantity_equations=conserved_quantity_equations,
include_force_terms=include_force_terms)
def get_equilibrium_terms(self):
equilibrium = self.get_equilibrium()
return sp.Matrix([eq.rhs for eq in equilibrium.main_assignments])
def _get_collision_rule_with_relaxation_rate(self, relaxation_rate, include_force_terms=True,
conserved_quantity_equations=None):
f = sp.Matrix(self.pre_collision_pdf_symbols)
rho = self._cqc.zeroth_order_moment_symbol
u = self._cqc.first_order_moment_symbols
if conserved_quantity_equations is None:
conserved_quantity_equations = self._cqc.equilibrium_input_equations_from_pdfs(f)
all_subexpressions = conserved_quantity_equations.all_assignments
eq = []
for w_i, direction in zip(self.weights, self.stencil):
f_i = rho * w_i
for u_a, e_ia in zip(u, direction):
b = sp.sqrt(1 + 3 * u_a ** 2)
f_i *= (2 - b) * ((2 * u_a + b) / (1 - u_a)) ** e_ia
eq.append(f_i)
collision_eqs = [Assignment(lhs, (1 - relaxation_rate) * f_i + relaxation_rate * eq_i)
for lhs, f_i, eq_i in zip(self.post_collision_pdf_symbols, self.pre_collision_pdf_symbols, eq)]
if (self._forceModel is not None) and include_force_terms:
force_model_terms = self._forceModel(self)
force_term_symbols = sp.symbols("forceTerm_:%d" % (len(force_model_terms, )))
force_subexpressions = [Assignment(sym, force_model_term)
for sym, force_model_term in zip(force_term_symbols, force_model_terms)]
all_subexpressions += force_subexpressions
collision_eqs = [Assignment(eq.lhs, eq.rhs + force_term_symbol)
for eq, force_term_symbol in zip(collision_eqs, force_term_symbols)]
cr = LbmCollisionRule(self, collision_eqs, all_subexpressions)
cr.simplification_hints['relaxation_rates'] = []
return cr
def get_collision_rule(self, conserved_quantity_equations=None, pre_simplification=None):
return self._get_collision_rule_with_relaxation_rate(self._relaxationRate,
conserved_quantity_equations=conserved_quantity_equations)
def create_srt_entropic(stencil, relaxation_rate, force_model, compressible):
if not compressible:
raise NotImplementedError("entropic-srt only implemented for compressible models")
density_velocity_computation = DensityVelocityComputation(stencil, compressible, force_model)
return EntropicEquilibriumSRT(stencil, relaxation_rate, force_model, density_velocity_computation)
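# Illustrative usage sketch: an entropic SRT method on a D2Q9 stencil without a force model.
# The relaxation rate symbol 'omega' is an assumption chosen for this example; get_stencil is
# the stencil factory from lbmpy.stencils.
def _example_create_srt_entropic():
    from lbmpy.stencils import get_stencil
    omega = sp.Symbol("omega")
    method = create_srt_entropic(get_stencil("D2Q9"), omega, force_model=None, compressible=True)
    return method.get_collision_rule()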
from abc import abstractmethod
import sympy as sp
from pystencils import Assignment, AssignmentCollection
from pystencils.simp import (
SimplificationStrategy, sympy_cse, add_subexpressions_for_divisions, add_subexpressions_for_constants)
from pystencils.simp.assignment_collection import SymbolGen
from pystencils.sympyextensions import subs_additive, fast_subs
from lbmpy.moments import moment_matrix, set_up_shift_matrix, contained_moments, moments_up_to_order
from lbmpy.methods.momentbased.momentbasedsimplifications import (
substitute_moments_in_conserved_quantity_equations,
split_pdf_main_assignments_by_symmetry)
from lbmpy.methods.centeredcumulant.centered_cumulants import statistical_quantity_symbol as sq_sym
# ============================ PDFs <-> Central Moments ==============================================================
PRE_COLLISION_RAW_MOMENT = 'm'
POST_COLLISION_RAW_MOMENT = 'm_post'
PRE_COLLISION_CENTRAL_MOMENT = 'kappa'
POST_COLLISION_CENTRAL_MOMENT = 'kappa_post'
class AbstractMomentTransform:
r"""
Abstract Base Class for classes providing transformations between moment spaces. These transformations
are bijective maps between two spaces :math:`\mathcal{S}` and :math:`\mathcal{D}` (i.e. populations
and raw moments, or central moments and cumulants). The forward map
:math:`F : \mathcal{S} \mapsto \mathcal{D}`
is given by `forward_transform`, and the backward (inverse) map
:math:`F^{-1} : \mathcal{D} \mapsto \mathcal{S}`
    is provided by `backward_transform`. The transformations are intended for use in lattice Boltzmann collision
    operators: `forward_transform` maps pre-collision populations to the required moment
    space (possibly by several consecutive transformations), and `backward_transform` maps post-
    collision quantities back to populations.
### Transformations
Transformation equations must be returned by implementations of `forward_transform` and
`backward_transform` as `AssignmentCollection`s.
- `forward_transform` returns an AssignmentCollection which depends on quantities of the domain
:math:`\mathcal{S}` and contains the equations to map them to the codomain :math:`\mathcal{D}`.
- `backward_transform` is the inverse of `forward_transform` and returns an AssignmentCollection
which maps quantities of the codomain :math:`\mathcal{D}` back to the domain :math:`\mathcal{S}`.
### Absorption of Conserved Quantity Equations
Transformations from the population space to any moment space may *absorb* the equations defining
the macroscopic quantities entering the equilibrium (typically the density :math:`\rho` and the
velocity :math:`\vec{u}`). This means that the `forward_transform` will possibly rewrite the
assignments given in the constructor argument `conserved_quantity_equations` to reduce
the total operation count. For example, in the transformation step from populations to
raw moments (see `PdfsToRawMomentsTransform`), :math:`\rho` can be aliased as the zeroth-order moment
:math:`m_{000}`. Assignments to the conserved quantities will then be part of the AssignmentCollection
returned by `forward_transform` and need not be added to the collision rule separately.
### Simplification
Both `forward_transform` and `backward_transform` expect a keyword argument `simplification`
which can be used to direct simplification steps applied during the derivation of the transformation
equations. Possible values are:
- `False` or `'none'`: No simplification is to be applied
- `True` or `'default'`: A default simplification strategy specific to the implementation is applied.
The actual simplification steps depend strongly on the nature of the equations. They are defined by
the implementation. It is the responsibility of the implementation to select the most effective
simplification strategy.
- `'default_with_cse'`: Same as `'default'`, but with an additional pass of common subexpression elimination.
"""
def __init__(self, stencil, moment_exponents,
equilibrium_density,
equilibrium_velocity,
conserved_quantity_equations=None,
**kwargs):
self.moment_exponents = sorted(list(moment_exponents), key=sum)
self.stencil = stencil
self.dim = len(stencil[0])
self.cqe = conserved_quantity_equations
self.equilibrium_density = equilibrium_density
self.equilibrium_velocity = equilibrium_velocity
@abstractmethod
def forward_transform(self, *args, **kwargs):
raise NotImplementedError("forward_transform must be implemented in a subclass")
@abstractmethod
def backward_transform(self, *args, **kwargs):
raise NotImplementedError("backward_transform must be implemented in a subclass")
@property
def absorbs_conserved_quantity_equations(self):
"""
Whether or not the given conserved quantity equations will be included in
the assignment collection returned by `forward_transform`, possibly in simplified
form.
"""
return False
@property
def _default_simplification(self):
return SimplificationStrategy()
def _get_simp_strategy(self, simplification, direction=None):
if isinstance(simplification, bool):
simplification = 'default' if simplification else 'none'
if simplification == 'default' or simplification == 'default_with_cse':
simp = self._default_simplification if direction is None else self._default_simplification[direction]
if simplification == 'default_with_cse':
simp.add(sympy_cse)
return simp
else:
return None
class PdfsToCentralMomentsByMatrix(AbstractMomentTransform):
def __init__(self, stencil, moment_exponents, equilibrium_density, equilibrium_velocity, **kwargs):
assert len(moment_exponents) == len(stencil), 'Number of moments must match stencil'
super(PdfsToCentralMomentsByMatrix, self).__init__(
stencil, moment_exponents, equilibrium_density, equilibrium_velocity, **kwargs)
moment_matrix_without_shift = moment_matrix(self.moment_exponents, self.stencil)
shift_matrix = set_up_shift_matrix(self.moment_exponents, self.stencil, equilibrium_velocity)
self.forward_matrix = moment_matrix(self.moment_exponents, self.stencil, equilibrium_velocity)
self.backward_matrix = moment_matrix_without_shift.inv() * shift_matrix.inv()
def forward_transform(self, pdf_symbols, moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT,
simplification=True, subexpression_base='sub_f_to_k'):
simplification = self._get_simp_strategy(simplification)
f_vec = sp.Matrix(pdf_symbols)
central_moments = self.forward_matrix * f_vec
main_assignments = [Assignment(sq_sym(moment_symbol_base, e), eq)
for e, eq in zip(self.moment_exponents, central_moments)]
symbol_gen = SymbolGen(subexpression_base)
ac = AssignmentCollection(main_assignments, subexpression_symbol_generator=symbol_gen)
if simplification:
ac = simplification.apply(ac)
return ac
def backward_transform(self, pdf_symbols, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT,
simplification=True, subexpression_base='sub_k_to_f'):
simplification = self._get_simp_strategy(simplification)
moments = [sq_sym(moment_symbol_base, exp) for exp in self.moment_exponents]
moment_vec = sp.Matrix(moments)
pdfs_from_moments = self.backward_matrix * moment_vec
main_assignments = [Assignment(f, eq) for f, eq in zip(pdf_symbols, pdfs_from_moments)]
symbol_gen = SymbolGen(subexpression_base)
ac = AssignmentCollection(main_assignments, subexpression_symbol_generator=symbol_gen)
if simplification:
ac = simplification.apply(ac)
return ac
@property
def _default_simplification(self):
simplification = SimplificationStrategy()
simplification.add(add_subexpressions_for_divisions)
return simplification
# end class PdfsToCentralMomentsByMatrix
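# Illustrative usage sketch (not part of the library): build the matrix-based central moment
# transform for a D2Q9 stencil and derive both transformation directions. The exponent set,
# the symbol names and the pdf symbols are assumptions chosen for this example.
def _example_matrix_central_moment_transform():
    from itertools import product
    from lbmpy.stencils import get_stencil
    stencil = get_stencil("D2Q9")
    # all exponent tuples with components up to second order -> 9 moments for D2Q9
    moment_exponents = list(product([0, 1, 2], repeat=2))
    rho = sp.Symbol("rho")
    u = sp.symbols("u_:2")
    pdfs = sp.symbols("f_:9")
    transform = PdfsToCentralMomentsByMatrix(stencil, moment_exponents, rho, u)
    forward_ac = transform.forward_transform(pdfs)
    backward_ac = transform.backward_transform(pdfs)
    return forward_ac, backward_ac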
class FastCentralMomentTransform(AbstractMomentTransform):
def __init__(self, stencil,
moment_exponents,
equilibrium_density,
equilibrium_velocity,
conserved_quantity_equations=None,
**kwargs):
assert len(moment_exponents) == len(stencil), 'Number of moments must match stencil'
super(FastCentralMomentTransform, self).__init__(
stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
conserved_quantity_equations=conserved_quantity_equations, **kwargs)
self.mat_transform = PdfsToCentralMomentsByMatrix(
stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
conserved_quantity_equations=conserved_quantity_equations, **kwargs)
def forward_transform(self, pdf_symbols, moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT,
simplification=True, subexpression_base='sub_f_to_k'):
simplification = self._get_simp_strategy(simplification, 'forward')
def _partial_kappa_symbol(fixed_directions, remaining_exponents):
fixed_str = '_'.join(str(direction) for direction in fixed_directions).replace('-', 'm')
exp_str = '_'.join(str(exp) for exp in remaining_exponents).replace('-', 'm')
return sp.Symbol(f"partial_kappa_{fixed_str}_e_{exp_str}")
subexpressions_dict = dict()
main_assignments = []
def collect_partial_sums(exponents, dimension=0, fixed_directions=tuple()):
if dimension == self.dim:
# Base Case
if fixed_directions in self.stencil:
return pdf_symbols[self.stencil.index(fixed_directions)]
else:
return 0
else:
# Recursive Case
summation = sp.sympify(0)
for d in [-1, 0, 1]:
next_partial = collect_partial_sums(
exponents, dimension=dimension + 1, fixed_directions=fixed_directions + (d,))
summation += next_partial * (d - self.equilibrium_velocity[dimension])**exponents[dimension]
if dimension == 0:
lhs_symbol = sq_sym(moment_symbol_base, exponents)
main_assignments.append(Assignment(lhs_symbol, summation))
else:
lhs_symbol = _partial_kappa_symbol(fixed_directions, exponents[dimension:])
subexpressions_dict[lhs_symbol] = summation
return lhs_symbol
for e in self.moment_exponents:
collect_partial_sums(e)
subexpressions = [Assignment(lhs, rhs) for lhs, rhs in subexpressions_dict.items()]
symbol_gen = SymbolGen(subexpression_base)
ac = AssignmentCollection(main_assignments, subexpressions=subexpressions,
subexpression_symbol_generator=symbol_gen)
if simplification:
ac = self._simplify_lower_order_moments(ac, moment_symbol_base)
ac = simplification.apply(ac)
return ac
def backward_transform(self, pdf_symbols, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT,
simplification=True, subexpression_base='sub_k_to_f'):
simplification = self._get_simp_strategy(simplification, 'backward')
raw_equations = self.mat_transform.backward_transform(
pdf_symbols, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT, simplification=False)
raw_equations = raw_equations.new_without_subexpressions()
symbol_gen = SymbolGen(subexpression_base)
ac = self._split_backward_equations(raw_equations, symbol_gen)
if simplification:
ac = simplification.apply(ac)
return ac
# ----------------------------- Private Members -----------------------------
@property
def _default_simplification(self):
forward_simp = SimplificationStrategy()
forward_simp.add(add_subexpressions_for_divisions)
backward_simp = SimplificationStrategy()
backward_simp.add(add_subexpressions_for_divisions)
backward_simp.add(add_subexpressions_for_constants)
return {
'forward': forward_simp,
'backward': backward_simp
}
def _simplify_lower_order_moments(self, ac, moment_base):
if self.cqe is None:
return ac
f_to_cm_dict = ac.main_assignments_dict
f_to_cm_dict_reduced = ac.new_without_subexpressions().main_assignments_dict
moment_symbols = [sq_sym(moment_base, e) for e in moments_up_to_order(1, dim=self.dim)]
cqe_subs = self.cqe.new_without_subexpressions().main_assignments_dict
for m in moment_symbols:
m_eq = fast_subs(fast_subs(f_to_cm_dict_reduced[m], cqe_subs), cqe_subs)
m_eq = m_eq.expand().cancel()
for cqe_sym, cqe_exp in cqe_subs.items():
m_eq = subs_additive(m_eq, cqe_sym, cqe_exp)
f_to_cm_dict[m] = m_eq
main_assignments = [Assignment(lhs, rhs) for lhs, rhs in f_to_cm_dict.items()]
return ac.copy(main_assignments=main_assignments)
def _split_backward_equations_recursive(self, assignment, all_subexpressions,
stencil_direction, subexp_symgen, known_coeffs_dict,
step=0):
# Base Case
if step == self.dim:
return assignment
# Recursive Case
u = self.equilibrium_velocity[-1 - step]
d = stencil_direction[-1 - step]
one = sp.Integer(1)
two = sp.Integer(2)
# Factors to group terms by
grouping_factors = {
-1: [one, 2 * u - 1, u**2 - u],
0: [-one, -2 * u, 1 - u**2],
1: [one, 2 * u + 1, u**2 + u]
}
factors = grouping_factors[d]
# Common Integer factor to extract from all groups
common_factor = one if d == 0 else two
# Proxy for factor grouping
v = sp.Symbol('v')
square_factor_eq = (factors[2] - v**2)
lin_factor_eq = (factors[1] - v)
sub_for_u_sq = sp.solve(square_factor_eq, u**2)[0]
sub_for_u = sp.solve(lin_factor_eq, u)[0]
subs = {u**2: sub_for_u_sq, u: sub_for_u}
rhs_grouped_by_v = assignment.rhs.subs(subs).expand().collect(v)
new_rhs = sp.Integer(0)
for k in range(3):
coeff = rhs_grouped_by_v.coeff(v, k)
coeff_subexp = common_factor * coeff
# Explicitly divide out the constant factor in the zero case
if k == 0:
coeff_subexp = coeff_subexp / factors[0]
# MEMOISATION:
# The subexpression just generated might already have been found
# If so, reuse the existing symbol and skip forward.
# Otherwise, create it anew and continue recursively
coeff_symb = known_coeffs_dict.get(coeff_subexp, None)
if coeff_symb is None:
coeff_symb = next(subexp_symgen)
known_coeffs_dict[coeff_subexp] = coeff_symb
coeff_assignment = Assignment(coeff_symb, coeff_subexp)
# Recursively split the coefficient term
coeff_assignment = self._split_backward_equations_recursive(
coeff_assignment, all_subexpressions, stencil_direction, subexp_symgen,
known_coeffs_dict, step=step + 1)
all_subexpressions.append(coeff_assignment)
new_rhs += factors[k] * coeff_symb
new_rhs = sp.Rational(1, common_factor) * new_rhs
return Assignment(assignment.lhs, new_rhs)
def _split_backward_equations(self, backward_assignments, subexp_symgen):
all_subexpressions = []
split_main_assignments = []
known_coeffs_dict = dict()
for asm, stencil_dir in zip(backward_assignments, self.stencil):
split_asm = self._split_backward_equations_recursive(
asm, all_subexpressions, stencil_dir, subexp_symgen, known_coeffs_dict)
split_main_assignments.append(split_asm)
ac = AssignmentCollection(split_main_assignments, subexpressions=all_subexpressions,
subexpression_symbol_generator=subexp_symgen)
ac.topological_sort(sort_main_assignments=False)
return ac
# end class FastCentralMomentTransform
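# Illustrative usage sketch: the same D2Q9 setup as above, but using the recursive partial-sum
# formulation and requesting an additional common subexpression elimination pass. All symbol
# names are assumptions chosen for this example.
def _example_fast_central_moment_transform():
    from itertools import product
    from lbmpy.stencils import get_stencil
    stencil = get_stencil("D2Q9")
    moment_exponents = list(product([0, 1, 2], repeat=2))
    rho, u = sp.Symbol("rho"), sp.symbols("u_:2")
    pdfs = sp.symbols("f_:9")
    transform = FastCentralMomentTransform(stencil, moment_exponents, rho, u)
    return transform.forward_transform(pdfs, simplification='default_with_cse')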
class PdfsToRawMomentsTransform(AbstractMomentTransform):
def __init__(self, stencil, moment_exponents,
equilibrium_density,
equilibrium_velocity,
conserved_quantity_equations=None,
**kwargs):
assert len(moment_exponents) == len(stencil), 'Number of moments must match stencil'
super(PdfsToRawMomentsTransform, self).__init__(
stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
conserved_quantity_equations=conserved_quantity_equations,
**kwargs)
self.inv_moment_matrix = moment_matrix(self.moment_exponents, self.stencil).inv()
@property
def absorbs_conserved_quantity_equations(self):
return True
def get_cq_to_moment_symbols_dict(self, moment_symbol_base):
if self.cqe is None:
return dict()
rho = self.equilibrium_density
u = self.equilibrium_velocity
cq_symbols_to_moments = dict()
if isinstance(rho, sp.Symbol) and rho in self.cqe.defined_symbols:
cq_symbols_to_moments[rho] = sq_sym(moment_symbol_base, (0,) * self.dim)
for d, u_sym in enumerate(u):
if isinstance(u_sym, sp.Symbol) and u_sym in self.cqe.defined_symbols:
cq_symbols_to_moments[u_sym] = sq_sym(moment_symbol_base, tuple(
(1 if i == d else 0) for i in range(self.dim)))
return cq_symbols_to_moments
def forward_transform(self, pdf_symbols, moment_symbol_base=PRE_COLLISION_RAW_MOMENT,
simplification=True, subexpression_base='sub_f_to_m'):
simplification = self._get_simp_strategy(simplification, 'forward')
def _partial_kappa_symbol(fixed_directions, remaining_exponents):
fixed_str = '_'.join(str(direction) for direction in fixed_directions).replace('-', 'm')
exp_str = '_'.join(str(exp) for exp in remaining_exponents).replace('-', 'm')
return sp.Symbol(f"partial_{moment_symbol_base}_{fixed_str}_e_{exp_str}")
partial_sums_dict = dict()
main_assignments = self.cqe.main_assignments.copy() if self.cqe is not None else []
subexpressions = self.cqe.subexpressions.copy() if self.cqe is not None else []
def collect_partial_sums(exponents, dimension=0, fixed_directions=tuple()):
if dimension == self.dim:
# Base Case
if fixed_directions in self.stencil:
return pdf_symbols[self.stencil.index(fixed_directions)]
else:
return 0
else:
# Recursive Case
summation = sp.sympify(0)
for d in [-1, 0, 1]:
next_partial = collect_partial_sums(
exponents, dimension=dimension + 1, fixed_directions=fixed_directions + (d,))
summation += next_partial * d ** exponents[dimension]
if dimension == 0:
lhs_symbol = sq_sym(moment_symbol_base, exponents)
main_assignments.append(Assignment(lhs_symbol, summation))
else:
lhs_symbol = _partial_kappa_symbol(fixed_directions, exponents[dimension:])
partial_sums_dict[lhs_symbol] = summation
return lhs_symbol
for e in self.moment_exponents:
collect_partial_sums(e)
subexpressions += [Assignment(lhs, rhs) for lhs, rhs in partial_sums_dict.items()]
symbol_gen = SymbolGen(subexpression_base)
ac = AssignmentCollection(main_assignments, subexpressions=subexpressions,
subexpression_symbol_generator=symbol_gen)
ac.add_simplification_hint('cq_symbols_to_moments', self.get_cq_to_moment_symbols_dict(moment_symbol_base))
if simplification:
ac = simplification.apply(ac)
return ac
def backward_transform(self, pdf_symbols, moment_symbol_base=POST_COLLISION_RAW_MOMENT,
simplification=True, subexpression_base='sub_k_to_f'):
simplification = self._get_simp_strategy(simplification, 'backward')
post_collision_moments = [sq_sym(moment_symbol_base, e) for e in self.moment_exponents]
rm_to_f_vec = self.inv_moment_matrix * sp.Matrix(post_collision_moments)
main_assignments = [Assignment(f, eq) for f, eq in zip(pdf_symbols, rm_to_f_vec)]
symbol_gen = SymbolGen(subexpression_base)
ac = AssignmentCollection(main_assignments, subexpression_symbol_generator=symbol_gen)
ac.add_simplification_hint('stencil', self.stencil)
ac.add_simplification_hint('post_collision_pdf_symbols', pdf_symbols)
if simplification:
ac = simplification.apply(ac)
return ac
# ----------------------------- Private Members -----------------------------
@property
def _default_simplification(self):
forward_simp = SimplificationStrategy()
forward_simp.add(substitute_moments_in_conserved_quantity_equations)
forward_simp.add(add_subexpressions_for_divisions)
backward_simp = SimplificationStrategy()
backward_simp.add(split_pdf_main_assignments_by_symmetry)
backward_simp.add(add_subexpressions_for_constants)
return {
'forward': forward_simp,
'backward': backward_simp
}
# end class PdfsToRawMomentsTransform
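# Illustrative usage sketch: a raw moment transform that absorbs the conserved quantity
# equations, so that density and velocity are aliased to the zeroth- and first-order moments.
# DensityVelocityComputation and its constructor arguments are assumptions about the
# surrounding lbmpy API.
def _example_raw_moment_transform_with_cqe():
    from itertools import product
    from lbmpy.stencils import get_stencil
    from lbmpy.methods.conservedquantitycomputation import DensityVelocityComputation
    stencil = get_stencil("D2Q9")
    pdfs = sp.symbols("f_:9")
    cqc = DensityVelocityComputation(stencil, True, None)
    cqe = cqc.equilibrium_input_equations_from_pdfs(pdfs)
    rho = cqc.zeroth_order_moment_symbol
    u = cqc.first_order_moment_symbols
    moment_exponents = list(product([0, 1, 2], repeat=2))
    transform = PdfsToRawMomentsTransform(stencil, moment_exponents, rho, u,
                                          conserved_quantity_equations=cqe)
    assert transform.absorbs_conserved_quantity_equations
    return transform.forward_transform(pdfs)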
class PdfsToCentralMomentsByShiftMatrix(AbstractMomentTransform):
def __init__(self, stencil, moment_exponents,
equilibrium_density,
equilibrium_velocity,
conserved_quantity_equations=None,
**kwargs):
assert len(moment_exponents) == len(stencil), 'Number of moments must match stencil'
super(PdfsToCentralMomentsByShiftMatrix, self).__init__(
stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
conserved_quantity_equations=conserved_quantity_equations,
**kwargs)
self.raw_moment_transform = PdfsToRawMomentsTransform(
stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
conserved_quantity_equations=conserved_quantity_equations,
**kwargs)
self.shift_matrix = set_up_shift_matrix(self.moment_exponents, self.stencil, self.equilibrium_velocity)
self.inv_shift_matrix = self.shift_matrix.inv()
@property
def absorbs_conserved_quantity_equations(self):
return True
def forward_transform(self, pdf_symbols,
moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT,
simplification=True,
subexpression_base='sub_f_to_k',
raw_moment_base=PRE_COLLISION_RAW_MOMENT):
simplification = self._get_simp_strategy(simplification, 'forward')
central_moment_base = moment_symbol_base
symbolic_rms = [sq_sym(raw_moment_base, e) for e in self.moment_exponents]
symbolic_cms = [sq_sym(central_moment_base, e) for e in self.moment_exponents]
rm_ac = self.raw_moment_transform.forward_transform(pdf_symbols, raw_moment_base, False, subexpression_base)
cq_symbols_to_moments = self.raw_moment_transform.get_cq_to_moment_symbols_dict(raw_moment_base)
rm_to_cm_vec = self.shift_matrix * sp.Matrix(symbolic_rms)
cq_subs = dict()
if simplification:
rm_ac = substitute_moments_in_conserved_quantity_equations(rm_ac)
# Compute replacements for conserved moments in terms of the CQE
rm_asm_dict = rm_ac.main_assignments_dict
for cq_sym, moment_sym in cq_symbols_to_moments.items():
cq_eq = rm_asm_dict[cq_sym]
solutions = sp.solve(cq_eq - cq_sym, moment_sym)
if len(solutions) > 0:
cq_subs[moment_sym] = solutions[0]
rm_to_cm_vec = fast_subs(rm_to_cm_vec, cq_subs)
rm_to_cm_dict = {cm: rm for cm, rm in zip(symbolic_cms, rm_to_cm_vec)}
if simplification:
rm_to_cm_dict = self._simplify_raw_to_central_moments(
rm_to_cm_dict, self.moment_exponents, raw_moment_base, central_moment_base)
rm_to_cm_dict = self._undo_remaining_cq_subexpressions(rm_to_cm_dict, cq_subs)
subexpressions = rm_ac.all_assignments
symbol_gen = SymbolGen(subexpression_base)
ac = AssignmentCollection(rm_to_cm_dict, subexpressions=subexpressions,
subexpression_symbol_generator=symbol_gen)
if simplification:
ac = simplification.apply(ac)
return ac
def backward_transform(self, pdf_symbols,
moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT,
simplification=True,
subexpression_base='sub_k_to_f',
raw_moment_base=POST_COLLISION_RAW_MOMENT):
simplification = self._get_simp_strategy(simplification, 'backward')
central_moment_base = moment_symbol_base
symbolic_rms = [sq_sym(raw_moment_base, e) for e in self.moment_exponents]
symbolic_cms = [sq_sym(central_moment_base, e) for e in self.moment_exponents]
cm_to_rm_vec = self.inv_shift_matrix * sp.Matrix(symbolic_cms)
cm_to_rm_dict = {rm: eq for rm, eq in zip(symbolic_rms, cm_to_rm_vec)}
if simplification:
cm_to_rm_dict = self._factor_backward_eqs_by_velocities(symbolic_rms, cm_to_rm_dict)
rm_ac = self.raw_moment_transform.backward_transform(pdf_symbols, raw_moment_base, False, subexpression_base)
cm_to_rm_assignments = [Assignment(lhs, rhs) for lhs, rhs in cm_to_rm_dict.items()]
subexpressions = cm_to_rm_assignments + rm_ac.subexpressions
ac = rm_ac.copy(subexpressions=subexpressions)
if simplification:
ac = simplification.apply(ac)
return ac
# ----------------------------- Private Members -----------------------------
def _simplify_raw_to_central_moments(self, rm_to_cm_dict, moment_exponents, raw_moment_base, central_moment_base):
for cm in moment_exponents:
if sum(cm) < 2:
continue
cm_symb = sq_sym(central_moment_base, cm)
cm_asm_rhs = rm_to_cm_dict[cm_symb]
for m in contained_moments(cm, min_order=2)[::-1]:
contained_rm_symb = sq_sym(raw_moment_base, m)
contained_cm_symb = sq_sym(central_moment_base, m)
contained_cm_eq = rm_to_cm_dict[contained_cm_symb]
rm_in_terms_of_cm = sp.solve(contained_cm_eq - contained_cm_symb, contained_rm_symb)[0]
cm_asm_rhs = cm_asm_rhs.subs({contained_rm_symb: rm_in_terms_of_cm}).expand()
rm_to_cm_dict[cm_symb] = cm_asm_rhs
return rm_to_cm_dict
def _undo_remaining_cq_subexpressions(self, rm_to_cm_dict, cq_subs):
for cm, cm_eq in rm_to_cm_dict.items():
for rm, rm_subexp in cq_subs.items():
cm_eq = subs_additive(cm_eq, rm, rm_subexp)
rm_to_cm_dict[cm] = cm_eq
return rm_to_cm_dict
def _factor_backward_eqs_by_velocities(self, symbolic_rms, cm_to_rm_dict, required_match_replacement=0.75):
velocity_by_occurences = dict()
for rm, rm_eq in cm_to_rm_dict.items():
velocity_by_occurences[rm] = sorted(self.equilibrium_velocity, key=rm_eq.count, reverse=True)
for d in range(self.dim):
for rm, rm_eq in cm_to_rm_dict.items():
u_sorted = velocity_by_occurences[rm]
cm_to_rm_dict[rm] = rm_eq.expand().collect(u_sorted[d])
for i, rm1 in enumerate(symbolic_rms):
for _, rm2 in enumerate(symbolic_rms[i + 1:]):
cm_to_rm_dict[rm2] = subs_additive(
cm_to_rm_dict[rm2], rm1, cm_to_rm_dict[rm1],
required_match_replacement=required_match_replacement)
return cm_to_rm_dict
@property
def _default_simplification(self):
forward_simp = SimplificationStrategy()
forward_simp.add(add_subexpressions_for_divisions)
backward_simp = SimplificationStrategy()
backward_simp.add(split_pdf_main_assignments_by_symmetry)
backward_simp.add(add_subexpressions_for_constants)
return {
'forward': forward_simp,
'backward': backward_simp
}
# end class PdfsToCentralMomentsByShiftMatrix
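# Illustrative usage sketch: the shift-matrix route first maps between populations and raw
# moments and then shifts between raw and central moment space. Only the backward direction
# is shown here; symbol names are assumptions chosen for this example.
def _example_shift_matrix_backward_transform():
    from itertools import product
    from lbmpy.stencils import get_stencil
    stencil = get_stencil("D2Q9")
    moment_exponents = list(product([0, 1, 2], repeat=2))
    rho, u = sp.Symbol("rho"), sp.symbols("u_:2")
    pdfs = sp.symbols("f_:9")
    transform = PdfsToCentralMomentsByShiftMatrix(stencil, moment_exponents, rho, u)
    return transform.backward_transform(pdfs)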
from collections import OrderedDict
import sympy as sp
from lbmpy.maxwellian_equilibrium import get_weights
from lbmpy.methods.abstractlbmethod import AbstractLbMethod, LbmCollisionRule, RelaxationInfo
from lbmpy.methods.conservedquantitycomputation import AbstractConservedQuantityComputation
from lbmpy.moments import MOMENT_SYMBOLS, moment_matrix
from pystencils import Assignment
from pystencils.sympyextensions import subs_additive
class MomentBasedLbMethod(AbstractLbMethod):
def __init__(self, stencil, moment_to_relaxation_info_dict, conserved_quantity_computation=None, force_model=None):
"""
        Moment-based LBM is a class representing the single (SRT), two (TRT) and multiple (MRT) relaxation-time methods.
These methods work by transforming the pdfs into moment space using a linear transformation. In the moment
space each component (moment) is relaxed to an equilibrium moment by a certain relaxation rate. These
equilibrium moments can e.g. be determined by taking the equilibrium moments of the continuous Maxwellian.
Args:
stencil: see :func:`lbmpy.stencils.get_stencil`
moment_to_relaxation_info_dict: a dictionary mapping moments in either tuple or polynomial formulation
to a RelaxationInfo, which consists of the corresponding equilibrium moment
and a relaxation rate
conserved_quantity_computation: instance of :class:`lbmpy.methods.AbstractConservedQuantityComputation`.
This determines how conserved quantities are computed, and defines
the symbols used in the equilibrium moments like e.g. density and velocity
force_model: force model instance, or None if no forcing terms are required
"""
assert isinstance(conserved_quantity_computation, AbstractConservedQuantityComputation)
super(MomentBasedLbMethod, self).__init__(stencil)
self._forceModel = force_model
self._momentToRelaxationInfoDict = OrderedDict(moment_to_relaxation_info_dict.items())
self._conservedQuantityComputation = conserved_quantity_computation
self._weights = None
@property
def force_model(self):
return self._forceModel
@property
def relaxation_info_dict(self):
return self._momentToRelaxationInfoDict
@property
def conserved_quantity_computation(self):
return self._conservedQuantityComputation
@property
def moments(self):
return tuple(self._momentToRelaxationInfoDict.keys())
@property
def moment_equilibrium_values(self):
return tuple([e.equilibrium_value for e in self._momentToRelaxationInfoDict.values()])
@property
def relaxation_rates(self):
return tuple([e.relaxation_rate for e in self._momentToRelaxationInfoDict.values()])
@property
def zeroth_order_equilibrium_moment_symbol(self, ):
return self._conservedQuantityComputation.zeroth_order_moment_symbol
@property
def first_order_equilibrium_moment_symbols(self, ):
return self._conservedQuantityComputation.first_order_moment_symbols
@property
def weights(self):
if self._weights is None:
self._weights = self._compute_weights()
return self._weights
def override_weights(self, weights):
assert len(weights) == len(self.stencil)
self._weights = weights
def get_equilibrium(self, conserved_quantity_equations=None, include_force_terms=False):
relaxation_matrix = sp.eye(len(self.relaxation_rates))
return self._collision_rule_with_relaxation_matrix(relaxation_matrix,
conserved_quantity_equations=conserved_quantity_equations,
include_force_terms=include_force_terms)
def get_equilibrium_terms(self):
equilibrium = self.get_equilibrium()
return sp.Matrix([eq.rhs for eq in equilibrium.main_assignments])
def get_collision_rule(self, conserved_quantity_equations=None, pre_simplification=True):
d = self.relaxation_matrix
relaxation_rate_sub_expressions, d = self._generate_relaxation_matrix(d, pre_simplification)
ac = self._collision_rule_with_relaxation_matrix(d, relaxation_rate_sub_expressions,
True, conserved_quantity_equations)
return ac
def set_zeroth_moment_relaxation_rate(self, relaxation_rate):
e = sp.Rational(1, 1)
prev_entry = self._momentToRelaxationInfoDict[e]
new_entry = RelaxationInfo(prev_entry[0], relaxation_rate)
self._momentToRelaxationInfoDict[e] = new_entry
def set_first_moment_relaxation_rate(self, relaxation_rate):
for e in MOMENT_SYMBOLS[:self.dim]:
assert e in self._momentToRelaxationInfoDict, "First moments are not relaxed separately by this method"
for e in MOMENT_SYMBOLS[:self.dim]:
prev_entry = self._momentToRelaxationInfoDict[e]
new_entry = RelaxationInfo(prev_entry[0], relaxation_rate)
self._momentToRelaxationInfoDict[e] = new_entry
def set_conserved_moments_relaxation_rate(self, relaxation_rate):
self.set_zeroth_moment_relaxation_rate(relaxation_rate)
self.set_first_moment_relaxation_rate(relaxation_rate)
def set_force_model(self, force_model):
self._forceModel = force_model
@property
def collision_matrix(self):
pdfs_to_moments = self.moment_matrix
d = self.relaxation_matrix
return pdfs_to_moments.inv() * d * pdfs_to_moments
@property
def inverse_collision_matrix(self):
pdfs_to_moments = self.moment_matrix
inverse_relaxation_matrix = self.relaxation_matrix.inv()
return pdfs_to_moments.inv() * inverse_relaxation_matrix * pdfs_to_moments
@property
def moment_matrix(self):
return moment_matrix(self.moments, self.stencil)
@property
def relaxation_matrix(self):
d = sp.zeros(len(self.relaxation_rates))
for i in range(0, len(self.relaxation_rates)):
d[i, i] = self.relaxation_rates[i]
return d
@property
def is_orthogonal(self):
return (self.moment_matrix * self.moment_matrix.T).is_diagonal()
@property
def is_weighted_orthogonal(self):
w = get_weights(self.stencil, sp.Rational(1, 3))
return (sp.matrix_multiply_elementwise(self.moment_matrix, sp.Matrix([w] * len(w))) * self.moment_matrix.T
).is_diagonal()
def __getstate__(self):
# Workaround for a bug in joblib
self._momentToRelaxationInfoDictToPickle = [i for i in self._momentToRelaxationInfoDict.items()]
return self.__dict__
def _repr_html_(self):
table = """
<table style="border:none; width: 100%">
<tr {nb}>
<th {nb} >Moment</th>
<th {nb} >Eq. Value </th>
<th {nb} >Relaxation Rate</th>
</tr>
{content}
</table>
"""
content = ""
for moment, (eq_value, rr) in self._momentToRelaxationInfoDict.items():
vals = {
'rr': sp.latex(rr),
'moment': sp.latex(moment),
'eq_value': sp.latex(eq_value),
'nb': 'style="border:none"',
}
content += """<tr {nb}>
<td {nb}>${moment}$</td>
<td {nb}>${eq_value}$</td>
<td {nb}>${rr}$</td>
</tr>\n""".format(**vals)
return table.format(content=content, nb='style="border:none"')
def _compute_weights(self):
replacements = self._conservedQuantityComputation.default_values
ac = self.get_equilibrium(include_force_terms=False)
ac = ac.new_with_substitutions(replacements, substitute_on_lhs=False).new_without_subexpressions()
new_assignments = [Assignment(e.lhs,
subs_additive(e.rhs, sp.sympify(1), sum(self.pre_collision_pdf_symbols),
required_match_replacement=1.0))
for e in ac.main_assignments]
ac = ac.copy(new_assignments)
weights = []
for eq in ac.main_assignments:
value = eq.rhs.expand()
assert len(value.atoms(sp.Symbol)) == 0, "Failed to compute weights " + str(value)
weights.append(value)
return weights
def _collision_rule_with_relaxation_matrix(self, d, additional_subexpressions=(), include_force_terms=True,
conserved_quantity_equations=None):
f = sp.Matrix(self.pre_collision_pdf_symbols)
pdf_to_moment = self.moment_matrix
m_eq = sp.Matrix(self.moment_equilibrium_values)
collision_rule = f + pdf_to_moment.inv() * d * (m_eq - pdf_to_moment * f)
collision_eqs = [Assignment(lhs, rhs) for lhs, rhs in zip(self.post_collision_pdf_symbols, collision_rule)]
if conserved_quantity_equations is None:
conserved_quantity_equations = self._conservedQuantityComputation.equilibrium_input_equations_from_pdfs(f)
simplification_hints = conserved_quantity_equations.simplification_hints.copy()
simplification_hints.update(self._conservedQuantityComputation.defined_symbols())
simplification_hints['relaxation_rates'] = [d[i, i] for i in range(d.rows)]
all_subexpressions = list(additional_subexpressions) + conserved_quantity_equations.all_assignments
if self._forceModel is not None and include_force_terms:
force_model_terms = self._forceModel(self)
force_term_symbols = sp.symbols("forceTerm_:%d" % (len(force_model_terms,)))
force_subexpressions = [Assignment(sym, force_model_term)
for sym, force_model_term in zip(force_term_symbols, force_model_terms)]
all_subexpressions += force_subexpressions
collision_eqs = [Assignment(eq.lhs, eq.rhs + force_term_symbol)
for eq, force_term_symbol in zip(collision_eqs, force_term_symbols)]
simplification_hints['force_terms'] = force_term_symbols
return LbmCollisionRule(self, collision_eqs, all_subexpressions,
simplification_hints)
@staticmethod
def _generate_relaxation_matrix(relaxation_matrix, keep_rr_symbolic):
"""
        For SRT and TRT the equations can be simplified more easily if the relaxation rates are symbols rather
        than numbers. In that case this function replaces the numbers in the relaxation matrix with symbols and
        also returns the subexpressions that assign the numbers to the newly introduced symbols.
"""
rr = [relaxation_matrix[i, i] for i in range(relaxation_matrix.rows)]
        if keep_rr_symbolic:
unique_relaxation_rates = set(rr)
subexpressions = {}
for rt in unique_relaxation_rates:
rt = sp.sympify(rt)
if not isinstance(rt, sp.Symbol):
rt_symbol = sp.Symbol("rr_%d" % (len(subexpressions),))
subexpressions[rt] = rt_symbol
new_rr = [subexpressions[sp.sympify(e)] if sp.sympify(e) in subexpressions else e
for e in rr]
substitutions = [Assignment(e[1], e[0]) for e in subexpressions.items()]
return substitutions, sp.diag(*new_rr)
else:
return [], relaxation_matrix
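# Illustrative usage sketch (not part of the library): constructing a TRT method through the
# high-level factory and adjusting the relaxation rate of the conserved moments.
# create_lb_method and its keyword arguments are assumptions about the surrounding lbmpy API.
def _example_moment_based_method():
    from lbmpy.creationfunctions import create_lb_method
    omega_s, omega_a = sp.symbols("omega_s, omega_a")
    method = create_lb_method(stencil='D2Q9', method='trt', relaxation_rates=[omega_s, omega_a])
    # relaxing the conserved moments with a nonzero rate does not change the hydrodynamics
    method.set_conserved_moments_relaxation_rate(omega_s)
    return method.get_collision_rule()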
def analytic_rising_speed(gravitational_acceleration, bubble_diameter, viscosity_gas):
r"""
    Calculates the analytical rising speed of a bubble, i.e. the expected terminal rising velocity.
Args:
gravitational_acceleration: the gravitational acceleration acting in the simulation scenario. Usually it gets
calculated based on dimensionless parameters which describe the scenario
bubble_diameter: the diameter of the bubble at the beginning of the simulation
viscosity_gas: the viscosity of the fluid inside the bubble
"""
result = -(gravitational_acceleration * bubble_diameter * bubble_diameter) / (12.0 * viscosity_gas)
return result
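# Illustrative numeric check: for a gravitational acceleration of -9.81e-6, a bubble diameter of
# 32 cells and a gas viscosity of 0.01 (all lattice units, chosen for this example), the expected
# terminal rising speed evaluates to roughly 0.084.
def _example_analytic_rising_speed():
    return analytic_rising_speed(-9.81e-6, 32, 0.01)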
import sympy as sp
import numpy as np
from lbmpy.forcemodels import Simple
class MultiphaseForceModel:
r"""
    A force model based on PhysRevE.96.053301. This model realises the modified equilibrium distribution,
    i.e. the force term is shifted by minus one half times the collision operator.
"""
def __init__(self, force, rho=1):
self._force = force
self._rho = rho
def __call__(self, lb_method):
stencil = lb_method.stencil
force_symp = sp.symbols("force_:{}".format(lb_method.dim))
simple = Simple(force_symp)
force = [f / self._rho for f in simple(lb_method)]
moment_matrix = lb_method.moment_matrix
relaxation_rates = sp.Matrix(np.diag(lb_method.relaxation_rates))
mrt_collision_op = moment_matrix.inv() * relaxation_rates * moment_matrix
result = -0.5 * mrt_collision_op * sp.Matrix(force) + sp.Matrix(force)
for i in range(0, len(stencil)):
result[i] = result[i].simplify()
subs_dict = dict(zip(force_symp, self._force))
for i in range(0, len(stencil)):
result[i] = result[i].subs(subs_dict)
return result
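# Illustrative usage sketch: evaluating the half-force-shifted force terms for a D2Q9 method.
# create_lb_method and its keyword arguments are assumptions about the surrounding lbmpy API;
# the force symbols and the symbolic density are chosen for this example.
def _example_multiphase_force_model():
    from lbmpy.creationfunctions import create_lb_method
    force = sp.symbols("F_:2")
    method = create_lb_method(stencil='D2Q9', method='srt', relaxation_rates=[sp.Symbol("omega")])
    force_model = MultiphaseForceModel(force, rho=sp.Symbol("rho"))
    return force_model(method)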
from pystencils.fd.derivation import FiniteDifferenceStencilDerivation
from pystencils import Assignment, AssignmentCollection
from lbmpy.maxwellian_equilibrium import get_weights
from lbmpy.fieldaccess import StreamPushTwoFieldsAccessor, CollideOnlyInplaceAccessor
import sympy as sp
import numpy as np
def chemical_potential_symbolic(phi_field, stencil, beta, kappa):
r"""
Get a symbolic expression for the chemical potential according to equation (5) in PhysRevE.96.053301.
Args:
phi_field: the phase-field on which the chemical potential is applied
stencil: stencil to derive the finite difference for the laplacian (2nd order isotropic)
beta: coefficient related to surface tension and interface thickness
kappa: coefficient related to surface tension and interface thickness
"""
dimensions = len(stencil[0])
q = len(stencil)
lap = sp.simplify(0)
for i in range(dimensions):
deriv = FiniteDifferenceStencilDerivation((i, i), stencil)
for j in range(dimensions):
# assume the stencil is symmetric
deriv.assume_symmetric(dim=j, anti_symmetric=False)
# set weights for missing degrees of freedom in the calculation and assume the stencil is isotropic
if q == 9:
res = deriv.get_stencil(isotropic=True)
lap += res.apply(phi_field.center)
elif q == 15:
deriv.set_weight((0, 0, 0), sp.Rational(-32, 27))
res = deriv.get_stencil(isotropic=True)
lap += res.apply(phi_field.center)
elif q == 19:
res = deriv.get_stencil(isotropic=True)
lap += res.apply(phi_field.center)
else:
deriv.set_weight((0, 0, 0), sp.Rational(-38, 27))
res = deriv.get_stencil(isotropic=True)
lap += res.apply(phi_field.center)
# get the chemical potential
mu = 4.0 * beta * phi_field.center * (phi_field.center - 1.0) * (phi_field.center - 0.5) - \
kappa * lap
return mu
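# Illustrative usage sketch: evaluating the chemical potential on a generic 2D phase field.
# The field name 'phi' and the values of beta and kappa are assumptions chosen for this example.
def _example_chemical_potential():
    import pystencils as ps
    from lbmpy.stencils import get_stencil
    phi = ps.fields("phi: [2D]")
    return chemical_potential_symbolic(phi, get_stencil("D2Q9"), beta=0.01, kappa=0.005)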
def isotropic_gradient_symbolic(phi_field, stencil):
r"""
Get a symbolic expression for the isotropic gradient of the phase-field
Args:
phi_field: the phase-field on which the isotropic gradient is applied
stencil: stencil to derive the finite difference for the gradient (2nd order isotropic)
"""
dimensions = len(stencil[0])
q = len(stencil)
deriv = FiniteDifferenceStencilDerivation((0,), stencil)
deriv.assume_symmetric(0, anti_symmetric=True)
deriv.assume_symmetric(1, anti_symmetric=False)
if dimensions == 3:
deriv.assume_symmetric(2, anti_symmetric=False)
# set weights for missing degrees of freedom in the calculation and assume the stencil is isotropic
    # furthermore the stencil gets rotated to obtain the y and z components
if q == 9:
res = deriv.get_stencil(isotropic=True)
grad = [res.apply(phi_field.center), res.rotate_weights_and_apply(phi_field.center, (0, 1)), 0]
elif q == 15:
res = deriv.get_stencil(isotropic=True)
grad = [res.apply(phi_field.center),
res.rotate_weights_and_apply(phi_field.center, (0, 1)),
res.rotate_weights_and_apply(phi_field.center, (1, 2))]
elif q == 19:
deriv.set_weight((0, 0, 0), sp.sympify(0))
deriv.set_weight((1, 0, 0), sp.Rational(1, 6))
res = deriv.get_stencil(isotropic=True)
grad = [res.apply(phi_field.center),
res.rotate_weights_and_apply(phi_field.center, (0, 1)),
res.rotate_weights_and_apply(phi_field.center, (1, 2))]
else:
deriv.set_weight((0, 0, 0), sp.sympify(0))
deriv.set_weight((1, 0, 0), sp.Rational(2, 9))
res = deriv.get_stencil(isotropic=True)
grad = [res.apply(phi_field.center),
res.rotate_weights_and_apply(phi_field.center, (0, 1)),
res.rotate_weights_and_apply(phi_field.center, (1, 2))]
return grad
def normalized_isotropic_gradient_symbolic(phi_field, stencil, fd_stencil=None):
r"""
Get a symbolic expression for the normalized isotropic gradient of the phase-field
Args:
phi_field: the phase-field on which the normalized isotropic gradient is applied
stencil: stencil of the lattice Boltzmann step
fd_stencil: stencil to derive the finite differences of the isotropic gradient and the laplacian of the phase
field. If it is not given the stencil of the LB method will be applied.
"""
if fd_stencil is None:
fd_stencil = stencil
tmp = (sum(map(lambda x: x * x, isotropic_gradient_symbolic(phi_field, fd_stencil))) + 1.e-32) ** 0.5
result = [x / tmp for x in isotropic_gradient_symbolic(phi_field, fd_stencil)]
return result
def pressure_force(phi_field, stencil, density_heavy, density_light, fd_stencil=None):
r"""
Get a symbolic expression for the pressure force
Args:
phi_field: phase-field
stencil: stencil of the lattice Boltzmann step
density_heavy: density of the heavier fluid
density_light: density of the lighter fluid
fd_stencil: stencil to derive the finite differences of the isotropic gradient and the laplacian of the phase
field. If it is not given the stencil of the LB method will be applied.
"""
if fd_stencil is None:
fd_stencil = stencil
iso_grad = isotropic_gradient_symbolic(phi_field, fd_stencil)
result = list(map(lambda x: sp.Rational(-1, 3) * sp.symbols("rho") * (density_heavy - density_light) * x, iso_grad))
return result
def viscous_force(lb_velocity_field, phi_field, mrt_method, tau, density_heavy, density_light, fd_stencil=None):
r"""
Get a symbolic expression for the viscous force
Args:
lb_velocity_field: hydrodynamic distribution function
phi_field: phase-field
mrt_method: mrt lattice boltzmann method used for hydrodynamics
tau: relaxation time of the hydrodynamic lattice boltzmann step
density_heavy: density of the heavier fluid
density_light: density of the lighter fluid
fd_stencil: stencil to derive the finite differences of the isotropic gradient and the laplacian of the phase
field. If it is not given the stencil of the LB method will be applied.
"""
stencil = mrt_method.stencil
dimensions = len(stencil[0])
if fd_stencil is None:
fd_stencil = stencil
iso_grad = isotropic_gradient_symbolic(phi_field, fd_stencil)
moment_matrix = mrt_method.moment_matrix
rel = mrt_method.relaxation_rates
eq = mrt_method.moment_equilibrium_values
eq = np.array(eq)
g_vals = [lb_velocity_field.center(i) for i, _ in enumerate(stencil)]
m0 = np.dot(moment_matrix.tolist(), g_vals)
m = m0 - eq
m = m * rel
non_equilibrium = np.dot(moment_matrix.inv().tolist(), m)
stress_tensor = [0] * 6
# Calculate Stress Tensor MRT
for i, d in enumerate(stencil):
stress_tensor[0] = sp.Add(stress_tensor[0], non_equilibrium[i] * (d[0] * d[0]))
stress_tensor[1] = sp.Add(stress_tensor[1], non_equilibrium[i] * (d[1] * d[1]))
if dimensions == 3:
stress_tensor[2] = sp.Add(stress_tensor[2], non_equilibrium[i] * (d[2] * d[2]))
stress_tensor[3] = sp.Add(stress_tensor[3], non_equilibrium[i] * (d[1] * d[2]))
stress_tensor[4] = sp.Add(stress_tensor[4], non_equilibrium[i] * (d[0] * d[2]))
stress_tensor[5] = sp.Add(stress_tensor[5], non_equilibrium[i] * (d[0] * d[1]))
density_difference = density_heavy - density_light
# Calculate Viscous Force MRT
fmx = (0.5 - tau) * (stress_tensor[0] * iso_grad[0]
+ stress_tensor[5] * iso_grad[1]
+ stress_tensor[4] * iso_grad[2]) * density_difference
fmy = (0.5 - tau) * (stress_tensor[5] * iso_grad[0]
+ stress_tensor[1] * iso_grad[1]
+ stress_tensor[3] * iso_grad[2]) * density_difference
fmz = (0.5 - tau) * (stress_tensor[4] * iso_grad[0]
+ stress_tensor[3] * iso_grad[1]
+ stress_tensor[2] * iso_grad[2]) * density_difference
return [fmx, fmy, fmz]
def surface_tension_force(phi_field, stencil, beta, kappa, fd_stencil=None):
r"""
Get a symbolic expression for the surface tension force
Args:
phi_field: the phase-field on which the chemical potential is applied
stencil: stencil of the lattice Boltzmann step
beta: coefficient related to surface tension and interface thickness
kappa: coefficient related to surface tension and interface thickness
fd_stencil: stencil to derive the finite differences of the isotropic gradient and the laplacian of the phase
field. If it is not given the stencil of the LB method will be applied.
"""
if fd_stencil is None:
fd_stencil = stencil
chemical_potential = chemical_potential_symbolic(phi_field, fd_stencil, beta, kappa)
iso_grad = isotropic_gradient_symbolic(phi_field, fd_stencil)
return [chemical_potential * x for x in iso_grad]
def hydrodynamic_force(lb_velocity_field, phi_field, lb_method, tau,
density_heavy, density_light, kappa, beta, body_force, fd_stencil=None):
r"""
Get a symbolic expression for the hydrodynamic force
Args:
lb_velocity_field: hydrodynamic distribution function
phi_field: phase-field
lb_method: Lattice boltzmann method used for hydrodynamics
tau: relaxation time of the hydrodynamic lattice boltzmann step
density_heavy: density of the heavier fluid
density_light: density of the lighter fluid
beta: coefficient related to surface tension and interface thickness
kappa: coefficient related to surface tension and interface thickness
body_force: force acting on the fluids. Usually the gravity
fd_stencil: stencil to derive the finite differences of the isotropic gradient and the laplacian of the phase
field. If it is not given the stencil of the LB method will be applied.
"""
stencil = lb_method.stencil
dimensions = len(stencil[0])
if fd_stencil is None:
fd_stencil = stencil
fp = pressure_force(phi_field, stencil, density_heavy, density_light, fd_stencil)
fm = viscous_force(lb_velocity_field, phi_field, lb_method, tau, density_heavy, density_light, fd_stencil)
fs = surface_tension_force(phi_field, stencil, beta, kappa, fd_stencil)
result = []
for i in range(dimensions):
result.append(fs[i] + fp[i] + fm[i] + body_force[i])
return result
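# Illustrative usage sketch: assembling the full hydrodynamic force for a D2Q9 setup. The field
# names, the SRT method obtained from create_lb_method and all numeric parameters are assumptions
# chosen for this example.
def _example_hydrodynamic_force():
    import pystencils as ps
    from lbmpy.creationfunctions import create_lb_method
    phi, g = ps.fields("phi: [2D], g(9): [2D]")
    method = create_lb_method(stencil='D2Q9', method='srt', relaxation_rates=[sp.Symbol("omega")])
    tau = sp.Symbol("tau")
    return hydrodynamic_force(g, phi, method, tau, density_heavy=1.0, density_light=0.001,
                              kappa=0.005, beta=0.01, body_force=[0, -1e-6])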
def interface_tracking_force(phi_field, stencil, interface_thickness, fd_stencil=None):
r"""
    Get a symbolic expression for the interface tracking force
Args:
phi_field: phase-field
stencil: stencil of the phase-field distribution lattice Boltzmann step
interface_thickness: interface thickness
fd_stencil: stencil to derive the finite differences of the isotropic gradient and the laplacian of the phase
field. If it is not given the stencil of the LB method will be applied.
"""
if fd_stencil is None:
fd_stencil = stencil
dimensions = len(stencil[0])
normal_fd = normalized_isotropic_gradient_symbolic(phi_field, stencil, fd_stencil)
result = []
for i in range(dimensions):
result.append(((1.0 - 4.0 * (phi_field.center - 0.5) ** 2) / interface_thickness) * normal_fd[i])
return result
def get_update_rules_velocity(src_field, u_in, lb_method, force, density, sub_iterations=2):
r"""
Get assignments to update the velocity with a force shift
Args:
src_field: the source field of the hydrodynamic distribution function
u_in: velocity field
lb_method: mrt lattice boltzmann method used for hydrodynamics
force: force acting on the hydrodynamic lb step
density: the interpolated density of the simulation
sub_iterations: number of updates of the velocity field
"""
stencil = lb_method.stencil
dimensions = len(stencil[0])
moment_matrix = lb_method.moment_matrix
eq = lb_method.moment_equilibrium_values
first_eqs = lb_method.first_order_equilibrium_moment_symbols
indices = list()
for i in range(dimensions):
indices.append(eq.index(first_eqs[i]))
src = [src_field.center(i) for i, _ in enumerate(stencil)]
m0 = np.dot(moment_matrix.tolist(), src)
update_u = list()
update_u.append(Assignment(sp.symbols("rho"), m0[0]))
index = 0
u_symp = sp.symbols("u_:{}".format(dimensions))
aleph = sp.symbols("aleph_:{}".format(dimensions * sub_iterations))
for i in range(dimensions):
update_u.append(Assignment(aleph[i], u_in.center_vector[i]))
index += 1
for k in range(sub_iterations - 1):
subs_dict = dict(zip(u_symp, aleph[k * dimensions:index]))
for i in range(dimensions):
update_u.append(Assignment(aleph[index], m0[indices[i]] + force[i].subs(subs_dict) / density / 2))
index += 1
subs_dict = dict(zip(u_symp, aleph[index - dimensions:index]))
for i in range(dimensions):
update_u.append(Assignment(u_symp[i], m0[indices[i]] + force[i].subs(subs_dict) / density / 2))
# update_u.append(Assignment(u_in.center_vector[i], m0[indices[i]] + force[i].subs(subs_dict) / density / 2))
return update_u
def get_collision_assignments_hydro(lb_method, density, velocity_input, force, sub_iterations, symbolic_fields,
kernel_type):
r"""
Get collision assignments for the hydrodynamic lattice Boltzmann step. Here the force gets applied in the moment
space. Afterwards the transformation back to the pdf space happens.
Args:
lb_method: moment based lattice Boltzmann method
density: the interpolated density of the simulation
        velocity_input: velocity field for the hydrodynamic and Allen-Cahn LB step
        force: force vector containing the sum of the surface tension, pressure, viscous and body force contributions
sub_iterations: number of updates of the velocity field
symbolic_fields: PDF fields for source and destination
kernel_type: collide_stream_push or collide_only
"""
stencil = lb_method.stencil
dimensions = len(stencil[0])
src_field = symbolic_fields['symbolic_field']
dst_field = symbolic_fields['symbolic_temporary_field']
moment_matrix = lb_method.moment_matrix
rel = lb_method.relaxation_rates
eq = lb_method.moment_equilibrium_values
first_eqs = lb_method.first_order_equilibrium_moment_symbols
indices = list()
for i in range(dimensions):
indices.append(eq.index(first_eqs[i]))
eq = np.array(eq)
g_vals = [src_field.center(i) for i, _ in enumerate(stencil)]
m0 = np.dot(moment_matrix.tolist(), g_vals)
mf = np.zeros(len(stencil), dtype=object)
for i in range(dimensions):
mf[indices[i]] = force[i] / density
m = sp.symbols("m_:{}".format(len(stencil)))
update_m = get_update_rules_velocity(src_field, velocity_input, lb_method, force,
density, sub_iterations=sub_iterations)
u_symp = sp.symbols("u_:{}".format(dimensions))
for i in range(0, len(stencil)):
update_m.append(Assignment(m[i], m0[i] - (m0[i] - eq[i] + mf[i] / 2) * rel[i] + mf[i]))
update_g = list()
var = np.dot(moment_matrix.inv().tolist(), m)
if kernel_type == 'collide_stream_push':
push_accessor = StreamPushTwoFieldsAccessor()
post_collision_accesses = push_accessor.write(dst_field, stencil)
else:
collide_accessor = CollideOnlyInplaceAccessor()
post_collision_accesses = collide_accessor.write(src_field, stencil)
for i in range(0, len(stencil)):
update_g.append(Assignment(post_collision_accesses[i], var[i]))
for i in range(dimensions):
update_g.append(Assignment(velocity_input.center_vector[i], u_symp[i]))
hydro_lb_update_rule = AssignmentCollection(main_assignments=update_g,
subexpressions=update_m)
return hydro_lb_update_rule
def initializer_kernel_phase_field_lb(lb_phase_field, phi_field, velocity_field, mrt_method, interface_thickness,
fd_stencil=None):
r"""
Returns an assignment list for initializing the phase-field distribution functions
Args:
lb_phase_field: source field of phase-field distribution function
phi_field: phase-field
velocity_field: velocity field
mrt_method: lattice Boltzmann method of the phase-field lattice Boltzmann step
interface_thickness: interface thickness
fd_stencil: stencil to derive the finite differences of the isotropic gradient and the laplacian of the phase
field. If it is not given the stencil of the LB method will be applied.
"""
stencil = mrt_method.stencil
dimensions = len(stencil[0])
if fd_stencil is None:
fd_stencil = stencil
weights = get_weights(stencil, c_s_sq=sp.Rational(1, 3))
u_symp = sp.symbols("u_:{}".format(dimensions))
normal_fd = normalized_isotropic_gradient_symbolic(phi_field, stencil, fd_stencil)
gamma = mrt_method.get_equilibrium_terms()
gamma = gamma.subs({sp.symbols("rho"): 1})
gamma_init = gamma.subs({x: y for x, y in zip(u_symp, velocity_field.center_vector)})
# create the kernels for the initialization of the h field
h_updates = list()
def scalar_product(a, b):
return sum(a_i * b_i for a_i, b_i in zip(a, b))
f = []
for i, d in enumerate(stencil):
f.append(weights[i] * ((1.0 - 4.0 * (phi_field.center - 0.5) ** 2) / interface_thickness)
* scalar_product(d, normal_fd[0:dimensions]))
for i, _ in enumerate(stencil):
h_updates.append(Assignment(lb_phase_field.center(i), phi_field.center * gamma_init[i] - 0.5 * f[i]))
return h_updates
def initializer_kernel_hydro_lb(lb_velocity_field, velocity_field, mrt_method):
r"""
Returns an assignment list for initializing the velocity distribution functions
Args:
lb_velocity_field: source field of velocity distribution function
velocity_field: velocity field
mrt_method: lattice Boltzmann method of the hydrodynamic lattice Boltzmann step
"""
stencil = mrt_method.stencil
dimensions = len(stencil[0])
weights = get_weights(stencil, c_s_sq=sp.Rational(1, 3))
u_symp = sp.symbols("u_:{}".format(dimensions))
gamma = mrt_method.get_equilibrium_terms()
gamma = gamma.subs({sp.symbols("rho"): 1})
gamma_init = gamma.subs({x: y for x, y in zip(u_symp, velocity_field.center_vector)})
g_updates = list()
for i, _ in enumerate(stencil):
g_updates.append(Assignment(lb_velocity_field.center(i), gamma_init[i] - weights[i]))
return g_updates
import math
def calculate_parameters_rti(reference_length=256,
reference_time=16000,
density_heavy=1.0,
capillary_number=0.26,
reynolds_number=3000,
atwood_number=0.5,
peclet_number=1000,
density_ratio=3,
viscosity_ratio=1):
r"""
Calculate the simulation parameters for the Rayleigh-Taylor instability.
The calculation is done according to the description in part B of PhysRevE.96.053301.
Args:
reference_length: reference length of the RTI
reference_time: chosen reference time
density_heavy: density of the heavier fluid
        capillary_number: Capillary number of the simulation, i.e. the relative effect of viscous drag
                          forces versus surface tension forces
        reynolds_number: Reynolds number of the simulation, i.e. the ratio of inertial forces to
                         viscous forces in the fluid
        atwood_number: Atwood number of the simulation, i.e. a dimensionless density ratio of the two fluids
        peclet_number: Peclet number of the simulation, i.e. the ratio of the rate of advection
                       of a physical quantity by the flow to the rate of diffusion of the same quantity
                       driven by an appropriate gradient
density_ratio: density ratio of the heavier and the lighter fluid
viscosity_ratio: viscosity ratio of the heavier and the lighter fluid
"""
# calculate the gravitational acceleration and the reference velocity
g = reference_length / (reference_time ** 2 * atwood_number)
reference_velocity = math.sqrt(g * reference_length)
dynamic_viscosity_heavy = (density_heavy * reference_velocity * reference_length) / reynolds_number
dynamic_viscosity_light = dynamic_viscosity_heavy / viscosity_ratio
density_light = density_heavy / density_ratio
kinematic_viscosity_heavy = dynamic_viscosity_heavy / density_heavy
kinematic_viscosity_light = dynamic_viscosity_light / density_light
relaxation_time_heavy = 3.0 * kinematic_viscosity_heavy
relaxation_time_light = 3.0 * kinematic_viscosity_light
surface_tension = (dynamic_viscosity_heavy * reference_velocity) / capillary_number
mobility = (reference_velocity * reference_length) / peclet_number
parameters = {
"density_light": density_light,
"dynamic_viscosity_heavy": dynamic_viscosity_heavy,
"dynamic_viscosity_light": dynamic_viscosity_light,
"relaxation_time_heavy": relaxation_time_heavy,
"relaxation_time_light": relaxation_time_light,
"gravitational_acceleration": -g,
"mobility": mobility,
"surface_tension": surface_tension
}
return parameters
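# Illustrative numeric check: with the default dimensionless numbers and a 256-cell reference
# length, the heavy relaxation time evaluates to roughly 5.8e-3 and the surface tension to
# roughly 1.7e-4 (all in lattice units).
def _example_rti_parameters():
    return calculate_parameters_rti(reference_length=256, reference_time=16000)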
def calculate_dimensionless_rising_bubble(reference_time=18000,
density_heavy=1.0,
bubble_radius=16,
bond_number=1,
reynolds_number=40,
density_ratio=1000,
viscosity_ratio=100):
r"""
    Calculate the simulation parameters for a rising bubble. The parameter calculation follows the ideas of Weber
    and is based on the Reynolds number. This means the rising velocity of the bubble is implicitly prescribed
    by the Reynolds number.
Args:
reference_time: chosen reference time
density_heavy: density of the heavier fluid
bubble_radius: initial radius of the rising bubble
        bond_number: also called Eötvös number; measures the importance of gravitational forces compared to
                     surface tension forces
        reynolds_number: Reynolds number of the simulation, i.e. the ratio of inertial forces to viscous
                         forces in the fluid
density_ratio: density ratio of the heavier and the lighter fluid
viscosity_ratio: viscosity ratio of the heavier and the lighter fluid
"""
bubble_diameter = bubble_radius * 2
g = bubble_diameter / (reference_time ** 2)
density_light = density_heavy / density_ratio
dynamic_viscosity_heavy = (density_heavy * math.sqrt(g * bubble_diameter ** 3)) / reynolds_number
dynamic_viscosity_light = dynamic_viscosity_heavy / viscosity_ratio
kinematic_viscosity_heavy = dynamic_viscosity_heavy / density_heavy
kinematic_viscosity_light = dynamic_viscosity_light / density_light
relaxation_time_heavy = 3 * kinematic_viscosity_heavy
relaxation_time_light = 3 * kinematic_viscosity_light
surface_tension = (density_heavy - density_light) * g * bubble_diameter ** 2 / bond_number
    # calculation of the Morton number (for reference; not used below):
    # Mo = gravitational_acceleration * dynamic_viscosity_heavy**4 * (density_heavy - density_light)
    #      / (density_heavy**2 * surface_tension**3)
parameters = {
"density_light": density_light,
"dynamic_viscosity_heavy": dynamic_viscosity_heavy,
"dynamic_viscosity_light": dynamic_viscosity_light,
"relaxation_time_heavy": relaxation_time_heavy,
"relaxation_time_light": relaxation_time_light,
"gravitational_acceleration": -g,
"surface_tension": surface_tension
}
return parameters
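Analogously, a short usage sketch for the rising-bubble setup (the values shown are the defaults of the function above):

``` python
# Example: compute rising-bubble parameters for a bubble of radius 16 cells.
bubble_parameters = calculate_dimensionless_rising_bubble(reference_time=18000, bubble_radius=16,
                                                          bond_number=1, reynolds_number=40)

sigma = bubble_parameters["surface_tension"]
g_bubble = bubble_parameters["gravitational_acceleration"]   # negative, following the sign convention above
```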
import sympy as sp
from lbmpy.innerloopsplit import create_lbm_split_groups
from lbmpy.methods.momentbased.momentbasedmethod import MomentBasedLbMethod
from lbmpy.methods.centeredcumulant import CenteredCumulantBasedLbMethod
from lbmpy.methods.centeredcumulant.simplification import insert_aliases, insert_zeros
from lbmpy.methods.momentbased.momentbasedsimplifications import (
factor_density_after_factoring_relaxation_times, factor_relaxation_rates,
replace_common_quadratic_and_constant_term, replace_density_and_velocity, replace_second_order_velocity_products)
from pystencils.simp import (
SimplificationStrategy, add_subexpressions_for_divisions, apply_to_all_assignments,
subexpression_substitution_in_main_assignments)
def create_simplification_strategy(lb_method, split_inner_loop=False):
s = SimplificationStrategy()
expand = apply_to_all_assignments(sp.expand)
if isinstance(lb_method, MomentBasedLbMethod):
if len(set(lb_method.relaxation_rates)) <= 2:
s.add(expand)
s.add(replace_second_order_velocity_products)
s.add(expand)
s.add(factor_relaxation_rates)
s.add(replace_density_and_velocity)
s.add(replace_common_quadratic_and_constant_term)
s.add(factor_density_after_factoring_relaxation_times)
s.add(subexpression_substitution_in_main_assignments)
if split_inner_loop:
s.add(create_lbm_split_groups)
s.add(add_subexpressions_for_divisions)
else:
s.add(subexpression_substitution_in_main_assignments)
if split_inner_loop:
s.add(create_lbm_split_groups)
elif isinstance(lb_method, CenteredCumulantBasedLbMethod):
s.add(insert_zeros)
s.add(insert_aliases)
s.add(lambda ac: ac.new_without_unused_subexpressions())
return s
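A minimal sketch of how this strategy is typically applied (the D2Q9 SRT setup is an assumption for illustration): a `SimplificationStrategy` is callable and returns the simplified assignment collection.

``` python
# Hypothetical example: build a collision rule and run the simplification strategy on it.
from lbmpy.creationfunctions import create_lb_method, create_lb_collision_rule

method = create_lb_method(stencil='D2Q9', method='srt', relaxation_rate=1.9)
collision_rule = create_lb_collision_rule(lb_method=method)

strategy = create_simplification_strategy(method, split_inner_loop=False)
simplified_collision_rule = strategy(collision_rule)
```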
def get_stencil(name, ordering='walberla'):
"""
    Stencils are tuples of discrete velocities. They are commonly labeled in the 'DxQy' notation, where x is the
    dimension (length of the velocity tuples) and y is the number of discrete velocities.
    Args:
        name: stencil name in DxQy notation
        ordering: the LBM literature does not use a common ordering of the discrete velocities, therefore several
                  common orderings are available here. All orderings lead to the same method; the chosen ordering
                  just has to be used consistently. The additional orderings make it easier to compare
                  intermediate results with the literature.
"""
try:
return get_stencil.data[name.upper()][ordering.lower()]
except KeyError:
err_msg = ""
for stencil, ordering_names in get_stencil.data.items():
err_msg += " %s: %s\n" % (stencil, ", ".join(ordering_names.keys()))
raise ValueError("No such stencil available. "
"Available stencils: <stencil_name>( <ordering_names> )\n" + err_msg)
get_stencil.data = {
'D2Q9': {
'walberla': ((0, 0),
(0, 1), (0, -1), (-1, 0), (1, 0),
(-1, 1), (1, 1), (-1, -1), (1, -1),),
'counterclockwise': ((0, 0),
(1, 0), (0, 1), (-1, 0), (0, -1),
(1, 1), (-1, 1), (-1, -1), (1, -1)),
'braunschweig': ((0, 0),
(-1, 1), (-1, 0), (-1, -1), (0, -1),
(1, -1), (1, 0), (1, 1), (0, 1)),
'uk': ((0, 0),
(1, 0), (-1, 0), (0, 1), (0, -1),
(1, 1), (-1, -1), (-1, 1), (1, -1),
)
},
'D2V17': {
'walberla': (
(0, 0), (0, -1), (-1, 0), (1, 0), (0, 1), (-1, -1), (1, -1), (-1, 1), (1, 1), (-2, -2), (2, -2), (-2, 2),
(2, 2), (0, -3), (-3, 0), (3, 0), (0, 3)),
},
'D2V37': {
'walberla': (
(0, 0), (0, -1), (-1, 0), (1, 0), (0, 1), (-1, -1), (1, -1), (-1, 1), (1, 1), (0, -2), (-2, 0), (2, 0),
(0, 2), (-1, -2), (1, -2), (-2, -1), (2, -1), (-2, 1), (2, 1), (-1, 2), (1, 2), (-2, -2), (2, -2), (-2, 2),
(2, 2), (0, -3), (-3, 0), (3, 0), (0, 3), (-1, -3), (1, -3), (-3, -1), (3, -1), (-3, 1), (3, 1), (-1, 3),
(1, 3))
},
'D3Q15': {
'walberla':
((0, 0, 0),
(0, 1, 0), (0, -1, 0), (-1, 0, 0), (1, 0, 0), (0, 0, 1), (0, 0, -1),
(1, 1, 1), (-1, 1, 1), (1, -1, 1), (-1, -1, 1), (1, 1, -1), (-1, 1, -1), (1, -1, -1), (-1, -1, -1)),
'premnath': ((0, 0, 0),
(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1),
(1, 1, 1), (-1, 1, 1), (1, -1, 1), (-1, -1, 1),
(1, 1, -1), (-1, 1, -1), (1, -1, -1), (-1, -1, -1)),
},
'D3Q19': {
'walberla': ((0, 0, 0),
(0, 1, 0), (0, -1, 0), (-1, 0, 0), (1, 0, 0), (0, 0, 1), (0, 0, -1),
(-1, 1, 0), (1, 1, 0), (-1, -1, 0), (1, -1, 0),
(0, 1, 1), (0, -1, 1), (-1, 0, 1), (1, 0, 1),
(0, 1, -1), (0, -1, -1), (-1, 0, -1), (1, 0, -1)),
'braunschweig': ((0, 0, 0),
(1, 0, 0), (-1, 0, 0),
(0, 1, 0), (0, -1, 0),
(0, 0, 1), (0, 0, -1),
(1, 1, 0), (-1, -1, 0),
(1, -1, 0), (-1, 1, 0),
(1, 0, 1), (-1, 0, -1),
(1, 0, -1), (-1, 0, 1),
(0, 1, 1), (0, -1, -1),
(0, 1, -1), (0, -1, 1)),
'premnath': ((0, 0, 0),
(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1),
(1, 1, 0), (-1, 1, 0), (1, -1, 0), (-1, -1, 0),
(1, 0, 1), (-1, 0, 1), (1, 0, -1), (-1, 0, -1),
(0, 1, 1), (0, -1, 1), (0, 1, -1), (0, -1, -1)),
},
'D3Q27': {
'walberla': ((0, 0, 0),
(0, 1, 0), (0, -1, 0), (-1, 0, 0), (1, 0, 0), (0, 0, 1), (0, 0, -1),
(-1, 1, 0), (1, 1, 0), (-1, -1, 0), (1, -1, 0),
(0, 1, 1), (0, -1, 1), (-1, 0, 1), (1, 0, 1),
(0, 1, -1), (0, -1, -1), (-1, 0, -1), (1, 0, -1),
(1, 1, 1), (-1, 1, 1), (1, -1, 1), (-1, -1, 1), (1, 1, -1), (-1, 1, -1), (1, -1, -1),
(-1, -1, -1)),
'premnath': ((0, 0, 0),
(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1),
(1, 1, 0), (-1, 1, 0), (1, -1, 0), (-1, -1, 0),
(1, 0, 1), (-1, 0, 1), (1, 0, -1), (-1, 0, -1),
(0, 1, 1), (0, -1, 1), (0, 1, -1), (0, -1, -1),
(1, 1, 1), (-1, 1, 1), (1, -1, 1), (-1, -1, 1),
(1, 1, -1), (-1, 1, -1), (1, -1, -1), (-1, -1, -1)),
'fakhari': ((0, 0, 0),
(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1),
(1, 1, 1), (-1, 1, 1), (1, -1, 1), (-1, -1, 1),
(1, 1, -1), (-1, 1, -1), (1, -1, -1), (-1, -1, -1),
(1, 1, 0), (-1, 1, 0), (1, -1, 0), (-1, -1, 0),
(1, 0, 1), (-1, 0, 1), (1, 0, -1), (-1, 0, -1), (0, 1, 1), (0, -1, 1), (0, 1, -1), (0, -1, -1)),
}
}
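For reference, a small usage example of the lookup function defined above:

``` python
# Example: look up the D2Q9 stencil in walberla ordering and query its basic properties.
d2q9 = get_stencil('D2Q9', ordering='walberla')
dim = len(d2q9[0])   # -> 2
q = len(d2q9)        # -> 9

# An unknown stencil name raises a ValueError that lists all available stencils and orderings.
```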
import sympy as sp
from lbmpy.relaxationrates import get_shear_relaxation_rate
from pystencils import Assignment
def second_order_moment_tensor(function_values, stencil):
"""Returns (D x D) Matrix of second order moments of the given function where D is the dimension"""
assert len(function_values) == len(stencil)
dim = len(stencil[0])
return sp.Matrix(dim, dim, lambda i, j: sum(c[i] * c[j] * f for f, c in zip(function_values, stencil)))
def frobenius_norm(matrix, factor=1):
"""Computes the Frobenius norm of a matrix defined as the square root of the sum of squared matrix elements
The optional factor is added inside the square root"""
return sp.sqrt(sum(i * i for i in matrix) * factor)
def add_smagorinsky_model(collision_rule, smagorinsky_constant, omega_output_field=None):
method = collision_rule.method
omega_s = get_shear_relaxation_rate(method)
    if isinstance(omega_s, (float, int)):
        raise ValueError("For the Smagorinsky model the shear relaxation rate has to be a symbol")
f_neq = sp.Matrix(method.pre_collision_pdf_symbols) - method.get_equilibrium_terms()
tau_0 = sp.Symbol("tau_0_")
second_order_neq_moments = sp.Symbol("Pi")
rho = method.zeroth_order_equilibrium_moment_symbol if method.conserved_quantity_computation.compressible else 1
adapted_omega = sp.Symbol("smagorinsky_omega")
collision_rule = collision_rule.new_with_substitutions({omega_s: adapted_omega})
# for derivation see notebook demo_custom_LES_model.ipynb
eqs = [Assignment(tau_0, 1 / omega_s),
Assignment(second_order_neq_moments,
frobenius_norm(second_order_moment_tensor(f_neq, method.stencil), factor=2) / rho),
Assignment(adapted_omega,
2 / (tau_0 + sp.sqrt(18 * smagorinsky_constant ** 2 * second_order_neq_moments + tau_0 ** 2)))]
collision_rule.subexpressions += eqs
collision_rule.topological_sort(sort_subexpressions=True, sort_main_assignments=False)
if omega_output_field:
collision_rule.main_assignments.append(Assignment(omega_output_field.center, adapted_omega))
return collision_rule
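A minimal usage sketch (the symbolic relaxation rate `omega` and the constant 0.12 are assumptions for illustration): the model is attached to a collision rule whose shear relaxation rate is symbolic.

``` python
# Hypothetical example: augment an SRT collision rule with the Smagorinsky model.
from lbmpy.creationfunctions import create_lb_collision_rule

omega = sp.Symbol("omega")
collision_rule = create_lb_collision_rule(stencil='D2Q9', method='srt', relaxation_rate=omega)
les_collision_rule = add_smagorinsky_model(collision_rule, smagorinsky_constant=0.12)
# Passing omega_output_field=<field> additionally writes the locally adapted rate to that field.
```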
import pystencils as ps
import numpy as np
from lbmpy.stencils import get_stencil
from pystencils.slicing import get_slice_before_ghost_layer, get_ghost_region_slice
from lbmpy.creationfunctions import create_lb_update_rule
from lbmpy.advanced_streaming.communication import get_communication_slices, _fix_length_one_slices, \
LBMPeriodicityHandling
from lbmpy.advanced_streaming.utility import streaming_patterns, Timestep
import pytest
@pytest.mark.parametrize('stencil', ['D2Q9', 'D3Q15', 'D3Q19', 'D3Q27'])
@pytest.mark.parametrize('streaming_pattern', streaming_patterns)
@pytest.mark.parametrize('timestep', [Timestep.EVEN, Timestep.ODD])
def test_slices_not_empty(stencil, streaming_pattern, timestep):
stencil = get_stencil(stencil)
dim = len(stencil[0])
q = len(stencil)
arr = np.zeros((4,) * dim + (q,))
slices = get_communication_slices(stencil, streaming_pattern=streaming_pattern, prev_timestep=timestep,
ghost_layers=1)
for _, slices_list in slices.items():
for src, dst in slices_list:
assert all(s != 0 for s in arr[src].shape)
assert all(s != 0 for s in arr[dst].shape)
@pytest.mark.parametrize('stencil', ['D2Q9', 'D3Q15', 'D3Q19', 'D3Q27'])
@pytest.mark.parametrize('streaming_pattern', streaming_patterns)
@pytest.mark.parametrize('timestep', [Timestep.EVEN, Timestep.ODD])
def test_src_dst_same_shape(stencil, streaming_pattern, timestep):
stencil = get_stencil(stencil)
dim = len(stencil[0])
q = len(stencil)
arr = np.zeros((4,) * dim + (q,))
slices = get_communication_slices(stencil, streaming_pattern=streaming_pattern, prev_timestep=timestep,
ghost_layers=1)
for _, slices_list in slices.items():
for src, dst in slices_list:
src_shape = arr[src].shape
dst_shape = arr[dst].shape
assert src_shape == dst_shape
@pytest.mark.parametrize('stencil', ['D2Q9', 'D3Q15', 'D3Q19', 'D3Q27'])
def test_pull_communication_slices(stencil):
stencil = get_stencil(stencil)
slices = get_communication_slices(
stencil, streaming_pattern='pull', prev_timestep=Timestep.BOTH, ghost_layers=1)
for i, d in enumerate(stencil):
if i == 0:
continue
for s in slices[d]:
if s[0][-1] == i:
src = s[0][:-1]
dst = s[1][:-1]
break
inner_slice = _fix_length_one_slices(get_slice_before_ghost_layer(d, ghost_layers=1))
inv_dir = (-e for e in d)
gl_slice = _fix_length_one_slices(get_ghost_region_slice(inv_dir, ghost_layers=1))
assert src == inner_slice
assert dst == gl_slice
@pytest.mark.parametrize('stencil_name', ['D2Q9', 'D3Q15', 'D3Q19', 'D3Q27'])
def test_optimised_and_full_communication_equivalence(stencil_name):
target = 'cpu'
stencil = get_stencil(stencil_name)
dim = len(stencil[0])
domain_size = (4, ) * dim
dh = ps.create_data_handling(domain_size, periodicity=(True, ) * dim,
parallel=False, default_target=target)
pdf = dh.add_array("pdf", values_per_cell=len(stencil), dtype=np.int64)
dh.fill("pdf", 0, ghost_layers=True)
pdf_tmp = dh.add_array("pdf_tmp", values_per_cell=len(stencil), dtype=np.int64)
dh.fill("pdf_tmp", 0, ghost_layers=True)
gl = dh.ghost_layers_of_field("pdf")
num = 0
for idx, x in np.ndenumerate(dh.cpu_arrays['pdf']):
dh.cpu_arrays['pdf'][idx] = num
dh.cpu_arrays['pdf_tmp'][idx] = num
num += 1
ac = create_lb_update_rule(stencil=stencil,
optimization={"symbolic_field": pdf,
"symbolic_temporary_field": pdf_tmp},
kernel_type='stream_pull_only')
ast = ps.create_kernel(ac, target=dh.default_target, cpu_openmp=True)
stream = ast.compile()
full_communication = dh.synchronization_function(pdf.name, target=dh.default_target, optimization={"openmp": True})
full_communication()
dh.run_kernel(stream)
dh.swap("pdf", "pdf_tmp")
pdf_full_communication = np.copy(dh.cpu_arrays['pdf'])
num = 0
for idx, x in np.ndenumerate(dh.cpu_arrays['pdf']):
dh.cpu_arrays['pdf'][idx] = num
dh.cpu_arrays['pdf_tmp'][idx] = num
num += 1
optimised_communication = LBMPeriodicityHandling(stencil=stencil, data_handling=dh, pdf_field_name=pdf.name,
streaming_pattern='pull')
optimised_communication()
dh.run_kernel(stream)
dh.swap("pdf", "pdf_tmp")
if dim == 3:
for i in range(gl, domain_size[0]):
for j in range(gl, domain_size[1]):
for k in range(gl, domain_size[2]):
for f in range(len(stencil)):
                        assert dh.cpu_arrays['pdf'][i, j, k, f] == pdf_full_communication[i, j, k, f], \
                            f"Mismatch at cell ({i}, {j}, {k}), direction {f}"
else:
for i in range(gl, domain_size[0]):
for j in range(gl, domain_size[1]):
for f in range(len(stencil)):
assert dh.cpu_arrays['pdf'][i, j, f] == pdf_full_communication[i, j, f]
import sympy as sp
from lbmpy.moments import get_default_moment_set_for_stencil, extract_monomials
import pytest
from lbmpy.stencils import get_stencil
from lbmpy.methods.centeredcumulant.centered_cumulants import statistical_quantity_symbol
from lbmpy.methods.momentbased.moment_transforms import (
PdfsToCentralMomentsByMatrix, FastCentralMomentTransform, PdfsToCentralMomentsByShiftMatrix,
PRE_COLLISION_CENTRAL_MOMENT, POST_COLLISION_CENTRAL_MOMENT
)
@pytest.mark.parametrize('stencil', ['D2Q9', 'D3Q19', 'D3Q27'])
def test_forward_transform(stencil):
stencil = get_stencil(stencil)
dim = len(stencil[0])
q = len(stencil)
moment_exponents = list(extract_monomials(get_default_moment_set_for_stencil(stencil)))
moment_exponents = sorted(moment_exponents, key=sum)
pdfs = sp.symbols(f"f_:{q}")
rho = sp.Symbol('rho')
u = sp.symbols(f"u_:{dim}")
matrix_transform = PdfsToCentralMomentsByMatrix(stencil, moment_exponents, rho, u)
fast_transform = FastCentralMomentTransform(stencil, moment_exponents, rho, u)
shift_transform = PdfsToCentralMomentsByShiftMatrix(stencil, moment_exponents, rho, u)
f_to_k_matrix = matrix_transform.forward_transform(pdfs, moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT)
f_to_k_matrix = f_to_k_matrix.new_without_subexpressions().main_assignments_dict
f_to_k_fast = fast_transform.forward_transform(pdfs, moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT)
f_to_k_fast = f_to_k_fast.new_without_subexpressions().main_assignments_dict
f_to_k_shift = shift_transform.forward_transform(pdfs, moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT,
simplification=False)
f_to_k_shift = f_to_k_shift.new_without_subexpressions().main_assignments_dict
for e in moment_exponents:
moment_symbol = statistical_quantity_symbol(PRE_COLLISION_CENTRAL_MOMENT, e)
rhs_matrix = f_to_k_matrix[moment_symbol].expand()
rhs_fast = f_to_k_fast[moment_symbol].expand()
rhs_shift = f_to_k_shift[moment_symbol].expand()
assert (rhs_matrix - rhs_fast) == 0, f"Mismatch between matrix and fast transform at {moment_symbol}."
assert (rhs_matrix - rhs_shift) == 0, f"Mismatch between matrix and shift-matrix transform at {moment_symbol}."
@pytest.mark.parametrize('stencil', ['D2Q9', 'D3Q19', 'D3Q27'])
def test_backward_transform(stencil):
stencil = get_stencil(stencil)
dim = len(stencil[0])
q = len(stencil)
moment_exponents = list(extract_monomials(get_default_moment_set_for_stencil(stencil)))
moment_exponents = sorted(moment_exponents, key=sum)
pdfs = sp.symbols(f"f_:{q}")
rho = sp.Symbol('rho')
u = sp.symbols(f"u_:{dim}")
matrix_transform = PdfsToCentralMomentsByMatrix(stencil, moment_exponents, rho, u)
fast_transform = FastCentralMomentTransform(stencil, moment_exponents, rho, u)
shift_transform = PdfsToCentralMomentsByShiftMatrix(stencil, moment_exponents, rho, u)
k_to_f_matrix = matrix_transform.backward_transform(pdfs, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT)
k_to_f_matrix = k_to_f_matrix.new_without_subexpressions().main_assignments_dict
k_to_f_fast = fast_transform.backward_transform(pdfs, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT)
k_to_f_fast = k_to_f_fast.new_without_subexpressions().main_assignments_dict
k_to_f_shift = shift_transform.backward_transform(pdfs, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT)
k_to_f_shift = k_to_f_shift.new_without_subexpressions().main_assignments_dict
for f in pdfs:
rhs_matrix = k_to_f_matrix[f].expand()
rhs_fast = k_to_f_fast[f].expand()
rhs_shift = k_to_f_shift[f].expand()
assert (rhs_matrix - rhs_fast) == 0, f"Mismatch between matrix and fast transform at {f}."
assert (rhs_matrix - rhs_shift) == 0, f"Mismatch between matrix and shift-matrix transform at {f}."
%% Cell type:code id: tags:
``` python
import pytest
pytest.importorskip('pycuda')
```
%% Output
<module 'pycuda' from '/home/markus/miniconda3/envs/pystencils/lib/python3.8/site-packages/pycuda/__init__.py'>
%% Cell type:code id: tags:
``` python
%load_ext autoreload
%autoreload 2
```
%% Cell type:code id: tags:
``` python
import sympy as sp
import numpy as np
from numpy.testing import assert_allclose
from pystencils.session import *
from lbmpy.session import *
from lbmpy.stencils import get_stencil
from lbmpy.methods.centeredcumulant import CenteredCumulantForceModel
from lbmpy.macroscopic_value_kernels import macroscopic_values_getter, macroscopic_values_setter
```
%% Cell type:markdown id: tags:
## Pipe Flow Scenario
%% Cell type:code id: tags:
``` python
class PeriodicPipeFlow:
    def __init__(self, method_params, optimization=None):
        # avoid a mutable default argument, since the dict is modified below
        optimization = {} if optimization is None else optimization
        wall_boundary = NoSlip()
self.stencil = method_params.get('stencil', get_stencil("D2Q9"))
self.streaming_pattern = method_params.get('streaming_pattern', 'pull')
self.target = optimization.get('target', 'cpu')
self.gpu = self.target in ['gpu', 'opencl']
# Stencil
self.q = len(self.stencil)
self.dim = len(self.stencil[0])
# Streaming
self.inplace = is_inplace(self.streaming_pattern)
self.timesteps = get_timesteps(self.streaming_pattern)
self.zeroth_timestep = self.timesteps[0]
# Domain, Data Handling and PDF fields
self.pipe_length = 60
self.pipe_radius = 15
self.domain_size = (self.pipe_length, ) + (2 * self.pipe_radius,) * (self.dim - 1)
self.periodicity = (True, ) + (False, ) * (self.dim - 1)
force = (0.0001, ) + (0.0,) * (self.dim - 1)
self.force = method_params.get('force', force)
self.dh = create_data_handling(domain_size=self.domain_size,
periodicity=self.periodicity, default_target=self.target)
self.pdfs = self.dh.add_array('pdfs', self.q)
if not self.inplace:
self.pdfs_tmp = self.dh.add_array_like('pdfs_tmp', self.pdfs.name)
method_params['force'] = self.force
optimization['symbolic_field'] = self.pdfs
if not self.inplace:
optimization['symbolic_temporary_field'] = self.pdfs_tmp
self.lb_collision = create_lb_collision_rule(optimization=optimization, **method_params)
self.lb_method = self.lb_collision.method
self.lb_kernels = []
for t in self.timesteps:
self.lb_kernels.append(create_lb_function(collision_rule=self.lb_collision,
optimization=optimization,
timestep=t,
**method_params))
# Macroscopic Values
self.density = 1.0
self.density_field = self.dh.add_array('rho', 1)
u_x = 0.0
self.velocity = (u_x,) * self.dim
self.velocity_field = self.dh.add_array('u', self.dim)
setter = macroscopic_values_setter(
self.lb_method, self.density, self.velocity, self.pdfs,
streaming_pattern=self.streaming_pattern, previous_timestep=self.zeroth_timestep)
self.init_kernel = create_kernel(setter, ghost_layers=1, target=self.target).compile()
self.getter_kernels = []
for t in self.timesteps:
getter = macroscopic_values_getter(
self.lb_method, self.density_field, self.velocity_field, self.pdfs,
streaming_pattern=self.streaming_pattern, previous_timestep=t)
self.getter_kernels.append(create_kernel(getter, ghost_layers=1, target=self.target).compile())
# Periodicity
self.periodicity_handler = LBMPeriodicityHandling(
self.stencil, self.dh, self.pdfs.name, streaming_pattern=self.streaming_pattern)
# Boundary Handling
self.wall = wall_boundary
self.bh = LatticeBoltzmannBoundaryHandling(
self.lb_method, self.dh, self.pdfs.name,
streaming_pattern=self.streaming_pattern, target=self.target)
self.bh.set_boundary(boundary_obj=self.wall, mask_callback=self.mask_callback)
self.current_timestep = self.zeroth_timestep
def mask_callback(self, x, y, z=None):
y = y - self.pipe_radius
z = z - self.pipe_radius if z is not None else 0
return np.sqrt(y**2 + z**2) >= self.pipe_radius
def init(self):
self.current_timestep = self.zeroth_timestep
self.dh.run_kernel(self.init_kernel)
def step(self):
# Order matters! First communicate, then boundaries, otherwise
# periodicity handling overwrites reflected populations
        # Periodicity
self.periodicity_handler(self.current_timestep)
# Boundaries
self.bh(prev_timestep=self.current_timestep)
# Here, the next time step begins
self.current_timestep = self.current_timestep.next()
# LBM Step
self.dh.run_kernel(self.lb_kernels[self.current_timestep.idx])
# Field Swaps
if not self.inplace:
self.dh.swap(self.pdfs.name, self.pdfs_tmp.name)
# Macroscopic Values
self.dh.run_kernel(self.getter_kernels[self.current_timestep.idx])
def run(self, iterations):
for _ in range(iterations):
self.step()
@property
def velocity_array(self):
if self.gpu:
self.dh.to_cpu(self.velocity_field.name)
return self.dh.gather_array(self.velocity_field.name)
def get_trimmed_velocity_array(self):
if self.gpu:
self.dh.to_cpu(self.velocity_field.name)
u = np.copy(self.dh.gather_array(self.velocity_field.name))
mask = self.bh.get_mask(None, self.wall)
for idx in np.ndindex(u.shape[:-1]):
if mask[idx] != 0:
u[idx] = np.full((self.dim, ), np.nan)
return u
```
%% Cell type:markdown id: tags:
## General Setup
%% Cell type:code id: tags:
``` python
stencil = get_stencil('D3Q19')
dim = len(stencil[0])
target = 'gpu'
streaming_pattern = 'aa'
optimization = { 'target': target }
viscous_rr = 1.1
force = (0.0001, ) + (0.0,) * (dim - 1)
```
%% Cell type:markdown id: tags:
## 1. Reference: SRT Method
%% Cell type:code id: tags:
``` python
srt_params = {
'stencil': stencil,
'method': 'srt',
'relaxation_rate': viscous_rr,
'force_model': 'guo',
'force' : force,
'streaming_pattern': streaming_pattern
}
srt_flow = PeriodicPipeFlow(srt_params, optimization)
srt_flow.init()
srt_flow.run(400)
```
%% Cell type:code id: tags:
``` python
srt_u = srt_flow.get_trimmed_velocity_array()
ps.plot.vector_field_magnitude(srt_u[30,:,:,:])
ps.plot.colorbar()
```
%% Output
<matplotlib.colorbar.Colorbar at 0x7f20b2f75bb0>
%% Cell type:markdown id: tags:
## 2. Centered Cumulant Method with Implicit Forcing
The default setup of the centered cumulant method does not use a force model to compute the force contributions on all populations, but instead applies the force in cumulant space simply by setting the momentum relaxation rate $\omega_1 = 2$. Due to the half-force shift of the equilibrium input velocity, the first central moments (which correspond to momentum relative to the moving frame of reference) are not zero, but equal to $-F/2$. The relaxation process causes the first central moments to simply change sign:
$$
\kappa_{100}^* = - \kappa_{100}.
$$
Thus, $\kappa_{100}^* = F_x /2$. In total, the entire acceleration given by the force is added onto the momentum.
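%% Cell type:markdown id: tags:
As a small sanity check of this argument (added here for illustration, not part of the original derivation): relaxing a central moment towards its zero equilibrium with rate $\omega_1$ gives $\kappa^* = (1 - \omega_1)\,\kappa$, so $\omega_1 = 2$ flips the sign and a pre-collision value of $-F_x/2$ becomes $+F_x/2$.
%% Cell type:code id: tags:
``` python
F_x, kappa_100 = sp.symbols('F_x kappa_100')
omega_1 = 2
post_collision = (1 - omega_1) * kappa_100      # relaxation towards zero equilibrium
assert post_collision.subs(kappa_100, -F_x / 2) == F_x / 2
```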
%% Cell type:code id: tags:
``` python
cm_method_params = {
'method' : 'monomial_cumulant',
'stencil': stencil,
'relaxation_rate': viscous_rr, # Specify viscous relaxation rate only
'force_model': CenteredCumulantForceModel(force),
'force' : force,
'streaming_pattern' : streaming_pattern
}
optimization = {
'target': target,
'pre_simplification' : True
}
cm_impl_f_flow = PeriodicPipeFlow(cm_method_params, optimization)
```
%% Cell type:code id: tags:
``` python
lb_method = cm_impl_f_flow.lb_method
assert all(rr == 2 for rr in lb_method.relaxation_rates[1:4])
assert all(rr == viscous_rr for rr in lb_method.relaxation_rates[4:9])
```
%% Cell type:code id: tags:
``` python
cm_impl_f_flow.init()
cm_impl_f_flow.run(400)
```
%% Cell type:code id: tags:
``` python
cm_impl_f_u = cm_impl_f_flow.get_trimmed_velocity_array()
ps.plot.vector_field_magnitude(cm_impl_f_u[30,:,:,:])
ps.plot.colorbar()
```
%% Output
<matplotlib.colorbar.Colorbar at 0x7f20aa846670>
%% Cell type:code id: tags:
``` python
assert_allclose(cm_impl_f_u, srt_u, rtol=1, atol=1e-4)
```
%% Cell type:markdown id: tags:
## 3. Centered Cumulant Method with Explicit Forcing
%% Cell type:code id: tags:
``` python
from lbmpy.forcemodels import Schiller
cm_method_params_expl_force = {
'method' : 'cumulant',
'stencil': stencil,
'relaxation_rates': [viscous_rr], # Specify viscous relaxation rate only
'force_model': Schiller(force),
'force' : force,
'streaming_pattern' : streaming_pattern
}
optimization = {
'target': target,
'pre_simplification' : True
}
cm_expl_f_flow = PeriodicPipeFlow(cm_method_params_expl_force, optimization)
```
%% Cell type:code id: tags:
``` python
lb_method = cm_expl_f_flow.lb_method
assert all(rr == 0 for rr in lb_method.relaxation_rates[1:4])
assert all(rr == viscous_rr for rr in lb_method.relaxation_rates[4:9])
```
%% Cell type:code id: tags:
``` python
cm_expl_f_flow.init()
cm_expl_f_flow.run(400)
```
%% Cell type:code id: tags:
``` python
cm_expl_f_u = cm_expl_f_flow.get_trimmed_velocity_array()
ps.plot.vector_field_magnitude(cm_expl_f_u[30,:,:,:])
ps.plot.colorbar()
```
%% Output
<matplotlib.colorbar.Colorbar at 0x7f20aa641070>
%% Cell type:code id: tags:
``` python
assert_allclose(cm_expl_f_u, srt_u, rtol=1, atol=1e-4)
assert_allclose(cm_expl_f_u, cm_impl_f_u, rtol=1, atol=1e-4)
```
%% Cell type:code id: tags:
``` python
from lbmpy.session import *
from lbmpy.phasefield.analytical import *
from pystencils.fd import evaluate_diffs
```
%% Cell type:markdown id: tags:
# Analytical checks for 3-Phase model
Here you can inspect the components of the free energy. View only the bulk or interface contributions, before and after the transformation $U \rightarrow (\rho, \phi, \psi)$:
%% Cell type:code id: tags:
``` python
order_parameters = sp.symbols("rho phi psi")
rho, phi, psi = order_parameters
F, _ = free_energy_functional_3_phases(include_bulk=True,
include_interface=True,
transformed=True,
expand_derivatives=False)
F
```
%% Output
$$\frac{\alpha^{2} \kappa_{0}}{2} {\partial (\frac{\phi}{2} - \frac{\psi}{2} + \frac{\rho}{2}) }^{2} + \frac{\alpha^{2} \kappa_{1}}{2} {\partial (- \frac{\phi}{2} - \frac{\psi}{2} + \frac{\rho}{2}) }^{2} + \frac{\alpha^{2} \kappa_{2}}{2} {\partial \psi}^{2} + \frac{\kappa_{0}}{2} \left(\frac{\phi}{2} - \frac{\psi}{2} + \frac{\rho}{2}\right)^{2} \left(- \frac{\phi}{2} + \frac{\psi}{2} - \frac{\rho}{2} + 1\right)^{2} + \frac{\kappa_{1}}{2} \left(- \frac{\phi}{2} - \frac{\psi}{2} + \frac{\rho}{2}\right)^{2} \left(\frac{\phi}{2} + \frac{\psi}{2} - \frac{\rho}{2} + 1\right)^{2} + \frac{\kappa_{2} \psi^{2}}{2} \left(- \psi + 1\right)^{2}$$
%% Cell type:markdown id: tags:
### Analytically checking the phase transition profile
%% Cell type:markdown id: tags:
Automatically deriving the chemical potential as the functional derivative of the free energy
%% Cell type:code id: tags:
``` python
F, _ = free_energy_functional_3_phases(order_parameters)
mu_diff_eq = chemical_potentials_from_free_energy(F, order_parameters)
mu_diff_eq[0]
```
%% Output
$$- \frac{\alpha^{2} \kappa_{0}}{4} {\partial {\partial \phi}} + \frac{\alpha^{2} \kappa_{0}}{4} {\partial {\partial \psi}} - \frac{\alpha^{2} \kappa_{0}}{4} {\partial {\partial \rho}} + \frac{\alpha^{2} \kappa_{1}}{4} {\partial {\partial \phi}} + \frac{\alpha^{2} \kappa_{1}}{4} {\partial {\partial \psi}} - \frac{\alpha^{2} \kappa_{1}}{4} {\partial {\partial \rho}} + \frac{\kappa_{0} \phi^{3}}{8} - \frac{3 \kappa_{0}}{8} \phi^{2} \psi + \frac{3 \kappa_{0}}{8} \phi^{2} \rho - \frac{3 \kappa_{0}}{8} \phi^{2} + \frac{3 \kappa_{0}}{8} \phi \psi^{2} - \frac{3 \kappa_{0}}{4} \phi \psi \rho + \frac{3 \kappa_{0}}{4} \phi \psi + \frac{3 \kappa_{0}}{8} \phi \rho^{2} - \frac{3 \kappa_{0}}{4} \phi \rho + \frac{\kappa_{0} \phi}{4} - \frac{\kappa_{0} \psi^{3}}{8} + \frac{3 \kappa_{0}}{8} \psi^{2} \rho - \frac{3 \kappa_{0}}{8} \psi^{2} - \frac{3 \kappa_{0}}{8} \psi \rho^{2} + \frac{3 \kappa_{0}}{4} \psi \rho - \frac{\kappa_{0} \psi}{4} + \frac{\kappa_{0} \rho^{3}}{8} - \frac{3 \kappa_{0}}{8} \rho^{2} + \frac{\kappa_{0} \rho}{4} - \frac{\kappa_{1} \phi^{3}}{8} - \frac{3 \kappa_{1}}{8} \phi^{2} \psi + \frac{3 \kappa_{1}}{8} \phi^{2} \rho - \frac{3 \kappa_{1}}{8} \phi^{2} - \frac{3 \kappa_{1}}{8} \phi \psi^{2} + \frac{3 \kappa_{1}}{4} \phi \psi \rho - \frac{3 \kappa_{1}}{4} \phi \psi - \frac{3 \kappa_{1}}{8} \phi \rho^{2} + \frac{3 \kappa_{1}}{4} \phi \rho - \frac{\kappa_{1} \phi}{4} - \frac{\kappa_{1} \psi^{3}}{8} + \frac{3 \kappa_{1}}{8} \psi^{2} \rho - \frac{3 \kappa_{1}}{8} \psi^{2} - \frac{3 \kappa_{1}}{8} \psi \rho^{2} + \frac{3 \kappa_{1}}{4} \psi \rho - \frac{\kappa_{1} \psi}{4} + \frac{\kappa_{1} \rho^{3}}{8} - \frac{3 \kappa_{1}}{8} \rho^{2} + \frac{\kappa_{1} \rho}{4}$$
%% Cell type:markdown id: tags:
Checking if the expected profile is a solution of the differential equation:
%% Cell type:code id: tags:
``` python
x = sp.symbols("x")
expectedProfile = analytic_interface_profile(x)
expectedProfile
```
%% Output
$$\frac{1}{2} \tanh{\left (\frac{x}{2 \alpha} \right )} + \frac{1}{2}$$
%% Cell type:markdown id: tags:
Checking a phase transition from $C_0$ to $C_2$. This means that $\rho=1$, while $\psi$ is the analytical interface profile and $\phi = 1 - \psi$.
%% Cell type:code id: tags:
``` python
for eq in mu_diff_eq:
eq = eq.subs({rho: 1,
phi: 1 - expectedProfile,
psi: expectedProfile})
eq = evaluate_diffs(eq, x).expand()
assert eq == 0
```
%% Cell type:markdown id: tags:
### Checking the surface tension parameters
%% Cell type:code id: tags:
``` python
F, _ = free_energy_functional_3_phases(order_parameters)
F = expand_diff_linear(F, functions=order_parameters) # expand derivatives using product rule
two_phase_free_energy = F.subs({rho: 1,
phi: 1 - expectedProfile,
psi: expectedProfile})
two_phase_free_energy = sp.simplify(evaluate_diffs(two_phase_free_energy, x))
two_phase_free_energy
```
%% Output
$$\frac{\kappa_{0} + \kappa_{2}}{16 \cosh^{4}{\left (\frac{x}{2 \alpha} \right )}}$$
%% Cell type:code id: tags:
``` python
gamma = cosh_integral(two_phase_free_energy, x)
gamma
```
%% Output
$$\frac{\alpha}{6} \left(\kappa_{0} + \kappa_{2}\right)$$
%% Cell type:code id: tags:
``` python
alpha, k0, k2 = sp.symbols("alpha, kappa_0, kappa_2")
assert gamma == alpha/6 * (k0 + k2)
```