Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision
  • Sparse
  • WallLaw
  • fhennig/pystencils2.0-compat
  • improved_comm
  • master
  • suffa/psm_optimization
  • release/0.2.1
  • release/0.2.10
  • release/0.2.11
  • release/0.2.12
  • release/0.2.13
  • release/0.2.14
  • release/0.2.15
  • release/0.2.2
  • release/0.2.3
  • release/0.2.4
  • release/0.2.5
  • release/0.2.6
  • release/0.2.7
  • release/0.2.8
  • release/0.2.9
  • release/0.3.0
  • release/0.3.1
  • release/0.3.2
  • release/0.3.3
  • release/0.3.4
  • release/0.4.0
  • release/0.4.1
  • release/0.4.2
  • release/0.4.3
  • release/0.4.4
  • release/1.0
  • release/1.0.1
  • release/1.1
  • release/1.1.1
  • release/1.2
  • release/1.3
  • release/1.3.1
  • release/1.3.2
  • release/1.3.3
  • release/1.3.4
  • release/1.3.5
  • release/1.3.6
  • release/1.3.7
44 results

Target

Select target project
  • ravi.k.ayyala/lbmpy
  • brendan-waters/lbmpy
  • anirudh.jonnalagadda/lbmpy
  • jbadwaik/lbmpy
  • alexander.reinauer/lbmpy
  • itischler/lbmpy
  • he66coqe/lbmpy
  • ev81oxyl/lbmpy
  • Bindgen/lbmpy
  • da15siwa/lbmpy
  • holzer/lbmpy
  • RudolfWeeber/lbmpy
  • pycodegen/lbmpy
13 results
Select Git revision
  • GetterSetterAPI
  • HRR
  • HydroPressure
  • InplaceConfig
  • Outflow
  • PhaseField
  • Sparse
  • UBBVelocity
  • UpdateAPISparse
  • WallLaw
  • WetNodeBoundaries
  • csebug
  • feature/sparse
  • feature/try
  • improved_comm
  • install_requires
  • master
  • phaseField
  • relaxationrates
  • test_martin
  • release/0.2.1
  • release/0.2.10
  • release/0.2.11
  • release/0.2.12
  • release/0.2.13
  • release/0.2.14
  • release/0.2.15
  • release/0.2.2
  • release/0.2.3
  • release/0.2.4
  • release/0.2.5
  • release/0.2.6
  • release/0.2.7
  • release/0.2.8
  • release/0.2.9
  • release/0.3.0
  • release/0.3.1
  • release/0.3.2
  • release/0.3.3
  • release/0.3.4
  • release/0.4.0
  • release/0.4.1
  • release/0.4.2
  • release/0.4.3
  • release/0.4.4
  • release/1.0
  • release/1.0.1
  • release/1.1
  • release/1.1.1
  • release/1.2
  • release/1.3
  • release/1.3.1
  • release/1.3.2
  • release/1.3.3
  • release/1.3.4
  • release/1.3.5
  • release/1.3.6
57 results
Show changes
Showing
with 702 additions and 2464 deletions
*********************************************
Equilibrium Distributions (lbmpy.equilibrium)
*********************************************
.. automodule:: lbmpy.equilibrium
Abstract Base Class
===================
.. autoclass:: lbmpy.equilibrium.AbstractEquilibrium
:members:
:private-members: _monomial_raw_moment, _monomial_central_moment, _monomial_cumulant
Generic Discrete Equilibria
===========================
Use the following class for custom discrete equilibria.
.. autoclass:: lbmpy.equilibrium.GenericDiscreteEquilibrium
:members:
Maxwellian Equilibria for Hydrodynamics
=======================================
The following classes represent the continuous and the discrete variant of the Maxwellian equilibrium for
hydrodynamics.
.. autoclass:: lbmpy.equilibrium.ContinuousHydrodynamicMaxwellian
:members:
.. autoclass:: lbmpy.equilibrium.DiscreteHydrodynamicMaxwellian
:members:
...@@ -6,7 +6,8 @@ ...@@ -6,7 +6,8 @@
number={4}, number={4},
pages={043309}, pages={043309},
year={2015}, year={2015},
publisher={APS} publisher={APS},
doi={10.1103/PhysRevE.92.043309},
} }
@PHDTHESIS{luo1993lattice, @PHDTHESIS{luo1993lattice,
...@@ -16,7 +17,7 @@ ...@@ -16,7 +17,7 @@
school = {GEORGIA INSTITUTE OF TECHNOLOGY.}, school = {GEORGIA INSTITUTE OF TECHNOLOGY.},
year = 1993, year = 1993,
adsurl = {http://adsabs.harvard.edu/abs/1993PhDT.......233L}, adsurl = {http://adsabs.harvard.edu/abs/1993PhDT.......233L},
adsnote = {Provided by the SAO/NASA Astrophysics Data System} adsnote = {Provided by the SAO/NASA Astrophysics Data System},
} }
@Article{guo2002discrete, @Article{guo2002discrete,
...@@ -28,8 +29,24 @@ ...@@ -28,8 +29,24 @@
number = {4}, number = {4},
pages = {046308}, pages = {046308},
publisher = {APS}, publisher = {APS},
doi = {10.1103/PhysRevE.65.046308},
} }
@article{HeForce,
title = {Discrete Boltzmann equation model for nonideal gases},
author = {He, Xiaoyi and Shan, Xiaowen and Doolen, Gary D.},
journal = {Physical Review E},
volume = {57},
issue = {1},
pages = {R13--R16},
numpages = {0},
year = {1998},
month = {1},
publisher = {APS},
doi = {10.1103/PhysRevE.57.R13}
}
@article{buick2000gravity, @article{buick2000gravity,
title={Gravity in a lattice Boltzmann model}, title={Gravity in a lattice Boltzmann model},
author={Buick, JM and Greated, CA}, author={Buick, JM and Greated, CA},
...@@ -38,7 +55,8 @@ ...@@ -38,7 +55,8 @@
number={5}, number={5},
pages={5307}, pages={5307},
year={2000}, year={2000},
publisher={APS} publisher={APS},
doi = {10.1103/PhysRevE.61.5307},
} }
@phdthesis{schiller2008thermal, @phdthesis{schiller2008thermal,
...@@ -67,35 +85,62 @@ year = {2018} ...@@ -67,35 +85,62 @@ year = {2018}
@article{Semprebon2016, @article{Semprebon2016,
author = {Semprebon, Ciro and Kusumaatmaja, Halim}, title = {Ternary free-energy lattice Boltzmann model with tunable surface tensions and contact angles},
doi = {10.1103/PhysRevE.93.033305}, author = {Semprebon, Ciro and Kr\"uger, Timm and Kusumaatmaja, Halim},
keywords = {lbm,multiphase,phasefield}, journal = {Phys. Rev. E},
mendeley-tags = {lbm,multiphase,phasefield}, volume = {93},
pages = {1--11}, issue = {3},
title = {{Ternary free-energy lattice Boltzmann model with tunable surface tensions and contact angles}}, pages = {033305},
volume = {033305}, numpages = {11},
year = {2016} year = {2016},
month = {Mar},
publisher = {American Physical Society},
doi = {10.1103/PhysRevE.93.033305},
} }
@article{geier2015, @article{geier2015,
author = {Geier, Martin and Sch{\"{o}}nherr, Martin and Pasquali, Andrea and Krafczyk, Manfred}, author = {Geier, Martin and Sch{\"{o}}nherr, Martin and Pasquali, Andrea and Krafczyk, Manfred},
title = {{The cumulant lattice Boltzmann equation in three dimensions: Theory and validation}}, title = {{The cumulant lattice Boltzmann equation in three dimensions: Theory and validation}},
journal = {Computers \& Mathematics with Applications}, journal = {Computers \& Mathematics with Applications},
volume = {70},
number = {4},
pages = {507-547},
year = {2015}, year = {2015},
doi = {10.1016/j.camwa.2015.05.001} issn = {0898-1221},
doi = {10.1016/j.camwa.2015.05.001},
}
@article{geier2017,
author = {Geier, Martin and Pasquali, Andrea and Sch{\"{o}}nherr, Martin},
title = {Parametrization of the Cumulant Lattice Boltzmann Method for Fourth Order Accurate Diffusion Part I},
year = {2017},
issue_date = {November 2017},
publisher = {Academic Press Professional, Inc.},
address = {USA},
volume = {348},
number = {C},
issn = {0021-9991},
url = {https://doi.org/10.1016/j.jcp.2017.05.040},
doi = {10.1016/j.jcp.2017.05.040},
journal = {J. Comput. Phys.},
month = {nov},
  pages = {862--888},
numpages = {27}
} }
@Article{Coreixas2019, @Article{Coreixas2019,
author = {Christophe Coreixas and Bastien Chopard and Jonas Latt}, title = {Comprehensive comparison of collision models in the lattice Boltzmann framework: Theoretical investigations},
journal = {Physical Review E}, author = {Coreixas, Christophe and Chopard, Bastien and Latt, Jonas},
title = {Comprehensive comparison of collision models in the lattice Boltzmann framework: Theoretical investigations}, journal = {Phys. Rev. E},
year = {2019}, volume = {100},
month = {sep}, issue = {3},
number = {3}, pages = {033305},
pages = {033305}, numpages = {46},
volume = {100}, year = {2019},
doi = {10.1103/physreve.100.033305}, month = {Sep},
publisher = {American Physical Society ({APS})}, publisher = {American Physical Society},
doi = {10.1103/PhysRevE.100.033305},
url = {https://link.aps.org/doi/10.1103/PhysRevE.100.033305}
} }
@PhdThesis{Geier2006, @PhdThesis{Geier2006,
...@@ -104,3 +149,169 @@ doi = {10.1016/j.camwa.2015.05.001} ...@@ -104,3 +149,169 @@ doi = {10.1016/j.camwa.2015.05.001}
title = {Ab initio derivation of the cascaded lattice Boltzmann automaton}, title = {Ab initio derivation of the cascaded lattice Boltzmann automaton},
year = {2006}, year = {2006},
} }
@article{Fakhari2018,
title = {A phase-field lattice {Boltzmann} model for simulating multiphase flows in porous media: Application and comparison to experiments of {CO2} sequestration at pore scale},
journal = {Advances in Water Resources},
volume = {114},
pages = {119-134},
year = {2018},
issn = {0309-1708},
doi = {10.1016/j.advwatres.2018.02.005},
author = {Fakhari, A. and Li, Y. and Bolster, D. and Christensen, K. T.},
}
@article{silva2010,
title = {A study on the inclusion of body forces in the lattice Boltzmann BGK equation to recover steady-state hydrodynamics},
journal = {Physica A: Statistical Mechanics and its Applications},
volume = {390},
number = {6},
pages = {1085-1095},
year = {2011},
doi = {10.1016/j.physa.2010.11.037},
author = {Silva, G. and Semiao, V.},
keywords = {{Lattice Boltzmann} method, {BGK} collision operator, Steady-state flows, Body force driven flows}
}
@article{silva2020,
title = {Force methods for the two-relaxation-times lattice {Boltzmann}},
author = {Postma, B. and Silva, G.},
journal = {Phys. Rev. E},
volume = {102},
issue = {6},
year = {2020},
month = {12},
publisher = {American Physical Society},
doi = {10.1103/PhysRevE.102.063307},
}
@book{lbm_book,
doi = {10.1007/978-3-319-44649-3},
year = {2017},
publisher = {Springer International Publishing},
author = {Kr\"{u}ger, T. and Kusumaatmaja, H. and Kuzmin, A. and Shardt, O. and Silva, G. and Viggen, E. M.},
title = {The Lattice {Boltzmann} Method}
}
@article{Ansumali2003,
doi = {10.1209/epl/i2003-00496-6},
year = 2003,
month = {sep},
  journal = {{EPL} (Europhysics Letters)},
  publisher = {{IOP} Publishing},
volume = {63},
number = {6},
pages = {798--804},
author = {S Ansumali and I. V Karlin and H. C Öttinger},
title = {Minimal entropic kinetic models for hydrodynamics},
abstract = {We derive minimal discrete models of the Boltzmann equation consistent with equilibrium thermodynamics, and which recover correct hydrodynamics in arbitrary dimensions. A new discrete velocity model is proposed for the simulation of the Navier-Stokes-Fourier equation and is tested in the setup of Taylor vortex flow. A simple analytical procedure for constructing the equilibrium for thermal hydrodynamics is established. For the lattice Boltzmann method of isothermal hydrodynamics, the explicit analytical form of the equilibrium distribution is presented. This results in an entropic version of the isothermal lattice Boltzmann method with the simplicity and computational efficiency of the standard lattice Boltzmann model.}
}
@Article{raw_moments,
author = {D. D'Humières},
journal = {Rarefied gas dynamics},
title = {Generalized lattice-{Boltzmann} equations},
year = {1992},
}
@article{FAKHARI201722,
author = "Abbas Fakhari and Diogo Bolster and Li-Shi Luo",
title = "A weighted multiple-relaxation-time lattice {Boltzmann} method for multiphase flows and its application to partial coalescence cascades",
journal = "Journal of Computational Physics",
year = "2017",
doi = "10.1016/j.jcp.2017.03.062"
}
@article{TRT,
  author = {Irina Ginzburg and Frederik Verhaeghe and D. D'Humières},
year = {2008},
month = {01},
pages = {427-478},
title = {Two-relaxation-time Lattice Boltzmann scheme: about parametrization, velocity, pressure and mixed boundary conditions},
volume = {3},
journal = {Communications in Computational Physics}
}
@Article{HeIncompressible,
author = {Xiaoyi He and Li-Shi Luo},
journal = {Journal of Statistical Physics},
title = {Lattice Boltzmann Model for the Incompressible Navier{\textendash}Stokes Equation},
year = {1997},
month = {aug},
number = {3/4},
pages = {927--944},
volume = {88},
doi = {10.1023/b:joss.0000015179.12689.e4},
publisher = {Springer Science and Business Media {LLC}},
}
@Article{GruszczynskiCascadedPhaseFieldModel,
author = {G. Gruszczy{\'{n}}ski and T. Mitchell and C. Leonardi and {\L}. {\L}aniewski-Wo{\l}{\l}k and T. Barber},
journal = {Computers {\&} Mathematics with Applications},
title = {A cascaded phase-field lattice Boltzmann model for the simulation of incompressible, immiscible fluids with high density contrast},
year = {2020},
month = {feb},
number = {4},
pages = {1049--1071},
volume = {79},
doi = {10.1016/j.camwa.2019.08.018},
publisher = {Elsevier {BV}},
}
@Article{Casson,
author = {R. Ouared and B. Chopard},
journal = {Journal of Statistical Physics},
title = {Lattice {Boltzmann} Simulations of Blood Flow: {Non-Newtonian} Rheology and Clotting Processes},
year = {2005},
doi = {10.1007/s10955-005-8415-x},
publisher = {Springer Link},
}
@article{BouzidiBC,
author = {Bouzidi, M’hamed and Firdaouss, Mouaouia and Lallemand, Pierre},
title = "{Momentum transfer of a Boltzmann-lattice fluid with boundaries}",
journal = {Physics of Fluids},
year = {2001},
month = {11},
doi = {10.1063/1.1399290},
url = {https://doi.org/10.1063/1.1399290},
}
@Article{rozema15,
doi = {10.1063/1.4928700},
year = {2015},
month = {aug},
publisher = {AIP Publishing},
volume = {27},
number = {8},
author = {Wybe Rozema and Hyun J. Bae and Parviz Moin and Roel Verstappen},
title = {Minimum-dissipation models for large-eddy simulation},
journal = {Physics of Fluids}
}
@article{Han2021,
doi = {10.1088/1873-7005/ac1782},
  url = {https://dx.doi.org/10.1088/1873-7005/ac1782},
year = {2021},
month = {aug},
publisher = {IOP Publishing},
volume = {53},
number = {4},
pages = {045506},
author = {Mengtao Han and Ryozo Ooka and Hideki Kikumoto},
title = {A wall function approach in lattice Boltzmann method: algorithm and validation using turbulent channel flow},
journal = {Fluid Dynamics Research}
}
@article{Maronga2020,
author = {Maronga, Bj{\"o}rn and Knigge, Christoph and Raasch, Siegfried},
year = {2020},
title = {{An Improved Surface Boundary Condition for Large-Eddy Simulations Based on Monin--Obukhov Similarity Theory: Evaluation and Consequences for Grid Convergence in Neutral and Stable Conditions}},
pages = {297--325},
volume = {174},
number = {2},
issn = {0006-8314},
journal = {{Boundary-layer meteorology}},
doi = {10.1007/s10546-019-00485-w}
}
@Comment{jabref-meta: databaseType:bibtex;}
********************** *******************************
Maxwellian Equilibrium Maxwellian Equilibrium (Legacy)
********************** *******************************
.. automodule:: lbmpy.maxwellian_equilibrium .. automodule:: lbmpy.maxwellian_equilibrium
:members: :members:
...@@ -11,7 +11,7 @@ Maxwellian Equilibrium ...@@ -11,7 +11,7 @@ Maxwellian Equilibrium
.. autofunction:: lbmpy.maxwellian_equilibrium.continuous_maxwellian_equilibrium .. autofunction:: lbmpy.maxwellian_equilibrium.continuous_maxwellian_equilibrium
.. autofunction:: lbmpy.maxwellian_equilibrium.get_moments_of_continuous_maxwellian_equilibrium .. autofunction:: lbmpy.maxwellian_equilibrium.get_equilibrium_values_of_maxwell_boltzmann_function
.. autofunction:: lbmpy.maxwellian_equilibrium.get_moments_of_discrete_maxwellian_equilibrium .. autofunction:: lbmpy.maxwellian_equilibrium.get_moments_of_discrete_maxwellian_equilibrium
Creating LBM methods
====================
This module is a lower level API to construct methods.
When possible use the high level API.
.. automodule:: lbmpy.methods.creationfunctions
:members:
*********************** ********************************
Methods (lbmpy.methods) Collision models (lbmpy.methods)
*********************** ********************************
This module defines the classes defining all types of lattice Boltzmann methods available in *lbmpy*,
together with a set of factory functions used to create their instances. The factory functions are
organized in three levels of abstraction. Objects of the method classes should be created only through
these factory functions, never manually.
LBM Method Interfaces Methods in *lbmpy* can be distinguished into three categories:
===================== - :ref:`methods_rawmomentbased`, including the classical single relaxation-time (SRT, BGK), two relaxation-time (TRT)
and multiple relaxation-time (MRT) methods, as well as entropic variants of the TRT method.
- :ref:`methods_centralmomentbased`, which are multiple relaxation-time methods using relaxation in central moment space.
- :ref:`methods_cumulantbased`, multiple relaxation-time methods using relaxation in cumulant space.
.. autoclass:: lbmpy.methods.AbstractLbMethod Abstract LB Method Base Class
:members: =============================
.. autoclass:: lbmpy.methods.AbstractConservedQuantityComputation .. autoclass:: lbmpy.methods.LbmCollisionRule
:members: :members:
.. autoclass:: lbmpy.methods.AbstractLbMethod
LBM with conserved zeroth and first order
=========================================
.. autoclass:: lbmpy.methods.DensityVelocityComputation
:members: :members:
.. _methods_rawmomentbased:
Raw Moment-based methods
========================
These methods are represented by instances of :class:`lbmpy.methods.momentbased.MomentBasedLbMethod` and will derive
Moment-based methods collision equations either in population space (SRT, TRT, entropic TRT), or in raw moment space (MRT variants).
====================
Creation Functions Creation Functions
------------------ ------------------
The following factory functions create raw moment-based methods using variants of the regular hydrodynamic maxwellian
equilibrium.
.. autofunction:: lbmpy.methods.create_srt .. autofunction:: lbmpy.methods.create_srt
.. autofunction:: lbmpy.methods.create_trt .. autofunction:: lbmpy.methods.create_trt
...@@ -38,9 +44,7 @@ Creation Functions ...@@ -38,9 +44,7 @@ Creation Functions
.. autofunction:: lbmpy.methods.create_mrt_orthogonal .. autofunction:: lbmpy.methods.create_mrt_orthogonal
.. autofunction:: lbmpy.methods.create_with_continuous_maxwellian_eq_moments .. autofunction:: lbmpy.methods.create_trt_kbc
.. autofunction:: lbmpy.methods.create_with_discrete_maxwellian_eq_moments
Class Class
...@@ -50,32 +54,111 @@ Class ...@@ -50,32 +54,111 @@ Class
:members: :members:
.. _methods_centralmomentbased:
Central Moment-based methods
============================
These methods are represented by instances of :class:`lbmpy.methods.momentbased.CentralMomentBasedLbMethod` and will derive
collision equations in central moment space.
Creation Functions
------------------
The following factory functions create central moment-based methods using variants of the regular hydrodynamic maxwellian
equilibrium.
.. autofunction:: lbmpy.methods.create_central_moment
Class
-----
.. autoclass:: lbmpy.methods.momentbased.CentralMomentBasedLbMethod
:members:
.. _methods_cumulantbased:
Cumulant-based methods Cumulant-based methods
====================== ======================
These methods are represented by instances of :class:`lbmpy.methods.cumulantbased.CumulantBasedLbMethod` and will derive
collision equations in cumulant space.
Creation Functions Creation Functions
------------------ ------------------
.. autofunction:: lbmpy.methods.create_with_polynomial_cumulants The following factory functions create cumulant-based methods using the regular continuous hydrodynamic maxwellian equilibrium.
.. autofunction:: lbmpy.methods.create_cumulant
.. autofunction:: lbmpy.methods.create_with_monomial_cumulants .. autofunction:: lbmpy.methods.create_with_monomial_cumulants
.. autofunction:: lbmpy.methods.create_with_default_polynomial_cumulants .. autofunction:: lbmpy.methods.create_with_default_polynomial_cumulants
.. autofunction:: lbmpy.methods.create_centered_cumulant_model
Class
-----
.. autoclass:: lbmpy.methods.cumulantbased.CumulantBasedLbMethod
:members:
Default Moment sets
===================
The following functions provide default sets of polynomial raw moments, central moments and cumulants
to be used in MRT-type methods.
.. autofunction:: lbmpy.methods.default_moment_sets.cascaded_moment_sets_literature
.. autofunction:: lbmpy.methods.default_moment_sets.mrt_orthogonal_modes_literature
Low-Level Method Creation Interface
===================================
Utility The following classes and factory functions constitute the lower levels of abstraction in method creation.
------- They are called from the higher-level functions described above, or, in special cases, by the user directly.
.. autofunction:: lbmpy.methods.centeredcumulant.get_default_polynomial_cumulants_for_stencil Custom method variants in population space, raw and central moment space based on the hydrodynamic Maxwellian
equilibrium may be created using either
:func:`lbmpy.methods.creationfunctions.create_with_discrete_maxwellian_equilibrium` or
:func:`create_with_continuous_maxwellian_equilibrium`.
.. autoclass:: lbmpy.methods.centeredcumulant.CenteredCumulantForceModel Methods may also be created using custom equilibrium distributions using
:func:`lbmpy.methods.creationfunctions.create_from_equilibrium`.
The desired collision space, and the transform classes defining the manner of transforming populations to that
space and back, are defined using :class:`lbmpy.enums.CollisionSpace` and :class:`lbmpy.methods.CollisionSpaceInfo`.
Collision Space Info
--------------------
.. autoclass:: lbmpy.methods.CollisionSpaceInfo
:members: :members:
Class Low-Level Creation Functions
----- ----------------------------
.. autofunction:: lbmpy.methods.creationfunctions.create_with_discrete_maxwellian_equilibrium
.. autofunction:: lbmpy.methods.creationfunctions.create_with_continuous_maxwellian_equilibrium
.. autoclass:: lbmpy.methods.centeredcumulant.CenteredCumulantBasedLbMethod .. autofunction:: lbmpy.methods.creationfunctions.create_from_equilibrium
Conserved Quantity Computation
==============================
The classes of the conserved quantity computation (CQC) submodule define an LB Method's conserved quantities and
the equations to compute them. For hydrodynamic methods, :class:`lbmpy.methods.DensityVelocityComputation` is
the typical choice. For custom methods, however, a custom CQC class might have to be created.
.. autoclass:: lbmpy.methods.AbstractConservedQuantityComputation
:members: :members:
.. autoclass:: lbmpy.methods.DensityVelocityComputation
:members:
\ No newline at end of file
*******************************************
Moment Transforms (lbmpy.moment_transforms)
*******************************************
The ``moment_transforms`` module provides an ecosystem for transformation of quantities between
lattice Boltzmann population space and other spaces of observable quantities. Currently, transforms
to raw and central moment space are available.
The common base class `lbmpy.moment_transforms.AbstractMomentTransform` defines the interface all transform classes share.
This interface, together with the fundamental principles all transforms adhere to, is explained
in the following.
Principles of Collision Space Transforms
========================================
Each class of this module implements a bijective map :math:`\mathcal{M}` from population space
to raw moment or central moment space, capable of transforming the particle distribution
function :math:`\mathbf{f}` to (almost) arbitrary user-defined moment sets.
Monomial and Polynomial Moments
-------------------------------
We discriminate *monomial* and *polynomial* moments. The monomial moments are the canonical basis of their
corresponding space. Polynomial moments are defined as linear combinations of this basis. Monomial moments are
typically expressed by exponent tuples :math:`(\alpha, \beta, \gamma)`. The monomial raw moments are defined as
.. math::
m_{\alpha \beta \gamma}
= \sum_{i = 0}^{q - 1} f_i c_{i,x}^{\alpha} c_{i,y}^{\beta} c_{i,z}^{\gamma}
and the monomial central moments are given by
.. math::
\kappa_{\alpha \beta \gamma}
= \sum_{i = 0}^{q - 1}
f_i
(c_{i,x} - u_x)^{\alpha}
(c_{i,y} - u_y)^{\beta}
(c_{i,z} - u_z)^{\gamma}.
Polynomial moments are, in turn, expressed by
polynomial expressions :math:`p` in the coordinate symbols :math:`x`, :math:`y` and :math:`z`.
An exponent tuple :math:`(\alpha, \beta, \gamma)` corresponds directly
to the mixed monomial expression :math:`x^{\alpha} y^{\beta} z^{\gamma}`. Polynomial moments are then
constructed as linear combinations of these monomials. For example, the polynomial
.. math::
p(x,y,z) = x^2 + y^2 + z^2 + 1.
defines both the polynomial raw moment
.. math::
M = m_{200} + m_{020} + m_{002}
and central moment
.. math::
K = \kappa_{200} + \kappa_{020} + \kappa_{002}.
Transformation Matrices
-----------------------
The collision space basis for an MRT LB method on a :math:`DdQq` lattice is spanned by a set of :math:`q`
polynomial quantities, given by polynomials :math:`\left( p_i \right)_{i=0, \dots, q-1}`.
Through the polynomials, a polynomialization matrix :math:`P` is defined such that
.. math::
\mathbf{M} &= P \mathbf{m} \\
\mathbf{K} &= P \mathbf{\kappa}
Both rules can also be written using matrix multiplication, such that
.. math::
\mathbf{m} = M \mathbf{f}
\qquad
\mathbf{\kappa} = K \mathbf{f}.
Further, there exists a mapping from raw to central moment space using (monomial or polynomial)
shift matrices (see `set_up_shift_matrix`) such that
.. math::
\mathbf{\kappa} = N \mathbf{m}
\qquad
\mathbf{K} = N^P \mathbf{M}.
Working with the transformation matrices, those mappings can easily be inverted.
This module provides classes that derive efficient implementations of these transformations.
Moment Aliasing
---------------
For a collision space transform to be invertible, exactly :math:`q` independent polynomial quantities of
the collision space must be chosen. In that case, since all transforms discussed here are linear, the space of
populations is isomorphic to the chosen moment space. The important word here is 'independent'. Even if :math:`q`
distinct moment polynomials are chosen, due to discrete lattice artifacts, they might not span the entire collision
space if chosen wrongly. The discrete lattice structure gives rise to *moment aliasing* effects. The most simple such
effect occurs in the monomial raw moments, where all nonzero even exponents, and likewise all odd exponents, are essentially the same.
For example, we have :math:`m_{400} = m_{200}` or :math:`m_{305} = m_{101}`. This rule holds on every direct-neighborhood
stencil. Sparse stencils, like :math:`D3Q15`, may introduce additional aliases. On the :math:`D3Q15` stencil, due to its
structure, we have also :math:`m_{112} = m_{110}` and even :math:`m_{202} = m_{220} = m_{022} = m_{222}`.
Including aliases in a set of monomial moments will lead to a non-invertible transform and is hence not possible.
In polynomials, however, aliases may occur without breaking the transform. Several established sets of polynomial
moments, like in orthogonal raw moment space MRT methods, will comprise :math:`q` polynomials :math:`\mathbf{M}` that in turn are built
out of :math:`r > q` monomial moments :math:`\mathbf{m}`. In that set of monomials, non-independent moments have to be included by definition.
Since the full transformation matrix :math:`M^P = PM` is still invertible, this is not a problem in general. If, however, the
two steps of the transform are computed separately, it becomes problematic, as the matrices :math:`M \in \mathbb{R}^{r \times q}`
and :math:`P \in \mathbb{R}^{q \times r}` are not invertible on their own.
But there is a remedy. By eliminating aliases from the moment polynomials, they can be reduced to a new set of polynomials based
on a non-aliased reduced vector of monomial moments :math:`\tilde{\mathbf{m}} \in \mathbb{R}^{q}`, expressed through the reduced
matrix :math:`\tilde{P}`.
Interfaces and Usage
====================
Construction
------------
All moment transform classes expect either a sequence of exponent tuples or a sequence of polynomials that define
the set of quantities spanning the destination space. If polynomials are given, the exponent tuples of the underlying
set of monomials are extracted automatically. Depending on the implementation, moment aliases may be eliminated in the
process, altering both the polynomials and the set of monomials.
Forward and Backward Transform
------------------------------
The core functionality of the transform classes is given by the methods ``forward_transform`` and ``backward_transform``.
They return assignment collections containing the equations to compute moments from populations, and vice versa.
Symbols Used
^^^^^^^^^^^^
Unless otherwise specified by the user, monomial and polynomial quantities occur in the transformation equations according
to the naming conventions listed in the following table:
+--------------------------------+---------------------------------------------+----------------------+
| | Monomial | Polynomial |
+--------------------------------+---------------------------------------------+----------------------+
| Pre-Collision Raw Moment | :math:`m_{\alpha \beta \gamma}` | :math:`M_{i}` |
+--------------------------------+---------------------------------------------+----------------------+
| Post-Collision Raw Moment | :math:`m_{post, \alpha \beta \gamma}` | :math:`M_{post, i}` |
+--------------------------------+---------------------------------------------+----------------------+
| Pre-Collision Central Moment | :math:`\kappa_{\alpha \beta \gamma}` | :math:`K_{i}` |
+--------------------------------+---------------------------------------------+----------------------+
| Post-Collision Central Moment | :math:`\kappa_{post, \alpha \beta \gamma}` | :math:`K_{post, i}` |
+--------------------------------+---------------------------------------------+----------------------+
These symbols are also exposed by the member properties `pre_collision_symbols`, `post_collision_symbols`,
`pre_collision_monomial_symbols` and `post_collision_monomial_symbols`.
Forward Transform
^^^^^^^^^^^^^^^^^
Implementations of the `lbmpy.moment_transforms.AbstractMomentTransform.forward_transform` method
derive equations for the transform from population to moment space, to compute pre-collision moments
from pre-collision populations. The returned ``AssignmentCollection`` has the `pre_collision_symbols`
as left-hand sides of its main assignments, computed from the given ``pdf_symbols`` and, potentially,
the macroscopic density and velocity. Depending on the implementation, the `pre_collision_monomial_symbols`
may be part of the assignment collection in the form of subexpressions.
Backward Transform
^^^^^^^^^^^^^^^^^^
Implementations of `lbmpy.moment_transforms.AbstractMomentTransform.backward_transform` contain the post-collision
polynomial quantities as free symbols of their equation right-hand sides, and compute the post-collision populations
from those. The PDF symbols are the left-hand sides of the main assignments.
Absorption of Conserved Quantity Equations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Transformations from the population space to any space of observable quantities may *absorb* the equations
defining the macroscopic quantities entering the equilibrium (typically the density :math:`\rho` and the
velocity :math:`\mathbf{u}`). This means that the ``forward_transform`` will possibly rewrite the
assignments given in the constructor argument ``conserved_quantity_equations`` to reduce
the total operation count. For example, in the transformation step from populations to
raw moments (see `lbmpy.moment_transforms.PdfsToMomentsByChimeraTransform`), :math:`\rho` can be aliased as the zeroth-order moment
:math:`m_{000}`. Assignments to the conserved quantities will then be part of the AssignmentCollection
returned by ``forward_transform`` and need not be added to the collision rule separately.
Simplification
^^^^^^^^^^^^^^
Both ``forward_transform`` and ``backward_transform`` expect a keyword argument ``simplification``
which can be used to direct simplification steps applied during the derivation of the transformation
equations. Possible values are:
- `False` or ``'none'``: No simplification is to be applied
- `True` or ``'default'``: A default simplification strategy specific to the implementation is applied.
The actual simplification steps depend strongly on the nature of the equations. They are defined by
the implementation. It is the responsibility of the implementation to select the most effective
simplification strategy.
- ``'default_with_cse'``: Same as ``'default'``, but with an additional pass of common subexpression elimination.
Working With Monomials
^^^^^^^^^^^^^^^^^^^^^^
In certain situations, we want the ``forward_transform`` to yield equations for the monomial symbols :math:`m_{\alpha \beta \gamma}`
and :math:`\kappa_{\alpha \beta \gamma}` only, and the ``backward_transform`` to return equations that also expect these symbols as input.
In this case, it is not sufficient to pass a set of monomials or exponent tuples to the constructor, as those are still treated as
polynomials internally. Instead, both transform methods expose keyword arguments ``return_monomials`` and ``start_from_monomials``, respectively.
If set to true, equations in the monomial moments are returned. They are best used *only* together with the ``exponent_tuples`` constructor argument
to have full control over the monomials. If polynomials are passed to the constructor, the behaviour of these flags is generally not well-defined,
especially in the presence of aliases.
The Transform Classes
=====================
Abstract Base Class
-------------------
.. autoclass:: lbmpy.moment_transforms.AbstractMomentTransform
:members:
Moment Space Transforms
-----------------------
.. autoclass:: lbmpy.moment_transforms.PdfsToMomentsByMatrixTransform
:members:
.. autoclass:: lbmpy.moment_transforms.PdfsToMomentsByChimeraTransform
:members:
Central Moment Space Transforms
-------------------------------
.. autoclass:: lbmpy.moment_transforms.PdfsToCentralMomentsByMatrix
:members:
.. autoclass:: lbmpy.moment_transforms.BinomialChimeraTransform
:members:
.. autoclass:: lbmpy.moment_transforms.FastCentralMomentTransform
:members:
.. autoclass:: lbmpy.moment_transforms.PdfsToCentralMomentsByShiftMatrix
:members:
Cumulant Space Transforms
-------------------------
.. autoclass:: lbmpy.moment_transforms.CentralMomentsToCumulantsByGeneratingFunc
:members:
...@@ -4,6 +4,10 @@ Tutorials ...@@ -4,6 +4,10 @@ Tutorials
All tutorials are automatically created by Jupyter Notebooks. All tutorials are automatically created by Jupyter Notebooks.
You can open the notebooks directly to play around with the code examples. You can open the notebooks directly to play around with the code examples.
=================
Basics
=================
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
...@@ -13,13 +17,68 @@ You can open the notebooks directly to play around with the code examples. ...@@ -13,13 +17,68 @@ You can open the notebooks directly to play around with the code examples.
/notebooks/03_tutorial_lbm_formulation.ipynb /notebooks/03_tutorial_lbm_formulation.ipynb
/notebooks/04_tutorial_cumulant_LBM.ipynb /notebooks/04_tutorial_cumulant_LBM.ipynb
/notebooks/05_tutorial_nondimensionalization_and_scaling.ipynb /notebooks/05_tutorial_nondimensionalization_and_scaling.ipynb
===================
Turbulence modeling
===================
.. toctree::
:maxdepth: 1
/notebooks/06_tutorial_modifying_method_smagorinsky.ipynb /notebooks/06_tutorial_modifying_method_smagorinsky.ipynb
=================
Thermal flows
=================
.. toctree::
:maxdepth: 1
/notebooks/07_tutorial_thermal_lbm.ipynb /notebooks/07_tutorial_thermal_lbm.ipynb
/notebooks/demo_thermalized_lbm.ipynb
=================
Multiphase flows
=================
.. toctree::
:maxdepth: 1
/notebooks/08_tutorial_shanchen_twophase.ipynb /notebooks/08_tutorial_shanchen_twophase.ipynb
/notebooks/09_tutorial_shanchen_twocomponent.ipynb /notebooks/09_tutorial_shanchen_twocomponent.ipynb
/notebooks/10_tutorial_conservative_allen_cahn_two_phase.ipynb
========================
Thermocapillary flows
========================
.. toctree::
:maxdepth: 1
/notebooks/12_Thermocapillary_flows_heated_channel.ipynb
/notebooks/13_Thermocapillary_flows_droplet_motion.ipynb
===================
Non Newtonian flow
===================
.. toctree::
:maxdepth: 1
/notebooks/11_tutorial_Non_Newtonian_Flow.ipynb
=================
Diverse
=================
.. toctree::
:maxdepth: 1
/notebooks/demo_stencils.ipynb /notebooks/demo_stencils.ipynb
/notebooks/demo_streaming_patterns.ipynb
/notebooks/demo_create_method_from_scratch.ipynb /notebooks/demo_create_method_from_scratch.ipynb
/notebooks/demo_moments_cumulants_and_maxwellian_equilibrium.ipynb /notebooks/demo_moments_cumulants_and_maxwellian_equilibrium.ipynb
/notebooks/demo_automatic_chapman_enskog_analysis.ipynb /notebooks/demo_automatic_chapman_enskog_analysis.ipynb
/notebooks/demo_thermalized_lbm.ipynb /notebooks/demo_interpolation_boundary_conditions.ipynb
/notebooks/demo_shallow_water_lbm.ipynb
/notebooks/demo_theoretical_background_generic_equilibrium_construction.ipynb /notebooks/demo_theoretical_background_generic_equilibrium_construction.ipynb
...@@ -2,4 +2,4 @@ Bibliography ...@@ -2,4 +2,4 @@ Bibliography
------------ ------------
.. bibliography:: lbmpy.bib .. bibliography:: lbmpy.bib
:cited: :all:
\ No newline at end of file \ No newline at end of file
from .creationfunctions import create_lb_ast, create_lb_collision_rule, create_lb_function,\
create_lb_method, create_lb_method_from_existing, create_lb_update_rule
from .lbstep import LatticeBoltzmannStep
from .macroscopic_value_kernels import pdf_initialization_assignments, macroscopic_values_getter,\
compile_macroscopic_values_getter, compile_macroscopic_values_setter, create_advanced_velocity_setter_collision_rule
from .maxwellian_equilibrium import get_weights
from .relaxationrates import relaxation_rate_from_lattice_viscosity, lattice_viscosity_from_relaxation_rate,\
relaxation_rate_from_magic_number
from .scenarios import create_lid_driven_cavity, create_fully_periodic_flow
from .stencils import get_stencil
# Public API of the lbmpy top-level package: creation pipeline entry points,
# the scenario/stepping helpers and commonly used utility functions.
__all__ = ['create_lb_ast', 'create_lb_collision_rule', 'create_lb_function', 'create_lb_method',
           'create_lb_method_from_existing', 'create_lb_update_rule',
           'LatticeBoltzmannStep',
           'pdf_initialization_assignments', 'macroscopic_values_getter', 'compile_macroscopic_values_getter',
           'compile_macroscopic_values_setter', 'create_advanced_velocity_setter_collision_rule',
           'get_weights',
           'relaxation_rate_from_lattice_viscosity', 'lattice_viscosity_from_relaxation_rate',
           'relaxation_rate_from_magic_number',
           'create_lid_driven_cavity', 'create_fully_periodic_flow',
           'get_stencil']
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from lbmpy.boundaries.boundaryconditions import (
UBB, FixedDensity, DiffusionDirichlet, SimpleExtrapolationOutflow,
ExtrapolationOutflow, NeumannByCopy, NoSlip, StreamInConstant)
from lbmpy.boundaries.boundaryhandling import LatticeBoltzmannBoundaryHandling
# Public API of the lbmpy.boundaries subpackage: boundary condition classes
# plus the boundary handling that applies them to a lattice Boltzmann step.
__all__ = ['NoSlip', 'UBB', 'SimpleExtrapolationOutflow', 'ExtrapolationOutflow',
           'FixedDensity', 'DiffusionDirichlet', 'NeumannByCopy',
           'LatticeBoltzmannBoundaryHandling', 'StreamInConstant']
r"""
Creating LBM kernels
====================
The following list describes common parameters for the creation functions. They have to be passed as keyword parameters.
Method parameters
^^^^^^^^^^^^^^^^^
General:
- ``stencil='D2Q9'``: stencil name e.g. 'D2Q9', 'D3Q19'. See :func:`pystencils.stencils.get_stencil` for details
- ``method='srt'``: name of lattice Boltzmann method. This determines the selection and relaxation pattern of
moments/cumulants, i.e. which moment/cumulant basis is chosen, and which of the basis vectors are relaxed together
- ``srt``: single relaxation time (:func:`lbmpy.methods.create_srt`)
- ``trt``: two relaxation time, first relaxation rate is for even moments and determines the viscosity (as in SRT),
the second relaxation rate is used for relaxing odd moments, and controls the bulk viscosity.
(:func:`lbmpy.methods.create_trt`)
- ``mrt``: orthogonal multi relaxation time model, relaxation rates are used in this order for :
shear modes, bulk modes, 3rd order modes, 4th order modes, etc.
Requires also a parameter 'weighted' that should be True if the moments should be orthogonal w.r.t. weighted
scalar product using the lattice weights. If `False` the normal scalar product is used.
For custom definition of the method, a 'nested_moments' can be passed.
For example: [ [1, x, y], [x*y, x**2, y**2], ... ] that groups all moments together that should be relaxed with
the same rate. Literature values of this list can be obtained through
:func:`lbmpy.methods.creationfunctions.mrt_orthogonal_modes_literature`.
See also :func:`lbmpy.methods.create_mrt_orthogonal`
- ``mrt_raw``: non-orthogonal MRT where all relaxation rates can be specified independently i.e. there are as many
relaxation rates as stencil entries. Look at the generated method in Jupyter to see which moment<->relaxation rate
mapping is used (:func:`lbmpy.methods.create_mrt_raw`)
- ``trt-kbc-n<N>`` where <N> is 1,2,3 or 4. Special two-relaxation rate method. This is not the entropic method
yet, only the relaxation pattern. To get the entropic method, see parameters below!
(:func:`lbmpy.methods.create_trt_kbc`)
- ``cumulant``: cumulant-based lb method (:func:`lbmpy.methods.create_with_default_polynomial_cumulants`) which
relaxes groups of polynomial cumulants chosen to optimize rotational invariance.
- ``monomial_cumulant``: cumulant-based lb method (:func:`lbmpy.methods.create_with_monomial_cumulants`) which
relaxes monomial cumulants.
- ``relaxation_rates``: sequence of relaxation rates, number depends on selected method. If you specify more rates than
method needs, the additional rates are ignored. For SRT, TRT and polynomial cumulant models it is possible to define
a single ``relaxation_rate`` instead of a list. The second rate for TRT is then determined via magic number. For the
cumulant model, it sets only the relaxation rate corresponding to shear viscosity, setting all others to unity.
- ``compressible=False``: affects the selection of equilibrium moments. Both options approximate the *incompressible*
Navier Stokes Equations. However when chosen as False, the approximation is better, the standard LBM derivation is
compressible.
- ``equilibrium_order=2``: order in velocity, at which the equilibrium moment/cumulant approximation is
truncated. Order 2 is sufficient to approximate Navier-Stokes
- ``force_model=None``: possible values: ``None``, ``'simple'``, ``'luo'``, ``'guo'``, ``'buick'``, ``'schiller'``,
``'cumulant'`` or an instance of a class implementing the same methods as the classes in :mod:`lbmpy.forcemodels`.
For details, see :mod:`lbmpy.forcemodels`
- ``force=(0,0,0)``: either constant force or a symbolic expression depending on field value
- ``maxwellian_moments=True``: way to compute equilibrium moments/cumulants, if False the standard
discretized LBM equilibrium is used, otherwise the equilibrium moments are computed from the continuous Maxwellian.
This only makes a difference if sparse stencils are used; e.g. D2Q9 and D3Q27 are not affected, D3Q19 and D3Q15 are
- ``initial_velocity=None``: initial velocity in domain, can either be a tuple (x,y,z) velocity to set a constant
velocity everywhere, or a numpy array with the same size of the domain, with a last coordinate of shape dim to set
velocities on cell level
- ``output={}``: a dictionary mapping macroscopic quantities e.g. the strings 'density' and 'velocity' to pystencils
fields. In each timestep the corresponding quantities are written to the given fields.
- ``velocity_input``: symbolic field where the velocities are read from (for advection diffusion LBM)
- ``density_input``: symbolic field or field access where to read density from. When passing this parameter,
``velocity_input`` has to be passed as well
- ``kernel_type``: supported values: 'default_stream_collide' (default), 'collide_only', 'stream_pull_only'.
With 'default_stream_collide', streaming pattern and even/odd time-step (for in-place patterns) can be specified
by the ``streaming_pattern`` and ``timestep`` arguments. For backwards compatibility, ``kernel_type`` also accepts
'stream_pull_collide', 'collide_stream_push', 'esotwist_even', 'esotwist_odd', 'aa_even' and 'aa_odd' for selection
of the streaming pattern.
- ``streaming_pattern``: The streaming pattern to be used with a 'default_stream_collide' kernel. Accepted values are
'pull', 'push', 'aa' and 'esotwist'.
- ``timestep``: Timestep modulus for the streaming pattern. For two-fields patterns, this argument is irrelevant and
by default set to ``Timestep.BOTH``. For in-place patterns, ``Timestep.EVEN`` or ``Timestep.ODD`` must be specified.
Entropic methods:
- ``entropic=False``: In case there are two distinct relaxation rate in a method, one of them (usually the one, not
determining the viscosity) can be automatically chosen w.r.t an entropy condition. For details see
:mod:`lbmpy.methods.momentbased.entropic`
- ``entropic_newton_iterations=None``: For moment methods the entropy optimum can be calculated in closed form.
For cumulant methods this is not possible, in that case it is computed using Newton iterations. This parameter can be
used to force Newton iterations and specify how many should be done
- ``omega_output_field=None``: you can pass a pystencils Field here, where the calculated free relaxation rate of
an entropic or Smagorinsky method is written to
Cumulant methods:
- ``galilean_correction=False``: Special correction for D3Q27 cumulant LBMs. For Details see
:mod:`lbmpy.methods.centeredcumulant.galilean_correction`
LES methods:
- ``smagorinsky=False``: set to Smagorinsky constant to activate turbulence model, ``omega_output_field`` can be set to
write out adapted relaxation rates
Fluctuating LB:
- ``fluctuating``: enables fluctuating lattice Boltzmann by randomizing collision process.
Pass dictionary with parameters to ``lbmpy.fluctuatinglb.add_fluctuations_to_collision_rule``
Optimization Parameters
^^^^^^^^^^^^^^^^^^^^^^^
Simplifications / Transformations:
- ``cse_pdfs=False``: run common subexpression elimination for opposing stencil directions
- ``cse_global=False``: run common subexpression elimination after all other simplifications have been executed
- ``split=False``: split innermost loop, to handle only 2 directions per loop. This reduces the number of parallel
load/store streams and thus speeds up the kernel on most architectures
- ``builtin_periodicity=(False,False,False)``: instead of handling periodicity by copying ghost layers, the periodicity
is built into the kernel. This parameters specifies if the domain is periodic in (x,y,z) direction. Even if the
periodicity is built into the kernel, the fields have one ghost layer to be consistent with other functions.
- ``pre_simplification=True``: Simplifications applied during the derivation of the collision rule for cumulant LBMs.
For details see :mod:`lbmpy.methods.momentbased.moment_transforms`.
Field size information:
- ``pdf_arr=None``: pass a numpy array here to create kernels with fixed size and create the loop nest according
to layout of this array
- ``field_size=None``: create kernel for fixed field size
- ``field_layout='c'``: ``'c'`` or ``'numpy'`` for standard numpy layout, ``'reverse_numpy'`` or ``'f'`` for fortran
layout, this does not apply when pdf_arr was given, then the same layout as pdf_arr is used
- ``symbolic_field``: pystencils field for source (pdf field that is read)
- ``symbolic_temporary_field``: pystencils field for temporary (pdf field that is written in stream, or stream-collide)
CPU:
- ``openmp=True``: Can be a boolean to turn multi threading on/off, or an integer
specifying the number of threads. If True is specified OpenMP chooses the number of threads
- ``vectorization=False``: controls manual vectorization using SIMD intrinsics. If True, default vectorization settings
are used. Alternatively a dictionary with parameters for the vectorize function can be passed. For example
``{'instruction_set': 'avx', 'assume_aligned': True, 'nontemporal': True}``. Nontemporal stores are only used if
assume_aligned is also activated.
GPU:
- ``target='cpu'``: ``'cpu'`` or ``'gpu'``, last option requires a CUDA enabled graphics card
and installed *pycuda* package
- ``gpu_indexing='block'``: determines mapping of CUDA threads to cells. Can be either ``'block'`` or ``'line'``
- ``gpu_indexing_params='block'``: parameters passed to init function of gpu indexing.
For ``'block'`` indexing one can e.g. specify the block size ``{'block_size' : (128, 4, 1)}``
Other:
- ``openmp=True``: only applicable for cpu simulations. Can be a boolean to turn multi threading on/off, or an integer
specifying the number of threads. If True is specified OpenMP chooses the number of threads
- ``double_precision=True``: by default simulations run with double precision floating point numbers, by setting this
parameter to False, single precision is used, which is much faster, especially on GPUs
Terminology and creation pipeline
---------------------------------
Kernel functions are created in three steps:
1. *Method*:
the method defines the collision process. Currently there are two big categories:
moment and cumulant based methods. A method defines how each moment or cumulant is relaxed by
storing the equilibrium value and the relaxation rate for each moment/cumulant.
2. *Collision/Update Rule*:
Methods can generate a "collision rule" which is an equation collection that define the
post collision values as a function of the pre-collision values. On these equation collection
simplifications are applied to reduce the number of floating point operations.
At this stage an entropic optimization step can also be added to determine one relaxation rate by an
entropy condition.
Then a streaming rule is added which transforms the collision rule into an update rule.
The streaming step depends on the pdf storage (source/destination, AABB pattern, EsoTwist).
Currently only the simple source/destination pattern is supported.
3. *AST*:
The abstract syntax tree describes the structure of the kernel, including loops and conditionals.
The ast can be modified e.g. to add OpenMP pragmas, reorder loops or apply other optimizations.
4. *Function*:
This step compiles the AST into an executable function, either for CPU or GPUs. This function
behaves like a normal Python function and runs one LBM time step.
The function :func:`create_lb_function` runs the whole pipeline, the other functions in this module
execute this pipeline only up to a certain step. Each function optionally also takes the result of the previous step.
For example, to modify the AST one can run::
ast = create_lb_ast(...)
# modify ast here
func = create_lb_function(ast=ast, ...)
"""
from collections import OrderedDict
from copy import copy
import sympy as sp
import lbmpy.forcemodels as forcemodels
import lbmpy.methods.centeredcumulant.force_model as cumulant_force_model
from lbmpy.fieldaccess import CollideOnlyInplaceAccessor, PdfFieldAccessor, PeriodicTwoFieldsAccessor
from lbmpy.fluctuatinglb import add_fluctuations_to_collision_rule
from lbmpy.methods import (create_mrt_orthogonal, create_mrt_raw, create_srt, create_trt, create_trt_kbc)
from lbmpy.methods.abstractlbmethod import RelaxationInfo
from lbmpy.methods.centeredcumulant import CenteredCumulantBasedLbMethod
from lbmpy.methods.momentbased.moment_transforms import FastCentralMomentTransform
from lbmpy.methods.centeredcumulant.cumulant_transform import CentralMomentsToCumulantsByGeneratingFunc
from lbmpy.methods.creationfunctions import (
create_with_monomial_cumulants, create_with_polynomial_cumulants, create_with_default_polynomial_cumulants)
from lbmpy.methods.creationfunctions import create_generic_mrt
from lbmpy.methods.momentbased.entropic import add_entropy_condition, add_iterative_entropy_condition
from lbmpy.methods.momentbased.entropic_eq_srt import create_srt_entropic
from lbmpy.moments import get_order
from lbmpy.relaxationrates import relaxation_rate_from_magic_number
from lbmpy.simplificationfactory import create_simplification_strategy
from lbmpy.stencils import get_stencil
from lbmpy.turbulence_models import add_smagorinsky_model
from lbmpy.updatekernels import create_lbm_kernel, create_stream_pull_with_output_kernel
from lbmpy.advanced_streaming.utility import Timestep, get_accessor
from pystencils import Assignment, AssignmentCollection, create_kernel
from pystencils.cache import disk_cache_no_fallback
from pystencils.data_types import collate_types
from pystencils.field import Field, get_layout_of_array
from pystencils.simp import sympy_cse
from pystencils.stencil import have_same_entries
def create_lb_function(ast=None, optimization=None, **kwargs):
    """Creates a Python function for the LB method.

    Runs the full creation pipeline (method -> collision rule -> update rule
    -> AST -> compiled kernel) unless a pre-built ``ast`` is supplied, in
    which case only the compilation step is performed.

    Args:
        ast: optional pystencils AST; created from the other parameters if ``None``
        optimization: dictionary of optimization parameters, see module docstring
        **kwargs: method parameters, see module docstring

    Returns:
        compiled kernel function with ``method``, ``update_rule`` and ``ast``
        attributes attached for later introspection
    """
    params, opt_params = update_with_default_parameters(kwargs, optimization)
    if ast is None:
        params['optimization'] = opt_params
        ast = create_lb_ast(**params)
    kernel = ast.compile()
    # expose the intermediate pipeline results on the returned function
    kernel.method = ast.method
    kernel.update_rule = ast.update_rule
    kernel.ast = ast
    return kernel
def create_lb_ast(update_rule=None, optimization=None, **kwargs):
    """Creates a pystencils AST for the LB method.

    Args:
        update_rule: optional pre-built update rule; created from the other
                     parameters if ``None``
        optimization: dictionary of optimization parameters, see module docstring
        **kwargs: method parameters, see module docstring

    Returns:
        pystencils AST with ``method`` and ``update_rule`` attributes attached
    """
    params, opt_params = update_with_default_parameters(kwargs, optimization)
    if update_rule is None:
        # Forward the fully defaulted optimization dict, consistent with
        # create_lb_function (previously the raw ``optimization`` argument was
        # forwarded, which skipped the defaults merged in above).
        params['optimization'] = opt_params
        update_rule = create_lb_update_rule(**params)
    # collect the dtypes of all accessed fields to pick a common kernel data type
    field_types = set(fa.field.dtype for fa in update_rule.defined_symbols if isinstance(fa, Field.Access))
    res = create_kernel(update_rule, target=opt_params['target'], data_type=collate_types(field_types),
                        cpu_openmp=opt_params['openmp'], cpu_vectorize_info=opt_params['vectorization'],
                        gpu_indexing=opt_params['gpu_indexing'], gpu_indexing_params=opt_params['gpu_indexing_params'],
                        ghost_layers=1)
    res.method = update_rule.method
    res.update_rule = update_rule
    return res
@disk_cache_no_fallback
def create_lb_update_rule(collision_rule=None, optimization=None, **kwargs):
    """Creates an update rule (list of Assignments) for a LB method that describes a full sweep.

    Combines a collision rule with a streaming step: pdfs are read from a
    source field and written to a destination field according to the selected
    ``kernel_type`` / ``streaming_pattern``.

    Args:
        collision_rule: optional pre-built collision rule; created from the
                        other parameters if ``None``
        optimization: dictionary of optimization parameters, see module docstring
        **kwargs: method parameters, see module docstring
    """
    params, opt_params = update_with_default_parameters(kwargs, optimization)
    if collision_rule is None:
        collision_rule = create_lb_collision_rule(**params, optimization=opt_params)
    lb_method = collision_rule.method
    field_data_type = 'float64' if opt_params['double_precision'] else 'float32'
    # q: number of stencil directions = index dimension of the pdf fields
    q = len(collision_rule.method.stencil)
    # Source pdf field: either user-supplied, fixed-size, or generic-size.
    if opt_params['symbolic_field'] is not None:
        src_field = opt_params['symbolic_field']
    elif opt_params['field_size']:
        # +2 per spatial dimension: one ghost layer on each side of the domain
        field_size = [s + 2 for s in opt_params['field_size']] + [q]
        src_field = Field.create_fixed_size(params['field_name'], field_size, index_dimensions=1,
                                            layout=opt_params['field_layout'], dtype=field_data_type)
    else:
        src_field = Field.create_generic(params['field_name'], spatial_dimensions=collision_rule.method.dim,
                                         index_shape=(q,), layout=opt_params['field_layout'], dtype=field_data_type)
    # Destination field defaults to a same-shape copy of the source field.
    if opt_params['symbolic_temporary_field'] is not None:
        dst_field = opt_params['symbolic_temporary_field']
    else:
        dst_field = src_field.new_field_with_different_name(params['temporary_field_name'])
    kernel_type = params['kernel_type']
    if kernel_type == 'stream_pull_only':
        # pure streaming kernel (no collision), optionally writing macroscopic output
        return create_stream_pull_with_output_kernel(lb_method, src_field, dst_field, params['output'])
    else:
        # Select the pdf field accessor that implements the streaming pattern.
        if kernel_type == 'default_stream_collide':
            if params['streaming_pattern'] == 'pull' and any(opt_params['builtin_periodicity']):
                # periodicity handled inside the kernel instead of via ghost layer copies
                accessor = PeriodicTwoFieldsAccessor(opt_params['builtin_periodicity'], ghost_layers=1)
            else:
                accessor = get_accessor(params['streaming_pattern'], params['timestep'])
        elif kernel_type == 'collide_only':
            accessor = CollideOnlyInplaceAccessor
        elif isinstance(kernel_type, PdfFieldAccessor):
            # a custom accessor instance may be passed directly
            accessor = kernel_type
        else:
            raise ValueError("Invalid value of parameter 'kernel_type'", params['kernel_type'])
        return create_lbm_kernel(collision_rule, src_field, dst_field, accessor)
@disk_cache_no_fallback
def create_lb_collision_rule(lb_method=None, optimization=None, **kwargs):
    """Creates a collision rule (list of Assignments) for a LB method describing the collision operator (no stream).

    Optionally adds entropic/Smagorinsky relaxation rate adaptation,
    fluctuations, macroscopic output equations and simplification passes.

    Args:
        lb_method: optional pre-built method; created from the other parameters if ``None``
        optimization: dictionary of optimization parameters, see module docstring
        **kwargs: method parameters, see module docstring
    """
    params, opt_params = update_with_default_parameters(kwargs, optimization)
    if lb_method is None:
        lb_method = create_lb_method(**params)
    split_inner_loop = 'split' in opt_params and opt_params['split']
    cqc = lb_method.conserved_quantity_computation
    rho_in = params['density_input']
    u_in = params['velocity_input']
    # fields given as input are converted to their symbolic center accesses
    if u_in is not None and isinstance(u_in, Field):
        u_in = u_in.center_vector
    if rho_in is not None and isinstance(rho_in, Field):
        rho_in = rho_in.center
    pre_simplification = opt_params['pre_simplification']
    if u_in is not None:
        # Override the conserved quantity equations: density either from the
        # given input or as the sum of pre-collision pdfs, velocity from u_in.
        density_rhs = sum(lb_method.pre_collision_pdf_symbols) if rho_in is None else rho_in
        eqs = [Assignment(cqc.zeroth_order_moment_symbol, density_rhs)]
        eqs += [Assignment(u_sym, u_in[i]) for i, u_sym in enumerate(cqc.first_order_moment_symbols)]
        eqs = AssignmentCollection(eqs, [])
        collision_rule = lb_method.get_collision_rule(conserved_quantity_equations=eqs,
                                                      pre_simplification=pre_simplification)
    elif u_in is None and rho_in is not None:
        raise ValueError("When setting 'density_input' parameter, 'velocity_input' has to be specified as well.")
    else:
        collision_rule = lb_method.get_collision_rule(pre_simplification=pre_simplification)
    # Entropic and Smagorinsky models both adapt a relaxation rate and are
    # mutually exclusive.
    if params['entropic']:
        if params['smagorinsky']:
            raise ValueError("Choose either entropic or smagorinsky")
        if params['entropic_newton_iterations']:
            # bool True selects a default of 3 Newton iterations
            if isinstance(params['entropic_newton_iterations'], bool):
                iterations = 3
            else:
                iterations = params['entropic_newton_iterations']
            collision_rule = add_iterative_entropy_condition(collision_rule, newton_iterations=iterations,
                                                             omega_output_field=params['omega_output_field'])
        else:
            collision_rule = add_entropy_condition(collision_rule, omega_output_field=params['omega_output_field'])
    elif params['smagorinsky']:
        # bool True selects a default Smagorinsky constant of 0.12
        smagorinsky_constant = 0.12 if params['smagorinsky'] is True else params['smagorinsky']
        collision_rule = add_smagorinsky_model(collision_rule, smagorinsky_constant,
                                               omega_output_field=params['omega_output_field'])
        if 'split_groups' in collision_rule.simplification_hints:
            # make the adapted omega available to the loop-splitting pass
            collision_rule.simplification_hints['split_groups'][0].append(sp.Symbol("smagorinsky_omega"))
    if params['output']:
        # append assignments writing macroscopic quantities to output fields
        cqc = lb_method.conserved_quantity_computation
        output_eqs = cqc.output_equations_from_pdfs(lb_method.pre_collision_pdf_symbols, params['output'])
        collision_rule = collision_rule.new_merged(output_eqs)
    if opt_params['simplification'] == 'auto':
        simplification = create_simplification_strategy(lb_method, split_inner_loop=split_inner_loop)
    else:
        simplification = opt_params['simplification']
    collision_rule = simplification(collision_rule)
    if params['fluctuating']:
        # randomize the collision process; modifies collision_rule in place
        add_fluctuations_to_collision_rule(collision_rule, **params['fluctuating'])
    cse_pdfs = False if 'cse_pdfs' not in opt_params else opt_params['cse_pdfs']
    cse_global = False if 'cse_global' not in opt_params else opt_params['cse_global']
    if cse_pdfs:
        # local import to avoid a circular dependency at module load time
        from lbmpy.methods.momentbased.momentbasedsimplifications import cse_in_opposing_directions
        collision_rule = cse_in_opposing_directions(collision_rule)
    if cse_global:
        collision_rule = sympy_cse(collision_rule)
    return collision_rule
def create_lb_method(**params):
    """Creates a LB method, defined by moments/cumulants for collision space, equilibrium and relaxation rates.

    Keyword arguments are the method parameters described in the module
    docstring (``method``, ``stencil``, ``relaxation_rates``, ``force``, ...).

    Returns:
        lb method instance matching the requested ``method`` name

    Raises:
        ValueError: for an unknown ``method`` name or too few relaxation rates
        NotImplementedError: for KBC methods on stencils other than D2Q9/D3Q27
    """
    params, _ = update_with_default_parameters(params, {}, fail_on_unknown_parameter=False)

    method_name = params['method']
    relaxation_rates = params['relaxation_rates']

    # the stencil may be given by name or directly as a sequence of directions
    if isinstance(params['stencil'], (tuple, list)):
        stencil_entries = params['stencil']
    else:
        stencil_entries = get_stencil(params['stencil'])
    dim = len(stencil_entries[0])

    # a force given as a pystencils field is expanded to per-dimension accesses
    if isinstance(params['force'], Field):
        params['force'] = tuple(params['force'](i) for i in range(dim))

    force_is_zero = all(f_i == 0 for f_i in params['force'])

    no_force_model = 'force_model' not in params or params['force_model'] == 'none' or params['force_model'] is None
    if not force_is_zero and no_force_model:
        # choose a default force model matching the collision space
        params['force_model'] = 'cumulant' if method_name.lower().endswith('cumulant') else 'schiller'

    if 'force_model' in params:
        force_model = force_model_from_string(params['force_model'], params['force'][:dim])
    else:
        force_model = None

    common_params = {
        'compressible': params['compressible'],
        'equilibrium_order': params['equilibrium_order'],
        'force_model': force_model,
        'maxwellian_moments': params['maxwellian_moments'],
        'c_s_sq': params['c_s_sq'],
    }
    cumulant_params = {
        'equilibrium_order': params['equilibrium_order'],
        'force_model': force_model,
        'c_s_sq': params['c_s_sq'],
        'galilean_correction': params['galilean_correction'],
        'central_moment_transform_class': params['central_moment_transform_class'],
        'cumulant_transform_class': params['cumulant_transform_class'],
    }

    if method_name.lower() == 'srt':
        assert len(relaxation_rates) >= 1, "Not enough relaxation rates"
        method = create_srt(stencil_entries, relaxation_rates[0], **common_params)
    elif method_name.lower() == 'trt':
        assert len(relaxation_rates) >= 2, "Not enough relaxation rates"
        method = create_trt(stencil_entries, relaxation_rates[0], relaxation_rates[1], **common_params)
    elif method_name.lower() == 'mrt':
        # mutable counter so the closure below can hand out rates in order
        next_relaxation_rate = [0]

        def relaxation_rate_getter(moments):
            """Assigns the next user-given rate to each moment group; groups of
            order < 2 get rate 0 (or the first rate for entropic methods)."""
            try:
                if all(get_order(m) < 2 for m in moments):
                    if params['entropic']:
                        return relaxation_rates[0]
                    else:
                        return 0
                res = relaxation_rates[next_relaxation_rate[0]]
                next_relaxation_rate[0] += 1
            except IndexError:
                raise ValueError("Too few relaxation rates specified")
            return res
        weighted = params.get('weighted', True)
        nested_moments = params.get('nested_moments', None)
        method = create_mrt_orthogonal(stencil_entries, relaxation_rate_getter, weighted=weighted,
                                       nested_moments=nested_moments, **common_params)
    elif method_name.lower() == 'mrt_raw':
        method = create_mrt_raw(stencil_entries, relaxation_rates, **common_params)
    elif method_name.lower().startswith('trt-kbc-n'):
        # KBC methods are only defined for the full D2Q9 / D3Q27 stencils
        if have_same_entries(stencil_entries, get_stencil("D2Q9")):
            dim = 2
        elif have_same_entries(stencil_entries, get_stencil("D3Q27")):
            dim = 3
        else:
            raise NotImplementedError("KBC type TRT methods can only be constructed for D2Q9 and D3Q27 stencils")
        method_nr = method_name[-1]
        method = create_trt_kbc(dim, relaxation_rates[0], relaxation_rates[1], 'KBC-N' + method_nr, **common_params)
    elif method_name.lower() == 'entropic-srt':
        method = create_srt_entropic(stencil_entries, relaxation_rates[0], force_model, params['compressible'])
    elif method_name.lower() == 'cumulant':
        nested_moments = params.get('nested_moments', None)
        if nested_moments is not None:
            method = create_with_polynomial_cumulants(
                stencil_entries, relaxation_rates, nested_moments, **cumulant_params)
        else:
            method = create_with_default_polynomial_cumulants(stencil_entries, relaxation_rates, **cumulant_params)
    elif method_name.lower() == 'monomial_cumulant':
        method = create_with_monomial_cumulants(stencil_entries, relaxation_rates, **cumulant_params)
    else:
        raise ValueError("Unknown method %s" % (method_name,))

    return method
def create_lb_method_from_existing(method, modification_function):
    """Creates a new method based on an existing method by modifying its collision table.

    Args:
        method: old method
        modification_function: function receiving (moment, equilibrium_value, relaxation_rate) as arguments,
                               i.e. one row of the relaxation table, returning a modified version
    """
    compressible = method.conserved_quantity_computation.compressible
    if isinstance(method, CenteredCumulantBasedLbMethod):
        # cumulant-based methods: rebuild the cumulant -> RelaxationInfo mapping row by row
        rows = zip(method.cumulants, method.cumulant_equilibrium_values, method.relaxation_rates)
        rr_dict = OrderedDict()
        for cumulant, eq_value, rr in (modification_function(c, eq, w) for c, eq, w in rows):
            rr_dict[sp.sympify(cumulant)] = RelaxationInfo(eq_value, rr)
        return CenteredCumulantBasedLbMethod(method.stencil, rr_dict, method.conserved_quantity_computation,
                                             method.force_model,
                                             galilean_correction=method.galilean_correction,
                                             central_moment_transform_class=method.central_moment_transform_class,
                                             cumulant_transform_class=method.cumulant_transform_class)
    else:
        # moment-based methods: feed the modified relaxation table into a generic MRT
        rows = zip(method.moments, method.moment_equilibrium_values, method.relaxation_rates)
        relaxation_table = (modification_function(m, eq, rr) for m, eq, rr in rows)
        return create_generic_mrt(method.stencil, relaxation_table, compressible, method.force_model)
# ----------------------------------------------------------------------------------------------------------------------
def force_model_from_string(force_model_name, force_values):
    """Resolves a force model instance from its name.

    Args:
        force_model_name: case-insensitive name of the force model, or an already constructed
                          force model object, which is passed through unchanged
        force_values: force vector handed to the force model constructor

    Returns:
        a force model instance, or None for the name 'none'

    Raises:
        ValueError: if the name matches no known force model
    """
    if type(force_model_name) is not str:
        # already a force model instance - nothing to resolve
        return force_model_name
    # FIX: normalize case once; previously only the dict lookup was case-insensitive,
    # so 'None'/'NONE' fell through to the "unknown force model" error
    name = force_model_name.lower()
    if name == 'none':
        return None
    force_model_dict = {
        'simple': forcemodels.Simple,
        'luo': forcemodels.Luo,
        'guo': forcemodels.Guo,
        'buick': forcemodels.Buick,
        'silva': forcemodels.Buick,  # NOTE(review): 'silva' maps to Buick here - confirm intended
        'edm': forcemodels.EDM,
        'schiller': forcemodels.Schiller,
        'cumulant': cumulant_force_model.CenteredCumulantForceModel
    }
    if name not in force_model_dict:
        raise ValueError("Unknown force model %s" % (force_model_name,))
    return force_model_dict[name](force_values)
def switch_to_symbolic_relaxation_rates_for_omega_adapting_methods(method_parameters, kernel_params, force=False):
    """
    For entropic kernels the relaxation rate has to be a variable. If a constant was passed a
    new dummy variable is inserted and the value of this variable is later on passed to the kernel
    """
    if not (method_parameters['entropic'] or method_parameters['smagorinsky'] or force):
        return
    value_to_symbol_map = {}
    symbolic_rates = []
    for rate in method_parameters['relaxation_rates']:
        if isinstance(rate, sp.Symbol):
            # already symbolic - keep as is
            symbolic_rates.append(rate)
        else:
            # reuse one dummy symbol per distinct constant value
            if rate not in value_to_symbol_map:
                value_to_symbol_map[rate] = sp.Dummy()
            dummy = value_to_symbol_map[rate]
            symbolic_rates.append(dummy)
            kernel_params[dummy.name] = rate
    if len(symbolic_rates) < 2:
        # pad with an unused dummy so at least two rates are available
        symbolic_rates.append(sp.Dummy())
    method_parameters['relaxation_rates'] = symbolic_rates
def update_with_default_parameters(params, opt_params=None, fail_on_unknown_parameter=True):
    """Completes method and optimization parameter dicts with default values.

    Args:
        params: dict of method parameters; missing keys are filled in from the defaults below.
                NOTE: this dict may be modified in place by the backwards-compatibility handling.
        opt_params: dict of optimization parameters; None is treated as an empty dict
        fail_on_unknown_parameter: if True, a ValueError is raised for keys that are not
                                   recognized method or optimization parameters

    Returns:
        tuple (params_result, opt_params_result) of new, completed dictionaries
    """
    opt_params = opt_params if opt_params is not None else {}
    # all recognized method parameters and their default values
    default_method_description = {
        'stencil': 'D2Q9',
        'method': 'srt',  # can be srt, trt, mrt or cumulant
        'relaxation_rates': None,
        'compressible': False,
        'equilibrium_order': 2,
        'c_s_sq': sp.Rational(1, 3),
        'weighted': True,
        'nested_moments': None,
        'force_model': 'none',
        'force': (0, 0, 0),
        'maxwellian_moments': True,
        'initial_velocity': None,
        'galilean_correction': False,  # only available for D3Q27 cumulant methods
        'central_moment_transform_class': FastCentralMomentTransform,
        'cumulant_transform_class': CentralMomentsToCumulantsByGeneratingFunc,
        'entropic': False,
        'entropic_newton_iterations': None,
        'omega_output_field': None,
        'smagorinsky': False,
        'fluctuating': False,
        'temperature': None,
        'output': {},
        'velocity_input': None,
        'density_input': None,
        'kernel_type': 'default_stream_collide',
        'streaming_pattern': 'pull',
        'timestep': Timestep.BOTH,
        'field_name': 'src',
        'temporary_field_name': 'dst',
        'lb_method': None,
        'collision_rule': None,
        'update_rule': None,
        'ast': None,
    }
    # all recognized optimization parameters and their default values
    default_optimization_description = {
        'cse_pdfs': False,
        'cse_global': False,
        'simplification': 'auto',
        'pre_simplification': True,
        'split': False,
        'field_size': None,
        'field_layout': 'fzyx',  # can be 'numpy' (='c'), 'reverse_numpy' (='f'), 'fzyx', 'zyxf'
        'symbolic_field': None,
        'symbolic_temporary_field': None,
        'target': 'cpu',
        'openmp': False,
        'double_precision': True,
        'gpu_indexing': 'block',
        'gpu_indexing_params': {},
        'vectorization': None,
        'builtin_periodicity': (False, False, False),
    }
    # deprecated scalar 'relaxation_rate' is translated into a 'relaxation_rates' list;
    # for two-relaxation-time setups the second rate is derived via the magic number
    if 'relaxation_rate' in params:
        if 'relaxation_rates' not in params:
            if 'entropic' in params and params['entropic']:
                params['relaxation_rates'] = [params['relaxation_rate']]
            elif 'method' in params and params['method'].endswith('cumulant'):
                params['relaxation_rates'] = [params['relaxation_rate']]
            else:
                params['relaxation_rates'] = [params['relaxation_rate'],
                                              relaxation_rate_from_magic_number(params['relaxation_rate'])]
        del params['relaxation_rate']
    # for backwards compatibility
    if 'kernel_type' in params:
        # old combined kernel-type names are split into streaming pattern + timestep parity
        kernel_type_to_streaming_pattern = {
            'stream_pull_collide': ('pull', Timestep.BOTH),
            'collide_stream_push': ('push', Timestep.BOTH),
            'aa_even': ('aa', Timestep.EVEN),
            'aa_odd': ('aa', Timestep.ODD),
            'esotwist_even': ('esotwist', Timestep.EVEN),
            'esotwist_odd': ('esotwist', Timestep.ODD)
        }
        kernel_type = params['kernel_type']
        if kernel_type in kernel_type_to_streaming_pattern.keys():
            params['kernel_type'] = 'default_stream_collide'
            params['streaming_pattern'], params['timestep'] = kernel_type_to_streaming_pattern[kernel_type]
    # deprecated 'pdf_arr' shortcut: derive field size (minus ghost layers) and layout from the array
    if 'pdf_arr' in opt_params:
        arr = opt_params['pdf_arr']
        opt_params['field_size'] = tuple(e - 2 for e in arr.shape[:-1])
        opt_params['field_layout'] = get_layout_of_array(arr)
        del opt_params['pdf_arr']
    if fail_on_unknown_parameter:
        unknown_params = [k for k in params.keys() if k not in default_method_description]
        unknown_opt_params = [k for k in opt_params.keys() if k not in default_optimization_description]
        if unknown_params:
            raise ValueError("Unknown parameter(s): " + ", ".join(unknown_params))
        if unknown_opt_params:
            raise ValueError("Unknown optimization parameter(s): " + ",".join(unknown_opt_params))
    params_result = copy(default_method_description)
    params_result.update(params)
    opt_params_result = copy(default_optimization_description)
    opt_params_result.update(opt_params)
    # if no relaxation rates were given, create one symbolic rate per stencil direction
    if params_result['relaxation_rates'] is None:
        stencil_param = params_result['stencil']
        if isinstance(stencil_param, tuple) or isinstance(stencil_param, list):
            stencil_entries = stencil_param
        else:
            stencil_entries = get_stencil(params_result['stencil'])
        params_result['relaxation_rates'] = sp.symbols("omega_:%d" % len(stencil_entries))
    return params_result, opt_params_result
r"""
.. module:: forcemodels
:synopsis: Collection of forcing terms for hydrodynamic LBM simulations
Get started:
------------
This module offers different models to introduce a body force in the lattice Boltzmann scheme.
If you don't know which model to choose, use :class:`lbmpy.forcemodels.Schiller`.
For incompressible collision models the :class:`lbmpy.forcemodels.Buick` model can be better.
Detailed information:
---------------------
Force models add a term :math:`C_F` to the collision equation:
.. math ::
f(\pmb{x} + c_q \Delta t, t + \Delta t) - f(\pmb{x},t) = \Omega(f, f^{(eq)})
+ \underbrace{F_q}_{\mbox{forcing term}}
The form of this term depends on the concrete force model: the first moment of this forcing term is equal
to the acceleration :math:`\pmb{a}` for all force models.
.. math ::
\sum_q \pmb{c}_q F_q = \pmb{a}
The second order moment is different for the forcing models - if it is zero the model is suited for
incompressible flows. For weakly compressible collision operators a force model with a corrected second order moment
should be chosen.
.. math ::
\sum_q c_{qi} c_{qj} F_q = F_i u_j + F_j u_i \hspace{1cm} \mbox{for Guo, Luo models}
\sum_q c_{qi} c_{qj} F_q = 0 \hspace{1cm} \mbox{for Simple, Buick}
Models with zero second order moment have:
.. math ::
F_q = \frac{w_q}{c_s^2} c_{qi} \; a_i
Models with nonzero second moment have:
.. math ::
F_q = \frac{w_q}{c_s^2} c_{qi} \; a_i + \frac{w_q}{c_s^4} (c_{qi} c_{qj} - c_s^2 \delta_{ij} ) u_j \, a_i
For all force models the computation of the macroscopic velocity has to be adapted (shifted) by adding a term
:math:`S_{macro}` that we call "macroscopic velocity shift"
.. math ::
\pmb{u} = \sum_q \pmb{c}_q f_q + S_{macro}
S_{macro} = \frac{\Delta t}{2} \sum_q F_q
Some models also shift the velocity entering the equilibrium distribution.
Comparison
----------
Force models can be distinguished by 2 options:
**Option 1**:
:math:`C_F = 1` and equilibrium velocity is not shifted, or :math:`C_F=(1 - \frac{\omega}{2})`
and equilibrium is shifted.
**Option 2**:
second velocity moment is zero or :math:`F_i u_j + F_j u_i`
===================== ==================== =================
Option2 \\ Option1 no equilibrium shift equilibrium shift
===================== ==================== =================
second moment zero :class:`Simple` :class:`Buick`
second moment nonzero :class:`Luo` :class:`Guo`
===================== ==================== =================
"""
import sympy as sp
from lbmpy.relaxationrates import get_bulk_relaxation_rate, get_shear_relaxation_rate
class Simple:
    r"""
    A simple force model which introduces the following additional force term in the
    collision process :math:`\frac{w_q}{c_s^2} c_{qi} \; a_i` (often: force = rho * acceleration)
    Should only be used with constant forces!
    Shifts the macroscopic velocity by F/2, but does not change the equilibrium velocity.
    """
    def __init__(self, force):
        self._force = force

    def __call__(self, lb_method, **kwargs):
        assert len(self._force) == lb_method.dim

        def dot(u, v):
            return sum(u_i * v_i for u_i, v_i in zip(u, v))

        # prefactor 3 corresponds to 1 / c_s^2 for the standard lattices (c_s^2 = 1/3)
        return [3 * weight * dot(self._force, direction)
                for direction, weight in zip(lb_method.stencil, lb_method.weights)]

    def macroscopic_velocity_shift(self, density):
        return default_velocity_shift(density, self._force)

    def macroscopic_momentum_density_shift(self, density):
        return default_momentum_density_shift(self._force)
class Luo:
    r"""Force model by Luo :cite:`luo1993lattice`.
    Shifts the macroscopic velocity by F/2, but does not change the equilibrium velocity.
    """
    def __init__(self, force):
        self._force = force

    def __call__(self, lb_method, **kwargs):
        # CONSISTENCY FIX: accept **kwargs like Simple, Buick and EDM do, so all
        # force models can be invoked uniformly
        u = sp.Matrix(lb_method.first_order_equilibrium_moment_symbols)
        force = sp.Matrix(self._force)
        result = []
        for direction, w_i in zip(lb_method.stencil, lb_method.weights):
            direction = sp.Matrix(direction)
            # second-order accurate forcing term, nonzero second moment (see module docstring)
            result.append(3 * w_i * force.dot(direction - u + 3 * direction * direction.dot(u)))
        return result

    def macroscopic_velocity_shift(self, density):
        return default_velocity_shift(density, self._force)

    def macroscopic_momentum_density_shift(self, density):
        return default_momentum_density_shift(self._force)
class Guo:
    r"""
    Force model by Guo :cite:`guo2002discrete`
    Adapts the calculation of the macroscopic velocity as well as the equilibrium velocity (both shifted by F/2)!
    """
    def __init__(self, force):
        self._force = force

    def __call__(self, lb_method, **kwargs):
        # CONSISTENCY FIX: accept **kwargs like Simple, Buick and EDM do, so all
        # force models can be invoked uniformly
        luo = Luo(self._force)
        shear_relaxation_rate = get_shear_relaxation_rate(lb_method)
        assert len(set(lb_method.relaxation_rates)) == 1, "Guo only works for SRT, use Schiller instead"
        # Guo = Luo scaled by (1 - omega/2); the equilibrium velocity is shifted as well
        correction_factor = (1 - sp.Rational(1, 2) * shear_relaxation_rate)
        return [correction_factor * t for t in luo(lb_method)]

    def macroscopic_velocity_shift(self, density):
        return default_velocity_shift(density, self._force)

    def macroscopic_momentum_density_shift(self, density):
        return default_momentum_density_shift(self._force)

    def equilibrium_velocity_shift(self, density):
        return default_velocity_shift(density, self._force)
class Schiller:
    r"""
    Force model by Schiller :cite:`schiller2008thermal`, equation 4.67
    Equivalent to Guo but not restricted to SRT.
    """
    def __init__(self, force):
        self._force = force

    def __call__(self, lb_method, **kwargs):
        # CONSISTENCY FIX: accept **kwargs like Simple, Buick and EDM do, so all
        # force models can be invoked uniformly
        u = sp.Matrix(lb_method.first_order_equilibrium_moment_symbols)
        force = sp.Matrix(self._force)
        uf = u.dot(force) * sp.eye(len(force))
        # shear and bulk parts are relaxed with their respective rates
        omega = get_shear_relaxation_rate(lb_method)
        omega_bulk = get_bulk_relaxation_rate(lb_method)
        G = (u * force.transpose() + force * u.transpose() - uf * sp.Rational(2, lb_method.dim)) * sp.Rational(1, 2) * \
            (2 - omega) + uf * sp.Rational(1, lb_method.dim) * (2 - omega_bulk)
        result = []
        for direction, w_i in zip(lb_method.stencil, lb_method.weights):
            direction = sp.Matrix(direction)
            tr = sp.trace(G * (direction * direction.transpose() - sp.Rational(1, 3) * sp.eye(len(force))))
            result.append(3 * w_i * (force.dot(direction) + sp.Rational(3, 2) * tr))
        return result

    def macroscopic_velocity_shift(self, density):
        return default_velocity_shift(density, self._force)

    def macroscopic_momentum_density_shift(self, density):
        return default_momentum_density_shift(self._force)
class Buick:
    r"""
    This force model :cite:`buick2000gravity` has a force term with zero second moment.
    It is suited for incompressible lattice models.
    """
    def __init__(self, force):
        self._force = force

    def __call__(self, lb_method, **kwargs):
        # Buick = Simple scaled by (1 - omega/2); the equilibrium velocity is shifted as well
        unscaled = Simple(self._force)
        omega_shear = get_shear_relaxation_rate(lb_method)
        assert len(set(lb_method.relaxation_rates)) == 1, "Buick only works for SRT"
        prefactor = (1 - sp.Rational(1, 2) * omega_shear)
        return [prefactor * term for term in unscaled(lb_method)]

    def macroscopic_velocity_shift(self, density):
        return default_velocity_shift(density, self._force)

    def macroscopic_momentum_density_shift(self, density):
        return default_momentum_density_shift(self._force)

    def equilibrium_velocity_shift(self, density):
        return default_velocity_shift(density, self._force)
class EDM:
    r"""Exact differencing force model"""
    def __init__(self, force):
        self._force = force

    def __call__(self, lb_method, **kwargs):
        cqc = lb_method.conserved_quantity_computation
        # for incompressible models the density factor is simply 1
        density = cqc.zeroth_order_moment_symbol if cqc.compressible else 1
        velocity = cqc.first_order_moment_symbols
        substitutions = {u_i: u_i + f_i / density
                         for u_i, f_i in zip(velocity, self._force)}
        eq_terms = lb_method.get_equilibrium_terms()
        # forcing term = equilibrium at shifted velocity minus unshifted equilibrium
        return eq_terms.subs(substitutions) - eq_terms

    def macroscopic_velocity_shift(self, density):
        return default_velocity_shift(density, self._force)

    def macroscopic_momentum_density_shift(self, density):
        return default_momentum_density_shift(self._force)
# -------------------------------- Helper functions ------------------------------------------------------------------
def default_velocity_shift(density, force):
    # standard F / (2 * rho) shift of the macroscopic velocity
    return [component / (2 * density) for component in force]
def default_momentum_density_shift(force):
    # standard F / 2 shift of the macroscopic momentum density
    return [component / 2 for component in force]
from lbmpy.methods.creationfunctions import (
create_mrt_orthogonal, create_mrt_raw, create_srt, create_trt, create_trt_kbc,
create_trt_with_magic_number, create_with_continuous_maxwellian_eq_moments,
create_with_discrete_maxwellian_eq_moments, mrt_orthogonal_modes_literature,
create_centered_cumulant_model, create_with_default_polynomial_cumulants,
create_with_polynomial_cumulants, create_with_monomial_cumulants)
from lbmpy.methods.abstractlbmethod import AbstractLbMethod, RelaxationInfo
from lbmpy.methods.conservedquantitycomputation import AbstractConservedQuantityComputation
from .conservedquantitycomputation import DensityVelocityComputation
__all__ = ['RelaxationInfo', 'AbstractLbMethod',
'AbstractConservedQuantityComputation', 'DensityVelocityComputation',
'create_srt', 'create_trt', 'create_trt_with_magic_number', 'create_trt_kbc',
'create_mrt_orthogonal', 'create_mrt_raw',
'create_with_continuous_maxwellian_eq_moments', 'create_with_discrete_maxwellian_eq_moments',
'mrt_orthogonal_modes_literature', 'create_centered_cumulant_model',
'create_with_default_polynomial_cumulants', 'create_with_polynomial_cumulants',
'create_with_monomial_cumulants']
from .force_model import CenteredCumulantForceModel
from .centeredcumulantmethod import CenteredCumulantBasedLbMethod
from .centered_cumulants import get_default_polynomial_cumulants_for_stencil
__all__ = ['CenteredCumulantForceModel', 'CenteredCumulantBasedLbMethod',
'get_default_polynomial_cumulants_for_stencil']
from lbmpy.stencils import get_stencil
import sympy as sp
from pystencils.stencil import have_same_entries
from lbmpy.moments import MOMENT_SYMBOLS, moment_sort_key, exponent_to_polynomial_representation
def statistical_quantity_symbol(name, exponents):
    """Returns a sympy symbol '<name>_<e0><e1>...' for the given exponent tuple."""
    suffix = ''.join(str(exponent) for exponent in exponents)
    return sp.Symbol(name + '_' + suffix)
def exponent_tuple_sort_key(x):
    """Sort key for exponent tuples, consistent with the polynomial moment ordering."""
    polynomial = exponent_to_polynomial_representation(x)
    return moment_sort_key(polynomial)
def get_default_polynomial_cumulants_for_stencil(stencil):
    """
    Returns default groups of cumulants to be relaxed with common relaxation rates as stated in literature.
    Groups are ordered like this:
    - First group is density
    - Second group are the momentum modes
    - Third group are the shear modes
    - Fourth group is the bulk mode
    - Remaining groups do not govern hydrodynamic properties

    Args:
        stencil: can be D2Q9, D3Q19 or D3Q27

    Raises:
        ValueError: for any other stencil
    """
    x, y, z = MOMENT_SYMBOLS
    if have_same_entries(stencil, get_stencil("D2Q9")):
        # Cumulants of the D2Q9 stencil up to third order are equal to
        # the central moments; only the fourth-order cumulant x**2 * y**2
        # has a more complicated form. They can be arranged into groups
        # for the preservation of rotational invariance as described by
        # Martin Geier in his dissertation.
        #
        # Reference: Martin Geier. Ab inito derivation of the cascaded Lattice Boltzmann
        # Automaton. Dissertation. University of Freiburg. 2006.
        return [
            [sp.sympify(1)],  # density is conserved
            [x, y],  # momentum is relaxed for cumulant forcing
            [x * y, x**2 - y**2],  # shear
            [x**2 + y**2],  # bulk
            [x**2 * y, x * y**2],
            [x**2 * y**2]
        ]
    elif have_same_entries(stencil, get_stencil("D3Q19")):
        # D3Q19 cumulants are obtained by pruning the D3Q27 cumulant set as
        # described by Coreixas, 2019.
        return [
            [sp.sympify(1)],  # density is conserved
            [x, y, z],  # momentum might be affected by forcing
            [x * y,
             x * z,
             y * z,
             x ** 2 - y ** 2,
             x ** 2 - z ** 2],  # shear
            [x ** 2 + y ** 2 + z ** 2],  # bulk
            [x * y ** 2 + x * z ** 2,
             x ** 2 * y + y * z ** 2,
             x ** 2 * z + y ** 2 * z],
            [x * y ** 2 - x * z ** 2,
             x ** 2 * y - y * z ** 2,
             x ** 2 * z - y ** 2 * z],
            [x ** 2 * y ** 2 - 2 * x ** 2 * z ** 2 + y ** 2 * z ** 2,
             x ** 2 * y ** 2 + x ** 2 * z ** 2 - 2 * y ** 2 * z ** 2],
            [x ** 2 * y ** 2 + x ** 2 * z ** 2 + y ** 2 * z ** 2]
        ]
    elif have_same_entries(stencil, get_stencil("D3Q27")):
        # Cumulants grouped to preserve rotational invariance as described by Geier et al, 2015
        return [
            [sp.sympify(1)],  # density is conserved
            [x, y, z],  # momentum might be affected by forcing
            [x * y,
             x * z,
             y * z,
             x ** 2 - y ** 2,
             x ** 2 - z ** 2],  # shear
            [x ** 2 + y ** 2 + z ** 2],  # bulk
            [x * y ** 2 + x * z ** 2,
             x ** 2 * y + y * z ** 2,
             x ** 2 * z + y ** 2 * z],
            [x * y ** 2 - x * z ** 2,
             x ** 2 * y - y * z ** 2,
             x ** 2 * z - y ** 2 * z],
            [x * y * z],
            [x ** 2 * y ** 2 - 2 * x ** 2 * z ** 2 + y ** 2 * z ** 2,
             x ** 2 * y ** 2 + x ** 2 * z ** 2 - 2 * y ** 2 * z ** 2],
            [x ** 2 * y ** 2 + x ** 2 * z ** 2 + y ** 2 * z ** 2],
            [x ** 2 * y * z,
             x * y ** 2 * z,
             x * y * z ** 2],
            [x ** 2 * y ** 2 * z,
             x ** 2 * y * z ** 2,
             x * y ** 2 * z ** 2],
            [x ** 2 * y ** 2 * z ** 2]
        ]
    else:
        raise ValueError("No default set of cumulants is available for this stencil. "
                         "Please specify your own set of polynomial cumulants.")
from pystencils.simp.simplifications import sympy_cse
import sympy as sp
from warnings import warn
from pystencils import Assignment, AssignmentCollection
from pystencils.simp.assignment_collection import SymbolGen
from pystencils.stencil import have_same_entries
from pystencils.cache import disk_cache
from lbmpy.stencils import get_stencil
from lbmpy.methods.abstractlbmethod import AbstractLbMethod, LbmCollisionRule, RelaxationInfo
from lbmpy.methods.conservedquantitycomputation import AbstractConservedQuantityComputation
from lbmpy.moments import (
moments_up_to_order, get_order,
monomial_to_polynomial_transformation_matrix,
moment_sort_key, exponent_to_polynomial_representation, extract_monomials, MOMENT_SYMBOLS)
# Local Imports
from lbmpy.methods.centeredcumulant.centered_cumulants import (
statistical_quantity_symbol, exponent_tuple_sort_key)
from lbmpy.methods.centeredcumulant.cumulant_transform import (
PRE_COLLISION_CUMULANT, POST_COLLISION_CUMULANT,
CentralMomentsToCumulantsByGeneratingFunc)
from lbmpy.methods.momentbased.moment_transforms import (
PRE_COLLISION_CENTRAL_MOMENT, POST_COLLISION_CENTRAL_MOMENT,
FastCentralMomentTransform)
from lbmpy.methods.centeredcumulant.force_model import CenteredCumulantForceModel
from lbmpy.methods.centeredcumulant.galilean_correction import (
contains_corrected_polynomials,
add_galilean_correction,
get_galilean_correction_terms)
# ============================ Cached Transformations ================================================================
@disk_cache
def cached_forward_transform(transform_obj, *args, **kwargs):
    """Disk-cached wrapper around ``transform_obj.forward_transform``."""
    return transform_obj.forward_transform(*args, **kwargs)
@disk_cache
def cached_backward_transform(transform_obj, *args, **kwargs):
    """Disk-cached wrapper around ``transform_obj.backward_transform``."""
    return transform_obj.backward_transform(*args, **kwargs)
# ============================ Lower Order Central Moment Collision ==================================================
@disk_cache
def relax_lower_order_central_moments(moment_indices, pre_collision_values,
                                      relaxation_rates, equilibrium_values,
                                      post_collision_base=POST_COLLISION_CENTRAL_MOMENT):
    """Relaxes the given central moments towards their equilibrium values.

    Returns an AssignmentCollection assigning the post-collision central moment
    symbols m_post = m + omega * (m_eq - m), componentwise.
    """
    lhs_symbols = [statistical_quantity_symbol(post_collision_base, idx) for idx in moment_indices]
    main_assignments = []
    for lhs, pre_value, rate, eq_value in zip(lhs_symbols, pre_collision_values,
                                              relaxation_rates, equilibrium_values):
        main_assignments.append(Assignment(lhs, pre_value + rate * (eq_value - pre_value)))
    return AssignmentCollection(main_assignments)
# ============================ Polynomial Cumulant Collision =========================================================
@disk_cache
def relax_polynomial_cumulants(monomial_exponents, polynomials, relaxation_rates, equilibrium_values,
                               pre_simplification,
                               galilean_correction_terms=None,
                               pre_collision_base=PRE_COLLISION_CUMULANT,
                               post_collision_base=POST_COLLISION_CUMULANT,
                               subexpression_base='sub_col'):
    """Relaxes polynomial cumulants and maps the result back onto the monomial cumulant basis.

    Args:
        monomial_exponents: exponent tuples of the monomial cumulant basis
        polynomials: polynomial cumulants to be relaxed, expressed over the monomial basis
        relaxation_rates: one relaxation rate per polynomial cumulant
        equilibrium_values: equilibrium value of each polynomial cumulant
        pre_simplification: if equal to 'default_with_cse', common subexpression
                            elimination is applied to the result
        galilean_correction_terms: optional assignment collection with galilean correction terms;
                                   when given, the relaxed polynomials are corrected and the
                                   correction assignments become subexpressions of the result
        pre_collision_base: base name of the pre-collision monomial cumulant symbols
        post_collision_base: base name of the post-collision monomial cumulant symbols
        subexpression_base: base name for generated subexpression symbols

    Returns:
        AssignmentCollection assigning the post-collision monomial cumulant symbols
    """
    mon_to_poly_matrix = monomial_to_polynomial_transformation_matrix(monomial_exponents, polynomials)
    mon_vec = sp.Matrix([statistical_quantity_symbol(pre_collision_base, exp) for exp in monomial_exponents])
    equilibrium_vec = sp.Matrix(equilibrium_values)
    relaxation_matrix = sp.diag(*relaxation_rates)
    subexpressions = []
    # relax in polynomial cumulant space: C_post = C + omega * (C_eq - C)
    poly_vec = mon_to_poly_matrix * mon_vec
    relaxed_polys = poly_vec + relaxation_matrix * (equilibrium_vec - poly_vec)
    if galilean_correction_terms is not None:
        relaxed_polys = add_galilean_correction(relaxed_polys, polynomials, galilean_correction_terms)
        subexpressions = galilean_correction_terms.all_assignments
    # transform the relaxed values back onto the monomial basis
    relaxed_monos = mon_to_poly_matrix.inv() * relaxed_polys
    main_assignments = [Assignment(statistical_quantity_symbol(post_collision_base, exp), v)
                        for exp, v in zip(monomial_exponents, relaxed_monos)]
    symbol_gen = SymbolGen(subexpression_base)
    ac = AssignmentCollection(
        main_assignments, subexpressions=subexpressions, subexpression_symbol_generator=symbol_gen)
    if pre_simplification == 'default_with_cse':
        ac = sympy_cse(ac)
    return ac
# =============================== LB Method Implementation ===========================================================
class CenteredCumulantBasedLbMethod(AbstractLbMethod):
"""
This class implements cumulant-based lattice boltzmann methods which relax all the non-conserved quantities
as either monomial or polynomial cumulants. It is mostly inspired by the work presented in :cite:`geier2015`.
Conserved quantities are relaxed in central moment space. This method supports an implicit forcing scheme
through :class:`lbmpy.methods.centeredcumulant.CenteredCumulantForceModel` where forces are applied by
shifting the central-moment frame of reference by :math:`F/2` and then relaxing the first-order central
moments with a relaxation rate of two. This corresponds to the change-of-sign described in the paper.
Classical forcing schemes can still be applied.
The galilean correction described in :cite:`geier2015` is also available for the D3Q27 lattice.
This method is implemented modularily as the transformation from populations to central moments to cumulants
is governed by subclasses of :class:`lbmpy.methods.momentbased.moment_transforms.AbstractMomentTransform`
which can be specified by constructor argument. This allows the selection of the most efficient transformation
for a given setup.
Args:
stencil: see :func:`lbmpy.stencils.get_stencil`
cumulant_to_relaxation_info_dict: a dictionary mapping cumulants in either tuple or polynomial formulation
to a RelaxationInfo, which consists of the corresponding equilibrium cumulant
and a relaxation rate
conserved_quantity_computation: instance of :class:`lbmpy.methods.AbstractConservedQuantityComputation`.
This determines how conserved quantities are computed, and defines
the symbols used in the equilibrium moments like e.g. density and velocity
force_model: force model instance, or None if no forcing terms are required
galilean_correction: if set to True the galilean_correction is applied to a D3Q27 cumulant method
central_moment_transform_class: transform class to get from PDF space to the central moment space
cumulant_transform_class: transform class to get from the central moment space to the cumulant space
"""
    def __init__(self, stencil, cumulant_to_relaxation_info_dict, conserved_quantity_computation, force_model=None,
                 galilean_correction=False,
                 central_moment_transform_class=FastCentralMomentTransform,
                 cumulant_transform_class=CentralMomentsToCumulantsByGeneratingFunc):
        # Arguments are documented in the class docstring.
        assert isinstance(conserved_quantity_computation,
                          AbstractConservedQuantityComputation)
        super(CenteredCumulantBasedLbMethod, self).__init__(stencil)
        # every conserved quantity (all moments of order <= 1) must have a relaxation entry
        for m in moments_up_to_order(1, dim=self.dim):
            if exponent_to_polynomial_representation(m) not in cumulant_to_relaxation_info_dict.keys():
                raise ValueError(f'No relaxation info given for conserved cumulant {m}!')
        self._cumulant_to_relaxation_info_dict = cumulant_to_relaxation_info_dict
        self._conserved_quantity_computation = conserved_quantity_computation
        self._force_model = force_model
        self._weights = None  # computed lazily by the `weights` property
        self._galilean_correction = galilean_correction
        if galilean_correction:
            # the galilean correction requires the D3Q27 stencil and specific polynomial cumulants
            if not have_same_entries(stencil, get_stencil("D3Q27")):
                raise ValueError("Galilean Correction only available for D3Q27 stencil")
            if not contains_corrected_polynomials(cumulant_to_relaxation_info_dict):
                raise ValueError("For the galilean correction, all three polynomial cumulants"
                                 "(x^2 - y^2), (x^2 - z^2) and (x^2 + y^2 + z^2) must be present!")
        self._cumulant_transform_class = cumulant_transform_class
        self._central_moment_transform_class = central_moment_transform_class
        # a CenteredCumulantForceModel may dictate the first-order relaxation rate;
        # if it does, apply it now and remember the override so later changes can warn
        self.force_model_rr_override = False
        if isinstance(self._force_model, CenteredCumulantForceModel) and \
                self._force_model.override_momentum_relaxation_rate is not None:
            self.set_first_moment_relaxation_rate(self._force_model.override_momentum_relaxation_rate)
            self.force_model_rr_override = True
@property
def central_moment_transform_class(self):
self._central_moment_transform_class
@property
def cumulants(self):
return tuple(self._cumulant_to_relaxation_info_dict.keys())
@property
def cumulant_equilibrium_values(self):
return tuple([e.equilibrium_value for e in self._cumulant_to_relaxation_info_dict.values()])
@property
def cumulant_transform_class(self):
self._cumulant_transform_class
@property
def first_order_equilibrium_moment_symbols(self, ):
return self._conserved_quantity_computation.first_order_moment_symbols
    @property
    def force_model(self):
        """Force model applied during collision, or None if no forcing is used."""
        return self._force_model
    @property
    def galilean_correction(self):
        """True if the D3Q27 galilean correction is applied (see constructor)."""
        return self._galilean_correction
    @property
    def relaxation_info_dict(self):
        """Dictionary mapping each cumulant to its RelaxationInfo (equilibrium value, relaxation rate)."""
        return self._cumulant_to_relaxation_info_dict
@property
def relaxation_rates(self):
return tuple([e.relaxation_rate for e in self._cumulant_to_relaxation_info_dict.values()])
@property
def zeroth_order_equilibrium_moment_symbol(self, ):
return self._conserved_quantity_computation.zeroth_order_moment_symbol
def set_zeroth_moment_relaxation_rate(self, relaxation_rate):
e = sp.Rational(1, 1)
prev_entry = self._cumulant_to_relaxation_info_dict[e]
new_entry = RelaxationInfo(prev_entry[0], relaxation_rate)
self._cumulant_to_relaxation_info_dict[e] = new_entry
def set_first_moment_relaxation_rate(self, relaxation_rate):
if self.force_model_rr_override:
warn("Overwriting first-order relaxation rates governed by CenteredCumulantForceModel "
"might break your forcing scheme.")
for e in MOMENT_SYMBOLS[:self.dim]:
assert e in self._cumulant_to_relaxation_info_dict, \
"First cumulants are not relaxed separately by this method"
for e in MOMENT_SYMBOLS[:self.dim]:
prev_entry = self._cumulant_to_relaxation_info_dict[e]
new_entry = RelaxationInfo(prev_entry[0], relaxation_rate)
self._cumulant_to_relaxation_info_dict[e] = new_entry
    def set_conserved_moments_relaxation_rate(self, relaxation_rate):
        """Sets one common relaxation rate for the zeroth- and all first-order conserved quantities."""
        self.set_zeroth_moment_relaxation_rate(relaxation_rate)
        self.set_first_moment_relaxation_rate(relaxation_rate)
    def set_force_model(self, force_model):
        """Replaces the force model used by this method.

        NOTE(review): unlike the constructor, this does not apply a
        CenteredCumulantForceModel relaxation-rate override - confirm intended.
        """
        self._force_model = force_model
    def _repr_html_(self):
        """Renders the relaxation table as an HTML table (Jupyter notebook display hook)."""
        table = """
        <table style="border:none; width: 100%">
            <tr {nb}>
                <th {nb} >Central Moment / Cumulant</th>
                <th {nb} >Eq. Value </th>
                <th {nb} >Relaxation Rate</th>
            </tr>
            {content}
        </table>
        """
        content = ""
        for cumulant, (eq_value, rr) in self._cumulant_to_relaxation_info_dict.items():
            vals = {
                'rr': f"${sp.latex(rr)}$",
                'cumulant': f"${sp.latex(cumulant)}$",
                'eq_value': f"${sp.latex(eq_value)}$",
                'nb': 'style="border:none"',
            }
            order = get_order(cumulant)
            # conserved quantities (order <= 1) are relaxed in central-moment space
            if order <= 1:
                vals['cumulant'] += ' (central moment)'
                if order == 1 and self.force_model_rr_override:
                    vals['rr'] += ' (overridden by force model)'
            content += """<tr {nb}>
                            <td {nb}>{cumulant}</td>
                            <td {nb}>{eq_value}</td>
                            <td {nb}>{rr}</td>
                         </tr>\n""".format(**vals)
        return table.format(content=content, nb='style="border:none"')
# ----------------------- Overridden Abstract Members --------------------------
    @property
    def conserved_quantity_computation(self):
        """Returns an instance of class :class:`lbmpy.methods.AbstractConservedQuantityComputation`,
        which provides the density and velocity symbols used in the equilibrium."""
        return self._conserved_quantity_computation
@property
def weights(self):
"""Returns a sequence of weights, one for each lattice direction"""
if self._weights is None:
self._weights = self._compute_weights()
return self._weights
    def override_weights(self, weights):
        """Replaces the computed lattice weights; exactly one weight per stencil direction is required."""
        assert len(weights) == len(self.stencil)
        self._weights = weights
    def get_equilibrium(self, conserved_quantity_equations=None, subexpressions=False, pre_simplification=False,
                        keep_cqc_subexpressions=True):
        """Returns equation collection, to compute equilibrium values.
        The equations have the post collision symbols as left hand sides and are
        functions of the conserved quantities

        Args:
            conserved_quantity_equations: equations to compute conserved quantities.
            subexpressions: if set to false all subexpressions of the equilibrium assignments are plugged
                            into the main assignments
            pre_simplification: with or without pre_simplifications for the calculation of the collision
            keep_cqc_subexpressions: if equilibrium is returned without subexpressions keep_cqc_subexpressions
                                     determines if also subexpressions to calculate conserved quantities should be
                                     plugged into the main assignments
        """
        # relaxing every cumulant with rate 1 drives all quantities directly to equilibrium
        r_info_dict = {c: RelaxationInfo(info.equilibrium_value, 1)
                       for c, info in self._cumulant_to_relaxation_info_dict.items()}
        ac = self._centered_cumulant_collision_rule(
            r_info_dict, conserved_quantity_equations, pre_simplification, include_galilean_correction=False)
        if not subexpressions:
            if keep_cqc_subexpressions:
                # keep only the subexpressions that compute conserved quantities
                bs = self._bound_symbols_cqc(conserved_quantity_equations)
                return ac.new_without_subexpressions(subexpressions_to_keep=bs)
            else:
                return ac.new_without_subexpressions()
        else:
            return ac
def get_equilibrium_terms(self):
equilibrium = self.get_equilibrium()
return sp.Matrix([eq.rhs for eq in equilibrium.main_assignments])
def get_collision_rule(self, conserved_quantity_equations=None, pre_simplification=False):
"""Returns an LbmCollisionRule i.e. an equation collection with a reference to the method.
This collision rule defines the collision operator."""
return self._centered_cumulant_collision_rule(
self._cumulant_to_relaxation_info_dict, conserved_quantity_equations, pre_simplification, True)
# ------------------------------- Internals --------------------------------------------
def _bound_symbols_cqc(self, conserved_quantity_equations=None):
f = self.pre_collision_pdf_symbols
cqe = conserved_quantity_equations
if cqe is None:
cqe = self._conserved_quantity_computation.equilibrium_input_equations_from_pdfs(f)
return cqe.bound_symbols
def _compute_weights(self):
defaults = self._conserved_quantity_computation.default_values
cqe = AssignmentCollection([Assignment(s, e) for s, e in defaults.items()])
eq_ac = self.get_equilibrium(cqe, subexpressions=False, keep_cqc_subexpressions=False)
weights = []
for eq in eq_ac.main_assignments:
value = eq.rhs.expand()
assert len(value.atoms(sp.Symbol)) == 0, "Failed to compute weights"
weights.append(value)
return weights
    def _centered_cumulant_collision_rule(self, cumulant_to_relaxation_info_dict,
                                          conserved_quantity_equations=None,
                                          pre_simplification=False,
                                          include_force_terms=False,
                                          include_galilean_correction=True):
        """Derives the full collision rule: PDFs -> central moments -> cumulants,
        relaxation in cumulant space, and the backward transformations.

        Args:
            cumulant_to_relaxation_info_dict: maps each polynomial cumulant to a
                RelaxationInfo (equilibrium value, relaxation rate)
            conserved_quantity_equations: optional pre-computed conserved quantity
                equations; derived from the pre-collision PDFs when None
            pre_simplification: simplification setting forwarded to the transforms
            include_force_terms: whether to add force model terms to the result
                (skipped for CenteredCumulantForceModel, see step 8)
            include_galilean_correction: whether to include the Galilean correction
                terms (effective only if the method was set up with the
                galilean correction enabled)
        """
        stencil = self.stencil
        f = self.pre_collision_pdf_symbols
        density = self.zeroth_order_equilibrium_moment_symbol
        velocity = self.first_order_equilibrium_moment_symbols
        cqe = conserved_quantity_equations
        if cqe is None:
            cqe = self._conserved_quantity_computation.equilibrium_input_equations_from_pdfs(f)
        # 1) Extract Monomial Cumulants for the higher-order polynomials
        polynomial_cumulants = cumulant_to_relaxation_info_dict.keys()
        polynomial_cumulants = sorted(list(polynomial_cumulants), key=moment_sort_key)
        higher_order_polynomials = [p for p in polynomial_cumulants if get_order(p) > 1]
        monomial_cumulants = sorted(list(extract_monomials(
            higher_order_polynomials, dim=self.dim)), key=exponent_tuple_sort_key)
        # 2) Get Forward and Backward Transformations between central moment and cumulant space,
        #    and find required central moments
        k_to_c_transform = self._cumulant_transform_class(stencil, monomial_cumulants, density, velocity)
        k_to_c_eqs = cached_forward_transform(k_to_c_transform, simplification=pre_simplification)
        c_post_to_k_post_eqs = cached_backward_transform(
            k_to_c_transform, simplification=pre_simplification, omit_conserved_moments=True)
        central_moments = k_to_c_transform.required_central_moments
        assert len(central_moments) == len(stencil), 'Number of required central moments must match stencil size.'
        # 3) Get Forward Transformation from PDFs to central moments
        pdfs_to_k_transform = self._central_moment_transform_class(
            stencil, central_moments, density, velocity, conserved_quantity_equations=cqe)
        pdfs_to_k_eqs = cached_forward_transform(pdfs_to_k_transform, f, simplification=pre_simplification)
        # 4) Add relaxation rules for lower order moments (order <= 1);
        #    these are relaxed directly in central moment space
        lower_order_moments = moments_up_to_order(1, dim=self.dim)
        lower_order_moment_symbols = [statistical_quantity_symbol(PRE_COLLISION_CENTRAL_MOMENT, exp)
                                      for exp in lower_order_moments]
        lower_order_relaxation_infos = [cumulant_to_relaxation_info_dict[exponent_to_polynomial_representation(e)]
                                        for e in lower_order_moments]
        lower_order_relaxation_rates = [info.relaxation_rate for info in lower_order_relaxation_infos]
        lower_order_equilibrium = [info.equilibrium_value for info in lower_order_relaxation_infos]
        lower_order_moment_collision_eqs = relax_lower_order_central_moments(
            lower_order_moments, tuple(lower_order_moment_symbols),
            tuple(lower_order_relaxation_rates), tuple(lower_order_equilibrium))
        # 5) Add relaxation rules for higher-order, polynomial cumulants
        poly_relaxation_infos = [cumulant_to_relaxation_info_dict[c] for c in higher_order_polynomials]
        poly_relaxation_rates = [info.relaxation_rate for info in poly_relaxation_infos]
        poly_equilibrium = [info.equilibrium_value for info in poly_relaxation_infos]
        if self._galilean_correction and include_galilean_correction:
            galilean_correction_terms = get_galilean_correction_terms(
                cumulant_to_relaxation_info_dict, density, velocity)
        else:
            galilean_correction_terms = None
        cumulant_collision_eqs = relax_polynomial_cumulants(
            tuple(monomial_cumulants), tuple(higher_order_polynomials),
            tuple(poly_relaxation_rates), tuple(poly_equilibrium),
            pre_simplification,
            galilean_correction_terms=galilean_correction_terms)
        # 6) Get backward transformation from central moments to PDFs
        d = self.post_collision_pdf_symbols
        k_post_to_pdfs_eqs = cached_backward_transform(pdfs_to_k_transform, d, simplification=pre_simplification)
        # 7) That's all. Now, put it all together.
        # If the pdf-to-moment transform absorbed the cqe, it is already part of
        # pdfs_to_k_eqs and must not be added twice.
        all_acs = [] if pdfs_to_k_transform.absorbs_conserved_quantity_equations else [cqe]
        all_acs += [pdfs_to_k_eqs, k_to_c_eqs, lower_order_moment_collision_eqs,
                    cumulant_collision_eqs, c_post_to_k_post_eqs]
        # NOTE(review): this builds a list of assignment *lists*, then appends the
        # backward-transform subexpressions — presumably LbmCollisionRule accepts
        # the nested structure; verify against pystencils' AssignmentCollection.
        subexpressions = [ac.all_assignments for ac in all_acs]
        subexpressions += k_post_to_pdfs_eqs.subexpressions
        main_assignments = k_post_to_pdfs_eqs.main_assignments
        # 8) Maybe add forcing terms if CenteredCumulantForceModel was not used
        #    (that model applies forcing through the velocity shift instead)
        if self._force_model is not None and \
                not isinstance(self._force_model, CenteredCumulantForceModel) and include_force_terms:
            force_model_terms = self._force_model(self)
            force_term_symbols = sp.symbols("forceTerm_:%d" % (len(force_model_terms, )))
            force_subexpressions = [Assignment(sym, force_model_term)
                                    for sym, force_model_term in zip(force_term_symbols, force_model_terms)]
            subexpressions += force_subexpressions
            main_assignments = [Assignment(eq.lhs, eq.rhs + force_term_symbol)
                                for eq, force_term_symbol in zip(main_assignments, force_term_symbols)]
        # Aaaaaand we're done.
        return LbmCollisionRule(self, main_assignments, subexpressions)
from lbmpy.forcemodels import default_velocity_shift
# =========================== Centered Cumulant Force Model ==========================================================
class CenteredCumulantForceModel:
    """Force model for use with the centered cumulant-based LB method.

    This model introduces no forcing term into the collision equations; it only
    shifts the macroscopic and the equilibrium velocity. The forcing itself is
    applied through the relaxation of the first central moments in the shifted
    frame of reference (cf. https://doi.org/10.1016/j.camwa.2015.05.001).

    Args:
        force: force vector which should be applied to the fluid
    """

    def __init__(self, force):
        self._force = force
        # read by the method setup to force the momentum relaxation rate to 2
        self.override_momentum_relaxation_rate = 2

    def __call__(self, lb_method, **kwargs):
        # no explicit forcing term exists for this model
        raise Exception('This force model does not provide a forcing term.')

    def equilibrium_velocity_shift(self, density):
        """Velocity shift applied to the equilibrium input."""
        shift = default_velocity_shift(density, self._force)
        return shift

    def macroscopic_velocity_shift(self, density):
        """Velocity shift applied when computing the macroscopic velocity."""
        shift = default_velocity_shift(density, self._force)
        return shift
import sympy as sp
from pystencils.sympyextensions import is_constant
# Subexpression Insertion
def insert_subexpressions(ac, selection_callback, skip=frozenset()):
    """Inserts subexpressions of an assignment collection into their usage sites.

    Iterates over the collection's subexpressions and inserts every
    subexpression selected by the callback, unless its left-hand side is
    listed in `skip`.

    Args:
        ac: the assignment collection to process
        selection_callback: called with each subexpression assignment; returns
            True if that subexpression should be inserted
        skip: collection of left-hand-side symbols that must never be inserted

    Returns:
        a new assignment collection with the selected subexpressions inserted
    """
    # NOTE: the default was a mutable `set()`; an immutable frozenset avoids the
    # shared-mutable-default pitfall while behaving identically for lookups.
    i = 0
    while i < len(ac.subexpressions):
        exp = ac.subexpressions[i]
        if exp.lhs not in skip and selection_callback(exp):
            # inserting shrinks the subexpression list, so do not advance i
            ac = ac.new_with_inserted_subexpression(exp.lhs)
        else:
            i += 1
    return ac
def insert_aliases(ac, **kwargs):
    """Inserts subexpressions that are pure aliases, i.e. whose right-hand side
    is just a single symbol."""
    def is_alias(assignment):
        return isinstance(assignment.rhs, sp.Symbol)
    return insert_subexpressions(ac, is_alias, **kwargs)
def insert_zeros(ac, **kwargs):
    """Inserts subexpressions whose right-hand side is zero."""
    def is_zero(assignment):
        return assignment.rhs == sp.Integer(0)
    return insert_subexpressions(ac, is_zero, **kwargs)
def insert_constants(ac, **kwargs):
    """Inserts subexpressions whose right-hand side is a constant expression."""
    def is_constant_rhs(assignment):
        return is_constant(assignment.rhs)
    return insert_subexpressions(ac, is_constant_rhs, **kwargs)
def insert_symbol_times_minus_one(ac, **kwargs):
    """Inserts subexpressions of the form `-x`, i.e. a two-factor product where
    one factor is -1."""
    minus_one = sp.Integer(-1)

    def is_negation(assignment):
        rhs = assignment.rhs
        if not isinstance(rhs, sp.Mul) or len(rhs.args) != 2:
            return False
        return minus_one in rhs.args
    return insert_subexpressions(ac, is_negation, **kwargs)
def insert_constant_multiples(ac, **kwargs):
    """Inserts subexpressions that are a two-factor product with at least one
    constant factor."""
    def is_constant_multiple(assignment):
        rhs = assignment.rhs
        if not isinstance(rhs, sp.Mul) or len(rhs.args) != 2:
            return False
        return any(is_constant(arg) for arg in rhs.args)
    return insert_subexpressions(ac, is_constant_multiple, **kwargs)
def insert_constant_additions(ac, **kwargs):
    """Inserts subexpressions that are a two-term sum with at least one
    constant summand."""
    def is_constant_addition(assignment):
        rhs = assignment.rhs
        if not isinstance(rhs, sp.Add) or len(rhs.args) != 2:
            return False
        return any(is_constant(arg) for arg in rhs.args)
    return insert_subexpressions(ac, is_constant_addition, **kwargs)
def insert_squares(ac, **kwargs):
    """Inserts subexpressions that are squares of some expression."""
    two = sp.Integer(2)

    def is_square(assignment):
        rhs = assignment.rhs
        return isinstance(rhs, sp.Pow) and rhs.args[1] == two
    return insert_subexpressions(ac, is_square, **kwargs)
def bind_symbols_to_skip(insertion_function, skip):
    """Returns a single-argument version of `insertion_function` with the given
    `skip` set bound as keyword argument."""
    def bound_insertion(ac):
        return insertion_function(ac, skip=skip)
    return bound_insertion
import sympy as sp
from lbmpy.maxwellian_equilibrium import get_weights
from lbmpy.methods.abstractlbmethod import AbstractLbMethod, LbmCollisionRule
from lbmpy.methods.conservedquantitycomputation import DensityVelocityComputation
from pystencils import Assignment
class EntropicEquilibriumSRT(AbstractLbMethod):
    """Equilibrium from 'Minimal entropic kinetic models for hydrodynamics'
    Ansumali, S. ; Karlin, I. V; Öttinger, H. C, (2003)
    """

    def __init__(self, stencil, relaxation_rate, force_model, conserved_quantity_calculation):
        # Args:
        #   stencil: sequence of lattice directions
        #   relaxation_rate: the single SRT relaxation rate
        #   force_model: force model instance, or None for no forcing
        #   conserved_quantity_calculation: object providing density/velocity
        #       symbols and their computation from the populations
        super(EntropicEquilibriumSRT, self).__init__(stencil)
        self._cqc = conserved_quantity_calculation
        # standard lattice weights for c_s^2 = 1/3
        self._weights = get_weights(stencil, c_s_sq=sp.Rational(1, 3))
        self._relaxationRate = relaxation_rate
        self._forceModel = force_model
        self.shear_relaxation_rate = relaxation_rate

    @property
    def conserved_quantity_computation(self):
        """The conserved quantity computation object passed at construction."""
        return self._cqc

    @property
    def weights(self):
        """Lattice weights, one per stencil direction."""
        return self._weights

    @property
    def zeroth_order_equilibrium_moment_symbol(self, ):
        """Symbol representing the density."""
        return self._cqc.zeroth_order_moment_symbol

    @property
    def first_order_equilibrium_moment_symbols(self, ):
        """Symbols representing the velocity components."""
        return self._cqc.first_order_moment_symbols

    def get_equilibrium(self, conserved_quantity_equations=None, include_force_terms=False):
        """Returns the equilibrium as a collision rule derived with relaxation rate 1."""
        return self._get_collision_rule_with_relaxation_rate(1,
                                                             conserved_quantity_equations=conserved_quantity_equations,
                                                             include_force_terms=include_force_terms)

    def get_equilibrium_terms(self):
        """Returns the equilibrium right-hand sides as a sympy Matrix."""
        equilibrium = self.get_equilibrium()
        return sp.Matrix([eq.rhs for eq in equilibrium.main_assignments])

    def _get_collision_rule_with_relaxation_rate(self, relaxation_rate, include_force_terms=True,
                                                 conserved_quantity_equations=None):
        # Builds the SRT rule f_post = (1 - omega) * f + omega * f_eq with the
        # entropic product-form equilibrium from the class docstring's reference.
        f = sp.Matrix(self.pre_collision_pdf_symbols)
        rho = self._cqc.zeroth_order_moment_symbol
        u = self._cqc.first_order_moment_symbols
        if conserved_quantity_equations is None:
            conserved_quantity_equations = self._cqc.equilibrium_input_equations_from_pdfs(f)
        all_subexpressions = conserved_quantity_equations.all_assignments
        eq = []
        for w_i, direction in zip(self.weights, self.stencil):
            # product form: one factor per spatial dimension
            f_i = rho * w_i
            for u_a, e_ia in zip(u, direction):
                b = sp.sqrt(1 + 3 * u_a ** 2)
                f_i *= (2 - b) * ((2 * u_a + b) / (1 - u_a)) ** e_ia
            eq.append(f_i)
        collision_eqs = [Assignment(lhs, (1 - relaxation_rate) * f_i + relaxation_rate * eq_i)
                         for lhs, f_i, eq_i in zip(self.post_collision_pdf_symbols, self.pre_collision_pdf_symbols, eq)]
        if (self._forceModel is not None) and include_force_terms:
            # name each force term as a subexpression and add it to its equation
            force_model_terms = self._forceModel(self)
            force_term_symbols = sp.symbols("forceTerm_:%d" % (len(force_model_terms, )))
            force_subexpressions = [Assignment(sym, force_model_term)
                                    for sym, force_model_term in zip(force_term_symbols, force_model_terms)]
            all_subexpressions += force_subexpressions
            collision_eqs = [Assignment(eq.lhs, eq.rhs + force_term_symbol)
                             for eq, force_term_symbol in zip(collision_eqs, force_term_symbols)]
        cr = LbmCollisionRule(self, collision_eqs, all_subexpressions)
        cr.simplification_hints['relaxation_rates'] = []
        return cr

    def get_collision_rule(self, conserved_quantity_equations=None, pre_simplification=None):
        """Returns the collision rule using this method's relaxation rate.
        `pre_simplification` is accepted for interface compatibility but unused here."""
        return self._get_collision_rule_with_relaxation_rate(self._relaxationRate,
                                                             conserved_quantity_equations=conserved_quantity_equations)
def create_srt_entropic(stencil, relaxation_rate, force_model, compressible):
    """Creates an :class:`EntropicEquilibriumSRT` method.

    Args:
        stencil: sequence of lattice directions
        relaxation_rate: single relaxation rate of the method
        force_model: force model instance, or None for no forcing
        compressible: must be True; only compressible models are supported

    Raises:
        NotImplementedError: if `compressible` is falsy
    """
    if not compressible:
        raise NotImplementedError("entropic-srt only implemented for compressible models")
    cqc = DensityVelocityComputation(stencil, compressible, force_model)
    return EntropicEquilibriumSRT(stencil, relaxation_rate, force_model, cqc)
from abc import abstractmethod
import sympy as sp
from pystencils import Assignment, AssignmentCollection
from pystencils.simp import (
SimplificationStrategy, sympy_cse, add_subexpressions_for_divisions, add_subexpressions_for_constants)
from pystencils.simp.assignment_collection import SymbolGen
from pystencils.sympyextensions import subs_additive, fast_subs
from lbmpy.moments import moment_matrix, set_up_shift_matrix, contained_moments, moments_up_to_order
from lbmpy.methods.momentbased.momentbasedsimplifications import (
substitute_moments_in_conserved_quantity_equations,
split_pdf_main_assignments_by_symmetry)
from lbmpy.methods.centeredcumulant.centered_cumulants import statistical_quantity_symbol as sq_sym
# ============================ PDFs <-> Central Moments ==============================================================
# Default base names for the symbols representing pre-/post-collision
# raw moments and central moments in the transform classes below
PRE_COLLISION_RAW_MOMENT = 'm'
POST_COLLISION_RAW_MOMENT = 'm_post'
PRE_COLLISION_CENTRAL_MOMENT = 'kappa'
POST_COLLISION_CENTRAL_MOMENT = 'kappa_post'
class AbstractMomentTransform:
    r"""Base class for bijective transformations between moment spaces.

    Subclasses provide maps between two spaces :math:`\mathcal{S}` and
    :math:`\mathcal{D}` (e.g. populations and raw moments, or central moments
    and cumulants). The forward map :math:`F : \mathcal{S} \mapsto \mathcal{D}`
    is produced by `forward_transform`; its inverse
    :math:`F^{-1} : \mathcal{D} \mapsto \mathcal{S}` by `backward_transform`.
    In a lattice Boltzmann collision operator, `forward_transform` maps the
    pre-collision populations to the required moment space (possibly through
    several consecutive transformations), and `backward_transform` maps
    post-collision quantities back to populations.

    **Transformations:**
    Implementations return their transformation equations as
    `AssignmentCollection`s: `forward_transform` depends on quantities of the
    domain :math:`\mathcal{S}` and assigns quantities of the codomain
    :math:`\mathcal{D}`; `backward_transform` is its inverse.

    **Absorption of conserved quantity equations:**
    A transform starting from population space may *absorb* the equations
    defining the macroscopic quantities of the equilibrium (typically density
    :math:`\rho` and velocity :math:`\vec{u}`): the assignments passed as
    `conserved_quantity_equations` may be rewritten to reduce the operation
    count (e.g. aliasing :math:`\rho` as the zeroth raw moment
    :math:`m_{000}`) and then returned as part of the `forward_transform`
    result, so they need not be added to the collision rule separately.

    **Simplification:**
    Both transform methods accept a `simplification` keyword argument:

    - `False` or `'none'`: no simplification is applied
    - `True` or `'default'`: the implementation-specific default strategy
    - `'default_with_cse'`: the default strategy plus an additional pass of
      common subexpression elimination
    """

    def __init__(self, stencil, moment_exponents,
                 equilibrium_density,
                 equilibrium_velocity,
                 conserved_quantity_equations=None,
                 **kwargs):
        # moment exponent tuples are kept sorted by their total order
        self.moment_exponents = sorted(list(moment_exponents), key=sum)
        self.stencil = stencil
        self.dim = len(stencil[0])
        self.equilibrium_density = equilibrium_density
        self.equilibrium_velocity = equilibrium_velocity
        self.cqe = conserved_quantity_equations

    @abstractmethod
    def forward_transform(self, *args, **kwargs):
        raise NotImplementedError("forward_transform must be implemented in a subclass")

    @abstractmethod
    def backward_transform(self, *args, **kwargs):
        raise NotImplementedError("backward_transform must be implemented in a subclass")

    @property
    def absorbs_conserved_quantity_equations(self):
        """True if the conserved quantity equations are returned, possibly in
        simplified form, as part of the `forward_transform` result."""
        return False

    @property
    def _default_simplification(self):
        return SimplificationStrategy()

    def _get_simp_strategy(self, simplification, direction=None):
        """Resolves the `simplification` argument to a strategy object, or None
        when no simplification was requested."""
        if isinstance(simplification, bool):
            mode = 'default' if simplification else 'none'
        else:
            mode = simplification
        if mode not in ('default', 'default_with_cse'):
            return None
        default = self._default_simplification
        strategy = default if direction is None else default[direction]
        if mode == 'default_with_cse':
            strategy.add(sympy_cse)
        return strategy
class PdfsToCentralMomentsByMatrix(AbstractMomentTransform):
    """Transforms populations to central moments (and back) by plain
    matrix-vector multiplication with the shifted moment matrix."""

    def __init__(self, stencil, moment_exponents, equilibrium_density, equilibrium_velocity, **kwargs):
        assert len(moment_exponents) == len(stencil), 'Number of moments must match stencil'
        super(PdfsToCentralMomentsByMatrix, self).__init__(
            stencil, moment_exponents, equilibrium_density, equilibrium_velocity, **kwargs)
        unshifted_matrix = moment_matrix(self.moment_exponents, self.stencil)
        shift = set_up_shift_matrix(self.moment_exponents, self.stencil, equilibrium_velocity)
        # forward: moment matrix evaluated in the frame shifted by the equilibrium velocity
        self.forward_matrix = moment_matrix(self.moment_exponents, self.stencil, equilibrium_velocity)
        # backward: invert the unshifted moment matrix, then undo the shift
        self.backward_matrix = unshifted_matrix.inv() * shift.inv()

    def forward_transform(self, pdf_symbols, moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT,
                          simplification=True, subexpression_base='sub_f_to_k'):
        """Returns an AssignmentCollection computing the central moments from the PDFs."""
        simp = self._get_simp_strategy(simplification)
        moment_eqs = self.forward_matrix * sp.Matrix(pdf_symbols)
        assignments = [Assignment(sq_sym(moment_symbol_base, exp), rhs)
                       for exp, rhs in zip(self.moment_exponents, moment_eqs)]
        collection = AssignmentCollection(
            assignments, subexpression_symbol_generator=SymbolGen(subexpression_base))
        return simp.apply(collection) if simp else collection

    def backward_transform(self, pdf_symbols, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT,
                           simplification=True, subexpression_base='sub_k_to_f'):
        """Returns an AssignmentCollection computing the PDFs from post-collision
        central moments."""
        simp = self._get_simp_strategy(simplification)
        moment_symbols = sp.Matrix([sq_sym(moment_symbol_base, exp) for exp in self.moment_exponents])
        pdf_eqs = self.backward_matrix * moment_symbols
        assignments = [Assignment(f, rhs) for f, rhs in zip(pdf_symbols, pdf_eqs)]
        collection = AssignmentCollection(
            assignments, subexpression_symbol_generator=SymbolGen(subexpression_base))
        return simp.apply(collection) if simp else collection

    @property
    def _default_simplification(self):
        strategy = SimplificationStrategy()
        strategy.add(add_subexpressions_for_divisions)
        return strategy
# end class PdfsToCentralMomentsByMatrix
class FastCentralMomentTransform(AbstractMomentTransform):
    """Transform from populations to central moments that builds the moments as
    nested partial sums over the stencil directions instead of a full
    matrix-vector product. The backward path reuses
    :class:`PdfsToCentralMomentsByMatrix` and splits the resulting equations
    into shared subexpressions (see `_split_backward_equations`)."""

    def __init__(self, stencil,
                 moment_exponents,
                 equilibrium_density,
                 equilibrium_velocity,
                 conserved_quantity_equations=None,
                 **kwargs):
        assert len(moment_exponents) == len(stencil), 'Number of moments must match stencil'
        super(FastCentralMomentTransform, self).__init__(
            stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
            conserved_quantity_equations=conserved_quantity_equations, **kwargs)
        # matrix-based transform used to derive the backward (moments -> pdfs) equations
        self.mat_transform = PdfsToCentralMomentsByMatrix(
            stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
            conserved_quantity_equations=conserved_quantity_equations, **kwargs)

    def forward_transform(self, pdf_symbols, moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT,
                          simplification=True, subexpression_base='sub_f_to_k'):
        """Returns an AssignmentCollection computing the central moments from the PDFs.

        Args:
            pdf_symbols: symbols of the pre-collision populations
            moment_symbol_base: base name for the central moment symbols
            simplification: see :class:`AbstractMomentTransform`
            subexpression_base: base name for generated subexpression symbols
        """
        simplification = self._get_simp_strategy(simplification, 'forward')

        def _partial_kappa_symbol(fixed_directions, remaining_exponents):
            # symbol for a partial sum over the not-yet-summed dimensions;
            # '-' is replaced by 'm' to produce valid symbol names
            fixed_str = '_'.join(str(direction) for direction in fixed_directions).replace('-', 'm')
            exp_str = '_'.join(str(exp) for exp in remaining_exponents).replace('-', 'm')
            return sp.Symbol(f"partial_kappa_{fixed_str}_e_{exp_str}")

        subexpressions_dict = dict()
        main_assignments = []

        def collect_partial_sums(exponents, dimension=0, fixed_directions=tuple()):
            # Recursively sums f_q * prod_i (d_i - u_i)^e_i over all stencil
            # directions, one dimension at a time; partial sums are shared
            # between moments via subexpressions_dict. Only direction
            # components in {-1, 0, 1} are covered by the loop below.
            if dimension == self.dim:
                # Base Case
                if fixed_directions in self.stencil:
                    return pdf_symbols[self.stencil.index(fixed_directions)]
                else:
                    return 0
            else:
                # Recursive Case
                summation = sp.sympify(0)
                for d in [-1, 0, 1]:
                    next_partial = collect_partial_sums(
                        exponents, dimension=dimension + 1, fixed_directions=fixed_directions + (d,))
                    summation += next_partial * (d - self.equilibrium_velocity[dimension])**exponents[dimension]
                if dimension == 0:
                    lhs_symbol = sq_sym(moment_symbol_base, exponents)
                    main_assignments.append(Assignment(lhs_symbol, summation))
                else:
                    lhs_symbol = _partial_kappa_symbol(fixed_directions, exponents[dimension:])
                    subexpressions_dict[lhs_symbol] = summation
                return lhs_symbol

        for e in self.moment_exponents:
            collect_partial_sums(e)
        subexpressions = [Assignment(lhs, rhs) for lhs, rhs in subexpressions_dict.items()]
        symbol_gen = SymbolGen(subexpression_base)
        ac = AssignmentCollection(main_assignments, subexpressions=subexpressions,
                                  subexpression_symbol_generator=symbol_gen)
        if simplification:
            ac = self._simplify_lower_order_moments(ac, moment_symbol_base)
            ac = simplification.apply(ac)
        return ac

    def backward_transform(self, pdf_symbols, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT,
                           simplification=True, subexpression_base='sub_k_to_f'):
        """Returns an AssignmentCollection computing the PDFs from post-collision
        central moments.

        NOTE(review): a caller-supplied `moment_symbol_base` is ignored below —
        the constant POST_COLLISION_CENTRAL_MOMENT is always forwarded; confirm
        whether `moment_symbol_base` should be passed on instead.
        """
        simplification = self._get_simp_strategy(simplification, 'backward')
        raw_equations = self.mat_transform.backward_transform(
            pdf_symbols, moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT, simplification=False)
        raw_equations = raw_equations.new_without_subexpressions()
        symbol_gen = SymbolGen(subexpression_base)
        ac = self._split_backward_equations(raw_equations, symbol_gen)
        if simplification:
            ac = simplification.apply(ac)
        return ac

    # ----------------------------- Private Members -----------------------------

    @property
    def _default_simplification(self):
        # separate default strategies for the two transform directions
        forward_simp = SimplificationStrategy()
        forward_simp.add(add_subexpressions_for_divisions)

        backward_simp = SimplificationStrategy()
        backward_simp.add(add_subexpressions_for_divisions)
        backward_simp.add(add_subexpressions_for_constants)
        return {
            'forward': forward_simp,
            'backward': backward_simp
        }

    def _simplify_lower_order_moments(self, ac, moment_base):
        # Rewrites the equations of moments up to order 1 using the conserved
        # quantity equations so they collapse to their simplest form.
        if self.cqe is None:
            return ac
        f_to_cm_dict = ac.main_assignments_dict
        f_to_cm_dict_reduced = ac.new_without_subexpressions().main_assignments_dict
        moment_symbols = [sq_sym(moment_base, e) for e in moments_up_to_order(1, dim=self.dim)]
        cqe_subs = self.cqe.new_without_subexpressions().main_assignments_dict
        for m in moment_symbols:
            # substitute twice to resolve chained dependencies among the cqe symbols
            m_eq = fast_subs(fast_subs(f_to_cm_dict_reduced[m], cqe_subs), cqe_subs)
            m_eq = m_eq.expand().cancel()
            for cqe_sym, cqe_exp in cqe_subs.items():
                m_eq = subs_additive(m_eq, cqe_sym, cqe_exp)
            f_to_cm_dict[m] = m_eq
        main_assignments = [Assignment(lhs, rhs) for lhs, rhs in f_to_cm_dict.items()]
        return ac.copy(main_assignments=main_assignments)

    def _split_backward_equations_recursive(self, assignment, all_subexpressions,
                                            stencil_direction, subexp_symgen, known_coeffs_dict,
                                            step=0):
        # Splits one backward equation into coefficient subexpressions that can
        # be shared between the equations of several stencil directions,
        # handling one dimension per recursion step. Only direction components
        # in {-1, 0, 1} appear in grouping_factors below.
        # Base Case
        if step == self.dim:
            return assignment
        # Recursive Case
        u = self.equilibrium_velocity[-1 - step]
        d = stencil_direction[-1 - step]
        one = sp.Integer(1)
        two = sp.Integer(2)
        # Factors to group terms by
        grouping_factors = {
            -1: [one, 2 * u - 1, u**2 - u],
            0: [-one, -2 * u, 1 - u**2],
            1: [one, 2 * u + 1, u**2 + u]
        }
        factors = grouping_factors[d]
        # Common Integer factor to extract from all groups
        common_factor = one if d == 0 else two
        # Proxy for factor grouping: substitute u and u**2 by expressions in v,
        # then collect the right-hand side by powers of v
        v = sp.Symbol('v')
        square_factor_eq = (factors[2] - v**2)
        lin_factor_eq = (factors[1] - v)
        sub_for_u_sq = sp.solve(square_factor_eq, u**2)[0]
        sub_for_u = sp.solve(lin_factor_eq, u)[0]
        subs = {u**2: sub_for_u_sq, u: sub_for_u}
        rhs_grouped_by_v = assignment.rhs.subs(subs).expand().collect(v)
        new_rhs = sp.Integer(0)
        for k in range(3):
            coeff = rhs_grouped_by_v.coeff(v, k)
            coeff_subexp = common_factor * coeff
            # Explicitly divide out the constant factor in the zero case
            if k == 0:
                coeff_subexp = coeff_subexp / factors[0]
            # MEMOISATION:
            # The subexpression just generated might already have been found
            # If so, reuse the existing symbol and skip forward.
            # Otherwise, create it anew and continue recursively
            coeff_symb = known_coeffs_dict.get(coeff_subexp, None)
            if coeff_symb is None:
                coeff_symb = next(subexp_symgen)
                known_coeffs_dict[coeff_subexp] = coeff_symb
                coeff_assignment = Assignment(coeff_symb, coeff_subexp)
                # Recursively split the coefficient term
                coeff_assignment = self._split_backward_equations_recursive(
                    coeff_assignment, all_subexpressions, stencil_direction, subexp_symgen,
                    known_coeffs_dict, step=step + 1)
                all_subexpressions.append(coeff_assignment)
            new_rhs += factors[k] * coeff_symb
        new_rhs = sp.Rational(1, common_factor) * new_rhs
        return Assignment(assignment.lhs, new_rhs)

    def _split_backward_equations(self, backward_assignments, subexp_symgen):
        # Entry point of the recursive splitting: processes one backward
        # equation per stencil direction; coefficient subexpressions are shared
        # through known_coeffs_dict
        all_subexpressions = []
        split_main_assignments = []
        known_coeffs_dict = dict()
        for asm, stencil_dir in zip(backward_assignments, self.stencil):
            split_asm = self._split_backward_equations_recursive(
                asm, all_subexpressions, stencil_dir, subexp_symgen, known_coeffs_dict)
            split_main_assignments.append(split_asm)
        ac = AssignmentCollection(split_main_assignments, subexpressions=all_subexpressions,
                                  subexpression_symbol_generator=subexp_symgen)
        ac.topological_sort(sort_main_assignments=False)
        return ac
# end class FastCentralMomentTransform
class PdfsToRawMomentsTransform(AbstractMomentTransform):
    """Transform from populations to raw moments, computed as nested partial
    sums over the stencil directions. The conserved quantity equations are
    absorbed into the forward transform (see `absorbs_conserved_quantity_equations`
    and `get_cq_to_moment_symbols_dict`)."""

    def __init__(self, stencil, moment_exponents,
                 equilibrium_density,
                 equilibrium_velocity,
                 conserved_quantity_equations=None,
                 **kwargs):
        assert len(moment_exponents) == len(stencil), 'Number of moments must match stencil'
        super(PdfsToRawMomentsTransform, self).__init__(
            stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
            conserved_quantity_equations=conserved_quantity_equations,
            **kwargs)
        # the backward path is a plain product with the inverse moment matrix
        self.inv_moment_matrix = moment_matrix(self.moment_exponents, self.stencil).inv()

    @property
    def absorbs_conserved_quantity_equations(self):
        # the cqe assignments are rewritten and returned by forward_transform
        return True

    def get_cq_to_moment_symbols_dict(self, moment_symbol_base):
        """Returns a dict mapping the conserved quantity symbols defined by the
        cqe to the raw moment symbols they alias: the density to the zeroth
        moment, each velocity component to the corresponding first-order moment."""
        if self.cqe is None:
            return dict()
        rho = self.equilibrium_density
        u = self.equilibrium_velocity
        cq_symbols_to_moments = dict()
        if isinstance(rho, sp.Symbol) and rho in self.cqe.defined_symbols:
            cq_symbols_to_moments[rho] = sq_sym(moment_symbol_base, (0,) * self.dim)
        for d, u_sym in enumerate(u):
            if isinstance(u_sym, sp.Symbol) and u_sym in self.cqe.defined_symbols:
                cq_symbols_to_moments[u_sym] = sq_sym(moment_symbol_base, tuple(
                    (1 if i == d else 0) for i in range(self.dim)))
        return cq_symbols_to_moments

    def forward_transform(self, pdf_symbols, moment_symbol_base=PRE_COLLISION_RAW_MOMENT,
                          simplification=True, subexpression_base='sub_f_to_m'):
        """Returns an AssignmentCollection computing the raw moments from the
        PDFs; the (possibly rewritten) conserved quantity equations are part of
        the returned collection."""
        simplification = self._get_simp_strategy(simplification, 'forward')

        def _partial_kappa_symbol(fixed_directions, remaining_exponents):
            # symbol for a partial sum; '-' is replaced by 'm' for valid names
            fixed_str = '_'.join(str(direction) for direction in fixed_directions).replace('-', 'm')
            exp_str = '_'.join(str(exp) for exp in remaining_exponents).replace('-', 'm')
            return sp.Symbol(f"partial_{moment_symbol_base}_{fixed_str}_e_{exp_str}")

        partial_sums_dict = dict()
        # the cqe assignments are carried along and become part of the result
        main_assignments = self.cqe.main_assignments.copy() if self.cqe is not None else []
        subexpressions = self.cqe.subexpressions.copy() if self.cqe is not None else []

        def collect_partial_sums(exponents, dimension=0, fixed_directions=tuple()):
            # Recursively sums f_q * prod_i d_i^e_i over all stencil directions,
            # one dimension at a time, sharing partial sums; only direction
            # components in {-1, 0, 1} are covered by the loop below
            if dimension == self.dim:
                # Base Case
                if fixed_directions in self.stencil:
                    return pdf_symbols[self.stencil.index(fixed_directions)]
                else:
                    return 0
            else:
                # Recursive Case
                summation = sp.sympify(0)
                for d in [-1, 0, 1]:
                    next_partial = collect_partial_sums(
                        exponents, dimension=dimension + 1, fixed_directions=fixed_directions + (d,))
                    summation += next_partial * d ** exponents[dimension]
                if dimension == 0:
                    lhs_symbol = sq_sym(moment_symbol_base, exponents)
                    main_assignments.append(Assignment(lhs_symbol, summation))
                else:
                    lhs_symbol = _partial_kappa_symbol(fixed_directions, exponents[dimension:])
                    partial_sums_dict[lhs_symbol] = summation
                return lhs_symbol

        for e in self.moment_exponents:
            collect_partial_sums(e)
        subexpressions += [Assignment(lhs, rhs) for lhs, rhs in partial_sums_dict.items()]
        symbol_gen = SymbolGen(subexpression_base)
        ac = AssignmentCollection(main_assignments, subexpressions=subexpressions,
                                  subexpression_symbol_generator=symbol_gen)
        ac.add_simplification_hint('cq_symbols_to_moments', self.get_cq_to_moment_symbols_dict(moment_symbol_base))
        if simplification:
            ac = simplification.apply(ac)
        return ac

    def backward_transform(self, pdf_symbols, moment_symbol_base=POST_COLLISION_RAW_MOMENT,
                           simplification=True, subexpression_base='sub_k_to_f'):
        """Returns an AssignmentCollection computing the PDFs from the
        post-collision raw moments via the inverse moment matrix."""
        simplification = self._get_simp_strategy(simplification, 'backward')
        post_collision_moments = [sq_sym(moment_symbol_base, e) for e in self.moment_exponents]
        rm_to_f_vec = self.inv_moment_matrix * sp.Matrix(post_collision_moments)
        main_assignments = [Assignment(f, eq) for f, eq in zip(pdf_symbols, rm_to_f_vec)]
        symbol_gen = SymbolGen(subexpression_base)
        ac = AssignmentCollection(main_assignments, subexpression_symbol_generator=symbol_gen)
        # hints presumably consumed by the backward simplification
        # (split_pdf_main_assignments_by_symmetry) — verify against its implementation
        ac.add_simplification_hint('stencil', self.stencil)
        ac.add_simplification_hint('post_collision_pdf_symbols', pdf_symbols)
        if simplification:
            ac = simplification.apply(ac)
        return ac

    # ----------------------------- Private Members -----------------------------

    @property
    def _default_simplification(self):
        # separate default strategies for the two transform directions
        forward_simp = SimplificationStrategy()
        forward_simp.add(substitute_moments_in_conserved_quantity_equations)
        forward_simp.add(add_subexpressions_for_divisions)

        backward_simp = SimplificationStrategy()
        backward_simp.add(split_pdf_main_assignments_by_symmetry)
        backward_simp.add(add_subexpressions_for_constants)
        return {
            'forward': forward_simp,
            'backward': backward_simp
        }
# end class PdfsToRawMomentsTransform
class PdfsToCentralMomentsByShiftMatrix(AbstractMomentTransform):
    """Transform between PDF space and central-moment space, composed of two steps.

    PDFs are first mapped to raw moments by an internal
    `PdfsToRawMomentsTransform`; the raw moments are then mapped to central
    moments with the matrix obtained from `set_up_shift_matrix` (its inverse
    is used for the backward direction).
    """

    def __init__(self, stencil, moment_exponents,
                 equilibrium_density,
                 equilibrium_velocity,
                 conserved_quantity_equations=None,
                 **kwargs):
        """Set up the two-step transform.

        Args:
            stencil: Velocity stencil; one moment exponent tuple is required
                per stencil direction.
            moment_exponents: Exponent tuples of the moments to transform to.
            equilibrium_density: Symbol(s) of the equilibrium density.
            equilibrium_velocity: Symbols of the equilibrium velocity.
            conserved_quantity_equations: Optional assignment collection
                defining the conserved quantities; forwarded to the base class
                and to the internal raw-moment transform.
        """
        assert len(moment_exponents) == len(stencil), 'Number of moments must match stencil'
        super(PdfsToCentralMomentsByShiftMatrix, self).__init__(
            stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
            conserved_quantity_equations=conserved_quantity_equations,
            **kwargs)

        # The PDF <-> raw-moment part of the transform is delegated to this
        # helper; this class only adds the raw <-> central shift step.
        self.raw_moment_transform = PdfsToRawMomentsTransform(
            stencil, moment_exponents, equilibrium_density, equilibrium_velocity,
            conserved_quantity_equations=conserved_quantity_equations,
            **kwargs)

        # shift_matrix maps raw moments to central moments (see
        # forward_transform); its inverse performs the reverse mapping.
        self.shift_matrix = set_up_shift_matrix(self.moment_exponents, self.stencil, self.equilibrium_velocity)
        self.inv_shift_matrix = self.shift_matrix.inv()

    @property
    def absorbs_conserved_quantity_equations(self):
        # The conserved-quantity equations are folded into the system built by
        # forward_transform, so callers must not emit them separately.
        return True

    def forward_transform(self, pdf_symbols,
                          moment_symbol_base=PRE_COLLISION_CENTRAL_MOMENT,
                          simplification=True,
                          subexpression_base='sub_f_to_k',
                          raw_moment_base=PRE_COLLISION_RAW_MOMENT):
        """Build an AssignmentCollection computing pre-collision central
        moments from the given PDF symbols.

        Args:
            pdf_symbols: Symbols of the pre-collision populations.
            moment_symbol_base: Base name of the central-moment symbols.
            simplification: True for the default simplification strategy,
                False for none, or an explicit strategy object
                (resolved by `_get_simp_strategy`).
            subexpression_base: Base name for generated subexpression symbols.
            raw_moment_base: Base name of the intermediate raw-moment symbols.
        """
        simplification = self._get_simp_strategy(simplification, 'forward')
        central_moment_base = moment_symbol_base

        symbolic_rms = [sq_sym(raw_moment_base, e) for e in self.moment_exponents]
        symbolic_cms = [sq_sym(central_moment_base, e) for e in self.moment_exponents]

        # PDFs -> raw moments, delegated without simplification; the combined
        # system is simplified below instead.
        rm_ac = self.raw_moment_transform.forward_transform(pdf_symbols, raw_moment_base, False, subexpression_base)
        cq_symbols_to_moments = self.raw_moment_transform.get_cq_to_moment_symbols_dict(raw_moment_base)

        # Raw moments -> central moments via the shift matrix.
        rm_to_cm_vec = self.shift_matrix * sp.Matrix(symbolic_rms)

        cq_subs = dict()
        if simplification:
            rm_ac = substitute_moments_in_conserved_quantity_equations(rm_ac)

            # Compute replacements for conserved moments in terms of the CQE
            rm_asm_dict = rm_ac.main_assignments_dict
            for cq_sym, moment_sym in cq_symbols_to_moments.items():
                cq_eq = rm_asm_dict[cq_sym]
                # Invert the CQE assignment to express the conserved moment
                # through the conserved-quantity symbol.
                solutions = sp.solve(cq_eq - cq_sym, moment_sym)
                if len(solutions) > 0:
                    cq_subs[moment_sym] = solutions[0]
            rm_to_cm_vec = fast_subs(rm_to_cm_vec, cq_subs)

        rm_to_cm_dict = {cm: rm for cm, rm in zip(symbolic_cms, rm_to_cm_vec)}

        if simplification:
            rm_to_cm_dict = self._simplify_raw_to_central_moments(
                rm_to_cm_dict, self.moment_exponents, raw_moment_base, central_moment_base)
            rm_to_cm_dict = self._undo_remaining_cq_subexpressions(rm_to_cm_dict, cq_subs)

        # All assignments of the raw-moment system become subexpressions of
        # the combined PDF -> central-moment system.
        subexpressions = rm_ac.all_assignments
        symbol_gen = SymbolGen(subexpression_base, dtype=float)
        ac = AssignmentCollection(rm_to_cm_dict, subexpressions=subexpressions,
                                  subexpression_symbol_generator=symbol_gen)
        if simplification:
            ac = simplification.apply(ac)
        return ac

    def backward_transform(self, pdf_symbols,
                           moment_symbol_base=POST_COLLISION_CENTRAL_MOMENT,
                           simplification=True,
                           subexpression_base='sub_k_to_f',
                           raw_moment_base=POST_COLLISION_RAW_MOMENT):
        """Build an AssignmentCollection computing post-collision PDFs from
        post-collision central moments.

        Args:
            pdf_symbols: Symbols of the post-collision populations.
            moment_symbol_base: Base name of the central-moment symbols.
            simplification: True for the default simplification strategy,
                False for none, or an explicit strategy object.
            subexpression_base: Base name for generated subexpression symbols.
            raw_moment_base: Base name of the intermediate raw-moment symbols.
        """
        simplification = self._get_simp_strategy(simplification, 'backward')
        central_moment_base = moment_symbol_base

        symbolic_rms = [sq_sym(raw_moment_base, e) for e in self.moment_exponents]
        symbolic_cms = [sq_sym(central_moment_base, e) for e in self.moment_exponents]

        # Central moments -> raw moments via the inverse shift matrix.
        cm_to_rm_vec = self.inv_shift_matrix * sp.Matrix(symbolic_cms)
        cm_to_rm_dict = {rm: eq for rm, eq in zip(symbolic_rms, cm_to_rm_vec)}
        if simplification:
            cm_to_rm_dict = self._factor_backward_eqs_by_velocities(symbolic_rms, cm_to_rm_dict)

        # Raw moments -> PDFs, delegated without simplification.
        rm_ac = self.raw_moment_transform.backward_transform(pdf_symbols, raw_moment_base, False, subexpression_base)

        # Prepend the central -> raw moment assignments to the subexpressions
        # of the raw-moment -> PDF system.
        cm_to_rm_assignments = [Assignment(lhs, rhs) for lhs, rhs in cm_to_rm_dict.items()]
        subexpressions = cm_to_rm_assignments + rm_ac.subexpressions
        ac = rm_ac.copy(subexpressions=subexpressions)
        if simplification:
            ac = simplification.apply(ac)
        return ac

    # ----------------------------- Private Members -----------------------------

    def _simplify_raw_to_central_moments(self, rm_to_cm_dict, moment_exponents, raw_moment_base, central_moment_base):
        """Rewrite each raw-to-central-moment equation of order >= 2 so that
        contained lower-order raw moments are replaced by their central-moment
        counterparts (mutates and returns `rm_to_cm_dict`)."""
        for cm in moment_exponents:
            # Moments of order 0 and 1 contain no lower second-order moments.
            if sum(cm) < 2:
                continue
            cm_symb = sq_sym(central_moment_base, cm)
            cm_asm_rhs = rm_to_cm_dict[cm_symb]
            # Walk the contained moments from highest to lowest order.
            for m in contained_moments(cm, min_order=2)[::-1]:
                contained_rm_symb = sq_sym(raw_moment_base, m)
                contained_cm_symb = sq_sym(central_moment_base, m)
                contained_cm_eq = rm_to_cm_dict[contained_cm_symb]
                # Invert the contained equation to express its raw moment in
                # terms of its central moment, then substitute.
                rm_in_terms_of_cm = sp.solve(contained_cm_eq - contained_cm_symb, contained_rm_symb)[0]
                cm_asm_rhs = cm_asm_rhs.subs({contained_rm_symb: rm_in_terms_of_cm}).expand()
            rm_to_cm_dict[cm_symb] = cm_asm_rhs
        return rm_to_cm_dict

    def _undo_remaining_cq_subexpressions(self, rm_to_cm_dict, cq_subs):
        """Where an expanded conserved-quantity substitution (from
        `forward_transform`) still occurs additively in an equation, replace
        it by its moment symbol again (mutates and returns `rm_to_cm_dict`)."""
        for cm, cm_eq in rm_to_cm_dict.items():
            for rm, rm_subexp in cq_subs.items():
                cm_eq = subs_additive(cm_eq, rm, rm_subexp)
            rm_to_cm_dict[cm] = cm_eq
        return rm_to_cm_dict

    def _factor_backward_eqs_by_velocities(self, symbolic_rms, cm_to_rm_dict, required_match_replacement=0.75):
        """Collect the backward equations by their most frequent velocity
        symbols and eliminate shared additive subexpressions between them
        (mutates and returns `cm_to_rm_dict`)."""
        # For each equation, order the velocity symbols by occurrence count,
        # most frequent first.
        velocity_by_occurences = dict()
        for rm, rm_eq in cm_to_rm_dict.items():
            velocity_by_occurences[rm] = sorted(self.equilibrium_velocity, key=rm_eq.count, reverse=True)
        # Collect each equation once per spatial dimension, using its d-th
        # most frequent velocity symbol.
        for d in range(self.dim):
            for rm, rm_eq in cm_to_rm_dict.items():
                u_sorted = velocity_by_occurences[rm]
                cm_to_rm_dict[rm] = rm_eq.expand().collect(u_sorted[d])

        # Try to express each equation through the preceding raw-moment
        # symbols; subs_additive applies a replacement only when a sufficient
        # fraction of terms matches (required_match_replacement).
        for i, rm1 in enumerate(symbolic_rms):
            for _, rm2 in enumerate(symbolic_rms[i + 1:]):
                cm_to_rm_dict[rm2] = subs_additive(
                    cm_to_rm_dict[rm2], rm1, cm_to_rm_dict[rm1],
                    required_match_replacement=required_match_replacement)
        return cm_to_rm_dict

    @property
    def _default_simplification(self):
        """Default simplification strategies for the forward and backward
        transform, selected by `_get_simp_strategy`."""
        forward_simp = SimplificationStrategy()
        forward_simp.add(add_subexpressions_for_divisions)

        backward_simp = SimplificationStrategy()
        backward_simp.add(split_pdf_main_assignments_by_symmetry)
        backward_simp.add(add_subexpressions_for_constants)

        return {
            'forward': forward_simp,
            'backward': backward_simp
        }

# end class PdfsToCentralMomentsByShiftMatrix