Compare revisions: pycodegen/lbmpy

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (45)
Showing with 1050 additions and 291 deletions
.gitattributes

-lbmpy/_version.py export-subst
+src/lbmpy/_version.py export-subst
.gitlab-ci.yml

 stages:
   - pretest
   - test
+  - nightly
+  - docs
   - deploy

+# -------------------------- Templates ------------------------------------------------------------------------------------
+
+# Base configuration for jobs meant to run at every commit
+.every-commit:
+  rules:
+    - if: $CI_PIPELINE_SOURCE != "schedule"
+
+# Configuration for jobs meant to run on each commit to pycodegen/lbmpy's master branch
+.every-commit-master:
+  rules:
+    - if: '$CI_PIPELINE_SOURCE != "schedule" && $CI_PROJECT_PATH == "pycodegen/lbmpy" && $CI_COMMIT_BRANCH == "master"'
+
+# Base configuration for jobs meant to run at a schedule
+.scheduled:
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "schedule"
 # -------------------------- Pre Tests --------------------------------------------------------------------------

 # Normal test - runs on every commit, all but "long run" tests
 tests-and-coverage:
   stage: pretest
-  except:
-    variables:
-      - $ENABLE_NIGHTLY_BUILDS
-  image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
+  extends: .every-commit
+  image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full:cupy12.3
   script:
     # - pip install sympy --upgrade
     - export NUM_CORES=$(nproc --all)
@@ -62,9 +79,7 @@ tests-and-coverage-with-longrun:

 minimal-conda:
   stage: pretest
-  except:
-    variables:
-      - $ENABLE_NIGHTLY_BUILDS
+  extends: .every-commit
   image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda
   script:
     - pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils

@@ -76,9 +91,7 @@ minimal-conda:

 # Linter for code formatting
 flake8-lint:
   stage: pretest
-  except:
-    variables:
-      - $ENABLE_NIGHTLY_BUILDS
+  extends: .every-commit
   image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
   script:
     - flake8 src/lbmpy

@@ -91,9 +104,7 @@ flake8-lint:

 # pipeline with latest python version
 latest-python:
   stage: test
-  except:
-    variables:
-      - $ENABLE_NIGHTLY_BUILDS
+  extends: .every-commit
   image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
   before_script:
     - pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils

@@ -133,9 +144,7 @@ latest-python:

 minimal-sympy-master:
   stage: test
-  except:
-    variables:
-      - $ENABLE_NIGHTLY_BUILDS
+  extends: .every-commit
   image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda
   before_script:
     - pip install -e .

@@ -151,9 +160,7 @@ minimal-sympy-master:

 ubuntu:
   stage: test
-  except:
-    variables:
-      - $ENABLE_NIGHTLY_BUILDS
+  extends: .every-commit
   image: i10git.cs.fau.de:5005/pycodegen/pycodegen/ubuntu
   before_script:
     # - apt-get -y remove python3-sympy

@@ -210,10 +217,41 @@ pycodegen-integration:
     - cuda11
     - AVX
+# -------------------- Scheduled Tasks --------------------------------------------------------------------------
+
+nightly-sympy:
+  stage: nightly
+  extends: .scheduled
+  image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
+  before_script:
+    - pip install -e .
+    - pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
+    - pip install --upgrade --pre sympy
+  script:
+    - env
+    - pip list
+    - export NUM_CORES=$(nproc --all)
+    - mkdir -p ~/.config/matplotlib
+    - echo "backend:template" > ~/.config/matplotlib/matplotlibrc
+    - mkdir public
+    - pytest -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
+  tags:
+    - docker
+    - AVX
+    - cuda
+  artifacts:
+    when: always
+    reports:
+      junit: report.xml
 # -------------------- Documentation and deploy ------------------------------------------------------------------------

 build-documentation:
-  stage: test
+  stage: docs
+  needs: []
+  extends: .every-commit
   image: i10git.cs.fau.de:5005/pycodegen/pycodegen/documentation
   before_script:
     - pip install -e .

@@ -232,7 +270,9 @@ build-documentation:

 pages:
   image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
+  extends: .every-commit-master
   stage: deploy
+  needs: ["tests-and-coverage", "build-documentation"]
   script:
     - ls -l
     - mv coverage_report html_doc

@@ -242,5 +282,3 @@ pages:
     - public
   tags:
     - docker
-  only:
-    - master@pycodegen/lbmpy
COPYING.txt

+------------------------ Important ---------------------------------
+lbmpy is licensed under the GNU AGPLv3 given below.
+This license holds for the sources of lbmpy itself as well
+as for all kernels generated with lbmpy, i.e.
+the output of lbmpy is also protected by the GNU AGPLv3 license.
+----------------------------------------------------------------------

                     GNU AFFERO GENERAL PUBLIC LICENSE
                        Version 3, 19 November 2007
conftest.py

@@ -19,9 +19,10 @@ try:
     import pyximport

     pyximport.install(language_level=3)
+    from lbmpy.phasefield.simplex_projection import simplex_projection_2d  # NOQA
 except ImportError:
     pass
-from lbmpy.phasefield.simplex_projection import simplex_projection_2d  # NOQA

 SCRIPT_FOLDER = os.path.dirname(os.path.realpath(__file__))
 sys.path.insert(0, os.path.abspath('lbmpy'))

@@ -106,18 +107,25 @@ class IPyNbTest(pytest.Item):
         # disable matplotlib output
         exec("import matplotlib.pyplot as p; "
+             "p.close('all'); "
             "p.switch_backend('Template')", global_dict)

         # in notebooks there is an implicit plt.show() - if this is not called a warning is shown when the next
         # plot is created. This warning is suppressed here
+        # Also animations cannot be shown, which also leads to a warning.
         exec("import warnings;"
-             "warnings.filterwarnings('ignore', 'Adding an axes using the same arguments as a previous.*');",
+             "warnings.filterwarnings('ignore', 'Adding an axes using the same arguments as a previous.*');"
+             "warnings.filterwarnings('ignore', 'Animation was deleted without rendering anything.*');",
             global_dict)

         with tempfile.NamedTemporaryFile() as f:
             f.write(self.code.encode())
             f.flush()
             runpy.run_path(f.name, init_globals=global_dict, run_name=self.name)

+        # Close any open figures
+        exec("import matplotlib.pyplot as p; "
+             "p.close('all')", global_dict)

 class IPyNbFile(pytest.File):
     def collect(self):

@@ -140,10 +148,19 @@ class IPyNbFile(pytest.File):
         pass

-def pytest_collect_file(path, parent):
-    glob_exprs = ["*demo*.ipynb", "*tutorial*.ipynb", "test_*.ipynb"]
-    if any(path.fnmatch(g) for g in glob_exprs):
-        if pytest_version >= 50403:
-            return IPyNbFile.from_parent(fspath=path, parent=parent)
-        else:
-            return IPyNbFile(path, parent)
+if pytest_version >= 70000:
+    # Since pytest 7.0, usage of `py.path.local` is deprecated and `pathlib.Path` should be used instead
+    import pathlib
+
+    def pytest_collect_file(file_path: pathlib.Path, parent):
+        glob_exprs = ["*demo*.ipynb", "*tutorial*.ipynb", "test_*.ipynb"]
+        if any(file_path.match(g) for g in glob_exprs):
+            return IPyNbFile.from_parent(path=file_path, parent=parent)
+else:
+    def pytest_collect_file(path, parent):
+        glob_exprs = ["*demo*.ipynb", "*tutorial*.ipynb", "test_*.ipynb"]
+        if any(path.fnmatch(g) for g in glob_exprs):
+            if pytest_version >= 50403:
+                return IPyNbFile.from_parent(fspath=path, parent=parent)
+            else:
+                return IPyNbFile(path, parent)
(Source diff of one file could not be displayed: it is too large.)
%% Cell type:code id: tags:

``` python
from lbmpy.session import *
from lbmpy.relaxationrates import *
```

%% Cell type:markdown id: tags:

# Tutorial 06: Modifying an LBM method: Smagorinsky model

In this demo, we show how to modify a lattice Boltzmann method. As an example, we are going to add a simple turbulence model by introducing a rule that locally computes the relaxation parameter dependent on the local strain rate tensor. The Smagorinsky model is implemented directly in *lbmpy* as well; however, here we take the manual approach to demonstrate how an LB method can be changed in *lbmpy*.

## 1) Theoretical background

Since we have *sympy* available, we want to start out with the basic model equations and derive the concrete equations ourselves. This approach is less error-prone, since the calculations are done by the computer algebra system, and it is often also more general and easier to understand.
   
### a) Smagorinsky model

The basic idea of the Smagorinsky turbulence model is to save compute time by not resolving the smallest eddies of the flow on the grid, but modeling them by an artificial dissipation term.
The energy dissipation of small-scale vortices is taken into account by introducing a "turbulent viscosity". This additional viscosity depends on local flow properties, namely the local shear rates: the larger the local shear rates, the higher the turbulent viscosity and the more artificial dissipation is added.

The total viscosity is

$$\nu_{total} = \nu_0 + \underbrace{(C_S \Delta)^2 |S|}_{\nu_{t}}$$

where $\nu_0$ is the normal viscosity and $C_S$ is the Smagorinsky constant (not to be confused with the speed of sound!). Typical values of the Smagorinsky constant are between 0.1 and 0.2. The filter length $\Delta$ is chosen as 1 in lattice coordinates.

The quantity $|S|$ is computed from the local strain rate tensor $S$ that is given by

$$S_{ij} = \frac{1}{2} \left( \partial_i u_j + \partial_j u_i \right)$$

and

$$|S| = \sqrt{2 S_{ij} S_{ij}}$$
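%% Cell type:markdown id: tags:

As a quick numeric sanity check (with made-up values, not part of the derivation below): for a typical Smagorinsky constant and a moderate shear rate, the turbulent contribution is only a small correction to the base viscosity.

%% Cell type:code id: tags:

``` python
# Hypothetical numbers, chosen only for illustration
C_S_val, Δ, S_abs = 0.12, 1.0, 0.01   # Smagorinsky constant, filter length, |S|
ν_0_val = 0.05                        # assumed base lattice viscosity
ν_t = (C_S_val * Δ) ** 2 * S_abs      # turbulent eddy viscosity
ν_0_val + ν_t                         # total viscosity: 0.050144
```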
   
   
### b) LBM implementation of Smagorinsky model

To add the Smagorinsky model to an LB scheme, one first has to compute the strain rate tensor $S_{ij}$ in each cell and obtain the turbulent viscosity $\nu_t$ from it. Then the local relaxation rate has to be adapted to match the total viscosity $\nu_{total}$ instead of the standard viscosity $\nu_0$.

A fortunate property of LB methods is that the strain rate tensor can be computed locally from the non-equilibrium part of the distribution function. This is somewhat surprising, since the strain rate tensor contains first-order derivatives. The strain rate tensor can be obtained by

$$S_{ij} = - \frac{3 \omega_s}{2 \rho_{(0)}} \Pi_{ij}^{(neq)}$$

where $\omega_s$ is the relaxation rate that determines the viscosity, and $\rho_{(0)}$ is $\rho$ in compressible models and $1$ for incompressible schemes.
$\Pi_{ij}^{(neq)}$ is the second-order moment tensor of the non-equilibrium part of the distribution functions $f^{(neq)} = f - f^{(eq)}$ and can be computed as

$$\Pi_{ij}^{(neq)} = \sum_q c_{qi} c_{qj} \; f_q^{(neq)}$$
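%% Cell type:markdown id: tags:

The moment sum above translates almost literally into code. The following sketch (with a D2Q9 stencil and made-up $f^{(neq)}$ values, purely for illustration) evaluates $\Pi_{ij}^{(neq)}$:

%% Cell type:code id: tags:

``` python
# D2Q9 stencil directions and made-up non-equilibrium values
d2q9 = [(0, 0), (0, 1), (0, -1), (-1, 0), (1, 0),
        (-1, 1), (1, 1), (-1, -1), (1, -1)]
f_neq_demo = [0.0, 1e-3, -1e-3, 2e-3, -2e-3, 1e-4, -1e-4, 2e-4, -2e-4]

# Π_ij = Σ_q c_qi c_qj f_q^(neq)
Π_neq = [[sum(c[i] * c[j] * f for f, c in zip(f_neq_demo, d2q9))
          for j in range(2)] for i in range(2)]
Π_neq
```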
   
%% Cell type:markdown id: tags:

We first have to find a closed form for $S_{ij}$, since in the formula above it depends on $\omega$, which in turn should be adapted according to $S_{ij}$.
So we compute $\omega$ and insert it into the formula for $S$:
   
   
%% Cell type:code id: tags:

``` python
τ_0, ρ, ω, ω_total, ω_0 = sp.symbols("tau_0 rho omega omega_total omega_0", positive=True, real=True)
ν_0, C_S, S, Π = sp.symbols("nu_0, C_S, |S|, Pi", positive=True, real=True)

Seq = sp.Eq(S, 3 * ω / 2 * Π)
Seq
```

%% Output

$\displaystyle |S| = \frac{3 \Pi \omega}{2}$
   
%% Cell type:markdown id: tags:

Note that we left off the minus sign, since we took the absolute value of both tensors. The absolute value is defined as above, with the factor of two inside the square root. The $\rho_{(0)}$ has been left out, remembering that $\Pi^{(neq)}$ has to be divided by $\rho$ in the case of compressible models.

Next, we compute $\omega$ from the total viscosity as given by the Smagorinsky equation:
   
%% Cell type:code id: tags:

``` python
relaxation_rate_from_lattice_viscosity(ν_0 + C_S ** 2 * S)
```

%% Output

$\displaystyle \frac{2}{6 C_{S}^{2} |S| + 6 \nu_{0} + 1}$
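%% Cell type:markdown id: tags:

For reference, the two helpers imported from `lbmpy.relaxationrates` implement the standard relation $\nu = \frac{1}{3}\left(\tau - \frac{1}{2}\right)$. The following plain-Python sketch is reconstructed from the output above, not from lbmpy's actual source:

%% Cell type:code id: tags:

``` python
def relaxation_rate_from_viscosity_sketch(ν):
    # ω = 1 / (3ν + 1/2)
    return 2 / (6 * ν + 1)


def viscosity_from_relaxation_rate_sketch(ω):
    # ν = (1/ω - 1/2) / 3
    return (2 - ω) / (6 * ω)


# round trip: recovers the input viscosity 0.1
viscosity_from_relaxation_rate_sketch(relaxation_rate_from_viscosity_sketch(0.1))
```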
   
%% Cell type:markdown id: tags:

and insert it into the equation for $|S|$:

%% Cell type:code id: tags:

``` python
Seq2 = Seq.subs(ω, relaxation_rate_from_lattice_viscosity(ν_0 + C_S**2 * S))
Seq2
```

%% Output

$\displaystyle |S| = \frac{3 \Pi}{6 C_{S}^{2} |S| + 6 \nu_{0} + 1}$
   
%% Cell type:markdown id: tags:

This equation contains only known quantities, so we can solve it for $|S|$.
Additionally, we substitute the lattice viscosity $\nu_0$ by the original relaxation time $\tau_0$; the resulting equations are simpler when written in terms of relaxation times instead of rates.
   
%% Cell type:code id: tags:

``` python
solveRes = sp.solve(Seq2, S)
assert len(solveRes) == 1
SVal = solveRes[0]
SVal = SVal.subs(ν_0, lattice_viscosity_from_relaxation_rate(1 / τ_0)).expand()
SVal
```

%% Output

$\displaystyle - \frac{\tau_{0}}{6 C_{S}^{2}} + \frac{\sqrt{72 C_{S}^{2} \Pi + 4 \tau_{0}^{2}}}{12 C_{S}^{2}}$
   
%% Cell type:markdown id: tags:

Knowing $|S|$, we can compute the total relaxation time using

$$\nu_{total} = \nu_0 + C_S^2 |S|$$
   
%% Cell type:code id: tags:

``` python
τ_val = 1 / (relaxation_rate_from_lattice_viscosity(lattice_viscosity_from_relaxation_rate(1 / τ_0) + C_S**2 * SVal)).cancel()
τ_val
```

%% Output

$\displaystyle \frac{\tau_{0}}{2} + \frac{\sqrt{18 C_{S}^{2} \Pi + \tau_{0}^{2}}}{2}$
   
%% Cell type:markdown id: tags:

To compute $\Pi^{(neq)}$ we use the following functions:

%% Cell type:code id: tags:

``` python
def second_order_moment_tensor(function_values, stencil):
    assert len(function_values) == len(stencil)
    dim = len(stencil[0])
    return sp.Matrix(dim, dim, lambda i, j: sum(c[i] * c[j] * f for f, c in zip(function_values, stencil)))


def frobenius_norm(matrix, factor=1):
    return sp.sqrt(sum(i * i for i in matrix) * factor)
```
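%% Cell type:markdown id: tags:

A quick usage sketch of these helpers on made-up values (hypothetical, for illustration only): a small four-direction stencil with integer stand-ins for $f^{(neq)}$.

%% Cell type:code id: tags:

``` python
toy_stencil = ((0, 1), (0, -1), (1, 0), (-1, 0))
toy_f_neq = sp.Matrix([1, 3, 2, 5])   # made-up non-equilibrium values

Π_toy = second_order_moment_tensor(toy_f_neq, toy_stencil)
Π_toy, frobenius_norm(Π_toy, factor=2)   # norm uses the factor-2 convention of |S|
```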
   
%% Cell type:markdown id: tags:

In the next cell we construct equations that take a standard relaxation rate $\omega_0$ and compute a new relaxation rate $\omega_{total}$ according to the Smagorinsky model, using `τ_val` computed above.
   
%% Cell type:code id: tags:

``` python
def smagorinsky_equations(ω_0, ω_total, method):
    f_neq = sp.Matrix(method.pre_collision_pdf_symbols) - method.get_equilibrium_terms()
    return [sp.Eq(τ_0, 1 / ω_0),
            sp.Eq(Π, frobenius_norm(second_order_moment_tensor(f_neq, method.stencil), factor=2)),
            sp.Eq(ω_total, 1 / τ_val)]


smagorinsky_equations(ω_0, ω_total, create_lb_method())
```

%% Output

$\displaystyle \left[ \tau_{0} = \frac{1}{\omega_{0}}, \ \Pi = \sqrt{4 \left(- f_{5} + f_{6} + f_{7} - f_{8} - u_{0} u_{1}\right)^{2} + 2 \left(- \frac{\delta_{\rho}}{3} + f_{1} + f_{2} + f_{5} + f_{6} + f_{7} + f_{8} - u_{1}^{2}\right)^{2} + 2 \left(- \frac{\delta_{\rho}}{3} + f_{3} + f_{4} + f_{5} + f_{6} + f_{7} + f_{8} - u_{0}^{2}\right)^{2}}, \ \omega_{total} = \frac{1}{\frac{\tau_{0}}{2} + \frac{\sqrt{18 C_{S}^{2} \Pi + \tau_{0}^{2}}}{2}}\right]$
   
%% Cell type:markdown id: tags:

## 2) Application: Channel flow

Next, we modify a *lbmpy* scenario to use the Smagorinsky model.
We create an MRT method where we fix all relaxation rates except the one that controls the viscosity.
   
%% Cell type:code id: tags:

``` python
lbm_config = LBMConfig(stencil=Stencil.D2Q9, method=Method.MRT, force=(1e-6, 0),
                       force_model=ForceModel.LUO, relaxation_rates=[ω, 1.9, 1.9, 1.9])

method = create_lb_method(lbm_config=lbm_config)
method
```

%% Output

<lbmpy.methods.momentbased.momentbasedmethod.MomentBasedLbMethod at 0x7f745d5c3b20>
   
%% Cell type:markdown id: tags:

Only the collision rule has to be changed. Thus, we first construct the collision rule, add the Smagorinsky equations, and create a normal scenario from the modified collision rule. To prevent the macroscopic quantity symbols in the Smagorinsky equations from being optimized away, we must disable simplification:
   
%% Cell type:code id: tags:

``` python
optimization = {'simplification': False}
collision_rule = create_lb_collision_rule(lb_method=method, optimization=optimization)
collision_rule = collision_rule.new_with_substitutions({ω: ω_total})

collision_rule.subexpressions += smagorinsky_equations(ω, ω_total, method)
collision_rule.topological_sort(sort_subexpressions=True, sort_main_assignments=False)
collision_rule
```

%% Output

AssignmentCollection: d_6, d_5, d_2, d_3, d_0, d_1, d_7, d_4, d_8 <- f(f_2, f_4, f_7, f_5, omega_total, f_6, f_3, f_1, f_0, f_8)
   
%% Cell type:markdown id: tags:

In the next cell the collision rule could be simplified by extracting common subexpressions:

%% Cell type:code id: tags:

``` python
from pystencils.simp import sympy_cse
# collision_rule = sympy_cse(collision_rule)
```
   
%% Cell type:markdown id: tags:

A channel scenario can be created from a modified collision rule:

%% Cell type:code id: tags:

``` python
ch = create_channel((300, 100), force=1e-6, collision_rule=collision_rule,
                    kernel_params={"C_S": 0.12, "omega": 1.999})
```

%% Cell type:code id: tags:

``` python
# show_code(ch.ast)
```

%% Cell type:code id: tags:

``` python
ch.run(5000)
```

%% Cell type:code id: tags:

``` python
plt.figure(dpi=200)
plt.vector_field(ch.velocity[:, :])
np.max(ch.velocity[:, :])
```

%% Output

$\displaystyle 0.00504266401703371$
   
   
%% Cell type:markdown id: tags:

## Appendix: Strain rate tensor formula from Chapman-Enskog

The connection between $S_{ij}$ and $\Pi_{ij}^{(neq)}$ can be seen using a Chapman-Enskog expansion. Since *lbmpy* has a module that automatically does this expansion, we can have a look at it:
   
%% Cell type:code id: tags:

``` python
from lbmpy.chapman_enskog import ChapmanEnskogAnalysis, CeMoment
from lbmpy.chapman_enskog.chapman_enskog import remove_higher_order_u

compressible_model = create_lb_method(stencil=Stencil.D2Q9, compressible=True, zero_centered=False)
incompressible_model = create_lb_method(stencil=Stencil.D2Q9, compressible=False, zero_centered=False)

ce_compressible = ChapmanEnskogAnalysis(compressible_model)
ce_incompressible = ChapmanEnskogAnalysis(incompressible_model)
```
   
%% Cell type:markdown id: tags:

The Chapman-Enskog analysis yields expressions for the moment

$\Pi = \Pi^{(eq)} + \epsilon \Pi^{(1)} + \epsilon^2 \Pi^{(2)} + \cdots$

and the strain rate tensor is related to $\Pi^{(1)}$. However, the best approximation we have for $\Pi^{(1)}$ is
$\Pi^{(neq)}$. For details, see the paper "Shear stress in lattice Boltzmann simulations" by Krüger, Varnik and Raabe from 2009.

Let's look at the values of $\Pi^{(1)}$ obtained from the Chapman-Enskog expansion:
   
%% Cell type:code id: tags:

``` python
Π_1_xy = CeMoment("\\Pi", moment_tuple=(1, 1), superscript=1)
Π_1_xx = CeMoment("\\Pi", moment_tuple=(2, 0), superscript=1)
Π_1_yy = CeMoment("\\Pi", moment_tuple=(0, 2), superscript=1)
components = (Π_1_xx, Π_1_yy, Π_1_xy)

Π_1_xy_val = ce_compressible.higher_order_moments[Π_1_xy]
Π_1_xy_val
```

%% Output

$\displaystyle \frac{\rho u_{0}^{2} {\partial^{(1)}_{0} u_{1}} + 2 \rho u_{0} u_{1} {\partial^{(1)}_{0} u_{0}} + 2 \rho u_{0} u_{1} {\partial^{(1)}_{1} u_{1}} + \rho u_{1}^{2} {\partial^{(1)}_{1} u_{0}} - \frac{\rho {\partial^{(1)}_{1} u_{0}}}{3} - \frac{\rho {\partial^{(1)}_{0} u_{1}}}{3} + u_{0}^{2} u_{1} {\partial^{(1)}_{0} \rho} + u_{0} u_{1}^{2} {\partial^{(1)}_{1} \rho}}{\omega}$
   
%% Cell type:markdown id: tags:

This term contains many higher-order error terms. We assume that $u$ is small in lattice coordinates, so if we neglect all terms in $u$ that are quadratic or higher, we get:
   
%% Cell type:code id: tags:

``` python
remove_higher_order_u(Π_1_xy_val.expand())
```

%% Output

$\displaystyle - \frac{\rho {\partial^{(1)}_{1} u_{0}}}{3 \omega} - \frac{\rho {\partial^{(1)}_{0} u_{1}}}{3 \omega}$
   
%% Cell type:markdown id: tags:

Putting these steps together into a function, we can display them for the different cases quickly:

%% Cell type:code id: tags:

``` python
def get_Π_1(ce_analysis, component):
    val = ce_analysis.higher_order_moments[component]
    return remove_higher_order_u(val.expand())
```
   
%% Cell type:markdown id: tags:

Compressible case:

%% Cell type:code id: tags:

``` python
tuple(get_Π_1(ce_compressible, Pi) for Pi in components)
```

%% Output

$\displaystyle \left( - \frac{2 \rho {\partial^{(1)}_{0} u_{0}}}{3 \omega}, \ - \frac{2 \rho {\partial^{(1)}_{1} u_{1}}}{3 \omega}, \ - \frac{\rho {\partial^{(1)}_{1} u_{0}}}{3 \omega} - \frac{\rho {\partial^{(1)}_{0} u_{1}}}{3 \omega}\right)$
   
%% Cell type:markdown id: tags:

Incompressible case:

%% Cell type:code id: tags:

``` python
tuple(get_Π_1(ce_incompressible, Pi) for Pi in components)
```

%% Output

$\displaystyle \left( \frac{2 u_{0} {\partial^{(1)}_{0} \rho}}{3 \omega} - \frac{2 {\partial^{(1)}_{0} u_{0}}}{3 \omega}, \ \frac{2 u_{1} {\partial^{(1)}_{1} \rho}}{3 \omega} - \frac{2 {\partial^{(1)}_{1} u_{1}}}{3 \omega}, \ \frac{u_{0} {\partial^{(1)}_{1} \rho}}{3 \omega} + \frac{u_{1} {\partial^{(1)}_{0} \rho}}{3 \omega} - \frac{{\partial^{(1)}_{1} u_{0}}}{3 \omega} - \frac{{\partial^{(1)}_{0} u_{1}}}{3 \omega}\right)$
   
%% Cell type:markdown id: tags:

The incompressible case has some terms $\partial \rho$, which are zero since $\rho$ is assumed constant.

Leaving out the error terms, we finally obtain:
   
   
$$\Pi_{ij}^{(neq)} \approx \Pi_{ij}^{(1)} = -\frac{2 \rho_{(0)}}{3 \omega_s} S_{ij} = -\frac{\rho_{(0)}}{3 \omega_s} \left( \partial_i u_j + \partial_j u_i \right)$$
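%% Cell type:markdown id: tags:

As a final consistency check (a sketch using symbols defined here, not part of the original tutorial): inverting $S_{ij} = -\frac{3 \omega_s}{2 \rho_{(0)}} \Pi_{ij}^{(neq)}$ reproduces exactly the prefactor seen in the compressible Chapman-Enskog result for the $xx$ component, $-\frac{2 \rho \, \partial_0 u_0}{3 \omega}$.

%% Cell type:code id: tags:

``` python
ρ_0, ω_s, du0_dx0 = sp.symbols("rho_0 omega_s du0_dx0")

S_xx = du0_dx0                        # S_xx = (∂_0 u_0 + ∂_0 u_0) / 2
Π_xx = -2 * ρ_0 / (3 * ω_s) * S_xx    # inverted strain-rate relation
sp.simplify(Π_xx - (-2 * ρ_0 * du0_dx0 / (3 * ω_s)))   # 0: matches the CE output
```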
(Source diff of one file could not be displayed: it is too large.)
@@ -276,4 +276,42 @@ journal = {Communications in Computational Physics}
 url = {https://doi.org/10.1063/1.1399290},
 }

+@article{rozema15,
+  doi = {10.1063/1.4928700},
+  year = {2015},
+  month = {aug},
+  publisher = {AIP Publishing},
+  volume = {27},
+  number = {8},
+  author = {Wybe Rozema and Hyun J. Bae and Parviz Moin and Roel Verstappen},
+  title = {Minimum-dissipation models for large-eddy simulation},
+  journal = {Physics of Fluids}
+}
+
+@article{Han2021,
+  doi = {10.1088/1873-7005/ac1782},
+  url = {https://dx.doi.org/10.1088/1873-7005/ac1782},
+  year = {2021},
+  month = {aug},
+  publisher = {IOP Publishing},
+  volume = {53},
+  number = {4},
+  pages = {045506},
+  author = {Mengtao Han and Ryozo Ooka and Hideki Kikumoto},
+  title = {A wall function approach in lattice Boltzmann method: algorithm and validation using turbulent channel flow},
+  journal = {Fluid Dynamics Research}
+}
+
+@article{Maronga2020,
+  author = {Maronga, Bj{\"o}rn and Knigge, Christoph and Raasch, Siegfried},
+  year = {2020},
+  title = {{An Improved Surface Boundary Condition for Large-Eddy Simulations Based on Monin--Obukhov Similarity Theory: Evaluation and Consequences for Grid Convergence in Neutral and Stable Conditions}},
+  pages = {297--325},
+  volume = {174},
+  number = {2},
+  issn = {0006-8314},
+  journal = {{Boundary-Layer Meteorology}},
+  doi = {10.1007/s10546-019-00485-w}
+}

 @Comment{jabref-meta: databaseType:bibtex;}
pyproject.toml

@@ -11,7 +11,7 @@ authors = [
 ]
 license = { file = "COPYING.txt" }
 requires-python = ">=3.10"
-dependencies = ["pystencils>=1.3", "sympy>=1.6,<=1.11.1", "numpy>=1.8.0", "appdirs", "joblib"]
+dependencies = ["pystencils>=1.3", "sympy>=1.9,<=1.12.1", "numpy>=1.8.0", "appdirs", "joblib"]
 classifiers = [
     "Development Status :: 4 - Beta",
     "Framework :: Jupyter",

@@ -69,8 +69,7 @@ tests = [

 [build-system]
 requires = [
     "setuptools>=69",
-    "versioneer>=0.29",
-    "tomli; python_version < '3.11'",
+    "versioneer[toml]>=0.29",
 ]
 build-backend = "setuptools.build_meta"

@@ -87,7 +86,7 @@ namespaces = false

 # resulting files.
 VCS = "git"
 style = "pep440"
-versionfile_source = "lbmpy/_version.py"
+versionfile_source = "src/lbmpy/_version.py"
 versionfile_build = "lbmpy/_version.py"
 tag_prefix = "release/"
 parentdir_prefix = "lbmpy-"
src/lbmpy/__init__.py

@@ -7,11 +7,12 @@ from .creationfunctions import (
     LBMConfig,
     LBMOptimisation,
 )
-from .enums import Stencil, Method, ForceModel, CollisionSpace
+from .enums import Stencil, Method, ForceModel, CollisionSpace, SubgridScaleModel
 from .lbstep import LatticeBoltzmannStep
 from .macroscopic_value_kernels import (
     pdf_initialization_assignments,
     macroscopic_values_getter,
+    strain_rate_tensor_getter,
     compile_macroscopic_values_getter,
     compile_macroscopic_values_setter,
     create_advanced_velocity_setter_collision_rule,

@@ -38,9 +39,11 @@ __all__ = [
     "Method",
     "ForceModel",
     "CollisionSpace",
+    "SubgridScaleModel",
     "LatticeBoltzmannStep",
     "pdf_initialization_assignments",
     "macroscopic_values_getter",
+    "strain_rate_tensor_getter",
     "compile_macroscopic_values_getter",
     "compile_macroscopic_values_setter",
     "create_advanced_velocity_setter_collision_rule",

@@ -54,7 +57,5 @@ __all__ = [
 ]

-from ._version import get_versions
-
-__version__ = get_versions()["version"]
-del get_versions
+from . import _version
+__version__ = _version.get_versions()['version']
src/lbmpy/_version.py

@@ -5,8 +5,9 @@
 # directories (produced by setup.py build) will contain a much shorter file
 # that just contains the computed version number.

-# This file is released into the public domain. Generated by
-# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
+# This file is released into the public domain.
+# Generated by versioneer-0.29
+# https://github.com/python-versioneer/python-versioneer

 """Git implementation of _version.py."""

@@ -15,9 +16,11 @@ import os
 import re
 import subprocess
 import sys
+from typing import Any, Callable, Dict, List, Optional, Tuple
+import functools


-def get_keywords():
+def get_keywords() -> Dict[str, str]:
     """Get the keywords needed to look up the version information."""
     # these strings will be replaced by git during git-archive.
     # setup.py/versioneer.py will grep for the variable names, so they must

@@ -33,8 +36,15 @@ def get_keywords():

 class VersioneerConfig:
     """Container for Versioneer configuration parameters."""

+    VCS: str
+    style: str
+    tag_prefix: str
+    parentdir_prefix: str
+    versionfile_source: str
+    verbose: bool

-def get_config():
+
+def get_config() -> VersioneerConfig:
     """Create, populate and return the VersioneerConfig() object."""
     # these strings are filled in when 'setup.py versioneer' creates
     # _version.py

@@ -43,7 +53,7 @@ def get_config():
     cfg.style = "pep440"
     cfg.tag_prefix = "release/"
     cfg.parentdir_prefix = "lbmpy-"
-    cfg.versionfile_source = "lbmpy/_version.py"
+    cfg.versionfile_source = "src/lbmpy/_version.py"
     cfg.verbose = False
     return cfg

@@ -52,13 +62,13 @@ class NotThisMethod(Exception):
     """Exception raised if a method is not valid for the current scenario."""

-LONG_VERSION_PY = {}
-HANDLERS = {}
+LONG_VERSION_PY: Dict[str, str] = {}
+HANDLERS: Dict[str, Dict[str, Callable]] = {}


-def register_vcs_handler(vcs, method):  # decorator
+def register_vcs_handler(vcs: str, method: str) -> Callable:  # decorator
     """Create decorator to mark a method as the handler of a VCS."""
-    def decorate(f):
+    def decorate(f: Callable) -> Callable:
         """Store f in HANDLERS[vcs][method]."""
         if vcs not in HANDLERS:
             HANDLERS[vcs] = {}

@@ -67,22 +77,35 @@ def register_vcs_handler(vcs, method):  # decorator
     return decorate
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
-                env=None):
+def run_command(
+    commands: List[str],
+    args: List[str],
+    cwd: Optional[str] = None,
+    verbose: bool = False,
+    hide_stderr: bool = False,
+    env: Optional[Dict[str, str]] = None,
+) -> Tuple[Optional[str], Optional[int]]:
     """Call the given command(s)."""
     assert isinstance(commands, list)
-    p = None
-    for c in commands:
+    process = None
+
+    popen_kwargs: Dict[str, Any] = {}
+    if sys.platform == "win32":
+        # This hides the console window if pythonw.exe is used
+        startupinfo = subprocess.STARTUPINFO()
+        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+        popen_kwargs["startupinfo"] = startupinfo
+
+    for command in commands:
         try:
-            dispcmd = str([c] + args)
+            dispcmd = str([command] + args)
             # remember shell=False, so use git.cmd on windows, not just git
-            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
-                                 stdout=subprocess.PIPE,
-                                 stderr=(subprocess.PIPE if hide_stderr
-                                         else None))
+            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
+                                       stdout=subprocess.PIPE,
+                                       stderr=(subprocess.PIPE if hide_stderr
+                                               else None), **popen_kwargs)
             break
-        except EnvironmentError:
-            e = sys.exc_info()[1]
+        except OSError as e:
             if e.errno == errno.ENOENT:
                 continue
             if verbose:

@@ -93,16 +116,20 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
         if verbose:
             print("unable to find command, tried %s" % (commands,))
         return None, None
-    stdout = p.communicate()[0].strip().decode()
-    if p.returncode != 0:
+    stdout = process.communicate()[0].strip().decode()
+    if process.returncode != 0:
         if verbose:
             print("unable to run %s (error)" % dispcmd)
             print("stdout was %s" % stdout)
-        return None, p.returncode
-    return stdout, p.returncode
+        return None, process.returncode
+    return stdout, process.returncode
-def versions_from_parentdir(parentdir_prefix, root, verbose):
+def versions_from_parentdir(
+    parentdir_prefix: str,
+    root: str,
+    verbose: bool,
+) -> Dict[str, Any]:
     """Try to determine the version from the parent directory name.

     Source tarballs conventionally unpack into a directory that includes both

@@ -111,15 +138,14 @@ def versions_from_parentdir(parentdir_prefix, root, verbose):
     """
     rootdirs = []

-    for i in range(3):
+    for _ in range(3):
         dirname = os.path.basename(root)
         if dirname.startswith(parentdir_prefix):
             return {"version": dirname[len(parentdir_prefix):],
                     "full-revisionid": None,
                     "dirty": False, "error": None, "date": None}
-        else:
-            rootdirs.append(root)
-            root = os.path.dirname(root)  # up a level
+        rootdirs.append(root)
+        root = os.path.dirname(root)  # up a level

     if verbose:
         print("Tried directories %s but none started with prefix %s" %

@@ -128,39 +154,42 @@ def versions_from_parentdir(parentdir_prefix, root, verbose):

 @register_vcs_handler("git", "get_keywords")
-def git_get_keywords(versionfile_abs):
+def git_get_keywords(versionfile_abs: str) -> Dict[str, str]:
     """Extract version information from the given file."""
     # the code embedded in _version.py can just fetch the value of these
     # keywords. When used from setup.py, we don't want to import _version.py,
     # so we do it with a regexp instead. This function is not used from
     # _version.py.
-    keywords = {}
+    keywords: Dict[str, str] = {}
     try:
-        f = open(versionfile_abs, "r")
-        for line in f.readlines():
-            if line.strip().startswith("git_refnames ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["refnames"] = mo.group(1)
-            if line.strip().startswith("git_full ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["full"] = mo.group(1)
-            if line.strip().startswith("git_date ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["date"] = mo.group(1)
-        f.close()
-    except EnvironmentError:
+        with open(versionfile_abs, "r") as fobj:
+            for line in fobj:
+                if line.strip().startswith("git_refnames ="):
+                    mo = re.search(r'=\s*"(.*)"', line)
+                    if mo:
+                        keywords["refnames"] = mo.group(1)
+                if line.strip().startswith("git_full ="):
+                    mo = re.search(r'=\s*"(.*)"', line)
+                    if mo:
+                        keywords["full"] = mo.group(1)
+                if line.strip().startswith("git_date ="):
+                    mo = re.search(r'=\s*"(.*)"', line)
+                    if mo:
+                        keywords["date"] = mo.group(1)
+    except OSError:
         pass
     return keywords
@register_vcs_handler("git", "keywords") @register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose): def git_versions_from_keywords(
keywords: Dict[str, str],
tag_prefix: str,
verbose: bool,
) -> Dict[str, Any]:
"""Get version information from git keywords.""" """Get version information from git keywords."""
if not keywords: if "refnames" not in keywords:
raise NotThisMethod("no keywords at all, weird") raise NotThisMethod("Short version file found")
date = keywords.get("date") date = keywords.get("date")
if date is not None: if date is not None:
# Use only the last line. Previous lines may contain GPG signature # Use only the last line. Previous lines may contain GPG signature
...@@ -179,11 +208,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): ...@@ -179,11 +208,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
if verbose: if verbose:
print("keywords are unexpanded, not using") print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball") raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")]) refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those. # just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: " TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags: if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use # Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d # a heuristic: assume all version tags have a digit. The old git %d
...@@ -192,7 +221,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): ...@@ -192,7 +221,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# between branches and tags. By ignoring refnames without digits, we # between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and # filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master". # "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)]) tags = {r for r in refs if re.search(r'\d', r)}
if verbose: if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags)) print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose: if verbose:
...@@ -201,6 +230,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): ...@@ -201,6 +230,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
# sorting will prefer e.g. "2.0" over "2.0rc1" # sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix): if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):] r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose: if verbose:
print("picking %s" % r) print("picking %s" % r)
return {"version": r, return {"version": r,
...@@ -216,7 +250,12 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): ...@@ -216,7 +250,12 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose):
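# Illustrative example (keyword values assumed for demonstration, not taken
# from this repository): in a git-archive tarball the substituted keywords
# look roughly like
#   git_refnames = " (HEAD -> master, tag: release/1.3.3)"
#   git_full = "0123456789abcdef0123456789abcdef01234567"
#   git_date = "2023-01-01 12:00:00 +0100"
# With tag_prefix "release/", the handler above strips the prefix and
# reports version "1.3.3".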
@register_vcs_handler("git", "pieces_from_vcs") @register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): def git_pieces_from_vcs(
tag_prefix: str,
root: str,
verbose: bool,
runner: Callable = run_command
) -> Dict[str, Any]:
"""Get version from 'git describe' in the root of the source tree. """Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not* This only gets called if the git-archive 'subst' keywords were *not*
...@@ -227,8 +266,15 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): ...@@ -227,8 +266,15 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if sys.platform == "win32": if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"] GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, # GIT_DIR can interfere with correct operation of Versioneer.
hide_stderr=True) # It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=not verbose)
if rc != 0: if rc != 0:
if verbose: if verbose:
print("Directory %s not under git control" % root) print("Directory %s not under git control" % root)
...@@ -236,24 +282,57 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): ...@@ -236,24 +282,57 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(GITS, [
        "describe", "--tags", "--dirty", "--always", "--long",
        "--match", f"{tag_prefix}[[:digit:]]*"
    ], cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces: Dict[str, Any] = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
                             cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()

    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")

        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)

        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]

    pieces["branch"] = branch_name

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
@@ -270,7 +349,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
@@ -295,13 +374,11 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
        pieces["distance"] = len(out.split())  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
@@ -310,14 +387,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    return pieces
def plus_or_dot(pieces: Dict[str, Any]) -> str:
    """Return a + if we don't already have one, else return a ."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+"


def render_pep440(pieces: Dict[str, Any]) -> str:
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
@@ -342,23 +419,71 @@ def render_pep440(pieces):
    return rendered
def render_pep440_branch(pieces: Dict[str, Any]) -> str:
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0"
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
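# Worked example (inputs assumed for illustration): for pieces with
# closest-tag "1.2", distance 3, short "abc1234", dirty False and branch
# "feature", render_pep440_branch yields "1.2.dev0+3.gabc1234"; on master
# the ".dev0" marker is omitted and the result is "1.2+3.gabc1234".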
def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]:
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or None if no post-release segment is present).
    """
    vc = str.split(ver, ".post")
    return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
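# Behaviour of the splitter, by way of example:
#   pep440_split_post("1.2.3")       -> ("1.2.3", None)
#   pep440_split_post("1.2.3.post")  -> ("1.2.3", 0)    # empty segment counts as 0
#   pep440_split_post("1.2.3.post4") -> ("1.2.3", 4)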
def render_pep440_pre(pieces: Dict[str, Any]) -> str:
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    if pieces["closest-tag"]:
        if pieces["distance"]:
            # update the post release segment
            tag_version, post_version = pep440_split_post(pieces["closest-tag"])
            rendered = tag_version
            if post_version is not None:
                rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
            else:
                rendered += ".post0.dev%d" % (pieces["distance"])
        else:
            # no commits, use the tag as the version
            rendered = pieces["closest-tag"]
    else:
        # exception #1
        rendered = "0.post0.dev%d" % pieces["distance"]
    return rendered
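# Worked example (inputs assumed for illustration): with closest-tag
# "1.2.post3" and distance 5 the existing post segment is bumped and a dev
# segment appended, giving "1.2.post4.dev5"; with closest-tag "1.2" and
# distance 5 the result is "1.2.post0.dev5".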
def render_pep440_post(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
@@ -385,7 +510,36 @@ def render_pep440_post(pieces):
    return rendered
def render_pep440_post_branch(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_old(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.
@@ -407,7 +561,7 @@ def render_pep440_old(pieces):
    return rendered
def render_git_describe(pieces: Dict[str, Any]) -> str:
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.
@@ -427,7 +581,7 @@ def render_git_describe(pieces):
    return rendered
def render_git_describe_long(pieces: Dict[str, Any]) -> str:
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
@@ -447,7 +601,7 @@ def render_git_describe_long(pieces):
    return rendered
def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]:
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
@@ -461,10 +615,14 @@ def render(pieces, style):
    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-branch":
        rendered = render_pep440_branch(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-post-branch":
        rendered = render_pep440_post_branch(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
@@ -479,7 +637,7 @@ def render(pieces, style):
            "date": pieces.get("date")}
def get_versions() -> Dict[str, Any]:
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
@@ -500,7 +658,7 @@ def get_versions():
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
...
import itertools

from pystencils import CreateKernelConfig, Field, Assignment, AssignmentCollection
from pystencils.slicing import (
    shift_slice,
    get_slice_before_ghost_layer,
    normalize_slice,
)

from lbmpy.advanced_streaming.utility import (
    is_inplace,
    get_accessor,
    numeric_index,
    Timestep,
    get_timesteps,
    numeric_offsets,
)
from pystencils.datahandling import SerialDataHandling
from pystencils.enums import Target
from itertools import chain
@@ -10,22 +20,28 @@ from itertools import chain
class LBMPeriodicityHandling:

    def __init__(
        self,
        stencil,
        data_handling,
        pdf_field_name,
        streaming_pattern="pull",
        ghost_layers=1,
        cupy_direct_copy=True,
    ):
        """
        Periodicity Handling for Lattice Boltzmann Streaming.

        **On the usage with cuda:**
        - cupy allows the copying of sliced arrays within device memory using the numpy syntax,
          e.g. `dst[:,0] = src[:,-1]`. In this implementation, this is the default for periodicity
          handling. Alternatively, if you set `cupy_direct_copy=False`, GPU kernels are generated and
          compiled. The compiled kernels are almost twice as fast in execution as cupy array copying,
          but especially for large stencils like D3Q27, their compilation can take up to 20 seconds.
          Choose the approach that suits your use case.
        """
        if not isinstance(data_handling, SerialDataHandling):
            raise ValueError("Only serial data handling is supported!")

        self.stencil = stencil
        self.dim = stencil.D
@@ -56,12 +72,16 @@ class LBMPeriodicityHandling:
        self.comm_slices = []
        timesteps = get_timesteps(streaming_pattern)
        for timestep in timesteps:
            slices_per_comm_dir = get_communication_slices(
                stencil=stencil,
                comm_stencil=copy_directions,
                streaming_pattern=streaming_pattern,
                prev_timestep=timestep,
                ghost_layers=ghost_layers,
            )
            self.comm_slices.append(
                list(chain.from_iterable(v for k, v in slices_per_comm_dir.items()))
            )

        if self.target == Target.GPU and not cupy_direct_copy:
            self.device_copy_kernels = list()
@@ -81,11 +101,11 @@ class LBMPeriodicityHandling:
            arr[dst] = arr[src]

    def _compile_copy_kernels(self, timestep):
        assert self.target == Target.GPU
        pdf_field = self.dh.fields[self.pdf_field_name]
        kernels = []
        for src, dst in self.comm_slices[timestep.idx]:
            kernels.append(periodic_pdf_gpu_copy_kernel(pdf_field, src, dst))
        return kernels

    def _periodicity_handling_gpu(self, prev_timestep):
@@ -100,7 +120,12 @@ class LBMPeriodicityHandling:
def get_communication_slices(
    stencil,
    comm_stencil=None,
    streaming_pattern="pull",
    prev_timestep=Timestep.BOTH,
    ghost_layers=1,
):
    """
    Return the source and destination slices for periodicity handling or communication between blocks.
@@ -116,7 +141,9 @@ def get_communication_slices(
    if comm_stencil is None:
        comm_stencil = itertools.product(*([-1, 0, 1] for _ in range(stencil.D)))

    pdfs = Field.create_generic(
        "pdfs", spatial_dimensions=len(stencil[0]), index_shape=(stencil.Q,)
    )
    write_accesses = get_accessor(streaming_pattern, prev_timestep).write(pdfs, stencil)
    slices_per_comm_direction = dict()
@@ -130,7 +157,9 @@ def get_communication_slices(
        d = stencil.index(streaming_dir)
        write_index = numeric_index(write_accesses[d])[0]

        origin_slice = get_slice_before_ghost_layer(
            comm_dir, ghost_layers=ghost_layers, thickness=1
        )
        src_slice = _fix_length_one_slices(origin_slice)

        write_offsets = numeric_offsets(write_accesses[d])
@@ -138,13 +167,15 @@ def get_communication_slices(
        # TODO: this is just a hotfix. _trim_slice_in_direction breaks FreeSlip BC with adjacent periodic side
        if streaming_pattern != "pull":
            src_slice = shift_slice(
                _trim_slice_in_direction(src_slice, tangential_dir), write_offsets
            )

        neighbour_transform = _get_neighbour_transform(comm_dir, ghost_layers)
        dst_slice = shift_slice(src_slice, neighbour_transform)

        src_slice = src_slice + (write_index,)
        dst_slice = dst_slice + (write_index,)

        slices_for_dir.append((src_slice, dst_slice))
@@ -152,10 +183,10 @@ def get_communication_slices(
    return slices_per_comm_direction
def periodic_pdf_gpu_copy_kernel(pdf_field, src_slice, dst_slice, domain_size=None):
    """Generate a GPU kernel which copies all values from one slice of a field
    to another non-overlapping slice."""
    from pystencils import create_kernel

    pdf_idx = src_slice[-1]
    assert isinstance(pdf_idx, int), "PDF index needs to be an integer constant"
@@ -176,18 +207,28 @@ def periodic_pdf_copy_kernel(pdf_field, src_slice, dst_slice,
    def _stop(s):
        return s.stop if isinstance(s, slice) else s

    offset = [
        _start(s1) - _start(s2)
        for s1, s2 in zip(normalized_from_slice, normalized_to_slice)
    ]
    assert offset == [
        _stop(s1) - _stop(s2)
        for s1, s2 in zip(normalized_from_slice, normalized_to_slice)
    ], "Slices have to have same size"

    copy_eq = AssignmentCollection(
        main_assignments=[
            Assignment(pdf_field(pdf_idx), pdf_field[tuple(offset)](pdf_idx))
        ]
    )
    config = CreateKernelConfig(
        iteration_slice=dst_slice,
        skip_independence_check=True,
        target=Target.GPU,
    )
    ast = create_kernel(copy_eq, config=config)
    return ast.compile()
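# Minimal usage sketch (field and slices are assumed for illustration, not
# taken from the calling code): for a D2Q9 pdf field with one ghost layer,
# copying pdf index 3 from the rightmost interior column into the left
# ghost column could look like
#
#   copy = periodic_pdf_gpu_copy_kernel(
#       pdfs,
#       src_slice=(slice(-2, -1), slice(1, -1), 3),
#       dst_slice=(slice(0, 1), slice(1, -1), 3))
#   copy(pdfs=pdfs_gpu_arr)  # compiled pystencils kernels take fields as kwargs
#
# LBMPeriodicityHandling._compile_copy_kernels above builds one such kernel
# per (src, dst) slice pair when cupy_direct_copy is False.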
def _extend_dir(direction):
@@ -196,10 +237,10 @@ def _extend_dir(direction):
    elif direction[0] == 0:
        for d in [-1, 0, 1]:
            for rest in _extend_dir(direction[1:]):
                yield (d,) + rest
    else:
        for rest in _extend_dir(direction[1:]):
            yield (direction[0],) + rest


def _get_neighbour_transform(direction, ghost_layers):
...
from lbmpy.boundaries.boundaryconditions import (
    UBB, FixedDensity, DiffusionDirichlet, SimpleExtrapolationOutflow, WallFunctionBounce,
    ExtrapolationOutflow, NeumannByCopy, NoSlip, NoSlipLinearBouzidi, QuadraticBounceBack, StreamInConstant, FreeSlip)
from lbmpy.boundaries.boundaryhandling import LatticeBoltzmannBoundaryHandling
from lbmpy.boundaries.wall_function_models import MoninObukhovSimilarityTheory, LogLaw, MuskerLaw, SpaldingsLaw

__all__ = ['NoSlip', 'NoSlipLinearBouzidi', 'QuadraticBounceBack', 'FreeSlip', 'WallFunctionBounce',
           'UBB', 'FixedDensity',
           'SimpleExtrapolationOutflow', 'ExtrapolationOutflow',
           'DiffusionDirichlet', 'NeumannByCopy', 'StreamInConstant',
           'LatticeBoltzmannBoundaryHandling',
           'MoninObukhovSimilarityTheory', 'LogLaw', 'MuskerLaw', 'SpaldingsLaw']
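# Usage sketch (names illustrative): the wall-function machinery is now
# re-exported from this package, so downstream code can do e.g.
#   from lbmpy.boundaries import WallFunctionBounce, SpaldingsLaw
# and pass a SpaldingsLaw instance as the wall_function_model of a
# WallFunctionBounce boundary.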
@@ -67,7 +67,7 @@ def boundary_conditional(boundary, direction, streaming_pattern, prev_timestep,
    assignments = []
    for direction_idx in dir_indices:
        rule = boundary(f_out, f_in, direction_idx, inv_dir, lb_method, index_field=None, force_vector=None)

        # rhs: replace f_out by post collision symbols.
        rhs_substitutions = {f_out(i): sym for i, sym in enumerate(lb_method.post_collision_pdf_symbols)}
...
import abc
from enum import Enum, auto
from warnings import warn

from pystencils import Assignment, Field
@@ -14,6 +15,7 @@ from lbmpy.maxwellian_equilibrium import discrete_equilibrium
from lbmpy.simplificationfactory import create_simplification_strategy

import sympy as sp
import numpy as np
class LbBoundary(abc.ABC):
@@ -26,10 +28,11 @@ class LbBoundary(abc.ABC):
    inner_or_boundary = True
    single_link = False

    def __init__(self, name=None, calculate_force_on_boundary=False):
        self._name = name
        self.calculate_force_on_boundary = calculate_force_on_boundary

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        """
        This function defines the boundary behavior and must therefore be implemented by all boundaries.
        The boundary is defined through a list of sympy equations from which a boundary kernel is generated.
@@ -46,6 +49,8 @@ class LbBoundary(abc.ABC):
            lb_method: an instance of the LB method used. Use this to adapt the boundary to the method
                       (e.g. compressibility)
            index_field: the boundary index field that can be used to retrieve and update boundary data
            force_vector: vector to store the force on the boundary. Has the same size as the index field and
                          D entries per cell

        Returns:
            list of pystencils assignments, or pystencils.AssignmentCollection
@@ -107,14 +112,31 @@ class NoSlip(LbBoundary):
    Args:
        name: optional name of the boundary.
        calculate_force_on_boundary: stores the force for each PDF at the boundary in a force vector
    """

    def __init__(self, name=None, calculate_force_on_boundary=False):
        """Set an optional name here, to mark boundaries, for example for force evaluations"""
        super(NoSlip, self).__init__(name, calculate_force_on_boundary)

    def get_additional_code_nodes(self, lb_method):
        if self.calculate_force_on_boundary:
            return [NeighbourOffsetArrays(lb_method.stencil)]
        else:
            return []

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        if self.calculate_force_on_boundary:
            force = sp.Symbol("f")
            subexpressions = [Assignment(force, sp.Float(2.0) * f_out(dir_symbol))]
            offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
            for i in range(lb_method.stencil.D):
                subexpressions.append(Assignment(force_vector[0](f'F_{i}'), force * offset[i]))
        else:
            subexpressions = []

        boundary_assignments = [Assignment(f_in(inv_dir[dir_symbol]), f_out(dir_symbol))]
        return AssignmentCollection(boundary_assignments, subexpressions=subexpressions)
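# The force computation above follows the momentum exchange idea: each
# bounced-back link contributes F_link = 2 * f_out(dir) * c_dir, twice the
# outgoing population times the lattice direction, because bounce-back
# reverses the particle momentum along the link. Summing the per-link
# entries of the force vector over all boundary links gives the total wall
# force.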
class NoSlipLinearBouzidi(LbBoundary):
@@ -133,11 +155,10 @@ class NoSlipLinearBouzidi(LbBoundary):
        data_type: data type of the wall distance q
    """

    def __init__(self, name=None, init_wall_distance=None, data_type='double', calculate_force_on_boundary=False):
        self.data_type = data_type
        self.init_wall_distance = init_wall_distance
        super(NoSlipLinearBouzidi, self).__init__(name, calculate_force_on_boundary)

    @property
    def additional_data(self):
@@ -145,6 +166,12 @@ class NoSlipLinearBouzidi(LbBoundary):
        direction is needed. This information is stored in the index vector."""
        return [('q', create_type(self.data_type))]

    def get_additional_code_nodes(self, lb_method):
        if self.calculate_force_on_boundary:
            return [NeighbourOffsetArrays(lb_method.stencil)]
        else:
            return []

    @property
    def additional_data_init_callback(self):
        def default_callback(boundary_data, **_):
@@ -158,7 +185,7 @@ class NoSlipLinearBouzidi(LbBoundary):
                 "(init_wall_distance=None). The boundary condition will fall back to a simple NoSlip BC")
        return default_callback

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        f_xf = sp.Symbol("f_xf")
        f_xf_inv = sp.Symbol("f_xf_inv")
        d_x2f = sp.Symbol("d_x2f")
@@ -180,6 +207,13 @@ class NoSlipLinearBouzidi(LbBoundary):
                            (case_two, sp.And(sp.Gt(q, 0), sp.Lt(q, 0.5))),
                            (case_three, True))

        if self.calculate_force_on_boundary:
            force = sp.Symbol("f")
            subexpressions.append(Assignment(force, f_xf + rhs))
            offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
            for i in range(lb_method.stencil.D):
                subexpressions.append(Assignment(force_vector[0](f'F_{i}'), force * offset[i]))

        boundary_assignments = [Assignment(f_in(inv_dir[dir_symbol]), rhs)]
        return AssignmentCollection(boundary_assignments, subexpressions=subexpressions)
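# Rough sketch of the linear Bouzidi scheme selected by the Piecewise above
# (the exact expressions live in the elided lines): q < 0 marks an
# uninitialised wall distance and falls back to plain bounce-back,
# 0 < q < 1/2 interpolates using the next fluid neighbour, and q >= 1/2
# interpolates between the outgoing and the reflected population of the
# boundary-adjacent cell itself.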
@@ -199,14 +233,15 @@ class QuadraticBounceBack(LbBoundary):
        data_type: data type of the wall distance q
    """

    def __init__(self, relaxation_rate, name=None, init_wall_distance=None, data_type='double',
                 calculate_force_on_boundary=False):
        self.relaxation_rate = relaxation_rate
        self.data_type = data_type
        self.init_wall_distance = init_wall_distance
        self.equilibrium_values_name = "f_eq"
        self.inv_dir_symbol = TypedSymbol("inv_dir", create_type("int32"))

        super(QuadraticBounceBack, self).__init__(name, calculate_force_on_boundary)

    @property
    def additional_data(self):
@@ -244,19 +279,20 @@ class QuadraticBounceBack(LbBoundary):
        return [LbmWeightInfo(lb_method, self.data_type), inverse_dir_node, NeighbourOffsetArrays(lb_method.stencil)]

    @staticmethod
    def get_equilibrium(v, u, rho, drho, weight, compressible, zero_centered):
        rho_background = sp.Integer(1)

        result = discrete_equilibrium(v, u, rho, weight,
                                      order=2, c_s_sq=sp.Rational(1, 3), compressible=compressible)
        if zero_centered:
            shift = discrete_equilibrium(v, [0] * len(u), rho_background, weight,
                                         order=0, c_s_sq=sp.Rational(1, 3), compressible=False)
            result = simplify_by_equality(result - shift, rho, drho, rho_background)

        return result

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        omega = self.relaxation_rate
        inv = sp.IndexedBase(self.inv_dir_symbol, shape=(1,))[dir_symbol]
        weight_info = LbmWeightInfo(lb_method, data_type=self.data_type)
        weight_of_direction = weight_info.weight_of_direction
@@ -272,7 +308,7 @@ class QuadraticBounceBack(LbBoundary):
        v = [TypedSymbol(f"c_{i}", self.data_type) for i in range(lb_method.stencil.D)]
        v_inv = [TypedSymbol(f"c_inv_{i}", self.data_type) for i in range(lb_method.stencil.D)]
        one = sp.Float(1.0)
        half = sp.Rational(1, 2)

        subexpressions = [Assignment(pdf_symbols[i], pdf) for i, pdf in enumerate(pdf_field_accesses)]
        subexpressions.append(Assignment(f_xf, f_out(dir_symbol)))
@@ -294,24 +330,26 @@ class QuadraticBounceBack(LbBoundary):
        drho = cqc.density_deviation_symbol
        u = sp.Matrix(cqc.velocity_symbols)
        compressible = cqc.compressible
        zero_centered = cqc.zero_centered_pdfs

        cqe = cqc.equilibrium_input_equations_from_pdfs(pdf_symbols, False)
        subexpressions.append(cqe.all_assignments)

        eq_dir = self.get_equilibrium(v, u, rho, drho, weight, compressible, zero_centered)
        eq_inv = self.get_equilibrium(v_inv, u, rho, drho, weight_inv, compressible, zero_centered)
        subexpressions.append(Assignment(feq, eq_dir + eq_inv))

        t1 = (f_xf - f_xf_inv + (f_xf + f_xf_inv - feq * omega) / (one - omega))
        t2 = (q * (f_xf + f_xf_inv)) / (one + q)
        result = (one - q) / (one + q) * t1 * half + t2

        if self.calculate_force_on_boundary:
            force = sp.Symbol("f")
            subexpressions.append(Assignment(force, f_xf + result))
            offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
            for i in range(lb_method.stencil.D):
                subexpressions.append(Assignment(force_vector[0](f'F_{i}'), force * offset[i]))

        boundary_assignments = [Assignment(f_in(inv_dir[dir_symbol]), result)]
        return AssignmentCollection(boundary_assignments, subexpressions=subexpressions)
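# The expression for `result` above interpolates between the outgoing
# population f_xf, its inverse-direction counterpart f_xf_inv and the
# symmetric equilibrium part feq, weighted by the wall distance q and the
# relaxation rate omega, which is what is intended to give this bounce-back
# variant its improved accuracy for walls that do not lie exactly halfway
# between cells.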
@@ -357,7 +395,7 @@ class FreeSlip(LbBoundary):
        if name is None and normal_direction:
            name = f"Free Slip : {offset_to_direction_string([-x for x in normal_direction])}"

        super(FreeSlip, self).__init__(name, calculate_force_on_boundary=False)

    def init_callback(self, boundary_data, **_):
        if len(boundary_data.index_array) > 1e6:
@@ -427,7 +465,7 @@ class FreeSlip(LbBoundary):
        else:
            return [NeighbourOffsetArrays(lb_method.stencil)]

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        neighbor_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
        if self.normal_direction:
            tangential_offset = tuple(offset + normal for offset, normal in zip(neighbor_offset, self.normal_direction))
@@ -448,6 +486,241 @@ class FreeSlip(LbBoundary):

# end class FreeSlip
class WallFunctionBounce(LbBoundary):
    """
    Wall function based on the bounce back idea, cf. :cite:`Han2021`. Its implementation is extended to the D3Q27
    stencil, for which different weights of the drag distribution are proposed.

    Args:
        lb_method: LB method which is used for the simulation
        pdfs: Symbolic representation of the particle distribution functions.
        normal_direction: Normal direction of the wall. Currently, only straight and axis-aligned walls are supported.
        wall_function_model: Wall function that is used to retrieve the wall stress :math:`tau_w` during the
                             simulation. See :class:`lbmpy.boundaries.wall_function_models.WallFunctionModel` for
                             more details.
        mean_velocity: Optional field or field access for the mean velocity. As wall functions are typically defined
                       in terms of the mean velocity, it is recommended to provide this variable. Per default, the
                       instantaneous velocity obtained from pdfs is used for the wall function.
        sampling_shift: Optional sampling shift for the velocity sampling. Can be provided as symbolic variable or
                        integer. In both cases, the user must ensure that the sampling shift is at least 1, as
                        sampling in boundary cells is not physical. Per default, a sampling shift of 1 is employed,
                        which corresponds to sampling in the first fluid cell normal to the wall. For lower friction
                        Reynolds numbers, choosing a sampling shift > 1 has been shown to improve the results for
                        higher resolutions.
                        Mutually exclusive with the Maronga sampling shift.
        maronga_sampling_shift: Optionally, apply a correction factor to the wall shear stress proposed by Maronga et
                                al. :cite:`Maronga2020`. Has only been tested and validated for the MOST wall function.
                                No guarantee is given that it also works with other wall functions.
                                Mutually exclusive with the standard sampling shift.
        dt: time discretisation. Usually one in LB units
        dy: space discretisation. Usually one in LB units
        y: distance from the wall
        target_friction_velocity: A target friction velocity can be given if an estimate is known a priori. This target
                                  friction velocity will be used as initial guess for implicit wall functions to ensure
                                  convergence of the Newton algorithm.
        weight_method: The extension of the WFB to a D3Q27 stencil is non-unique. Different weights can be chosen to
                       define the drag distribution onto the pdfs. Per default, weights corresponding to the weights
                       in the D3Q27 stencil are chosen.
        name: Optional name of the boundary.
        data_type: Floating-point precision. Per default, double.
    """

    class WeightMethod(Enum):
        LATTICE_WEIGHT = auto()
        GEOMETRIC_WEIGHT = auto()
    def __init__(self, lb_method, pdfs, normal_direction, wall_function_model,
                 mean_velocity=None, sampling_shift=1, maronga_sampling_shift=None,
                 dt=1, dy=1, y=0.5,
                 target_friction_velocity=None,
                 weight_method=WeightMethod.LATTICE_WEIGHT,
                 name=None, data_type='double'):
        """Set an optional name here, to mark boundaries, for example for force evaluations"""
        self.stencil = lb_method.stencil
        if not (self.stencil.Q == 19 or self.stencil.Q == 27):
            raise ValueError("WFB boundary is currently only defined for D3Q19 and D3Q27 stencils.")
        self.pdfs = pdfs

        self.wall_function_model = wall_function_model

        if mean_velocity:
            if isinstance(mean_velocity, Field):
                self.mean_velocity = mean_velocity.center_vector
            elif isinstance(mean_velocity, Field.Access):
                self.mean_velocity = mean_velocity.field.neighbor_vector(mean_velocity.offsets)
            else:
                raise ValueError("Mean velocity field has to be a pystencils Field or Field.Access")
        else:
            self.mean_velocity = None

        if not isinstance(sampling_shift, int):
            self.sampling_shift = TypedSymbol(sampling_shift.name, np.uint32)
        else:
            assert sampling_shift >= 1, "The sampling shift must be at least 1."
            self.sampling_shift = sampling_shift

        if maronga_sampling_shift:
            assert self.mean_velocity, "Mean velocity field must be provided when using the Maronga correction"
            if not isinstance(maronga_sampling_shift, int):
                self.maronga_sampling_shift = TypedSymbol(maronga_sampling_shift.name, np.uint32)
            else:
                assert maronga_sampling_shift >= 1, "The Maronga sampling shift must be at least 1."
                self.maronga_sampling_shift = maronga_sampling_shift
        else:
            self.maronga_sampling_shift = None

        if (self.sampling_shift != 1) and self.maronga_sampling_shift:
            raise ValueError("Both sampling shift and Maronga offset are set. This is currently not supported.")

        self.dt = dt
        self.dy = dy
        self.y = y
        self.data_type = data_type

        self.target_friction_velocity = target_friction_velocity

        self.weight_method = weight_method

        if len(normal_direction) - normal_direction.count(0) != 1:
            raise ValueError("Only normal directions for straight walls are supported, for example (0, 1, 0) for "
                             "a WallFunctionBounce applied to the southern boundary of the domain")

        self.mirror_axis = normal_direction.index(*[direction for direction in normal_direction if direction != 0])

        self.normal_direction = normal_direction
        assert all([n in [-1, 0, 1] for n in self.normal_direction]), \
            "Only -1, 0 and 1 allowed for defining the normal direction"
        tangential_component = [int(not n) for n in self.normal_direction]
        self.normal_axis = tangential_component.index(0)
        self.tangential_axis = [0, 1, 2]
        self.tangential_axis.remove(self.normal_axis)

        self.dim = self.stencil.D

        if name is None:
            name = f"WFB : {offset_to_direction_string([-x for x in normal_direction])}"

        super(WallFunctionBounce, self).__init__(name, calculate_force_on_boundary=False)
    def get_additional_code_nodes(self, lb_method):
        return [MirroredStencilDirections(self.stencil, self.mirror_axis),
                NeighbourOffsetArrays(lb_method.stencil)]

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        # needed symbols for offsets and indices
        # neighbour offset symbols are basically the stencil directions defined in stencils.py:L130ff.
        neighbor_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
        tangential_offset = tuple(offset + normal for offset, normal in zip(neighbor_offset, self.normal_direction))

        mirrored_stencil_symbol = MirroredStencilDirections._mirrored_symbol(self.mirror_axis)
        mirrored_direction = inv_dir[sp.IndexedBase(mirrored_stencil_symbol, shape=(1,))[dir_symbol]]

        name_base = "f_in_inv_offsets_"
        offset_array_symbols = [TypedSymbol(name_base + d, mirrored_stencil_symbol.dtype) for d in ['x', 'y', 'z']]
        mirrored_offset = sp.IndexedBase(mirrored_stencil_symbol, shape=(1,))[dir_symbol]
        offsets = tuple(sp.IndexedBase(s, shape=(1,))[mirrored_offset] for s in offset_array_symbols)
        # needed symbols in the Assignments
        u_m = sp.Symbol("u_m")
        tau_w = sp.Symbol("tau_w")
        wall_stress = sp.symbols("tau_w_x tau_w_y tau_w_z")

        # if the mean velocity field is not given, or the Maronga correction is applied, density and velocity values
        # will be calculated from pdfs
        cqc = lb_method.conserved_quantity_computation

        result = []
        if (not self.mean_velocity) or self.maronga_sampling_shift:
            pdf_center_vector = sp.Matrix([0] * self.stencil.Q)

            for i in range(self.stencil.Q):
                pdf_center_vector[i] = self.pdfs[offsets[0] + self.normal_direction[0],
                                                 offsets[1] + self.normal_direction[1],
                                                 offsets[2] + self.normal_direction[2]](i)

            eq_equations = cqc.equilibrium_input_equations_from_pdfs(pdf_center_vector)
            result.append(eq_equations.all_assignments)

        # sample velocity which will be used in the wall stress calculation
        if self.mean_velocity:
            if self.maronga_sampling_shift:
                u_for_tau_wall = tuple(u_mean_i.get_shifted(
                    self.maronga_sampling_shift * self.normal_direction[0],
                    self.maronga_sampling_shift * self.normal_direction[1],
                    self.maronga_sampling_shift * self.normal_direction[2]
                ) for u_mean_i in self.mean_velocity)
            else:
                u_for_tau_wall = tuple(u_mean_i.get_shifted(
                    self.sampling_shift * self.normal_direction[0],
                    self.sampling_shift * self.normal_direction[1],
                    self.sampling_shift * self.normal_direction[2]
                ) for u_mean_i in self.mean_velocity)

            rho_for_tau_wall = sp.Float(1)
        else:
            rho_for_tau_wall = cqc.density_symbol
            u_for_tau_wall = cqc.velocity_symbols

        # calculate Maronga factor in case of correction
        maronga_fix = sp.Symbol("maronga_fix")
        if self.maronga_sampling_shift:
            inst_first_cell_vel = cqc.velocity_symbols
            mean_first_cell_vel = tuple(u_mean_i.get_shifted(*self.normal_direction) for u_mean_i in self.mean_velocity)

            mag_inst_vel_first_cell = sp.sqrt(sum([inst_first_cell_vel[i] ** 2 for i in self.tangential_axis]))
            mag_mean_vel_first_cell = sp.sqrt(sum([mean_first_cell_vel[i] ** 2 for i in self.tangential_axis]))

            result.append(Assignment(maronga_fix, mag_inst_vel_first_cell / mag_mean_vel_first_cell))
        else:
            maronga_fix = 1

        # store which direction is tangential component (only those are used for the wall shear stress)
        red_u_mag = sp.sqrt(sum([u_for_tau_wall[i]**2 for i in self.tangential_axis]))

        u_mag = Assignment(u_m, red_u_mag)
        result.append(u_mag)

        wall_distance = self.maronga_sampling_shift if self.maronga_sampling_shift else self.sampling_shift

        # using wall function model
        wall_law_assignments = self.wall_function_model.shear_stress_assignments(
            density_symbol=rho_for_tau_wall, velocity_symbol=u_m, shear_stress_symbol=tau_w,
            wall_distance=(wall_distance - sp.Rational(1, 2) * self.dy),
            u_tau_target=self.target_friction_velocity)
        result.append(wall_law_assignments)

        # calculate wall stress components and use them to calculate the drag
        for i in self.tangential_axis:
            result.append(Assignment(wall_stress[i], - u_for_tau_wall[i] / u_m * tau_w * maronga_fix))

        weight, inv_weight_sq = sp.symbols("wfb_weight inverse_weight_squared")

        if self.stencil.Q == 19:
            result.append(Assignment(weight, sp.Rational(1, 2)))
        elif self.stencil.Q == 27:
            result.append(Assignment(inv_weight_sq, sum([neighbor_offset[i]**2 for i in self.tangential_axis])))
            a, b = sp.symbols("wfb_a wfb_b")
            if self.weight_method == self.WeightMethod.LATTICE_WEIGHT:
                res_ab = sp.solve([2 * a + 4 * b - 1, a - 4 * b], [a, b])  # lattice weight scaling
            elif self.weight_method == self.WeightMethod.GEOMETRIC_WEIGHT:
                res_ab = sp.solve([2 * a + 4 * b - 1, a - sp.sqrt(2) * b], [a, b])  # geometric scaling
            else:
                raise ValueError("Unknown weighting method for the WFB D3Q27 extension. Currently, only lattice "
                                 "weights and geometric weights are supported.")

            result.append(Assignment(weight, sp.Piecewise((sp.Float(0), sp.Equality(inv_weight_sq, 0)),
                                                          (res_ab[a], sp.Equality(inv_weight_sq, 1)),
                                                          (res_ab[b], True))))

        factor = self.dt / self.dy * weight
        drag = sum([neighbor_offset[i] * factor * wall_stress[i] for i in self.tangential_axis])

        result.append(Assignment(f_in.center(inv_dir[dir_symbol]), f_out[tangential_offset](mirrored_direction) - drag))

        return result
# end class WallFunctionBounce
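# Note on the D3Q27 weight factors solved for above: the two constraint
# systems give a = 1/3, b = 1/12 for the lattice-weight scaling and
# a = sqrt(2)/(4 + 2*sqrt(2)), b = 1/(4 + 2*sqrt(2)) for the geometric
# scaling. The Piecewise then assigns weight 0 to the wall-normal link
# (no tangential component), a to in-plane links (one tangential
# component) and b to diagonal links (two tangential components).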
class UBB(LbBoundary): class UBB(LbBoundary):
r"""Velocity bounce back boundary condition, enforcing specified velocity at obstacle. Furthermore, a density r"""Velocity bounce back boundary condition, enforcing specified velocity at obstacle. Furthermore, a density
at the wall can be implied. The boundary condition is implemented with the following formula: at the wall can be implied. The boundary condition is implemented with the following formula:
...@@ -482,7 +755,7 @@ class UBB(LbBoundary): ...@@ -482,7 +755,7 @@ class UBB(LbBoundary):
self.dim = dim self.dim = dim
self.data_type = data_type self.data_type = data_type
super(UBB, self).__init__(name) super(UBB, self).__init__(name, calculate_force_on_boundary=False)
@property @property
def additional_data(self): def additional_data(self):
...@@ -518,7 +791,7 @@ class UBB(LbBoundary): ...@@ -518,7 +791,7 @@ class UBB(LbBoundary):
This is useful if the inflow velocity should have a certain profile for instance""" This is useful if the inflow velocity should have a certain profile for instance"""
return callable(self._velocity) return callable(self._velocity)
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field): def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
vel_from_idx_field = callable(self._velocity) vel_from_idx_field = callable(self._velocity)
vel = [index_field(f'vel_{i}') for i in range(self.dim)] if vel_from_idx_field else self._velocity vel = [index_field(f'vel_{i}') for i in range(self.dim)] if vel_from_idx_field else self._velocity
...@@ -594,7 +867,7 @@ class SimpleExtrapolationOutflow(LbBoundary):
        self.normal_direction = tuple([int(n) for n in normal_direction])
        assert all([n in [-1, 0, 1] for n in self.normal_direction]), \
            "Only -1, 0 and 1 allowed for defining the normal direction"
        super(SimpleExtrapolationOutflow, self).__init__(name, calculate_force_on_boundary=False)

    def get_additional_code_nodes(self, lb_method):
        """Return a list of code nodes that will be added in the generated code before the index field loop.
...@@ -608,7 +881,7 @@ class SimpleExtrapolationOutflow(LbBoundary):
        """
        return [NeighbourOffsetArrays(lb_method.stencil)]

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        neighbor_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
        tangential_offset = tuple(offset - normal for offset, normal in zip(neighbor_offset, self.normal_direction))
...@@ -634,7 +907,7 @@ class ExtrapolationOutflow(LbBoundary):
    Args:
        normal_direction: direction vector normal to the outflow
        lb_method: the lattice Boltzmann method to be used in the simulation
        dt: lattice time step size
        dx: lattice spacing distance
        name: optional name of the boundary.
...@@ -687,7 +960,7 @@ class ExtrapolationOutflow(LbBoundary):
        self.equilibrium_calculation = calc_eq_pdfs

        super(ExtrapolationOutflow, self).__init__(name, calculate_force_on_boundary=False)

    def init_callback(self, boundary_data, **_):
        dim = boundary_data.dim
...@@ -740,7 +1013,7 @@ class ExtrapolationOutflow(LbBoundary):
        """
        return [NeighbourOffsetArrays(lb_method.stencil)]

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        subexpressions = []
        boundary_assignments = []
        dtdx = sp.Rational(self.dt, self.dx)
...@@ -786,9 +1059,9 @@ class FixedDensity(LbBoundary):
            name = "Fixed Density " + str(density)

        self.density = density
        super(FixedDensity, self).__init__(name, calculate_force_on_boundary=False)

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        def remove_asymmetric_part_of_main_assignments(assignment_collection, degrees_of_freedom):
            new_main_assignments = [Assignment(a.lhs, get_symmetric_part(a.rhs, degrees_of_freedom))
                                    for a in assignment_collection.main_assignments]
...@@ -850,7 +1123,7 @@ class DiffusionDirichlet(LbBoundary):
        self.concentration_is_callable = callable(self.concentration)
        self.velocity_field = velocity_field

        super(DiffusionDirichlet, self).__init__(name, calculate_force_on_boundary=False)

    @property
    def additional_data(self):
...@@ -883,7 +1156,7 @@ class DiffusionDirichlet(LbBoundary):
        else:
            return [LbmWeightInfo(lb_method, self._data_type)]

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        assert lb_method.conserved_quantity_computation.zero_centered_pdfs is False, \
            "DiffusionDirichlet only works for methods with normal pdf storage -> set zero_centered=False"
        weight_info = LbmWeightInfo(lb_method, self._data_type)
...@@ -926,7 +1199,7 @@ class NeumannByCopy(LbBoundary):
        """
        return [NeighbourOffsetArrays(lb_method.stencil)]

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        neighbour_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
        return [Assignment(f_in(inv_dir[dir_symbol]), f_out(inv_dir[dir_symbol])),
                Assignment(f_out[neighbour_offset](dir_symbol), f_out(dir_symbol))]
...@@ -945,7 +1218,7 @@ class StreamInConstant(LbBoundary):
    """

    def __init__(self, constant, name=None):
        super(StreamInConstant, self).__init__(name, calculate_force_on_boundary=False)
        self.constant = constant

    def get_additional_code_nodes(self, lb_method):
...@@ -959,7 +1232,7 @@ class StreamInConstant(LbBoundary):
        """
        return [NeighbourOffsetArrays(lb_method.stencil)]

    def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
        neighbour_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
        return [Assignment(f_in(inv_dir[dir_symbol]), self.constant),
                Assignment(f_out[neighbour_offset](dir_symbol), self.constant)]
...
from dataclasses import replace

import numpy as np

from pystencils import Assignment, CreateKernelConfig, create_kernel, Field, Target
from pystencils.boundaries import BoundaryHandling
from pystencils.boundaries.createindexlist import numpy_data_type_for_boundary_object
from pystencils.field import FieldType
from pystencils.simp import add_subexpressions_for_field_reads
from pystencils.stencil import inverse_direction
...@@ -156,22 +158,31 @@ class LatticeBoltzmannBoundaryHandling(BoundaryHandling):
def create_lattice_boltzmann_boundary_kernel(pdf_field, index_field, lb_method, boundary_functor,
                                             prev_timestep=Timestep.BOTH, streaming_pattern='pull',
                                             target=Target.CPU, force_vector=None, **kernel_creation_args):
    indexing = BetweenTimestepsIndexing(
        pdf_field, lb_method.stencil, prev_timestep, streaming_pattern, np.int32, np.int32)

    dim = lb_method.stencil.D
    f_out, f_in = indexing.proxy_fields
    dir_symbol = indexing.dir_symbol
    inv_dir = indexing.inverse_dir_symbol

    config = CreateKernelConfig(target=target, default_number_int="int32",
                                skip_independence_check=True, **kernel_creation_args)

    default_data_type = config.data_type.default_factory()
    if force_vector is None:
        force_vector_type = np.dtype([(f"F_{i}", default_data_type.c_name) for i in range(dim)], align=True)
        force_vector = Field.create_generic('force_vector', spatial_dimensions=1,
                                            dtype=force_vector_type, field_type=FieldType.INDEXED)

    config = replace(config, index_fields=[index_field, force_vector])

    boundary_assignments = boundary_functor(f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector)
    boundary_assignments = indexing.substitute_proxies(boundary_assignments)

    if pdf_field.dtype != default_data_type:
        boundary_assignments = add_subexpressions_for_field_reads(boundary_assignments, data_type=default_data_type)
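Side note: when no force_vector field is passed, the kernel builds one with the structured dtype shown above. A caller that wants to read back per-link boundary forces can allocate a matching NumPy array; a sketch (num_links stands in for the length of the boundary index list and is illustrative):

import numpy as np

dim = 2
num_links = 128  # illustrative: number of entries in the boundary index list
force_dtype = np.dtype([(f"F_{i}", np.float64) for i in range(dim)], align=True)
force_array = np.zeros(num_links, dtype=force_dtype)  # one force entry per boundary link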
...
import sympy as sp
from abc import ABC, abstractmethod

from pystencils import Assignment


class WallFunctionModel(ABC):
    def __init__(self, name):
        self._name = name

    @abstractmethod
    def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
                                 velocity_symbol: sp.Symbol, wall_distance, u_tau_target):
        """
        Computes a symbolic representation of the wall shear stress from the model's wall law.

        Args:
            density_symbol: density symbol, should be provided by the LB method's conserved quantity computation
            shear_stress_symbol: symbolic wall shear stress to which the calculated shear stress will be assigned
            velocity_symbol: symbolic velocity that is taken as a reference in the wall functions
            wall_distance: distance to the wall; equals 0.5 in standard cell-centered LBM
            u_tau_target: in implicit wall functions, a target friction velocity can be provided which will be
                used as the initial guess in the Newton iteration. This target friction velocity can be
                obtained, e.g., from the target friction Reynolds number.
        """
        pass

# end class WallFunctionModel


class ExplicitWallFunctionModel(WallFunctionModel, ABC):
    """
    Abstract base class for explicit wall functions that can be solved directly for the wall shear stress.
    """

    def __init__(self, name):
        super(ExplicitWallFunctionModel, self).__init__(name=name)
class MoninObukhovSimilarityTheory(ExplicitWallFunctionModel):
    def __init__(self, z0, kappa=0.41, phi=0, name="MOST"):
        self.z0 = z0
        self.kappa = kappa
        self.phi = phi

        super(MoninObukhovSimilarityTheory, self).__init__(name=name)

    def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
                                 velocity_symbol: sp.Symbol, wall_distance, u_tau_target=None):
        u_tau = velocity_symbol * self.kappa / sp.ln(wall_distance / self.z0 + self.phi)
        return [Assignment(shear_stress_symbol, u_tau ** 2 * density_symbol)]
class ImplicitWallFunctionModel(WallFunctionModel, ABC):
    """
    Abstract base class for implicit wall functions that require a Newton procedure to solve for the wall
    shear stress.
    """

    def __init__(self, name, newton_steps, viscosity):
        self.newton_steps = newton_steps
        self.u_tau = sp.symbols(f"wall_function_u_tau_:{self.newton_steps + 1}")
        self.delta = sp.symbols(f"wall_function_delta_:{self.newton_steps}")

        self.viscosity = viscosity

        super(ImplicitWallFunctionModel, self).__init__(name=name)

    def newton_iteration(self, wall_law):
        m = -wall_law / wall_law.diff(self.u_tau[0])

        assignments = []
        for i in range(self.newton_steps):
            assignments.append(Assignment(self.delta[i], m.subs({self.u_tau[0]: self.u_tau[i]})))
            assignments.append(Assignment(self.u_tau[i + 1], self.u_tau[i] + self.delta[i]))

        return assignments
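Written out, newton_iteration emits the standard Newton update for a root of the wall-law residual f(u_tau):

\[ \delta_i = -\frac{f\big(u_\tau^{(i)}\big)}{f'\big(u_\tau^{(i)}\big)}, \qquad u_\tau^{(i+1)} = u_\tau^{(i)} + \delta_i, \qquad i = 0, \dots, \mathrm{newton\_steps} - 1 \]

The derivative is formed symbolically once with respect to u_tau[0] and re-substituted for each iterate.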
class LogLaw(ImplicitWallFunctionModel):
    """
    Analytical model for the velocity profile inside the boundary layer, obtained from the mean velocity
    gradient. Only valid in the log-law region.
    """

    def __init__(self, viscosity, newton_steps=5, kappa=0.41, b=5.2, name="LogLaw"):
        self.kappa = kappa
        self.b = b

        super(LogLaw, self).__init__(name=name, newton_steps=newton_steps, viscosity=viscosity)

    def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
                                 velocity_symbol: sp.Symbol, wall_distance, u_tau_target=None):
        def law(u_p, y_p):
            return 1 / self.kappa * sp.ln(y_p) + self.b - u_p

        u_plus = velocity_symbol / self.u_tau[0]
        y_plus = (wall_distance * self.u_tau[0]) / self.viscosity
        u_tau_init = u_tau_target if u_tau_target else velocity_symbol / sp.Float(100)

        wall_law = law(u_plus, y_plus)
        assignments = [Assignment(self.u_tau[0], u_tau_init),  # initial guess
                       *self.newton_iteration(wall_law),  # Newton iterations
                       Assignment(shear_stress_symbol, self.u_tau[-1] ** 2 * density_symbol)]  # final result

        return assignments
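The residual implemented in law is the classical logarithmic law of the wall,

\[ u^+ = \frac{1}{\kappa} \ln y^+ + B, \qquad u^+ = \frac{u}{u_\tau}, \quad y^+ = \frac{y\, u_\tau}{\nu}, \]

with the defaults kappa = 0.41 and B = 5.2; the Newton iteration above drives f(u_tau) = (1/kappa) ln y+ + B - u+ to zero.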
class SpaldingsLaw(ImplicitWallFunctionModel):
    """
    Single formula for the velocity profile inside the boundary layer, proposed by Spalding :cite:`spalding1961`.
    Valid in the inner and the outer layer.
    """

    def __init__(self, viscosity, newton_steps=5, kappa=0.41, b=5.5, name="Spalding"):
        self.kappa = kappa
        self.b = b

        super(SpaldingsLaw, self).__init__(name=name, newton_steps=newton_steps, viscosity=viscosity)

    def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
                                 velocity_symbol: sp.Symbol, wall_distance, u_tau_target=None):
        def law(u_p, y_p):
            k_times_u = self.kappa * u_p
            frac_1 = (k_times_u ** 2) / sp.Float(2)
            frac_2 = (k_times_u ** 3) / sp.Float(6)

            return (u_p + sp.exp(-self.kappa * self.b) * (sp.exp(k_times_u) - sp.Float(1) - k_times_u - frac_1 - frac_2)
                    - y_p)

        u_plus = velocity_symbol / self.u_tau[0]
        y_plus = (wall_distance * self.u_tau[0]) / self.viscosity
        u_tau_init = u_tau_target if u_tau_target else velocity_symbol / sp.Float(100)

        wall_law = law(u_plus, y_plus)
        assignments = [Assignment(self.u_tau[0], u_tau_init),  # initial guess
                       *self.newton_iteration(wall_law),  # Newton iterations
                       Assignment(shear_stress_symbol, self.u_tau[-1] ** 2 * density_symbol)]  # final result

        return assignments
class MuskerLaw(ImplicitWallFunctionModel):
    """
    Quasi-analytical model for the velocity profile inside the boundary layer, proposed by Musker. Valid in the
    inner and the outer layer.

    Formulation taken from :cite:`malaspinas2015`, Equation (59).
    """

    def __init__(self, viscosity, newton_steps=5, name="Musker"):
        super(MuskerLaw, self).__init__(name=name, newton_steps=newton_steps, viscosity=viscosity)

    def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
                                 velocity_symbol: sp.Symbol, wall_distance, u_tau_target=None):
        def law(u_p, y_p):
            arctan = sp.Float(5.424) * sp.atan(sp.Float(0.119760479041916168) * y_p - sp.Float(0.488023952095808383))
            logarithm = (sp.Float(0.434) * sp.log((y_p + sp.Float(10.6)) ** sp.Float(9.6)
                                                  / (y_p ** 2 - sp.Float(8.15) * y_p + sp.Float(86)) ** 2))
            return (arctan + logarithm - sp.Float(3.50727901936264842)) - u_p

        u_plus = velocity_symbol / self.u_tau[0]
        y_plus = (wall_distance * self.u_tau[0]) / self.viscosity
        u_tau_init = u_tau_target if u_tau_target else velocity_symbol / sp.Float(100)

        wall_law = law(u_plus, y_plus)
        assignments = [Assignment(self.u_tau[0], u_tau_init),  # initial guess
                       *self.newton_iteration(wall_law),  # Newton iterations
                       Assignment(shear_stress_symbol, self.u_tau[-1] ** 2 * density_symbol)]  # final result

        return assignments
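All models share the shear_stress_assignments interface. A sketch of generating the symbolic assignments (the symbols rho, tau_w and u are illustrative placeholders for the quantities normally provided by the LB method):

import sympy as sp

rho, tau_w, u = sp.symbols("rho tau_w u")
model = SpaldingsLaw(viscosity=1e-3, newton_steps=5)
assignments = model.shear_stress_assignments(density_symbol=rho, shear_stress_symbol=tau_w,
                                             velocity_symbol=u, wall_distance=sp.Rational(1, 2))
for a in assignments:
    print(a)  # initial guess, Newton updates, final shear stress assignment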
...@@ -20,21 +20,21 @@ and belongs to pystencils, not lbmpy. This can be found in the pystencils module
of the generated code is specified.

1. *Method*:
      the method defines the collision process. Currently, there are two big categories:
      moment- and cumulant-based methods. A method defines how each moment or cumulant is relaxed by
      storing the equilibrium value and the relaxation rate for each moment/cumulant.
2. *Collision/Update Rule*:
      Methods can generate a "collision rule", which is an equation collection that defines the
      post-collision values as a function of the pre-collision values. On this equation collection,
      simplifications are applied to reduce the number of floating point operations.
      At this stage an entropic optimisation step can also be added to determine one relaxation rate by an
      entropy condition.
      Then a streaming rule is added which transforms the collision rule into an update rule.
      The streaming step depends on the pdf storage (source/destination, AABB pattern, EsoTwist).
      Currently, only the simple source/destination pattern is supported.
3. *AST*:
      The abstract syntax tree describes the structure of the kernel, including loops and conditionals.
      The AST can be modified, e.g., to add OpenMP pragmas, reorder loops or apply other optimisations.
4. *Function*:
      This step compiles the AST into an executable function, either for CPUs or GPUs. This function
      behaves like a normal Python function and runs one LBM time step.
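These stages can also be driven one at a time. A sketch using the entry points from lbmpy.creationfunctions (the stencil, method and relaxation rate are illustrative choices):

from lbmpy.creationfunctions import (LBMConfig, create_lb_method, create_lb_collision_rule,
                                     create_lb_update_rule, create_lb_ast, create_lb_function)
from lbmpy.enums import Method, Stencil
from lbmpy.stencils import LBStencil

lbm_config = LBMConfig(stencil=LBStencil(Stencil.D2Q9), method=Method.SRT, relaxation_rate=1.9)

method = create_lb_method(lbm_config=lbm_config)                                    # 1. method
collision_rule = create_lb_collision_rule(lb_method=method, lbm_config=lbm_config)  # 2. collision rule
update_rule = create_lb_update_rule(collision_rule=collision_rule, lbm_config=lbm_config)
ast = create_lb_ast(update_rule=update_rule, lbm_config=lbm_config)                 # 3. AST
kernel = create_lb_function(ast=ast, lbm_config=lbm_config)                         # 4. compiled function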
...@@ -63,7 +63,7 @@ import pystencils.astnodes
import sympy as sp
import sympy.core.numbers

from lbmpy.enums import Stencil, Method, ForceModel, CollisionSpace, SubgridScaleModel
import lbmpy.forcemodels as forcemodels
from lbmpy.fieldaccess import CollideOnlyInplaceAccessor, PdfFieldAccessor, PeriodicTwoFieldsAccessor
from lbmpy.fluctuatinglb import add_fluctuations_to_collision_rule
...@@ -78,7 +78,7 @@ from lbmpy.methods.momentbased.entropic import add_entropy_condition, add_iterat
from lbmpy.relaxationrates import relaxation_rate_from_magic_number
from lbmpy.simplificationfactory import create_simplification_strategy
from lbmpy.stencils import LBStencil
from lbmpy.turbulence_models import add_sgs_model
from lbmpy.updatekernels import create_lbm_kernel, create_stream_pull_with_output_kernel
from lbmpy.advanced_streaming.utility import Timestep, get_accessor

from pystencils import CreateKernelConfig, create_kernel
...@@ -145,7 +145,7 @@ class LBMConfig:
    """
    delta_equilibrium: bool = None
    """
    Determines whether the (continuous or discrete, see `continuous_equilibrium`) Maxwellian equilibrium is
    expressed in its absolute form, or only by its deviation from the rest state (typically given by the reference
    density and zero velocity). This parameter is only effective if `zero_centered` is set to `True`. Then, if
    `delta_equilibrium` is `False`, the rest state must be reintroduced to the populations during collision. Otherwise,
...@@ -175,7 +175,7 @@ class LBMConfig:
    """
    A list of lists of modes, grouped by common relaxation times. This is usually used in
    conjunction with `lbmpy.methods.default_moment_sets.mrt_orthogonal_modes_literature`.
    If this argument is not provided, Gram-Schmidt orthogonalisation of the default modes is performed.
    """
    force_model: Union[lbmpy.forcemodels.AbstractForceModel, ForceModel] = None
...@@ -239,12 +239,17 @@ class LBMConfig:
    omega_output_field: Field = None
    """
    A pystencils Field can be passed here, to which the calculated free relaxation rate of
    an entropic or subgrid-scale method is written.
    """
    eddy_viscosity_field: Field = None
    """
    A pystencils Field can be passed here, to which the eddy viscosity of a subgrid-scale model is written.
    """
    subgrid_scale_model: Union[SubgridScaleModel, tuple[SubgridScaleModel, float], tuple[SubgridScaleModel, int]] = None
    """
    Choose a subgrid-scale model (SGS) for large-eddy simulations. ``omega_output_field`` can be set to
    write out adapted relaxation rates. Either provide just the SGS and use the default model constants, or provide a
    tuple of the SGS and its corresponding model constant.
    """
    cassons: CassonsParameters = False
    """
...@@ -371,8 +376,8 @@ class LBMConfig:
        if not self.compressible and self.method in (Method.MONOMIAL_CUMULANT, Method.CUMULANT):
            raise ValueError("Incompressible cumulant-based methods are not supported (yet).")

        if self.zero_centered and self.entropic:
            raise ValueError("Entropic methods can only be created with `zero_centered=False`.")

        # Check or infer delta-equilibrium
        if self.delta_equilibrium is not None:
...@@ -702,8 +707,8 @@ def create_lb_collision_rule(lb_method=None, lbm_config=None, lbm_optimisation=N
                                              limiter=cumulant_limiter)

    if lbm_config.entropic:
        if lbm_config.subgrid_scale_model or lbm_config.cassons:
            raise ValueError("Choose either entropic, subgrid-scale or cassons")
        if lbm_config.entropic_newton_iterations:
            if isinstance(lbm_config.entropic_newton_iterations, bool):
                iterations = 3
...@@ -714,14 +719,23 @@ def create_lb_collision_rule(lb_method=None, lbm_config=None, lbm_optimisation=N
        else:
            collision_rule = add_entropy_condition(collision_rule, omega_output_field=lbm_config.omega_output_field)
    elif lbm_config.subgrid_scale_model:
        if lbm_config.cassons:
            raise ValueError("Cassons model cannot be combined with a subgrid-scale model")

        model_constant = None
        sgs_model = lbm_config.subgrid_scale_model

        if isinstance(lbm_config.subgrid_scale_model, tuple):
            sgs_model = lbm_config.subgrid_scale_model[0]
            model_constant = lbm_config.subgrid_scale_model[1]

        collision_rule = add_sgs_model(collision_rule=collision_rule, subgrid_scale_model=sgs_model,
                                       model_constant=model_constant, omega_output_field=lbm_config.omega_output_field,
                                       eddy_viscosity_field=lbm_config.eddy_viscosity_field)

        if 'split_groups' in collision_rule.simplification_hints:
            collision_rule.simplification_hints['split_groups'][0].append(sp.Symbol("sgs_omega"))
    elif lbm_config.cassons:
        collision_rule = add_cassons_model(collision_rule, parameter=lbm_config.cassons,
...
...@@ -4,7 +4,7 @@ import inspect
from pystencils.runhelper.db import PystencilsJsonEncoder
from pystencils.simp import SimplificationStrategy

from lbmpy import LBStencil, Method, CollisionSpace, SubgridScaleModel
from lbmpy.creationfunctions import LBMConfig, LBMOptimisation
from lbmpy.methods import CollisionSpaceInfo
from lbmpy.forcemodels import AbstractForceModel
...@@ -16,7 +16,7 @@ class LbmpyJsonEncoder(PystencilsJsonEncoder):
    def default(self, obj):
        if isinstance(obj, (LBMConfig, LBMOptimisation, CollisionSpaceInfo, CassonsParameters)):
            return obj.__dict__
        if isinstance(obj, (LBStencil, Method, CollisionSpace, SubgridScaleModel)):
            return obj.name
        if isinstance(obj, AbstractForceModel):
            return obj.__class__.__name__
...
...@@ -219,5 +219,20 @@ class ForceModel(Enum):
    """
    CENTRALMOMENT = auto()
    """
    See :class:`lbmpy.forcemodels.CentralMoment`
    """


class SubgridScaleModel(Enum):
    """
    The SubgridScaleModel enumeration defines which subgrid-scale model (SGS) is used to perform
    large-eddy simulations (LES).
    """
    SMAGORINSKY = auto()
    """
    See :func:`lbmpy.turbulence_models.add_smagorinsky_model`
    """
    QR = auto()
    """
    See :func:`lbmpy.turbulence_models.add_qr_model`
    """