Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (90)
Showing with 2488 additions and 191 deletions
[flake8]
max-line-length=120
exclude=lbmpy/plot.py
lbmpy/session.py
ignore = W293 W503 W291 E741
exclude=src/lbmpy/plot.py
src/lbmpy/session.py
ignore = W293 W503 W291 C901 E741
lbmpy/_version.py export-subst
src/lbmpy/_version.py export-subst
__pycache__
.ipynb_checkpoints
.coverage
.coverage*
*.pyc
*.vti
/build
/html_doc
/dist
/*.egg-info
*.egg-info
.cache
_build
/.idea
@@ -15,14 +15,15 @@ _local_tmp
**/pylintrc
*.bak
*.tmp
/lbmpy_tests/db
/tests/db
doc/bibtex.json
/db
/lbmpy/phasefield/simplex_projection.*.so
/lbmpy/phasefield/simplex_projection.c
/src/lbmpy/phasefield/simplex_projection.*.so
/src/lbmpy/phasefield/simplex_projection.c
# macOS
**/.DS_Store
*.uuid
# benchmark database
/lbmpy_tests/benchmark/db
\ No newline at end of file
/tests/benchmark/db
\ No newline at end of file
stages:
- pretest
- test
- nightly
- docs
- deploy
# -------------------------- Templates ------------------------------------------------------------------------------------
# Base configuration for jobs meant to run at every commit
.every-commit:
rules:
- if: $CI_PIPELINE_SOURCE != "schedule"
# Configuration for jobs meant to run on each commit to pycodegen/lbmpy/master
.every-commit-master:
rules:
- if: '$CI_PIPELINE_SOURCE != "schedule" && $CI_PROJECT_PATH == "pycodegen/lbmpy" && $CI_COMMIT_BRANCH == "master"'
# Base configuration for jobs meant to run at a schedule
.scheduled:
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
# -------------------------- Pre Tests --------------------------------------------------------------------------------
# Normal test - runs on every commit all but "long run" tests
tests-and-coverage:
stage: pretest
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full:cupy12.3
script:
# - pip install sympy --upgrade
- export NUM_CORES=$(nproc --all)
@@ -22,7 +39,7 @@ tests-and-coverage:
- pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
- env
- pip list
- py.test -v -n $NUM_CORES --cov-report html --cov-report term --cov=. -m "not longrun" --junitxml=report.xml
- py.test -v -n $NUM_CORES --cov-report html --cov-report xml --cov-report term --cov=. -m "not longrun" --junitxml=report.xml
- python3 -m coverage xml
tags:
- docker
@@ -62,25 +79,22 @@ tests-and-coverage-with-longrun:
minimal-conda:
stage: pretest
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda
script:
- pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
- python setup.py quicktest
- pip install -e .
- python quicktest.py
tags:
- docker
# Linter for code formatting
flake8-lint:
stage: pretest
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
script:
- flake8 lbmpy
- flake8 src/lbmpy
tags:
- docker
- cuda11
@@ -90,9 +104,7 @@ flake8-lint:
# pipeline with latest python version
latest-python:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
before_script:
- pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
@@ -113,34 +125,34 @@ latest-python:
junit: report.xml
# Minimal tests in windows environment
minimal-windows:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
tags:
- win
script:
- export NUM_CORES=$(nproc --all)
- export MPLBACKEND=Agg
- source /cygdrive/c/Users/build/Miniconda3/Scripts/activate
- source activate pystencils
- pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
- python -c "import numpy"
- pip install sympy==1.9
- py.test -v -m "not (notebook or longrun)"
#minimal-windows:
# stage: test
# except:
# variables:
# - $ENABLE_NIGHTLY_BUILDS
# tags:
# - win
# script:
# - export NUM_CORES=$(nproc --all)
# - export MPLBACKEND=Agg
# - source /cygdrive/c/Users/build/Miniconda3/Scripts/activate
# - source activate pystencils
# - pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
# - python -c "import numpy"
# - pip install sympy==1.9
# - py.test -v -m "not (notebook or longrun)"
minimal-sympy-master:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda
before_script:
- pip install -e .
script:
- pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
- python -m pip install --upgrade git+https://github.com/sympy/sympy.git
- pip list
- python setup.py quicktest
- python quicktest.py
allow_failure: true
tags:
- docker
@@ -148,9 +160,7 @@ minimal-sympy-master:
ubuntu:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/ubuntu
before_script:
# - apt-get -y remove python3-sympy
@@ -163,7 +173,7 @@ ubuntu:
- echo "backend:template" > ~/.config/matplotlib/matplotlibrc
- env
- pip3 list
- pytest-3 -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
- pytest -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
tags:
- docker
- cuda11
@@ -207,11 +217,44 @@ pycodegen-integration:
- cuda11
- AVX
# -------------------- Scheduled Tasks --------------------------------------------------------------------------
nightly-sympy:
stage: nightly
extends: .scheduled
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
before_script:
- pip install -e .
- pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
- pip install --upgrade --pre sympy
script:
- env
- pip list
- export NUM_CORES=$(nproc --all)
- mkdir -p ~/.config/matplotlib
- echo "backend:template" > ~/.config/matplotlib/matplotlibrc
- mkdir public
- pytest -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
tags:
- docker
- AVX
- cuda
artifacts:
when: always
reports:
junit: report.xml
# -------------------- Documentation and deploy ------------------------------------------------------------------------
build-documentation:
stage: test
stage: docs
needs: []
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/documentation
before_script:
- pip install -e .
script:
- export PYTHONPATH=`pwd`
- pip install git+https://gitlab-ci-token:${CI_JOB_TOKEN}@i10git.cs.fau.de/pycodegen/pystencils.git@master#egg=pystencils
@@ -227,7 +270,9 @@ build-documentation:
pages:
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
extends: .every-commit-master
stage: deploy
needs: ["tests-and-coverage", "build-documentation"]
script:
- ls -l
- mv coverage_report html_doc
@@ -237,5 +282,3 @@ pages:
- public
tags:
- docker
only:
- master@pycodegen/lbmpy
include README.md
include COPYING.txt
include AUTHORS.txt
include CONTRIBUTING.md
global-include *.pyx
include versioneer.py
include lbmpy/_version.py
include CHANGELOG.md
@@ -71,6 +71,7 @@ Many thanks go to the [contributors](https://i10git.cs.fau.de/pycodegen/lbmpy/-/
If you use lbmpy in a publication, please cite the following articles:
Overview:
- F. Hennig et al, Advanced Automatic Code Generation for Multiple Relaxation-Time Lattice Boltzmann Methods. SIAM Journal on Scientific Computing, 2023. https://doi.org/10.1137/22M1531348 ([Preprint](https://arxiv.org/abs/2211.02435))
- M. Bauer et al, lbmpy: Automatic code generation for efficient parallel lattice Boltzmann methods. Journal of Computational Science, 2021. https://doi.org/10.1016/j.jocs.2020.101269 ([Preprint](https://arxiv.org/abs/2001.11806))
Multiphase:
......
@@ -7,7 +7,7 @@
# conda env create -f conda_environment_user.yml
# . activate pystencils
#
# If you have CUDA installed and want to use your GPU, uncomment the last line to install pycuda
# If you have CUDA or ROCm installed and want to use your GPU, uncomment the last line to install cupy
#
# ----------------------------------------------------------------------------------------------------------------------
@@ -33,4 +33,4 @@ dependencies:
- pyevtk # VTK output for serial simulations
- blitzdb # file-based No-SQL database to store simulation results
- pystencils
#- pycuda # add this if you have CUDA installed
#- cupy # add this if you have CUDA or ROCm installed
@@ -19,9 +19,10 @@ try:
import pyximport
pyximport.install(language_level=3)
from lbmpy.phasefield.simplex_projection import simplex_projection_2d # NOQA
except ImportError:
pass
from lbmpy.phasefield.simplex_projection import simplex_projection_2d # NOQA
SCRIPT_FOLDER = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.abspath('lbmpy'))
@@ -42,33 +43,35 @@ def add_path_to_ignore(path):
collect_ignore = [os.path.join(SCRIPT_FOLDER, "doc", "conf.py"),
os.path.join(SCRIPT_FOLDER, "doc", "img", "mb_discretization", "maxwell_boltzmann_stencil_plot.py")]
add_path_to_ignore('pystencils_tests/benchmark')
add_path_to_ignore('_local_tmp')
try:
import pycuda
import cupy
except ImportError:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "lbmpy_tests/test_cpu_gpu_equivalence.py")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/test_cpu_gpu_equivalence.py")]
try:
import waLBerla
except ImportError:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "lbmpy_tests/test_datahandling_parallel.py")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/test_datahandling_parallel.py")]
try:
import blitzdb
except ImportError:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "lbmpy_tests/benchmark"),
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/benchmark"),
os.path.join(SCRIPT_FOLDER,
"lbmpy_tests/full_scenarios/kida_vortex_flow/scenario_kida_vortex_flow.py")]
"tests/full_scenarios/kida_vortex_flow/scenario_kida_vortex_flow.py"),
os.path.join(SCRIPT_FOLDER, "tests/full_scenarios/shear_wave/scenario_shear_wave.py"),
os.path.join(SCRIPT_FOLDER, "tests/test_json_serializer.py"),
os.path.join(SCRIPT_FOLDER, "src/lbmpy/db.py")]
if platform.system().lower() == 'windows':
collect_ignore += [os.path.join(SCRIPT_FOLDER, "lbmpy_tests/test_quicktests.py")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/test_quicktests.py")]
sver = sympy.__version__.split(".")
if int(sver[0]) == 1 and int(sver[1]) < 2:
add_path_to_ignore('lbmpy_tests/phasefield')
collect_ignore += [os.path.join(SCRIPT_FOLDER, "lbmpy_tests/test_n_phase_boyer_noncoupled.ipynb")]
add_path_to_ignore('tests/phasefield')
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/test_n_phase_boyer_noncoupled.ipynb")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, 'setup.py')]
@@ -104,18 +107,25 @@ class IPyNbTest(pytest.Item):
# disable matplotlib output
exec("import matplotlib.pyplot as p; "
"p.close('all'); "
"p.switch_backend('Template')", global_dict)
# in notebooks there is an implicit plt.show() - if this is not called a warning is shown when the next
# plot is created. This warning is suppressed here
# Also animations cannot be shown, which also leads to a warning.
exec("import warnings;"
"warnings.filterwarnings('ignore', 'Adding an axes using the same arguments as a previous.*');",
"warnings.filterwarnings('ignore', 'Adding an axes using the same arguments as a previous.*');"
"warnings.filterwarnings('ignore', 'Animation was deleted without rendering anything.*');",
global_dict)
with tempfile.NamedTemporaryFile() as f:
f.write(self.code.encode())
f.flush()
runpy.run_path(f.name, init_globals=global_dict, run_name=self.name)
# Close any open figures
exec("import matplotlib.pyplot as p; "
"p.close('all')", global_dict)
class IPyNbFile(pytest.File):
def collect(self):
@@ -138,10 +148,19 @@ class IPyNbFile(pytest.File):
pass
def pytest_collect_file(path, parent):
glob_exprs = ["*demo*.ipynb", "*tutorial*.ipynb", "test_*.ipynb"]
if any(path.fnmatch(g) for g in glob_exprs):
if pytest_version >= 50403:
return IPyNbFile.from_parent(fspath=path, parent=parent)
else:
return IPyNbFile(path, parent)
if pytest_version >= 70000:
# Since pytest 7.0, usage of `py.path.local` is deprecated and `pathlib.Path` should be used instead
import pathlib
def pytest_collect_file(file_path: pathlib.Path, parent):
glob_exprs = ["*demo*.ipynb", "*tutorial*.ipynb", "test_*.ipynb"]
if any(file_path.match(g) for g in glob_exprs):
return IPyNbFile.from_parent(path=file_path, parent=parent)
else:
def pytest_collect_file(path, parent):
glob_exprs = ["*demo*.ipynb", "*tutorial*.ipynb", "test_*.ipynb"]
if any(path.fnmatch(g) for g in glob_exprs):
if pytest_version >= 50403:
return IPyNbFile.from_parent(fspath=path, parent=parent)
else:
return IPyNbFile(path, parent)
@@ -33,7 +33,7 @@ version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', lbmpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = lbmpy.__version__
language = None
language = 'en'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
default_role = 'any'
pygments_style = 'sphinx'
......
File added
This diff is collapsed.
@@ -6,6 +6,9 @@ lbmpy
:maxdepth: 2
sphinx/tutorials.rst
sphinx/methods.rst
sphinx/boundary_conditions.rst
sphinx/forcemodels.rst
sphinx/api.rst
......
%% Cell type:code id: tags:
 
``` python
import pytest
pytest.importorskip('pycuda')
pytest.importorskip('cupy')
```
 
%% Output
 
<module 'pycuda' from '/home/markus/miniconda3/envs/pystencils/lib/python3.8/site-packages/pycuda/__init__.py'>
<module 'cupy' from '/home/markus/.local/lib/python3.11/site-packages/cupy/__init__.py'>
 
%% Cell type:code id: tags:
 
``` python
from lbmpy.session import *
from pystencils import Target
```
 
%% Cell type:markdown id: tags:
 
# Tutorial 01: Running pre-defined scenarios
 
 
*lbmpy* is a module to do Lattice Boltzmann simulations in Python.
 
In this tutorial you will get a broad overview of *lbmpy*'s features. We will run some of the included scenarios that come with *lbmpy*, like a channel flow and a lid driven cavity. This tutorial uses the simple, high-level API of *lbmpy*, while the following tutorials go into the low-level details.
 
The only prerequisite for this tutorial is basic Python and [numpy](http://www.numpy.org/) knowledge.
 
 
> #### What's special about *lbmpy* ?
> The LBM kernels (i.e. the functions that do all the computations) are not written in Python. Instead *lbmpy* generates optimized C or CUDA code for these kernels and compiles it using the *pystencils* module. In that way we get very fast LBM kernels, a lot faster than pure Python implementations and probably also faster than handwritten C kernels. This sounds complicated, but we don't have to care about all this background work, since all compiled kernels are available as Python functions again. Thus *lbmpy* can be used just like any other Python package.
 
 
## Lid Driven Cavity
 
We start by simulating a fluid in a rectangular box, where one wall (the lid) is moving. This is called a 'lid driven cavity'. At the stationary walls *no-slip* boundary conditions are set, which enforce zero velocity at the wall. At the lid there is a *velocity bounce back (UBB)* boundary condition, which sets zero normal velocity and a prescribed tangential velocity.
 
We don't have to set up all these boundary conditions manually since there is a function ``create_lid_driven_cavity`` that does all the work for us. This function takes the tangential velocity of the lid, which drives the flow. It is given in lattice units and to get a stable simulation it should be smaller than 0.1. The `relaxation_rate` determines the viscosity of the fluid: Small relaxation rates correspond to high viscosity. The `relaxation_rate` has to be between 0 and 2.
 
%% Cell type:code id: tags:
 
``` python
ldc_scenario = create_lid_driven_cavity(domain_size=(80,50), lid_velocity=0.01, relaxation_rate=1.95)
ldc_scenario.method
```
 
%% Output
 
<lbmpy.methods.momentbased.momentbasedmethod.MomentBasedLbMethod at 0x7fe7a8828d30>
 
%% Cell type:markdown id: tags:
 
The *run* method of the scenario runs the specified number of time steps. When you run the next cell, 2000 time steps are executed and the velocity field is plotted. You can run the cell multiple times to follow the time evolution.
 
%% Cell type:code id: tags:
 
``` python
ldc_scenario.run(2000)
plt.figure(dpi=200)
plt.vector_field(ldc_scenario.velocity_slice(), step=2);
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
### Variations to experiment with:
- simulate with a higher ``relaxation_rate`` (i.e. higher [Reynolds number](https://en.wikipedia.org/wiki/Reynolds_number)), keeping in mind that the ``relaxation_rate`` has to be smaller than 2. You might have to increase the ``domain_size`` to keep the simulation stable and run more time steps to reach the stationary solution. You might also want to increase the ``step`` parameter of the plot to reduce the number of arrows.
- run a 3D simulation by adding a third dimension size to ``domain_size``. The ``velocity`` property of the scenario is then a 3D field that has to be sliced before it can be plotted, e.g. ``ldc_scenario.velocity[:, :, 10, 0:2]`` selects a slice at ``z=10`` with the ``x`` and ``y`` components of the velocity (see the sketch below).
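
A minimal sketch of the 3D variation, assuming the same high-level API as in the 2D case above (the third domain dimension, relaxation rate and number of time steps are only illustrative):

``` python
# Hypothetical 3D lid driven cavity; parameter values are illustrative, not tuned.
ldc_3d = create_lid_driven_cavity(domain_size=(80, 50, 30), lid_velocity=0.01, relaxation_rate=1.6)
ldc_3d.run(2000)

plt.figure(dpi=200)
# take a slice at z=10 and keep only the x and y velocity components for the 2D vector plot
plt.vector_field(ldc_3d.velocity[:, :, 10, 0:2], step=2);
```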
 
%% Cell type:markdown id: tags:
 
## Fully periodic flow
 
Another simple scenario is a box with periodic boundary conditions in all directions. We initialize a non-zero velocity field, which decays over time due to viscous effects, since there are no driving forces or boundary conditions. In this example we initialize a shear flow where the fluid in one stripe moves to the left and everywhere else to the right. We perturb this initial velocity field with random noise to obtain an unstable shear layer.
 
%% Cell type:code id: tags:
 
``` python
width, height = 200, 60
velocity_magnitude = 0.05
init_vel = np.zeros((width,height,2))
# fluid moving to the right everywhere...
init_vel[:, :, 0] = velocity_magnitude
# ...except at a stripe in the middle, where it moves left
init_vel[:, height//3 : height//3*2, 0] = -velocity_magnitude
# small random y velocity component
init_vel[:, :, 1] = 0.1 * velocity_magnitude * np.random.rand(width,height)
 
plt.figure(dpi=200)
plt.vector_field(init_vel, step=4);
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
With this initial velocity field we create a simulation scenario:
 
%% Cell type:code id: tags:
 
``` python
shear_flow_scenario = create_fully_periodic_flow(initial_velocity=init_vel, relaxation_rate=1.97)
```
 
%% Cell type:code id: tags:
 
``` python
shear_flow_scenario.run(500)
plt.figure(dpi=200)
plt.vector_field(shear_flow_scenario.velocity[:, :])
```
 
%% Output
 
<matplotlib.quiver.Quiver at 0x7fe7a80e2bb0>
 
 
%% Cell type:markdown id: tags:
 
Instead of plotting a single point in time we create an animation. For this we first have to
define an update function that runs a few time steps and returns the field to plot.
This function is called ``iterations`` times, then the animation stops.
To cancel the animation while it is running, hit the stop button in the IPython menu bar.
 
%% Cell type:code id: tags:
 
``` python
# def next_frame():
# shear_flow_scenario.run(50)
# return shear_flow_scenario.velocity[:, :]
# plt.figure(dpi=200)
# display_animation(plt.vector_field_animation(next_frame, step=2), iterations=50)
```
 
%% Cell type:markdown id: tags:
 
Vortices are created between the two layers. This phenomenon is called the [Kelvin-Helmholtz instability](https://en.wikipedia.org/wiki/Kelvin%E2%80%93Helmholtz_instability). For a better visualization of the vortices we can plot the [vorticity](https://en.wikipedia.org/wiki/Vorticity) of the velocity field:
 
%% Cell type:code id: tags:
 
``` python
plt.figure(dpi=200)
plt.scalar_field(vorticity_2d(shear_flow_scenario.velocity[:, :]));
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
### Variations to experiment with:
- make an animation of the vorticity (see the sketch after this list)
- increase the ``relaxation_rate``. What is the maximum relaxation rate you can get before the simulation gets unstable?
- use an entropic method to get to higher relaxation rates:
 
```
entropic_shear_flow_scenario = create_fully_periodic_flow(initial_velocity=init_vel, method='trt-kbc-n4',
entropic=True, compressible=True)
entropic_shear_flow_scenario.kernel_params['omega_0'] = 1.999
```
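
For the first variation, here is a sketch of a vorticity animation. It combines the (commented-out) animation pattern above with ``vorticity_2d`` and assumes a ``scalar_field_animation`` helper analogous to the ``vector_field_animation`` used earlier:

``` python
# Hypothetical vorticity animation; assumes plt.scalar_field_animation exists,
# analogous to the plt.vector_field_animation used in the commented cell above.
def next_vorticity_frame():
    shear_flow_scenario.run(50)
    return vorticity_2d(shear_flow_scenario.velocity[:, :])

plt.figure(dpi=200)
display_animation(plt.scalar_field_animation(next_vorticity_frame), iterations=50)
```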
 
%% Cell type:markdown id: tags:
 
## Channel
 
In the last part of this tutorial you will learn how to modify the boundary handling of scenarios.
For this, we set up a channel flow and place some objects into it.

The channel is driven by a constant body force, e.g. gravity, acting in the x direction. Along the flow direction periodic boundary conditions are used, whereas the walls are modeled with a *no-slip* boundary condition.
 
%% Cell type:code id: tags:
 
``` python
channel_scenario = create_channel(domain_size=(300, 100), force=1e-7, initial_velocity=(0.025, 0),
relaxation_rate=1.97,
config=CreateKernelConfig(target=Target.GPU))
```
 
%% Cell type:code id: tags:
 
``` python
channel_scenario._lbmKernels[0].ast
```
 
%% Output
 
KernelFunction kernel([_data_force_driven_channel_pdfSrc, _data_force_driven_channel_pdfTmp])
 
%% Cell type:markdown id: tags:
 
As in the last scenario, we specify an initial velocity. Instead of passing a velocity value for every cell, we specify a constant for the whole domain.
 
%% Cell type:code id: tags:
 
``` python
channel_scenario.run(10000)
plt.figure(dpi=200)
plt.vector_field(channel_scenario.velocity[:, :], step=4);
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
This is a 2D [Poiseuille flow](https://en.wikipedia.org/wiki/Hagen%E2%80%93Poiseuille_equation) where a parabolic profile of the x velocity is expected for the stationary case.
 
%% Cell type:code id: tags:
 
``` python
vel_profile = channel_scenario.velocity[0.5, :, 0]
plt.figure(dpi=200)
plt.plot(vel_profile);
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
The stationary state is not yet reached; you can run more time steps and see how the profile gets closer to a parabola.
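
As a rough check, here is a hedged sketch comparing the computed profile with the analytic Poiseuille parabola $u(y) = \frac{F}{2\nu} y (H - y)$. It assumes the ``force`` parameter acts as a body acceleration in lattice units and uses the lattice viscosity $\nu = \frac{1}{3}\left(\frac{1}{\omega} - \frac{1}{2}\right)$; height, force and relaxation rate are taken from the scenario above:

``` python
# Hedged sketch: compare with the analytic Poiseuille profile (values from the scenario above).
height, force, omega = 100, 1e-7, 1.97
nu = (1 / omega - 0.5) / 3                 # lattice viscosity from the relaxation rate
y = np.arange(height) + 0.5                # cell centers across the channel height
analytic_profile = force / (2 * nu) * y * (height - y)

plt.figure(dpi=200)
plt.plot(vel_profile, label="simulation")
plt.plot(analytic_profile, "--", label="analytic")
plt.legend();
```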
 
 
### Modifying boundaries
 
Let's first view the current boundary configuration:
 
%% Cell type:code id: tags:
 
``` python
def draw_boundary_setup():
fig = plt.figure(figsize=(10.0, 3.0), dpi=200)
plt.boundary_handling(channel_scenario.boundary_handling)
plt.axis('off');
 
draw_boundary_setup()
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
In the plot above you can see the *no-slip* boundaries at the top and bottom of the channel; otherwise the channel is empty.
 
Since an empty channel is pretty boring, let's put an obstacle in it. We start with the simplest option: a rectangular, solid block. The rectangle is specified as a slice, similar to advanced *numpy* indexing. The ``make_slice`` function can also take ``float`` indices to specify the slice relative to the domain size. The following cell puts an obstacle into the domain that spans one third of the channel height.
Additionally, we have to pass an object that defines what should happen at the boundary (here ``NoSlip``).
By default, the name of the boundary object is also the boundary name.
 
%% Cell type:code id: tags:
 
``` python
from lbmpy.boundaries import NoSlip
 
wall = NoSlip()
channel_scenario.boundary_handling.set_boundary(wall, make_slice[0.2:0.25, 0:0.333])
```
 
%% Output
 
2
 
%% Cell type:markdown id: tags:
 
When plotting the boundary handling again, we see the rectangular obstacle:
 
%% Cell type:code id: tags:
 
``` python
draw_boundary_setup()
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
When setting and plotting boundaries, the domain is actually 2 cells larger than originally specified. These so-called 'ghost layer' slices are one cell thick and are automatically added at each domain boundary. They can be used to set boundaries and are also used for communication when running distributed-memory parallel simulations.
When specifying the slice, keep in mind that the domain is actually slightly larger. When plotting the simulation results, the ghost layers are automatically removed.
 
To convert a cell back to ``domain``, the same method can be used. To demonstrate this, we cut a piece out of the obstacle:
 
%% Cell type:code id: tags:
 
``` python
channel_scenario.boundary_handling.set_boundary('domain', make_slice[0.2:0.235, 0.0333:0.3])
fig = plt.figure(figsize=(10.0, 3.0), dpi=200)
plt.boundary_handling(channel_scenario.boundary_handling)
plt.axis('off');
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
To add non-rectangular obstacles one can also pass a mask array, which is ``True`` for cells where the boundary should be set. This is demonstrated in the next cell, where a sphere is placed in the channel.
 
%% Cell type:code id: tags:
 
``` python
def set_sphere(x, y):
shape = channel_scenario.domain_size
mid = (0.5 * shape[0], 0.5 * shape[1])
radius = 13
return (x-mid[0])**2 + (y-mid[1])**2 < radius**2
 
channel_scenario.boundary_handling.set_boundary(wall, mask_callback=set_sphere)
draw_boundary_setup()
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
Now we run the simulation to see the flow around the obstacles:
 
%% Cell type:code id: tags:
 
``` python
channel_scenario.run(10000)
plt.figure(dpi=200)
plt.vector_field_magnitude(channel_scenario.velocity[:,:]);
```
 
%% Output
 
 
%% Cell type:markdown id: tags:
 
### Variations to experiment with:
 
- increase the Reynolds number. You might also have to increase the resolution and/or use a more advanced method like a cumulant or entropically stabilized scheme (see the sketch below).
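
A hedged sketch of this variation, assuming the scenario functions forward ``method`` and related options to the LB method creation in the same way as ``create_fully_periodic_flow`` did in the entropic example above (all numeric values are illustrative):

``` python
# Hypothetical channel with a cumulant collision operator at a higher relaxation rate.
# Assumes create_channel accepts method/compressible keyword arguments like
# create_fully_periodic_flow above; domain size, force and relaxation rate are illustrative.
cumulant_channel = create_channel(domain_size=(600, 200), force=1e-7,
                                  method='cumulant', compressible=True,
                                  relaxation_rate=1.999)
cumulant_channel.run(10000)

plt.figure(dpi=200)
plt.vector_field_magnitude(cumulant_channel.velocity[:, :]);
```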
@@ -673,7 +673,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -687,7 +687,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.2"
"version": "3.11.4"
},
"vscode": {
"interpreter": {
%% Cell type:code id: tags:
 
``` python
from lbmpy.session import *
from lbmpy.relaxationrates import *
```
 
%% Cell type:markdown id: tags:
 
# Tutorial 06: Modifying a LBM method: Smagorinsky model
 
In this demo, we show how to modify a lattice Boltzmann method. As an example, we are going to add a simple turbulence model by introducing a rule that computes the relaxation parameter locally, depending on the local strain rate tensor. The Smagorinsky model is also implemented directly in *lbmpy*; here, however, we take the manual approach to demonstrate how an LB method can be changed in *lbmpy*.
 
## 1) Theoretical background
 
Since we have *sympy* available, we want to start out with the basic model equations and derive the concrete equations ourselves. This approach is less error-prone, since the calculations are done by the computer algebra system, and it is often also more general and easier to understand.
 
### a) Smagorinsky model
 
The basic idea of the Smagorinsky turbulence model is to save compute time by not resolving the smallest eddies of the flow on the grid, but modeling them by an artificial dissipation term.
The energy dissipation of small-scale vortices is taken into account by introducing a "turbulent viscosity". This additional viscosity depends on local flow properties, namely the local shear rates: the larger the local shear rates, the higher the turbulent viscosity and the more artificial dissipation is added.
 
The total viscosity is
 
$$\nu_{total} = \nu_0 + \underbrace{(C_S \Delta)^2 |S|}_{\nu_{t}}$$
 
where $\nu_0$ is the normal viscosity and $C_S$ is the Smagorinsky constant, not to be confused with the speed of sound! Typical values of the Smagorinsky constant are between 0.1 and 0.2. The filter length $\Delta$ is chosen as 1 in lattice coordinates.
 
The quantity $|S|$ is computed from the local strain rate tensor $S$ that is given by
 
$$S_{ij} = \frac{1}{2} \left( \partial_i u_j + \partial_j u_i \right)$$
 
and
 
$$|S| = \sqrt{2 S_{ij} S_{ij}}$$
 
 
### b) LBM implementation of Smagorinsky model
 
To add the Smagorinsky model to an LB scheme, one first has to compute the strain rate tensor $S_{ij}$ in each cell and the turbulent viscosity $\nu_t$ from it. Then the local relaxation rate has to be adapted to match the total viscosity $\nu_{total}$ instead of the standard viscosity $\nu_0$.
 
A fortunate property of LB methods is that the strain rate tensor can be computed locally from the non-equilibrium part of the distribution function. This is somewhat surprising, since the strain rate tensor contains first-order derivatives. The strain rate tensor can be obtained by
 
$$S_{ij} = - \frac{3 \omega_s}{2 \rho_{(0)}} \Pi_{ij}^{(neq)}$$
 
where $\omega_s$ is the relaxation rate that determines the viscosity, $\rho_{(0)}$ is $\rho$ in compressible models and $1$ for incompressible schemes.
$\Pi_{ij}^{(neq)}$ is the second order moment tensor of the non-equilibrium part of the distribution functions $f^{(neq)} = f - f^{(eq)}$ and can be computed as
 
$$\Pi_{ij}^{(neq)} = \sum_q c_{qi} c_{qj} \; f_q^{(neq)}$$
 
%% Cell type:markdown id: tags:
 
We first have to find a closed form for $S_{ij}$, since in the formula above it depends on $\omega$, which should be adapted according to $S_{ij}$.
So we compute $\omega$ and insert it into the formula for $S$:
 
 
%% Cell type:code id: tags:
 
``` python
τ_0, ρ, ω, ω_total, ω_0 = sp.symbols("tau_0 rho omega omega_total omega_0", positive=True, real=True)
ν_0, C_S, S, Π = sp.symbols("nu_0, C_S, |S|, Pi", positive=True, real=True)
 
Seq = sp.Eq(S, 3 * ω / 2 * Π)
Seq
```
 
%% Output
 
$\displaystyle |S| = \frac{3 \Pi \omega}{2}$
 
%% Cell type:markdown id: tags:
 
Note that we left off the minus sign, since we took the absolute value of both tensors. The absolute value is defined as above, with the factor of two inside the square root. The $\rho_{(0)}$ has been left out, remembering that $\Pi^{(neq)}$ has to be divided by $\rho$ in the case of compressible models.
 
Next, we compute $\omega$ from the total viscosity as given by the Smagorinsky equation:
 
%% Cell type:code id: tags:
 
``` python
relaxation_rate_from_lattice_viscosity(ν_0 + C_S ** 2 * S)
```
 
%% Output
 
$\displaystyle \frac{2}{6 C_{S}^{2} |S| + 6 \nu_{0} + 1}$
 
%% Cell type:markdown id: tags:
 
and insert it into the equation for $|S|$
 
%% Cell type:code id: tags:
 
``` python
Seq2 = Seq.subs(ω, relaxation_rate_from_lattice_viscosity(ν_0 + C_S **2 * S ))
Seq2
```
 
%% Output
 
$\displaystyle |S| = \frac{3 \Pi}{6 C_{S}^{2} |S| + 6 \nu_{0} + 1}$
 
%% Cell type:markdown id: tags:
 
This equation contains only known quantities, so we can solve it for $|S|$.
Additionally, we substitute the lattice viscosity $\nu_0$ with the original relaxation time $\tau_0$; the resulting equations are simpler when using relaxation times instead of rates.
 
%% Cell type:code id: tags:
 
``` python
solveRes = sp.solve(Seq2, S)
assert len(solveRes) == 1
SVal = solveRes[0]
SVal = SVal.subs(ν_0, lattice_viscosity_from_relaxation_rate(1 / τ_0)).expand()
SVal
```
 
%% Output
 
$\displaystyle - \frac{\tau_{0}}{6 C_{S}^{2}} + \frac{\sqrt{72 C_{S}^{2} \Pi + 4 \tau_{0}^{2}}}{12 C_{S}^{2}}$
 
%% Cell type:markdown id: tags:
 
Knowing $|S|$, we can compute the total relaxation time using
 
$$\nu_{total} = \nu_0 +C_S^2 |S|$$
 
%% Cell type:code id: tags:
 
``` python
τ_val = 1 / (relaxation_rate_from_lattice_viscosity(lattice_viscosity_from_relaxation_rate(1/τ_0) + C_S**2 * SVal)).cancel()
τ_val
```
 
%% Output
 
$\displaystyle \frac{\tau_{0}}{2} + \frac{\sqrt{18 C_{S}^{2} \Pi + \tau_{0}^{2}}}{2}$
 
%% Cell type:markdown id: tags:
 
To compute $\Pi^{(neq)}$ we use the following functions:
 
%% Cell type:code id: tags:
 
``` python
def second_order_moment_tensor(function_values, stencil):
    """Returns the tensor Π_ij = Σ_q c_qi c_qj f_q for the given pdf values and stencil."""
    assert len(function_values) == len(stencil)
    dim = len(stencil[0])
    return sp.Matrix(dim, dim, lambda i, j: sum(c[i] * c[j] * f for f, c in zip(function_values, stencil)))


def frobenius_norm(matrix, factor=1):
    """Returns sqrt(factor · Σ_ij A_ij²); with factor=2 this is |S| as defined above."""
    return sp.sqrt(sum(i*i for i in matrix) * factor)
```
 
%% Cell type:markdown id: tags:
 
In the next cell we construct equations that take a standard relaxation rate $\omega_0$ and compute a new relaxation rate $\omega_{total}$ according to the Smagorinsky model, using the `τ_val` computed above.
 
%% Cell type:code id: tags:
 
``` python
def smagorinsky_equations(ω_0, ω_total, method):
f_neq = sp.Matrix(method.pre_collision_pdf_symbols) - method.get_equilibrium_terms()
return [sp.Eq(τ_0, 1 / ω_0),
sp.Eq(Π, frobenius_norm(second_order_moment_tensor(f_neq, method.stencil), factor=2)),
sp.Eq(ω_total, 1 / τ_val)]
 
 
smagorinsky_equations(ω_0, ω_total, create_lb_method())
```
 
%% Output
 
$\displaystyle \left[ \tau_{0} = \frac{1}{\omega_{0}}, \ \Pi = \sqrt{4 \left(- f_{5} + f_{6} + f_{7} - f_{8} - u_{0} u_{1}\right)^{2} + 2 \left(- \frac{\delta_{\rho}}{3} + f_{1} + f_{2} + f_{5} + f_{6} + f_{7} + f_{8} - u_{1}^{2}\right)^{2} + 2 \left(- \frac{\delta_{\rho}}{3} + f_{3} + f_{4} + f_{5} + f_{6} + f_{7} + f_{8} - u_{0}^{2}\right)^{2}}, \ \omega_{total} = \frac{1}{\frac{\tau_{0}}{2} + \frac{\sqrt{18 C_{S}^{2} \Pi + \tau_{0}^{2}}}{2}}\right]$
 
%% Cell type:markdown id: tags:
 
## 2) Application: Channel flow
 
Next we modify a *lbmpy* scenario to use the Smagorinsky model.
We create an MRT method, where we fix all relaxation rates except the one that controls the viscosity.
 
%% Cell type:code id: tags:
 
``` python
lbm_conifg = LBMConfig(stencil=Stencil.D2Q9, method=Method.MRT, force=(1e-6, 0),
lbm_config = LBMConfig(stencil=Stencil.D2Q9, method=Method.MRT, force=(1e-6, 0),
force_model=ForceModel.LUO, relaxation_rates=[ω, 1.9, 1.9, 1.9])
 
method = create_lb_method(lbm_config=lbm_conifg)
method = create_lb_method(lbm_config=lbm_config)
method
```
 
%% Output
 
<lbmpy.methods.momentbased.momentbasedmethod.MomentBasedLbMethod at 0x7f745d5c3b20>
 
%% Cell type:markdown id: tags:
 
Only the collision rule has to be changed. Thus we first construct the collision rule, add the Smagorinsky equations, and create a normal scenario from the modified collision rule. To prevent the macroscopic quantity symbols in the Smagorinsky equations from being optimized away, we must disable simplification:
 
%% Cell type:code id: tags:
 
``` python
optimization = {'simplification' : False}
collision_rule = create_lb_collision_rule(lb_method=method, optimization=optimization)
collision_rule = collision_rule.new_with_substitutions({ω: ω_total})
 
collision_rule.subexpressions += smagorinsky_equations(ω, ω_total, method)
collision_rule.topological_sort(sort_subexpressions=True, sort_main_assignments=False)
collision_rule
```
 
%% Output
 
AssignmentCollection: d_6, d_5, d_2, d_3, d_0, d_1, d_7, d_4, d_8 <- f(f_2, f_4, f_7, f_5, omega_total, f_6, f_3, f_1, f_0, f_8)
 
%% Cell type:markdown id: tags:
 
In the next cell the collision rule is simplified by extracting common subexpressions
 
%% Cell type:code id: tags:
 
``` python
from pystencils.simp import sympy_cse
#collision_rule = sympy_cse(collision_rule)
```
 
%% Cell type:markdown id: tags:
 
A channel scenario can be created from a modified collision rule:
 
%% Cell type:code id: tags:
 
``` python
ch = create_channel((300, 100), force=1e-6, collision_rule=collision_rule,
kernel_params={"C_S": 0.12, "omega": 1.999})
```
 
%% Cell type:code id: tags:
 
``` python
#show_code(ch.ast)
```
 
%% Cell type:code id: tags:
 
``` python
ch.run(5000)
```
 
%% Cell type:code id: tags:
 
``` python
plt.figure(dpi=200)
plt.vector_field(ch.velocity[:, :])
np.max(ch.velocity[:, :])
```
 
%% Output
 
$\displaystyle 0.00504266401703371$
 
 
%% Cell type:markdown id: tags:
 
## Appendix: Strain rate tensor formula from Chapman Enskog
 
The connection between $S_{ij}$ and $\Pi_{ij}^{(neq)}$ can be seen using a Chapman-Enskog expansion. Since *lbmpy* has a module that automatically does this expansion, we can have a look at it:
 
%% Cell type:code id: tags:
 
``` python
from lbmpy.chapman_enskog import ChapmanEnskogAnalysis, CeMoment
from lbmpy.chapman_enskog.chapman_enskog import remove_higher_order_u
compressible_model = create_lb_method(stencil=Stencil.D2Q9, compressible=True, zero_centered=False)
incompressible_model = create_lb_method(stencil=Stencil.D2Q9, compressible=False, zero_centered=False)
 
ce_compressible = ChapmanEnskogAnalysis(compressible_model)
ce_incompressible = ChapmanEnskogAnalysis(incompressible_model)
```
 
%% Cell type:markdown id: tags:
 
The Chapman-Enskog analysis yields expressions for the moment

$\Pi = \Pi^{(eq)} + \epsilon \Pi^{(1)} + \epsilon^2 \Pi^{(2)} + \cdots$

and the strain rate tensor is related to $\Pi^{(1)}$. However, the best approximation we have for $\Pi^{(1)}$ is $\Pi^{(neq)}$. For details, see the paper "Shear stress in lattice Boltzmann simulations" by Krüger, Varnik and Raabe from 2009.

Let's look at the values of $\Pi^{(1)}$ obtained from the Chapman-Enskog expansion:
 
%% Cell type:code id: tags:
 
``` python
Π_1_xy = CeMoment("\\Pi", moment_tuple=(1,1), superscript=1)
Π_1_xx = CeMoment("\\Pi", moment_tuple=(2,0), superscript=1)
Π_1_yy = CeMoment("\\Pi", moment_tuple=(0,2), superscript=1)
components = (Π_1_xx, Π_1_yy, Π_1_xy)
 
Π_1_xy_val = ce_compressible.higher_order_moments[Π_1_xy]
Π_1_xy_val
```
 
%% Output
 
$\displaystyle \frac{\rho u_{0}^{2} {\partial^{(1)}_{0} u_{1}} + 2 \rho u_{0} u_{1} {\partial^{(1)}_{0} u_{0}} + 2 \rho u_{0} u_{1} {\partial^{(1)}_{1} u_{1}} + \rho u_{1}^{2} {\partial^{(1)}_{1} u_{0}} - \frac{\rho {\partial^{(1)}_{1} u_{0}}}{3} - \frac{\rho {\partial^{(1)}_{0} u_{1}}}{3} + u_{0}^{2} u_{1} {\partial^{(1)}_{0} \rho} + u_{0} u_{1}^{2} {\partial^{(1)}_{1} \rho}}{\omega}$
 
%% Cell type:markdown id: tags:
 
This term contains lots of higher-order error terms. We assume that $u$ is small in lattice coordinates, so if we neglect all terms in $u$ that are quadratic or higher, we get:
 
%% Cell type:code id: tags:
 
``` python
remove_higher_order_u(Π_1_xy_val.expand())
```
 
%% Output
 
$\displaystyle - \frac{\rho {\partial^{(1)}_{1} u_{0}}}{3 \omega} - \frac{\rho {\partial^{(1)}_{0} u_{1}}}{3 \omega}$
 
%% Cell type:markdown id: tags:
 
Putting these steps together into a function, we can display them for the different cases quickly:
 
%% Cell type:code id: tags:
 
``` python
def get_Π_1(ce_analysis, component):
val = ce_analysis.higher_order_moments[component]
return remove_higher_order_u(val.expand())
```
 
%% Cell type:markdown id: tags:
 
Compressible case:
 
%% Cell type:code id: tags:
 
``` python
tuple(get_Π_1(ce_compressible, Pi) for Pi in components)
```
 
%% Output
 
$\displaystyle \left( - \frac{2 \rho {\partial^{(1)}_{0} u_{0}}}{3 \omega}, \ - \frac{2 \rho {\partial^{(1)}_{1} u_{1}}}{3 \omega}, \ - \frac{\rho {\partial^{(1)}_{1} u_{0}}}{3 \omega} - \frac{\rho {\partial^{(1)}_{0} u_{1}}}{3 \omega}\right)$
 
%% Cell type:markdown id: tags:
 
Incompressible case:
 
%% Cell type:code id: tags:
 
``` python
tuple(get_Π_1(ce_incompressible, Pi) for Pi in components)
```
 
%% Output
 
$\displaystyle \left( \frac{2 u_{0} {\partial^{(1)}_{0} \rho}}{3 \omega} - \frac{2 {\partial^{(1)}_{0} u_{0}}}{3 \omega}, \ \frac{2 u_{1} {\partial^{(1)}_{1} \rho}}{3 \omega} - \frac{2 {\partial^{(1)}_{1} u_{1}}}{3 \omega}, \ \frac{u_{0} {\partial^{(1)}_{1} \rho}}{3 \omega} + \frac{u_{1} {\partial^{(1)}_{0} \rho}}{3 \omega} - \frac{{\partial^{(1)}_{1} u_{0}}}{3 \omega} - \frac{{\partial^{(1)}_{0} u_{1}}}{3 \omega}\right)$
 
%% Cell type:markdown id: tags:
 
The incompressible case has some terms $\partial \rho$ which are zero, since $\rho$ is assumed constant.

Leaving out the error terms, we finally obtain:


$$\Pi_{ij}^{(neq)} \approx \Pi_{ij}^{(1)} = -\frac{2 \rho_{(0)}}{3 \omega_s} S_{ij} = -\frac{\rho_{(0)}}{3 \omega_s} \left( \partial_i u_j + \partial_j u_i \right)$$
......
This diff is collapsed.
This diff is collapsed.