Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 3687 additions and 326 deletions
# ---- src/lbmpy/__init__.py ----

from .creationfunctions import (
    create_lb_ast,
    create_lb_collision_rule,
    create_lb_function,
    create_lb_method,
    create_lb_update_rule,
    LBMConfig,
    LBMOptimisation,
)
from .enums import Stencil, Method, ForceModel, CollisionSpace, SubgridScaleModel
from .lbstep import LatticeBoltzmannStep
from .macroscopic_value_kernels import (
    pdf_initialization_assignments,
    macroscopic_values_getter,
    strain_rate_tensor_getter,
    compile_macroscopic_values_getter,
    compile_macroscopic_values_setter,
    create_advanced_velocity_setter_collision_rule,
)
from .maxwellian_equilibrium import get_weights
from .relaxationrates import (
    relaxation_rate_from_lattice_viscosity,
    lattice_viscosity_from_relaxation_rate,
    relaxation_rate_from_magic_number,
)
from .scenarios import create_lid_driven_cavity, create_fully_periodic_flow
from .stencils import LBStencil

__all__ = [
    "create_lb_ast",
    "create_lb_collision_rule",
    "create_lb_function",
    "create_lb_method",
    "create_lb_update_rule",
    "LBMConfig",
    "LBMOptimisation",
    "Stencil",
    "Method",
    "ForceModel",
    "CollisionSpace",
    "SubgridScaleModel",
    "LatticeBoltzmannStep",
    "pdf_initialization_assignments",
    "macroscopic_values_getter",
    "strain_rate_tensor_getter",
    "compile_macroscopic_values_getter",
    "compile_macroscopic_values_setter",
    "create_advanced_velocity_setter_collision_rule",
    "get_weights",
    "relaxation_rate_from_lattice_viscosity",
    "lattice_viscosity_from_relaxation_rate",
    "relaxation_rate_from_magic_number",
    "create_lid_driven_cavity",
    "create_fully_periodic_flow",
    "LBStencil",
]

from . import _version
__version__ = _version.get_versions()['version']
# ---- src/lbmpy/_compat.py ----

from pystencils import __version__ as ps_version

# Determine if we're running pystencils 1.x or 2.x
version_tokens = ps_version.split(".")
PYSTENCILS_VERSION_MAJOR = int(version_tokens[0])
IS_PYSTENCILS_2 = PYSTENCILS_VERSION_MAJOR == 2

if IS_PYSTENCILS_2:
    from pystencils.defaults import DEFAULTS

    def get_loop_counter_symbol(coord: int):
        return DEFAULTS.spatial_counters[coord]

    def get_supported_instruction_sets():
        from pystencils import Target

        vector_targets = Target.available_vector_cpu_targets()
        isas = []
        for target in vector_targets:
            tokens = target.name.split("_")
            isas.append(tokens[-1].lower())
        return isas
else:
    from pystencils.backends.simd_instruction_sets import (
        get_supported_instruction_sets as get_supported_instruction_sets_,
    )

    get_supported_instruction_sets = get_supported_instruction_sets_

    def get_loop_counter_symbol(coord: int):
        from pystencils.astnodes import LoopOverCoordinate

        return LoopOverCoordinate.get_loop_counter_symbol(coord)


def import_guard_pystencils1(feature):
    if IS_PYSTENCILS_2:
        raise ImportError(
            f"The following feature is not yet available when running pystencils 2.x: {feature}"
        )
    return True
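A minimal usage sketch of the shim above (illustrative, version-dependent output):

    from lbmpy._compat import IS_PYSTENCILS_2, get_loop_counter_symbol, \
        get_supported_instruction_sets

    ctr_x = get_loop_counter_symbol(0)       # x loop counter on either pystencils major version
    isas = get_supported_instruction_sets()  # e.g. ['sse', 'avx'], depending on the host CPU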
# ---- src/lbmpy/_version.py ----

# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain.
# Generated by versioneer-0.29
# https://github.com/python-versioneer/python-versioneer

"""Git implementation of _version.py."""

import errno
import os
import re
import subprocess
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple
import functools


def get_keywords() -> Dict[str, str]:
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords


class VersioneerConfig:
    """Container for Versioneer configuration parameters."""

    VCS: str
    style: str
    tag_prefix: str
    parentdir_prefix: str
    versionfile_source: str
    verbose: bool


def get_config() -> VersioneerConfig:
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = "release/"
    cfg.parentdir_prefix = "lbmpy-"
    cfg.versionfile_source = "src/lbmpy/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""


LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}


def register_vcs_handler(vcs: str, method: str) -> Callable:  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f: Callable) -> Callable:
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate


def run_command(
    commands: List[str],
    args: List[str],
    cwd: Optional[str] = None,
    verbose: bool = False,
    hide_stderr: bool = False,
    env: Optional[Dict[str, str]] = None,
) -> Tuple[Optional[str], Optional[int]]:
    """Call the given command(s)."""
    assert isinstance(commands, list)
    process = None

    popen_kwargs: Dict[str, Any] = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    for command in commands:
        try:
            dispcmd = str([command] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None), **popen_kwargs)
            break
        except OSError as e:
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(
    parentdir_prefix: str,
    root: str,
    verbose: bool,
) -> Dict[str, Any]:
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory.
    """
    rootdirs = []

    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        rootdirs.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs: str) -> Dict[str, str]:
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords: Dict[str, str] = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(
keywords: Dict[str, str],
tag_prefix: str,
verbose: bool,
) -> Dict[str, Any]:
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(
tag_prefix: str,
root: str,
verbose: bool,
runner: Callable = run_command
) -> Dict[str, Any]:
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=not verbose)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, [
"describe", "--tags", "--dirty", "--always", "--long",
"--match", f"{tag_prefix}[[:digit:]]*"
], cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces: Dict[str, Any] = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces: Dict[str, Any]) -> str:
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces: Dict[str, Any]) -> str:
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
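# Worked example (hash illustrative): for
#     pieces = {"closest-tag": "1.3.7", "distance": 4, "short": "9f2a1c3", "dirty": True}
# render_pep440 yields "1.3.7+4.g9f2a1c3.dirty"; with distance 0 and a clean
# tree it yields plain "1.3.7".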
def render_pep440_branch(pieces: Dict[str, Any]) -> str:
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0"
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]:
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or None if no post-release segment
    is present).
    """
    vc = str.split(ver, ".post")
    return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
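# Worked examples:
#     pep440_split_post("1.2.post3")  ->  ("1.2", 3)
#     pep440_split_post("1.2")        ->  ("1.2", None)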
def render_pep440_pre(pieces: Dict[str, Any]) -> str:
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    if pieces["closest-tag"]:
        if pieces["distance"]:
            # update the post release segment
            tag_version, post_version = pep440_split_post(pieces["closest-tag"])
            rendered = tag_version
            if post_version is not None:
                rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
            else:
                rendered += ".post0.dev%d" % (pieces["distance"])
        else:
            # no commits, use the tag as the version
            rendered = pieces["closest-tag"]
    else:
        # exception #1
        rendered = "0.post0.dev%d" % pieces["distance"]
    return rendered


def render_pep440_post(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered
def render_pep440_post_branch(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_old(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces: Dict[str, Any]) -> str:
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces: Dict[str, Any]) -> str:
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]:
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-branch":
        rendered = render_pep440_branch(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-post-branch":
        rendered = render_pep440_post_branch(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions() -> Dict[str, Any]:
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
# ---- src/lbmpy/advanced_streaming/__init__.py ----

from .indexing import BetweenTimestepsIndexing
from .communication import get_communication_slices, LBMPeriodicityHandling
from .utility import Timestep, get_accessor, is_inplace, get_timesteps, \
    numeric_index, numeric_offsets, inverse_dir_index, AccessPdfValues

__all__ = ['BetweenTimestepsIndexing',
           'get_communication_slices', 'LBMPeriodicityHandling',
           'Timestep', 'get_accessor', 'is_inplace', 'get_timesteps',
           'numeric_index', 'numeric_offsets', 'inverse_dir_index', 'AccessPdfValues']
# ---- src/lbmpy/advanced_streaming/communication.py ----

import itertools
from itertools import chain

from pystencils import CreateKernelConfig, Field, Assignment, AssignmentCollection, Target
from pystencils.slicing import (
    shift_slice,
    get_slice_before_ghost_layer,
    normalize_slice,
)
from pystencils.datahandling import SerialDataHandling

from lbmpy.advanced_streaming.utility import (
    is_inplace,
    get_accessor,
    numeric_index,
    Timestep,
    get_timesteps,
    numeric_offsets,
)
def _trim_slice_in_direction(slices, direction):
    assert len(slices) == len(direction)

    result = []
    for s, d in zip(slices, direction):
        if isinstance(s, int):
            result.append(s)
            continue
        start = s.start + 1 if d == -1 else s.start
        stop = s.stop - 1 if d == 1 else s.stop
        result.append(slice(start, stop, s.step))

    return tuple(result)


def _extend_dir(direction):
    if len(direction) == 0:
        yield tuple()
    elif direction[0] == 0:
        for d in [-1, 0, 1]:
            for rest in _extend_dir(direction[1:]):
                yield (d, ) + rest
    else:
        for rest in _extend_dir(direction[1:]):
            yield (direction[0], ) + rest


def _get_neighbour_transform(direction, ghost_layers):
    return tuple(d * (ghost_layers + 1) for d in direction)


def _fix_length_one_slices(slices):
    """Slices of length one are replaced by their start value for correct periodic shifting"""
    if isinstance(slices, int):
        return slices
    elif isinstance(slices, slice):
        if slices.stop is not None and abs(slices.start - slices.stop) == 1:
            return slices.start
        elif slices.stop is None and slices.start == -1:
            return -1  # [-1:] also has length one
        else:
            return slices
    else:
        return tuple(_fix_length_one_slices(s) for s in slices)


class LBMPeriodicityHandling:

    def __init__(
        self,
        stencil,
        data_handling,
        pdf_field_name,
        streaming_pattern="pull",
        ghost_layers=1,
        cupy_direct_copy=True,
    ):
        """
        Periodicity Handling for Lattice Boltzmann Streaming.

        **On the usage with cuda:**
        - cupy allows the copying of sliced arrays within device memory using the numpy syntax,
        e.g. `dst[:,0] = src[:,-1]`. In this implementation, this is the default for periodicity
        handling. Alternatively, if you set `cupy_direct_copy=False`, GPU kernels are generated and
        compiled. The compiled kernels are almost twice as fast in execution as cupy array copying,
        but especially for large stencils like D3Q27, their compilation can take up to 20 seconds.
        Choose your weapon depending on your use case.
        """
        if not isinstance(data_handling, SerialDataHandling):
            raise ValueError("Only serial data handling is supported!")

        self.stencil = stencil
        self.dim = stencil.D
        self.dh = data_handling

        assert data_handling.default_target in [Target.CPU, Target.GPU]
        self.target = data_handling.default_target

        self.pdf_field_name = pdf_field_name
        self.ghost_layers = ghost_layers
        self.periodicity = data_handling.periodicity
        self.inplace_pattern = is_inplace(streaming_pattern)

        self.cpu = self.target == Target.CPU
        self.cupy_direct_copy = self.target == Target.GPU and cupy_direct_copy

        def is_copy_direction(direction):
            s = 0
            for d, p in zip(direction, self.periodicity):
                s += abs(d)
                if d != 0 and not p:
                    return False

            return s != 0

        full_stencil = itertools.product(*([-1, 0, 1] for _ in range(self.dim)))
        copy_directions = tuple(filter(is_copy_direction, full_stencil))
        self.comm_slices = []
        timesteps = get_timesteps(streaming_pattern)
        for timestep in timesteps:
            slices_per_comm_dir = get_communication_slices(
                stencil=stencil,
                comm_stencil=copy_directions,
                streaming_pattern=streaming_pattern,
                prev_timestep=timestep,
                ghost_layers=ghost_layers,
            )
            self.comm_slices.append(
                list(chain.from_iterable(v for k, v in slices_per_comm_dir.items()))
            )

        if self.target == Target.GPU and not cupy_direct_copy:
            self.device_copy_kernels = list()
            for timestep in timesteps:
                self.device_copy_kernels.append(self._compile_copy_kernels(timestep))

    def __call__(self, prev_timestep=Timestep.BOTH):
        if self.cpu:
            self._periodicity_handling_cpu(prev_timestep)
        else:
            self._periodicity_handling_gpu(prev_timestep)

    def _periodicity_handling_cpu(self, prev_timestep):
        arr = self.dh.cpu_arrays[self.pdf_field_name]
        comm_slices = self.comm_slices[prev_timestep.idx]
        for src, dst in comm_slices:
            arr[dst] = arr[src]

    def _compile_copy_kernels(self, timestep):
        assert self.target == Target.GPU
        pdf_field = self.dh.fields[self.pdf_field_name]
        kernels = []
        for src, dst in self.comm_slices[timestep.idx]:
            kernels.append(periodic_pdf_gpu_copy_kernel(pdf_field, src, dst))
        return kernels

    def _periodicity_handling_gpu(self, prev_timestep):
        arr = self.dh.gpu_arrays[self.pdf_field_name]
        if self.cupy_direct_copy:
            for src, dst in self.comm_slices[prev_timestep.idx]:
                arr[dst] = arr[src]
        else:
            kernel_args = {self.pdf_field_name: arr}
            for kernel in self.device_copy_kernels[prev_timestep.idx]:
                kernel(**kernel_args)
def get_communication_slices(
    stencil,
    comm_stencil=None,
    streaming_pattern="pull",
    prev_timestep=Timestep.BOTH,
    ghost_layers=1,
):
    """
    Return the source and destination slices for periodicity handling or communication between blocks.

    :param stencil: The stencil used by the LB method.
    :param comm_stencil: The stencil defining the communication directions. If None, it will be set to the
                         full stencil (D2Q9 in 2D, D3Q27 in 3D, etc.).
    :param streaming_pattern: The streaming pattern.
    :param prev_timestep: Timestep after which communication is run.
@@ -68,11 +137,12 @@ def get_communication_slices(
    """
    if comm_stencil is None:
        comm_stencil = itertools.product(*([-1, 0, 1] for _ in range(stencil.D)))

    pdfs = Field.create_generic(
        "pdfs", spatial_dimensions=len(stencil[0]), index_shape=(stencil.Q,)
    )
    write_accesses = get_accessor(streaming_pattern, prev_timestep).write(pdfs, stencil)
    slices_per_comm_direction = dict()

@@ -84,19 +154,27 @@ def get_communication_slices(
        for streaming_dir in set(_extend_dir(comm_dir)) & set(stencil):
            d = stencil.index(streaming_dir)
            write_index = numeric_index(write_accesses[d])[0]

            origin_slice = get_slice_before_ghost_layer(
                comm_dir, ghost_layers=ghost_layers, thickness=1
            )
            src_slice = _fix_length_one_slices(origin_slice)

            write_offsets = numeric_offsets(write_accesses[d])
            tangential_dir = tuple(s - c for s, c in zip(streaming_dir, comm_dir))

            # TODO: this is just a hotfix. _trim_slice_in_direction breaks FreeSlip BC with adjacent periodic side
            if streaming_pattern != "pull":
                src_slice = shift_slice(
                    _trim_slice_in_direction(src_slice, tangential_dir), write_offsets
                )

            neighbour_transform = _get_neighbour_transform(comm_dir, ghost_layers)
            dst_slice = shift_slice(src_slice, neighbour_transform)

            src_slice = src_slice + (write_index,)
            dst_slice = dst_slice + (write_index,)

            slices_for_dir.append((src_slice, dst_slice))

@@ -104,11 +182,10 @@ def get_communication_slices(
    return slices_per_comm_direction
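# Usage sketch (assumes lbmpy's public LBStencil API as exported in __init__.py):
# for a D2Q9 pull pattern,
#     from lbmpy import LBStencil, Stencil
#     slices = get_communication_slices(LBStencil(Stencil.D2Q9),
#                                       streaming_pattern='pull',
#                                       prev_timestep=Timestep.BOTH)
# maps each communication direction, e.g. (1, 0), to a list of
# (src_slice, dst_slice) pairs that can be applied as arr[dst] = arr[src].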
def periodic_pdf_gpu_copy_kernel(pdf_field, src_slice, dst_slice, domain_size=None):
    """Generate a GPU kernel which copies all values from one slice of a field
    to another non-overlapping slice."""
    from pystencils import create_kernel

    pdf_idx = src_slice[-1]
    assert isinstance(pdf_idx, int), "PDF index needs to be an integer constant"
@@ -116,6 +193,7 @@ def periodic_pdf_gpu_copy_kernel(pdf_field, src_slice, dst_slice,
    src_slice = src_slice[:-1]
    dst_slice = dst_slice[:-1]

    # TODO this is the domain_size with GL
    if domain_size is None:
        domain_size = pdf_field.spatial_shape

@@ -128,120 +206,71 @@ def periodic_pdf_gpu_copy_kernel(pdf_field, src_slice, dst_slice,
    def _stop(s):
        return s.stop if isinstance(s, slice) else s

    offset = [
        _start(s1) - _start(s2)
        for s1, s2 in zip(normalized_from_slice, normalized_to_slice)
    ]
    assert offset == [
        _stop(s1) - _stop(s2)
        for s1, s2 in zip(normalized_from_slice, normalized_to_slice)
    ], "Slices have to have same size"

    copy_eq = AssignmentCollection(
        main_assignments=[
            Assignment(pdf_field(pdf_idx), pdf_field[tuple(offset)](pdf_idx))
        ]
    )
    config = CreateKernelConfig(
        iteration_slice=dst_slice,
        skip_independence_check=True,
        target=Target.GPU,
    )

    ast = create_kernel(copy_eq, config=config)
    return ast.compile()
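A minimal end-to-end sketch for LBMPeriodicityHandling (field name 'pdfs' and the
domain size are illustrative; assumes a CPU SerialDataHandling):

    import pystencils as ps
    from lbmpy import LBStencil, Stencil
    from lbmpy.advanced_streaming import LBMPeriodicityHandling, Timestep

    stencil = LBStencil(Stencil.D2Q9)
    dh = ps.create_data_handling(domain_size=(32, 32), periodicity=(True, True))
    dh.add_array('pdfs', values_per_cell=stencil.Q)

    periodicity = LBMPeriodicityHandling(stencil, dh, 'pdfs', streaming_pattern='pull')
    periodicity(Timestep.BOTH)  # copy the periodic ghost-layer slices in place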
# ---- src/lbmpy/advanced_streaming/indexing.py ----

import numpy as np
import sympy as sp
import pystencils as ps

from itertools import product

from .._compat import IS_PYSTENCILS_2
from lbmpy.advanced_streaming.utility import get_accessor, inverse_dir_index, is_inplace, Timestep

if IS_PYSTENCILS_2:
    from pystencils import TypedSymbol, create_type
    from pystencils.types.quick import Arr
    from lbmpy.lookup_tables import TranslationArraysNode
else:
    from pystencils.typing import TypedSymbol, create_type
    from ..custom_code_nodes import TranslationArraysNode


class BetweenTimestepsIndexing:

@@ -31,7 +32,7 @@ class BetweenTimestepsIndexing:
    @property
    def inverse_dir_symbol(self):
        """Symbol denoting the inversion of a PDF field index.
        Use only at top-level of index to f_out or f_in, otherwise it can't be correctly replaced."""
        return sp.IndexedBase('invdir')

@@ -45,9 +46,6 @@ class BetweenTimestepsIndexing:
            raise ValueError('Cannot create index arrays for both kinds of timesteps for inplace streaming pattern '
                             + streaming_pattern)

        prev_accessor = get_accessor(streaming_pattern, prev_timestep)
        next_accessor = get_accessor(streaming_pattern, prev_timestep.next())

@@ -58,8 +56,8 @@ class BetweenTimestepsIndexing:
        self._pdf_field = pdf_field
        self._stencil = stencil
        self._dim = stencil.D
        self._q = stencil.Q
        self._coordinate_names = ['x', 'y', 'z'][:self._dim]
        self._index_dtype = create_type(index_dtype)

@@ -73,13 +71,21 @@ class BetweenTimestepsIndexing:
        assert f_dir in ['in', 'out']
        inv = '_inv' if inverse else ''
        name = f"f_{f_dir}{inv}_dir_idx"
        if IS_PYSTENCILS_2:
            return TypedSymbol(name, Arr(self._index_dtype, self._q))
        else:
            return TypedSymbol(name, self._index_dtype)

    def _offset_array_symbols(self, f_dir, inverse):
        assert f_dir in ['in', 'out']
        inv = '_inv' if inverse else ''
        name_base = f"f_{f_dir}{inv}_offsets_"
        if IS_PYSTENCILS_2:
            symbols = [TypedSymbol(name_base + d, Arr(self._index_dtype, self._q)) for d in self._coordinate_names]
        else:
            symbols = [TypedSymbol(name_base + d, self._index_dtype) for d in self._coordinate_names]
        return symbols

    def _array_symbols(self, f_dir, inverse, index):

@@ -172,62 +178,31 @@ class BetweenTimestepsIndexing:
        return trivial_index_translations, trivial_offset_translations

    def create_code_node(self):
        array_content = list()
        symbols_defined = set()
        for f_dir, inv in self._required_index_arrays:
            indices, offsets = self._get_translated_indices_and_offsets(f_dir, inv)
            index_array_symbol = self._index_array_symbol(f_dir, inv)
            symbols_defined.add(index_array_symbol)
            if IS_PYSTENCILS_2:
                array_content.append((index_array_symbol, indices))
            else:
                array_content.append((self._index_dtype, index_array_symbol.name, indices))

        for f_dir, inv in self._required_offset_arrays:
            indices, offsets = self._get_translated_indices_and_offsets(f_dir, inv)
            offset_array_symbols = self._offset_array_symbols(f_dir, inv)
            symbols_defined |= set(offset_array_symbols)
            for d, arrsymb in enumerate(offset_array_symbols):
                if IS_PYSTENCILS_2:
                    array_content.append((arrsymb, offsets[d]))
                else:
                    array_content.append((self._offsets_dtype, arrsymb.name, offsets[d]))

        if IS_PYSTENCILS_2:
            return TranslationArraysNode(array_content)
        else:
            return TranslationArraysNode(array_content, symbols_defined)

# end class BetweenTimestepsIndexing
# ---- src/lbmpy/advanced_streaming/utility.py ----

@@ -4,7 +4,11 @@
from lbmpy.fieldaccess import PdfFieldAccessor, \
    AAEvenTimeStepAccessor, \
    AAOddTimeStepAccessor, \
    EsoTwistEvenTimeStepAccessor, \
    EsoTwistOddTimeStepAccessor, \
    EsoPullEvenTimeStepAccessor, \
    EsoPullOddTimeStepAccessor, \
    EsoPushEvenTimeStepAccessor, \
    EsoPushOddTimeStepAccessor

import numpy as np
import pystencils as ps

@@ -33,41 +37,48 @@ class Timestep(IntEnum):
        return 'Both'


streaming_patterns = ['push', 'pull', 'aa', 'esotwist', 'esopull', 'esopush']

even_accessors = {
    'pull': StreamPullTwoFieldsAccessor,
    'push': StreamPushTwoFieldsAccessor,
    'aa': AAEvenTimeStepAccessor,
    'esotwist': EsoTwistEvenTimeStepAccessor,
    'esopull': EsoPullEvenTimeStepAccessor,
    'esopush': EsoPushEvenTimeStepAccessor
}

odd_accessors = {
    'pull': StreamPullTwoFieldsAccessor,
    'push': StreamPushTwoFieldsAccessor,
    'aa': AAOddTimeStepAccessor,
    'esotwist': EsoTwistOddTimeStepAccessor,
    'esopull': EsoPullOddTimeStepAccessor,
    'esopush': EsoPushOddTimeStepAccessor
}


def is_inplace(streaming_pattern):
    if streaming_pattern not in streaming_patterns:
        raise ValueError('Invalid streaming pattern', streaming_pattern)

    return streaming_pattern in ['aa', 'esotwist', 'esopull', 'esopush']


def get_accessor(streaming_pattern: str, timestep: Timestep) -> PdfFieldAccessor:
    if streaming_pattern not in streaming_patterns:
        raise ValueError(
            "Invalid value of parameter 'streaming_pattern'.", streaming_pattern)

    if is_inplace(streaming_pattern) and (timestep == Timestep.BOTH):
        raise ValueError(f"Invalid timestep for streaming pattern {streaming_pattern}: {str(timestep)}")

    if timestep == Timestep.EVEN:
        return even_accessors[streaming_pattern]
    else:
        return odd_accessors[streaming_pattern]


def get_timesteps(streaming_pattern):
    return (Timestep.EVEN, Timestep.ODD) if is_inplace(streaming_pattern) else (Timestep.BOTH, )
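# Worked examples:
#     is_inplace('pull')    -> False  (two-field pattern)
#     is_inplace('aa')      -> True   (single-field pattern with even/odd kernels)
#     get_timesteps('pull') -> (Timestep.BOTH,)
#     get_timesteps('aa')   -> (Timestep.EVEN, Timestep.ODD)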
@@ -94,7 +105,7 @@ class AccessPdfValues:
        if streaming_dir not in ['in', 'out']:
            raise ValueError('Invalid streaming direction.', streaming_dir)

        pdf_field = ps.Field.create_generic('pdfs', len(stencil[0]), index_shape=(stencil.Q,))
        if accessor is None:
            accessor = get_accessor(streaming_pattern, timestep)
from typing import Union

from numpy.typing import NDArray


def poiseuille_flow(middle_distance: Union[float, NDArray], height,
                    ext_force_density: float, dyn_visc: float) -> Union[float, NDArray]:
    """
    Analytical solution for plane Poiseuille flow.

    Args:
        middle_distance: Distance to the middle plane of the channel.
        height: Distance between the boundaries.
        ext_force_density: Force density on the fluid normal to the boundaries.
        dyn_visc: Dynamic viscosity of the fluid.

    Returns:
        The velocity at the given distance from the mid-plane; a numpy array of the
        Poiseuille profile if middle_distance is passed as an array, otherwise a scalar.
    """
    return ext_force_density * 1. / (2 * dyn_visc) * (height**2.0 / 4.0 - middle_distance**2.0)
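For example, sampling the profile across a channel of height 10 (parameter values
purely illustrative):

    import numpy as np

    y = np.linspace(-5, 5, 11)  # distances from the channel mid-plane
    u = poiseuille_flow(y, height=10, ext_force_density=1e-4, dyn_visc=0.1)
    # u peaks at the mid-plane (y = 0) and vanishes at the walls (y = ±height/2)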
# ---- src/lbmpy/boundaries/__init__.py ----

from lbmpy.boundaries.boundaryconditions import (
    UBB, FixedDensity, DiffusionDirichlet, SimpleExtrapolationOutflow, WallFunctionBounce,
    ExtrapolationOutflow, NeumannByCopy, NoSlip, NoSlipLinearBouzidi, QuadraticBounceBack, StreamInConstant, FreeSlip)
from lbmpy.boundaries.boundaryhandling import LatticeBoltzmannBoundaryHandling
from lbmpy.boundaries.wall_function_models import MoninObukhovSimilarityTheory, LogLaw, MuskerLaw, SpaldingsLaw

__all__ = ['NoSlip', 'NoSlipLinearBouzidi', 'QuadraticBounceBack', 'FreeSlip', 'WallFunctionBounce',
           'UBB', 'FixedDensity',
           'SimpleExtrapolationOutflow', 'ExtrapolationOutflow',
           'DiffusionDirichlet', 'NeumannByCopy', 'StreamInConstant',
           'LatticeBoltzmannBoundaryHandling',
           'MoninObukhovSimilarityTheory', 'LogLaw', 'MuskerLaw', 'SpaldingsLaw']
# ---- src/lbmpy/boundaries/boundaries_in_kernel.py ----

import sympy as sp

from .._compat import IS_PYSTENCILS_2

from lbmpy.advanced_streaming.indexing import BetweenTimestepsIndexing
from lbmpy.advanced_streaming.utility import Timestep, get_accessor
from pystencils.boundaries.boundaryhandling import BoundaryOffsetInfo
from pystencils import Assignment
from pystencils.simp import AssignmentCollection, sympy_cse_on_assignment_list
from pystencils.stencil import inverse_direction
from pystencils.sympyextensions import fast_subs

if IS_PYSTENCILS_2:
    from lbmpy.lookup_tables import LbmWeightInfo
else:
    from lbmpy.custom_code_nodes import LbmWeightInfo
    from pystencils.astnodes import Block, Conditional, LoopOverCoordinate, SympyAssignment  # TODO replace


def direction_indices_in_direction(direction, stencil):
    for i, offset in enumerate(stencil):

@@ -48,17 +52,20 @@ def border_conditions(direction, field, ghost_layers=1):
    border_condition = sp.Eq(loop_ctr, gl if val < 0 else field.shape[idx] - gl - 1)

    if ghost_layers == 0:
        return border_condition
    else:
        other_min = [sp.Ge(c, gl)
                     for c in loop_ctrs if c != loop_ctr]
        other_max = [sp.Lt(c, field.shape[i] - gl)
                     for i, c in enumerate(loop_ctrs) if c != loop_ctr]
        result = sp.And(border_condition, *other_min, *other_max)
        return result


def boundary_conditional(boundary, direction, streaming_pattern, prev_timestep, lb_method, output_field, cse=False):
    if IS_PYSTENCILS_2:
        raise NotImplementedError("In-Kernel Boundaries are not yet available on pystencils 2.0")

    stencil = lb_method.stencil
    dir_indices = direction_indices_in_direction(direction, stencil)

@@ -68,7 +75,7 @@ def boundary_conditional(boundary, direction, streaming_pattern, prev_timestep,
    assignments = []
    for direction_idx in dir_indices:
        rule = boundary(f_out, f_in, direction_idx, inv_dir, lb_method, index_field=None, force_vector=None)

        # rhs: replace f_out by post collision symbols.
        rhs_substitutions = {f_out(i): sym for i, sym in enumerate(lb_method.post_collision_pdf_symbols)}
# ---- src/lbmpy/boundaries/boundaryconditions.py ----

import abc
from enum import Enum, auto
from warnings import warn

import numpy as np
import sympy as sp

from pystencils import Assignment, AssignmentCollection, Field, TypedSymbol
from pystencils.stencil import offset_to_direction_string, direction_string_to_offset, inverse_direction
from pystencils.sympyextensions import get_symmetric_part, simplify_by_equality, scalar_product

from lbmpy.advanced_streaming.utility import AccessPdfValues, Timestep
from lbmpy.maxwellian_equilibrium import discrete_equilibrium
from lbmpy.simplificationfactory import create_simplification_strategy

from .._compat import IS_PYSTENCILS_2

if IS_PYSTENCILS_2:
    from pystencils import create_type
    from pystencils.sympyextensions.typed_sympy import CastFunc
    from pystencils.types.quick import Arr
    from lbmpy.lookup_tables import (
        NeighbourOffsetArrays,
        MirroredStencilDirections,
        LbmWeightInfo,
        TranslationArraysNode
    )
else:
    from pystencils.typing import create_type, CastFunc
    from lbmpy.custom_code_nodes import (
        NeighbourOffsetArrays,
        MirroredStencilDirections,
        LbmWeightInfo,
        TranslationArraysNode
    )


class LbBoundary(abc.ABC):
"""Base class that all boundaries should derive from.
Args:
......@@ -21,10 +45,11 @@ class LbBoundary:
inner_or_boundary = True
single_link = False
def __init__(self, name=None):
def __init__(self, name=None, calculate_force_on_boundary=False):
self._name = name
self.calculate_force_on_boundary = calculate_force_on_boundary
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field):
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
"""
This function defines the boundary behavior and must therefore be implemented by all boundaries.
The boundary is defined through a list of sympy equations from which a boundary kernel is generated.
......@@ -41,6 +66,8 @@ class LbBoundary:
lb_method: an instance of the LB method used. Use this to adapt the boundary to the method
(e.g. compressibility)
index_field: the boundary index field that can be used to retrieve and update boundary data
force_vector: vector to store the force on the boundary. Has the same size as the index field and
D-entries per cell
Returns:
list of pystencils assignments, or pystencils.AssignmentCollection
......@@ -79,45 +106,687 @@ class LbBoundary:
def name(self, new_value):
self._name = new_value
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self.__dict__ == other.__dict__
# end class Boundary
class NoSlip(LbBoundary):
r"""
No-Slip, (half-way) simple bounce back boundary condition, enforcing zero velocity at obstacle.
Populations leaving the boundary node :math:`\mathbf{x}_b` at time :math:`t` are reflected
back with :math:`\mathbf{c}_{\overline{i}} = -\mathbf{c}_{i}`
.. math ::
f_{\overline{i}}(\mathbf{x}_b, t + \Delta t) = f^{\star}_{i}(\mathbf{x}_b, t)
Args:
name: optional name of the boundary.
calculate_force_on_boundary: stores the force for each PDF at the boundary in a force vector
"""
def __init__(self, name=None, calculate_force_on_boundary=False):
"""Set an optional name here, to mark boundaries, for example for force evaluations"""
super(NoSlip, self).__init__(name, calculate_force_on_boundary)
def get_additional_code_nodes(self, lb_method):
if self.calculate_force_on_boundary:
return [NeighbourOffsetArrays(lb_method.stencil)]
else:
return []
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
if self.calculate_force_on_boundary:
force = sp.Symbol("f")
subexpressions = [Assignment(force, sp.Float(2.0) * f_out(dir_symbol))]
offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
if IS_PYSTENCILS_2:
offset = [CastFunc.as_numeric(o) for o in offset]
for i in range(lb_method.stencil.D):
subexpressions.append(Assignment(force_vector[0](f'F_{i}'), force * offset[i]))
else:
subexpressions = []
boundary_assignments = [Assignment(f_in(inv_dir[dir_symbol]), f_out(dir_symbol))]
return AssignmentCollection(boundary_assignments, subexpressions=subexpressions)
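# end class NoSlip

# Usage sketch (illustrative, not part of this file): attach a NoSlip wall with
# force evaluation to a LatticeBoltzmannStep; domain size and relaxation rate
# are assumed example values.
import pystencils as ps
from lbmpy import LBMConfig, LBStencil, LatticeBoltzmannStep, Stencil

step = LatticeBoltzmannStep(domain_size=(40, 20),
                            lbm_config=LBMConfig(stencil=LBStencil(Stencil.D2Q9),
                                                 relaxation_rate=1.8))
wall = NoSlip("wall", calculate_force_on_boundary=True)
step.boundary_handling.set_boundary(wall, ps.make_slice[:, 0])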
class NoSlipLinearBouzidi(LbBoundary):
"""
No-Slip, (half-way) simple bounce back boundary condition with interpolation
to increase accuracy: :cite:`BouzidiBC`. In order to make the boundary condition work properly a
Python callback function needs to be provided to calculate the distance from the wall for each cell near to the
boundary. If this is not done the boundary condition will fall back to a simple NoSlip boundary.
Furthermore, this boundary condition needs a second fluid cell away from the wall. If that cell is not
available (e.g. because it is marked as boundary as well), the boundary condition falls back to
a simple NoSlip boundary as well.
Args:
name: optional name of the boundary.
init_wall_distance: Python callback function to calculate the wall distance for each cell near to the boundary
data_type: data type of the wall distance q
"""
def __init__(self, name=None, init_wall_distance=None, data_type='double', calculate_force_on_boundary=False):
self.data_type = data_type
self.init_wall_distance = init_wall_distance
super(NoSlipLinearBouzidi, self).__init__(name, calculate_force_on_boundary)
@property
def additional_data(self):
"""Used internally only. For the NoSlipLinearBouzidi boundary the distance to the obstacle of every
direction is needed. This information is stored in the index vector."""
return [('q', create_type(self.data_type))]
def get_additional_code_nodes(self, lb_method):
if self.calculate_force_on_boundary:
return [NeighbourOffsetArrays(lb_method.stencil)]
else:
return []
@property
def additional_data_init_callback(self):
def default_callback(boundary_data, **_):
for cell in boundary_data.index_array:
cell['q'] = -1
if self.init_wall_distance:
return self.init_wall_distance
else:
warn("No callback function provided to initialise the wall distance for each cell "
"(init_wall_distance=None). The boundary condition will fall back to a simple NoSlip BC")
return default_callback
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
f_xf = sp.Symbol("f_xf")
f_xf_inv = sp.Symbol("f_xf_inv")
d_x2f = sp.Symbol("d_x2f")
q = sp.Symbol("q")
one = sp.Float(1.0)
two = sp.Float(2.0)
half = sp.Rational(1, 2)
subexpressions = [Assignment(f_xf, f_out(dir_symbol)),
Assignment(f_xf_inv, f_out(inv_dir[dir_symbol])),
Assignment(d_x2f, f_in(dir_symbol)),
Assignment(q, index_field[0]('q'))]
case_one = (half * (f_xf + f_xf_inv * (two * q - one))) / q
case_two = two * q * f_xf + (one - two * q) * d_x2f
case_three = f_xf
rhs = sp.Piecewise((case_one, sp.Ge(q, 0.5)),
(case_two, sp.And(sp.Gt(q, 0), sp.Lt(q, 0.5))),
(case_three, True))
if self.calculate_force_on_boundary:
force = sp.Symbol("f")
subexpressions.append(Assignment(force, f_xf + rhs))
offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
if IS_PYSTENCILS_2:
offset = [CastFunc.as_numeric(o) for o in offset]
for i in range(lb_method.stencil.D):
subexpressions.append(Assignment(force_vector[0](f'F_{i}'), force * offset[i]))
boundary_assignments = [Assignment(f_in(inv_dir[dir_symbol]), rhs)]
return AssignmentCollection(boundary_assignments, subexpressions=subexpressions)
# end class NoSlipLinearBouzidi
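# Sketch of a wall-distance callback for NoSlipLinearBouzidi (assumed geometry:
# a cylinder at (cx, cy) with radius r). The callback fills the 'q' column of
# the boundary index array with the normalised link-wall distance; cells left
# at -1 fall back to plain NoSlip. The distance estimate below is deliberately
# crude and only illustrates the mechanics.
import numpy as np

def init_cylinder_distance(boundary_data, **_):
    cx, cy, r = 20.0, 20.0, 5.0
    for cell in boundary_data.index_array:
        dist = np.sqrt((cell['x'] - cx) ** 2 + (cell['y'] - cy) ** 2) - r
        cell['q'] = min(max(dist, 0.0), 1.0)

bouzidi_wall = NoSlipLinearBouzidi(init_wall_distance=init_cylinder_distance)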
class QuadraticBounceBack(LbBoundary):
"""
Second order accurate bounce back boundary condition. Implementation details are provided in a demo notebook here:
https://pycodegen.pages.i10git.cs.fau.de/lbmpy/notebooks/demo_interpolation_boundary_conditions.html
Args:
relaxation_rate: relaxation rate to realise a BGK scheme for recovering the pre collision PDF value.
name: optional name of the boundary.
init_wall_distance: Python callback function to calculate the wall distance for each cell near to the boundary
data_type: data type of the wall distance q
"""
def __init__(self, relaxation_rate, name=None, init_wall_distance=None, data_type='double',
calculate_force_on_boundary=False):
self.relaxation_rate = relaxation_rate
self.data_type = data_type
self.init_wall_distance = init_wall_distance
self.equilibrium_values_name = "f_eq"
super(QuadraticBounceBack, self).__init__(name, calculate_force_on_boundary)
def inv_dir_symbol(self, stencil):
if IS_PYSTENCILS_2:
return TypedSymbol("inv_dir", Arr(create_type("int32"), stencil.Q))
else:
return TypedSymbol("inv_dir", create_type("int32"))
@property
def additional_data(self):
"""Used internally only. For the NoSlipLinearBouzidi boundary the distance to the obstacle of every
direction is needed. This information is stored in the index vector."""
return [('q', create_type(self.data_type))]
@property
def additional_data_init_callback(self):
def default_callback(boundary_data, **_):
for cell in boundary_data.index_array:
cell['q'] = 0.5
if self.init_wall_distance:
return self.init_wall_distance
else:
warn("No callback function provided to initialise the wall distance for each cell "
"(init_wall_distance=None). The boundary condition will fall back to a simple NoSlip BC")
return default_callback
def get_additional_code_nodes(self, lb_method):
"""Return a list of code nodes that will be added in the generated code before the index field loop.
Args:
lb_method: Lattice Boltzmann method. See :func:`lbmpy.creationfunctions.create_lb_method`
Returns:
list containing LbmWeightInfo
"""
stencil = lb_method.stencil
inv_directions = [str(stencil.index(inverse_direction(direction))) for direction in stencil]
if IS_PYSTENCILS_2:
inverse_dir_node = TranslationArraysNode([(self.inv_dir_symbol(stencil), inv_directions), ])
else:
inv_dir_symbol = self.inv_dir_symbol(stencil)
dtype = inv_dir_symbol.dtype
name = inv_dir_symbol.name
inverse_dir_node = TranslationArraysNode([(dtype, name, inv_directions), ], {inv_dir_symbol})
return [LbmWeightInfo(lb_method, self.data_type), inverse_dir_node, NeighbourOffsetArrays(lb_method.stencil)]
@staticmethod
def get_equilibrium(v, u, rho, drho, weight, compressible, zero_centered):
rho_background = sp.Integer(1)
result = discrete_equilibrium(v, u, rho, weight,
order=2, c_s_sq=sp.Rational(1, 3), compressible=compressible)
if zero_centered:
shift = discrete_equilibrium(v, [0] * len(u), rho_background, weight,
order=0, c_s_sq=sp.Rational(1, 3), compressible=False)
result = simplify_by_equality(result - shift, rho, drho, rho_background)
return result
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
omega = self.relaxation_rate
inv = sp.IndexedBase(self.inv_dir_symbol(lb_method.stencil), shape=(1,))[dir_symbol]
weight_info = LbmWeightInfo(lb_method, data_type=self.data_type)
weight_of_direction = weight_info.weight_of_direction
pdf_field_accesses = [f_out(i) for i in range(len(lb_method.stencil))]
pdf_symbols = [sp.Symbol(f"pdf_{i}") for i in range(len(lb_method.stencil))]
f_xf = sp.Symbol("f_xf")
f_xf_inv = sp.Symbol("f_xf_inv")
q = sp.Symbol("q")
feq = sp.Symbol("f_eq")
weight = sp.Symbol("w")
weight_inv = sp.Symbol("w_inv")
v = [TypedSymbol(f"c_{i}", self.data_type) for i in range(lb_method.stencil.D)]
v_inv = [TypedSymbol(f"c_inv_{i}", self.data_type) for i in range(lb_method.stencil.D)]
one = sp.Float(1.0)
half = sp.Rational(1, 2)
subexpressions = [Assignment(pdf_symbols[i], pdf) for i, pdf in enumerate(pdf_field_accesses)]
subexpressions.append(Assignment(f_xf, f_out(dir_symbol)))
subexpressions.append(Assignment(f_xf_inv, f_out(inv_dir[dir_symbol])))
subexpressions.append(Assignment(q, index_field[0]('q')))
subexpressions.append(Assignment(weight, weight_of_direction(dir_symbol, lb_method)))
subexpressions.append(Assignment(weight_inv, weight_of_direction(inv, lb_method)))
if IS_PYSTENCILS_2:
cast_offset = CastFunc.as_numeric
else:
def cast_offset(x):
return x
for i in range(lb_method.stencil.D):
offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
subexpressions.append(Assignment(v[i], cast_offset(offset[i])))
for i in range(lb_method.stencil.D):
offset = NeighbourOffsetArrays.neighbour_offset(inv, lb_method.stencil)
subexpressions.append(Assignment(v_inv[i], cast_offset(offset[i])))
cqc = lb_method.conserved_quantity_computation
rho = cqc.density_symbol
drho = cqc.density_deviation_symbol
u = sp.Matrix(cqc.velocity_symbols)
compressible = cqc.compressible
zero_centered = cqc.zero_centered_pdfs
cqe = cqc.equilibrium_input_equations_from_pdfs(pdf_symbols, False)
subexpressions.append(cqe.all_assignments)
eq_dir = self.get_equilibrium(v, u, rho, drho, weight, compressible, zero_centered)
eq_inv = self.get_equilibrium(v_inv, u, rho, drho, weight_inv, compressible, zero_centered)
subexpressions.append(Assignment(feq, eq_dir + eq_inv))
t1 = (f_xf - f_xf_inv + (f_xf + f_xf_inv - feq * omega) / (one - omega))
t2 = (q * (f_xf + f_xf_inv)) / (one + q)
result = (one - q) / (one + q) * t1 * half + t2
if self.calculate_force_on_boundary:
force = sp.Symbol("f")
subexpressions.append(Assignment(force, f_xf + result))
offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
if IS_PYSTENCILS_2:
offset = [CastFunc.as_numeric(o) for o in offset]
for i in range(lb_method.stencil.D):
subexpressions.append(Assignment(force_vector[0](f'F_{i}'), force * offset[i]))
boundary_assignments = [Assignment(f_in(inv_dir[dir_symbol]), result)]
return AssignmentCollection(boundary_assignments, subexpressions=subexpressions)
# end class QuadraticBounceBack
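# Usage sketch: QuadraticBounceBack needs the relaxation rate of the bulk
# method (assumed omega below) to reconstruct the pre-collision PDF with a BGK
# step; the wall-distance callback from the Bouzidi example above can be reused.
omega = 1.8
curved_wall = QuadraticBounceBack(omega, init_wall_distance=init_cylinder_distance)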
class FreeSlip(LbBoundary):
"""
Free-Slip boundary condition, which enforces a zero normal fluid velocity :math:`u_n = 0` but places no restrictions
on the tangential fluid velocity :math:`u_t`.
Args:
stencil: LBM stencil which is used for the simulation
normal_direction: optional normal direction pointing from wall to fluid.
If the Free slip boundary is applied to a certain side in the domain it is not necessary
to calculate the normal direction since it can be stated for all boundary cells.
This reduces the memory space for the index array significantly.
name: optional name of the boundary.
"""
def __init__(self, stencil, normal_direction=None, name=None):
"""Set an optional name here, to mark boundaries, for example for force evaluations"""
self.stencil = stencil
if normal_direction and len(normal_direction) - normal_direction.count(0) != 1:
raise ValueError("It is only possible to pre specify the normal direction for simple situations."
"This means if the free slip boundary is applied to a straight wall or side in the "
"simulation domain. A possible value for example would be (0, 1, 0) if the "
"free slip boundary is applied to the northern wall. For more complex situations "
"the normal direction has to be calculated for each cell. This is done when "
"the normal direction is not defined for this class")
if normal_direction:
normal_direction = tuple([int(n) for n in normal_direction])
assert all([n in [-1, 0, 1] for n in normal_direction]), \
"Only -1, 0 and 1 allowed for defining the normal direction"
self.mirror_axis = normal_direction.index(*[d for d in normal_direction if d != 0])
self.normal_direction = normal_direction
self.dim = len(stencil[0])
if name is None and normal_direction:
name = f"Free Slip : {offset_to_direction_string([-x for x in normal_direction])}"
super(FreeSlip, self).__init__(name, calculate_force_on_boundary=False)
def init_callback(self, boundary_data, **_):
if len(boundary_data.index_array) > 1e6:
warn(f"The calculation of the normal direction for each cell might take a long time, because "
f"{len(boundary_data.index_array)} cells are marked as Free Slip boundary cells. Consider specifying "
f" the normal direction beforehand, which is possible if it is equal for all cells (e.g. at a wall)")
dim = boundary_data.dim
coords = [coord for coord, _ in zip(['x', 'y', 'z'], range(dim))]
boundary_cells = set()
# get a set containing all boundary cells
for cell in boundary_data.index_array:
fluid_cell = tuple([cell[coord] for coord in coords])
direction = self.stencil[cell['dir']]
boundary_cell = tuple([i + d for i, d in zip(fluid_cell, direction)])
boundary_cells.add(boundary_cell)
for cell in boundary_data.index_array:
fluid_cell = tuple([cell[coord] for coord in coords])
direction = self.stencil[cell['dir']]
ref_direction = direction
normal_direction = [0] * dim
for i in range(dim):
sub_direction = [0] * dim
sub_direction[i] = direction[i]
test_cell = tuple([x + y for x, y in zip(fluid_cell, sub_direction)])
if test_cell in boundary_cells:
normal_direction[i] = direction[i]
ref_direction = MirroredStencilDirections.mirror_stencil(ref_direction, i)
# convex corner special case:
if all(n == 0 for n in normal_direction):
normal_direction = direction
else:
ref_direction = inverse_direction(ref_direction)
for i, cell_name in zip(range(dim), self.additional_data):
cell[cell_name[0]] = -normal_direction[i]
cell['ref_dir'] = self.stencil.index(ref_direction)
@property
def additional_data(self):
"""Used internally only. For the FreeSlip boundary the information of the normal direction for each pdf
direction is needed. This information is stored in the index vector."""
if self.normal_direction:
return []
else:
data_type = create_type('int32')
wnz = [] if self.dim == 2 else [('wnz', data_type)]
data = [('wnx', data_type), ('wny', data_type)] + wnz
return data + [('ref_dir', data_type)]
@property
def additional_data_init_callback(self):
if self.normal_direction:
return None
else:
return self.init_callback
def get_additional_code_nodes(self, lb_method):
if self.normal_direction:
return [MirroredStencilDirections(self.stencil, self.mirror_axis), NeighbourOffsetArrays(lb_method.stencil)]
else:
return [NeighbourOffsetArrays(lb_method.stencil)]
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
neighbor_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
if self.normal_direction:
tangential_offset = tuple(offset + normal for offset, normal in zip(neighbor_offset, self.normal_direction))
mirrored_stencil_symbol = MirroredStencilDirections._mirrored_symbol(self.mirror_axis, self.stencil)
mirrored_direction = inv_dir[sp.IndexedBase(mirrored_stencil_symbol, shape=(1,))[dir_symbol]]
else:
normal_direction = list()
for i, cell_name in zip(range(self.dim), self.additional_data):
normal_direction.append(index_field[0](cell_name[0]))
normal_direction = tuple(normal_direction)
tangential_offset = tuple(offset + normal for offset, normal in zip(neighbor_offset, normal_direction))
mirrored_direction = index_field[0]('ref_dir')
return Assignment(f_in.center(inv_dir[dir_symbol]), f_out[tangential_offset](mirrored_direction))
# end class FreeSlip
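# Usage sketch: a FreeSlip wall at the northern domain side. The normal points
# from the wall into the fluid, hence (0, -1); pre-specifying it keeps the
# boundary index array small.
from lbmpy import LBStencil, Stencil

free_slip_north = FreeSlip(LBStencil(Stencil.D2Q9), normal_direction=(0, -1))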
class WallFunctionBounce(LbBoundary):
"""
Wall function based on the bounce back idea, cf. :cite:`Han2021`. Its implementation is extended to the D3Q27
stencil, whereas different weights of the drag distribution are proposed.
Args:
lb_method: LB method which is used for the simulation
pdfs: Symbolic representation of the particle distribution functions.
normal_direction: Normal direction of the wall. Currently, only straight and axis-aligned walls are supported.
wall_function_model: Wall function that is used to retrieve the wall stress :math:`\tau_w` during the simulation.
See :class:`lbmpy.boundaries.wall_treatment.WallFunctionModel` for more details
mean_velocity: Optional field or field access for the mean velocity. As wall functions are typically defined
in terms of the mean velocity, it is recommended to provide this variable. Per default, the
instantaneous velocity obtained from pdfs is used for the wall function.
sampling_shift: Optional sampling shift for the velocity sampling. Can be provided as symbolic variable or
integer. In both cases, the user must assure that the sampling shift is at least 1, as sampling
in boundary cells is not physical. Per default, a sampling shift of 1 is employed which
corresponds to a sampling in the first fluid cell normal to the wall. For lower friction
Reynolds numbers, choosing a sampling shift >1 has been shown to improve the results for higher
resolutions.
Mutually exclusive with the Maronga sampling shift.
maronga_sampling_shift: Optionally, apply a correction factor to the wall shear stress proposed by Maronga et
al. :cite:`Maronga2020`. Has only been tested and validated for the MOST wall function.
No guarantee is given that it also works with other wall functions.
Mutually exclusive with the standard sampling shift.
dt: time discretisation. Usually one in LB units
dy: space discretisation. Usually one in LB units
y: distance from the wall
target_friction_velocity: A target friction velocity can be given if an estimate is known a priori. This target
friction velocity will be used as initial guess for implicit wall functions to ensure
convergence of the Newton algorithm.
weight_method: The extension of the WFB to a D3Q27 stencil is non-unique. Different weights can be chosen to
define the drag distribution onto the pdfs. Per default, weights corresponding to the weights
in the D3Q27 stencil are chosen.
name: Optional name of the boundary.
data_type: Floating-point precision. Per default, double.
"""
class WeightMethod(Enum):
LATTICE_WEIGHT = auto(),
GEOMETRIC_WEIGHT = auto()
def __init__(self, lb_method, pdfs, normal_direction, wall_function_model,
mean_velocity=None, sampling_shift=1, maronga_sampling_shift=None,
dt=1, dy=1, y=0.5,
target_friction_velocity=None,
weight_method=WeightMethod.LATTICE_WEIGHT,
name=None, data_type='double'):
"""Set an optional name here, to mark boundaries, for example for force evaluations"""
self.stencil = lb_method.stencil
if not (self.stencil.Q == 19 or self.stencil.Q == 27):
raise ValueError("WFB boundary is currently only defined for D3Q19 and D3Q27 stencils.")
self.pdfs = pdfs
self.wall_function_model = wall_function_model
if mean_velocity:
if isinstance(mean_velocity, Field):
self.mean_velocity = mean_velocity.center_vector
elif isinstance(mean_velocity, Field.Access):
self.mean_velocity = mean_velocity.field.neighbor_vector(mean_velocity.offsets)
else:
raise ValueError("Mean velocity field has to be a pystencils Field or Field.Access")
else:
self.mean_velocity = None
if not isinstance(sampling_shift, int):
self.sampling_shift = TypedSymbol(sampling_shift.name, np.uint32)
else:
assert sampling_shift >= 1, "The sampling shift must be at least 1."
self.sampling_shift = sampling_shift
if maronga_sampling_shift:
assert self.mean_velocity, "Mean velocity field must be provided when using the Maronga correction"
if not isinstance(maronga_sampling_shift, int):
self.maronga_sampling_shift = TypedSymbol(maronga_sampling_shift.name, np.uint32)
else:
assert maronga_sampling_shift >= 1, "The Maronga sampling shift must be at least 1."
self.maronga_sampling_shift = maronga_sampling_shift
else:
self.maronga_sampling_shift = None
if (self.sampling_shift != 1) and self.maronga_sampling_shift:
raise ValueError("Both sampling shift and Maronga offset are set. This is currently not supported.")
self.dt = dt
self.dy = dy
self.y = y
self.data_type = data_type
self.target_friction_velocity = target_friction_velocity
self.weight_method = weight_method
if len(normal_direction) - normal_direction.count(0) != 1:
raise ValueError("Only normal directions for straight walls are supported for example (0, 1, 0) for "
"a WallFunctionBounce applied to the southern boundary of the domain")
self.mirror_axis = normal_direction.index(*[direction for direction in normal_direction if direction != 0])
self.normal_direction = normal_direction
assert all([n in [-1, 0, 1] for n in self.normal_direction]), \
"Only -1, 0 and 1 allowed for defining the normal direction"
tangential_component = [int(not n) for n in self.normal_direction]
self.normal_axis = tangential_component.index(0)
self.tangential_axis = [0, 1, 2]
self.tangential_axis.remove(self.normal_axis)
self.dim = self.stencil.D
if name is None:
name = f"WFB : {offset_to_direction_string([-x for x in normal_direction])}"
super(WallFunctionBounce, self).__init__(name, calculate_force_on_boundary=False)
def get_additional_code_nodes(self, lb_method):
return [MirroredStencilDirections(self.stencil, self.mirror_axis),
NeighbourOffsetArrays(lb_method.stencil)]
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
# needed symbols for offsets and indices
# neighbour offset symbols are basically the stencil directions defined in stencils.py:L130ff.
neighbor_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
tangential_offset = tuple(offset + normal for offset, normal in zip(neighbor_offset, self.normal_direction))
mirrored_stencil_symbol = MirroredStencilDirections._mirrored_symbol(self.mirror_axis, self.stencil)
mirrored_direction = inv_dir[sp.IndexedBase(mirrored_stencil_symbol, shape=(1,))[dir_symbol]]
name_base = "f_in_inv_offsets_"
offset_array_symbols = [TypedSymbol(name_base + d, mirrored_stencil_symbol.dtype) for d in ['x', 'y', 'z']]
mirrored_offset = sp.IndexedBase(mirrored_stencil_symbol, shape=(1,))[dir_symbol]
offsets = tuple(sp.IndexedBase(s, shape=(1,))[mirrored_offset] for s in offset_array_symbols)
# needed symbols in the Assignments
u_m = sp.Symbol("u_m")
tau_w = sp.Symbol("tau_w")
wall_stress = sp.symbols("tau_w_x tau_w_y tau_w_z")
# if the mean velocity field is not given, or the Maronga correction is applied, density and velocity values
# will be calculated from pdfs
cqc = lb_method.conserved_quantity_computation
result = []
if (not self.mean_velocity) or self.maronga_sampling_shift:
pdf_center_vector = sp.Matrix([0] * self.stencil.Q)
for i in range(self.stencil.Q):
pdf_center_vector[i] = self.pdfs[offsets[0] + self.normal_direction[0],
offsets[1] + self.normal_direction[1],
offsets[2] + self.normal_direction[2]](i)
eq_equations = cqc.equilibrium_input_equations_from_pdfs(pdf_center_vector)
result.append(eq_equations.all_assignments)
# sample velocity which will be used in the wall stress calculation
if self.mean_velocity:
if self.maronga_sampling_shift:
u_for_tau_wall = tuple(u_mean_i.get_shifted(
self.maronga_sampling_shift * self.normal_direction[0],
self.maronga_sampling_shift * self.normal_direction[1],
self.maronga_sampling_shift * self.normal_direction[2]
) for u_mean_i in self.mean_velocity)
else:
u_for_tau_wall = tuple(u_mean_i.get_shifted(
self.sampling_shift * self.normal_direction[0],
self.sampling_shift * self.normal_direction[1],
self.sampling_shift * self.normal_direction[2]
) for u_mean_i in self.mean_velocity)
rho_for_tau_wall = sp.Float(1)
else:
rho_for_tau_wall = cqc.density_symbol
u_for_tau_wall = cqc.velocity_symbols
# calculate Maronga factor in case of correction
maronga_fix = sp.Symbol("maronga_fix")
if self.maronga_sampling_shift:
inst_first_cell_vel = cqc.velocity_symbols
mean_first_cell_vel = tuple(u_mean_i.get_shifted(*self.normal_direction) for u_mean_i in self.mean_velocity)
mag_inst_vel_first_cell = sp.sqrt(sum([inst_first_cell_vel[i] ** 2 for i in self.tangential_axis]))
mag_mean_vel_first_cell = sp.sqrt(sum([mean_first_cell_vel[i] ** 2 for i in self.tangential_axis]))
result.append(Assignment(maronga_fix, mag_inst_vel_first_cell / mag_mean_vel_first_cell))
else:
maronga_fix = 1
# store which direction is tangential component (only those are used for the wall shear stress)
red_u_mag = sp.sqrt(sum([u_for_tau_wall[i]**2 for i in self.tangential_axis]))
u_mag = Assignment(u_m, red_u_mag)
result.append(u_mag)
wall_distance = self.maronga_sampling_shift if self.maronga_sampling_shift else self.sampling_shift
# using wall function model
wall_law_assignments = self.wall_function_model.shear_stress_assignments(
density_symbol=rho_for_tau_wall, velocity_symbol=u_m, shear_stress_symbol=tau_w,
wall_distance=(wall_distance - sp.Rational(1, 2) * self.dy),
u_tau_target=self.target_friction_velocity)
result.append(wall_law_assignments)
# calculate wall stress components and use them to calculate the drag
for i in self.tangential_axis:
result.append(Assignment(wall_stress[i], - u_for_tau_wall[i] / u_m * tau_w * maronga_fix))
weight, inv_weight_sq = sp.symbols("wfb_weight inverse_weight_squared")
if self.stencil.Q == 19:
result.append(Assignment(weight, sp.Rational(1, 2)))
elif self.stencil.Q == 27:
result.append(
Assignment(
inv_weight_sq,
sum([CastFunc(neighbor_offset[i], self.data_type)**2 for i in self.tangential_axis])
)
)
a, b = sp.symbols("wfb_a wfb_b")
if self.weight_method == self.WeightMethod.LATTICE_WEIGHT:
res_ab = sp.solve([2 * a + 4 * b - 1, a - 4 * b], [a, b]) # lattice weight scaling
elif self.weight_method == self.WeightMethod.GEOMETRIC_WEIGHT:
res_ab = sp.solve([2 * a + 4 * b - 1, a - sp.sqrt(2) * b], [a, b]) # geometric scaling
else:
raise ValueError("Unknown weighting method for the WFB D3Q27 extension. Currently, only lattice "
"weights and geometric weights are supported.")
result.append(Assignment(weight, sp.Piecewise((sp.Float(0), sp.Equality(inv_weight_sq, 0)),
(res_ab[a], sp.Equality(inv_weight_sq, 1)),
(res_ab[b], True))))
factor = self.dt / self.dy * weight
drag = sum(
[
CastFunc(neighbor_offset[i], self.data_type) * factor * wall_stress[i]
for i in self.tangential_axis
]
)
result.append(Assignment(f_in.center(inv_dir[dir_symbol]), f_out[tangential_offset](mirrored_direction) - drag))
return result
# end class WallFunctionBounce
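# Usage sketch (assumed setup): WFB at the southern wall of a channel, coupled
# with the Monin-Obukhov model defined in the wall-function module further
# below (import path assumed); `method` and `pdfs` are assumed to come from
# the surrounding kernel-generation code.
from lbmpy.boundaries.wall_function_models import MoninObukhovSimilarityTheory

wfb = WallFunctionBounce(method, pdfs, normal_direction=(0, 1, 0),
                         wall_function_model=MoninObukhovSimilarityTheory(z0=0.01))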
class UBB(LbBoundary):
"""Velocity bounce back boundary condition, enforcing specified velocity at obstacle
r"""Velocity bounce back boundary condition, enforcing specified velocity at obstacle. Furthermore, a density
at the wall can be implied. The boundary condition is implemented with the following formula:
.. math ::
f_{\overline{i}}(\mathbf{x}_b, t + \Delta t) = f^{\star}_{i}(\mathbf{x}_b, t) -
2 w_{i} \rho_{w} \frac{\mathbf{c}_i \cdot \mathbf{u}_w}{c_s^2}
Args:
velocity: Prescribe the fluid velocity :math:`\mathbf{u}_w` at the wall.
Can either be a constant, an access into a field, or a callback function.
The callback function gets a numpy record array with members, ``x``, ``y``, ``z``, ``dir``
(direction) and ``velocity`` which has to be set to the desired velocity of the corresponding link
density: Prescribe the fluid density :math:`\rho_{w}` at the wall. If not prescribed the density is
calculated from the PDFs at the wall. The density can only be set constant.
adapt_velocity_to_force: adapts the velocity to the correct equilibrium when the lattice Boltzmann method holds
a forcing term. If no forcing term is set and adapt_velocity_to_force is set to True
it has no effect.
name: optional name of the boundary.
"""
def __init__(self, velocity, density=None, adapt_velocity_to_force=False, dim=None, name=None, data_type='double'):
self._velocity = velocity
self._density = density
self._adaptVelocityToForce = adapt_velocity_to_force
if callable(self._velocity) and not dim:
raise ValueError("When using a velocity callback the dimension has to be specified with the dim parameter")
elif not callable(self._velocity):
dim = len(velocity)
self.dim = dim
self.data_type = data_type
super(UBB, self).__init__(name, calculate_force_on_boundary=False)
@property
def additional_data(self):
""" In case of the UBB boundary additional data is a velocity vector. This vector is added to each cell to
realize velocity profiles for the inlet."""
if self.velocity_is_callable:
return [(f'vel_{i}', create_type(self.data_type)) for i in range(self.dim)]
else:
return []
Returns:
list containing LbmWeightInfo and NeighbourOffsetArrays
"""
return [LbmWeightInfo(lb_method, self.data_type), NeighbourOffsetArrays(lb_method.stencil)]
@property
def velocity_is_callable(self):
"""Returns True if velocity is callable. This means the velocity should be initialized via a callback function.
This is useful if the inflow velocity should have a certain profile for instance"""
return callable(self._velocity)
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
dtype = create_type(self.data_type)
vel_from_idx_field = callable(self._velocity)
vel = [index_field(f'vel_{i}') for i in range(self.dim)] if vel_from_idx_field else self._velocity
direction = dir_symbol
assert self.dim == lb_method.dim, \
f"Dimension of UBB ({self.dim}) does not match dimension of method ({lb_method.dim})"
neighbor_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
velocity = tuple(v_i.get_shifted(*neighbor_offset)
if isinstance(v_i, Field.Access) and not vel_from_idx_field
else v_i for v_i in velocity)
if self._adaptVelocityToForce:
cqc = lb_method.conserved_quantity_computation
shifted_vel_eqs = cqc.equilibrium_input_equations_from_init_values(velocity=velocity)
shifted_vel_eqs = shifted_vel_eqs.new_without_subexpressions()
velocity = [eq.rhs for eq in shifted_vel_eqs.new_filtered(cqc.velocity_symbols).main_assignments]
c_s_sq = sp.Rational(1, 3)
weight_info = LbmWeightInfo(lb_method, data_type=self.data_type)
weight_of_direction = weight_info.weight_of_direction
vel_term = (
2 / c_s_sq
* sum([CastFunc(d_i, dtype) * v_i for d_i, v_i in zip(neighbor_offset, velocity)])
* weight_of_direction(dir_symbol, lb_method)
)
# Better alternative: in conserved value computation
# rename what is currently called density to "virtual_density"
# provide a new quantity density, which is constant in case of incompressible models
if lb_method.conserved_quantity_computation.compressible:
cqc = lb_method.conserved_quantity_computation
density_symbol = sp.Symbol("rho")
pdf_field_accesses = [f_out(i) for i in range(len(lb_method.stencil))]
density_equations = cqc.output_equations_from_pdfs(pdf_field_accesses, {'density': density_symbol})
density_symbol = lb_method.conserved_quantity_computation.density_symbol
if self._density:
result = [Assignment(density_symbol, self._density)]
else:
result = density_equations.all_assignments
result += [Assignment(f_in(inv_dir[dir_symbol]),
f_out(dir_symbol) - vel_term * density_symbol)]
return result
else:
return [Assignment(f_in(inv_dir[dir_symbol]),
f_out(dir_symbol) - vel_term)]
# end class UBB
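# Usage sketch: a parabolic inflow profile via the UBB callback. 'vel_0'/'vel_1'
# are the per-link entries declared in additional_data; u_max, channel_height
# and the link_positions helper of the pystencils boundary data setter are
# assumed here.
def parabolic_inflow(boundary_data, **_):
    u_max, channel_height = 0.05, 30
    y = boundary_data.link_positions(1)
    boundary_data['vel_0'] = u_max * y * (channel_height - y) / (channel_height / 2) ** 2
    boundary_data['vel_1'] = 0

inflow = UBB(parabolic_inflow, dim=2)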
class SimpleExtrapolationOutflow(LbBoundary):
r"""
Simple Outflow boundary condition :cite:`geier2015`, equation F.1 (listed below).
This boundary condition extrapolates missing populations from the last layer of
fluid cells onto the boundary by copying them in the normal direction.
.. math ::
f_{\overline{1}jkxyzt} = f_{\overline{1}jk(x - \Delta x)yzt}
if name is None:
name = f"Simple Outflow: {offset_to_direction_string(normal_direction)}"
self.normal_direction = tuple([int(n) for n in normal_direction])
assert all([n in [-1, 0, 1] for n in self.normal_direction]), \
"Only -1, 0 and 1 allowed for defining the normal direction"
super(SimpleExtrapolationOutflow, self).__init__(name, calculate_force_on_boundary=False)
def get_additional_code_nodes(self, lb_method):
"""Return a list of code nodes that will be added in the generated code before the index field loop.
"""
return [NeighbourOffsetArrays(lb_method.stencil)]
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
neighbor_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
tangential_offset = tuple(offset - normal for offset, normal in zip(neighbor_offset, self.normal_direction))
return Assignment(f_in.center(inv_dir[dir_symbol]), f_out[tangential_offset](inv_dir[dir_symbol]))
# end class SimpleExtrapolationOutflow
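# Usage sketch: outflow at the eastern border; the normal may also be passed as
# a direction string such as 'E'. The stencil is assumed to match the bulk
# method.
from lbmpy import LBStencil, Stencil

outflow_east = SimpleExtrapolationOutflow((1, 0), LBStencil(Stencil.D2Q9))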
class ExtrapolationOutflow(LbBoundary):
r"""
Outflow boundary condition :cite:`geier2015`, equation F.2, with u neglected (listed below).
This boundary condition interpolates populations missing on the boundary in normal direction.
For this interpolation, the PDF values of the last time step are used. They are interpolated
between fluid cell and boundary cell. To get the PDF values from the last time step an index
array is used which stores them.
.. math ::
f_{\overline{1}jkxyzt} = f_{\overline{1}jk(x - \Delta x)yz(t - \Delta t)} c \theta^{\frac{1}{2}}
Args:
normal_direction: direction vector normal to the outflow
lb_method: the lattice Boltzmann method to be used in the simulation
dt: lattice time step size
dx: lattice spacing distance
name: optional name of the boundary.
def __init__(self, normal_direction, lb_method, dt=1, dx=1, name=None,
streaming_pattern='pull', zeroth_timestep=Timestep.BOTH,
initial_density=None, initial_velocity=None, data_type='double'):
self.lb_method = lb_method
self.stencil = lb_method.stencil
self.dim = len(self.stencil[0])
if isinstance(normal_direction, str):
normal_direction = direction_string_to_offset(normal_direction, dim=self.dim)
if name is None:
name = f"Outflow: {offset_to_direction_string(normal_direction)}"
self.normal_direction = tuple([int(n) for n in normal_direction])
assert all([n in [-1, 0, 1] for n in self.normal_direction]), \
"Only -1, 0 and 1 allowed for defining the normal direction"
self.streaming_pattern = streaming_pattern
self.zeroth_timestep = zeroth_timestep
self.dx = sp.Number(dx)
self.initial_velocity = initial_velocity
self.equilibrium_calculation = None
self.data_type = data_type
if initial_density and initial_velocity:
equilibrium = lb_method.get_equilibrium(conserved_quantity_equations=AssignmentCollection([]))
rho = lb_method.zeroth_order_equilibrium_moment_symbol
self.equilibrium_calculation = calc_eq_pdfs
super(ExtrapolationOutflow, self).__init__(name, calculate_force_on_boundary=False)
def init_callback(self, boundary_data, **_):
dim = boundary_data.dim
@property
def additional_data(self):
"""Used internally only. For the ExtrapolationOutflow information of the previous PDF values is needed. This
information is stored in the index vector."""
data = [('pdf', create_type("double")), ('pdf_nd', create_type("double"))]
data = [('pdf', create_type(self.data_type)), ('pdf_nd', create_type(self.data_type))]
return data
@property
"""
return [NeighbourOffsetArrays(lb_method.stencil)]
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
subexpressions = []
boundary_assignments = []
dtdx = sp.Rational(self.dt, self.dx)
class FixedDensity(LbBoundary):
"""Boundary condition that fixes the density/pressure at the obstacle.
r"""Boundary condition for prescribing a density at the wall. Through :math:`p = c_s^2 \rho` this boundary condition
can also function as a pressure boundary condition.
.. math ::
f_{\overline{i}}(\mathbf{x}_b, t + \Delta t) = - f^{\star}_{i}(\mathbf{x}_b, t) +
2 w_{i} \rho_{w} (1 + \frac{(\mathbf{c}_i \cdot \mathbf{u}_w)^2}{2c_s^4} + \frac{\mathbf{u}_w^2}{2c_s^2})
Args:
density: value of the density which should be set.
        name: optional name of the boundary.
    """
def __init__(self, density, name=None):
if name is None:
name = "Fixed Density " + str(density)
self.density = density
super(FixedDensity, self).__init__(name, calculate_force_on_boundary=False)
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
def remove_asymmetric_part_of_main_assignments(assignment_collection, degrees_of_freedom):
new_main_assignments = [Assignment(a.lhs, get_symmetric_part(a.rhs, degrees_of_freedom))
for a in assignment_collection.main_assignments]
return assignment_collection.copy(new_main_assignments)
cqc = lb_method.conserved_quantity_computation
velocity = cqc.velocity_symbols
symmetric_eq = remove_asymmetric_part_of_main_assignments(lb_method.get_equilibrium(),
degrees_of_freedom=velocity)
substitutions = {sym: f_out(i) for i, sym in enumerate(lb_method.pre_collision_pdf_symbols)}
symmetric_eq = symmetric_eq.new_with_substitutions(substitutions)
simplification = create_simplification_strategy(lb_method)
symmetric_eq = simplification(symmetric_eq)
density = self.density
equilibrium_input = cqc.equilibrium_input_equations_from_init_values(density=density)
equilibrium_input = equilibrium_input.new_without_subexpressions()
equilibrium_input = equilibrium_input.main_assignments_dict
subexpressions_dict = symmetric_eq.subexpressions_dict
subexpressions_dict[cqc.density_symbol] = equilibrium_input[cqc.density_symbol]
subexpressions_dict[cqc.density_deviation_symbol] = equilibrium_input[cqc.density_deviation_symbol]
conditions = [(eq_i.rhs, sp.Equality(dir_symbol, i))
for i, eq_i in enumerate(symmetric_eq.main_assignments)] + [(0, True)]
eq_component = sp.Piecewise(*conditions)
main_assignments = [Assignment(f_in(inv_dir[dir_symbol]), 2 * eq_component - f_out(dir_symbol))]
ac = AssignmentCollection(main_assignments, subexpressions=subexpressions_dict)
ac = ac.new_without_unused_subexpressions()
ac.topological_sort()
return ac
# end class FixedDensity
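# Usage sketch: a pressure-driven channel realised with two density boundaries;
# the inlet overpressure of 2% is an illustrative value.
pressure_inlet = FixedDensity(1.02, name="inlet")
pressure_outlet = FixedDensity(1.0, name="outlet")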
class DiffusionDirichlet(LbBoundary):
"""Concentration boundary which is used for concentration or thermal boundary conditions of convection-diffusion
equation Base on https://doi.org/10.1103/PhysRevE.85.016701.
Args:
concentration: can either be a constant, an access into a field, or a callback function.
The callback function gets a numpy record array with members, ``x``, ``y``, ``z``, ``dir``
(direction) and ``concentration`` which has to be set to the desired
concentration of the corresponding link
velocity_field: if velocity field is given the boundary value is approximated by using the discrete equilibrium.
name: optional name of the boundary.
data_type: data type of the concentration value. default is double
"""
def __init__(self, concentration, velocity_field=None, name=None, data_type='double'):
if name is None:
name = "DiffusionDirichlet"
self.concentration = concentration
self._data_type = data_type
self.concentration_is_callable = callable(self.concentration)
self.velocity_field = velocity_field
super(DiffusionDirichlet, self).__init__(name, calculate_force_on_boundary=False)
@property
def additional_data(self):
""" In case of the UBB boundary additional data is a velocity vector. This vector is added to each cell to
realize velocity profiles for the inlet."""
if self.concentration_is_callable:
return [('concentration', create_type(self._data_type))]
else:
return []
@property
def additional_data_init_callback(self):
"""Initialise additional data of the boundary. For an example see
`tutorial 02 <https://pycodegen.pages.i10git.cs.fau.de/lbmpy/notebooks/02_tutorial_boundary_setup.html>`_
or lbmpy.geometry.add_pipe_inflow_boundary"""
if self.concentration_is_callable:
return self.concentration
def get_additional_code_nodes(self, lb_method):
"""Return a list of code nodes that will be added in the generated code before the index field loop.
Args:
lb_method: Lattice Boltzmann method. See :func:`lbmpy.creationfunctions.create_lb_method`
Returns:
list containing LbmWeightInfo
"""
if self.velocity_field:
return [LbmWeightInfo(lb_method, self._data_type), NeighbourOffsetArrays(lb_method.stencil)]
else:
return [LbmWeightInfo(lb_method, self._data_type)]
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
assert lb_method.conserved_quantity_computation.zero_centered_pdfs is False, \
"DiffusionDirichlet only works for methods with normal pdfs storage -> set zero_centered=False"
weight_info = LbmWeightInfo(lb_method, self._data_type)
w_dir = weight_info.weight_of_direction(dir_symbol, lb_method)
if self.concentration_is_callable:
concentration = index_field[0]('concentration')
else:
concentration = self.concentration
if self.velocity_field:
neighbour_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
u = self.velocity_field
cs = sp.sqrt(sp.Rational(1, 3))  # speed of sound, so that cs**2 = 1/3 matches the cs**4 / cs**2 terms below
equilibrium = (1 + scalar_product(neighbour_offset, u.center_vector)**2 / (2 * cs**4)
- scalar_product(u.center_vector, u.center_vector) / (2 * cs**2))
else:
equilibrium = sp.Rational(1, 1)
result = [Assignment(f_in(inv_dir[dir_symbol]), 2.0 * w_dir * concentration * equilibrium - f_out(dir_symbol))]
return result
# end class DiffusionDirichlet
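# Usage sketch: prescribe a constant concentration of 1.0 at a wall of an
# advection-diffusion method; the method must be created with
# zero_centered=False for this boundary to apply.
hot_wall = DiffusionDirichlet(1.0)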
class NeumannByCopy(LbBoundary):
"""Neumann boundary condition which is implemented by coping the PDF values to achieve similar values at the fluid
"""
return [NeighbourOffsetArrays(lb_method.stencil)]
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
neighbour_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
return [Assignment(f_in(inv_dir[dir_symbol]), f_out(inv_dir[dir_symbol])),
Assignment(f_out[neighbour_offset](dir_symbol), f_out(dir_symbol))]
# end class NeumannByCopy
"""
def __init__(self, constant, name=None):
super(StreamInConstant, self).__init__(name, calculate_force_on_boundary=False)
self.constant = constant
def get_additional_code_nodes(self, lb_method):
"""Return a list of code nodes that will be added in the generated code before the index field loop.
"""
return [NeighbourOffsetArrays(lb_method.stencil)]
def __call__(self, f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector):
neighbour_offset = NeighbourOffsetArrays.neighbour_offset(dir_symbol, lb_method.stencil)
return [Assignment(f_in(inv_dir[dir_symbol]), self.constant),
Assignment(f_out[neighbour_offset](dir_symbol), self.constant)]
# end class StreamInConstant
from dataclasses import replace
import numpy as np
import sympy as sp
from pystencils import Assignment, CreateKernelConfig, create_kernel, Field, Target, FieldType
from pystencils.boundaries import BoundaryHandling
from pystencils.boundaries.createindexlist import numpy_data_type_for_boundary_object
from pystencils.simp import add_subexpressions_for_field_reads
from pystencils.stencil import inverse_direction
from lbmpy.advanced_streaming.indexing import BetweenTimestepsIndexing
from lbmpy.advanced_streaming.utility import is_inplace, Timestep, AccessPdfValues
from .._compat import IS_PYSTENCILS_2
if IS_PYSTENCILS_2:
from pystencils.types import PsNumericType
class LatticeBoltzmannBoundaryHandling(BoundaryHandling):
"""
Enables boundary handling for LBM simulations with advanced streaming patterns.
For the in-place patterns AA and EsoTwist, two kernels are generated for a boundary
Enables boundary handling for LBM simulations with advanced streaming patterns.
For the in-place patterns AA and EsoTwist, two kernels are generated for a boundary
object and the right one selected depending on the time step.
"""
def __init__(self, lb_method, data_handling, pdf_field_name, streaming_pattern='pull',
name="boundary_handling", flag_interface=None, target='cpu', openmp=True):
name="boundary_handling", flag_interface=None, target=Target.CPU, openmp=False, **kwargs):
self._lb_method = lb_method
self._streaming_pattern = streaming_pattern
self._inplace = is_inplace(streaming_pattern)
self._prev_timestep = None
super(LatticeBoltzmannBoundaryHandling, self).__init__(
data_handling, pdf_field_name, lb_method.stencil,
name, flag_interface, target=target, openmp=openmp,
**kwargs
)
# ------------------------- Overridden methods of pystencils.BoundaryHandling -------------------------
self._prev_timestep = None
def add_fixed_steps(self, fixed_loop, **kwargs):
if self._inplace: # Fixed Loop can't do timestep selection
raise NotImplementedError("Adding to fixed loop is currently not supported for inplace kernels")
super(LatticeBoltzmannBoundaryHandling, self).add_fixed_steps(fixed_loop, **kwargs)
def _add_inplace_boundary(self, boundary_obj, flag=None):
if boundary_obj not in self._boundary_object_to_boundary_info:
sym_index_field = Field.create_generic('indexField', spatial_dimensions=1, field_type=FieldType.INDEXED,
dtype=numpy_data_type_for_boundary_object(boundary_obj, self.dim))
ast_even = self._create_boundary_kernel(self._data_handling.fields[self._field_name], sym_index_field,
boundary_obj, Timestep.EVEN)
ast_odd = self._create_boundary_kernel(self._data_handling.fields[self._field_name], sym_index_field,
boundary_obj, Timestep.ODD)
kernels = [ast_even.compile(), ast_odd.compile()]
if flag is None:
flag = self.flag_interface.reserve_next_flag()
boundary_info = self.InplaceStreamingBoundaryInfo(self, boundary_obj, flag, kernels)
return self._boundary_object_to_boundary_info[boundary_obj].flag
def _create_boundary_kernel(self, symbolic_field, symbolic_index_field, boundary_obj, prev_timestep=Timestep.BOTH):
if IS_PYSTENCILS_2:
additional_args = {"default_dtype": self._default_dtype}
else:
additional_args = dict()
return create_lattice_boltzmann_boundary_kernel(
symbolic_field, symbolic_index_field, self._lb_method, boundary_obj,
prev_timestep=prev_timestep, streaming_pattern=self._streaming_pattern,
target=self._target, cpu_openmp=self._openmp, **additional_args)
class InplaceStreamingBoundaryInfo(object):
self.boundary_object = boundary_obj
self.flag = flag
self._kernels = kernels
# end class InplaceStreamingBoundaryInfo
# ------------------------------ Force On Boundary ------------------------------------------------------------
return dh.reduce_float_sequence(list(result), 'sum')
# end class LatticeBoltzmannBoundaryHandling
def create_lattice_boltzmann_boundary_kernel(pdf_field, index_field, lb_method, boundary_functor,
prev_timestep=Timestep.BOTH, streaming_pattern='pull',
target=Target.CPU, force_vector=None, **kernel_creation_args):
from .._compat import IS_PYSTENCILS_2
indexing = BetweenTimestepsIndexing(
pdf_field, lb_method.stencil, prev_timestep, streaming_pattern, np.int32, np.int32)
dim = lb_method.stencil.D
f_out, f_in = indexing.proxy_fields
dir_symbol = indexing.dir_symbol
inv_dir = indexing.inverse_dir_symbol
if IS_PYSTENCILS_2:
from pystencils.types.quick import SInt
config = CreateKernelConfig(
index_field=index_field,
target=target,
index_dtype=SInt(32),
skip_independence_check=True,
**kernel_creation_args
)
default_data_type: PsNumericType = config.get_option("default_dtype")
WEIGHTS_SYMBOL = TypedSymbol("weights", "double")
if force_vector is None:
force_vector_type = np.dtype([(f"F_{i}", default_data_type.numpy_dtype) for i in range(dim)], align=True)
force_vector = Field.create_generic('force_vector', spatial_dimensions=1,
dtype=force_vector_type, field_type=FieldType.INDEXED)
boundary_assignments = boundary_functor(f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector)
boundary_assignments = indexing.substitute_proxies(boundary_assignments)
if pdf_field.dtype != default_data_type:
boundary_assignments = add_subexpressions_for_field_reads(boundary_assignments, data_type=default_data_type)
elements: list[Assignment] = []
index_arrs_node = indexing.create_code_node()
elements += index_arrs_node.get_array_declarations()
for node in boundary_functor.get_additional_code_nodes(lb_method)[::-1]:
elements += node.get_array_declarations()
elements += [Assignment(dir_symbol, index_field[0]('dir'))]
elements += boundary_assignments.all_assignments
kernel = create_kernel(elements, config=config)
return kernel
else:
config = CreateKernelConfig(index_fields=[index_field], target=target, default_number_int="int32",
skip_independence_check=True, **kernel_creation_args)
default_data_type = config.data_type.default_factory()
if force_vector is None:
force_vector_type = np.dtype([(f"F_{i}", default_data_type.c_name) for i in range(dim)], align=True)
force_vector = Field.create_generic('force_vector', spatial_dimensions=1,
dtype=force_vector_type, field_type=FieldType.INDEXED)
config = replace(config, index_fields=[index_field, force_vector])
boundary_assignments = boundary_functor(f_out, f_in, dir_symbol, inv_dir, lb_method, index_field, force_vector)
boundary_assignments = indexing.substitute_proxies(boundary_assignments)
if pdf_field.dtype != default_data_type:
boundary_assignments = add_subexpressions_for_field_reads(boundary_assignments, data_type=default_data_type)
# Code Elements inside the loop
elements = [Assignment(dir_symbol, index_field[0]('dir'))]
elements += boundary_assignments.all_assignments
kernel = create_kernel(elements, config=config)
# Code Elements ahead of the loop
index_arrs_node = indexing.create_code_node()
for node in boundary_functor.get_additional_code_nodes(lb_method)[::-1]:
kernel.body.insert_front(node)
kernel.body.insert_front(index_arrs_node)
return kernel
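# Direct-use sketch; normally this function is called internally by
# LatticeBoltzmannBoundaryHandling. All concrete values are illustrative, and
# Field, FieldType and numpy_data_type_for_boundary_object come from the
# module imports above.
from lbmpy import LBMConfig, LBStencil, Stencil, create_lb_method
from lbmpy.boundaries import NoSlip

method = create_lb_method(LBMConfig(stencil=LBStencil(Stencil.D2Q9), relaxation_rate=1.8))
pdfs = Field.create_generic('pdfs', spatial_dimensions=2, index_shape=(9,))
wall = NoSlip()
idx_dtype = numpy_data_type_for_boundary_object(wall, dim=2)
idx_field = Field.create_generic('indexField', spatial_dimensions=1,
                                 dtype=idx_dtype, field_type=FieldType.INDEXED)
kernel_ast = create_lattice_boltzmann_boundary_kernel(pdfs, idx_field, method, wall)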
import sympy as sp
from abc import ABC, abstractmethod
from pystencils import Assignment
class WallFunctionModel(ABC):
def __init__(self, name):
self._name = name
@abstractmethod
def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
velocity_symbol: sp.Symbol, wall_distance, u_tau_target):
"""
Computes a symbolic representation for the log law.
Args:
density_symbol: symbol density, should be provided by the LB method's conserved quantity computation
shear_stress_symbol: symbolic wall shear stress to which the calculated shear stress will be assigned
velocity_symbol: symbolic velocity that is taken as a reference in the wall functions
wall_distance: distance to the wall, equals to 0.5 in standard cell-centered LBM
u_tau_target: in implicit wall functions, a target friction velocity can be provided which will be used as
initial guess in the Newton iteration. This target friction velocity can be obtained, e.g.,
from the target friction Reynolds number
"""
pass
# end class WallFunctionModel
class ExplicitWallFunctionModel(WallFunctionModel, ABC):
"""
Abstract base class for explicit wall functions that can be solved directly for the wall shear stress.
"""
def __init__(self, name):
super(ExplicitWallFunctionModel, self).__init__(name=name)
class MoninObukhovSimilarityTheory(ExplicitWallFunctionModel):
def __init__(self, z0, kappa=0.41, phi=0, name="MOST"):
self.z0 = z0
self.kappa = kappa
self.phi = phi
super(MoninObukhovSimilarityTheory, self).__init__(name=name)
def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
velocity_symbol: sp.Symbol, wall_distance, u_tau_target=None):
u_tau = velocity_symbol * self.kappa / sp.ln(wall_distance / self.z0 + self.phi)
return [Assignment(shear_stress_symbol, u_tau ** 2 * density_symbol)]
class ImplicitWallFunctionModel(WallFunctionModel, ABC):
"""
Abstract base class for implicit wall functions that require a Newton procedure to solve for the wall shear stress.
"""
def __init__(self, name, newton_steps, viscosity):
self.newton_steps = newton_steps
self.u_tau = sp.symbols(f"wall_function_u_tau_:{self.newton_steps + 1}")
self.delta = sp.symbols(f"wall_function_delta_:{self.newton_steps}")
self.viscosity = viscosity
super(ImplicitWallFunctionModel, self).__init__(name=name)
def newton_iteration(self, wall_law):
m = -wall_law / wall_law.diff(self.u_tau[0])
assignments = []
for i in range(self.newton_steps):
assignments.append(Assignment(self.delta[i], m.subs({self.u_tau[0]: self.u_tau[i]})))
assignments.append(Assignment(self.u_tau[i + 1], self.u_tau[i] + self.delta[i]))
return assignments
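# What newton_iteration generates, spelled out (illustrative comment only): for a
# residual wall law f(u_tau) = 0, each step i emits the Newton-Raphson update
#     delta_i     = -f(u_tau_i) / f'(u_tau_i)
#     u_tau_{i+1} = u_tau_i + delta_i
# so after `newton_steps` steps, u_tau[-1] approximates the root of the wall law.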
class LogLaw(ImplicitWallFunctionModel):
"""
Analytical model for the velocity profile inside the boundary layer, obtained from the mean velocity gradient.
Only valid in the log-law region.
"""
def __init__(self, viscosity, newton_steps=5, kappa=0.41, b=5.2, name="LogLaw"):
self.kappa = kappa
self.b = b
super(LogLaw, self).__init__(name=name, newton_steps=newton_steps, viscosity=viscosity)
def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
velocity_symbol: sp.Symbol, wall_distance, u_tau_target=None):
def law(u_p, y_p):
return 1 / self.kappa * sp.ln(y_p) + self.b - u_p
u_plus = velocity_symbol / self.u_tau[0]
y_plus = (wall_distance * self.u_tau[0]) / self.viscosity
u_tau_init = u_tau_target if u_tau_target else velocity_symbol / sp.Float(100)
wall_law = law(u_plus, y_plus)
assignments = [Assignment(self.u_tau[0], u_tau_init), # initial guess
*self.newton_iteration(wall_law), # newton iterations
Assignment(shear_stress_symbol, self.u_tau[-1] ** 2 * density_symbol)] # final result
return assignments
class SpaldingsLaw(ImplicitWallFunctionModel):
"""
Single formula for the velocity profile inside the boundary layer, proposed by Spalding :cite:`spalding1961`.
Valid in the inner and the outer layer.
"""
def __init__(self, viscosity, newton_steps=5, kappa=0.41, b=5.5, name="Spalding"):
self.kappa = kappa
self.b = b
super(SpaldingsLaw, self).__init__(name=name, newton_steps=newton_steps, viscosity=viscosity)
def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
velocity_symbol: sp.Symbol, wall_distance, u_tau_target=None):
def law(u_p, y_p):
k_times_u = self.kappa * u_p
frac_1 = (k_times_u ** 2) / sp.Float(2)
frac_2 = (k_times_u ** 3) / sp.Float(6)
return (u_p + sp.exp(-self.kappa * self.b) * (sp.exp(k_times_u) - sp.Float(1) - k_times_u - frac_1 - frac_2)
- y_p)
u_plus = velocity_symbol / self.u_tau[0]
y_plus = (wall_distance * self.u_tau[0]) / self.viscosity
u_tau_init = u_tau_target if u_tau_target else velocity_symbol / sp.Float(100)
wall_law = law(u_plus, y_plus)
assignments = [Assignment(self.u_tau[0], u_tau_init), # initial guess
*self.newton_iteration(wall_law), # newton iterations
Assignment(shear_stress_symbol, self.u_tau[-1] ** 2 * density_symbol)] # final result
return assignments
class MuskerLaw(ImplicitWallFunctionModel):
"""
Quasi-analytical model for the velocity profile inside the boundary layer, proposed by Musker. Valid in the inner
and the outer layer.
Formulation taken from :cite:`malaspinas2015`, Equation (59).
"""
def __init__(self, viscosity, newton_steps=5, name="Musker"):
super(MuskerLaw, self).__init__(name=name, newton_steps=newton_steps, viscosity=viscosity)
def shear_stress_assignments(self, density_symbol: sp.Symbol, shear_stress_symbol: sp.Symbol,
velocity_symbol: sp.Symbol, wall_distance, u_tau_target=None):
def law(u_p, y_p):
arctan = sp.Float(5.424) * sp.atan(sp.Float(0.119760479041916168) * y_p - sp.Float(0.488023952095808383))
logarithm = (sp.Float(0.434) * sp.log((y_p + sp.Float(10.6)) ** sp.Float(9.6)
/ (y_p ** 2 - sp.Float(8.15) * y_p + sp.Float(86)) ** 2))
return (arctan + logarithm - sp.Float(3.50727901936264842)) - u_p
u_plus = velocity_symbol / self.u_tau[0]
y_plus = (wall_distance * self.u_tau[0]) / self.viscosity
u_tau_init = u_tau_target if u_tau_target else velocity_symbol / sp.Float(100)
wall_law = law(u_plus, y_plus)
assignments = [Assignment(self.u_tau[0], u_tau_init), # initial guess
*self.newton_iteration(wall_law), # newton iterations
Assignment(shear_stress_symbol, self.u_tau[-1] ** 2 * density_symbol)] # final result
return assignments
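# Illustrative usage sketch (not part of the library; the symbol names are examples):
# generating the shear-stress assignments of the log law with five Newton steps.
def _demo_log_law_assignments():
    rho, tau_w, u = sp.symbols("rho tau_w u")
    wall_model = LogLaw(viscosity=sp.Symbol("nu"), newton_steps=5)
    asms = wall_model.shear_stress_assignments(
        density_symbol=rho, shear_stress_symbol=tau_w,
        velocity_symbol=u, wall_distance=sp.Rational(1, 2))
    # one initial guess, two assignments per Newton step, one final result
    assert len(asms) == 2 * wall_model.newton_steps + 2
    return asms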
@@ -21,8 +21,8 @@ class ChapmanEnskogAnalysis:
         cqc = method.conserved_quantity_computation
         self._method = method
         self._moment_cache = LbMethodEqMoments(method)
-        self.rho = cqc.defined_symbols(order=0)[1]
-        self.u = cqc.defined_symbols(order=1)[1]
+        self.rho = cqc.density_symbol
+        self.u = cqc.velocity_symbols
         self.t = sp.Symbol("t")
         self.epsilon = sp.Symbol("epsilon")
@@ -370,7 +370,7 @@ def take_moments(eqn, pdf_to_moment_name=(('f', '\\Pi'), ('\\Omega f', '\\Upsilon')),
             if new_f_index is None:
                 rest *= factor
             else:
-                assert not(new_f_index and f_index)
+                assert not (new_f_index and f_index)
                 f_index = new_f_index

     moment_tuple = [0] * len(velocity_terms)
......
 import functools

+import numpy as np
 import sympy as sp

 from lbmpy.chapman_enskog.chapman_enskog import (
......
@@ -150,7 +151,8 @@ class SteadyStateChapmanEnskogAnalysis:
                 have_shape = hasattr(arg, 'shape') and hasattr(new_prod, 'shape')
                 if have_shape and arg.shape == new_prod.shape and arg.shape[1] == 1:
-                    new_prod = sp.matrix_multiply_elementwise(new_prod, arg)
+                    # since sympy 1.9, sp.matrix_multiply_elementwise does not work anymore in this case
+                    new_prod = sp.Matrix(np.multiply(new_prod, arg))
                 else:
                     new_prod = arg * new_prod
                 if new_prod == 0:
......
@@ -6,21 +6,23 @@ import sympy as sp
 from lbmpy.moments import polynomial_to_exponent_representation
 from pystencils.cache import disk_cache, memorycache
-from pystencils.sympyextensions import complete_the_squares_in_exp
+from pystencils.sympyextensions import complete_the_squares_in_exp, scalar_product


 @memorycache()
-def moment_generating_function(generating_function, symbols, symbols_in_result):
+def moment_generating_function(generating_function, symbols, symbols_in_result, velocity=None):
     r"""
     Computes the moment generating function of a probability distribution. It is defined as:

     .. math ::
-        F[f(\mathbf{x})](\mathbf{t}) = \int e^{<\mathbf{x}, \mathbf{t}>} f(x)\; dx
+        F[f(\mathbf{x})](t) = \int e^{<\mathbf{x}, t>} f(\mathbf{x})\; dx

     Args:
         generating_function: sympy expression
-        symbols: a sequence of symbols forming the vector x
+        symbols: a sequence of symbols forming the vector :math:`\mathbf{x}`
         symbols_in_result: a sequence forming the vector t
+        velocity: if the generating function generates central moments, the velocity needs to be subtracted. Thus the
+                  velocity symbols need to be passed. All generating functions need to have the same parameters.

     Returns:
         transformation result F: an expression that depends now on symbols_in_result
......
@@ -55,9 +57,27 @@ def moment_generating_function(generating_function, symbols, symbols_in_result):
     return sp.simplify(result)


-def cumulant_generating_function(func, symbols, symbols_in_result):
+def central_moment_generating_function(func, symbols, symbols_in_result, velocity=sp.symbols("u_:3")):
+    r"""
+    Computes the central moment generating function, which is defined as:
+
+    .. math ::
+        K(\mathbf{\Xi}) = \exp(-\mathbf{\Xi} \cdot \mathbf{u})\, M(\mathbf{\Xi}).
+
+    For parameter description see :func:`moment_generating_function`.
+    """
+    argument = - scalar_product(symbols_in_result, velocity)
+    return sp.exp(argument) * moment_generating_function(func, symbols, symbols_in_result)
+
+
+def cumulant_generating_function(func, symbols, symbols_in_result, velocity=None):
     r"""
-    Computes cumulant generating func, which is the logarithm of the moment generating func.
+    Computes the cumulant generating function, which is the logarithm of the moment generating function:
+
+    .. math ::
+        C(\mathbf{\Xi}) = \log M(\mathbf{\Xi})
+
     For parameter description see :func:`moment_generating_function`.
     """
     return sp.ln(moment_generating_function(func, symbols, symbols_in_result))
......
@@ -93,16 +113,16 @@ def multi_differentiation(generating_function, index, symbols):
 @memorycache(maxsize=512)
-def __continuous_moment_or_cumulant(func, moment, symbols, generating_function):
+def __continuous_moment_or_cumulant(func, moment, symbols, generating_function, velocity=sp.symbols("u_:3")):
     if type(moment) is tuple and not symbols:
         symbols = sp.symbols("xvar yvar zvar")

     dim = len(moment) if type(moment) is tuple else len(symbols)

     # not using sp.Dummy here - since it prohibits caching
-    t = tuple([sp.Symbol("tmpvar_%d" % i, ) for i in range(dim)])
+    t = sp.symbols(f"tmpvar_:{dim}")

     symbols = symbols[:dim]
-    generating_function = generating_function(func, symbols, t)
+    generating_function = generating_function(func, symbols, t, velocity=velocity)

     if type(moment) is tuple:
         return multi_differentiation(generating_function, moment, t)
......
@@ -128,6 +148,18 @@ def continuous_moment(func, moment, symbols=None):
     return __continuous_moment_or_cumulant(func, moment, symbols, moment_generating_function)


+def continuous_central_moment(func, moment, symbols=None, velocity=sp.symbols("u_:3")):
+    """Computes the central moment of a given function.
+
+    Args:
+        func: function to compute moments of
+        moment: tuple or polynomial describing the moment
+        symbols: if moment is given as polynomial, pass the moment symbols, i.e. the dof of the polynomial
+    """
+    return __continuous_moment_or_cumulant(func, moment, symbols, central_moment_generating_function,
+                                           velocity=velocity)
+
+
 def continuous_cumulant(func, moment, symbols=None):
     """Computes cumulant of continuous function.
......
r"""
Creating LBM kernels and Parameter Specifications
-------------------------------------------------
Kernel functions are created in four steps, represented by five
python functions: `create_lb_method`, *create_lb_collision_rule* / *create_lb_update_rule*, `create_lb_ast` and
`create_lb_function`. Each of those functions is configured with three data classes.
One data class defines the lattice Boltzmann method itself. This class is called `LBMConfig`. It defines, for example,
which collision space or which LB stencil should be used.
The second one determines optimisations that are specific to the LBM, for example the
common subexpression elimination. Most of these optimisations act on the assignment level,
i.e. they only manipulate the assignments. This config class is called `LBMOptimisation`.
The third data class determines hardware optimisations. Contrary to the `LBMOptimisation` class,
it acts on the level of the abstract syntax tree. Thus, it is independent of the assignments and of the LBM,
and belongs to pystencils, not lbmpy. It can be found in the pystencils module as
`pystencils.kernelcreation.CreateKernelConfig`. With this class, for example, the target (CPU, GPU etc.)
of the generated code is specified.
1. *Method*:
the method defines the collision process. Currently, there are two big categories:
moment-based and cumulant-based methods. A method defines how each moment or cumulant is relaxed by
storing the equilibrium value and the relaxation rate for each moment/cumulant.
2. *Collision/Update Rule*:
methods can generate a "collision rule", which is an equation collection that defines the
post-collision values as a function of the pre-collision values. On this equation collection,
simplifications are applied to reduce the number of floating point operations.
At this stage, an entropic optimisation step can also be added to determine one relaxation rate by an
entropy condition.
Then a streaming rule is added, which transforms the collision rule into an update rule.
The streaming step depends on the pdf storage (source/destination, AA pattern, EsoTwist).
Currently, only the simple source/destination pattern is supported.
3. *AST*:
the abstract syntax tree describes the structure of the kernel, including loops and conditionals.
The AST can be modified, e.g., to add OpenMP pragmas, reorder loops, or apply other optimisations.
4. *Function*:
this step compiles the AST into an executable function, either for CPU or GPUs. This function
behaves like a normal Python function and runs one LBM time step.
Each stage (apart from *Function*) also adds its result to the given `LBMConfig` object. The `LBMConfig`
thus coalesces all information defining the LBM kernel.
The function :func:`create_lb_function` runs the whole pipeline; the other functions in this module
execute this pipeline only up to a certain step. Each function optionally also takes the result of the previous step.
For example, to modify the AST one can run::
ast = create_lb_ast(...)
# modify ast here
func = create_lb_function(ast=ast, ...)
"""
import copy
from dataclasses import dataclass, field, replace
from typing import Union, List, Tuple, Any, Type, Iterable
from warnings import warn, filterwarnings
from ._compat import IS_PYSTENCILS_2
import sympy as sp
from lbmpy.enums import Stencil, Method, ForceModel, CollisionSpace, SubgridScaleModel
import lbmpy.forcemodels as forcemodels
from lbmpy.fieldaccess import CollideOnlyInplaceAccessor, PdfFieldAccessor, PeriodicTwoFieldsAccessor
from lbmpy.fluctuatinglb import add_fluctuations_to_collision_rule
from lbmpy.partially_saturated_cells import (replace_by_psm_collision_rule, PSMConfig,
add_psm_solid_collision_to_collision_rule)
from lbmpy.non_newtonian_models import add_cassons_model, CassonsParameters
from lbmpy.methods import (create_mrt_orthogonal, create_mrt_raw, create_central_moment,
create_srt, create_trt, create_trt_kbc)
from lbmpy.methods.creationfunctions import CollisionSpaceInfo
from lbmpy.methods.creationfunctions import (
create_with_monomial_cumulants, create_cumulant, create_with_default_polynomial_cumulants)
from lbmpy.methods.momentbased.entropic import add_entropy_condition, add_iterative_entropy_condition
from lbmpy.relaxationrates import relaxation_rate_from_magic_number
from lbmpy.simplificationfactory import create_simplification_strategy
from lbmpy.stencils import LBStencil
from lbmpy.turbulence_models import add_sgs_model
from lbmpy.updatekernels import create_lbm_kernel, create_stream_pull_with_output_kernel
from lbmpy.advanced_streaming.utility import Timestep, get_accessor
from .forcemodels import AbstractForceModel
import pystencils
from pystencils import CreateKernelConfig, create_kernel
from pystencils.cache import disk_cache_no_fallback
from pystencils.field import Field
from pystencils.simp import sympy_cse, SimplificationStrategy
# needed for the docstring
from lbmpy.methods.abstractlbmethod import LbmCollisionRule, AbstractLbMethod
from lbmpy.methods.cumulantbased import CumulantBasedLbMethod
if IS_PYSTENCILS_2:
from pystencils import Kernel as KernelFunction
else:
from pystencils.astnodes import KernelFunction
# Filter out JobLib warnings. They are not useful to the user:
# https://github.com/joblib/joblib/issues/683
filterwarnings("ignore", message="Persisting input arguments took")
@dataclass
class LBMConfig:
"""
**All parameters of the LBMConfig are explained below**
"""
stencil: LBStencil = LBStencil(Stencil.D2Q9)
"""
The LB stencil to be used. All stencils are defined in :class:`lbmpy.enums.Stencil`; from the selected entry,
an :class:`lbmpy.stencils.LBStencil` instance is created.
"""
method: Method = Method.SRT
"""
Name of lattice Boltzmann method. Defined by :class:`lbmpy.enums.Method`.
This determines the selection and relaxation pattern of moments/cumulants, i.e. which moment/cumulant basis is
chosen, and which of the basis vectors are relaxed together
"""
relaxation_rates: Iterable = None
"""
Sequence of relaxation rates; the number depends on the selected method. If you specify more rates than the
method needs, the additional rates are ignored.
If no relaxation rates are specified, the parameter `relaxation_rate` will be consulted.
"""
relaxation_rate: Union[int, float, Type[sp.Symbol]] = None
"""
The method's primary relaxation rate. In most cases, this is the relaxation rate governing shear viscosity.
For SRT, this is the only relaxation rate.
For TRT, the second relaxation rate is then determined via magic number.
In the case of raw moment, central moment, and cumulant-based MRT methods, all other relaxation rates will be
set to unity.
If neither `relaxation_rate` nor `relaxation_rates` is specified, the behaviour is as if
`relaxation_rate=sp.Symbol('omega')` was set.
"""
compressible: bool = False
"""
Affects the selection of equilibrium moments. Both options approximate the *incompressible*
Navier-Stokes equations. When set to `False`, the incompressible approximation is more accurate;
`True` selects the standard, compressible LBM derivation.
"""
zero_centered: bool = True
"""
Governs the storage format of populations. If `False`, the discrete particle distribution vector is stored in its
absolute form. If `True`, instead, only the distribution's deviation from its rest state (typically given by the
lattice weights) is stored.
"""
delta_equilibrium: bool = None
"""
Determines whether or not the (continuous or discrete, see `continuous_equilibrium`) Maxwellian equilibrium is
expressed in its absolute form, or only by its deviation from the rest state (typically given by the reference
density and zero velocity). This parameter is only effective if `zero_centered` is set to `True`. Then, if
`delta_equilibrium` is `False`, the rest state must be reintroduced to the populations during collision. Otherwise,
if `delta_equilibrium` is `True`, the collision equations can be derived using only the deviations from the rest
state.
If `None` is passed to `delta_equilibrium`, its value will be chosen automatically, depending on the value of
`zero_centered` and the chosen `method`.
"""
equilibrium_order: int = 2
"""
Order in velocity at which the equilibrium moment approximation is
truncated. Order 2 is sufficient to approximate Navier-Stokes. This parameter has no effect on cumulant-based
methods, whose equilibrium terms have no contributions above order one.
"""
c_s_sq: sp.Expr = sp.Rational(1, 3)
"""
The squared lattice speed of sound used to derive the LB method. It is very uncommon to use a value different
from 1 / 3.
"""
weighted: bool = True
"""
Affects only orthogonal MRT methods. If set to True, a weighted Gram-Schmidt procedure is used to orthogonalise
the moments.
"""
nested_moments: List[List] = None
"""
A list of lists of modes, grouped by common relaxation times. This is usually used in
conjunction with `lbmpy.methods.default_moment_sets.mrt_orthogonal_modes_literature`.
If this argument is not provided, Gram-Schmidt orthogonalisation of the default modes is performed.
"""
force_model: Union[AbstractForceModel, ForceModel] = None
"""
Force model to determine how forcing terms enter the collision rule.
Possibilities are defined in :class:`lbmpy.enums.ForceModel`.
"""
force: Union[Tuple, Field] = (0, 0, 0)
"""
Either constant force or a symbolic expression depending on field value
"""
continuous_equilibrium: bool = True
"""
Way to compute equilibrium moments/cumulants: if False, the standard discretised LBM equilibrium is used;
otherwise, the equilibrium moments are computed from the continuous Maxwellian. This only makes a
difference for sparse stencils; e.g. D2Q9 and D3Q27 are not affected, while D3Q19 and D3Q15 are.
"""
maxwellian_moments: bool = None
"""
Deprecated and due for removal by version 0.5; use `continuous_equilibrium` instead.
"""
initial_velocity: Tuple = None
"""
Initial velocity in the domain: either a tuple (x, y, z) to set a constant velocity everywhere, or a numpy
array of the same size as the domain, with a last coordinate of shape dim, to set velocities on cell level
"""
galilean_correction: bool = False
"""
Special correction for D3Q27 cumulant LBMs. For Details see
:mod:`lbmpy.methods.cumulantbased.galilean_correction`
"""
fourth_order_correction: Union[float, bool] = False
"""
Special correction for rendering D3Q27 cumulant LBMs fourth-order accurate in diffusion. For Details see
:mod:`lbmpy.methods.cumulantbased.fourth_order_correction`. If set to `True`, the fourth-order correction is
employed without limiters (or more precisely with a very high limiter, practically disabling the limiters). If this
variable is set to a number, the latter is used for the limiters (uniformly for omega_3, omega_4 and omega_5).
"""
collision_space_info: CollisionSpaceInfo = None
"""
Information about the LB method's collision space (see :class:`lbmpy.methods.creationfunctions.CollisionSpaceInfo`)
including the classes defining how populations are transformed to these spaces.
If left at `None`, it will be inferred according to the value of `method`.
If an instance of the :class:`lbmpy.enums.CollisionSpace` enum is passed, a
:class:`lbmpy.methods.creationfunctions.CollisionSpaceInfo` instance with the space's default setup is created.
Otherwise, the selected collision space must be in accord with the chosen :class:`lbmpy.enums.Method`.
"""
entropic: bool = False
"""
In case there are two distinct relaxation rates in a method, one of them (usually the one not
determining the viscosity) can be automatically chosen w.r.t. an entropy condition. For details see
:mod:`lbmpy.methods.momentbased.entropic`
"""
entropic_newton_iterations: int = None
"""
For moment methods the entropy optimum can be calculated in closed form.
For cumulant methods this is not possible; in that case, it is computed using Newton iterations.
This parameter can be used to force Newton iterations and specify how many should be done
"""
omega_output_field: Field = None
"""
A pystencils Field can be passed here, to which the calculated free relaxation rate of
an entropic or subgrid-scale method is written
"""
eddy_viscosity_field: Field = None
"""
A pystencils Field can be passed here, where the eddy-viscosity of a subgrid-scale model is written.
"""
subgrid_scale_model: Union[SubgridScaleModel, tuple[SubgridScaleModel, float], tuple[SubgridScaleModel, int]] = None
"""
Choose a subgrid-scale model (SGS) for large-eddy simulations. ``omega_output_field`` can be set to
write out adapted relaxation rates. Either provide just the SGS and use the default model constants or provide a
tuple of the SGS and its corresponding model constant.
"""
cassons: CassonsParameters = False
"""
Adds the Cassons model according to https://doi.org/10.1007/s10955-005-8415-x
The parameters are set with the ``CassonsParameters`` dataclass.
"""
fluctuating: dict = False
"""
Enables fluctuating lattice Boltzmann by randomizing collision process.
Pass a dictionary with parameters to ``lbmpy.fluctuatinglb.add_fluctuations_to_collision_rule``.
Can only be used for weighted MRT collision operators.
"""
temperature: Any = None
"""
Temperature for fluctuating lattice Boltzmann methods.
"""
psm_config: PSMConfig = None
"""
If a PSM config is specified, (1 - fractionField) is added to the relaxation rates of the collision
and to the potential force term, and a solid collision is built and added to the main assignments.
"""
output: dict = field(default_factory=dict)
"""
A dictionary mapping macroscopic quantities, e.g. the strings 'density' and 'velocity', to pystencils
fields. In each timestep, the corresponding quantities are written to the given fields. A possible input would be:
{'density': density_field, 'velocity': velocity_field}
"""
velocity_input: Field = None
"""
Symbolic field where the velocities are read from. If `None` is given, the velocity is calculated in place
from the first-order moments.
"""
density_input: Field = None
"""
Symbolic field where the density is read from. If `None` is given, the density is calculated in place
from the zeroth-order moment.
"""
conserved_moments: bool = True
"""
Whether the lower-order moments are conserved. If velocity or density input is set, the lower-order moments
are no longer conserved.
"""
kernel_type: Union[str, Type[PdfFieldAccessor]] = 'default_stream_collide'
"""
Supported values: ``'default_stream_collide'`` (default), ``'collide_only'``, ``'stream_pull_only'``.
With ``'default_stream_collide'``, streaming pattern and even/odd time-step (for in-place patterns) can be specified
by the ``streaming_pattern`` and ``timestep`` arguments. For backwards compatibility, ``kernel_type`` also accepts
``'stream_pull_collide'``, ``'collide_stream_push'``, ``'esotwist_even'``, ``'esotwist_odd'``, ``'aa_even'``
and ``'aa_odd'`` for selection of the streaming pattern.
"""
streaming_pattern: str = 'pull'
"""
The streaming pattern to be used with a ``'default_stream_collide'`` kernel. Accepted values are
``'pull'``, ``'push'``, ``'aa'`` and ``'esotwist'``.
"""
timestep: Timestep = Timestep.BOTH
"""
Timestep modulus for the streaming pattern. For two-fields patterns, this argument is irrelevant and
by default set to ``Timestep.BOTH``. For in-place patterns, ``Timestep.EVEN`` or ``Timestep.ODD`` must be specified.
"""
field_name: str = 'src'
"""
Name of the PDF field.
"""
temporary_field_name: str = 'dst'
"""
Name of the temporary PDF field.
"""
lb_method: Type[AbstractLbMethod] = None
"""
Instance of `lbmpy.methods.abstractlbmethod.AbstractLbMethod`. If this parameter is `None`, the lb_method is derived
via `create_lb_method`.
"""
collision_rule: LbmCollisionRule = None
"""
Instance of :class:`lbmpy.methods.LbmCollisionRule`. If this parameter is `None`,
the collision rule is derived via *create_lb_collision_rule*.
"""
update_rule: LbmCollisionRule = None
"""
Instance of :class:`lbmpy.methods.LbmCollisionRule`. If this parameter is `None`,
the update rule is derived via *create_lb_update_rule*.
"""
ast: KernelFunction = None
"""
Instance of *pystencils.KernelFunction*. If this parameter is `None`,
the ast is derived via `create_lb_ast`.
"""
def __post_init__(self):
if isinstance(self.method, str):
new_method = Method[self.method.upper()]
warn(f'Method "{self.method}" as str is deprecated. Use {new_method} instead')
self.method = new_method
if self.maxwellian_moments is not None:
warn("Argument 'maxwellian_moments' is deprecated and will be removed by version 0.5."
"Use `continuous_equilibrium` instead.")
self.continuous_equilibrium = self.maxwellian_moments
if not isinstance(self.stencil, LBStencil):
self.stencil = LBStencil(self.stencil)
if self.relaxation_rates is None:
# Fall back to regularized method
if self.relaxation_rate is None:
self.relaxation_rate = sp.Symbol("omega")
# if only a single relaxation rate is defined,
# it is internally treated as a list with one element and just sets the relaxation_rates parameter
if self.relaxation_rate is not None:
if self.method in [Method.TRT, Method.TRT_KBC_N1, Method.TRT_KBC_N2, Method.TRT_KBC_N3, Method.TRT_KBC_N4]:
self.relaxation_rates = [self.relaxation_rate,
relaxation_rate_from_magic_number(self.relaxation_rate)]
else:
self.relaxation_rates = [self.relaxation_rate]
# Incompressible cumulant method is not available
if not self.compressible and self.method in (Method.MONOMIAL_CUMULANT, Method.CUMULANT):
raise ValueError("Incompressible cumulant-based methods are not supported (yet).")
if self.zero_centered and self.entropic:
raise ValueError("Entropic methods can only be created with `zero_centered=False`.")
# Check or infer delta-equilibrium
if self.delta_equilibrium is not None:
# Must be zero-centered
if self.delta_equilibrium:
if not self.zero_centered:
raise ValueError("`delta_equilibrium=True` requires `zero_centered=True`!")
# Must not be a cumulant-method
if self.method in (Method.MONOMIAL_CUMULANT, Method.CUMULANT):
raise ValueError("Cannot create a cumulant-based method from a delta-equilibrium!")
else:
if self.zero_centered:
if self.method in (Method.CENTRAL_MOMENT, Method.MONOMIAL_CUMULANT, Method.CUMULANT):
self.delta_equilibrium = False
else:
self.delta_equilibrium = True
else:
self.delta_equilibrium = False
# Check or infer collision space
if isinstance(self.collision_space_info, CollisionSpace):
self.collision_space_info = CollisionSpaceInfo(self.collision_space_info)
if self.collision_space_info is not None:
if (self.entropic or self.fluctuating) \
and self.collision_space_info.collision_space != CollisionSpace.POPULATIONS:
# Workaround until entropic method supports relaxation in subexpressions
# and the problem with RNGs in the assignment collection has been solved
raise ValueError("Entropic and Fluctuating methods are only available in population space.")
elif not self.collision_space_info.collision_space.compatible(self.method):
raise ValueError("Given method is not compatible with given collision space.")
else:
if self.method in {Method.SRT, Method.TRT,
Method.TRT_KBC_N1, Method.TRT_KBC_N2, Method.TRT_KBC_N3, Method.TRT_KBC_N4}:
self.collision_space_info = CollisionSpaceInfo(CollisionSpace.POPULATIONS)
elif self.entropic or self.fluctuating:
self.collision_space_info = CollisionSpaceInfo(CollisionSpace.POPULATIONS)
elif self.method in {Method.MRT_RAW, Method.MRT}:
self.collision_space_info = CollisionSpaceInfo(CollisionSpace.RAW_MOMENTS)
elif self.method in {Method.CENTRAL_MOMENT}:
self.collision_space_info = CollisionSpaceInfo(CollisionSpace.CENTRAL_MOMENTS)
elif self.method in {Method.MONOMIAL_CUMULANT, Method.CUMULANT}:
self.collision_space_info = CollisionSpaceInfo(CollisionSpace.CUMULANTS)
else:
raise Exception(f"No default collision space is given for method {self.method}."
"This is a bug; please report it to the developers.")
# for backwards compatibility
kernel_type_to_streaming_pattern = {
'stream_pull_collide': ('pull', Timestep.BOTH),
'collide_stream_push': ('push', Timestep.BOTH),
'aa_even': ('aa', Timestep.EVEN),
'aa_odd': ('aa', Timestep.ODD),
'esotwist_even': ('esotwist', Timestep.EVEN),
'esotwist_odd': ('esotwist', Timestep.ODD)
}
if self.kernel_type in kernel_type_to_streaming_pattern.keys():
self.streaming_pattern, self.timestep = kernel_type_to_streaming_pattern[self.kernel_type]
self.kernel_type = 'default_stream_collide'
if isinstance(self.force, Field):
self.force = tuple([self.force(i) for i in range(self.stencil.D)])
force_not_zero = any(f_i != 0 for f_i in self.force)
if self.force_model is None and force_not_zero:
if self.method == Method.CUMULANT:
self.force_model = forcemodels.CentralMoment(self.force[:self.stencil.D])
else:
self.force_model = forcemodels.Guo(self.force[:self.stencil.D])
force_model_dict = {
'simple': forcemodels.Simple,
'luo': forcemodels.Luo,
'guo': forcemodels.Guo,
'schiller': forcemodels.Guo,
'buick': forcemodels.Buick,
'silva': forcemodels.Buick,
'edm': forcemodels.EDM,
'kupershtokh': forcemodels.EDM,
'he': forcemodels.He,
'shanchen': forcemodels.ShanChen,
'centralmoment': forcemodels.CentralMoment
}
if self.psm_config is not None and self.psm_config.fraction_field is not None:
self.force = [(1.0 - self.psm_config.fraction_field_symbol) * f for f in self.force]
if isinstance(self.force_model, str):
new_force_model = ForceModel[self.force_model.upper()]
warn(f'ForceModel "{self.force_model}" as str is deprecated. Use {new_force_model} instead or '
f'provide a class of type AbstractForceModel', category=DeprecationWarning)
force_model_class = force_model_dict[new_force_model.name.lower()]
self.force_model = force_model_class(force=self.force[:self.stencil.D])
elif isinstance(self.force_model, ForceModel):
force_model_class = force_model_dict[self.force_model.name.lower()]
self.force_model = force_model_class(force=self.force[:self.stencil.D])
if self.density_input or self.velocity_input:
self.conserved_moments = False
@dataclass
class LBMOptimisation:
"""
**All parameters of the LBMOptimisation are explained below**
"""
cse_pdfs: bool = False
"""
Run common subexpression elimination for opposing stencil directions.
"""
cse_global: bool = False
"""
Run common subexpression elimination after all other simplifications have been executed.
"""
simplification: Union[str, bool, SimplificationStrategy] = 'auto'
"""
Simplifications applied during the derivation of the collision rule. If ``True`` or ``'auto'``,
a default simplification strategy is selected according to the type of the method;
see :func:`lbmpy.simplificationfactory.create_simplification_strategy`.
If ``False``, no simplification is applied.
Otherwise, the given simplification strategy will be applied.
"""
pre_simplification: bool = True
"""
Simplifications applied during the derivation of the collision rule for cumulant LBMs.
For details see :mod:`lbmpy.moment_transforms`.
"""
split: bool = False
"""
Split innermost loop, to handle only two directions per loop. This reduces the number of parallel
load/store streams and thus speeds up the kernel on most architectures.
"""
field_size: Any = None
"""
Create kernel for fixed field size.
"""
field_layout: str = 'fzyx'
"""
``'c'`` or ``'numpy'`` for standard numpy layout, ``'reverse_numpy'`` or ``'f'`` for fortran
layout. This does not apply if ``pdf_arr`` was given; in that case, the same layout as ``pdf_arr`` is used.
"""
symbolic_field: pystencils.field.Field = None
"""
Pystencils field for source (pdf field that is read)
"""
symbolic_temporary_field: pystencils.field.Field = None
"""
Pystencils field for temporary (pdf field that is written in stream, or stream-collide)
"""
builtin_periodicity: Tuple[bool] = (False, False, False)
"""
Instead of handling periodicity by copying ghost layers, the periodicity
is built into the kernel. This parameter specifies whether the domain is periodic in the (x, y, z) directions. Even if the
periodicity is built into the kernel, the fields have one ghost layer to be consistent with other functions.
"""
def create_lb_function(ast=None, lbm_config=None, lbm_optimisation=None, config=None, optimization=None, **kwargs):
"""Creates a Python function for the LB method"""
lbm_config, lbm_optimisation, config = update_with_default_parameters(kwargs, optimization,
lbm_config, lbm_optimisation, config)
if lbm_config.ast is not None:
ast = lbm_config.ast
if ast is None:
ast = create_lb_ast(lbm_config.update_rule, lbm_config=lbm_config,
lbm_optimisation=lbm_optimisation, config=config)
res = ast.compile()
res.method = ast.method
res.update_rule = ast.update_rule
return res
def create_lb_ast(update_rule=None, lbm_config=None, lbm_optimisation=None, config=None, optimization=None, **kwargs):
"""Creates a pystencils AST for the LB method"""
lbm_config, lbm_optimisation, config = update_with_default_parameters(kwargs, optimization,
lbm_config, lbm_optimisation, config)
if lbm_config.update_rule is not None:
update_rule = lbm_config.update_rule
if update_rule is None:
update_rule = create_lb_update_rule(lbm_config.collision_rule, lbm_config=lbm_config,
lbm_optimisation=lbm_optimisation, config=config)
config = replace(config, ghost_layers=1)
ast = create_kernel(update_rule, config=config)
ast.method = update_rule.method
ast.update_rule = update_rule
lbm_config.ast = ast
return ast
@disk_cache_no_fallback
def create_lb_update_rule(collision_rule=None, lbm_config=None, lbm_optimisation=None, config=None,
optimization=None, **kwargs):
"""Creates an update rule (list of Assignments) for a LB method that describe a full sweep"""
lbm_config, lbm_optimisation, config = update_with_default_parameters(kwargs, optimization,
lbm_config, lbm_optimisation, config)
if lbm_config.collision_rule is not None:
collision_rule = lbm_config.collision_rule
if collision_rule is None:
collision_rule = create_lb_collision_rule(lbm_config.lb_method, lbm_config=lbm_config,
lbm_optimisation=lbm_optimisation,
config=config)
lb_method = collision_rule.method
if IS_PYSTENCILS_2:
fallback_field_data_type = config.get_option("default_dtype")
else:
fallback_field_data_type = config.data_type[lbm_config.field_name].numpy_dtype
q = collision_rule.method.stencil.Q
if lbm_optimisation.symbolic_field is not None:
src_field = lbm_optimisation.symbolic_field
elif lbm_optimisation.field_size:
field_size = tuple([s + 2 for s in lbm_optimisation.field_size] + [q])
src_field = Field.create_fixed_size(lbm_config.field_name, field_size, index_dimensions=1,
layout=lbm_optimisation.field_layout, dtype=fallback_field_data_type)
else:
src_field = Field.create_generic(lbm_config.field_name, spatial_dimensions=collision_rule.method.dim,
index_shape=(q,), layout=lbm_optimisation.field_layout,
dtype=fallback_field_data_type)
if lbm_optimisation.symbolic_temporary_field is not None:
dst_field = lbm_optimisation.symbolic_temporary_field
else:
dst_field = src_field.new_field_with_different_name(lbm_config.temporary_field_name)
kernel_type = lbm_config.kernel_type
if kernel_type == 'stream_pull_only':
update_rule = create_stream_pull_with_output_kernel(lb_method, src_field, dst_field, lbm_config.output)
else:
if kernel_type == 'default_stream_collide':
if lbm_config.streaming_pattern == 'pull' and any(lbm_optimisation.builtin_periodicity):
accessor = PeriodicTwoFieldsAccessor(lbm_optimisation.builtin_periodicity, ghost_layers=1)
else:
accessor = get_accessor(lbm_config.streaming_pattern, lbm_config.timestep)
elif kernel_type == 'collide_only':
accessor = CollideOnlyInplaceAccessor
elif isinstance(kernel_type, PdfFieldAccessor):
accessor = kernel_type
else:
raise ValueError("Invalid value of parameter 'kernel_type'", lbm_config.kernel_type)
update_rule = create_lbm_kernel(collision_rule, src_field, dst_field, accessor)
lbm_config.update_rule = update_rule
return update_rule
@disk_cache_no_fallback
def create_lb_collision_rule(lb_method=None, lbm_config=None, lbm_optimisation=None, config=None,
optimization=None, **kwargs):
"""Creates a collision rule (list of Assignments) for a LB method describing the collision operator (no stream)"""
lbm_config, lbm_optimisation, config = update_with_default_parameters(kwargs, optimization,
lbm_config, lbm_optimisation, config)
if lbm_config.lb_method is not None:
lb_method = lbm_config.lb_method
if lb_method is None:
lb_method = create_lb_method(lbm_config)
cqc = lb_method.conserved_quantity_computation
rho_in = lbm_config.density_input
u_in = lbm_config.velocity_input
if u_in is not None and isinstance(u_in, Field):
u_in = u_in.center_vector
if rho_in is not None and isinstance(rho_in, Field):
rho_in = rho_in.center
pre_simplification = lbm_optimisation.pre_simplification
if rho_in is not None or u_in is not None:
cqe = cqc.equilibrium_input_equations_from_pdfs(lb_method.pre_collision_pdf_symbols)
cqe_main_assignments = cqe.main_assignments_dict
if rho_in is not None:
if u_in is None:
raise ValueError("When setting 'density_input' parameter, "
"'velocity_input' has to be specified as well.")
cqe_main_assignments[cqc.density_symbol] = rho_in
cqe_main_assignments[cqc.density_deviation_symbol] = rho_in - cqc.background_density
if u_in is not None:
for u_sym, u in zip(cqc.velocity_symbols, u_in):
cqe_main_assignments[u_sym] = u
cqe.set_main_assignments_from_dict(cqe_main_assignments)
cqe = cqe.new_without_unused_subexpressions()
collision_rule = lb_method.get_collision_rule(conserved_quantity_equations=cqe,
pre_simplification=pre_simplification)
else:
collision_rule = lb_method.get_collision_rule(pre_simplification=pre_simplification)
if lbm_config.galilean_correction:
from lbmpy.methods.cumulantbased import add_galilean_correction
collision_rule = add_galilean_correction(collision_rule)
if lbm_config.fourth_order_correction:
from lbmpy.methods.cumulantbased import add_fourth_order_correction
# must provide a second relaxation rate in implementation; defaults to 1
if len(lbm_config.relaxation_rates) == 1:
lbm_config.relaxation_rates.append(1)
cumulant_limiter = 1e6 if lbm_config.fourth_order_correction is True else lbm_config.fourth_order_correction
collision_rule = add_fourth_order_correction(collision_rule=collision_rule,
shear_relaxation_rate=lbm_config.relaxation_rates[0],
bulk_relaxation_rate=lbm_config.relaxation_rates[1],
limiter=cumulant_limiter)
if lbm_config.psm_config is not None:
if lbm_config.psm_config.fraction_field is None or lbm_config.psm_config.object_velocity_field is None:
raise ValueError("Specify a fraction and object velocity field in the PSM Config")
collision_rule = replace_by_psm_collision_rule(collision_rule, lbm_config.psm_config)
if lbm_config.entropic:
if lbm_config.subgrid_scale_model or lbm_config.cassons:
raise ValueError("Choose either entropic, subgrid-scale or cassons")
if lbm_config.entropic_newton_iterations:
if isinstance(lbm_config.entropic_newton_iterations, bool):
iterations = 3
else:
iterations = lbm_config.entropic_newton_iterations
collision_rule = add_iterative_entropy_condition(collision_rule, newton_iterations=iterations,
omega_output_field=lbm_config.omega_output_field)
else:
collision_rule = add_entropy_condition(collision_rule, omega_output_field=lbm_config.omega_output_field)
elif lbm_config.subgrid_scale_model:
if lbm_config.cassons:
raise ValueError("Cassons model can not be combined with a subgrid-scale model")
model_constant = None
sgs_model = lbm_config.subgrid_scale_model
if isinstance(lbm_config.subgrid_scale_model, tuple):
sgs_model = lbm_config.subgrid_scale_model[0]
model_constant = lbm_config.subgrid_scale_model[1]
collision_rule = add_sgs_model(collision_rule=collision_rule, subgrid_scale_model=sgs_model,
model_constant=model_constant, omega_output_field=lbm_config.omega_output_field,
eddy_viscosity_field=lbm_config.eddy_viscosity_field)
if 'split_groups' in collision_rule.simplification_hints:
collision_rule.simplification_hints['split_groups'][0].append(sp.Symbol("sgs_omega"))
elif lbm_config.cassons:
collision_rule = add_cassons_model(collision_rule, parameter=lbm_config.cassons,
omega_output_field=lbm_config.omega_output_field)
if lbm_config.output:
output_eqs = cqc.output_equations_from_pdfs(lb_method.pre_collision_pdf_symbols, lbm_config.output)
collision_rule = collision_rule.new_merged(output_eqs)
if lbm_optimisation.simplification is True or lbm_optimisation.simplification == 'auto':
simplification = create_simplification_strategy(lb_method, split_inner_loop=lbm_optimisation.split)
elif callable(lbm_optimisation.simplification):
simplification = lbm_optimisation.simplification
else:
simplification = SimplificationStrategy()
collision_rule = simplification(collision_rule)
if isinstance(collision_rule.method, CumulantBasedLbMethod):
from lbmpy.methods.cumulantbased.cumulant_simplifications import check_for_logarithms
check_for_logarithms(collision_rule)
if lbm_config.fluctuating:
add_fluctuations_to_collision_rule(collision_rule, **lbm_config.fluctuating)
if lbm_optimisation.cse_pdfs:
from lbmpy.methods.momentbased.momentbasedsimplifications import cse_in_opposing_directions
collision_rule = cse_in_opposing_directions(collision_rule)
if lbm_optimisation.cse_global:
collision_rule = sympy_cse(collision_rule)
lbm_config.collision_rule = collision_rule
return collision_rule
def create_lb_method(lbm_config=None, **params):
"""Creates a LB method, defined by moments/cumulants for collision space, equilibrium and relaxation rates."""
lbm_config, _, _ = update_with_default_parameters(params, lbm_config=lbm_config)
relaxation_rates = lbm_config.relaxation_rates
dim = lbm_config.stencil.D
if isinstance(lbm_config.force, Field):
lbm_config.force = tuple(lbm_config.force(i) for i in range(dim))
if lbm_config.psm_config is None:
fraction_field = None
else:
fraction_field = lbm_config.psm_config.fraction_field_symbol
common_params = {
'compressible': lbm_config.compressible,
'zero_centered': lbm_config.zero_centered,
'delta_equilibrium': lbm_config.delta_equilibrium,
'equilibrium_order': lbm_config.equilibrium_order,
'force_model': lbm_config.force_model,
'continuous_equilibrium': lbm_config.continuous_equilibrium,
'c_s_sq': lbm_config.c_s_sq,
'collision_space_info': lbm_config.collision_space_info,
'fraction_field': fraction_field,
}
cumulant_params = {
'zero_centered': lbm_config.zero_centered,
'force_model': lbm_config.force_model,
'c_s_sq': lbm_config.c_s_sq,
'collision_space_info': lbm_config.collision_space_info,
'fraction_field': fraction_field,
}
if lbm_config.method == Method.SRT:
assert len(relaxation_rates) >= 1, "Not enough relaxation rates"
method = create_srt(lbm_config.stencil, relaxation_rates[0], **common_params)
elif lbm_config.method == Method.TRT:
assert len(relaxation_rates) >= 2, "Not enough relaxation rates"
method = create_trt(lbm_config.stencil, relaxation_rates[0], relaxation_rates[1], **common_params)
elif lbm_config.method == Method.MRT:
method = create_mrt_orthogonal(lbm_config.stencil, relaxation_rates, weighted=lbm_config.weighted,
nested_moments=lbm_config.nested_moments,
conserved_moments=lbm_config.conserved_moments, **common_params)
elif lbm_config.method == Method.CENTRAL_MOMENT:
method = create_central_moment(lbm_config.stencil, relaxation_rates,
nested_moments=lbm_config.nested_moments,
conserved_moments=lbm_config.conserved_moments, **common_params)
elif lbm_config.method == Method.MRT_RAW:
method = create_mrt_raw(lbm_config.stencil, relaxation_rates,
conserved_moments=lbm_config.conserved_moments, **common_params)
elif lbm_config.method in (Method.TRT_KBC_N1, Method.TRT_KBC_N2, Method.TRT_KBC_N3, Method.TRT_KBC_N4):
if lbm_config.stencil.D == 2 and lbm_config.stencil.Q == 9:
dim = 2
elif lbm_config.stencil.D == 3 and lbm_config.stencil.Q == 27:
dim = 3
else:
raise NotImplementedError("KBC type TRT methods can only be constructed for D2Q9 and D3Q27 stencils")
method_nr = lbm_config.method.name[-1]
method = create_trt_kbc(dim, relaxation_rates[0], relaxation_rates[1], 'KBC-N' + method_nr, **common_params)
elif lbm_config.method == Method.CUMULANT:
if lbm_config.fourth_order_correction:
if lbm_config.stencil.D != 3 or lbm_config.stencil.Q != 27:
raise ValueError("Fourth-order correction can only be applied to D3Q27 cumulant methods.")
assert len(relaxation_rates) <= 2, "Optimal parametrisation for fourth-order cumulants needs either one " \
"or two relaxation rates, associated with the shear (and bulk) " \
"viscosity. All other relaxation rates are automatically chosen " \
"optimally"
# define method in terms of symbolic relaxation rates and assign optimal values later
from lbmpy.methods.cumulantbased.fourth_order_correction import FOURTH_ORDER_RELAXATION_RATE_SYMBOLS
relaxation_rates = FOURTH_ORDER_RELAXATION_RATE_SYMBOLS
if lbm_config.nested_moments is not None:
method = create_cumulant(lbm_config.stencil, relaxation_rates, lbm_config.nested_moments,
conserved_moments=lbm_config.conserved_moments, **cumulant_params)
else:
method = create_with_default_polynomial_cumulants(lbm_config.stencil, relaxation_rates, **cumulant_params)
elif lbm_config.method == Method.MONOMIAL_CUMULANT:
method = create_with_monomial_cumulants(lbm_config.stencil, relaxation_rates,
conserved_moments=lbm_config.conserved_moments, **cumulant_params)
else:
raise ValueError("Failed to create LB method. Please use lbmpy.enums.Method for the creation")
# >>Entropic methods can only be created for methods with two relaxation rates: one free relaxation rate
# determining the viscosity and one to be determined by the entropy condition.<<
# Thus we fix the relaxation rate of the conserved quantities to one of the two relaxation rates, because zero
# would be recognised as a third relaxation rate here.
if lbm_config.entropic:
method.set_conserved_moments_relaxation_rate(relaxation_rates[0])
lbm_config.lb_method = method
return method
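# Illustrative usage sketch (not part of the module; parameter values are examples):
# create_lb_method alone yields the symbolic method object, which can be inspected
# before any code generation happens.
def _demo_create_lb_method():
    method = create_lb_method(LBMConfig(stencil=LBStencil(Stencil.D2Q9), method=Method.TRT,
                                        relaxation_rate=sp.Symbol("omega")))
    return method.relaxation_rates  # per-moment relaxation rates of the TRT scheme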
def create_psm_update_rule(lbm_config, lbm_optimisation):
if IS_PYSTENCILS_2:
raise NotImplementedError(
"`create_psm_update_rule` is not yet available when using pystencils 2.0. "
"To instead derive a (potentially less efficient) PSM kernel without branches, "
"use `create_lb_update_rule` with a `PsmConfig` object instead."
)
from pystencils.astnodes import Conditional, Block
from pystencils.node_collection import NodeCollection
if lbm_config.psm_config is None:
raise ValueError("Specify a PSM Config in the LBM Config, when creating a psm update rule")
config_without_particles = copy.deepcopy(lbm_config)
config_without_particles.psm_config.max_particles_per_cell = 0
lb_update_rule = create_lb_update_rule(
lbm_config=config_without_particles, lbm_optimisation=lbm_optimisation)
node_collection = lb_update_rule.all_assignments
if lbm_config.psm_config.individual_fraction_field is None:
assert lbm_config.psm_config.max_particles_per_cell == 1
fraction_field = lbm_config.psm_config.fraction_field
else:
fraction_field = lbm_config.psm_config.individual_fraction_field
for p in range(lbm_config.psm_config.max_particles_per_cell):
psm_solid_collision = add_psm_solid_collision_to_collision_rule(lb_update_rule, lbm_config, p)
psm_update_rule = create_lb_update_rule(
collision_rule=psm_solid_collision, lbm_config=lbm_config, lbm_optimisation=lbm_optimisation)
node_collection.append(
Conditional(
fraction_field.center(p) > 0.0,
Block(psm_update_rule.all_assignments),
)
)
return NodeCollection(node_collection)
# ----------------------------------------------------------------------------------------------------------------------
def update_with_default_parameters(params, opt_params=None, lbm_config=None, lbm_optimisation=None, config=None):
# Fix CreateKernelConfig params
pystencils_config_params = ['target', 'backend', 'cpu_openmp', 'double_precision', 'gpu_indexing',
'gpu_indexing_params', 'cpu_vectorize_info']
if opt_params is not None:
config_params = {k: v for k, v in opt_params.items() if k in pystencils_config_params}
else:
config_params = {}
if 'double_precision' in config_params:
if config_params['double_precision']:
config_params['data_type'] = 'float64'
else:
config_params['data_type'] = 'float32'
del config_params['double_precision']
if not config:
config = CreateKernelConfig(**config_params)
else:
for k, v in config_params.items():
if not hasattr(config, k):
raise KeyError(f'{k} is not a valid kwarg. Please look in CreateKernelConfig for valid settings')
config = replace(config, **config_params)
lbm_opt_params = ['cse_pdfs', 'cse_global', 'simplification', 'pre_simplification', 'split', 'field_size',
'field_layout', 'symbolic_field', 'symbolic_temporary_field', 'builtin_periodicity']
if opt_params is not None:
opt_params_dict = {k: v for k, v in opt_params.items() if k in lbm_opt_params}
else:
opt_params_dict = {}
if not lbm_optimisation:
lbm_optimisation = LBMOptimisation(**opt_params_dict)
else:
for k, v in opt_params_dict.items():
if not hasattr(lbm_optimisation, k):
raise KeyError(f'{k} is not a valid kwarg. Please look in LBMOptimisation for valid settings')
lbm_optimisation = replace(lbm_optimisation, **opt_params_dict)
if params is None:
params = {}
if not lbm_config:
lbm_config = LBMConfig(**params)
else:
for k, v in params.items():
if not hasattr(lbm_config, k):
raise KeyError(f'{k} is not a valid kwarg. Please look in LBMConfig for valid settings')
lbm_config = replace(lbm_config, **params)
return lbm_config, lbm_optimisation, config
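# Illustrative usage sketch (not part of the module; the values are examples): the
# legacy keyword interface is translated into the three configuration objects.
def _demo_update_with_default_parameters():
    lbm_config, lbm_opt, config = update_with_default_parameters(
        params={'stencil': LBStencil(Stencil.D2Q9), 'relaxation_rate': 1.8},
        opt_params={'cse_global': True})
    assert lbm_config.relaxation_rates[0] == 1.8
    assert lbm_opt.cse_global is True
    return lbm_config, lbm_opt, config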