Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (43), showing 728 additions and 514 deletions
pystencils/_version.py export-subst src/pystencils/_version.py export-subst
stages: stages:
- pretest - pretest
- test - test
- nightly
- docs
- deploy - deploy
# -------------------------- Templates ------------------------------------------------------------------------------------
# Base configuration for jobs meant to run at every commit
.every-commit:
rules:
- if: $CI_PIPELINE_SOURCE != "schedule"
# Configuration for jobs meant to run on each commit to pycodegen/pystencils/master
.every-commit-master:
rules:
- if: '$CI_PIPELINE_SOURCE != "schedule" && $CI_PROJECT_PATH == "pycodegen/pystencils" && $CI_COMMIT_BRANCH == "master"'
# Base configuration for jobs meant to run at a schedule
.scheduled:
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
# -------------------------- Tests ------------------------------------------------------------------------------------ # -------------------------- Tests ------------------------------------------------------------------------------------
# Normal test - runs all but the "long run" tests on every commit # Normal test - runs all but the "long run" tests on every commit
tests-and-coverage: tests-and-coverage:
stage: pretest stage: pretest
except: extends: .every-commit
variables: image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full:cupy12.3
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
before_script: before_script:
- pip install -e . - pip install -e .
script: script:
...@@ -45,7 +62,7 @@ tests-and-coverage-with-longrun: ...@@ -45,7 +62,7 @@ tests-and-coverage-with-longrun:
stage: test stage: test
when: manual when: manual
allow_failure: true allow_failure: true
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full:cupy12.3
before_script: before_script:
- pip install sympy --upgrade - pip install sympy --upgrade
- pip install -e . - pip install -e .
...@@ -65,9 +82,7 @@ tests-and-coverage-with-longrun: ...@@ -65,9 +82,7 @@ tests-and-coverage-with-longrun:
# pipeline with latest python version # pipeline with latest python version
latest-python: latest-python:
stage: test stage: test
except: extends: .every-commit
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
before_script: before_script:
- pip install -e . - pip install -e .
...@@ -92,9 +107,6 @@ latest-python: ...@@ -92,9 +107,6 @@ latest-python:
# Minimal tests in windows environment # Minimal tests in windows environment
#minimal-windows: #minimal-windows:
# stage: test # stage: test
# except:
# variables:
# - $ENABLE_NIGHTLY_BUILDS
# tags: # tags:
# - win # - win
# script: # script:
...@@ -108,9 +120,7 @@ latest-python: ...@@ -108,9 +120,7 @@ latest-python:
ubuntu: ubuntu:
stage: test stage: test
except: extends: .every-commit
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/ubuntu image: i10git.cs.fau.de:5005/pycodegen/pycodegen/ubuntu
before_script: before_script:
- ln -s /usr/include/locale.h /usr/include/xlocale.h - ln -s /usr/include/locale.h /usr/include/xlocale.h
...@@ -134,9 +144,8 @@ ubuntu: ...@@ -134,9 +144,8 @@ ubuntu:
.multiarch_template: .multiarch_template:
stage: test stage: test
except: extends: .every-commit
variables: allow_failure: true
- $ENABLE_NIGHTLY_BUILDS
before_script: &multiarch_before_script before_script: &multiarch_before_script
# - pip3 install -v . # - pip3 install -v .
- export PYTHONPATH=src - export PYTHONPATH=src
...@@ -149,13 +158,19 @@ ubuntu: ...@@ -149,13 +158,19 @@ ubuntu:
- sed -i 's/--doctest-modules //g' pytest.ini - sed -i 's/--doctest-modules //g' pytest.ini
- env - env
- pip3 list - pip3 list
- python3 -m pytest -v -n $NUM_CORES --junitxml=report.xml tests/test_*vec*.py tests/test_random.py tests/test_half_precision.py - python3 -m pytest -v -n $NUM_CORES --cov-report html --cov-report xml --cov=. --junitxml=report.xml tests/test_*vec*.py tests/test_random.py tests/test_half_precision.py
- python3 -m coverage xml
tags: tags:
- docker - docker
- AVX - AVX
artifacts: artifacts:
when: always when: always
paths:
- coverage_report
reports: reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
junit: report.xml junit: report.xml
arm64v8: arm64v8:
...@@ -181,29 +196,25 @@ arm64v9: ...@@ -181,29 +196,25 @@ arm64v9:
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/arm64 image: i10git.cs.fau.de:5005/pycodegen/pycodegen/arm64
before_script: before_script:
- *multiarch_before_script - *multiarch_before_script
- sed -i s/march=native/march=armv8-a+sve/g ~/.config/pystencils/config.json - sed -i s/march=native/march=armv9-a+sve2+sme/g ~/.config/pystencils/config.json
- sed -i s/g\+\+/clang++/g ~/.config/pystencils/config.json - sed -i s/g\+\+/clang++/g ~/.config/pystencils/config.json
riscv64: riscv64:
# RISC-V vector extensions are currently not supported by GCC. # RISC-V vector extensions are currently not supported by GCC.
# Also, the image is built without the libomp package, which is not yet available on Ubuntu.
extends: .multiarch_template extends: .multiarch_template
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/riscv64 image: i10git.cs.fau.de:5005/pycodegen/pycodegen/riscv64
variables: variables:
# explicitly set SIMD as detection does not appear to work on QEMU # explicitly set SIMD as detection requires QEMU >= 8.1
PYSTENCILS_SIMD: "rvv" PYSTENCILS_SIMD: "rvv"
QEMU_CPU: "rv64,v=true" QEMU_CPU: "rv64,v=true,zicboz=true"
before_script: before_script:
- *multiarch_before_script - *multiarch_before_script
- sed -i 's/march=native/march=rv64imfdv/g' ~/.config/pystencils/config.json - sed -i 's/march=native/march=rv64imfdvzicboz/g' ~/.config/pystencils/config.json
- sed -i s/g\+\+/clang++/g ~/.config/pystencils/config.json - sed -i s/g\+\+/clang++-15/g ~/.config/pystencils/config.json
- sed -i 's/fopenmp/fopenmp=libgomp -I\/usr\/include\/riscv64-linux-gnu/g' ~/.config/pystencils/config.json
minimal-conda: minimal-conda:
stage: pretest stage: pretest
except: extends: .every-commit
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda
before_script: before_script:
- pip install -e . - pip install -e .
...@@ -216,9 +227,7 @@ minimal-conda: ...@@ -216,9 +227,7 @@ minimal-conda:
minimal-sympy-master: minimal-sympy-master:
stage: test stage: test
except: extends: .every-commit
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda
before_script: before_script:
- pip install -e . - pip install -e .
...@@ -275,14 +284,42 @@ pycodegen-integration: ...@@ -275,14 +284,42 @@ pycodegen-integration:
reports: reports:
junit: pycodegen/*/report.xml junit: pycodegen/*/report.xml
# -------------------- Scheduled Tasks --------------------------------------------------------------------------
# Nightly test against the latest (pre-release) version of SymPy published on PyPI
nightly-sympy:
stage: nightly
needs: []
extends: .scheduled
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
before_script:
- pip install -e .
- pip install --upgrade --pre sympy
script:
- env
- pip list
- export NUM_CORES=$(nproc --all)
- mkdir -p ~/.config/matplotlib
- echo "backend:template" > ~/.config/matplotlib/matplotlibrc
- mkdir public
- pytest -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
tags:
- docker
- AVX
- cuda
artifacts:
when: always
reports:
junit: report.xml
# -------------------- Linter & Documentation -------------------------------------------------------------------------- # -------------------- Linter & Documentation --------------------------------------------------------------------------
flake8-lint: flake8-lint:
stage: pretest stage: pretest
except: extends: .every-commit
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
script: script:
- flake8 src/pystencils - flake8 src/pystencils
...@@ -291,8 +328,10 @@ flake8-lint: ...@@ -291,8 +328,10 @@ flake8-lint:
build-documentation: build-documentation:
stage: test stage: docs
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/documentation image: i10git.cs.fau.de:5005/pycodegen/pycodegen/documentation
needs: []
before_script: before_script:
- pip install -e . - pip install -e .
script: script:
...@@ -308,7 +347,9 @@ build-documentation: ...@@ -308,7 +347,9 @@ build-documentation:
pages: pages:
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
extends: .every-commit-master
stage: deploy stage: deploy
needs: ["tests-and-coverage", "build-documentation"]
script: script:
- ls -l - ls -l
- mv coverage_report html_doc - mv coverage_report html_doc
...@@ -318,5 +359,3 @@ pages: ...@@ -318,5 +359,3 @@ pages:
- public - public
tags: tags:
- docker - docker
only:
- master@pycodegen/pystencils
...@@ -12,7 +12,7 @@ authors = [ ...@@ -12,7 +12,7 @@ authors = [
] ]
license = { file = "COPYING.txt" } license = { file = "COPYING.txt" }
requires-python = ">=3.10" requires-python = ">=3.10"
dependencies = ["sympy>=1.6,<=1.11.1", "numpy>=1.8.0", "appdirs", "joblib", "pyyaml"] dependencies = ["sympy>=1.9,<=1.12.1", "numpy>=1.8.0", "appdirs", "joblib", "pyyaml", "fasteners"]
classifiers = [ classifiers = [
"Development Status :: 4 - Beta", "Development Status :: 4 - Beta",
"Framework :: Jupyter", "Framework :: Jupyter",
...@@ -70,8 +70,7 @@ tests = [ ...@@ -70,8 +70,7 @@ tests = [
[build-system] [build-system]
requires = [ requires = [
"setuptools>=61", "setuptools>=61",
"versioneer>=0.29", "versioneer[toml]>=0.29",
"tomli; python_version < '3.11'",
# 'Cython' # 'Cython'
] ]
build-backend = "setuptools.build_meta" build-backend = "setuptools.build_meta"
......
...@@ -36,7 +36,5 @@ __all__ = ['Field', 'FieldType', 'fields', ...@@ -36,7 +36,5 @@ __all__ = ['Field', 'FieldType', 'fields',
'fd', 'fd',
'stencil'] 'stencil']
from ._version import get_versions from . import _version
__version__ = _version.get_versions()['version']
__version__ = get_versions()['version']
del get_versions
...@@ -5,12 +5,12 @@ from typing import Any, List, Optional, Sequence, Set, Union ...@@ -5,12 +5,12 @@ from typing import Any, List, Optional, Sequence, Set, Union
import sympy as sp import sympy as sp
import pystencils from pystencils.assignment import Assignment
from pystencils.typing.utilities import create_type, get_next_parent_of_type
from pystencils.enums import Target, Backend from pystencils.enums import Target, Backend
from pystencils.field import Field from pystencils.field import Field
from pystencils.typing.typed_sympy import FieldPointerSymbol, FieldShapeSymbol, FieldStrideSymbol, TypedSymbol
from pystencils.sympyextensions import fast_subs from pystencils.sympyextensions import fast_subs
from pystencils.typing import (create_type, get_next_parent_of_type,
FieldPointerSymbol, FieldShapeSymbol, FieldStrideSymbol, TypedSymbol, CFunction)
NodeOrExpr = Union['Node', sp.Expr] NodeOrExpr = Union['Node', sp.Expr]
...@@ -270,6 +270,9 @@ class KernelFunction(Node): ...@@ -270,6 +270,9 @@ class KernelFunction(Node):
parameters = [self.Parameter(symbol, get_fields(symbol)) for symbol in argument_symbols] parameters = [self.Parameter(symbol, get_fields(symbol)) for symbol in argument_symbols]
if hasattr(self, 'indexing'): if hasattr(self, 'indexing'):
parameters += [self.Parameter(s, []) for s in self.indexing.symbolic_parameters()] parameters += [self.Parameter(s, []) for s in self.indexing.symbolic_parameters()]
# Exclude parameters of type CFunction. These parameters will result in a C function call that will be handled
# by including a respective header file in the compute kernel. Hence, they are not free parameters.
parameters = [p for p in parameters if not isinstance(p.symbol, CFunction)]
parameters.sort(key=lambda p: p.symbol.name) parameters.sort(key=lambda p: p.symbol.name)
return parameters return parameters
...@@ -387,7 +390,7 @@ class Block(Node): ...@@ -387,7 +390,7 @@ class Block(Node):
def symbols_defined(self): def symbols_defined(self):
result = set() result = set()
for a in self.args: for a in self.args:
if isinstance(a, pystencils.Assignment): if isinstance(a, Assignment):
result.update(a.free_symbols) result.update(a.free_symbols)
else: else:
result.update(a.symbols_defined) result.update(a.symbols_defined)
...@@ -398,7 +401,7 @@ class Block(Node): ...@@ -398,7 +401,7 @@ class Block(Node):
result = set() result = set()
defined_symbols = set() defined_symbols = set()
for a in self.args: for a in self.args:
if isinstance(a, pystencils.Assignment): if isinstance(a, Assignment):
result.update(a.free_symbols) result.update(a.free_symbols)
defined_symbols.update({a.lhs}) defined_symbols.update({a.lhs})
else: else:
......
from pystencils.typing import CFunction
def get_argument_string(function_shortcut, first=''): def get_argument_string(function_shortcut, first=''):
args = function_shortcut[function_shortcut.index('[') + 1: -1] args = function_shortcut[function_shortcut.index('[') + 1: -1]
arg_string = "(" arg_string = "("
...@@ -16,10 +19,13 @@ def get_argument_string(function_shortcut, first=''): ...@@ -16,10 +19,13 @@ def get_argument_string(function_shortcut, first=''):
def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'): def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'):
if instruction_set != 'neon' and not instruction_set.startswith('sve'): if instruction_set not in ['neon', 'sme'] and not instruction_set.startswith('sve'):
raise NotImplementedError(instruction_set) raise NotImplementedError(instruction_set)
if instruction_set == 'sve': if instruction_set in ['sve', 'sve2', 'sme']:
cmp = 'cmp'
elif instruction_set.startswith('sve2') and instruction_set not in ('sve256', 'sve2048'):
cmp = 'cmp' cmp = 'cmp'
bitwidth = int(instruction_set[4:])
elif instruction_set.startswith('sve'): elif instruction_set.startswith('sve'):
cmp = 'cmp' cmp = 'cmp'
bitwidth = int(instruction_set[3:]) bitwidth = int(instruction_set[3:])
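The branches above distinguish length-agnostic and fixed-width ARM instruction-set names. A hedged, standalone sketch of that naming convention (my own helper, not the pystencils function itself): 'sve', 'sve2' and 'sme' are vector-length agnostic, names such as 'sve512' or 'sve2128' carry a fixed bit width, and 'sve256'/'sve2048' start with 'sve2' but are fixed-width plain-SVE names, which is why they are excluded from the sve2 branch.

def parse_arm_isa(name):
    # length-agnostic targets: NEON (fixed 128 bit) and scalable SVE/SVE2/SME
    if name in ('neon', 'sve', 'sve2', 'sme'):
        return name, None
    if name.startswith('sve2') and name not in ('sve256', 'sve2048'):
        return 'sve2', int(name[4:])    # e.g. 'sve2128' -> ('sve2', 128)
    if name.startswith('sve'):
        return 'sve', int(name[3:])     # e.g. 'sve512'  -> ('sve', 512)
    raise NotImplementedError(name)

assert parse_arm_isa('sve256') == ('sve', 256)
assert parse_arm_isa('sve2048') == ('sve', 2048)
assert parse_arm_isa('sve2128') == ('sve2', 128)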
...@@ -52,7 +58,7 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'): ...@@ -52,7 +58,7 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'):
result = dict() result = dict()
if instruction_set == 'sve': if instruction_set in ['sve', 'sve2', 'sme']:
width = 'svcntd()' if data_type == 'double' else 'svcntw()' width = 'svcntd()' if data_type == 'double' else 'svcntw()'
intwidth = 'svcntw()' intwidth = 'svcntw()'
result['bytes'] = 'svcntb()' result['bytes'] = 'svcntb()'
...@@ -60,14 +66,15 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'): ...@@ -60,14 +66,15 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'):
width = bitwidth // bits[data_type] width = bitwidth // bits[data_type]
intwidth = bitwidth // bits['int'] intwidth = bitwidth // bits['int']
result['bytes'] = bitwidth // 8 result['bytes'] = bitwidth // 8
if instruction_set.startswith('sve'): if instruction_set.startswith('sve') or instruction_set == 'sme':
base_names['stream'] = 'stnt1[0, 1]'
prefix = 'sv' prefix = 'sv'
suffix = f'_f{bits[data_type]}' suffix = f'_f{bits[data_type]}'
elif instruction_set == 'neon': elif instruction_set == 'neon':
prefix = 'v' prefix = 'v'
suffix = f'q_f{bits[data_type]}' suffix = f'q_f{bits[data_type]}'
if instruction_set == 'sve': if instruction_set in ['sve', 'sve2', 'sme']:
predicate = f'{prefix}whilelt_b{bits[data_type]}_u64({{loop_counter}}, {{loop_stop}})' predicate = f'{prefix}whilelt_b{bits[data_type]}_u64({{loop_counter}}, {{loop_stop}})'
int_predicate = f'{prefix}whilelt_b{bits["int"]}_u64({{loop_counter}}, {{loop_stop}})' int_predicate = f'{prefix}whilelt_b{bits["int"]}_u64({{loop_counter}}, {{loop_stop}})'
else: else:
...@@ -86,33 +93,36 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'): ...@@ -86,33 +93,36 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'):
result[intrinsic_id] = prefix + name + suffix + undef + arg_string result[intrinsic_id] = prefix + name + suffix + undef + arg_string
if instruction_set == 'sve': if instruction_set in ['sve', 'sve2', 'sme']:
from pystencils.backends.cbackend import CFunction
result['width'] = CFunction(width, "int") result['width'] = CFunction(width, "int")
result['intwidth'] = CFunction(intwidth, "int") result['intwidth'] = CFunction(intwidth, "int")
else: else:
result['width'] = width result['width'] = width
result['intwidth'] = intwidth result['intwidth'] = intwidth
if instruction_set.startswith('sve'): if instruction_set.startswith('sve') or instruction_set == 'sme':
result['makeVecConst'] = f'svdup_f{bits[data_type]}' + '({0})' result['makeVecConst'] = f'svdup_f{bits[data_type]}' + '({0})'
result['makeVecConstInt'] = f'svdup_s{bits["int"]}' + '({0})' result['makeVecConstInt'] = f'svdup_s{bits["int"]}' + '({0})'
result['makeVecIndex'] = f'svindex_s{bits["int"]}' + '({0}, {1})' result['makeVecIndex'] = f'svindex_s{bits["int"]}' + '({0}, {1})'
vindex = f'svindex_u{bits[data_type]}(0, {{0}})' if instruction_set != 'sme':
result['storeS'] = f'svst1_scatter_u{bits[data_type]}index_f{bits[data_type]}({predicate}, {{0}}, ' + \ vindex = f'svindex_u{bits[data_type]}(0, {{0}})'
vindex.format("{2}") + ', {1})' result['storeS'] = f'svst1_scatter_u{bits[data_type]}index_f{bits[data_type]}({predicate}, {{0}}, ' + \
result['loadS'] = f'svld1_gather_u{bits[data_type]}index_f{bits[data_type]}({predicate}, {{0}}, ' + \ vindex.format("{2}") + ', {1})'
vindex.format("{1}") + ')' result['loadS'] = f'svld1_gather_u{bits[data_type]}index_f{bits[data_type]}({predicate}, {{0}}, ' + \
vindex.format("{1}") + ')'
if instruction_set.startswith('sve2') and instruction_set not in ('sve256', 'sve2048'):
result['streamS'] = f'svstnt1_scatter_u{bits[data_type]}offset_f{bits[data_type]}({predicate}, {{0}}, ' + \
vindex.format(f"{{2}}*{bits[data_type]//8}") + ', {1})'
result['+int'] = f"svadd_s{bits['int']}_x({int_predicate}, " + "{0}, {1})" result['+int'] = f"svadd_s{bits['int']}_x({int_predicate}, " + "{0}, {1})"
result['float'] = f'svfloat{bits["float"]}_{"s" if instruction_set != "sve" else ""}t' result['float'] = f'svfloat{bits["float"]}_{"s" if instruction_set not in ["sve", "sve2", "sme"] else ""}t'
result['double'] = f'svfloat{bits["double"]}_{"s" if instruction_set != "sve" else ""}t' result['double'] = f'svfloat{bits["double"]}_{"s" if instruction_set not in ["sve", "sve2", "sme"] else ""}t'
result['int'] = f'svint{bits["int"]}_{"s" if instruction_set != "sve" else ""}t' result['int'] = f'svint{bits["int"]}_{"s" if instruction_set not in ["sve", "sve2", "sme"] else ""}t'
result['bool'] = f'svbool_{"s" if instruction_set != "sve" else ""}t' result['bool'] = f'svbool_{"s" if instruction_set not in ["sve", "sve2", "sme"] else ""}t'
result['headers'] = ['<arm_sve.h>', '"arm_neon_helpers.h"'] result['headers'] = ['<arm_sve.h>', '<arm_acle.h>', '"arm_neon_helpers.h"']
result['&'] = f'svand_b_z({predicate},' + ' {0}, {1})' result['&'] = f'svand_b_z({predicate},' + ' {0}, {1})'
result['|'] = f'svorr_b_z({predicate},' + ' {0}, {1})' result['|'] = f'svorr_b_z({predicate},' + ' {0}, {1})'
...@@ -121,9 +131,17 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'): ...@@ -121,9 +131,17 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'):
result['all'] = f'svcntp_b{bits[data_type]}({predicate}, {{0}}) == {width}' result['all'] = f'svcntp_b{bits[data_type]}({predicate}, {{0}}) == {width}'
result['maskStoreU'] = result['storeU'].replace(predicate, '{2}') result['maskStoreU'] = result['storeU'].replace(predicate, '{2}')
result['maskStoreS'] = result['storeS'].replace(predicate, '{3}') result['maskStream'] = result['stream'].replace(predicate, '{2}')
if instruction_set != 'sme':
result['maskStoreS'] = result['storeS'].replace(predicate, '{3}')
if instruction_set.startswith('sve2') and instruction_set not in ('sve256', 'sve2048'):
result['maskStreamS'] = result['streamS'].replace(predicate, '{3}')
if instruction_set != 'sve': result['streamFence'] = '__dmb(15)'
if instruction_set == 'sme':
result['function_prefix'] = '__attribute__((arm_locally_streaming))'
elif instruction_set not in ['sve', 'sve2', 'sme']:
result['compile_flags'] = [f'-msve-vector-bits={bitwidth}'] result['compile_flags'] = [f'-msve-vector-bits={bitwidth}']
else: else:
result['makeVecConst'] = f'vdupq_n_f{bits[data_type]}' + '({0})' result['makeVecConst'] = f'vdupq_n_f{bits[data_type]}' + '({0})'
...@@ -148,7 +166,9 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'): ...@@ -148,7 +166,9 @@ def get_vector_instruction_set_arm(data_type='double', instruction_set='neon'):
result['any'] = f'vaddlvq_u8(vreinterpretq_u8_u{bits[data_type]}({{0}})) > 0' result['any'] = f'vaddlvq_u8(vreinterpretq_u8_u{bits[data_type]}({{0}})) > 0'
result['all'] = f'vaddlvq_u8(vreinterpretq_u8_u{bits[data_type]}({{0}})) == 16*0xff' result['all'] = f'vaddlvq_u8(vreinterpretq_u8_u{bits[data_type]}({{0}})) == 16*0xff'
# SVE has real nontemporal stores, so we only need to zero cachelines on Neon
result['cachelineZero'] = 'cachelineZero((void*) {0})'
result['cachelineSize'] = 'cachelineSize()' result['cachelineSize'] = 'cachelineSize()'
result['cachelineZero'] = 'cachelineZero((void*) {0})'
return result return result
...@@ -6,7 +6,6 @@ from typing import Set ...@@ -6,7 +6,6 @@ from typing import Set
import numpy as np import numpy as np
import sympy as sp import sympy as sp
from sympy.core import S from sympy.core import S
from sympy.core.cache import cacheit
from sympy.logic.boolalg import BooleanFalse, BooleanTrue from sympy.logic.boolalg import BooleanFalse, BooleanTrue
from sympy.functions.elementary.trigonometric import TrigonometricFunction, InverseTrigonometricFunction from sympy.functions.elementary.trigonometric import TrigonometricFunction, InverseTrigonometricFunction
from sympy.functions.elementary.hyperbolic import HyperbolicFunction from sympy.functions.elementary.hyperbolic import HyperbolicFunction
...@@ -15,7 +14,7 @@ from pystencils.astnodes import KernelFunction, LoopOverCoordinate, Node ...@@ -15,7 +14,7 @@ from pystencils.astnodes import KernelFunction, LoopOverCoordinate, Node
from pystencils.cpu.vectorization import vec_all, vec_any, CachelineSize from pystencils.cpu.vectorization import vec_all, vec_any, CachelineSize
from pystencils.typing import ( from pystencils.typing import (
PointerType, VectorType, CastFunc, create_type, get_type_of_expression, PointerType, VectorType, CastFunc, create_type, get_type_of_expression,
ReinterpretCastFunc, VectorMemoryAccess, BasicType, TypedSymbol) ReinterpretCastFunc, VectorMemoryAccess, BasicType, TypedSymbol, CFunction)
from pystencils.enums import Backend from pystencils.enums import Backend
from pystencils.fast_approximation import fast_division, fast_inv_sqrt, fast_sqrt from pystencils.fast_approximation import fast_division, fast_inv_sqrt, fast_sqrt
from pystencils.functions import DivFunc, AddressOf from pystencils.functions import DivFunc, AddressOf
...@@ -166,23 +165,6 @@ class PrintNode(CustomCodeNode): ...@@ -166,23 +165,6 @@ class PrintNode(CustomCodeNode):
self.headers.append("<iostream>") self.headers.append("<iostream>")
class CFunction(TypedSymbol):
def __new__(cls, function, dtype):
return CFunction.__xnew_cached_(cls, function, dtype)
def __new_stage2__(cls, function, dtype):
return super(CFunction, cls).__xnew__(cls, function, dtype)
__xnew__ = staticmethod(__new_stage2__)
__xnew_cached_ = staticmethod(cacheit(__new_stage2__))
def __getnewargs__(self):
return self.name, self.dtype
def __getnewargs_ex__(self):
return (self.name, self.dtype), {}
# ------------------------------------------- Printer ------------------------------------------------------------------ # ------------------------------------------- Printer ------------------------------------------------------------------
...@@ -280,14 +262,25 @@ class CBackend: ...@@ -280,14 +262,25 @@ class CBackend:
if type(lhs_type) is VectorType and isinstance(node.lhs, CastFunc): if type(lhs_type) is VectorType and isinstance(node.lhs, CastFunc):
arg, data_type, aligned, nontemporal, mask, stride = node.lhs.args arg, data_type, aligned, nontemporal, mask, stride = node.lhs.args
instr = 'storeU' instr = 'storeU'
if aligned: if nontemporal and 'storeA' not in self._vector_instruction_set and \
'stream' in self._vector_instruction_set:
instr = 'stream'
elif aligned:
instr = 'stream' if nontemporal and 'stream' in self._vector_instruction_set else 'storeA' instr = 'stream' if nontemporal and 'stream' in self._vector_instruction_set else 'storeA'
if mask != True: # NOQA if mask != True: # NOQA
instr = 'maskStoreA' if aligned else 'maskStoreU' instr = 'maskStream' if nontemporal and 'maskStream' in self._vector_instruction_set else \
'maskStoreA' if aligned else 'maskStoreU'
if instr not in self._vector_instruction_set: if instr not in self._vector_instruction_set:
self._vector_instruction_set[instr] = self._vector_instruction_set['store' + instr[-1]].format( if instr == 'maskStream' and 'stream' in self._vector_instruction_set:
store, load = 'stream', 'loadA'
elif (instr in ('maskStream', 'maskStoreA')) and 'storeA' in self._vector_instruction_set:
store, load = 'storeA', 'loadA'
else:
store, load = 'storeU', 'loadU'
load = load if load in self._vector_instruction_set else 'loadU'
self._vector_instruction_set[instr] = self._vector_instruction_set[store].format(
'{0}', self._vector_instruction_set['blendv'].format( '{0}', self._vector_instruction_set['blendv'].format(
self._vector_instruction_set['load' + instr[-1]].format('{0}', **self._kwargs), self._vector_instruction_set[load].format('{0}', **self._kwargs),
'{1}', '{2}', **self._kwargs), **self._kwargs) '{1}', '{2}', **self._kwargs), **self._kwargs)
printed_mask = self.sympy_printer.doprint(mask) printed_mask = self.sympy_printer.doprint(mask)
if data_type.base_type.c_name == 'double': if data_type.base_type.c_name == 'double':
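The hunk above changes how the vector store intrinsic is chosen. A hedged sketch of that selection (simplified names, not the actual CBackend code): nontemporal stores now prefer a true 'stream' instruction even on targets without an aligned store (e.g. SVE), masked nontemporal stores prefer 'maskStream', and masked variants an instruction set lacks are synthesized from a load/blendv/store sequence as shown above.

def pick_store(ins_set, aligned, nontemporal, masked):
    instr = 'storeU'
    if nontemporal and 'storeA' not in ins_set and 'stream' in ins_set:
        instr = 'stream'
    elif aligned:
        instr = 'stream' if nontemporal and 'stream' in ins_set else 'storeA'
    if masked:
        instr = ('maskStream' if nontemporal and 'maskStream' in ins_set
                 else 'maskStoreA' if aligned else 'maskStoreU')
    return instr

assert pick_store({'storeU', 'stream'}, aligned=False, nontemporal=True, masked=False) == 'stream'
assert pick_store({'storeU', 'storeA'}, aligned=True, nontemporal=False, masked=True) == 'maskStoreA'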
...@@ -312,12 +305,14 @@ class CBackend: ...@@ -312,12 +305,14 @@ class CBackend:
ptr = "&" + self.sympy_printer.doprint(node.lhs.args[0]) ptr = "&" + self.sympy_printer.doprint(node.lhs.args[0])
if stride != 1: if stride != 1:
instr = 'maskStoreS' if mask != True else 'storeS' # NOQA instr = ('maskStreamS' if nontemporal and 'maskStreamS' in self._vector_instruction_set else
'maskStoreS') if mask != True else \
('streamS' if nontemporal and 'streamS' in self._vector_instruction_set else 'storeS') # NOQA
return self._vector_instruction_set[instr].format(ptr, self.sympy_printer.doprint(rhs), return self._vector_instruction_set[instr].format(ptr, self.sympy_printer.doprint(rhs),
stride, printed_mask, **self._kwargs) + ';' stride, printed_mask, **self._kwargs) + ';'
pre_code = '' pre_code = ''
if nontemporal and 'cachelineZero' in self._vector_instruction_set: if nontemporal and 'cachelineZero' in self._vector_instruction_set and mask == True: # NOQA
first_cond = f"((uintptr_t) {ptr} & {CachelineSize.mask_symbol}) == 0" first_cond = f"((uintptr_t) {ptr} & {CachelineSize.mask_symbol}) == 0"
offset = sp.Add(*[sp.Symbol(LoopOverCoordinate.get_loop_counter_name(i)) offset = sp.Add(*[sp.Symbol(LoopOverCoordinate.get_loop_counter_name(i))
* node.lhs.args[0].field.spatial_strides[i] for i in * node.lhs.args[0].field.spatial_strides[i] for i in
...@@ -337,15 +332,22 @@ class CBackend: ...@@ -337,15 +332,22 @@ class CBackend:
code2 = self._vector_instruction_set['flushCacheline'].format( code2 = self._vector_instruction_set['flushCacheline'].format(
ptr, self.sympy_printer.doprint(rhs), **self._kwargs) + ';' ptr, self.sympy_printer.doprint(rhs), **self._kwargs) + ';'
code = f"{code}\nif ({flushcond}) {{\n\t{code2}\n}}" code = f"{code}\nif ({flushcond}) {{\n\t{code2}\n}}"
elif nontemporal and 'storeAAndFlushCacheline' in self._vector_instruction_set: elif aligned and nontemporal and 'storeAAndFlushCacheline' in self._vector_instruction_set:
lhs_hash = hashlib.sha1(self.sympy_printer.doprint(node.lhs).encode('ascii')).hexdigest()[:8] lhs_hash = hashlib.sha1(self.sympy_printer.doprint(node.lhs).encode('ascii')).hexdigest()[:8]
rhs_hash = hashlib.sha1(self.sympy_printer.doprint(rhs).encode('ascii')).hexdigest()[:8] rhs_hash = hashlib.sha1(self.sympy_printer.doprint(rhs).encode('ascii')).hexdigest()[:8]
tmpvar = f'_tmp_{lhs_hash}_{rhs_hash}' tmpvar = f'_tmp_{lhs_hash}_{rhs_hash}'
code = 'const ' + self._print(node.lhs.dtype).replace(' const', '') + ' ' + tmpvar + ' = ' \ code = 'const ' + self._print(node.lhs.dtype).replace(' const', '') + ' ' + tmpvar + ' = ' \
+ self.sympy_printer.doprint(rhs) + ';' + self.sympy_printer.doprint(rhs) + ';'
code1 = self._vector_instruction_set[instr].format(ptr, tmpvar, printed_mask, **self._kwargs) + ';' code1 = self._vector_instruction_set[instr].format(ptr, tmpvar, printed_mask, **self._kwargs) + ';'
code2 = self._vector_instruction_set['storeAAndFlushCacheline'].format(ptr, tmpvar, printed_mask, maskStore, store, load = 'maskStoreAAndFlushCacheline', 'storeAAndFlushCacheline', 'loadA'
**self._kwargs) + ';' instr2 = maskStore if mask != True else store # NOQA
if instr2 not in self._vector_instruction_set:
self._vector_instruction_set[maskStore] = self._vector_instruction_set[store].format(
'{0}', self._vector_instruction_set['blendv'].format(
self._vector_instruction_set[load].format('{0}', **self._kwargs),
'{1}', '{2}', **self._kwargs),
**self._kwargs)
code2 = self._vector_instruction_set[instr2].format(ptr, tmpvar, printed_mask, **self._kwargs) + ';'
code += f"\nif ({flushcond}) {{\n\t{code2}\n}} else {{\n\t{code1}\n}}" code += f"\nif ({flushcond}) {{\n\t{code2}\n}} else {{\n\t{code1}\n}}"
return pre_code + code return pre_code + code
else: else:
...@@ -614,7 +616,7 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter): ...@@ -614,7 +616,7 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
return None return None
def _print_Abs(self, expr): def _print_Abs(self, expr):
if 'abs' in self.instruction_set and isinstance(expr.args[0], VectorMemoryAccess): if isinstance(get_type_of_expression(expr), (VectorType, VectorMemoryAccess)):
return self.instruction_set['abs'].format(self._print(expr.args[0]), **self._kwargs) return self.instruction_set['abs'].format(self._print(expr.args[0]), **self._kwargs)
return super()._print_Abs(expr) return super()._print_Abs(expr)
......
from pystencils.typing import CFunction
def get_argument_string(function_shortcut, last=''): def get_argument_string(function_shortcut, last=''):
args = function_shortcut[function_shortcut.index('[') + 1: -1] args = function_shortcut[function_shortcut.index('[') + 1: -1]
arg_string = "(" arg_string = "("
...@@ -34,7 +37,7 @@ def get_vector_instruction_set_riscv(data_type='double', instruction_set='rvv'): ...@@ -34,7 +37,7 @@ def get_vector_instruction_set_riscv(data_type='double', instruction_set='rvv'):
'maskStoreU': f'se{bits[data_type]}_v[2, 0, 1]', 'maskStoreU': f'se{bits[data_type]}_v[2, 0, 1]',
'loadS': f'lse{bits[data_type]}_v[0, 1]', 'loadS': f'lse{bits[data_type]}_v[0, 1]',
'storeS': f'sse{bits[data_type]}_v[0, 2, 1]', 'storeS': f'sse{bits[data_type]}_v[0, 2, 1]',
'maskStoreS': f'sse{bits[data_type]}_v[2, 0, 3, 1]', 'maskStoreS': f'sse{bits[data_type]}_v[3, 0, 2, 1]',
'abs': 'fabs_v[0]', 'abs': 'fabs_v[0]',
'==': 'mfeq_vv[0, 1]', '==': 'mfeq_vv[0, 1]',
...@@ -78,7 +81,6 @@ def get_vector_instruction_set_riscv(data_type='double', instruction_set='rvv'): ...@@ -78,7 +81,6 @@ def get_vector_instruction_set_riscv(data_type='double', instruction_set='rvv'):
result[intrinsic_id] = prefix + name + suffix2 + arg_string result[intrinsic_id] = prefix + name + suffix2 + arg_string
from pystencils.backends.cbackend import CFunction
result['width'] = CFunction(width, "int") result['width'] = CFunction(width, "int")
result['intwidth'] = CFunction(intwidth, "int") result['intwidth'] = CFunction(intwidth, "int")
...@@ -89,7 +91,7 @@ def get_vector_instruction_set_riscv(data_type='double', instruction_set='rvv'): ...@@ -89,7 +91,7 @@ def get_vector_instruction_set_riscv(data_type='double', instruction_set='rvv'):
result['storeS'] = result['storeS'].replace('{2}', f'{{2}}*{bits[data_type]//8}') result['storeS'] = result['storeS'].replace('{2}', f'{{2}}*{bits[data_type]//8}')
result['loadS'] = result['loadS'].replace('{1}', f'{{1}}*{bits[data_type]//8}') result['loadS'] = result['loadS'].replace('{1}', f'{{1}}*{bits[data_type]//8}')
result['maskStoreS'] = result['maskStoreS'].replace('{3}', f'{{3}}*{bits[data_type]//8}') result['maskStoreS'] = result['maskStoreS'].replace('{2}', f'{{2}}*{bits[data_type]//8}')
result['+int'] = f"vadd_vv_i{bits['int']}m1({{0}}, {{1}}, {int_vl})" result['+int'] = f"vadd_vv_i{bits['int']}m1({{0}}, {{1}}, {int_vl})"
...@@ -98,9 +100,12 @@ def get_vector_instruction_set_riscv(data_type='double', instruction_set='rvv'): ...@@ -98,9 +100,12 @@ def get_vector_instruction_set_riscv(data_type='double', instruction_set='rvv'):
result['int'] = f'vint{bits["int"]}m1_t' result['int'] = f'vint{bits["int"]}m1_t'
result['bool'] = f'vbool{bits[data_type]}_t' result['bool'] = f'vbool{bits[data_type]}_t'
result['headers'] = ['<riscv_vector.h>'] result['headers'] = ['<riscv_vector.h>', '"riscv_v_helpers.h"']
result['any'] += ' > 0x0' result['any'] += ' > 0x0'
result['all'] += f' == vsetvl_e{bits[data_type]}m1({vl})' result['all'] += f' == vsetvl_e{bits[data_type]}m1({vl})'
result['cachelineSize'] = 'cachelineSize()'
result['cachelineZero'] = 'cachelineZero((void*) {0})'
return result return result
import os import os
import platform import platform
from ctypes import CDLL from ctypes import CDLL, c_int, c_size_t, sizeof, byref
from warnings import warn from warnings import warn
import numpy as np import numpy as np
...@@ -22,7 +22,7 @@ def get_vector_instruction_set(data_type='double', instruction_set='avx'): ...@@ -22,7 +22,7 @@ def get_vector_instruction_set(data_type='double', instruction_set='avx'):
type_name = numpy_name_to_c(np.dtype(data_type).name) type_name = numpy_name_to_c(np.dtype(data_type).name)
if instruction_set in ['neon'] or instruction_set.startswith('sve'): if instruction_set in ['neon', 'sme'] or instruction_set.startswith('sve'):
return get_vector_instruction_set_arm(type_name, instruction_set) return get_vector_instruction_set_arm(type_name, instruction_set)
elif instruction_set in ['vsx']: elif instruction_set in ['vsx']:
return get_vector_instruction_set_ppc(type_name, instruction_set) return get_vector_instruction_set_ppc(type_name, instruction_set)
...@@ -38,21 +38,35 @@ def get_supported_instruction_sets(): ...@@ -38,21 +38,35 @@ def get_supported_instruction_sets():
if 'PYSTENCILS_SIMD' in os.environ: if 'PYSTENCILS_SIMD' in os.environ:
return os.environ['PYSTENCILS_SIMD'].split(',') return os.environ['PYSTENCILS_SIMD'].split(',')
if platform.system() == 'Darwin' and platform.machine() == 'arm64': if platform.system() == 'Darwin' and platform.machine() == 'arm64':
return ['neon'] result = ['neon']
libc = CDLL('/usr/lib/libc.dylib')
value = c_int(0)
size = c_size_t(sizeof(value))
status = libc.sysctlbyname(b"hw.optional.arm.FEAT_SME", byref(value), byref(size), None, 0)
if status == 0 and value.value == 1:
result.insert(0, "sme")
return result
elif platform.system() == 'Windows' and platform.machine() == 'ARM64': elif platform.system() == 'Windows' and platform.machine() == 'ARM64':
return ['neon'] return ['neon']
elif platform.system() == 'Linux' and platform.machine() == 'aarch64': elif platform.system() == 'Linux' and platform.machine() == 'aarch64':
result = ['neon'] # Neon is mandatory on 64-bit ARM result = ['neon'] # Neon is mandatory on 64-bit ARM
libc = CDLL('libc.so.6') libc = CDLL('libc.so.6')
hwcap = libc.getauxval(16) # AT_HWCAP hwcap = libc.getauxval(16) # AT_HWCAP
hwcap2 = libc.getauxval(26) # AT_HWCAP2
if hwcap & (1 << 22): # HWCAP_SVE if hwcap & (1 << 22): # HWCAP_SVE
if hwcap2 & (1 << 1): # HWCAP2_SVE2
name = 'sve2'
else:
name = 'sve'
length = 8 * libc.prctl(51, 0, 0, 0, 0) # PR_SVE_GET_VL length = 8 * libc.prctl(51, 0, 0, 0, 0) # PR_SVE_GET_VL
if length < 0: if length < 0:
raise OSError("SVE length query failed") raise OSError("SVE length query failed")
while length >= 128: while length >= 128:
result.append(f"sve{length}") result.append(f"{name}{length}")
length //= 2 length //= 2
result.append("sve") result.append(name)
if hwcap2 & (1 << 23): # HWCAP2_SME
result.insert(0, "sme") # prepend to list so it is not automatically chosen as best instruction set
return result return result
elif platform.system() == 'Linux' and platform.machine().startswith('riscv'): elif platform.system() == 'Linux' and platform.machine().startswith('riscv'):
libc = CDLL('libc.so.6') libc = CDLL('libc.so.6')
......
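The Linux/AArch64 branch above decides between plain SVE and SVE2 via the HWCAP bits, enumerates the available vector lengths with PR_SVE_GET_VL, and prepends 'sme' so it is never chosen as the default instruction set. A condensed, hedged restatement of that logic (constants taken from the diff):

from ctypes import CDLL

AT_HWCAP, AT_HWCAP2 = 16, 26
HWCAP_SVE, HWCAP2_SVE2, HWCAP2_SME = 1 << 22, 1 << 1, 1 << 23
PR_SVE_GET_VL = 51

def arm_linux_instruction_sets():
    libc = CDLL('libc.so.6')
    result = ['neon']                            # NEON is mandatory on 64-bit ARM
    hwcap, hwcap2 = libc.getauxval(AT_HWCAP), libc.getauxval(AT_HWCAP2)
    if hwcap & HWCAP_SVE:
        name = 'sve2' if hwcap2 & HWCAP2_SVE2 else 'sve'
        length = 8 * libc.prctl(PR_SVE_GET_VL, 0, 0, 0, 0)
        while length >= 128:                     # also advertise every halved fixed length
            result.append(f"{name}{length}")
            length //= 2
        result.append(name)
    if hwcap2 & HWCAP2_SME:
        result.insert(0, 'sme')                  # prepend so it is not picked automatically
    return result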
...@@ -35,11 +35,11 @@ class FlagInterface: ...@@ -35,11 +35,11 @@ class FlagInterface:
>>> dh = create_data_handling((4, 5)) >>> dh = create_data_handling((4, 5))
>>> fi = FlagInterface(dh, 'flag_field', np.uint8) >>> fi = FlagInterface(dh, 'flag_field', np.uint8)
>>> assert dh.has_data('flag_field') >>> assert dh.has_data('flag_field')
>>> fi.reserve_next_flag() >>> int(fi.reserve_next_flag())
2 2
>>> fi.reserve_flag(4) >>> int(fi.reserve_flag(4))
4 4
>>> fi.reserve_next_flag() >>> int(fi.reserve_next_flag())
8 8
""" """
...@@ -450,5 +450,6 @@ def create_boundary_kernel(field, index_field, stencil, boundary_functor, target ...@@ -450,5 +450,6 @@ def create_boundary_kernel(field, index_field, stencil, boundary_functor, target
dir_symbol = TypedSymbol("dir", np.int32) dir_symbol = TypedSymbol("dir", np.int32)
elements += [SympyAssignment(dir_symbol, index_field[0]('dir'))] elements += [SympyAssignment(dir_symbol, index_field[0]('dir'))]
elements += boundary_functor(field, direction_symbol=dir_symbol, index_field=index_field) elements += boundary_functor(field, direction_symbol=dir_symbol, index_field=index_field)
config = CreateKernelConfig(index_fields=[index_field], target=target, **kernel_creation_args) config = CreateKernelConfig(index_fields=[index_field], target=target, skip_independence_check=True,
**kernel_creation_args)
return create_kernel(elements, config=config) return create_kernel(elements, config=config)
...@@ -135,8 +135,9 @@ class CreateKernelConfig: ...@@ -135,8 +135,9 @@ class CreateKernelConfig:
""" """
skip_independence_check: bool = False skip_independence_check: bool = False
""" """
Don't check that loop iterations are independent. This is needed e.g. for By default, the assignment list is checked for read/write independence. This means fields are only written at
periodicity kernels that access the field outside the iteration bounds. Use with care! locations where they are read. Doing so guarantees thread safety. In some cases, e.g. for
periodicity kernels, this cannot be assured, so the check needs to be deactivated. Use with care!
""" """
class DataTypeFactory: class DataTypeFactory:
......
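A usage sketch for the skip_independence_check option documented above (the field and assignment are made up, and whether this exact assignment trips the check may depend on the pystencils version): a periodicity-style kernel writes a neighbor cell it never reads, so the read/write independence check has to be disabled explicitly.

import pystencils as ps

f = ps.fields("f: double[2D]")
shift = [ps.Assignment(f[1, 0], f[0, 0])]       # writes a neighbor cell it never reads
config = ps.CreateKernelConfig(skip_independence_check=True)
ast = ps.create_kernel(shift, config=config)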
...@@ -57,12 +57,14 @@ import tempfile ...@@ -57,12 +57,14 @@ import tempfile
import textwrap import textwrap
import time import time
import warnings import warnings
import pathlib
import numpy as np import numpy as np
from pystencils import FieldType from pystencils import FieldType
from pystencils.astnodes import LoopOverCoordinate from pystencils.astnodes import LoopOverCoordinate
from pystencils.backends.cbackend import generate_c, get_headers, CFunction from pystencils.backends.cbackend import generate_c, get_headers
from pystencils.backends.simd_instruction_sets import get_supported_instruction_sets
from pystencils.cpu.msvc_detection import get_environment from pystencils.cpu.msvc_detection import get_environment
from pystencils.include import get_pystencils_include_path from pystencils.include import get_pystencils_include_path
from pystencils.kernel_wrapper import KernelWrapper from pystencils.kernel_wrapper import KernelWrapper
...@@ -122,15 +124,15 @@ def get_configuration_file_path(): ...@@ -122,15 +124,15 @@ def get_configuration_file_path():
# 1) Read path from environment variable if found # 1) Read path from environment variable if found
if 'PYSTENCILS_CONFIG' in os.environ: if 'PYSTENCILS_CONFIG' in os.environ:
return os.environ['PYSTENCILS_CONFIG'], True return os.environ['PYSTENCILS_CONFIG']
# 2) Look in current directory for pystencils.json # 2) Look in current directory for pystencils.json
elif os.path.exists("pystencils.json"): elif os.path.exists("pystencils.json"):
return "pystencils.json", True return "pystencils.json"
# 3) Try ~/.pystencils.json # 3) Try ~/.pystencils.json
elif os.path.exists(config_path_in_home): elif os.path.exists(config_path_in_home):
return config_path_in_home, True return config_path_in_home
else: else:
return config_path_in_home, False return config_path_in_home
def create_folder(path, is_file): def create_folder(path, is_file):
...@@ -172,7 +174,11 @@ def read_config(): ...@@ -172,7 +174,11 @@ def read_config():
('restrict_qualifier', '__restrict__') ('restrict_qualifier', '__restrict__')
]) ])
if platform.machine() == 'arm64': if platform.machine() == 'arm64':
default_compiler_config['flags'] = default_compiler_config['flags'].replace('-march=native ', '') if 'sme' in get_supported_instruction_sets():
flag = '-march=armv8.7-a+sme '
else:
flag = ''
default_compiler_config['flags'] = default_compiler_config['flags'].replace('-march=native ', flag)
for libomp in ['/opt/local/lib/libomp/libomp.dylib', '/usr/local/lib/libomp.dylib', for libomp in ['/opt/local/lib/libomp/libomp.dylib', '/usr/local/lib/libomp.dylib',
'/opt/homebrew/lib/libomp.dylib']: '/opt/homebrew/lib/libomp.dylib']:
if os.path.exists(libomp): if os.path.exists(libomp):
...@@ -190,16 +196,22 @@ def read_config(): ...@@ -190,16 +196,22 @@ def read_config():
default_config = OrderedDict([('compiler', default_compiler_config), default_config = OrderedDict([('compiler', default_compiler_config),
('cache', default_cache_config)]) ('cache', default_cache_config)])
config_path, config_exists = get_configuration_file_path() from fasteners import InterProcessLock
config_path = pathlib.Path(get_configuration_file_path())
config_path.parent.mkdir(parents=True, exist_ok=True)
config = default_config.copy() config = default_config.copy()
if config_exists:
with open(config_path, 'r') as json_config_file: lockfile = config_path.with_suffix(config_path.suffix + ".lock")
loaded_config = json.load(json_config_file) with InterProcessLock(lockfile):
config = recursive_dict_update(config, loaded_config) if config_path.exists():
else: with open(config_path, 'r') as json_config_file:
create_folder(config_path, True) loaded_config = json.load(json_config_file)
with open(config_path, 'w') as f: config = recursive_dict_update(config, loaded_config)
json.dump(config, f, indent=4) else:
with open(config_path, 'w') as f:
json.dump(config, f, indent=4)
if config['cache']['object_cache'] is not False: if config['cache']['object_cache'] is not False:
config['cache']['object_cache'] = os.path.expanduser(config['cache']['object_cache']).format(pid=os.getpid()) config['cache']['object_cache'] = os.path.expanduser(config['cache']['object_cache']).format(pid=os.getpid())
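The new read_config path above serializes concurrent access to the shared JSON config through the added 'fasteners' dependency. A minimal sketch of that locking pattern under the same assumptions (the config location mirrors the path used elsewhere in this diff):

import json
import pathlib
from fasteners import InterProcessLock

config_path = pathlib.Path.home() / '.config' / 'pystencils' / 'config.json'
config_path.parent.mkdir(parents=True, exist_ok=True)

with InterProcessLock(config_path.with_suffix(config_path.suffix + '.lock')):
    if config_path.exists():
        config = json.loads(config_path.read_text())      # the real code merges this into defaults
    else:
        config = {}                                        # stand-in for the default config dict
        config_path.write_text(json.dumps(config, indent=4))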
...@@ -447,8 +459,6 @@ def create_function_boilerplate_code(parameter_info, name, ast_node, insert_chec ...@@ -447,8 +459,6 @@ def create_function_boilerplate_code(parameter_info, name, ast_node, insert_chec
parameters.append(f"buffer_{field.name}.strides[{param.symbol.coordinate}] / {item_size}") parameters.append(f"buffer_{field.name}.strides[{param.symbol.coordinate}] / {item_size}")
elif param.is_field_shape: elif param.is_field_shape:
parameters.append(f"buffer_{param.field_name}.shape[{param.symbol.coordinate}]") parameters.append(f"buffer_{param.field_name}.shape[{param.symbol.coordinate}]")
elif type(param.symbol) is CFunction:
continue
else: else:
extract_function, target_type = type_mapping[param.symbol.dtype.numpy_dtype.type] extract_function, target_type = type_mapping[param.symbol.dtype.numpy_dtype.type]
pre_call_code += template_extract_scalar.format(extract_function=extract_function, pre_call_code += template_extract_scalar.format(extract_function=extract_function,
...@@ -617,7 +627,12 @@ def compile_and_load(ast, custom_backend=None): ...@@ -617,7 +627,12 @@ def compile_and_load(ast, custom_backend=None):
cache_config = get_cache_config() cache_config = get_cache_config()
compiler_config = get_compiler_config() compiler_config = get_compiler_config()
function_prefix = '__declspec(dllexport)' if compiler_config['os'].lower() == 'windows' else '' if compiler_config['os'].lower() == 'windows':
function_prefix = '__declspec(dllexport)'
elif ast.instruction_set and 'function_prefix' in ast.instruction_set:
function_prefix = ast.instruction_set['function_prefix']
else:
function_prefix = ''
code = ExtensionModuleCode(custom_backend=custom_backend) code = ExtensionModuleCode(custom_backend=custom_backend)
code.add_function(ast, ast.function_name) code.add_function(ast, ast.function_name)
......
...@@ -948,24 +948,35 @@ def create_numpy_array_with_layout(shape, layout, alignment=False, byte_offset=0 ...@@ -948,24 +948,35 @@ def create_numpy_array_with_layout(shape, layout, alignment=False, byte_offset=0
def spatial_layout_string_to_tuple(layout_str: str, dim: int) -> Tuple[int, ...]: def spatial_layout_string_to_tuple(layout_str: str, dim: int) -> Tuple[int, ...]:
if layout_str in ('fzyx', 'zyxf'): if dim <= 0:
assert dim <= 3 raise ValueError("Dimensionality must be positive")
return tuple(reversed(range(dim)))
layout_str = layout_str.lower()
if layout_str in ('fzyx', 'f', 'reverse_numpy', 'SoA'): if layout_str in ('fzyx', 'zyxf', 'soa', 'aos'):
if dim > 3:
raise ValueError(f"Invalid spatial dimensionality for layout descriptor {layout_str}: May be at most 3.")
return tuple(reversed(range(dim)))
if layout_str in ('f', 'reverse_numpy'):
return tuple(reversed(range(dim))) return tuple(reversed(range(dim)))
elif layout_str in ('c', 'numpy', 'AoS'): elif layout_str in ('c', 'numpy'):
return tuple(range(dim)) return tuple(range(dim))
raise ValueError("Unknown layout descriptor " + layout_str) raise ValueError("Unknown layout descriptor " + layout_str)
def layout_string_to_tuple(layout_str, dim): def layout_string_to_tuple(layout_str, dim):
if dim <= 0:
raise ValueError("Dimensionality must be positive")
layout_str = layout_str.lower() layout_str = layout_str.lower()
if layout_str == 'fzyx' or layout_str == 'soa': if layout_str == 'fzyx' or layout_str == 'soa':
assert dim <= 4 if dim > 4:
raise ValueError(f"Invalid total dimensionality for layout descriptor {layout_str}: May be at most 4.")
return tuple(reversed(range(dim))) return tuple(reversed(range(dim)))
elif layout_str == 'zyxf' or layout_str == 'aos': elif layout_str == 'zyxf' or layout_str == 'aos':
assert dim <= 4 if dim > 4:
raise ValueError(f"Invalid total dimensionality for layout descriptor {layout_str}: May be at most 4.")
return tuple(reversed(range(dim - 1))) + (dim - 1,) return tuple(reversed(range(dim - 1))) + (dim - 1,)
elif layout_str == 'f' or layout_str == 'reverse_numpy': elif layout_str == 'f' or layout_str == 'reverse_numpy':
return tuple(reversed(range(dim))) return tuple(reversed(range(dim)))
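Expected behaviour of the reworked layout helpers above, assuming they live in pystencils.field as in current releases; exceeding the dimensionality limit now raises a ValueError instead of failing an assert:

from pystencils.field import layout_string_to_tuple, spatial_layout_string_to_tuple

assert spatial_layout_string_to_tuple('fzyx', 3) == (2, 1, 0)
assert spatial_layout_string_to_tuple('c', 2) == (0, 1)
assert layout_string_to_tuple('zyxf', 4) == (2, 1, 0, 3)   # index coordinate comes last
try:
    spatial_layout_string_to_tuple('fzyx', dim=4)           # more than 3 spatial dimensions
except ValueError as e:
    print(e)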
......
...@@ -224,6 +224,9 @@ class BlockIndexing(AbstractIndexing): ...@@ -224,6 +224,9 @@ class BlockIndexing(AbstractIndexing):
assert len(self._iteration_space) == len(arr_shape), "Iteration space must be equal to the array shape" assert len(self._iteration_space) == len(arr_shape), "Iteration space must be equal to the array shape"
numeric_iteration_slice = _get_numeric_iteration_slice(self._iteration_space, arr_shape) numeric_iteration_slice = _get_numeric_iteration_slice(self._iteration_space, arr_shape)
end = [s.stop if s.stop != 0 else 1 for s in numeric_iteration_slice] end = [s.stop if s.stop != 0 else 1 for s in numeric_iteration_slice]
for i, s in enumerate(numeric_iteration_slice):
if s.step and s.step != 1:
end[i] = div_ceil(s.stop - s.start, s.step) + s.start
if self._dim < 4: if self._dim < 4:
conditions = [c < e for c, e in zip(self.coordinates, end)] conditions = [c < e for c, e in zip(self.coordinates, end)]
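A worked example for the strided-slice handling added above: with a numeric iteration slice slice(1, 10, 3) the kernel visits indices 1, 4 and 7, i.e. ceil((10 - 1) / 3) = 3 iterations, so the guard condition has to compare against end = 1 + 3 = 4 rather than the raw stop value 10.

def div_ceil(a, b):
    return -(-a // b)

s = slice(1, 10, 3)
end = div_ceil(s.stop - s.start, s.step) + s.start if s.step != 1 else s.stop
assert end == 4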
......
...@@ -66,15 +66,18 @@ def create_cuda_kernel(assignments: NodeCollection, config: CreateKernelConfig): ...@@ -66,15 +66,18 @@ def create_cuda_kernel(assignments: NodeCollection, config: CreateKernelConfig):
iteration_space = normalize_slice(iteration_slice, common_shape) iteration_space = normalize_slice(iteration_slice, common_shape)
else: else:
iteration_space = normalize_slice(iteration_slice, common_shape) iteration_space = normalize_slice(iteration_slice, common_shape)
iteration_space = tuple([s if isinstance(s, slice) else slice(s, s, 1) for s in iteration_space])
iteration_space = tuple([s if isinstance(s, slice) else slice(s, s + 1, 1) for s in iteration_space])
loop_counter_symbols = [LoopOverCoordinate.get_loop_counter_symbol(i) for i in range(len(iteration_space))] loop_counter_symbols = [LoopOverCoordinate.get_loop_counter_symbol(i) for i in range(len(iteration_space))]
if len(indexed_elements) > 0: if len(indexed_elements) > 0:
common_indexed_element = get_common_indexed_element(indexed_elements) common_indexed_element = get_common_indexed_element(indexed_elements)
index = common_indexed_element.indices[0].atoms(TypedSymbol)
assert len(index) == 1, "index expressions must only contain one symbol representing the index"
indexing = indexing_creator(iteration_space=(slice(0, common_indexed_element.shape[0], 1), *iteration_space), indexing = indexing_creator(iteration_space=(slice(0, common_indexed_element.shape[0], 1), *iteration_space),
data_layout=common_field.layout) data_layout=common_field.layout)
extended_ctrs = [common_indexed_element.indices[0], *loop_counter_symbols] extended_ctrs = [index.pop(), *loop_counter_symbols]
loop_counter_assignments = indexing.get_loop_ctr_assignments(extended_ctrs) loop_counter_assignments = indexing.get_loop_ctr_assignments(extended_ctrs)
else: else:
indexing = indexing_creator(iteration_space=iteration_space, data_layout=common_field.layout) indexing = indexing_creator(iteration_space=iteration_space, data_layout=common_field.layout)
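A worked example for the off-by-one fix above: a bare integer coordinate s in the iteration space must be normalized to a slice that still contains that single index.

s = 3
assert list(range(10))[slice(s, s, 1)] == []           # old behaviour: empty selection
assert list(range(10))[slice(s, s + 1, 1)] == [3]      # fixed: exactly index 3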
......
#pragma once
extern "C++" {
#ifdef __CUDA_ARCH__
template <typename DTYPE_T, std::size_t DIMENSION> struct PyStencilsField {
DTYPE_T *data;
DTYPE_T shape[DIMENSION];
DTYPE_T stride[DIMENSION];
};
#else
#include <array>
template <typename DTYPE_T, std::size_t DIMENSION> struct PyStencilsField {
DTYPE_T *data;
std::array<DTYPE_T, DIMENSION> shape;
std::array<DTYPE_T, DIMENSION> stride;
};
#endif
}