Commit 282f5239 authored by Markus Holzer, committed by Michael Kuron

Fix integration pipeline

parent 5c70dd3a
Merge request !62: Fix integration pipeline
@@ -15,3 +15,4 @@ _local_tmp
 /lbmpy_tests/db
 doc/bibtex.json
 RELEASE-VERSION
+/db
@@ -6,6 +6,7 @@ from copy import copy
 import pytest
 import sympy as sp
+import math
 from lbmpy.methods.creationfunctions import RelaxationInfo, create_srt, create_trt, create_trt_kbc, \
     create_with_default_polynomial_cumulants
@@ -36,21 +37,38 @@ def __change_relaxation_rate_of_conserved_moments(method, new_relaxation_rate=sp
     return changed_method


-def check_for_collision_rule_equivalence(collision_rule1, collision_rule2):
+def check_for_collision_rule_equivalence(collision_rule1, collision_rule2, use_numeric_subs=False):
     collision_rule1 = collision_rule1.new_without_subexpressions()
     collision_rule2 = collision_rule2.new_without_subexpressions()
+
+    if use_numeric_subs:
+        free_symbols = collision_rule1.free_symbols
+        free_symbols.update(collision_rule2.free_symbols)
+
+        subs_dict = dict()
+        value = 10.0
+        for symbol in free_symbols:
+            subs_dict.update({symbol: value})
+            value += 1.1
+
+        collision_rule1 = collision_rule1.subs(subs_dict)
+        collision_rule2 = collision_rule2.subs(subs_dict)
+
     for eq1, eq2 in zip(collision_rule1.main_assignments, collision_rule2.main_assignments):
         diff = sp.cancel(sp.expand(eq1.rhs - eq2.rhs))
-        assert diff == 0
+        if use_numeric_subs:
+            assert math.isclose(diff, 0, rel_tol=0.0, abs_tol=1e-12)
+        else:
+            assert diff == 0


-def check_method_equivalence(m1, m2, do_simplifications):
+def check_method_equivalence(m1, m2, do_simplifications, use_numeric_subs=False):
     cr1 = m1.get_collision_rule()
     cr2 = m2.get_collision_rule()
     if do_simplifications:
         cr1 = create_simplification_strategy(m1)(cr1)
         cr2 = create_simplification_strategy(m2)(cr2)
-    check_for_collision_rule_equivalence(cr1, cr2)
+    check_for_collision_rule_equivalence(cr1, cr2, use_numeric_subs)


 @pytest.mark.longrun
@@ -60,8 +78,8 @@ def test_cumulant():
     original_method = create_with_default_polynomial_cumulants(stencil, [sp.Symbol("omega")])
     changed_method = __change_relaxation_rate_of_conserved_moments(original_method)
-    check_method_equivalence(original_method, changed_method, True)
-    check_method_equivalence(original_method, changed_method, False)
+    check_method_equivalence(original_method, changed_method, True, True)
+    check_method_equivalence(original_method, changed_method, False, True)


 @pytest.mark.longrun
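The use_numeric_subs path added above avoids relying on sympy fully simplifying the symbolic difference of the two collision rules to zero: it substitutes a distinct numeric value for every free symbol and checks that the residual is numerically negligible with math.isclose. A minimal, self-contained sketch of that idea; the helper name and the example symbols are illustrative only, not part of the repository:

# Sketch of the numeric-substitution equivalence check, assuming plain sympy expressions.
import math
import sympy as sp

def exprs_numerically_equal(expr1, expr2, abs_tol=1e-12):
    """Substitute a distinct number for every free symbol and compare the residual."""
    free_symbols = expr1.free_symbols | expr2.free_symbols
    subs_dict = {}
    value = 10.0
    for symbol in free_symbols:
        subs_dict[symbol] = value   # a different value for each symbol, as in the test above
        value += 1.1
    diff = sp.cancel(sp.expand(expr1.subs(subs_dict) - expr2.subs(subs_dict)))
    return math.isclose(float(diff), 0, rel_tol=0.0, abs_tol=abs_tol)

omega, rho = sp.symbols("omega rho")
assert exprs_numerically_equal((omega + rho)**2, omega**2 + 2*omega*rho + rho**2)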
@@ -30,27 +30,28 @@ def test_split_number_of_operations():
     assert op_without_splitting['divs'] == op_with_splitting['divs']


+@pytest.mark.parametrize('stencil', ['D2Q9', 'D3Q15', 'D3Q19', 'D3Q27'])
+@pytest.mark.parametrize('compressible', [True, False])
+@pytest.mark.parametrize('method', ['srt', 'mrt'])
+@pytest.mark.parametrize('force', [(0, 0, 0), (1e-6, 1e-7, 2e-6)])
 @pytest.mark.longrun
-def test_equivalence():
+def test_equivalence(stencil, compressible, method, force):
     relaxation_rates = [1.8, 1.7, 1.0, 1.0, 1.0, 1.0]
-    for stencil in ['D2Q9', 'D3Q15', 'D3Q19', 'D3Q27']:
-        for compressible in (True, False):
-            for method in ('srt', 'mrt'):
-                for force in ((0, 0, 0), (1e-6, 1e-7, 2e-6)):
-                    clear_cache()
-                    common_params = {'domain_size': (20, 30) if stencil.startswith('D2') else (10, 13, 7),
-                                     'stencil': stencil,
-                                     'method': method,
-                                     'weighted': True,
-                                     'compressible': compressible,
-                                     'force': force,
-                                     'relaxation_rates': relaxation_rates}
-                    print("Running Scenario", common_params)
-                    with_split = create_lid_driven_cavity(optimization={'split': True}, **common_params)
-                    without_split = create_lid_driven_cavity(optimization={'split': False}, **common_params)
-                    with_split.run(100)
-                    without_split.run(100)
-                    np.testing.assert_almost_equal(with_split.velocity_slice(), without_split.velocity_slice())
+    clear_cache()
+    common_params = {'domain_size': (10, 20) if stencil.startswith('D2') else (5, 10, 7),
+                     'stencil': stencil,
+                     'method': method,
+                     'weighted': True,
+                     'compressible': compressible,
+                     'force': force,
+                     'force_model': 'schiller',
+                     'relaxation_rates': relaxation_rates}
+    print("Running Scenario", common_params)
+    with_split = create_lid_driven_cavity(optimization={'split': True}, **common_params)
+    without_split = create_lid_driven_cavity(optimization={'split': False}, **common_params)
+    with_split.run(100)
+    without_split.run(100)
+    np.testing.assert_almost_equal(with_split.velocity_slice(), without_split.velocity_slice())


 def test_equivalence_short():
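The hunk above replaces the nested for loops with stacked pytest.mark.parametrize decorators, so pytest expands the full cross product of stencil, compressibility, collision method, and force into individual test cases that run and report independently. A small self-contained sketch of that pattern; the test name and parameters below are examples, not lbmpy code:

import pytest

# Stacked parametrize decorators multiply: 2 stencils x 2 flags = 4 generated test cases,
# each reported separately instead of one long loop inside a single test.
@pytest.mark.parametrize('stencil', ['D2Q9', 'D3Q19'])
@pytest.mark.parametrize('compressible', [True, False])
def test_cross_product_example(stencil, compressible):
    assert stencil.startswith('D')
    assert isinstance(compressible, bool)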
@@ -23,18 +23,18 @@ def test_lbm_vectorization_short():
     ldc1.run(10)


+@pytest.mark.parametrize('instruction_set', ['sse', 'avx'])
+@pytest.mark.parametrize('aligned_and_padding', [[False, False], [True, False], [True, True]])
+@pytest.mark.parametrize('nontemporal', [False, True])
+@pytest.mark.parametrize('double_precision', [False, True])
+@pytest.mark.parametrize('fixed_loop_sizes', [False, True])
 @pytest.mark.longrun
-def test_lbm_vectorization():
-    vectorization_options = [{'instruction_set': instruction_set,
-                              'assume_aligned': aa,
-                              'nontemporal': nt,
-                              'assume_inner_stride_one': True,
-                              'assume_sufficient_line_padding': lp,
-                              }
-                             for instruction_set in ('sse', 'avx')
-                             for aa, lp in ([False, False], [True, False], [True, True],)
-                             for nt in (False, True)
-                             ]
+def test_lbm_vectorization(instruction_set, aligned_and_padding, nontemporal, double_precision, fixed_loop_sizes):
+    vectorization_options = {'instruction_set': instruction_set,
+                             'assume_aligned': aligned_and_padding[0],
+                             'nontemporal': nontemporal,
+                             'assume_inner_stride_one': True,
+                             'assume_sufficient_line_padding': aligned_and_padding[1]}
     time_steps = 100
     size1 = (64, 32)
     size2 = (666, 34)
@@ -46,25 +46,22 @@ def test_lbm_vectorization():
     ldc2_ref = create_lid_driven_cavity(size2, relaxation_rate=relaxation_rate)
     ldc2_ref.run(time_steps)

-    for double_precision in (False, True):
-        for vec_opt in vectorization_options:
-            for fixed_loop_sizes in (True, False):
-                optimization = {'double_precision': double_precision,
-                                'vectorization': vec_opt,
-                                'cse_global': True,
-                                }
-                print("Vectorization test, double precision {}, vectorization {}, fixed loop sizes {}".format(
-                    double_precision, vec_opt, fixed_loop_sizes))
-                ldc1 = create_lid_driven_cavity(size1, relaxation_rate=relaxation_rate, optimization=optimization,
-                                                fixed_loop_sizes=fixed_loop_sizes)
-                ldc1.run(time_steps)
-                np.testing.assert_almost_equal(ldc1_ref.velocity[:, :], ldc1.velocity[:, :])
+    optimization = {'double_precision': double_precision,
+                    'vectorization': vectorization_options,
+                    'cse_global': True,
+                    }
+    print("Vectorization test, double precision {}, vectorization {}, fixed loop sizes {}".format(
+        double_precision, vectorization_options, fixed_loop_sizes))
+    ldc1 = create_lid_driven_cavity(size1, relaxation_rate=relaxation_rate, optimization=optimization,
+                                    fixed_loop_sizes=fixed_loop_sizes)
+    ldc1.run(time_steps)
+    np.testing.assert_almost_equal(ldc1_ref.velocity[:, :], ldc1.velocity[:, :])

-                optimization['split'] = True
-                ldc2 = create_lid_driven_cavity(size2, relaxation_rate=relaxation_rate, optimization=optimization,
-                                                fixed_loop_sizes=fixed_loop_sizes)
-                ldc2.run(time_steps)
-                np.testing.assert_almost_equal(ldc2_ref.velocity[:, :], ldc2.velocity[:, :])
+    optimization['split'] = True
+    ldc2 = create_lid_driven_cavity(size2, relaxation_rate=relaxation_rate, optimization=optimization,
+                                    fixed_loop_sizes=fixed_loop_sizes)
+    ldc2.run(time_steps)
+    np.testing.assert_almost_equal(ldc2_ref.velocity[:, :], ldc2.velocity[:, :])


 if __name__ == '__main__':
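In the vectorization test above, the [assume_aligned, line_padding] pair travels as a single aligned_and_padding parameter and is unpacked inside the test body. A sketch of that pattern using the same option keys shown in the diff; the test name and assertion are illustrative:

import pytest

@pytest.mark.parametrize('aligned_and_padding', [[False, False], [True, False], [True, True]])
@pytest.mark.parametrize('nontemporal', [False, True])
def test_options_assembly_example(aligned_and_padding, nontemporal):
    # The two-element parameter is unpacked into the vectorization options dict.
    vectorization_options = {'assume_aligned': aligned_and_padding[0],
                             'nontemporal': nontemporal,
                             'assume_inner_stride_one': True,
                             'assume_sufficient_line_padding': aligned_and_padding[1]}
    assert isinstance(vectorization_options['assume_aligned'], bool)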
-import pystencils as ps
+import lbmpy
 from pathlib import Path


 def test_version_string():
@@ -7,6 +7,6 @@ def test_version_string():
     if release_version.exists ():
         with open(release_version, "r") as f:
             version = f.read()
-        assert ps.__version__ == version
+        assert lbmpy.__version__ == version
     else:
-        assert ps.__version__ == "development"
+        assert lbmpy.__version__ == "development"
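The last hunk makes the version test assert against lbmpy's own __version__ instead of the pystencils one. A generic sketch of the check it performs, assuming a RELEASE-VERSION file may sit next to the tests; the helper name and paths below are illustrative:

from pathlib import Path

def check_version_string(package_version, release_version_file):
    # Compare against the RELEASE-VERSION file if it exists,
    # otherwise expect the "development" placeholder.
    release_version = Path(release_version_file)
    if release_version.exists():
        return package_version == release_version.read_text()
    return package_version == "development"

assert check_version_string("development", "nonexistent/RELEASE-VERSION")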