Skip to content
Snippets Groups Projects
Commit 7ac1998d authored by Markus Holzer's avatar Markus Holzer
Browse files

[skip ci] test

parent ef1077d5
No related branches found
No related tags found
No related merge requests found
Pipeline #66312 skipped
import waLBerla as wlb
from waLBerla.tools.config import block_decomposition
from waLBerla.tools.sqlitedb import sequenceValuesToScalars, checkAndUpdateSchema, storeSingle
import sqlite3
import os
......@@ -25,7 +26,8 @@ class Scenario:
logger_frequency=30,
blockforest_filestem="blockforest",
write_setup_vtk=True,
async_communication=False):
async_communication=False,
db_file_name=None):
self.domain_size = domain_size
self.root_blocks = root_blocks
......@@ -43,6 +45,7 @@ class Scenario:
self.logger_frequency = logger_frequency
self.async_communication = async_communication
self.db_file_name = DB_FILE if db_file_name is None else db_file_name
self.config_dict = self.config(print_dict=False)
......@@ -62,7 +65,7 @@ class Scenario:
'blockForestFilestem': self.bfs_filestem,
'writeVtk': self.write_setup_vtk,
'outputStatistics': True,
'writeSetupForestAndReturn': False,
'writeSetupForestAndReturn': True,
},
'Parameters': {
'omega': 1.95,
......@@ -114,8 +117,8 @@ class Scenario:
table_name = table_name.replace("-", "_")
for num_try in range(num_tries):
try:
checkAndUpdateSchema(result, table_name, DB_FILE)
storeSingle(result, table_name, DB_FILE)
checkAndUpdateSchema(result, table_name, self.db_file_name)
storeSingle(result, table_name, self.db_file_name)
break
except sqlite3.OperationalError as e:
wlb.log_warning(f"Sqlite DB writing failed: try {num_try + 1}/{num_tries} {str(e)}")
......@@ -142,8 +145,8 @@ def validation_run():
scenarios.add(scenario)
def scaling(num_proc, gpu_enabled_mpi=False, uniform=True):
wlb.log_info_on_root("Running scaling benchmark...")
def weak_scaling_ldc(num_proc, gpu_enabled_mpi=False, uniform=True):
wlb.log_info_on_root("Running weak scaling benchmark...")
if wlb.mpi.numProcesses() > 1:
num_proc = wlb.mpi.numProcesses()
......@@ -157,7 +160,7 @@ def scaling(num_proc, gpu_enabled_mpi=False, uniform=True):
factor = int(num_proc // 4)
name = "nonuniform"
cells_per_block = (152, 152, 152)
cells_per_block = (184, 184, 184)
domain_size = (cells_per_block[0] * 3, cells_per_block[1] * 3, cells_per_block[2] * factor)
root_blocks = tuple([d // c for d, c in zip(domain_size, cells_per_block)])
......@@ -172,9 +175,48 @@ def scaling(num_proc, gpu_enabled_mpi=False, uniform=True):
refinement_depth=0 if uniform else 3,
timesteps=10,
gpu_enabled_mpi=gpu_enabled_mpi,
async_communication=async_communication)
async_communication=async_communication,
db_file_name=f"weakScaling{name}LDC.sqlite3")
scenarios.add(scenario)
def strong_scaling_ldc(num_proc, gpu_enabled_mpi=False, uniform=True):
    """Set up strong-scaling lid-driven-cavity scenarios.

    The global domain size is held (approximately) fixed while the process
    count grows, so per-process work shrinks with more processes. Registers
    one scenario with and one without asynchronous communication.

    Args:
        num_proc: Requested process count; overridden by the actual MPI
            process count when running under MPI with more than one rank.
        gpu_enabled_mpi: Forwarded to ``Scenario`` (GPU-aware MPI).
        uniform: If True use a uniform grid; otherwise 3 refinement levels.

    Raises:
        RuntimeError: If the process count is not a multiple of 64.
    """
    wlb.log_info_on_root("Running strong scaling benchmark...")

    # This benchmark must run from 64 GPUs onwards
    if wlb.mpi.numProcesses() > 1:
        num_proc = wlb.mpi.numProcesses()

    if num_proc % 64 != 0:
        # Fixed: message previously said "16" although the check is modulo 64.
        raise RuntimeError("Number of processes must be divisible by 64")

    cells_per_block = (448, 448, 448)
    if uniform:
        domain_size = (cells_per_block[0] * 2, cells_per_block[1] * 2, cells_per_block[2] * 16)
        name = "uniform"
    else:
        # Scaling factor relative to the 64-process baseline (exact integer
        # division is guaranteed by the modulo-64 guard above).
        factor = num_proc // 64
        blocks64 = block_decomposition(factor)
        # Shrink each block so the total domain stays fixed as factor grows.
        cells_per_block = tuple([int(c / b) for c, b in zip(cells_per_block, reversed(blocks64))])
        domain_size = (cells_per_block[0] * 3, cells_per_block[1] * 3, cells_per_block[2] * factor)
        name = "nonuniform"

    # Number of root blocks per dimension (domain is an exact multiple of the
    # block size by construction above).
    root_blocks = tuple([d // c for d, c in zip(domain_size, cells_per_block)])

    scenarios = wlb.ScenarioManager()
    for async_communication in [False, True]:
        scenario = Scenario(blockforest_filestem=f"blockforest_{name}_{num_proc}",
                            domain_size=domain_size,
                            root_blocks=root_blocks,
                            num_processes=num_proc,
                            cells_per_block=cells_per_block,
                            refinement_depth=0 if uniform else 3,
                            timesteps=10,
                            gpu_enabled_mpi=gpu_enabled_mpi,
                            async_communication=async_communication,
                            db_file_name=f"strongScaling{name}LDC.sqlite3")
        scenarios.add(scenario)
# validation_run()
scaling(4, False, False)
strong_scaling_ldc(1, True, False)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment