Skip to content
Snippets Groups Projects
Commit f2e8e238 authored by Christoph Alt's avatar Christoph Alt
Browse files

init commit

parents
No related branches found
No related tags found
No related merge requests found
Showing
with 426 additions and 0 deletions
[flake8]
exclude =
.git,
__pycache__,
.pytest_cache,
venv
max-line-length = 120
max-complexity = 10
venv/
__pycache__/
*.swp
from dataclasses import dataclass, asdict
@dataclass
class DataPoint:
    """Represents a single Datapoint from the InfluxDB perspective."""

    measurement: str  # InfluxDB measurement name the point belongs to
    tags: dict        # tag key/value pairs (indexed metadata)
    fields: dict      # field key/value pairs (the actual data)
    time: int         # timestamp of the point

    def asdict(self) -> dict:
        """Return the point as a plain dict with keys measurement/tags/fields/time."""
        # Calls dataclasses.asdict (module-level import), not this method.
        return asdict(self)
from .plain_text import process_linewise
from typing import Callable, Iterable
import csv
def process_linewise(func: Callable, lines: Iterable):
    """Apply *func* to every line and yield the results.

    Lines for which *func* raises ValueError are silently skipped —
    parsers signal "not my line" by raising ValueError.
    """
    for current_line in lines:
        try:
            yield func(current_line)
        except ValueError:
            # parser rejected this line; move on to the next one
            continue
def iterate_csv(path):
    """Yield each data row of the CSV file at *path* as a dict.

    The file is opened with newline="" as required by the csv module so
    that quoted fields containing newlines are parsed correctly.
    """
    with open(path, "r", newline="") as in_file:
        yield from csv.DictReader(in_file)
import sqlite3
import logging
from contextlib import contextmanager
import os
logger = logging.getLogger(__file__)
def select_stmt(arg="*"):
    """Build a SELECT clause; selects every column by default."""
    return "SELECT " + arg
def from_stmt(table_name):
    """Build a FROM clause naming *table_name*."""
    return "FROM " + table_name
def where_stmt(lhs, rhs, op="="):
    """Build a WHERE clause comparing *lhs* to *rhs* with *op* (no spaces)."""
    return "".join(("WHERE ", lhs, op, rhs))
def join_stmt(lhs, rhs, key):
    """Build an inner-join expression joining *lhs* and *rhs* on *key*."""
    template = "{l} inner join {r} on {l}.{k} = {r}.{k}"
    return template.format(l=lhs, r=rhs, k=key)
def table_name_query():
    """Build the query that lists all table names via sqlite_master."""
    select = select_stmt("name")
    source = from_stmt("sqlite_master")
    where = where_stmt("type", "'table'")
    return " ".join((select, source, where))
@contextmanager
def sqlite_context(path):
    """Context manager yielding a sqlite3 connection to the database at *path*.

    Rows are returned as sqlite3.Row so columns are accessible by name.
    The connection is always closed on exit.

    Raises:
        ValueError: if *path* does not point to an existing file.
    """
    if not os.path.exists(path):
        # Fix: error message said "does not exists"
        raise ValueError(f"{path} does not exist")
    # NOTE(review): these calls use the root logger; consider the
    # module-level `logger` instead — confirm intended logging setup.
    logging.info(f"trying to connect to {path}")
    connection = sqlite3.connect(path)
    connection.row_factory = sqlite3.Row  # enable name-based column access
    try:
        yield connection
    finally:
        logging.info(f"closing {connection}")
        connection.close()
def get_all_table_names(connection):
    """Return the name of every table in the connected database."""
    rows = connection.execute(table_name_query()).fetchall()
    return [row[0] for row in rows]
def query_complete_table(connection, table_name: str):
    """Execute SELECT * on *table_name* and return the resulting cursor."""
    query = " ".join((select_stmt(), from_stmt(table_name)))
    return connection.execute(query)
def query_join(connection, lhs, rhs, key):
    """Execute an inner join of tables *lhs* and *rhs* on column *key*.

    Returns the cursor of the executed query.

    NOTE(review): table/column names are interpolated into the SQL string
    directly — never pass untrusted input here.
    """
    # Consistency fix: build the statement from the existing helpers
    # instead of duplicating join_stmt's format string inline.
    return connection.execute(f"{select_stmt()} {from_stmt(join_stmt(lhs, rhs, key))}")
def tables2dict(table_iterator):
    """Convert each row (e.g. sqlite3.Row) from *table_iterator* into a dict."""
    yield from map(dict, table_iterator)
def iterate_all_tables(path):
    """Yield every row of every table in the sqlite database at *path* as a dict."""
    with sqlite_context(path) as connection:
        names = get_all_table_names(connection)
        for name in names:
            cursor = query_complete_table(connection, name)
            yield from tables2dict(cursor)
def iterate_join(path, lhs, rhs, key):
    """Yield the rows of the inner join of *lhs* and *rhs* on *key* as dicts."""
    with sqlite_context(path) as connection:
        cursor = query_join(connection, lhs, rhs, key)
        yield from tables2dict(cursor)
"""Set of example functions to transform some input"""
import re
def mesa_pd_text(line: str):
    """Parse one timer row of waLBerla MESA-PD log output.

    Expects lines shaped like
    ``[0] ContactDetection | 25.50% | 113.55| ...``
    and returns ``(timer_name, total_seconds)``.

    Raises:
        ValueError: if *line* is not a timer row (lets process_linewise
        skip non-matching lines).
    """
    m = re.search(r'\[0\]\s*(\w*)\s*\|[\s\d\.\%]*\|\s*([\d\.]*)', line)
    if m is None:
        # Fix: raise with a message instead of a bare ValueError()
        raise ValueError(f"not a MESA-PD timer line: {line!r}")
    return m.group(1), float(m.group(2))
import os
import logging
import dotenv
import pprint
from influxdb import InfluxDBClient
from dataclasses import dataclass
logger = logging.getLogger(__file__)
MISSING_DB_PW = """
Password for the InfluxDB write_user was not set.
See https://docs.gitlab.com/ee/ci/variables/#secret-variables
"""
def load_config_from_env(env_path: str = ".env"):
    """Build a DBConfig from environment variables.

    If *env_path* exists it is loaded first via python-dotenv, so a local
    dotenv file can supply the variables.

    Raises:
        KeyError: if a required INFLUXDB_* variable is missing.
    """
    if os.path.exists(env_path):
        # Bug fix: load_dotenv() was called without arguments, silently
        # ignoring a non-default env_path.
        dotenv.load_dotenv(env_path)
    return DBConfig(
        host=os.environ["INFLUXDB_HOST"],
        # DBConfig declares port as int; cast the env string accordingly
        port=int(os.environ["INFLUXDB_PORT"]),
        user_name=os.environ["INFLUXDB_USER_NAME"],
        database=os.environ["INFLUXDB_DATABASE"],
        write_user_pw=os.environ["INFLUXDB_WRITE_USER_PASSWORD"],
    )
@dataclass
class DBConfig:
    """
    Configclass that stores the information for accessing the Database.
    """

    host: str           # InfluxDB host name or address
    port: int           # InfluxDB port
    user_name: str      # user used to write points
    database: str       # target database name
    write_user_pw: str  # password of the write user
class Uploader:
    """Thin wrapper around InfluxDBClient for writing data points."""

    def __init__(self, config: DBConfig = None):
        # Bug fix: the previous default `config = load_config_from_env()`
        # was evaluated once at class-definition time, i.e. the environment
        # was read (and could raise KeyError) at import — even when a
        # config object was passed in. Resolve it lazily instead.
        if config is None:
            config = load_config_from_env()
        self.config = config
        self.client = InfluxDBClient(
            host=config.host,
            port=config.port,
            username=config.user_name,
            password=config.write_user_pw,
            database=config.database,
        )

    def upload(self, points, dry_run=False, *args, **kwargs):
        """Write *points* to InfluxDB; with dry_run=True only log them.

        Extra args/kwargs are forwarded to InfluxDBClient.write_points.

        Raises:
            ValueError: if the client reports a failed write.
        """
        logger.info(f"Uploading: {pprint.pformat(points)}")
        if dry_run:
            return
        success = self.client.write_points(points, *args, **kwargs)
        if not success:
            raise ValueError("Uploading to influxdb went wrong!")
        logger.info(f"Uploaded {len(points)} items")
def read_file_line_wise(path):
    """Yield the lines of the file at *path* without trailing newlines.

    Iterates the file handle directly instead of readlines(), so memory
    use stays constant even for large files.
    """
    with open(path, "r") as file_to_read:
        for line in file_to_read:
            yield line.strip("\n")
setup.py 0 → 100644
#!/usr/bin/env python
from setuptools import setup, find_packages

setup(
    name="cb-util",
    version="0.1",
    # typo fix: "contious" -> "continuous"
    description="Collection of scripts and wrapper of continuous benchmarking",
    author="Christoph Alt",
    author_email="Christoph.alt@fau.de",
    # Bug fix: include=["cbutil"] matched only the top-level package and
    # silently excluded subpackages such as cbutil.postprocessing.
    packages=find_packages(include=["cbutil", "cbutil.*"]),
    install_requires=[
        "python-dotenv",
        "influxdb",
    ],
    setup_requires=["pytest-runner"],
    tests_require=["pytest"],
)
timesteps,vtkWriteFrequency,remainingTimeLoggerFrequency,timeStepStrategy,warmupSteps,scenario,cudaEnabledMpi,mlupsPerProcess,stencil_phase,stencil_hydro,executable,compile_flags,walberla_version,build_machine,blocks_0,blocks_1,blocks_2,cellsPerBlock_0,cellsPerBlock_1,cellsPerBlock_2,periodic_0,periodic_1,periodic_2
101,0,-1,phase_only,10,1,False,16.04957889718735,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,8,8,8,1,1,1
101,0,-1,phase_only,10,1,False,24.13489254297534,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,16,16,16,1,1,1
101,0,-1,phase_only,10,1,False,26.044562633466292,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,32,32,32,1,1,1
101,0,-1,phase_only,10,1,False,22.943083078832906,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,64,64,64,1,1,1
101,0,-1,phase_only,10,1,False,22.58538807348407,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,128,128,128,1,1,1
101,0,-1,hydro_only,10,1,False,8.338149096012547,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,8,8,8,1,1,1
101,0,-1,hydro_only,10,1,False,9.64033669591777,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,16,16,16,1,1,1
101,0,-1,hydro_only,10,1,False,7.33978706214917,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,32,32,32,1,1,1
101,0,-1,hydro_only,10,1,False,6.622759126080623,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,64,64,64,1,1,1
101,0,-1,hydro_only,10,1,False,6.980441649406062,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,128,128,128,1,1,1
101,0,-1,kernel_only,10,1,False,5.3146705956640705,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,8,8,8,1,1,1
101,0,-1,kernel_only,10,1,False,6.766545795613468,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,16,16,16,1,1,1
101,0,-1,kernel_only,10,1,False,5.309041838682824,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,32,32,32,1,1,1
101,0,-1,kernel_only,10,1,False,5.129618797069078,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,64,64,64,1,1,1
101,0,-1,kernel_only,10,1,False,5.304717416560969,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,128,128,128,1,1,1
101,0,-1,normal,10,1,False,0.4138063836804595,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,8,8,8,1,1,1
101,0,-1,normal,10,1,False,2.176102729285279,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,16,16,16,1,1,1
101,0,-1,normal,10,1,False,4.205279303995048,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,32,32,32,1,1,1
101,0,-1,normal,10,1,False,4.844465508243095,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,64,64,64,1,1,1
101,0,-1,normal,10,1,False,5.220620712313197,D3Q15,D3Q27,benchmark_cpu.py,-O3 -DNDEBUG -Wall -Wconversion -Wshadow -march=native -Wfloat-equal -Wextra -pedantic -ffast-math -D_GLIBCXX_USE_CXX11_ABI=1 -pthread -pedantic-errors -Werror,,rome1,1,1,1,128,128,128,1,1,1
File added
File added
================================================================================
| BEGIN LOGGING - Tuesday, 26.July 2022, 11:34:04 |
================================================================================
[0][INFO ]------(0.000 sec) config file: MESA_PD_Benchmark.cfg
[0][INFO ]------(0.000 sec) waLBerla Revision:
[0][INFO ]------(0.000 sec) *** READING CONFIG FILE ***
[0][INFO ]------(0.000 sec) sorting: linear
[0][INFO ]------(0.000 sec) normal: <1,1,1>
[0][INFO ]------(0.000 sec) spacing: 1
[0][INFO ]------(0.000 sec) shift: <0.1,0.1,0.1>
[0][INFO ]------(0.000 sec) radius: 0.6
[0][INFO ]------(0.000 sec) bBarrier: 0
[0][INFO ]------(0.000 sec) storeNodeTimings: 0
[0][INFO ]------(0.000 sec) checkSimulation: 0
[0][INFO ]------(0.000 sec) numOuterIterations: 1
[0][INFO ]------(0.000 sec) initialRefinementLevel: 1
[0][INFO ]------(0.000 sec) simulationSteps: 100
[0][INFO ]------(0.000 sec) dt: 0.0001
[0][INFO ]------(0.000 sec) visSpacing: 100
[0][INFO ]------(0.000 sec) vtk_out: vtk_out
[0][INFO ]------(0.000 sec) sqlFile: benchmark.sqlite
[0][INFO ]------(0.000 sec) recalculateBlockLevelsInRefresh: 1
[0][INFO ]------(0.000 sec) alwaysRebalanceInRefresh: 1
[0][INFO ]------(0.000 sec) reevaluateMinTargetLevelsAfterForcedRefinement: 1
[0][INFO ]------(0.000 sec) allowRefreshChangingDepth: 1
[0][INFO ]------(0.000 sec) allowMultipleRefreshCycles: 0
[0][INFO ]------(0.000 sec) checkForEarlyOutInRefresh: 1
[0][INFO ]------(0.000 sec) checkForLateOutInRefresh: 1
[0][INFO ]------(0.001 sec) regridMin: 2000
[0][INFO ]------(0.001 sec) regridMax: 100
[0][INFO ]------(0.001 sec) maxBlocksPerProcess: 1000
[0][INFO ]------(0.001 sec) baseWeight: 1
[0][INFO ]------(0.001 sec) metisipc2redist: 1000
[0][INFO ]------(0.001 sec) LBAlgorithm: Morton
[0][INFO ]------(0.001 sec) metisAlgorithm: PART_GEOM_KWAY
[0][INFO ]------(0.001 sec) metisWeightsToUse: BOTH_WEIGHTS
[0][INFO ]------(0.001 sec) metisEdgeSource: EDGES_FROM_EDGE_WEIGHTS
[0][INFO ]------(0.001 sec) *** BLOCKFOREST ***
[0][INFO ]------(0.001 sec) No setup file specified: Creation without setup file!
[0][INFO ]------(0.001 sec) Balancing 8 blocks for 1 processes...
[0][INFO ]------(0.001 sec) *** SETUP - START ***
[0][INFO ]-----(18.554 sec) #particles created: 4096000
[0][INFO ]-----(18.554 sec) *** SETUP - END ***
[0][INFO ]-----(18.554 sec) *** VTK ***
[0][INFO ]-----(18.562 sec) *** SIMULATION - START ***
[0][INFO ]-----(23.765 sec) *** RUNNING OUTER ITERATION 0 ***
[0][INFO ]----(469.069 sec) *** SIMULATION - END ***
[0][INFO ]----(469.070 sec) *** SQL OUTPUT - START ***
[0][INFO ]----(469.070 sec) Contacts Checked:
[0][INFO ]----(469.074 sec) Sample has 1 values in [52559676.000000, 52559676.000000], sum = 52559676.000000, mean = 52559676.000000, med = 52559676.000000, stddev = 0.000000 (relative: 0.000000), mad = 0.000000
[0][INFO ]----(469.074 sec) Contacts Detected:
[0][INFO ]----(469.075 sec) Sample has 1 values in [12211200.000000, 12211200.000000], sum = 12211200.000000, mean = 12211200.000000, med = 12211200.000000, stddev = 0.000000 (relative: 0.000000), mad = 0.000000
[0][INFO ]----(469.075 sec) Contacts Treated:
[0][INFO ]----(469.075 sec) Sample has 1 values in [12211200.000000, 12211200.000000], sum = 12211200.000000, mean = 12211200.000000, med = 12211200.000000, stddev = 0.000000 (relative: 0.000000), mad = 0.000000
[0][DEVEL ]----(469.075 sec) SNN bytes communicated: 0 / 0
[0][DEVEL ]----(469.075 sec) SNN communication partners: 0 / 0
[0][DEVEL ]----(469.075 sec) RP bytes communicated: 0 / 0
[0][DEVEL ]----(469.075 sec) RP communication partners: 0 / 0
[0][DEVEL ]----(469.075 sec) contacts checked/detected/treated: 52559676 / 12211200 / 12211200
[0][INFO ]----(470.644 sec) Timer | % | Total| Average| Count| Min| Max| Variance|
[0] ----------------------------------------------------------------------------------------------------------------------------------
[0] AssocToBlock | 1.44% | 6.43| 6.434| 1| 6.434| 6.434| 0.000|
[0] ContactDetection | 25.50% | 113.55| 113.551| 1| 113.551| 113.551| 0.000|
[0] Euler | 4.41% | 19.65| 19.653| 1| 19.653| 19.653| 0.000|
[0] GenerateLinkedCells | 2.91% | 12.96| 12.964| 1| 12.964| 12.964| 0.000|
[0] ReduceContactHistory | 1.39% | 6.17| 6.170| 1| 6.170| 6.170| 0.000|
[0] ReduceForce | 0.00% | 0.00| 0.000| 1| 0.000| 0.000| 0.000|
[0] SNN | 0.00% | 0.00| 0.000| 1| 0.000| 0.000| 0.000|
[0] SpringDashpot | 13.51% | 60.14| 60.142| 1| 60.142| 60.142| -0.000|
[0] SpringDashpotSpring | 50.84% | 226.38| 226.379| 1| 226.379| 226.379| -0.000|
[0]
[0][INFO ]----(470.647 sec) Total Particles:
[0][INFO ]----(470.647 sec) Sample has 1 values in [4096000.000000, 4096000.000000], sum = 4096000.000000, mean = 4096000.000000, med = 4096000.000000, stddev = 0.000000 (relative: 0.000000), mad = 0.000000
[0][INFO ]----(470.647 sec) Number of Particles:
[0][INFO ]----(470.647 sec) Sample has 1 values in [4096000.000000, 4096000.000000], sum = 4096000.000000, mean = 4096000.000000, med = 4096000.000000, stddev = 0.000000 (relative: 0.000000), mad = 0.000000
[0][INFO ]----(470.647 sec) Number of Ghost Particles:
[0][INFO ]----(470.648 sec) Sample has 1 values in [0.000000, 0.000000], sum = 0.000000, mean = 0.000000, med = 0.000000, stddev = 0.000000 (relative: -nan), mad = 0.000000
[0][DEVEL ]----(470.648 sec) Estimation: 153600
[0][INFO ]----(470.702 sec) *** SQL OUTPUT - END ***
================================================================================
| END LOGGING - Tuesday, 26.July 2022, 11:42:03 |
================================================================================
from cbutil.postprocessing.plain_text import iterate_csv
def test_iteration():
    """The benchmark CSV fixture contains exactly 20 data rows."""
    rows = list(iterate_csv("tests/benchmark.csv"))
    assert len(rows) == 20
from cbutil.postprocessing.sqlite import (select_stmt, from_stmt, where_stmt, join_stmt, table_name_query)
from cbutil.postprocessing.sqlite import get_all_table_names, tables2dict, query_join, iterate_all_tables
from cbutil.postprocessing.sqlite import sqlite_context
def test_select():
    """By default select_stmt selects every column."""
    assert "SELECT *" == select_stmt().strip()
def test_from():
    """from_stmt names the given table."""
    assert "FROM table" == from_stmt("table").strip()
def test_where():
    """where_stmt joins its operands with '=' and no surrounding spaces."""
    left, right = "lhs", "rhs"
    assert where_stmt(left, right).strip() == f"WHERE {left}={right}"
def test_join():
    """join_stmt references the join key on both tables."""
    left, right, col = "lhs", "rhs", "key"
    expected = f"{left} inner join {right} on {left}.{col} = {right}.{col}"
    assert join_stmt(left, right, col).strip() == expected
def test_table_name():
    """The table-name query targets sqlite_master for table entries."""
    expected = "SELECT name FROM sqlite_master WHERE type='table'"
    assert expected == table_name_query()
def test_all_table_names():
    """The fixture database contains exactly the runs and timingPool tables."""
    with sqlite_context("tests/benchmark.sqlite") as connection:
        found = get_all_table_names(connection)
    assert sorted(found) == sorted(["runs", "timingPool"])
def test_join_query():
    """Joining runs with timingPool on runId yields 9 rows."""
    with sqlite_context("tests/benchmark.sqlite") as connection:
        joined = query_join(connection, "runs", "timingPool", "runId")
        assert len(list(tables2dict(joined))) == 9
def test_iterate_query():
    """Iterating every table of the CPU benchmark db yields 150 rows."""
    # Fix: iterate_all_tables already yields dicts, so the extra
    # tables2dict wrapper was redundant.
    rows = list(iterate_all_tables("tests/cpu_benchmark.sqlite3"))
    assert len(rows) == 150
from cbutil.postprocessing import process_linewise
from cbutil.processing_functions import mesa_pd_text
from cbutil.util import read_file_line_wise
def id(x):
    """Identity function used as the simplest possible line processor.

    NOTE(review): shadows the builtin ``id`` within this test module.
    """
    return x
def test_linewise_trivial():
    """Processing with the identity function returns the lines unchanged."""
    lines = ["aaa", "bbb"]
    processed = process_linewise(id, lines)
    for original, result in zip(lines, processed):
        assert original == result
def test_linewise_split():
    """A splitting processor is applied to every line."""
    def splitter(line):
        parts = line.split(":")
        return parts[0], parts[1]

    lines = ["lhs1:rhs1", "lhs1:rhs1"]
    for raw_line, result in zip(lines, process_linewise(splitter, lines)):
        assert splitter(raw_line) == result
def test_mesa_pd():
    """Timer totals parsed from the example log match the expected values."""
    example_log = "tests/mesa_pd.txt"
    expected = {'AssocToBlock': 6.43,
                'ContactDetection': 113.55,
                'Euler': 19.65,
                'GenerateLinkedCells': 12.96,
                'ReduceContactHistory': 6.17,
                'ReduceForce': 0.0,
                'SNN': 0.0,
                'SpringDashpot': 60.14,
                'SpringDashpotSpring': 226.38}
    parsed = process_linewise(mesa_pd_text, read_file_line_wise(example_log))
    for timer_name, total in parsed:
        assert total == expected[timer_name]
from cbutil.util import read_file_line_wise
from contextlib import contextmanager
from pathlib import Path
import os
@contextmanager
def tmp_file(path: Path):
    """Contextmanager that creates a temporary file and removes it afterwards.

    The file is removed even if the body raises. Bug fix: if open() itself
    failed, the unconditional os.remove in the finally clause raised
    FileNotFoundError, masking the original error — guard the cleanup.
    """
    try:
        with open(path, "w") as tmp_test_file:
            yield tmp_test_file
    finally:
        if os.path.exists(path):
            os.remove(path)
def test_read_file():
    """Lines written through tmp_file round-trip via read_file_line_wise."""
    expected_lines = ["test1", "test2"]
    target = Path("/tmp/tmp_test_file")
    with tmp_file(target) as handle:
        handle.writelines(f"{line}\n" for line in expected_lines)
    for want, got in zip(expected_lines, read_file_line_wise(target)):
        assert want == got
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment