diff --git a/cbutil/ncu_parser.py b/cbutil/ncu_parser.py
index 820ae26645979cceb04805ba6b7ed635a20c8472..327344e5d342f67a7344b3166a15c6ca71c7540b 100644
--- a/cbutil/ncu_parser.py
+++ b/cbutil/ncu_parser.py
@@ -1,5 +1,6 @@
 import numpy as np
 import pandas as pd
+import re
 from cbutil.ncu_keys import (
     memory_write_data_key,
     memory_read_data_key,
@@ -27,17 +28,17 @@ def get_unit(col):
 
 
 def detect_prefix(unit):
-    if unit[0] == 'G':
+    if unit[0] == "G":
         return 1e9
-    elif unit[0] == 'M':
+    elif unit[0] == "M":
         return 1e6
-    elif unit[0] == 'K':
+    elif unit[0] == "K":
         return 1e3
-    elif unit[0] == 'm':
+    elif unit[0] == "m":
         return 1e-3
-    elif unit[0] == 'u':
+    elif unit[0] == "u":
         return 1e-6
-    elif unit[0] == 'n':
+    elif unit[0] == "n":
         return 1e-9
     else:
         return 1
@@ -61,35 +62,57 @@ def normalize_and_add_prefix(value, prefix: str):
 
 
 def extract_raw_counter(df: pd.DataFrame):
-
     fields = pd.DataFrame()
     tags = pd.DataFrame()
     tags["Block Size"] = df["Block Size"]
     tags["Grid Size"] = df["Grid Size"]
-    tags["GPU"] = df["device__attribute_display_name"]
+    tags["GPU"] = df["device__attribute_display_name"].str.replace(" ", "")
 
-    fields[memory_write_data_key] = normalize_and_add_prefix(df["dram__bytes_write.sum"], 'G')
-    fields[memory_read_data_key] = normalize_and_add_prefix(df["dram__bytes_read.sum"], 'G')
-    fields[memory_data_key] = fields[memory_write_data_key] + fields[memory_read_data_key]
+    fields[memory_write_data_key] = normalize_and_add_prefix(
+        df["dram__bytes_write.sum"], "G"
+    )
+    fields[memory_read_data_key] = normalize_and_add_prefix(
+        df["dram__bytes_read.sum"], "G"
+    )
+    fields[memory_data_key] = (
+        fields[memory_write_data_key] + fields[memory_read_data_key]
+    )
 
-    fields[memory_write_bandwidth_key] = normalize_and_add_prefix(df["dram__bytes_write.sum.per_second"], 'M')
-    fields[memory_read_bandwidth_key] = normalize_and_add_prefix(df["dram__bytes_read.sum.per_second"], 'M')
-    fields[memory_bandwidth_key] = normalize_and_add_prefix(df["dram__bytes.sum.per_second"], 'M')
+    fields[memory_write_bandwidth_key] = normalize_and_add_prefix(
+        df["dram__bytes_write.sum.per_second"], "M"
+    )
+    fields[memory_read_bandwidth_key] = normalize_and_add_prefix(
+        df["dram__bytes_read.sum.per_second"], "M"
+    )
+    fields[memory_bandwidth_key] = normalize_and_add_prefix(
+        df["dram__bytes.sum.per_second"], "M"
+    )
 
     fields[runtime_key] = get_normalized(df["gpu__time_duration.sum"])
-    fields[smsp_cycles_key] = get_normalized(df["smsp__cycles_elapsed.avg.per_second"])
+    fields[smsp_cycles_key] = get_normalized(
+        df["smsp__cycles_elapsed.avg.per_second"])
     fields[smsp_cycles_total_key] = fields[smsp_cycles_key] * fields[runtime_key]
 
     fields[fp_inst_per_cycle_key] = (
-        2 * df["smsp__sass_thread_inst_executed_op_dfma_pred_on.sum.per_cycle_elapsed"] +
-        df["smsp__sass_thread_inst_executed_op_dadd_pred_on.sum.per_cycle_elapsed"] +
-        df["smsp__sass_thread_inst_executed_op_dmul_pred_on.sum.per_cycle_elapsed"]
+        2 * df["smsp__sass_thread_inst_executed_op_dfma_pred_on.sum.per_cycle_elapsed"]
+        + df["smsp__sass_thread_inst_executed_op_dadd_pred_on.sum.per_cycle_elapsed"]
+        + df["smsp__sass_thread_inst_executed_op_dmul_pred_on.sum.per_cycle_elapsed"]
+    )
+    fields[total_fp_inst_key] = (
+        fields[fp_inst_per_cycle_key] * fields[smsp_cycles_total_key]
     )
-    fields[total_fp_inst_key] = fields[fp_inst_per_cycle_key] * fields[smsp_cycles_total_key]
-    fields[operational_intensity_key] = fields[total_fp_inst_key] / (fields[memory_data_key] * 1e9)
-    fields[p_max_key] = add_unit_prefix(fields[operational_intensity_key] * fields[memory_bandwidth_key] * 1e6, 'M')
-    fields[dp_key] = np.divide(np.asarray(fields[total_fp_inst_key]), fields[runtime_key]) / 1e6
+    fields[operational_intensity_key] = fields[total_fp_inst_key] / (
+        fields[memory_data_key] * 1e9
+    )
+    fields[p_max_key] = add_unit_prefix(
+        fields[operational_intensity_key] *
+        fields[memory_bandwidth_key] * 1e6, "M"
+    )
+    fields[dp_key] = (
+        np.divide(np.asarray(fields[total_fp_inst_key]),
+                  fields[runtime_key]) / 1e6
+    )
 
     return fields, tags
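
Aside on the extract_raw_counter hunk above: the reformat does not change the arithmetic, which rescales prefixed Nsight Compute units to a common base before deriving operational intensity and P_max. The snippet below is a standalone sketch only, not code from this repo; prefix_factor is a hypothetical stand-in for detect_prefix, and the real normalize_and_add_prefix/add_unit_prefix helpers are not shown in this diff.

import pandas as pd

def prefix_factor(unit: str) -> float:
    # Same prefix table as detect_prefix() in the hunk above.
    return {"G": 1e9, "M": 1e6, "K": 1e3, "m": 1e-3, "u": 1e-6, "n": 1e-9}.get(unit[:1], 1.0)

# Example: a bandwidth column reported in "MByte/s" rescaled to GByte/s.
col = pd.Series([100.0, 200.0, 300.0])        # values as reported by ncu
base = col * prefix_factor("MByte/s")         # -> bytes per second
as_gbyte_s = base / prefix_factor("GByte/s")  # -> GByte/s

# The GPU tag change above removes *all* spaces, so " NVIDIA A100 " and
# "NVIDIAA100" end up as the same tag value.
gpu = pd.Series([" NVIDIA A100 ", "NVIDIA A100"]).str.replace(" ", "")
assert gpu.nunique() == 1
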
diff --git a/dashboards/dashboard_walberla.py b/dashboards/dashboard_walberla.py
index 5efff6487754d1bdc7a4470fbe4ad7eb109e22b1..6feb7030ae60f1b373577925182e4612264a5793 100644
--- a/dashboards/dashboard_walberla.py
+++ b/dashboards/dashboard_walberla.py
@@ -618,6 +618,8 @@ def dashboard_fslbmgravitywave():
         Filter("project_id", multi=True, default_value="walberla/walberla"),
         Filter("branch", multi=True, default_value="master"),
         Filter("numMPIProcesses"),
+        Filter("barrierAfterSweep"),
+        Filter("blockDecomposition"),
     ]
 
     fields = [
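
For reference, the two Filter(...) entries added above rely entirely on the namedtuple defaults declared in dashboards/variables.py (its definition appears as context further down in this diff). A small self-contained sanity check:

from collections import namedtuple

# Same Filter namedtuple as in dashboards/variables.py, repeated here only to
# keep the snippet self-contained.
Filter = namedtuple(
    "Filter", ("name", "multi", "default_value", "refresh"), defaults=("", True, "", 1)
)

# Only the tag name is given, so multi/default_value/refresh fall back to the defaults.
print(Filter("barrierAfterSweep"))
# Filter(name='barrierAfterSweep', multi=True, default_value='', refresh=1)
print(Filter("blockDecomposition"))
# Filter(name='blockDecomposition', multi=True, default_value='', refresh=1)
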
diff --git a/dashboards/panels.py b/dashboards/panels.py
index b6f1c3b92b89a21fda35f2c80d3cccc98e986ea3..766fcee51095df9a2eb497099f2e5ae19e2e12a9 100644
--- a/dashboards/panels.py
+++ b/dashboards/panels.py
@@ -1,10 +1,22 @@
 from dataclasses import dataclass, field
+
 # from collections import namedtuple
 from dashboards.influx_queries import Query
-from grafanalib.core import TimeSeries, Text, Stat, Template, Repeat, Threshold, Table, BarChart, PieChartv2
+from grafanalib.core import (
+    TimeSeries,
+    Text,
+    Stat,
+    Template,
+    Repeat,
+    Threshold,
+    Table,
+    BarChart,
+    PieChartv2,
+)
 from dashboards.dashboard_base import get_influx_target
 from dashboards.legends import Units
 from numbers import Number
+from typing import List
 
 
 # PanelInfos = namedtuple("PanelInfos", ("name", "unit"))
@@ -20,30 +32,49 @@ def is_regex(name):
     return name[0] == "/" and name[-1] == "/"
 
 
-def get_time_series_panel(panel_infos: PanelInfos,
-                          data_source: str,
-                          query_list: list[Query],
-                          *,
-                          overrides=None,
-                          pointSize: int = 9,
-                          **kwargs):
+def get_time_series_panel(
+    panel_infos: PanelInfos,
+    data_source: str,
+    query_list: List[Query],
+    *,
+    overrides=None,
+    pointSize: int = 9,
+    **kwargs,
+):
     targets = [get_influx_target(str(query)) for query in query_list]
     new_kwargs = {**kwargs}
     if panel_infos.absthreshold is not None:
-        if 'thresholdsStyleMode' not in new_kwargs:
-            new_kwargs.update({
-                'thresholdType': 'absolute',
-                'thresholds': [Threshold('green', 0, 0.0),
-                               Threshold('red', index=1, value=float(panel_infos.absthreshold), op='lt')],
-                'thresholdsStyleMode': 'line'
-            })
+        if "thresholdsStyleMode" not in new_kwargs:
+            new_kwargs.update(
+                {
+                    "thresholdType": "absolute",
+                    "thresholds": [
+                        Threshold("green", 0, 0.0),
+                        Threshold(
+                            "red",
+                            index=1,
+                            value=float(panel_infos.absthreshold),
+                            op="lt",
+                        ),
+                    ],
+                    "thresholdsStyleMode": "line",
+                }
+            )
         else:
-            new_kwargs.update({
-                'thresholdType': 'absolute',
-                'thresholds': [Threshold('green', 0, 0.0),
-                               Threshold('red', index=1, value=float(panel_infos.absthreshold), op='lt')]
-            })
-
+            new_kwargs.update(
+                {
+                    "thresholdType": "absolute",
+                    "thresholds": [
+                        Threshold("green", 0, 0.0),
+                        Threshold(
+                            "red",
+                            index=1,
+                            value=float(panel_infos.absthreshold),
+                            op="lt",
+                        ),
+                    ],
+                }
+            )
 
     return TimeSeries(
         title=panel_infos.name,
@@ -53,71 +84,85 @@ def get_time_series_panel(panel_infos: PanelInfos,
         pointSize=pointSize,
         overrides=overrides,
         **new_kwargs,
-
     )
 
 
-def get_text_panel(content: str, *, mode='markdown', **kwargs) -> Text:
-    return Text(
-        content=content,
-        mode=mode,
-        **kwargs
-    )
+def get_text_panel(content: str, *, mode="markdown", **kwargs) -> Text:
+    return Text(content=content, mode=mode, **kwargs)
 
 
-def get_stat_panel(title: str,
-                   dataSource: str,
-                   stat_query: Query,
-                   repeat: Template = None,
-                   alias: str = "",
-                   *,
-                   maxPerRow=0,
-                   **kwargs):
+def get_stat_panel(
+    title: str,
+    dataSource: str,
+    stat_query: Query,
+    repeat: Template = None,
+    alias: str = "",
+    *,
+    maxPerRow=0,
+    **kwargs,
+):
     new_kwargs = {
-        'alignment': 'center',
-        'colorMode': 'value',
-        'graphMode': 'area',
-        'reduceCalc': 'last',
-        'orientation': 'auto',
-        'transparent': True,
+        "alignment": "center",
+        "colorMode": "value",
+        "graphMode": "area",
+        "reduceCalc": "last",
+        "orientation": "auto",
+        "transparent": True,
     }
     new_kwargs.update(kwargs)
     if repeat:
-        rep_args = ['h', repeat.name]
+        rep_args = ["h", repeat.name]
         if maxPerRow:
             rep_args.append(maxPerRow)
-        new_kwargs.setdefault('repeat', Repeat(*rep_args))
+        new_kwargs.setdefault("repeat", Repeat(*rep_args))
     return Stat(
         title=title,
        dataSource=dataSource,
         targets=[get_influx_target(str(stat_query), alias=alias)],
-        thresholdType='percentage',
-        thresholds=[Threshold('green', 0, 0.0), Threshold('yellow', 1, 50.0), Threshold('red', 2, 80.0)],
-        ** new_kwargs,
+        thresholdType="percentage",
+        thresholds=[
+            Threshold("green", 0, 0.0),
+            Threshold("yellow", 1, 50.0),
+            Threshold("red", 2, 80.0),
+        ],
+        **new_kwargs,
     )
 
 
-def get_table_panel(panel_infos: PanelInfos,
-                    data_source: str,
-                    query_list: list[Query],
-                    *,
-                    result_format_list: list[str] = None,
-                    alias_list: list[str] = None,
-                    transformations=[],
-                    overrides=None,
-                    **kwargs):
+def get_table_panel(
+    panel_infos: PanelInfos,
+    data_source: str,
+    query_list: List[Query],
+    *,
+    result_format_list: List[str] = None,
+    alias_list: List[str] = None,
+    transformations=[],
+    overrides=None,
+    **kwargs,
+):
     if not alias_list:
-        alias_list = [''] * len(query_list)
+        alias_list = [""] * len(query_list)
     if not result_format_list:
-        result_format_list = ['table'] * len(query_list)
-    targets = [get_influx_target(str(query), result_format=result_format, alias=alias) for query, result_format, alias in zip(query_list, result_format_list, alias_list)]
+        result_format_list = ["table"] * len(query_list)
+    targets = [
+        get_influx_target(str(query), result_format=result_format, alias=alias)
+        for query, result_format, alias in zip(
+            query_list, result_format_list, alias_list
+        )
+    ]
     new_kwargs = {**kwargs}
     if panel_infos.absthreshold is not None:
-        new_kwargs.update({'thresholdType': 'absolute',
-                           'thresholds': [Threshold('green', 0, 0.0),
-                                          Threshold('red', index=1, value=float(panel_infos.absthreshold), op='lt'), ],
-                           }
-                          )
+        new_kwargs.update(
+            {
+                "thresholdType": "absolute",
+                "thresholds": [
+                    Threshold("green", 0, 0.0),
+                    Threshold(
+                        "red", index=1, value=float(panel_infos.absthreshold), op="lt"
+                    ),
+                ],
+            }
+        )
 
     return Table(
         title=panel_infos.name,
@@ -127,80 +172,106 @@ def get_table_panel(panel_infos: PanelInfos,
         unit=panel_infos.unit,
         overrides=overrides,
         **new_kwargs,
-
     )
 
 
-def get_bar_chart_panel(panel_infos: PanelInfos,
-                        data_source: str,
-                        query_list: list[Query],
-                        *,
-                        result_format_list: list[str] = None,
-                        alias_list: list[str] = None,
-                        transformations=[],
-                        overrides=None,
-                        **kwargs):
+def get_bar_chart_panel(
+    panel_infos: PanelInfos,
+    data_source: str,
+    query_list: List[Query],
+    *,
+    result_format_list: List[str] = None,
+    alias_list: List[str] = None,
+    transformations=[],
+    overrides=None,
+    **kwargs,
+):
     if not alias_list:
-        alias_list = [''] * len(query_list)
+        alias_list = [""] * len(query_list)
     if not result_format_list:
-        result_format_list = ['table'] * len(query_list)
-    targets = [get_influx_target(str(query), result_format=result_format, alias=alias) for query, result_format, alias in zip(query_list, result_format_list, alias_list)]
+        result_format_list = ["table"] * len(query_list)
+    targets = [
+        get_influx_target(str(query), result_format=result_format, alias=alias)
+        for query, result_format, alias in zip(
+            query_list, result_format_list, alias_list
+        )
+    ]
     new_kwargs = {**kwargs}
     if panel_infos.absthreshold is not None:
-        new_kwargs.update({'thresholdType': 'absolute',
-                           'thresholds': [Threshold('green', 0, 0.0),
-                                          Threshold('red', index=1, value=float(panel_infos.absthreshold), op='lt'), ],
-                           }
-                          )
-    extraJson = {"fieldConfig": {
-        "defaults": {
-            "fieldMinMax": True,
-            "max": 1,
-            "unit": panel_infos.unit
+        new_kwargs.update(
+            {
+                "thresholdType": "absolute",
+                "thresholds": [
+                    Threshold("green", 0, 0.0),
+                    Threshold(
+                        "red", index=1, value=float(panel_infos.absthreshold), op="lt"
+                    ),
+                ],
+            }
+        )
+    extraJson = {
+        "fieldConfig": {
+            "defaults": {"fieldMinMax": True, "max": 1, "unit": panel_infos.unit}
         }
     }
-    }
-
+
     return BarChart(
         title=panel_infos.name,
         dataSource=data_source,
         targets=targets,
         transformations=transformations,
-        xTickLabelRotation=-45, 
+        xTickLabelRotation=-45,
         extraJson=extraJson,
         **new_kwargs,
-
     )
 
 
-def get_pie_chart_panel(panel_infos: PanelInfos,
-                        data_source: str,
-                        query_list: list[Query],
-                        *,
-                        result_format_list: list[str] = None,
-                        alias_list: list[str] = None,
-                        transformations=[],
-                        overrides=[],
-                        **kwargs):
-
+def get_pie_chart_panel(
+    panel_infos: PanelInfos,
+    data_source: str,
+    query_list: List[Query],
+    *,
+    result_format_list: List[str] = None,
+    alias_list: List[str] = None,
+    transformations=[],
+    overrides=[],
+    **kwargs,
+):
     targets = [get_influx_target(str(query)) for query in query_list]
     new_kwargs = {**kwargs}
     if panel_infos.absthreshold is not None:
-        if 'thresholdsStyleMode' not in new_kwargs:
-            new_kwargs.update({
-                'thresholdType': 'absolute',
-                'thresholds': [Threshold('green', 0, 0.0),
-                               Threshold('red', index=1, value=float(panel_infos.absthreshold), op='lt')],
-                'thresholdsStyleMode': 'line'
-            })
+        if "thresholdsStyleMode" not in new_kwargs:
+            new_kwargs.update(
+                {
+                    "thresholdType": "absolute",
+                    "thresholds": [
+                        Threshold("green", 0, 0.0),
+                        Threshold(
+                            "red",
+                            index=1,
+                            value=float(panel_infos.absthreshold),
+                            op="lt",
+                        ),
+                    ],
+                    "thresholdsStyleMode": "line",
+                }
+            )
         else:
-            new_kwargs.update({
-                'thresholdType': 'absolute',
-                'thresholds': [Threshold('green', 0, 0.0),
-                               Threshold('red', index=1, value=float(panel_infos.absthreshold), op='lt')]
-            })
+            new_kwargs.update(
+                {
+                    "thresholdType": "absolute",
+                    "thresholds": [
+                        Threshold("green", 0, 0.0),
+                        Threshold(
+                            "red",
+                            index=1,
+                            value=float(panel_infos.absthreshold),
+                            op="lt",
+                        ),
+                    ],
+                }
+            )
 
-
     return PieChartv2(
         title=panel_infos.name,
         dataSource=data_source,
@@ -209,5 +280,4 @@ def get_pie_chart_panel(panel_infos: PanelInfos,
         unit=panel_infos.unit,
         overrides=overrides,
         **new_kwargs,
-
     )
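
The panels.py reformatting above is behaviour-preserving; the only non-obvious logic is how absthreshold turns into Grafana threshold kwargs. Below is a condensed, standalone sketch of that behaviour (threshold_kwargs is a hypothetical helper, not part of panels.py); it folds the if/else from get_time_series_panel/get_pie_chart_panel into a setdefault purely for illustration.

from grafanalib.core import Threshold


def threshold_kwargs(absthreshold, caller_kwargs):
    """Sketch of the threshold defaulting used by the panel helpers above."""
    merged = {**caller_kwargs}
    if absthreshold is not None:
        merged.update(
            {
                "thresholdType": "absolute",
                "thresholds": [
                    Threshold("green", 0, 0.0),
                    Threshold("red", index=1, value=float(absthreshold), op="lt"),
                ],
            }
        )
        # The threshold is drawn as a line unless the caller already chose a style.
        merged.setdefault("thresholdsStyleMode", "line")
    return merged


print(threshold_kwargs(42.0, {})["thresholdsStyleMode"])                               # -> line
print(threshold_kwargs(42.0, {"thresholdsStyleMode": "area"})["thresholdsStyleMode"])  # -> area
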
diff --git a/dashboards/variables.py b/dashboards/variables.py
index 2bd78dca5f8f78654cd35d80aaf350ec514a4fdd..53990b03bd0cf23f71ff77f0bb64701b0ad8be5b 100644
--- a/dashboards/variables.py
+++ b/dashboards/variables.py
@@ -1,6 +1,7 @@
 from grafanalib.core import Template
 from collections import namedtuple
 from dashboards.influx_queries import show_tag_values, get_tag_values, show_field_keys
+from typing import List
 
 Filter = namedtuple(
     "Filter", ("name", "multi", "default_value", "refresh"), defaults=("", True, "", 1)
@@ -34,12 +35,12 @@ def get_time_dependend_dashboard_variable(
     *,
     inner_field_key: str = "",
     where: str = "",
-    group_by: list[str] = None
+    group_by: List[str] = None
 ):
     query = get_tag_values(
         measurment_name,
         filter.name,
-        inner_field_key=inner_field_key, 
+        inner_field_key=inner_field_key,
         where=where,
         group_by=group_by
     )
diff --git a/tests/test_ncu_parser.py b/tests/test_ncu_parser.py
index ab38be61f32dab3c068cc8f27747b26e63cbcf3f..c49460e6b67dccedfbd9668dd29de0e44c69e87c 100644
--- a/tests/test_ncu_parser.py
+++ b/tests/test_ncu_parser.py
@@ -10,7 +10,7 @@ def sample_data():
     data = {
         ("Block Size", ""): [128, 256, 512],
         ("Grid Size", ""): [64, 128, 256],
-        ("device__attribute_display_name", ""): ["GPU1", "GPU2", "GPU3"],
+        ("device__attribute_display_name", ""): [" GPU 1", "GPU2", "GPU 3"],
         ("dram__bytes_write.sum", "Bytes"): [1e9, 2e9, 3e9],
         ("dram__bytes_read.sum", "GBytes"): [0.5, 1., 1.5],
         ("dram__bytes_write.sum.per_second", "MByte/s"): [100, 200, 300],
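
The fixture change above feeds GPU names with leading and embedded spaces into the parser; together with the .str.replace(" ", "") call in cbutil/ncu_parser.py they should all collapse to space-free tag values. A hypothetical companion test (not part of this diff) that pins down just that expectation:

import pandas as pd


def test_gpu_tag_whitespace_is_stripped():
    # Values taken from the updated sample_data fixture above.
    raw = pd.Series([" GPU 1", "GPU2", "GPU 3"], name="device__attribute_display_name")
    normalised = raw.str.replace(" ", "")
    assert list(normalised) == ["GPU1", "GPU2", "GPU3"]
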