diff --git a/dashboards/dashboard_base.py b/dashboards/dashboard_base.py
index f95efd88e5a8b4d575f23e08fd2fea0c41b834c7..80d161ef07342491c6f61408c3733d3728113370 100644
--- a/dashboards/dashboard_base.py
+++ b/dashboards/dashboard_base.py
@@ -12,11 +12,14 @@ from grafanalib.influxdb import InfluxDBTarget
 
 from dashboards.annotations import Annotation
 from dashboards.influx_queries import Query
+from dashboards.legends import AxisLabel, Units
 
 
-def get_influx_target(target_query: str, **kwargs) -> InfluxDBTarget:
+def get_influx_target(target_query: str, result_format: str = 'time_series', alias: str = "", **kwargs) -> InfluxDBTarget:
     return InfluxDBTarget(
         query=target_query,
+        format=result_format,
+        alias=alias,
         **{k: v for k, v in kwargs.items() if v is not None},
     )
 
@@ -76,11 +79,11 @@ def build_row_repeat_dashboard(
     dataSource: str,
     measurment_name: str,
     panel_query: Query,
-    unit: str,
-    axisLabel: str,
+    unit: Units,
+    axisLabel: AxisLabel,
     other_vars: List[Template] = None,
     annotations: Annotations = Annotations(),
-    alias: str = None,
+    alias: str = '',
 ) -> Dashboard:
     """Build a Dashboard that takes one query and repeats that with 2 variables."""
     time_series_kwargs = {
@@ -88,8 +91,8 @@ def build_row_repeat_dashboard(
         'dataSource': dataSource,
         'targets': [get_influx_target(str(panel_query), alias=alias)],
         'repeat': Repeat('h', panel_repeat_var.name),
-        'unit': unit,
-        'axisLabel': axisLabel,
+        'unit': unit.value,
+        'axisLabel': axisLabel.value,
         'pointSize': 9,
     }
     if other_vars is None:
diff --git a/dashboards/dashboard_fe2ti.py b/dashboards/dashboard_fe2ti.py
index b540eaca9880c2e45b97fe644231d3ee3442dfbf..7431f35008176834f1c4d68933ff77dddc5bad08 100644
--- a/dashboards/dashboard_fe2ti.py
+++ b/dashboards/dashboard_fe2ti.py
@@ -6,12 +6,14 @@ from dashboards.dashboard_base import (DashboardOptions,
 
 from dashboards.overrides import get_line_style_regex_override, get_color_regex_override
 
-from dashboards.panels import PanelInfos, get_time_series_panel, get_text_panel
+from dashboards.panels import PanelInfos, get_time_series_panel, get_text_panel, is_regex
 from dashboards.variables import get_dashboard_variable, Filter
 
 from dashboards.influx_queries import join_variable_and
 from dashboards.legends import Units
 
+from dashboards.influx_queries import Query
+
 description_markdown = r"""
 
   - Linestyle indicates the compiler:
@@ -102,9 +104,13 @@ def dashboard_fe2ti():
         get_time_series_panel(
             field,
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
+            [Query(select_=field.name,
+                  from_=measurment_name,
+                  where_=where,
+                  group_by=[f.name for f in filters],
+                  from_string=not is_regex(measurment_name),
+                  select_string=not is_regex(field.name))
+                  ],
             overrides=overrides,
         )
         for field in fields]
diff --git a/dashboards/dashboard_pystencils.py b/dashboards/dashboard_pystencils.py
index 251dde7cabfafd52ac97b9573ae720e81e79693e..45986fc59e64253109c88d7a86b7f2bfd203eb7e 100644
--- a/dashboards/dashboard_pystencils.py
+++ b/dashboards/dashboard_pystencils.py
@@ -5,7 +5,7 @@ from dashboards.dashboard_base import (DashboardOptions,
                                        Repeat,)
 
 
-from dashboards.panels import PanelInfos, get_time_series_panel
+from dashboards.panels import PanelInfos, get_time_series_panel, is_regex
 from dashboards.variables import get_dashboard_variable, Filter, get_measurement_filter
 
 from dashboards.influx_queries import join_variable_and
@@ -14,6 +14,7 @@ from dashboards.legends import Units
 import cbutil.ncu_keys as ncu_keys
 import cbutil.likwid_keys as likwid_keys
 
+from dashboards.influx_queries import Query
 
 INTEL_LINESTYLE = "solid"
 GCC_LINESTYLE = "dashed"
@@ -64,9 +65,12 @@ def dashboard_pystencils_cpu():
         get_time_series_panel(
             field,
             data_source,
-            f"/^${benchmark.name}$/",
-            where=where,
-            group_by=group_by,
+            [Query(select_=field.name,
+                  from_=f"/^${benchmark.name}$/",
+                  where_=where,
+                  group_by=group_by,
+                  from_string=not is_regex(f"/^${benchmark.name}$/"),
+                  select_string=not is_regex(field.name))],
         )
         for field in fields]
 
@@ -125,9 +129,12 @@ def dashboard_pystencils_gpu():
         get_time_series_panel(
             field,
             data_source,
-            f"/^${benchmark.name}$/",
-            where=where,
-            group_by=group_by,
+            [Query(select_=field.name,
+                  from_=f"/^${benchmark.name}$/",
+                  where_=where,
+                  group_by=group_by,
+                  from_string=not is_regex(f"/^${benchmark.name}$/"),
+                  select_string=not is_regex(field.name))],
         )
         for field in fields]
 
diff --git a/dashboards/dashboard_walberla.py b/dashboards/dashboard_walberla.py
index 739b15c3044999c59dab2ee959a8966c2cae2604..e6cfff6db50e2c978f20f2fb2f73e40d58c5d8ac 100644
--- a/dashboards/dashboard_walberla.py
+++ b/dashboards/dashboard_walberla.py
@@ -18,15 +18,17 @@ from dashboards.dashboard_base import (
     get_grid_pos,
     pack_in_row,
 )
-from dashboards.influx_queries import join_variable_and
-from dashboards.legends import Units
-from dashboards.overrides import get_color_regex_override
-from dashboards.panels import PanelInfos, get_time_series_panel
+from dashboards.influx_queries import join_variable_and, get_variable_condition_with_tag, get_variable_tag, join_conditions, get_variable_condition, get_variable_condition_without_regex, get_variable_condition_unbounded
+from dashboards.legends import Units, AxisLabel
+from dashboards.overrides import get_color_regex_override, get_line_style_regex_override
+from dashboards.panels import PanelInfos, get_time_series_panel, get_table_panel, get_bar_chart_panel, get_pie_chart_panel, is_regex
 from dashboards.variables import (
     Filter,
     get_dashboard_variable,
     get_time_dependend_dashboard_variable,
+    get_field_keys_dashboard_variable,
 )
+from dashboards.influx_queries import Query
 
 
 def dashboard_uniformgridcpu():
@@ -109,9 +111,12 @@ def dashboard_uniformgridcpu():
         get_time_series_panel(
             field,
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
+            [Query(select_=field.name,
+                  from_=measurment_name,
+                  where_=where,
+                  group_by=[f.name for f in filters],
+                  from_string=not is_regex(measurment_name),
+                  select_string=not is_regex(field.name))],
         )
         for field in fields
     ]
@@ -182,9 +187,12 @@ def dashboard_uniformgridgpu():
         get_time_series_panel(
             field,
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
+            [Query(select_=field.name,
+                  from_=measurment_name,
+                  where_=where,
+                  group_by=[f.name for f in filters],
+                  from_string=not is_regex(measurment_name),
+                  select_string=not is_regex(field.name))],
         )
         for field in fields
     ]
@@ -265,9 +273,12 @@ def dashboard_uniformgridgpu_profile():
         get_time_series_panel(
             field,
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
+            [Query(select_=field.name,
+                  from_=measurment_name,
+                  where_=where,
+                  group_by=[f.name for f in filters],
+                  from_string=not is_regex(measurment_name),
+                  select_string=not is_regex(field.name))],
         )
         for field in fields
     ]
@@ -282,6 +293,310 @@ def dashboard_uniformgridgpu_profile():
     )
 
 
+
+def dashboard_uniformgridcpu_relativeperformance():
+    data_source = "InfluxDB-1"
+    arch = "CPU"
+    measurment_name = f"UniformGrid{arch}"
+    measurment_name2 = f"roofline"
+    row_repeat = "host"
+
+    options = DashboardOptions(
+        title=f"Uniform Grid {arch} - Relative Performance",
+        description=f"Relative performance benchmark dashboard for the Uniform Grid {arch} Benchmark from walberla",
+        tags=[arch, "walberla"],
+        timezone="browser",
+    )
+
+    filters = [
+        Filter("host", multi=False, default_value="icx36"),
+        Filter("cellsPerBlock_0", multi=False, default_value="128"),
+        Filter("collisionSetup"),
+        Filter("project_id", multi=False, default_value="walberla/walberla"),
+        Filter("branch", multi=False, default_value="master"),
+        Filter("BandwidthBenchmark", multi=False, default_value="bw_stream"),
+    ]
+
+    fields = [
+        PanelInfos("Relative Performance", Units.number, absthreshold=80),
+        PanelInfos("Bandwidth", Units.number, absthreshold=80),
+        PanelInfos("Relative Performance", Units.percent, absthreshold=80),
+    ]
+
+    filter_vars = [
+        get_dashboard_variable(filter, measurment_name, data_source)
+        for filter in filters[1:5]
+    ]
+    filter_vars = [
+        get_dashboard_variable(filters[0], measurment_name2, data_source),
+        *filter_vars,
+        get_field_keys_dashboard_variable(
+            filters[5], measurment_name2, data_source
+        ),
+    ]
+       
+    row_repeat_var = [fv for fv in filter_vars if fv.name == row_repeat][0]
+
+    where0A2 = join_conditions(
+        [
+            get_variable_condition_with_tag(filters[i].name) for i in range(2)
+        ] + [
+            get_variable_condition_without_regex(filters[i].name) for i in [2]
+        ] + [
+            get_variable_condition(filters[3].name)
+        ] + [
+            get_variable_condition_without_regex(filters[i].name) for i in [4]
+        ], "AND"
+        , include_time_filter=True
+    )
+    where0B2 = get_variable_condition_with_tag(filters[0].name)
+    where1A = get_variable_condition_with_tag(filters[0].name)
+    where2A2 = join_conditions(
+        [
+            get_variable_condition_with_tag(filters[i].name) for i in range(2)
+        ] + [
+            get_variable_condition_without_regex(filters[i].name) for i in [2]
+        ] + [
+            get_variable_condition(filters[3].name)
+        ] + [
+            get_variable_condition_without_regex(filters[i].name) for i in [4]
+        ], "AND"
+        , include_time_filter=True
+    )
+    where2B2 = get_variable_condition_with_tag(filters[0].name)
+
+    group_by0A2 = ["host", "project_id", "branch", "collisionSetup", "mpi_num_processes", "streamingPattern", "timeStepStrategy", "stencil", "blocks_0", "blocks_1", "blocks_2", "cellsPerBlock_0", "cellsPerBlock_1", "cellsPerBlock_2", "periodic_0", "periodic_1", "periodic_2"]
+    group_by0B2 = [get_variable_tag(filters[0].name)]
+    group_by1A = [filters[0].name]
+    group_by2A2 = ["host", "project_id", "branch", "collisionSetup", "mpi_num_processes", "streamingPattern", "timeStepStrategy", "stencil", "blocks_0", "blocks_1", "blocks_2", "cellsPerBlock_0", "cellsPerBlock_1", "cellsPerBlock_2", "periodic_0", "periodic_1", "periodic_2"]
+    group_by2B2 = [get_variable_tag(filters[0].name)]
+    
+    annotations = get_commit_annotation(
+        data_source, "red", "commits", measurment_name2)
+
+    selected_columns0A1 = ", ".join([
+        '"host"',
+        '"collisionSetup"',
+        '"MLUPSperProcess"',
+        '"mpi_num_processes"'])
+    selected_columns0A2 = ", ".join([
+        '(last("mlupsPerProcess")) as "MLUPSperProcess"',
+        '"mpi_num_processes"'])
+    selected_columns0B1 = ", ".join([
+        '"host"',
+        '"P_max"'])
+    selected_columns0B2 = ", ".join([
+        'last($BandwidthBenchmark)/(27*2*8) as "P_max"'])
+    selected_columns1A = ", ".join([
+        'last($BandwidthBenchmark) as "BW[MByte/s]"',
+        'last($BandwidthBenchmark)/(27*2*8) as "P_max[MLUPS]"'])
+    selected_columns2A1 = ", ".join([
+        '"host"',
+        '"collisionSetup"',
+        '"MLUPSperProcess"',
+        '"mpi_num_processes"'])
+    selected_columns2A2 = ", ".join([
+        '(last("mlupsPerProcess")) as "MLUPSperProcess"',
+        '"mpi_num_processes"'])
+    selected_columns2B1 = ", ".join([
+        '"host"',
+        '"P_max"'])
+    selected_columns2B2 = ", ".join([
+        'last($BandwidthBenchmark)/(27*2*8) as "P_max"'])
+
+
+    query0A2 = Query(select_=selected_columns0A2,
+                  from_=measurment_name,
+                  where_=where0A2,
+                  group_by=group_by0A2,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)
+    query0A1 = Query(selected_columns0A1,
+                  from_=query0A2,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)
+    query0B2 = Query(select_=selected_columns0B2,
+                  from_=measurment_name2,
+                  where_=where0B2,
+                  group_by=group_by0B2,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)   
+    query0B1 = Query(select_=selected_columns0B1,
+                  from_=query0B2,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)     
+
+    query1A = Query(select_=selected_columns1A,
+                  from_=measurment_name2,
+                  where_=where1A,
+                  group_by=group_by1A,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)
+
+    query2A2 = Query(select_=selected_columns2A2,
+                  from_=measurment_name,
+                  where_=where2A2,
+                  group_by=group_by2A2,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)
+    query2A1 = Query(selected_columns2A1,
+                  from_=query2A2,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)
+    query2B2 = Query(select_=selected_columns2B2,
+                  from_=measurment_name2,
+                  where_=where2B2,
+                  group_by=group_by2B2,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)   
+    query2B1 = Query(select_=selected_columns2B1,
+                  from_=query2B2,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)  
+
+    alias0A = "$tag_collisionSetup"
+    alias2A = "$tag_collisionSetup"
+                  
+
+    transformations0 = [
+    {
+      "id": "joinByField",
+      "options": {
+        "byField": "host",
+        "mode": "outerTabular"
+      }
+    },
+    {
+      "id": "calculateField",
+      "options": {
+        "alias": "P",
+        "binary": {
+          "left": "MLUPSperProcess",
+          "operator": "*",
+          "right": "mpi_num_processes"
+        },
+        "mode": "binary",
+        "reduce": {
+          "reducer": "sum"
+        },
+        "replaceFields": False
+      }
+    },
+    {
+      "id": "calculateField",
+      "options": {
+        "binary": {
+          "left": "P",
+          "operator": "/",
+          "right": "P_max"
+        },
+        "mode": "binary",
+        "reduce": {
+          "reducer": "sum"
+        }
+      }
+    },
+    {
+      "id": "filterFieldsByName",
+      "options": {
+        "include": {
+          "names": [
+            "Time 1",
+            "collisionSetup",
+            "P",
+            "P / P_max",
+            "MLUPSperProcess"
+          ]
+        }
+      }
+    }
+  ]
+
+    transformations2 = [
+    {
+      "id": "joinByField",
+      "options": {
+        "byField": "host",
+        "mode": "outerTabular"
+      }
+    },
+    {
+      "id": "calculateField",
+      "options": {
+        "alias": "P",
+        "binary": {
+          "left": "MLUPSperProcess",
+          "operator": "*",
+          "right": "mpi_num_processes"
+        },
+        "mode": "binary",
+        "reduce": {
+          "reducer": "sum"
+        },
+        "replaceFields": False
+      }
+    },
+    {
+      "id": "calculateField",
+      "options": {
+        "binary": {
+          "left": "P",
+          "operator": "/",
+          "right": "P_max"
+        },
+        "mode": "binary",
+        "reduce": {
+          "reducer": "sum"
+        }
+      }
+    },
+    {
+      "id": "filterFieldsByName",
+      "options": {
+        "include": {
+          "names": [
+            "collisionSetup",
+            "P / P_max"
+          ]
+        }
+      }
+    }
+  ]
+
+    panels = [
+        # Panel 0
+        get_table_panel(
+            fields[0],
+            data_source,
+            [query0A1, query0B1],
+            alias_list = [alias0A, ""],
+            transformations=transformations0,
+            gridPos=get_grid_pos(14, 10, 0, 1),
+        ),
+        # Panel 1
+        get_table_panel(
+            fields[1],
+            data_source,
+            [query1A],
+            gridPos=get_grid_pos(5, 10, 10, 1),
+        ),
+        # Panel 2
+        get_bar_chart_panel(
+            fields[2],
+            data_source,
+            [query2A1, query2B1],
+            alias_list = [alias2A, ""],
+            transformations=transformations2,
+            gridPos=get_grid_pos(9, 10, 10, 6),
+        ),
+    ]
+
+    return build_dashboard(
+        options, panels=panels, templating=filter_vars, annotations=annotations
+    )
+
+
+
 def dashboard_fslbmgravitywave():
     data_source = "InfluxDB-1"
     arch = "CPU"
@@ -305,6 +620,10 @@ def dashboard_fslbmgravitywave():
 
     fields = [
         PanelInfos("simulationTime", Units.seconds),
+        PanelInfos("/.*_percentage/", Units.percent),
+        PanelInfos("/Communication:.*_percentage/", Units.percent),
+        PanelInfos(r"/Sweep:.*_percentage/", Units.percent),
+        PanelInfos(r"/MPI Barrier:.*_percentage/", Units.percent),
     ]
 
     filter_vars = [
@@ -322,25 +641,32 @@ def dashboard_fslbmgravitywave():
         for group, color in zip(groups, colors)
     ]
 
+    queries = [
+        Query(
+            select_=field.name,
+            from_=measurment_name,
+            where_=where,
+            group_by=[f.name for f in filters],
+            from_string=not is_regex(measurment_name),
+            select_string=not is_regex(field.name)
+        )
+        for index, field in enumerate(fields)
+    ]
+
     panels = [
         get_time_series_panel(
-            field,
+            fields[0],
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
-            gridPos=get_grid_pos(12, 24, 0, idx * (13)),
+            [queries[0]],
+            gridPos=get_grid_pos(12, 24, 0, 0),
         )
-        for idx, field in enumerate(fields)
     ]
     panels.append(
         get_time_series_panel(
-            PanelInfos("/.*_percentage/", Units.percent),
+            fields[1],
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
-            gridPos=get_grid_pos(12, 12, 0, len(fields) * (13)),
+            [queries[1]],
+            gridPos=get_grid_pos(12, 12, 0, 13),
             drawStyle="bars",
             stacking={"mode": "normal"},
             fillOpacity=70,
@@ -353,12 +679,10 @@ def dashboard_fslbmgravitywave():
 
     panels.append(
         get_time_series_panel(
-            PanelInfos("/Communication:.*_percentage/", Units.percent),
+            fields[2],
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
-            gridPos=get_grid_pos(12, 12, 12, len(fields) * (13)),
+            [queries[2]],
+            gridPos=get_grid_pos(12, 12, 12, 13),
             drawStyle="bars",
             stacking={"mode": "normal"},
             fillOpacity=70,
@@ -369,12 +693,10 @@ def dashboard_fslbmgravitywave():
 
     panels.append(
         get_time_series_panel(
-            PanelInfos(r"/Sweep:.*_percentage/", Units.percent),
+            fields[3],
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
-            gridPos=get_grid_pos(12, 12, 0, (len(fields) + 1) * (13)),
+            [queries[3]],
+            gridPos=get_grid_pos(12, 12, 0, 2 * 13),
             drawStyle="bars",
             stacking={"mode": "normal"},
             fillOpacity=70,
@@ -385,12 +707,10 @@ def dashboard_fslbmgravitywave():
 
     panels.append(
         get_time_series_panel(
-            PanelInfos(r"/MPI Barrier:.*_percentage/", Units.percent),
+            fields[4],
             data_source,
-            measurment_name,
-            where=where,
-            group_by=[f.name for f in filters],
-            gridPos=get_grid_pos(12, 12, 12, (len(fields) + 1) * (13)),
+            [queries[4]],
+            gridPos=get_grid_pos(12, 12, 12, 2 * 13),
             drawStyle="bars",
             stacking={"mode": "normal"},
             fillOpacity=70,
@@ -402,3 +722,196 @@ def dashboard_fslbmgravitywave():
     return build_dashboard(
         options, panels=panels, templating=filter_vars, annotations=annotations
     )
+
+
+
+def dashboard_percolationgpu():
+    data_source = "InfluxDB-1"
+    arch = "GPU"
+    measurment_name = f"Percolation{arch}"
+    measurment_name2 = f"Percolation{arch}_profile"
+    row_repeat = "GPU"
+
+    options = DashboardOptions(
+        title="Percolation GPU",
+        description=f"Benchmark dashboard for the Percolation {arch} Benchmark from walberla",
+        tags=[arch, "walberla"],
+        timezone="browser",
+    )
+
+    filters = [
+        Filter("host", multi=False, refresh=2),
+        Filter(row_repeat),
+        Filter("useParticles"),
+    ]
+
+    fields = [
+        PanelInfos("MLUPs", Units.mlups, absthreshold=80),
+        PanelInfos("Time Steps per Second", absthreshold=80),
+        PanelInfos("Performance Flops", Units.mflop_sec, absthreshold=80),
+        PanelInfos("Memory Bandwidth", Units.mbytes_per_second, absthreshold=80),
+        PanelInfos(r"/.*_average/ useParticles=$useParticles", Units.seconds),
+        PanelInfos(r"/.*_average/ useParticles=$useParticles", Units.seconds),
+    ]
+
+    whereVariable = join_conditions([get_variable_condition_unbounded(filters[0].name)], "AND",
+        include_time_filter=True)
+    groupVariable = [filters[0].name]
+
+    filter_vars = [
+        get_dashboard_variable(filter, measurment_name, data_source)
+        for filter in filters[2:]
+    ]
+    filter_vars = [
+        get_time_dependend_dashboard_variable(
+            filters[0], measurment_name, data_source, inner_field_key="MLUPS"
+        ),
+        get_time_dependend_dashboard_variable(
+            filters[1], measurment_name, data_source, inner_field_key="MLUPS", where=whereVariable, group_by=groupVariable
+        ),
+        *filter_vars
+    ]
+
+
+    annotations = get_commit_annotation(
+        data_source, "red", "commits", measurment_name)
+
+    overrides0 = [
+        get_line_style_regex_override(f"/.*useParticles: 0/", style="dash")
+    ]
+        
+    groups4 = ["LBM sweep", "Boundary", "Communication"]
+    colors4 = ["blue", "semi-dark-yellow", "semi-dark-purple"]
+    overrides4 = [
+        get_color_regex_override(f"/{group}.*/", color, mode="fixed")
+        for group, color in zip(groups4, colors4)
+    ]
+
+
+    where = join_conditions([get_variable_condition_with_tag(filters[i].name) for i in range(3)], "AND",
+        include_time_filter=True)
+
+    group_by_elements01 = ["GPU", "branch", "fluidCells", "host", "numParticles", "useParticles", "communicationHidingXWidth", "communicationHidingYWidth", "communicationHidingZWidth", "cores", "numXCellsPerBlock", "numYBlocks", "particleDiameter", "sendDirectlyFromGPU"]
+    group_by01 = [get_variable_tag(i) for i in group_by_elements01]
+    group_by23 = [get_variable_tag(i.name) for i in filters]
+    group_by4 = [get_variable_tag(i) for i in group_by_elements01]
+
+
+    selected_columns0 = ", ".join([
+        '"MLUPS"'])
+    selected_columns1 = ", ".join([
+        '"timeStepsPerSecond"'])
+    selected_columns2 = ", ".join([
+        '"DP [MFlop/s]"'])
+    selected_columns3 = ", ".join([
+        '"Memory bandwidth [MByte/s]"'])
+    selected_columns4 = ", ".join([
+        '/.*_average/'])
+    
+
+    query0 = Query(select_=selected_columns0,
+                  from_=measurment_name,
+                  where_=where,
+                  group_by=group_by01,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)
+    query1 = Query(select_=selected_columns1,
+                  from_=measurment_name,
+                  where_=where,
+                  group_by=group_by01,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)
+    query2 = Query(select_=selected_columns2,
+                  from_=measurment_name2,
+                  where_=where,
+                  group_by=group_by23,
+                  from_string=not is_regex(measurment_name2),
+                  select_string=False)  
+    query3 = Query(select_=selected_columns3,
+                  from_=measurment_name2,
+                  where_=where,
+                  group_by=group_by23,
+                  from_string=not is_regex(measurment_name2),
+                  select_string=False)
+    query4 = Query(select_=selected_columns4,
+                  from_=measurment_name,
+                  where_=where,
+                  group_by=group_by4,
+                  from_string=not is_regex(measurment_name),
+                  select_string=False)
+
+
+    panels = [
+        # Panel 0
+        get_time_series_panel(
+            fields[0],
+            data_source,
+            [query0],
+            pointSize=5,
+            thresholdsStyleMode='off',
+            gridPos=get_grid_pos(13, 9, 0, 0),
+            overrides=overrides0,
+        ),
+        # Panel 1
+        get_time_series_panel(
+            fields[1],
+            data_source,
+            [query1],
+            pointSize=5,
+            thresholdsStyleMode='off',
+            gridPos=get_grid_pos(13, 8, 9, 0),
+        ),
+        # Panel 2
+        get_time_series_panel(
+            fields[2],
+            data_source,
+            [query2],
+            pointSize=5,
+            thresholdsStyleMode='off',
+            gridPos=get_grid_pos(12, 9, 0, 14),
+        ),
+        # Panel 3
+        get_time_series_panel(
+            fields[3],
+            data_source,
+            [query3],
+            pointSize=5,
+            thresholdsStyleMode='off',
+            gridPos=get_grid_pos(12, 9, 9, 14),
+        ),
+        # Panel 4
+        get_pie_chart_panel(
+            fields[4],
+            data_source,
+            [query4],
+            gridPos=get_grid_pos(13, 12, 0, 27),
+            repeat=Repeat("h", filters[2].name),
+            overrides=overrides4,
+        ),
+    ]
+
+
+    # Rows
+    row_repeat_var = [fv for fv in filter_vars if fv.name == row_repeat][0]
+
+    row0 = pack_in_row(
+        title=f"MLUPS and Time Steps per Second",
+        panels=panels[0:2],
+    )
+
+    row1 = pack_in_row(
+        title="Profiling",
+        panels=panels[2:4],
+    )
+    
+    row2 = pack_in_row(
+        title=f"Distribution ${row_repeat_var.name}",
+        panels=panels[4:5],
+        repeat=Repeat("v", row_repeat_var.name),
+    )
+
+    rows = [row0, row1, row2]
+
+    return build_dashboard(
+        options, rows=rows, templating=filter_vars, annotations=annotations
+    )
diff --git a/dashboards/deploy.py b/dashboards/deploy.py
index f703d23c656b8ad9422d0d9526555b0ac77778bd..5b68f8af570ec74de3390e95ce817c79369633c1 100644
--- a/dashboards/deploy.py
+++ b/dashboards/deploy.py
@@ -5,7 +5,7 @@ import dashboards.dashboard_list as boards
 from dashboards.upload import upload_dashboard
 from dashboards.dashboard_fe2ti import dashboard_fe2ti
 from dashboard_pystencils import dashboard_pystencils_cpu, dashboard_pystencils_gpu
-from dashboard_walberla import dashboard_uniformgridcpu, dashboard_uniformgridgpu, dashboard_uniformgridgpu_profile, dashboard_fslbmgravitywave
+from dashboard_walberla import dashboard_uniformgridcpu, dashboard_uniformgridgpu, dashboard_uniformgridgpu_profile, dashboard_fslbmgravitywave, dashboard_uniformgridcpu_relativeperformance, dashboard_percolationgpu
 
 logger = logging.getLogger(__file__)
 logger.setLevel(logging.INFO)
@@ -46,6 +46,8 @@ def main():
             upload_dashboard(dashboard_uniformgridgpu(), folder=walberla_folder)
             upload_dashboard(dashboard_uniformgridgpu_profile(), folder=walberla_folder)
             upload_dashboard(dashboard_fslbmgravitywave(), folder=walberla_folder)
+            upload_dashboard(dashboard_uniformgridcpu_relativeperformance(), folder=walberla_folder)
+            upload_dashboard(dashboard_percolationgpu(), folder=walberla_folder)
         else:
             board = getattr(boards, board_name)
             upload_dashboard(board(), folder=walberla_folder)
diff --git a/dashboards/influx_queries.py b/dashboards/influx_queries.py
index d2113cfa50d8c7a1d4d9df3e3362c15ccbb4a8ae..76b054feae453285a5b5c1b39480e8acb8ec21bb 100644
--- a/dashboards/influx_queries.py
+++ b/dashboards/influx_queries.py
@@ -6,8 +6,8 @@ from typing import List, Union
 @dataclass
 class Query:
     select_: str
-    from_: str
-    where_: str
+    from_: Union[str, 'Query']
+    where_: str = None
     group_by: List[str] = field(default_factory=list)
     select_string: bool = True
     from_string: bool = True
@@ -19,17 +19,22 @@ class Query:
             return f"SELECT {self.select_} "
 
     def _get_from(self):
-        if self.from_string:
+        if isinstance(self.from_, Query):
+            return f'FROM ({self.from_}) '
+        elif self.from_string:
             return f'FROM "{self.from_}" '
         else:
             return f"FROM {self.from_} "
 
     def _get_where(self):
-        return f"WHERE ({self.where_}) AND $timeFilter "
+        if self.where_:
+            return f"WHERE ({self.where_}) "
+        else:
+            return ""
 
     def _get_group_by(self):
         if self.group_by:
-            group_by = ", ".join(f'"{tag}"' for tag in self.group_by)
+            group_by = ", ".join(tag if tag.endswith("::tag") else f'"{tag}"' for tag in self.group_by)
             return f"GROUP BY {group_by}"
         else:
             return ""
@@ -52,12 +57,34 @@ def show_tag_values(table: str, key_name: str) -> str:
     return f'{base} {from_part}WITH key = "{key_name}"'
 
 
-def get_tag_values(table: str, key_name: str, *, inner_field_key="") -> str:
+def get_tag_values(table: str, key_name: str, *, inner_field_key="", where="",
+                   group_by=None) -> str:
+    if group_by is None:
+        group_by = [key_name]
+
+    where_clause = "$timeFilter"
+    if where:
+        where_clause = where
+
     inner_select = "*"
     if inner_field_key != "":
         inner_select = f'"{inner_field_key}", "{key_name}"'
-    inner_query = f'SELECT {inner_select} FROM "{table}" WHERE $timeFilter'
-    return f'SELECT distinct("{key_name}") FROM ({inner_query}) GROUP BY {key_name}'
+
+    inner_query = f'SELECT {inner_select} FROM "{table}" WHERE {where_clause}'
+
+    group_by_clause = ", ".join(group_by)
+
+    return f'SELECT distinct("{key_name}") FROM ({inner_query}) GROUP BY {group_by_clause}'
+
+
+def show_field_keys(table: str) -> str:
+    """Return influx query to get all field keys from a measurement."""
+    base = "SHOW FIELD KEYS"
+    from_part = ""
+    if table != "":
+        from_part = f'from "{table}" '
+
+    return f'{base} {from_part}'
 
 
 def get_variable_condition(variable_name: str, *, tag_key: str = None) -> str:
@@ -71,7 +98,51 @@ def get_variable_condition(variable_name: str, *, tag_key: str = None) -> str:
     return f'"{clean_lhs}" =~ /^${{{clean_rhs}:regex}}$/'
 
 
-def join_conditions(conditions: List[str], operators: Union[List[str], str]):
+def get_variable_condition_unbounded(variable_name: str, *, tag_key: str = None) -> str:
+    clean_rhs = variable_name.strip()
+    if tag_key:
+        clean_lhs = tag_key.strip()
+    else:
+        clean_lhs = clean_rhs
+    if not clean_rhs:
+        raise ValueError("Empty variable name")
+    return f'"{clean_lhs}" =~ /${{{clean_rhs}:regex}}/'
+
+
+def get_variable_condition_with_tag(variable_name: str, *, tag_key: str = None) -> str:
+    clean_rhs = variable_name.strip()
+    if tag_key:
+        clean_lhs = tag_key.strip()
+    else:
+        clean_lhs = clean_rhs
+    if not clean_rhs:
+        raise ValueError("Empty variable name")
+    return f'"{clean_lhs}"::tag =~ /^${clean_rhs}$/'
+
+
+def get_variable_condition_without_regex(variable_name: str, *, tag_key: str = None) -> str:
+    clean_rhs = variable_name.strip()
+    if tag_key:
+        clean_lhs = tag_key.strip()
+    else:
+        clean_lhs = clean_rhs
+    if not clean_rhs:
+        raise ValueError("Empty variable name")
+    return f'"{clean_lhs}" =~ /^${clean_rhs}$/'
+
+
+def get_variable_tag(variable_name: str, *, tag_key: str = None) -> str:
+    clean_rhs = variable_name.strip()
+    if tag_key:
+        clean_lhs = tag_key.strip()
+    else:
+        clean_lhs = clean_rhs
+    if not clean_rhs:
+        raise ValueError("Empty variable name")
+    return f'"{clean_lhs}"::tag'
+
+
+def join_conditions(conditions: List[str], operators: Union[List[str], str], include_time_filter: bool = False):
     ops = operators
     if isinstance(operators, str):
         ops = repeat(operators, len(conditions) - 1)
@@ -84,10 +155,14 @@ def join_conditions(conditions: List[str], operators: Union[List[str], str]):
     ret = conditions[0]
     for op, cond in zip(ops, conditions[1:]):
         ret += f" {op} {cond}"
+
+    if include_time_filter:
+        ret += ' AND $timeFilter'
+
     return ret
 
 
-def join_variable_and(variable_names: List[str]) -> str:
+def join_variable_and(variable_names: List[str], include_time_filter: bool = False) -> str:
     return join_conditions(
-        [get_variable_condition(name) for name in variable_names], "AND"
+        [get_variable_condition(name) for name in variable_names], "AND", include_time_filter
     )
diff --git a/dashboards/legends.py b/dashboards/legends.py
index ddfd690ea4a1dc2293d2761cf619337574c4fe60..b71d165752cc431f3d6f034ad8cca69f1757a0fa 100644
--- a/dashboards/legends.py
+++ b/dashboards/legends.py
@@ -14,6 +14,8 @@ class Units(str, Enum):
     flop_per_byte = 'Flop/Byte'
     mflop_sec = 'mflops'
     percent = 'percentunit'
+    mlups = 'MLUP/s'
+    none = ''
 
 
 class AxisLabel(str, Enum):
diff --git a/dashboards/panels.py b/dashboards/panels.py
index 8fd80e149ed86016e313551b5944891a2cc02731..766fcee51095df9a2eb497099f2e5ae19e2e12a9 100644
--- a/dashboards/panels.py
+++ b/dashboards/panels.py
@@ -1,10 +1,22 @@
 from dataclasses import dataclass, field
+
 # from collections import namedtuple
 from dashboards.influx_queries import Query
-from grafanalib.core import TimeSeries, Text, Stat, Template, Repeat, Threshold
+from grafanalib.core import (
+    TimeSeries,
+    Text,
+    Stat,
+    Template,
+    Repeat,
+    Threshold,
+    Table,
+    BarChart,
+    PieChartv2,
+)
 from dashboards.dashboard_base import get_influx_target
 from dashboards.legends import Units
 from numbers import Number
+from typing import List
 
 # PanelInfos = namedtuple("PanelInfos", ("name", "unit"))
 
@@ -12,7 +24,7 @@ from numbers import Number
 @dataclass
 class PanelInfos:
     name: str
-    unit: Units
+    unit: Units = field(default=Units.none)
     absthreshold: Number = field(default=None)
 
 
@@ -20,76 +32,252 @@ def is_regex(name):
     return name[0] == "/" and name[-1] == "/"
 
 
-def get_time_series_panel(panel_infos: PanelInfos,
-                          data_source: str,
-                          measurment_name: str,
-                          *,
-                          where=None,
-                          group_by=None,
-                          overrides=None,
-                          **kwargs):
-    query = Query(select_=panel_infos.name,
-                  from_=measurment_name,
-                  where_=where,
-                  group_by=group_by,
-                  from_string=not is_regex(measurment_name),
-                  select_string=not is_regex(panel_infos.name))
+def get_time_series_panel(
+    panel_infos: PanelInfos,
+    data_source: str,
+    query_list: List[Query],
+    *,
+    overrides=None,
+    pointSize: int = 9,
+    **kwargs,
+):
+    targets = [get_influx_target(str(query)) for query in query_list]
     new_kwargs = {**kwargs}
     if panel_infos.absthreshold is not None:
-        new_kwargs.update({'thresholdType': 'absolute',
-                           'thresholds': [Threshold('green', 0, 0.0),
-                                          Threshold('red', index=1, value=float(panel_infos.absthreshold), op='lt'), ],
-                           'thresholdsStyleMode': 'line',
-                           }
-                          )
+        if "thresholdsStyleMode" not in new_kwargs:
+            new_kwargs.update(
+                {
+                    "thresholdType": "absolute",
+                    "thresholds": [
+                        Threshold("green", 0, 0.0),
+                        Threshold(
+                            "red",
+                            index=1,
+                            value=float(panel_infos.absthreshold),
+                            op="lt",
+                        ),
+                    ],
+                    "thresholdsStyleMode": "line",
+                }
+            )
+        else:
+            new_kwargs.update(
+                {
+                    "thresholdType": "absolute",
+                    "thresholds": [
+                        Threshold("green", 0, 0.0),
+                        Threshold(
+                            "red",
+                            index=1,
+                            value=float(panel_infos.absthreshold),
+                            op="lt",
+                        ),
+                    ],
+                }
+            )
 
     return TimeSeries(
         title=panel_infos.name,
         dataSource=data_source,
-        targets=[get_influx_target(str(query))],
+        targets=targets,
         unit=panel_infos.unit,
-        pointSize=9,
+        pointSize=pointSize,
         overrides=overrides,
         **new_kwargs,
-
     )
 
 
-def get_text_panel(content: str, *, mode='markdown', **kwargs) -> Text:
-    return Text(
-        content=content,
-        mode=mode,
-        **kwargs
-    )
+def get_text_panel(content: str, *, mode="markdown", **kwargs) -> Text:
+    return Text(content=content, mode=mode, **kwargs)
 
 
-def get_stat_panel(title: str,
-                   dataSource: str,
-                   stat_query: Query,
-                   repeat: Template = None,
-                   alias: str = "",
-                   *,
-                   maxPerRow=0,
-                   **kwargs):
+def get_stat_panel(
+    title: str,
+    dataSource: str,
+    stat_query: Query,
+    repeat: Template = None,
+    alias: str = "",
+    *,
+    maxPerRow=0,
+    **kwargs,
+):
     new_kwargs = {
-        'alignment': 'center',
-        'colorMode': 'value',
-        'graphMode': 'area',
-        'reduceCalc': 'last',
-        'orientation': 'auto',
-        'transparent': True,
+        "alignment": "center",
+        "colorMode": "value",
+        "graphMode": "area",
+        "reduceCalc": "last",
+        "orientation": "auto",
+        "transparent": True,
     }
     new_kwargs.update(kwargs)
     if repeat:
-        rep_args = ['h', repeat.name]
+        rep_args = ["h", repeat.name]
         if maxPerRow:
             rep_args.append(maxPerRow)
-        new_kwargs.setdefault('repeat', Repeat(*rep_args))
+        new_kwargs.setdefault("repeat", Repeat(*rep_args))
     return Stat(
         title=title,
         dataSource=dataSource,
         targets=[get_influx_target(str(stat_query), alias=alias)],
-        thresholdType='percentage',
-        thresholds=[Threshold('green', 0, 0.0), Threshold('yellow', 1, 50.0), Threshold('red', 2, 80.0)],
-        ** new_kwargs,
+        thresholdType="percentage",
+        thresholds=[
+            Threshold("green", 0, 0.0),
+            Threshold("yellow", 1, 50.0),
+            Threshold("red", 2, 80.0),
+        ],
+        **new_kwargs,
+    )
+
+
+def get_table_panel(
+    panel_infos: PanelInfos,
+    data_source: str,
+    query_list: List[Query],
+    *,
+    result_format_list: List[str] = None,
+    alias_list: List[str] = None,
+    transformations=[],
+    overrides=None,
+    **kwargs,
+):
+    if not alias_list:
+        alias_list = [""] * len(query_list)
+    if not result_format_list:
+        result_format_list = ["table"] * len(query_list)
+    targets = [
+        get_influx_target(str(query), result_format=result_format, alias=alias)
+        for query, result_format, alias in zip(
+            query_list, result_format_list, alias_list
+        )
+    ]
+    new_kwargs = {**kwargs}
+    if panel_infos.absthreshold is not None:
+        new_kwargs.update(
+            {
+                "thresholdType": "absolute",
+                "thresholds": [
+                    Threshold("green", 0, 0.0),
+                    Threshold(
+                        "red", index=1, value=float(panel_infos.absthreshold), op="lt"
+                    ),
+                ],
+            }
+        )
+
+    return Table(
+        title=panel_infos.name,
+        dataSource=data_source,
+        targets=targets,
+        transformations=transformations,
+        unit=panel_infos.unit,
+        overrides=overrides,
+        **new_kwargs,
+    )
+
+
+def get_bar_chart_panel(
+    panel_infos: PanelInfos,
+    data_source: str,
+    query_list: List[Query],
+    *,
+    result_format_list: List[str] = None,
+    alias_list: List[str] = None,
+    transformations=[],
+    overrides=None,
+    **kwargs,
+):
+    if not alias_list:
+        alias_list = [""] * len(query_list)
+    if not result_format_list:
+        result_format_list = ["table"] * len(query_list)
+    targets = [
+        get_influx_target(str(query), result_format=result_format, alias=alias)
+        for query, result_format, alias in zip(
+            query_list, result_format_list, alias_list
+        )
+    ]
+    new_kwargs = {**kwargs}
+    if panel_infos.absthreshold is not None:
+        new_kwargs.update(
+            {
+                "thresholdType": "absolute",
+                "thresholds": [
+                    Threshold("green", 0, 0.0),
+                    Threshold(
+                        "red", index=1, value=float(panel_infos.absthreshold), op="lt"
+                    ),
+                ],
+            }
+        )
+    extraJson = {
+        "fieldConfig": {
+            "defaults": {"fieldMinMax": True, "max": 1, "unit": panel_infos.unit}
+        }
+    }
+
+    return BarChart(
+        title=panel_infos.name,
+        dataSource=data_source,
+        targets=targets,
+        transformations=transformations,
+        xTickLabelRotation=-45,
+        extraJson=extraJson,
+        **new_kwargs,
+    )
+
+
+def get_pie_chart_panel(
+    panel_infos: PanelInfos,
+    data_source: str,
+    query_list: List[Query],
+    *,
+    result_format_list: List[str] = None,
+    alias_list: List[str] = None,
+    transformations=[],
+    overrides=[],
+    **kwargs,
+):
+    targets = [get_influx_target(str(query)) for query in query_list]
+    new_kwargs = {**kwargs}
+    if panel_infos.absthreshold is not None:
+        if "thresholdsStyleMode" not in new_kwargs:
+            new_kwargs.update(
+                {
+                    "thresholdType": "absolute",
+                    "thresholds": [
+                        Threshold("green", 0, 0.0),
+                        Threshold(
+                            "red",
+                            index=1,
+                            value=float(panel_infos.absthreshold),
+                            op="lt",
+                        ),
+                    ],
+                    "thresholdsStyleMode": "line",
+                }
+            )
+        else:
+            new_kwargs.update(
+                {
+                    "thresholdType": "absolute",
+                    "thresholds": [
+                        Threshold("green", 0, 0.0),
+                        Threshold(
+                            "red",
+                            index=1,
+                            value=float(panel_infos.absthreshold),
+                            op="lt",
+                        ),
+                    ],
+                }
+            )
+
+    return PieChartv2(
+        title=panel_infos.name,
+        dataSource=data_source,
+        targets=targets,
+        transformations=transformations,
+        unit=panel_infos.unit,
+        overrides=overrides,
+        **new_kwargs,
     )
diff --git a/dashboards/variables.py b/dashboards/variables.py
index 63dc836494edb2daf61dc0fdc8b82c4a45de7b9f..a48cdc0044c4e6026607c42a2f7b6d42e8d5c43a 100644
--- a/dashboards/variables.py
+++ b/dashboards/variables.py
@@ -1,9 +1,14 @@
 from grafanalib.core import Template
 from collections import namedtuple
-from dashboards.influx_queries import show_tag_values, get_tag_values
+from dashboards.influx_queries import show_tag_values, get_tag_values, show_field_keys
+from typing import List
 
+"""
+multi: maps to the multi_value option
+refresh: maps to the grafanalib.Template refresh option: 1 = on dashboard load, 2 = on time range change
+"""
 Filter = namedtuple(
-    "Filter", ("name", "multi", "default_value"), defaults=("", True, "")
+    "Filter", ("name", "multi", "default_value", "refresh"), defaults=("", True, "", 1)
 )
 
 
@@ -33,10 +38,28 @@ def get_time_dependend_dashboard_variable(
     data_source: str,
     *,
     inner_field_key: str = "",
+    where: str = "",
+    group_by: List[str] = None
 ):
     query = get_tag_values(
-        measurment_name, filter.name, inner_field_key=inner_field_key
+        measurment_name,
+        filter.name,
+        inner_field_key=inner_field_key,
+        where=where,
+        group_by=group_by
     )
+    kwargs = {
+        "includeAll": filter.multi,
+        "multi": filter.multi,
+        "refresh": filter.refresh
+    }
+    if filter.default_value:
+        kwargs.update({"default": filter.default_value})
+    return get_dashboard_variable_query(filter.name, query, data_source, **kwargs)
+
+
+def get_field_keys_dashboard_variable(filter: Filter, measurment_name: str, data_source: str):
+    query = show_field_keys(measurment_name)
     kwargs = {
         "includeAll": filter.multi,
         "multi": filter.multi,
diff --git a/tests/test_dashboard_creation.py b/tests/test_dashboard_creation.py
index 6b3f5ad5c348d8ce38b7d84779f7844e52271535..837128761897343cb1723a00392c4761db5452d2 100644
--- a/tests/test_dashboard_creation.py
+++ b/tests/test_dashboard_creation.py
@@ -7,7 +7,7 @@ from dashboards.dashboard_base import (get_commit_annotation,
 from dashboards.dashboard_list import dashboard_uniformGridGPU
 from dashboards.dashboard_fe2ti import dashboard_fe2ti
 from dashboards.dashboard_pystencils import dashboard_pystencils_cpu, dashboard_pystencils_gpu
-from dashboards.dashboard_walberla import dashboard_uniformgridgpu, dashboard_uniformgridcpu
+from dashboards.dashboard_walberla import dashboard_uniformgridgpu, dashboard_uniformgridcpu, dashboard_percolationgpu, dashboard_fslbmgravitywave, dashboard_uniformgridgpu_profile, dashboard_uniformgridcpu_relativeperformance
 from dashboards.influx_queries import Query, show_tag_values
 
 dataSource = 'InfluxDB-1'
@@ -96,3 +96,7 @@ def test_dashboard_pystencils_cpu():
 def test_dashboard_walberla():
     dashboard_uniformgridcpu()
     dashboard_uniformgridgpu()
+    dashboard_uniformgridgpu_profile()
+    dashboard_percolationgpu()
+    dashboard_uniformgridcpu_relativeperformance()
+    dashboard_fslbmgravitywave()
diff --git a/tests/test_dashboard_upload.py b/tests/test_dashboard_upload.py
index d7369193a17ea39ab8158e312fb598e547f0f4a1..4db6e7d9024ad6486e775844f363e7be299eae07 100644
--- a/tests/test_dashboard_upload.py
+++ b/tests/test_dashboard_upload.py
@@ -1,28 +1,39 @@
-# Test case using pytest
 import pytest
-from dashboards.upload import load_config_from_env
 from unittest.mock import patch
-import os
+from dashboards.upload import load_config_from_env
 
 
-def test_load_config_from_env():
-    # Case 1: Test if function raises exception for missing GRAFANA_API_KEY
-    with pytest.raises(ValueError) as e:
-        load_config_from_env(env_path="")
-    assert str(
-        e.value) == "GRAFANA_API_KEY is None or not defined in the .env file"
 
-    # Case 2: Test if function raises exception for missing GRAFANA_SERVER
-    with patch.dict(os.environ, {"GRAFANA_API_KEY": "api_key"}):
-        with pytest.raises(ValueError) as e:
-            load_config_from_env(env_path="")
-        assert str(
-            e.value) == "GRAFANA_SERVER is None or not defined in the .env file"
+# Test when GRAFANA_API_KEY is missing
+@patch('dotenv.load_dotenv')
+@patch('os.getenv')
+def test_missing_grafana_api_key(mock_getenv, mock_load_dotenv):
+    # Setup mock return values with missing GRAFANA_API_KEY
+    mock_getenv.side_effect = lambda key: {
+        'GRAFANA_SERVER': 'http://test.server'
+    }.get(key)
 
-    # Case 3: Test if function returns expected values when both variables are defined
-    with patch.dict(
-        os.environ, {"GRAFANA_API_KEY": "api_key",
-                     "GRAFANA_SERVER": "server_url"}
-    ):
-        result = load_config_from_env(env_path=".env")
-        assert result == ("server_url", "api_key")
+    # Test that ValueError is raised for missing GRAFANA_API_KEY
+    with pytest.raises(ValueError, match="GRAFANA_API_KEY is None or not defined in the .env file"):
+        load_config_from_env()
+
+# Test when GRAFANA_SERVER is missing
+@patch('dotenv.load_dotenv')
+@patch('os.getenv')
+def test_missing_grafana_server(mock_getenv, mock_load_dotenv):
+    # Setup mock return values with missing GRAFANA_SERVER
+    mock_getenv.side_effect = lambda key: {
+        'GRAFANA_API_KEY': 'test_api_key'
+    }.get(key)
+
+    # Test that ValueError is raised for missing GRAFANA_SERVER
+    with pytest.raises(ValueError, match="GRAFANA_SERVER is None or not defined in the .env file"):
+        load_config_from_env()
+
+# Test when the .env file does not exist
+@patch('dotenv.load_dotenv')
+def test_no_env_file(mock_load_dotenv):
+    # Simulate that the .env file does not exist
+    with patch('os.path.exists', return_value=False):
+        with pytest.raises(ValueError, match="GRAFANA_API_KEY is None or not defined in the .env file"):
+            load_config_from_env()
diff --git a/tests/test_influx_queries.py b/tests/test_influx_queries.py
index 7864f3eb497df0a806ea72050dc8f2f6174a621f..c29eefbfcfee38021048d4dbfb4567a6e38608d0 100644
--- a/tests/test_influx_queries.py
+++ b/tests/test_influx_queries.py
@@ -12,7 +12,7 @@ def test_query():
     q = Query(
         select_="mlupsPerProcess",
         from_="UniformGridGPU",
-        where_='"host" =~ /^${host:regex}$/ AND "collisionSetup" =~ /^${collisionSetup:regex}$/',
+        where_='"host" =~ /^${host:regex}$/ AND "collisionSetup" =~ /^${collisionSetup:regex}$/ AND $timeFilter ',
         group_by=[
             "blocks_0",
             "blocks_1",
@@ -32,7 +32,7 @@ def test_query():
     q1 = (
         'SELECT "mlupsPerProcess" '
         'FROM "UniformGridGPU" '
-        'WHERE ("host" =~ /^${host:regex}$/ AND "collisionSetup" =~ /^${collisionSetup:regex}$/) AND $timeFilter '
+        'WHERE ("host" =~ /^${host:regex}$/ AND "collisionSetup" =~ /^${collisionSetup:regex}$/ AND $timeFilter ) '
         'GROUP BY "blocks_0", "blocks_1", "blocks_2", '
         '"cellsPerBlock_0", "cellsPerBlock_1", "cellsPerBlock_2", '
         '"gpuBlockSize_0", "gpuBlockSize_1", "gpuBlockSize_2", '