diff --git a/apps/benchmarks/UniformGridGPU/simulation_setup/benchmark_configs_RDM.py b/apps/benchmarks/UniformGridGPU/simulation_setup/benchmark_configs_RDM.py
index 5d8614573c06a9993cc971dfcb489008c6b58956..eed87f2709982514ac1fd7fa66d52a728103f9a3 100755
--- a/apps/benchmarks/UniformGridGPU/simulation_setup/benchmark_configs_RDM.py
+++ b/apps/benchmarks/UniformGridGPU/simulation_setup/benchmark_configs_RDM.py
@@ -14,7 +14,7 @@ except ImportError:
 # Number of time steps run for a workload of 128^3 per GPU
 # if twice as many cells are on the GPU, half as many time steps are run, etc.
 # increase this to get more reliable measurements
-TIME_STEPS_FOR_128_BLOCK = 1000
+TIME_STEPS_FOR_128_BLOCK = int(os.environ.get('TIME_STEPS_FOR_128_BLOCK', 1000))
 DB_FILE = os.environ.get('DB_FILE', "gpu_benchmark.sqlite3")
 BENCHMARK = int(os.environ.get('BENCHMARK', 0))
 
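A minimal sketch (not part of the patch) of how the override added above behaves, assuming a standalone demo: when the variable is unset, os.environ.get falls back to the integer default and int() leaves it unchanged; when it is set, the string value from the environment is parsed.

```python
import os

# Unset: os.environ.get returns the int default, and int(1000) == 1000.
os.environ.pop('TIME_STEPS_FOR_128_BLOCK', None)
assert int(os.environ.get('TIME_STEPS_FOR_128_BLOCK', 1000)) == 1000

# Set: environment values are always strings, so int() parses '4000' -> 4000.
os.environ['TIME_STEPS_FOR_128_BLOCK'] = '4000'
assert int(os.environ.get('TIME_STEPS_FOR_128_BLOCK', 1000)) == 4000
```

A non-numeric value (e.g. TIME_STEPS_FOR_128_BLOCK=fast) raises ValueError at import time, which is a reasonable fail-fast behavior for a benchmark config.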
@@ -49,7 +49,7 @@ ldc_setup = {'Border': [
 ]}
 
 
-def num_time_steps(block_size, time_steps_for_128_block=1000):
+def num_time_steps(block_size, time_steps_for_128_block=TIME_STEPS_FOR_128_BLOCK):
     """
     Calculate the number of time steps based on the block size.
 
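The body of num_time_steps lies outside this hunk; a plausible sketch consistent with the comment at the top of the file (time steps scale inversely with the cells per GPU, with 128^3 as the baseline) might look as follows. The rounding and the lower bound are assumptions, not taken from the patch.

```python
def num_time_steps(block_size, time_steps_for_128_block=TIME_STEPS_FOR_128_BLOCK):
    # Cells in one block; (128, 128, 128) is the reference workload.
    cells = block_size[0] * block_size[1] * block_size[2]
    # Twice the cells -> half the time steps, keeping total work roughly constant.
    time_steps = (128 ** 3 / cells) * time_steps_for_128_block
    return max(int(time_steps), 1)  # assumed floor so tiny workloads still run
```

With the default changed to TIME_STEPS_FOR_128_BLOCK, callers that omit the argument now follow the environment override instead of a hard-coded 1000.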
@@ -202,7 +202,6 @@ class Scenario:
 
 
 def weak_scaling_overlap(cuda_enabled_mpi=False):
-    """Tests different communication overlapping strategies"""
     wlb.log_info_on_root("Running scaling benchmark with communication hiding")
     wlb.log_info_on_root("")