diff --git a/pystencils/datahandling/graph_datahandling.py b/pystencils/datahandling/graph_datahandling.py
new file mode 100644
index 0000000000000000000000000000000000000000..41d16a4c3ddea7c2893712275606c6448b4eb5ba
--- /dev/null
+++ b/pystencils/datahandling/graph_datahandling.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright © 2019 Stephan Seitz <stephan.seitz@fau.de>
+#
+# Distributed under terms of the GPLv3 license.
+
+"""
+
+"""
+
+from enum import Enum
+
+import numpy as np
+
+import pystencils.datahandling
+import pystencils.kernel_wrapper
+from pystencils.field import FieldType
+
+
+class DataTransferKind(str, Enum):
+    UNKNOWN = 'UNKNOWN'
+    HOST_ALLOC = 'HOST_ALLOC'
+    DEVICE_ALLOC = 'DEVICE_ALLOC'
+    HOST_TO_DEVICE = 'HOST_TO_DEVICE'
+    DEVICE_TO_HOST = 'DEVICE_TO_HOST'
+    HOST_COMMUNICATION = 'HOST_COMMUNICATION'
+    DEVICE_COMMUNICATION = 'DEVICE_COMMUNICATION'
+    HOST_SWAP = 'HOST_SWAP'
+    DEVICE_SWAP = 'DEVICE_SWAP'
+    HOST_GATHER = 'HOST_GATHER'
+    DEVICE_GATHER = 'DEVICE_GATHER'
+
+    def is_alloc(self):
+        return self in [self.HOST_ALLOC, self.DEVICE_ALLOC]
+
+    def is_transfer(self):
+        return self in [self.HOST_TO_DEVICE, self.DEVICE_TO_HOST, self.HOST_SWAP, self.DEVICE_SWAP]
+
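+# Example (sketch) of the kind predicates:
+#   DataTransferKind.HOST_ALLOC.is_alloc()        -> True
+#   DataTransferKind.HOST_TO_DEVICE.is_transfer() -> True
+#   DataTransferKind.HOST_SWAP.is_transfer()      -> True
+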
+
+class DataTransfer:
+    def __init__(self, field: pystencils.Field, kind: DataTransferKind):
+        self.field = field
+        self.kind = kind
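+
+    def __str__(self):
+        # Readable call-queue entry, e.g. "HOST_ALLOC src" (assumes the recorded
+        # field exposes a ``name`` attribute, which pystencils fields do)
+        return self.kind.value + ' ' + self.field.name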
+
+
+class Swap(DataTransfer):
+    def __init__(self, source, destination, gpu):
+        super().__init__(source, DataTransferKind.DEVICE_SWAP if gpu else DataTransferKind.HOST_SWAP)
+        self.destination = destination
+
+
+class Communication(DataTransfer):
+    def __init__(self, field, stencil, gpu):
+        kind = DataTransferKind.DEVICE_COMMUNICATION if gpu else DataTransferKind.HOST_COMMUNICATION
+        super().__init__(field, kind)
+        self.stencil = stencil
+
+
+class KernelCall:
+    def __init__(self, kernel: pystencils.kernel_wrapper.KernelWrapper, kwargs):
+        self.kernel = kernel
+        self.kwargs = kwargs
+
+    def __str__(self):
+        return "Call " + str(self.kernel.ast.function_name)
+
+
+class GraphDataHandling(pystencils.datahandling.SerialDataHandling):
+
+    """Docstring for GraphDataHandling. """
+
+    class TimeLoop(pystencils.TimeLoop):
+        def __init__(self, parent, *args, **kwargs):
+            self.parent = parent
+            super().__init__(*args, **kwargs)
+
+        def add_pre_run_function(self, f):
+            self._pre_run_functions.append(f)
+
+        def add_post_run_function(self, f):
+            self._post_run_functions.append(f)
+
+        def add_single_step_function(self, f):
+            self._single_step_functions.append(f)
+
+        def add_call(self, functor, argument_list):
+            if hasattr(functor, 'kernel'):
+                functor = functor.kernel
+            if not isinstance(argument_list, list):
+                argument_list = [argument_list]
+
+            for argument_dict in argument_list:
+                self._call_data.append((functor, argument_dict))
+
+    def __init__(self, *args, **kwargs):
+
+        self.call_queue = []
+        super().__init__(*args, **kwargs)
+
+    def add_array(self, name, values_per_cell=1, dtype=np.float64, latex_name=None, ghost_layers=None, layout=None,
+                  cpu=True, gpu=None, alignment=False, field_type=FieldType.GENERIC):
+
+        super().add_array(name,
+                          values_per_cell,
+                          dtype,
+                          latex_name,
+                          ghost_layers,
+                          layout,
+                          cpu,
+                          gpu,
+                          alignment,
+                          field_type)
+        # gpu=None means "use the default target"; resolve it here so device
+        # allocations are also recorded (assumes a 'gpu' default_target implies
+        # GPU arrays are created, mirroring the base class behavior)
+        if gpu is None:
+            gpu = self.default_target == 'gpu'
+        if cpu:
+            self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.HOST_ALLOC))
+        if gpu:
+            self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.DEVICE_ALLOC))
+
+    def add_custom_data(self, name, cpu_creation_function,
+                        gpu_creation_function=None, cpu_to_gpu_transfer_func=None, gpu_to_cpu_transfer_func=None):
+
+        self.call_queue.append('custom data: ' + name)
+        super().add_custom_data(name, cpu_creation_function,
+                                gpu_creation_function, cpu_to_gpu_transfer_func, gpu_to_cpu_transfer_func)
+
+    def gather_array(self, name, slice_obj=None, ghost_layers=False, **kwargs):
+
+        self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.HOST_GATHER))
+        return super().gather_array(name, slice_obj, ghost_layers, **kwargs)
+
+    def swap(self, name1, name2, gpu=None):
+
+        self.call_queue.append(Swap(self._fields[name1], self._fields[name2], gpu))
+        super().swap(name1, name2, gpu)
+
+    def run_kernel(self, kernel_function, **kwargs):
+        self.call_queue.append(KernelCall(kernel_function, kwargs))
+        # intentionally skip calling super: kernel executions are only recorded
+
+    def to_cpu(self, name):
+        super().to_cpu(name)
+        self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.DEVICE_TO_HOST))
+
+    def to_gpu(self, name):
+        super().to_gpu(name)
+        if name in self._custom_data_transfer_functions:
+            self.call_queue.append('Custom transfer function for ' + name)
+        else:
+            self.call_queue.append(DataTransfer(self._fields[name], DataTransferKind.HOST_TO_DEVICE))
+
+    def synchronization_function(self, names, stencil=None, target=None, **_):
+        gpu = target == 'gpu'
+        for name in names:
+            self.call_queue.append(Communication(self._fields[name], stencil, gpu))
+        return super().synchronization_function(names, stencil, target, **_)
+
+    def __str__(self):
+        return '\n'.join(str(c) for c in self.call_queue)
+
+    def create_timeloop(self, *args, **kwargs):
+        return self.TimeLoop(self, *args, **kwargs)
+
+    def fill(self, array_name: str, val, value_idx=None,
+             slice_obj=None, ghost_layers=False, inner_ghost_layers=False) -> None:
+        self.call_queue.append('Fill ' + array_name)
+        super().fill(array_name, val, value_idx, slice_obj, ghost_layers, inner_ghost_layers)
+
+    # TODO
+    # def reduce_float_sequence(self, sequence, operation, all_reduce=False) -> np.array:
+        # return np.array(sequence)
+
+    # def reduce_int_sequence(self, sequence, operation, all_reduce=False) -> np.array:
+        # return np.array(sequence)
+
+    # def create_vtk_writer(self, file_name, data_names, ghost_layers=False):
+        # pass
+
+    # def create_vtk_writer_for_flag_array(self, file_name, data_name, masks_to_name, ghost_layers=False):
+        # pass
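+
+
+# Usage sketch (hypothetical array names; GPU allocation requires pycuda):
+#
+#   dh = GraphDataHandling((16, 16))
+#   dh.add_array('src', gpu=True)     # records HOST_ALLOC and DEVICE_ALLOC
+#   dh.add_array('dst', gpu=True)
+#   dh.to_gpu('src')                  # records a HOST_TO_DEVICE transfer
+#   dh.swap('src', 'dst', gpu=False)  # records a HOST_SWAP
+#   print(dh)                         # prints one recorded operation per line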
diff --git a/pystencils_tests/test_graph_datahandling.py b/pystencils_tests/test_graph_datahandling.py
new file mode 100644
index 0000000000000000000000000000000000000000..d494576baa612613d0e5e7f3890b9934431a994a
--- /dev/null
+++ b/pystencils_tests/test_graph_datahandling.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright © 2019 Stephan Seitz <stephan.seitz@fau.de>
+#
+# Distributed under terms of the GPLv3 license.
+
+"""
+
+"""
+
+import pytest
+
+pytest.importorskip('lbmpy')
+
+from lbmpy.boundaries import UBB, NoSlip
+from lbmpy.lbstep import LatticeBoltzmannStep
+from pystencils.datahandling.graph_datahandling import GraphDataHandling
+from pystencils.slicing import slice_from_direction
+
+
+def create_lid_driven_cavity(domain_size=None, lid_velocity=0.005, lbm_kernel=None, parallel=False,
+                             data_handling=None, **kwargs):
+    """Creates a lid driven cavity scenario.
+
+    Args:
+        domain_size: tuple specifying the number of cells in each dimension
+        lid_velocity: x velocity of lid in lattice coordinates.
+        lbm_kernel: an LBM kernel function; if not given, one is created automatically
+        parallel: True for distributed memory parallelization with waLBerla
+        data_handling: see documentation of :func:`create_fully_periodic_flow`
+        kwargs: other parameters are passed on to the method, see :mod:`lbmpy.creationfunctions`
+    Returns:
+        instance of :class:`LatticeBoltzmannStep`
+    """
+    assert domain_size is not None or data_handling is not None
+    if data_handling is None:
+        optimization = kwargs.get('optimization', None)
+        target = optimization.get('target', None) if optimization else None
+        data_handling = GraphDataHandling(domain_size,
+                                          periodicity=False,
+                                          default_ghost_layers=1,
+                                          default_target=target)
+    step = LatticeBoltzmannStep(data_handling=data_handling, lbm_kernel=lbm_kernel, name="ldc", **kwargs)
+
+    my_ubb = UBB(velocity=[lid_velocity, 0, 0][:step.method.dim])
+    step.boundary_handling.set_boundary(my_ubb, slice_from_direction('N', step.dim))
+    for direction in ('W', 'E', 'S') if step.dim == 2 else ('W', 'E', 'S', 'T', 'B'):
+        step.boundary_handling.set_boundary(NoSlip(), slice_from_direction(direction, step.dim))
+
+    return step
+
+
+def ldc_setup(**kwargs):
+    ldc = create_lid_driven_cavity(relaxation_rate=1.7, **kwargs)
+    ldc.run(50)
+    return ldc
+
+
+def test_graph_datahandling():
+
+    print("--- LDC 2D test ---")
+
+    opt_params = {'target': 'gpu', 'gpu_indexing_params': {'block_size': (8, 4, 2)}}
+    lbm_step: LatticeBoltzmannStep = ldc_setup(domain_size=(10, 15), parallel=False, optimization=opt_params)
+    print(lbm_step._data_handling)
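+
+    # Sanity check (sketch): running the scenario should have recorded kernel
+    # calls into the graph data handling's call queue.
+    assert lbm_step._data_handling.call_queue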