diff --git a/src/pystencils_autodiff/framework_integration/datahandling.py b/src/pystencils_autodiff/framework_integration/datahandling.py
index 8c1f5de03f7154a26e777f50b12b0e12c9db7b5e..071c8783e9fc40cfecd4477437f7f789bacc3386 100644
--- a/src/pystencils_autodiff/framework_integration/datahandling.py
+++ b/src/pystencils_autodiff/framework_integration/datahandling.py
@@ -17,59 +17,60 @@ from typing import Sequence, Union
 import numpy as np
 
 import pystencils
-from pystencils.autodiff.backends._pytorch import torch_dtype_to_numpy
+from pystencils.autodiff.backends._pytorch import numpy_dtype_to_torch
 from pystencils.field import (
     Field, FieldType, create_numpy_array_with_layout, layout_string_to_tuple,
     spatial_layout_string_to_tuple)
-from pystencils_autodiff.field_tensor_conversion import _torch_tensor_to_numpy_shim
 
 
-class PyTorchDataHandling(pystencils.datahandling.SerialDataHandling):
-
-    class PyTorchArrayHandler:
-
-        def __init__(self):
-            pass
-
-        def zeros(self, shape, dtype=np.float32, order='C'):
-            assert order == 'C'
-
-            return torch.zeros(*shape, dtype=torch_dtype_to_numpy(dtype))
-
-        def ones(self, shape, dtype, order='C'):
-            assert order == 'C'
-            return torch.ones(*shape, dtype=torch_dtype_to_numpy(dtype))
-
-        def empty(self, shape, dtype=np.float32, layout=None):
-            if layout:
-                cpu_array = torch.from_numpy(pystencils.field.create_numpy_array_with_layout(shape, dtype, layout))
-                return self.from_numpy(cpu_array)
-            else:
-                return torch.empty(*shape, dtype=torch_dtype_to_numpy(dtype))
-
-        def to_gpu(self, array):
-            return array.cuda()
-
-        def upload(self, gpuarray, numpy_array):
-            gpuarray[...] = numpy_array.cuda()
-
-        def download(self, gpuarray, numpy_array):
-            numpy_array[...] = gpuarray.cpu()
-
-        def randn(self, shape, dtype=np.float64):
-            cpu_array = torch.from_numpy(np.random.randn(*shape).astype(dtype))
-            return self.from_numpy(cpu_array)
+class MultiShapeDatahandling(pystencils.datahandling.SerialDataHandling):
+    """
+    Specialization of :class:`pystencils.datahandling.SerialDataHandling` to support arrays with different sizes.
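+
+    Example (a minimal sketch; field names and shapes are illustrative, the description syntax follows
+    :func:`pystencils.fields`)::
+
+        dh = MultiShapeDatahandling((10, 10))
+        x, y = dh.add_arrays('x, y(2)', spatial_shape=(20, 30))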
+    """
 
     def __init__(self,
-                 domain_size: Sequence[int],
+                 default_domain_size: Sequence[int],
                  default_ghost_layers: int = 0,
-                 default_layout: str = 'SoA',
+                 default_layout: str = 'numpy',
                  periodicity: Union[bool, Sequence[bool]] = False,
-                 default_target: str = 'gpu'):
-        super().__init__(domain_size, default_ghost_layers, default_layout, periodicity, default_target)
-        self.array_handler = self.PyTorchArrayHandler()
+                 default_target: str = 'cpu',
+                 opencl_queue=None,
+                 opencl_ctx=None,
+                 array_handler=None) -> None:
+        """
+        Same as :func:`pystencils.datahandling.SerialDataHandling.__init__`, but with defaults better suited to
+        communication-free applications.
+        """
+        super().__init__(
+            default_domain_size,
+            default_ghost_layers,
+            default_layout,
+            periodicity,
+            default_target,
+            opencl_queue,
+            opencl_ctx,
+            array_handler=array_handler)
+
+    def add_arrays(self, description: str, spatial_shape=None):
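+        """
+        Add several arrays at once from a field description string (same syntax as :func:`pystencils.fields`).
+
+        If the description contains a ``:`` section, dtype and size are taken from the description itself;
+        otherwise the arrays are created as ``float32`` with the given ``spatial_shape``.
+        Returns a generator over the newly created symbolic fields.
+        """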
+        from pystencils.field import _parse_part1, _parse_description
+
+        if ':' in description:
+            fields_info, data_type, size = _parse_description(description)
+            names = []
+            for name, indices in fields_info:
+                names.append(name)
+                self.add_array(name, values_per_cell=indices, dtype=data_type, spatial_shape=size)
+
+            return (self.fields[n] for n in names)
+        else:
+            names = []
+            for name, indices in _parse_part1(description):
+                names.append(name)
+                self.add_array(name, values_per_cell=indices, dtype=np.float32, spatial_shape=spatial_shape)
 
-    def add_array(self, name, values_per_cell=1, dtype=np.float64, latex_name=None, ghost_layers=None, layout=None,
+            return (self.fields[n] for n in names)
+
+    def add_array(self, name, values_per_cell=1, dtype=np.float32, latex_name=None, ghost_layers=None, layout=None,
                   cpu=True, gpu=None, alignment=False, field_type=FieldType.GENERIC, spatial_shape=None):
         if ghost_layers is None:
             ghost_layers = self.default_ghost_layers
@@ -106,8 +107,9 @@ class PyTorchDataHandling(pystencils.datahandling.SerialDataHandling):
 
         # cpu_arr is always created - since there is no create_pycuda_array_with_layout()
         byte_offset = ghost_layers * np.dtype(dtype).itemsize
-        cpu_arr = torch.from_numpy(create_numpy_array_with_layout(layout=layout_tuple, alignment=alignment,
-                                                                  byte_offset=byte_offset, **kwargs))
+        numpy_array = create_numpy_array_with_layout(layout=layout_tuple, alignment=alignment,
+                                                     byte_offset=byte_offset, **kwargs)
+        cpu_arr = self.array_handler.from_numpy(numpy_array)
 
         if alignment and gpu:
             raise NotImplementedError("Alignment for GPU fields not supported")
@@ -123,8 +125,75 @@ class PyTorchDataHandling(pystencils.datahandling.SerialDataHandling):
 
         assert all(f.name != name for f in self.fields.values()), "Symbolic field with this name already exists"
         self.fields[name] = Field.create_from_numpy_array(name,
-                                                          _torch_tensor_to_numpy_shim(cpu_arr),
+                                                          numpy_array,
                                                           index_dimensions=index_dimensions,
                                                           field_type=field_type)
         self.fields[name].latex_name = latex_name
         return self.fields[name]
+
+
+class PyTorchDataHandling(MultiShapeDatahandling):
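+    """
+    :class:`MultiShapeDatahandling` that stores all arrays as ``torch`` tensors, so kernels created for the
+    PyTorch backend can run directly on the handled arrays (on the GPU when ``default_target='gpu'``).
+    """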
+
+    class PyTorchArrayHandler:
+
+        def __init__(self):
+            pass
+
+        def zeros(self, shape, dtype=np.float32, order='C'):
+            assert order == 'C'
+
+            return torch.zeros(*shape, dtype=numpy_dtype_to_torch(dtype))
+
+        def ones(self, shape, dtype, order='C'):
+            assert order == 'C'
+            return torch.ones(*shape, dtype=numpy_dtype_to_torch(dtype))
+
+        def empty(self, shape, dtype=np.float32, layout=None):
+            if layout:
+                # create a numpy array with the requested layout first, then wrap it as a torch tensor
+                cpu_array = pystencils.field.create_numpy_array_with_layout(shape, layout, dtype=dtype)
+                return self.from_numpy(cpu_array)
+            else:
+                return torch.empty(*shape, dtype=numpy_dtype_to_torch(dtype))
+
+        def to_gpu(self, array):
+            if not hasattr(array, 'cuda'):
+                return torch.from_numpy(array).cuda()
+            return array.cuda()
+
+        def upload(self, gpuarray, numpy_array):
+            if not hasattr(numpy_array, 'cuda'):
+                numpy_array = torch.from_numpy(numpy_array)
+            gpuarray[...] = numpy_array.cuda()
+
+        def download(self, gpuarray, numpy_array):
+            numpy_array[...] = gpuarray.cpu()
+
+        def randn(self, shape, dtype=np.float32):
+            return torch.randn(*shape, dtype=numpy_dtype_to_torch(dtype))
+
+        from_numpy = torch.from_numpy
+
+    def __init__(self,
+                 domain_size: Sequence[int],
+                 default_ghost_layers: int = 0,
+                 default_layout: str = 'numpy',
+                 periodicity: Union[bool, Sequence[bool]] = False,
+                 default_target: str = 'gpu'):
+        super().__init__(domain_size, default_ghost_layers, default_layout, periodicity, default_target)
+        self.array_handler = self.PyTorchArrayHandler()
+
+    def run_kernel(self, kernel_function, **kwargs):
+        arrays = self.gpu_arrays if self.default_target == 'gpu' else self.cpu_arrays
+        kernel_function(**arrays, **kwargs)
+
+    def require_autograd(self, requires_grad, *names):
+        # toggle torch gradient tracking on the named arrays (cpu and gpu copies, whichever exist)
+        for n in names:
+            try:
+                self.cpu_arrays[n].requires_grad = requires_grad
+            except Exception:
+                pass
+
+            try:
+                self.gpu_arrays[n].requires_grad = requires_grad
+            except Exception:
+                pass
diff --git a/tests/test_datahandling.py b/tests/test_datahandling.py
index f552d99818a0a086dbbb20b6c7845e25b376a639..9feb1456173a6b5e5dd96c6803e9e6ff9bcd9a81 100644
--- a/tests/test_datahandling.py
+++ b/tests/test_datahandling.py
@@ -30,3 +30,4 @@ def test_datahandling():
     kernel = forward_assignments.create_pytorch_op()
 
     dh.run_kernel(kernel, a=3)
+
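+
+
+def test_multi_shape_datahandling():
+    # Sketch only: exercises the MultiShapeDatahandling API introduced in this change;
+    # the field names and shapes below are illustrative, not fixed by the implementation.
+    from pystencils_autodiff.framework_integration.datahandling import MultiShapeDatahandling
+
+    dh = MultiShapeDatahandling((10, 10))
+    x, y = dh.add_arrays('x, y(2)', spatial_shape=(20, 30))
+
+    # each array keeps the spatial shape it was created with, independent of the default domain size
+    assert 'x' in dh.cpu_arrays and 'y' in dh.cpu_arrays
+    assert x.name == 'x' and y.name == 'y'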