diff --git a/src/pystencils/backend/ast/vector.py b/src/pystencils/backend/ast/vector.py
index 14249e1e84d036d78943b9a716372e4e1d9e1073..5121987a8129d682cb07e4b9cae5d6c9d6741817 100644
--- a/src/pystencils/backend/ast/vector.py
+++ b/src/pystencils/backend/ast/vector.py
@@ -92,11 +92,9 @@ class PsVecHorizontal(PsBinOp, PsVectorOp):
     def structurally_equal(self, other: PsAstNode) -> bool:
         if not isinstance(other, PsVecHorizontal):
             return False
-        return (
-                super().structurally_equal(other)
+        return (super().structurally_equal(other)
                 and self._lanes == other._lanes
-                and self._reduction_op == other._reduction_op
-        )
+                and self._reduction_op == other._reduction_op)
 
 
 class PsVecMemAcc(PsExpression, PsLvalue, PsVectorOp):
diff --git a/src/pystencils/backend/platforms/cuda.py b/src/pystencils/backend/platforms/cuda.py
index 9877cea44d3a01a79ddd9eecdd5b5e79cf71527f..e8c8f6a3a2a9466405376d8b898ec8cece4dedea 100644
--- a/src/pystencils/backend/platforms/cuda.py
+++ b/src/pystencils/backend/platforms/cuda.py
@@ -89,7 +89,7 @@ class CudaPlatform(GenericGpu):
 
         if isinstance(dtype, PsScalarType) and func in (NumericLimitsFunctions.Min, NumericLimitsFunctions.Max):
             assert isinstance(dtype, PsIeeeFloatType)
-            defines = { NumericLimitsFunctions.Min: "NEG_INFINITY", NumericLimitsFunctions.Max: "POS_INFINITY" }
+            defines = {NumericLimitsFunctions.Min: "NEG_INFINITY", NumericLimitsFunctions.Max: "POS_INFINITY"}
 
             return PsLiteralExpr(PsLiteral(defines[func], dtype))
 
@@ -170,8 +170,8 @@ class CudaPlatform(GenericGpu):
                     case ReductionOp.Sub:
                         # workaround for unsupported atomicSub: use atomic add
                         # similar to OpenMP reductions: local copies (negative sign) are added at the end
-                        call.function = CFunction(f"atomicAdd", [ptr_expr.dtype, symbol_expr.dtype],
-                                          PsCustomType("void"))
+                        call.function = CFunction("atomicAdd", [ptr_expr.dtype, symbol_expr.dtype],
+                                                  PsCustomType("void"))
                         call.args = (ptr_expr, symbol_expr)
                     case _:
                         call.function = CFunction(f"atomic{op.name}", [ptr_expr.dtype, symbol_expr.dtype],
diff --git a/src/pystencils/backend/transformations/loop_vectorizer.py b/src/pystencils/backend/transformations/loop_vectorizer.py
index ab28507c2abbe58e5496757e1ea7503df1aef11c..b78114553464f6d04c59f3d2f4a3e65ec950b75f 100644
--- a/src/pystencils/backend/transformations/loop_vectorizer.py
+++ b/src/pystencils/backend/transformations/loop_vectorizer.py
@@ -240,14 +240,14 @@ class LoopVectorizer:
                 )
 
                 return PsBlock(
-                    simd_init_local_reduction_vars +
-                    [
+                    simd_init_local_reduction_vars
+                    + [
                         simd_stop_decl,
                         simd_step_decl,
                         simd_loop
-                    ] +
-                    simd_writeback_local_reduction_vars +
-                    [
+                    ]
+                    + simd_writeback_local_reduction_vars
+                    + [
                         trailing_start_decl,
                         trailing_loop,
                     ]
@@ -258,13 +258,13 @@ class LoopVectorizer:
 
             case LoopVectorizer.TrailingItersTreatment.NONE:
                 return PsBlock(
-                    simd_init_local_reduction_vars +
-                    [
+                    simd_init_local_reduction_vars
+                    + [
                         simd_stop_decl,
                         simd_step_decl,
                         simd_loop,
-                    ] +
-                    simd_writeback_local_reduction_vars
+                    ]
+                    + simd_writeback_local_reduction_vars
                 )
 
     @overload
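Both hunks above only move the + concatenation operators to the start of each continuation line; the assembled PsBlock is unchanged: initialize the SIMD-local reduction variables, declare the SIMD stop and step, run the vector loop, write the local reduction values back, and, in the first case, append the trailing start declaration and the scalar remainder loop. A rough plain-Python sketch of that overall shape (the function reduce_sum and all names in it are invented for illustration; this is not the pystencils IR):

    def reduce_sum(data, lanes=4):
        local = 0.0                              # init local reduction var
        simd_stop = len(data) - len(data) % lanes
        for i in range(0, simd_stop, lanes):     # SIMD loop, lanes items per step
            local += sum(data[i:i + lanes])      # stands in for one vector add
        result = local                           # write back local reduction var
        for i in range(simd_stop, len(data)):    # trailing scalar loop
            result += data[i]
        return result

    assert reduce_sum([1.0] * 10) == 10.0
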
diff --git a/src/pystencils/compound_op_mapping.py b/src/pystencils/compound_op_mapping.py
index 2dd88fc94cacff7a84f3ceb8d2d6cd30f5fbe1a7..f256369f9a86fa7fbc84fdc09a42f8bd1a110ad7 100644
--- a/src/pystencils/compound_op_mapping.py
+++ b/src/pystencils/compound_op_mapping.py
@@ -1,5 +1,3 @@
-from operator import truediv, mul, sub, add
-
 from .backend.ast.expressions import PsExpression, PsCall, PsAdd, PsSub, PsMul, PsDiv
 from .backend.exceptions import FreezeError
 from .backend.functions import PsMathFunction, MathFunctions
diff --git a/src/pystencils/jit/gpu_cupy.py b/src/pystencils/jit/gpu_cupy.py
index 331b58ce51199a1a16af401e444958abc401985d..0792b6c01a526ab9fc24f5ac471a8456ee9f2745 100644
--- a/src/pystencils/jit/gpu_cupy.py
+++ b/src/pystencils/jit/gpu_cupy.py
@@ -11,7 +11,6 @@ except ImportError:
 from ..codegen import Target
 from ..field import FieldType
 
-from ..types import PsType, PsPointerType
 from .jit import JitBase, JitError, KernelWrapper
 from ..codegen import (
     Kernel,
@@ -19,7 +18,7 @@ from ..codegen import (
     Parameter,
 )
 from ..codegen.properties import FieldShape, FieldStride, FieldBasePtr
-from ..types import PsStructType, PsPointerType
+from ..types import PsType, PsStructType, PsPointerType
 
 from ..include import get_pystencils_include_path
 
diff --git a/src/pystencils/sympyextensions/reduction.py b/src/pystencils/sympyextensions/reduction.py
index 25ae5c0ac8d49f6450c5aa7ecd76d5d59ecec865..cebfcb2f738e77268eac9a4b0ad049ae3c6b756d 100644
--- a/src/pystencils/sympyextensions/reduction.py
+++ b/src/pystencils/sympyextensions/reduction.py
@@ -19,14 +19,18 @@ class ReductionAssignment(AssignmentBase):
     Attributes:
     ===========
 
-    binop : CompoundOp
+    reduction_op : ReductionOp
        Enum for binary operation being applied in the assignment, such as "Add" for "+", "Sub" for "-", etc.
     """
-    reduction_op = None  # type: ReductionOp
+    _reduction_op = None  # type: ReductionOp
 
     @property
     def reduction_op(self):
-        return self.reduction_op
+        return self._reduction_op
+
+    @reduction_op.setter
+    def reduction_op(self, op):
+        self._reduction_op = op
 
 
 class AddReductionAssignment(ReductionAssignment):
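
The hunk above fixes a real bug rather than just style: the old property body returned self.reduction_op, i.e. it re-entered the property itself and would recurse without bound, and the class attribute of the same name was shadowed by the property definition anyway. The new code uses the standard backing-field pattern, with _reduction_op storing the value and a reduction_op property/setter pair in front of it. A self-contained sketch of that pattern (the Demo class is invented for illustration):

    class Demo:
        _value = None            # backing attribute, like _reduction_op above

        @property
        def value(self):
            # must read the backing name; "return self.value" here would
            # call the property again and recurse without bound
            return self._value

        @value.setter
        def value(self, v):
            self._value = v

    d = Demo()
    d.value = "Add"
    assert d.value == "Add"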