From f3d5a4308617c930a3cf66831c1aca2b8ca985a4 Mon Sep 17 00:00:00 2001
From: Martin Bauer <martin.bauer@fau.de>
Date: Tue, 10 Apr 2018 16:00:43 +0200
Subject: [PATCH] Rest of PEP8 renaming

---
 boundaries/boundaryconditions.py              |   8 +-
 chapman_enskog/chapman_enskog.py              |  34 ++--
 chapman_enskog/chapman_enskog_higher_order.py |   8 +-
 chapman_enskog/chapman_enskog_steady_state.py |  42 ++---
 continuous_distribution_measures.py           |   2 +-
 creationfunctions.py                          |  20 +-
 cumulants.py                                  |   6 +-
 fieldaccess.py                                |  26 +--
 forcemodels.py                                |  44 ++---
 geometry.py                                   |   4 +-
 lbstep.py                                     |  10 +-
 macroscopic_value_kernels.py                  | 172 +++++++++---------
 maxwellian_equilibrium.py                     |   4 +-
 methods/abstractlbmethod.py                   |   2 +-
 methods/conservedquantitycomputation.py       |   9 +-
 methods/creationfunctions.py                  |  29 +--
 methods/cumulantbased.py                      |  20 +-
 methods/entropic.py                           |  14 +-
 methods/entropic_eq_srt.py                    |  10 +-
 methods/momentbased.py                        |  18 +-
 methods/momentbasedsimplifications.py         |  14 +-
 moments.py                                    |  48 ++---
 parameterization.py                           |  30 +--
 phasefield/analytical.py                      |  20 +-
 phasefield/kerneleqs.py                       |  24 +--
 phasefield/phasefieldstep.py                  | 100 +++++-----
 plot2d.py                                     |   8 +-
 quadratic_equilibrium_construction.py         |   8 +-
 relaxationrates.py                            |   8 +-
 scenarios.py                                  |  10 +-
 stencils.py                                   |   7 +-
 turbulence_models.py                          |   2 +-
 updatekernels.py                              |  10 +-
 33 files changed, 388 insertions(+), 383 deletions(-)

diff --git a/boundaries/boundaryconditions.py b/boundaries/boundaryconditions.py
index bc3c3011..218af246 100644
--- a/boundaries/boundaryconditions.py
+++ b/boundaries/boundaryconditions.py
@@ -20,7 +20,7 @@ class Boundary:
         :param pdf_field: pystencils field describing the pdf. The current cell is cell next to the boundary,
                          which is influenced by the boundary cell i.e. has a link from the boundary cell to
                          itself.
-        :param direction_symbol: a sympy symbol that can be used as index to the pdfField. It describes
+        :param direction_symbol: a sympy symbol that can be used as an index to the pdf_field. It describes
                                 the direction pointing from the fluid to the boundary cell 
         :param lb_method: an instance of the LB method used. Use this to adapt the boundary to the method
                          (e.g. compressibility)
@@ -32,8 +32,8 @@ class Boundary:
     @property
     def additional_data(self):
         """Return a list of (name, type) tuples for additional data items required in this boundary
-        These data items can either be initialized in separate kernel see additionalDataKernelInit or by 
-        Python callbacks - see additionalDataCallback """
+        These data items can either be initialized in a separate kernel (see additional_data_kernel_init) or by
+        Python callbacks (see additional_data_callback). """
         return []
 
     @property
@@ -134,7 +134,7 @@ class UBB(Boundary):
                                      for d_i, v_i in zip(neighbor, velocity)]) * weight_of_direction(direction)
 
         # Better alternative: in conserved value computation
-        # rename what is currently called density to "virtualDensity"
+        # rename what is currently called density to "virtual_density"
         # provide a new quantity density, which is constant in case of incompressible models
         if not lb_method.conserved_quantity_computation.zero_centered_pdfs:
             cqc = lb_method.conserved_quantity_computation
diff --git a/chapman_enskog/chapman_enskog.py b/chapman_enskog/chapman_enskog.py
index a29dab2c..904121b1 100644
--- a/chapman_enskog/chapman_enskog.py
+++ b/chapman_enskog/chapman_enskog.py
@@ -261,7 +261,7 @@ class LbMethodEqMoments:
         moment_tuple = ce_moment.moment_tuple
 
         moment_symbols = []
-        for moment, (eqValue, rr) in self._method.relaxation_info_dict.items():
+        for moment, (eq_value, rr) in self._method.relaxation_info_dict.items():
             if isinstance(moment, tuple):
                 moment_symbols.append(-rr**exponent
                                       * CeMoment(pre_collision_moment_name, moment, ce_moment.superscript))
@@ -326,8 +326,8 @@ def substitute_collision_operator_moments(expr, lb_moment_computation, collision
                                           pre_collision_moment_name="\\Pi"):
     moments_to_replace = [m for m in expr.atoms(CeMoment) if m.name == collision_op_moment_name]
     subs_dict = {}
-    for ceMoment in moments_to_replace:
-        subs_dict[ceMoment] = lb_moment_computation.get_post_collision_moment(ceMoment, 1, pre_collision_moment_name)
+    for ce_moment in moments_to_replace:
+        subs_dict[ce_moment] = lb_moment_computation.get_post_collision_moment(ce_moment, 1, pre_collision_moment_name)
 
     return expr.subs(subs_dict)
 
@@ -341,10 +341,10 @@ def take_moments(eqn, pdf_to_moment_name=(('f', '\Pi'), ('\Omega f', '\\Upsilon'
     velocity_terms = tuple(expanded_symbol(velocity_name, subscript=i) for i in range(3))
 
     def determine_f_index(factor):
-        FIndex = namedtuple("FIndex", ['momentName', 'superscript'])
-        for symbolListId, pdfSymbolsElement in enumerate(pdf_symbols):
+        FIndex = namedtuple("FIndex", ['moment_name', 'superscript'])
+        for symbol_list_id, pdf_symbols_element in enumerate(pdf_symbols):
             try:
-                return FIndex(pdf_to_moment_name[symbolListId][1], pdfSymbolsElement.index(factor))
+                return FIndex(pdf_to_moment_name[symbol_list_id][1], pdf_symbols_element.index(factor))
             except ValueError:
                 pass
         return None
@@ -370,13 +370,13 @@ def take_moments(eqn, pdf_to_moment_name=(('f', '\Pi'), ('\Omega f', '\\Upsilon'
                     f_index = new_f_index
 
         moment_tuple = [0] * len(velocity_terms)
-        for cIdx in c_indices:
-            moment_tuple[cIdx] += 1
+        for c_idx in c_indices:
+            moment_tuple[c_idx] += 1
         moment_tuple = tuple(moment_tuple)
 
         if use_one_neighborhood_aliasing:
             moment_tuple = non_aliased_moment(moment_tuple)
-        result = CeMoment(f_index.momentName, moment_tuple, f_index.superscript)
+        result = CeMoment(f_index.moment_name, moment_tuple, f_index.superscript)
         if derivative_term is not None:
             result = derivative_term.change_arg_recursive(result)
         result *= rest
@@ -510,7 +510,7 @@ def chapman_enskog_ansatz(equation, time_derivative_orders=(1, 3), spatial_deriv
         equation: equation to expand
         time_derivative_orders: tuple describing range for time derivative to expand
         spatial_derivative_orders: tuple describing range for spatial derivatives to expand
-        pdfs: symbols to expand: sequence of triples (symbolName, startOrder, endOrder)
+        pdfs: symbols to expand: sequence of triples (symbol_name, start_order, end_order)
         commutative: can be set to False to have non-commutative pdf symbols
     Returns:
         tuple mapping epsilon order to equation
@@ -533,15 +533,15 @@ def chapman_enskog_ansatz(equation, time_derivative_orders=(1, 3), spatial_deriv
     expanded_pdf_symbols = []
 
     max_expansion_order = spatial_derivative_orders[1] if spatial_derivative_orders else 10
-    for pdf_name, startOrder, stopOrder in pdfs:
+    for pdf_name, start_order, stop_order in pdfs:
         if isinstance(pdf_name, sp.Symbol):
             pdf_name = pdf_name.name
         expanded_pdf_symbols += [expanded_symbol(pdf_name, superscript=i, commutative=commutative)
-                                 for i in range(startOrder, stopOrder)]
+                                 for i in range(start_order, stop_order)]
         pdf_symbol = sp.Symbol(pdf_name, commutative=commutative)
         subs_dict[pdf_symbol] = sum(eps ** i * expanded_symbol(pdf_name, superscript=i, commutative=commutative)
-                                    for i in range(startOrder, stopOrder))
-        max_expansion_order = max(max_expansion_order, stopOrder)
+                                    for i in range(start_order, stop_order))
+        max_expansion_order = max(max_expansion_order, stop_order)
     equation = equation.subs(subs_dict)
     equation = expand_using_linearity(equation, functions=expanded_pdf_symbols).expand().collect(eps)
     result = {eps_order: equation.coeff(eps ** eps_order) for eps_order in range(1, 2*max_expansion_order)}
@@ -568,7 +568,7 @@ def match_to_navier_stokes(conservation_equations, rho=sp.Symbol("rho"), u=sp.sy
 
     def match_moment_eqs(moment_eqs, is_compressible):
         shear_and_pressure_eqs = []
-        for i, momEq in enumerate(moment_eqs):
+        for i, mom_eq in enumerate(moment_eqs):
             factor = rho if is_compressible else 1
             ref = diff_simplify(Diff(factor * u[i], t) + sum(Diff(factor * u[i] * u[j], j) for j in range(dim)))
             shear_and_pressure_eqs.append(diff_simplify(moment_eqs[i]) - ref)
@@ -590,8 +590,8 @@ def match_to_navier_stokes(conservation_equations, rho=sp.Symbol("rho"), u=sp.sy
 
         sigma_ = sp.zeros(dim)
         error_terms_ = []
-        for i, shearAndPressureEq in enumerate(shear_and_pressure_eqs):
-            eq_without_pressure = shearAndPressureEq - sum(coeff * Diff(arg, i) for coeff, arg in pressure_terms)
+        for i, shear_and_pressure_eq in enumerate(shear_and_pressure_eqs):
+            eq_without_pressure = shear_and_pressure_eq - sum(coeff * Diff(arg, i) for coeff, arg in pressure_terms)
             for d in eq_without_pressure.atoms(Diff):
                 eq_without_pressure = eq_without_pressure.collect(d)
                 sigma_[i, d.target] += eq_without_pressure.coeff(d) * d.arg
diff --git a/chapman_enskog/chapman_enskog_higher_order.py b/chapman_enskog/chapman_enskog_higher_order.py
index 21ce5532..a4280eff 100644
--- a/chapman_enskog/chapman_enskog_higher_order.py
+++ b/chapman_enskog/chapman_enskog_higher_order.py
@@ -69,17 +69,17 @@ def determine_higher_order_moments(epsilon_hierarchy, relaxation_rates, moment_c
 
     time_diffs = OrderedDict()
     non_eq_moms = OrderedDict()
-    for epsOrder in range(1, order):
-        eps_eq = epsilon_hierarchy[epsOrder]
+    for eps_order in range(1, order):
+        eps_eq = epsilon_hierarchy[eps_order]
 
         for order in range(order+1):
             eqs = sp.Matrix([full_expand(tm(eps_eq * m)) for m in poly_moments(order, dim)])
             unknown_moments = [m for m in eqs.atoms(CeMoment)
-                               if m.superscript == epsOrder and sum(m.moment_tuple) == order]
+                               if m.superscript == eps_order and sum(m.moment_tuple) == order]
             if len(unknown_moments) == 0:
                 for eq in eqs:
                     time_diffs_in_expr = [d for d in eq.atoms(Diff) if
-                                          (d.target == 't' or d.target == sp.Symbol("t")) and d.superscript == epsOrder]
+                                          (d.target == 't' or d.target == sp.Symbol("t")) and d.superscript == eps_order]
                     if len(time_diffs_in_expr) == 0:
                         continue
                     assert len(time_diffs_in_expr) == 1, \
diff --git a/chapman_enskog/chapman_enskog_steady_state.py b/chapman_enskog/chapman_enskog_steady_state.py
index 6d5678c1..f772a018 100644
--- a/chapman_enskog/chapman_enskog_steady_state.py
+++ b/chapman_enskog/chapman_enskog_steady_state.py
@@ -13,12 +13,12 @@ class SteadyStateChapmanEnskogAnalysis:
         self.method = method
         self.dim = method.dim
         self.order = order
-        self.physicalVariables = list(sp.Matrix(self.method.moment_equilibrium_values).atoms(sp.Symbol))  # rho, u..
+        self.physical_variables = list(sp.Matrix(self.method.moment_equilibrium_values).atoms(sp.Symbol))  # rho, u..
         self.eps = sp.Symbol("epsilon")
 
         self.f_sym = sp.Symbol("f", commutative=False)
         self.f_syms = [expanded_symbol("f", superscript=i, commutative=False) for i in range(order + 1)]
-        self.collisionOpSym = sp.Symbol("A", commutative=False)
+        self.collision_op_sym = sp.Symbol("A", commutative=False)
         self.force_sym = sp.Symbol("F_q", commutative=False)
         self.velocity_syms = sp.Matrix([expanded_symbol("c", subscript=i, commutative=False) for i in range(self.dim)])
 
@@ -26,34 +26,34 @@ class SteadyStateChapmanEnskogAnalysis:
         self.force_model = None
         if force_model_class:
             acceleration_symbols = sp.symbols("a_:%d" % (self.dim,), commutative=False)
-            self.physicalVariables += acceleration_symbols
+            self.physical_variables += acceleration_symbols
             self.force_model = force_model_class(acceleration_symbols)
             self.F_q = self.force_model(self.method)
 
         # Perform the analysis
-        self.tayloredEquation = self._create_taylor_expanded_equation()
-        inserted_hierarchy, raw_hierarchy = self._create_pdf_hierarchy(self.tayloredEquation)
-        self.pdfHierarchy = inserted_hierarchy
-        self.pdfHierarchyRaw = raw_hierarchy
-        self.recombinedEq = self._recombine_pdfs(self.pdfHierarchy)
+        self.taylored_equation = self._create_taylor_expanded_equation()
+        inserted_hierarchy, raw_hierarchy = self._create_pdf_hierarchy(self.taylored_equation)
+        self.pdf_hierarchy = inserted_hierarchy
+        self.pdf_hierarchy_raw = raw_hierarchy
+        self.recombined_eq = self._recombine_pdfs(self.pdf_hierarchy)
 
         symbols_to_values = self._get_symbols_to_values_dict()
-        self.continuityEquation = self._compute_continuity_equation(self.recombinedEq, symbols_to_values)
-        self.momentumEquations = [self._compute_momentum_equation(self.recombinedEq, symbols_to_values, h)
-                                  for h in range(self.dim)]
+        self.continuity_equation = self._compute_continuity_equation(self.recombined_eq, symbols_to_values)
+        self.momentum_equations = [self._compute_momentum_equation(self.recombined_eq, symbols_to_values, h)
+                                   for h in range(self.dim)]
 
     def get_pdf_hierarchy(self, order, collision_operator_symbol=sp.Symbol("omega")):
         def substitute_non_commuting_symbols(eq):
             return eq.subs({a: sp.Symbol(a.name) for a in eq.atoms(sp.Symbol)})
-        result = self.pdfHierarchy[order].subs(self.collisionOpSym, collision_operator_symbol)
+        result = self.pdf_hierarchy[order].subs(self.collision_op_sym, collision_operator_symbol)
         result = normalize_diff_order(result, functions=(self.f_syms[0], self.force_sym))
         return substitute_non_commuting_symbols(result)
 
     def get_continuity_equation(self, only_order=None):
-        return self._extract_order(self.continuityEquation, only_order)
+        return self._extract_order(self.continuity_equation, only_order)
 
     def get_momentum_equation(self, only_order=None):
-        return [self._extract_order(e, only_order) for e in self.momentumEquations]
+        return [self._extract_order(e, only_order) for e in self.momentum_equations]
 
     def _extract_order(self, eq, order):
         if order is None:
@@ -76,7 +76,7 @@ class SteadyStateChapmanEnskogAnalysis:
         taylor_expansion = DiffOperator.apply(differential_operator.expand(), self.f_sym)
 
         f_non_eq = self.f_sym - self.f_syms[0]
-        return taylor_expansion + self.collisionOpSym * f_non_eq - self.eps * self.force_sym
+        return taylor_expansion + self.collision_op_sym * f_non_eq - self.eps * self.force_sym
 
     def _create_pdf_hierarchy(self, taylored_equation):
         """
@@ -90,8 +90,8 @@ class SteadyStateChapmanEnskogAnalysis:
         inserted_hierarchy = []
         raw_hierarchy = []
         substitution_dict = {}
-        for ceEq, f_i in zip(chapman_enskog_hierarchy, self.f_syms):
-            new_eq = -1 / self.collisionOpSym * (ceEq - self.collisionOpSym * f_i)
+        for ce_eq, f_i in zip(chapman_enskog_hierarchy, self.f_syms):
+            new_eq = -1 / self.collision_op_sym * (ce_eq - self.collision_op_sym * f_i)
             raw_hierarchy.append(new_eq)
             new_eq = expand_using_linearity(new_eq.subs(substitution_dict), functions=self.f_syms + [self.force_sym])
             if new_eq:
@@ -110,14 +110,14 @@ class SteadyStateChapmanEnskogAnalysis:
         eq = sp.expand(self.velocity_syms[coordinate] * recombined_eq)
 
         result = self._compute_moments(eq, symbols_to_values)
-        if self.force_model and hasattr(self.force_model, 'equilibriumVelocityShift'):
+        if self.force_model and hasattr(self.force_model, 'equilibrium_velocity_shift'):
             compressible = self.method.conserved_quantity_computation.compressible
-            shift = self.force_model.equilibriumVelocityShift(sp.Symbol("rho") if compressible else 1)
+            shift = self.force_model.equilibrium_velocity_shift(sp.Symbol("rho") if compressible else 1)
             result += shift[coordinate]
         return result
 
     def _get_symbols_to_values_dict(self):
-        result = {1 / self.collisionOpSym: self.method.inverse_collision_matrix,
+        result = {1 / self.collision_op_sym: self.method.inverse_collision_matrix,
                   self.force_sym: sp.Matrix(self.force_model(self.method)) if self.force_model else 0,
                   self.f_syms[0]: self.method.get_equilibrium_terms()}
         for i, c_i in enumerate(self.velocity_syms):
@@ -163,7 +163,7 @@ class SteadyStateChapmanEnskogAnalysis:
 
             new_products.append(new_prod)
 
-        return normalize_diff_order(expand_using_linearity(sp.Add(*new_products), functions=self.physicalVariables))
+        return normalize_diff_order(expand_using_linearity(sp.Add(*new_products), functions=self.physical_variables))
 
 
 # ----------------------------------------------------------------------------------------------------------------------
diff --git a/continuous_distribution_measures.py b/continuous_distribution_measures.py
index 8e69cb59..39b173e5 100644
--- a/continuous_distribution_measures.py
+++ b/continuous_distribution_measures.py
@@ -20,7 +20,7 @@ def moment_generating_function(generating_function, symbols, symbols_in_result):
     :param generating_function: sympy expression
     :param symbols: a sequence of symbols forming the vector x
     :param symbols_in_result: a sequence forming the vector t
-    :return: transformation result F: an expression that depends now on symbolsInResult
+    :return: transformation result F: an expression that now depends on symbols_in_result
              (symbols have been integrated out)
 
     .. note::
diff --git a/creationfunctions.py b/creationfunctions.py
index 792ec5a7..9b6758bb 100644
--- a/creationfunctions.py
+++ b/creationfunctions.py
@@ -90,11 +90,11 @@ Simplifications / Transformations:
 
 Field size information:
 
-- ``pdfArr=None``: pass a numpy array here to create kernels with fixed size and create the loop nest according to layout
-  of this array
+- ``pdf_arr=None``: pass a numpy array here to create kernels with fixed size and create the loop nest according
+  to the layout of this array
 - ``field_size=None``: create kernel for fixed field size
-- ``field_layout='c'``:   ``'c'`` or ``'numpy'`` for standard numpy layout, ``'reverseNumpy'`` or ``'f'`` for fortran
-  layout, this does not apply when pdfArr was given, then the same layout as pdfArr is used
+- ``field_layout='c'``:   ``'c'`` or ``'numpy'`` for standard numpy layout, ``'reverse_numpy'`` or ``'f'`` for Fortran
+  layout; this does not apply when pdf_arr was given, in which case the same layout as pdf_arr is used
 
 GPU:
 
@@ -102,7 +102,7 @@ GPU:
   and installed *pycuda* package
 - ``gpu_indexing='block'``: determines mapping of CUDA threads to cells. Can be either ``'block'`` or ``'line'``
 - ``gpu_indexing_params='block'``: parameters passed to init function of gpu indexing.
-  For ``'block'`` indexing one can e.g. specify the block size ``{'blockSize' : (128, 4, 1)}``
+  For ``'block'`` indexing one can e.g. specify the block size ``{'block_size' : (128, 4, 1)}``
 
 Other:
 
@@ -173,10 +173,10 @@ from lbmpy.updatekernels import StreamPullTwoFieldsAccessor, PeriodicTwoFieldsAc
 
 
 def create_lb_function(ast=None, optimization={}, **kwargs):
-    params, optParams = update_with_default_parameters(kwargs, optimization)
+    params, opt_params = update_with_default_parameters(kwargs, optimization)
 
     if ast is None:
-        params['optimization'] = optParams
+        params['optimization'] = opt_params
         ast = create_lb_ast(**params)
 
     res = ast.compile()
@@ -279,7 +279,7 @@ def create_lb_collision_rule(lb_method=None, optimization={}, **kwargs):
     if params['velocity_input'] is not None:
         eqs = [Assignment(cqc.zeroth_order_moment_symbol, sum(lb_method.pre_collision_pdf_symbols))]
         velocity_field = params['velocity_input']
-        eqs += [Assignment(uSym, velocity_field(i)) for i, uSym in enumerate(cqc.first_order_moment_symbols)]
+        eqs += [Assignment(u_sym, velocity_field(i)) for i, u_sym in enumerate(cqc.first_order_moment_symbols)]
         eqs = AssignmentCollection(eqs, [])
         collision_rule = lb_method.get_collision_rule(conserved_quantity_equations=eqs)
     else:
@@ -366,7 +366,7 @@ def create_lb_method_from_existing(method, modification_function):
 
     Args:
         method: old method
-        modification_function: function receiving (moment, equilibriumValue, relaxation_rate) as arguments,
+        modification_function: function receiving (moment, equilibrium_value, relaxation_rate) as arguments,
                                i.e. one row of the relaxation table, returning a modified version
     """
     relaxation_table = (modification_function(m, eq, rr)
@@ -463,7 +463,7 @@ def update_with_default_parameters(params, opt_params=None, fail_on_unknown_para
         'split': False,
 
         'field_size': None,
-        'field_layout': 'fzyx',  # can be 'numpy' (='c'), 'reverseNumpy' (='f'), 'fzyx', 'zyxf'
+        'field_layout': 'fzyx',  # can be 'numpy' (='c'), 'reverse_numpy' (='f'), 'fzyx', 'zyxf'
         'symbolic_field': None,
 
         'target': 'cpu',
diff --git a/cumulants.py b/cumulants.py
index 9794b7bc..b330f0eb 100644
--- a/cumulants.py
+++ b/cumulants.py
@@ -52,7 +52,7 @@ def __cumulant_raw_moment_transform(index, dependent_var_dict, outer_function, d
         index: tuple describing the index of the cumulant/moment to express as function of moments/cumulants
         dependent_var_dict: a dictionary from index tuple to moments/cumulants symbols, or None to use default symbols
         outer_function: logarithm to transform from moments->cumulants, exp for inverse direction
-        default_prefix: if dependentVarDict is None, this is used to construct symbols of the form prefix_i_j_k
+        default_prefix: if dependent_var_dict is None, this is used to construct symbols of the form prefix_i_j_k
         centralized: if True the first order moments/cumulants are set to zero
     """
     dim = len(index)
@@ -74,8 +74,8 @@ def __cumulant_raw_moment_transform(index, dependent_var_dict, outer_function, d
     # index (2,1,0) means differentiate twice w.r.t to first variable, and once w.r.t to second variable
     # this is transformed here into representation [0,0,1] such that each entry is one diff operation
     partition_list = []
-    for i, indexComponent in enumerate(index):
-        for j in range(indexComponent):
+    for i, index_component in enumerate(index):
+        for j in range(index_component):
             partition_list.append(i)
 
     if len(partition_list) == 0:  # special case for zero index
diff --git a/fieldaccess.py b/fieldaccess.py
index 77e0b650..3f4309c8 100644
--- a/fieldaccess.py
+++ b/fieldaccess.py
@@ -68,22 +68,22 @@ class PeriodicTwoFieldsAccessor(PdfFieldAccessor):
         for i, d in enumerate(stencil):
             pull_direction = inverse_direction(d)
             periodic_pull_direction = []
-            for coordId, dirElement in enumerate(pull_direction):
-                if not self._periodicity[coordId]:
-                    periodic_pull_direction.append(dirElement)
+            for coord_id, dir_element in enumerate(pull_direction):
+                if not self._periodicity[coord_id]:
+                    periodic_pull_direction.append(dir_element)
                     continue
 
                 lower_limit = self._ghostLayers
-                upper_limit = field.spatial_shape[coordId] - 1 - self._ghostLayers
+                upper_limit = field.spatial_shape[coord_id] - 1 - self._ghostLayers
                 limit_diff = upper_limit - lower_limit
-                loop_counter = LoopOverCoordinate.get_loop_counter_symbol(coordId)
-                if dirElement == 0:
+                loop_counter = LoopOverCoordinate.get_loop_counter_symbol(coord_id)
+                if dir_element == 0:
                     periodic_pull_direction.append(0)
-                elif dirElement == 1:
-                    new_dir_element = sp.Piecewise((dirElement, loop_counter < upper_limit), (-limit_diff, True))
+                elif dir_element == 1:
+                    new_dir_element = sp.Piecewise((dir_element, loop_counter < upper_limit), (-limit_diff, True))
                     periodic_pull_direction.append(new_dir_element)
-                elif dirElement == -1:
-                    new_dir_element = sp.Piecewise((dirElement, loop_counter > lower_limit), (limit_diff, True))
+                elif dir_element == -1:
+                    new_dir_element = sp.Piecewise((dir_element, loop_counter > lower_limit), (limit_diff, True))
                     periodic_pull_direction.append(new_dir_element)
                 else:
                     raise NotImplementedError("This accessor supports only nearest neighbor stencils")
@@ -127,9 +127,9 @@ def visualize_field_mapping(axes, stencil, field_mapping, color='b'):
     from lbmpy.plot2d import LbGrid
     grid = LbGrid(3, 3)
     grid.fill_with_default_arrows()
-    for fieldAccess, direction in zip(field_mapping, stencil):
-        field_position = stencil[fieldAccess.index[0]]
-        neighbor = fieldAccess.offsets
+    for field_access, direction in zip(field_mapping, stencil):
+        field_position = stencil[field_access.index[0]]
+        neighbor = field_access.offsets
         grid.add_arrow((1 + neighbor[0], 1 + neighbor[1]),
                        arrow_position=field_position, arrow_direction=direction, color=color)
     grid.draw(axes)
diff --git a/forcemodels.py b/forcemodels.py
index 88e53b90..559f6dcd 100644
--- a/forcemodels.py
+++ b/forcemodels.py
@@ -110,14 +110,14 @@ class Simple(object):
     def __call__(self, lb_method, **kwargs):
         assert len(self._force) == lb_method.dim
 
-        def scalarProduct(a, b):
+        def scalar_product(a, b):
             return sum(a_i * b_i for a_i, b_i in zip(a, b))
 
-        return [3 * w_i * scalarProduct(self._force, direction)
+        return [3 * w_i * scalar_product(self._force, direction)
                 for direction, w_i in zip(lb_method.stencil, lb_method.weights)]
 
     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)
 
 
 class Luo(object):
@@ -139,7 +139,7 @@ class Luo(object):
         return result
 
     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)
 
 
 class Guo(object):
@@ -153,15 +153,15 @@ class Guo(object):
     def __call__(self, lb_method):
         luo = Luo(self._force)
 
-        shearRelaxationRate = get_shear_relaxation_rate(lb_method)
-        correctionFactor = (1 - sp.Rational(1, 2) * shearRelaxationRate)
-        return [correctionFactor * t for t in luo(lb_method)]
+        shear_relaxation_rate = get_shear_relaxation_rate(lb_method)
+        correction_factor = (1 - sp.Rational(1, 2) * shear_relaxation_rate)
+        return [correction_factor * t for t in luo(lb_method)]
 
     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)
 
-    def equilibriumVelocityShift(self, density):
-        return defaultVelocityShift(density, self._force)
+    def equilibrium_velocity_shift(self, density):
+        return default_velocity_shift(density, self._force)
 
 
 class Buick(object):
@@ -176,15 +176,15 @@ class Buick(object):
     def __call__(self, lb_method, **kwargs):
         simple = Simple(self._force)
 
-        shearRelaxationRate = get_shear_relaxation_rate(lb_method)
-        correctionFactor = (1 - sp.Rational(1, 2) * shearRelaxationRate)
-        return [correctionFactor * t for t in simple(lb_method)]
+        shear_relaxation_rate = get_shear_relaxation_rate(lb_method)
+        correction_factor = (1 - sp.Rational(1, 2) * shear_relaxation_rate)
+        return [correction_factor * t for t in simple(lb_method)]
 
     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)
 
-    def equilibriumVelocityShift(self, density):
-        return defaultVelocityShift(density, self._force)
+    def equilibrium_velocity_shift(self, density):
+        return default_velocity_shift(density, self._force)
 
 
 class EDM(object):
@@ -198,18 +198,18 @@ class EDM(object):
         rho = cqc.zeroth_order_moment_symbol if cqc.compressible else 1
         u = cqc.first_order_moment_symbols
 
-        shiftedU = (u_i + f_i / rho for u_i, f_i in zip(u, self._force))
-        eqTerms = lb_method.get_equilibrium_terms()
-        shiftedEq = eqTerms.subs({u_i: su_i for u_i, su_i in zip(u, shiftedU)})
-        return shiftedEq - eqTerms
+        shifted_u = (u_i + f_i / rho for u_i, f_i in zip(u, self._force))
+        eq_terms = lb_method.get_equilibrium_terms()
+        shifted_eq = eq_terms.subs({u_i: su_i for u_i, su_i in zip(u, shifted_u)})
+        return shifted_eq - eq_terms
 
     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)
 
 
 # --------------------------------  Helper functions  ------------------------------------------------------------------
 
 
-def defaultVelocityShift(density, force):
+def default_velocity_shift(density, force):
     return [f_i / (2 * density) for f_i in force]
 
diff --git a/geometry.py b/geometry.py
index 217a3055..e650ebe8 100644
--- a/geometry.py
+++ b/geometry.py
@@ -87,8 +87,8 @@ def add_pipe(boundary_handling, diameter, boundary=NoSlip()):
 
     :param boundary_handling: boundary handling object, works for serial and parallel versions
     :param diameter: pipe diameter, can be either a constant value or a callback function.
-                     the callback function has the signature (xCoordArray, domainShapeInCells) andp has to return
-                     a array of same shape as the received xCoordArray, with the diameter for each x position
+                     the callback function has the signature (x_coord_array, domain_shape_in_cells) and has to return
+                     an array of the same shape as the received x_coord_array, with the diameter for each x position
     :param boundary: boundary object that is set at the wall, defaults to NoSlip (bounce back)
     """
     domain_shape = boundary_handling.shape
diff --git a/lbstep.py b/lbstep.py
index 937ca58a..8644ce5d 100644
--- a/lbstep.py
+++ b/lbstep.py
@@ -18,7 +18,7 @@ class LatticeBoltzmannStep:
                  kernel_params=MappingProxyType({}), data_handling=None, name="lbm", optimization=None,
                  velocity_data_name=None, density_data_name=None, density_data_index=None,
                  compute_velocity_in_every_step=False, compute_density_in_every_step=False,
-                 velocity_input_array_name=None, time_step_order='streamCollide', flag_interface=None,
+                 velocity_input_array_name=None, time_step_order='stream_collide', flag_interface=None,
                  **method_parameters):
 
         # --- Parameter normalization  ---
@@ -87,10 +87,10 @@ class LatticeBoltzmannStep:
             optimization['symbolic_field'] = data_handling.fields[self._pdf_arr_name]
             method_parameters['field_name'] = self._pdf_arr_name
             method_parameters['temporary_field_name'] = self._tmp_arr_name
-            if time_step_order == 'streamCollide':
+            if time_step_order == 'stream_collide':
                 self._lbmKernels = [create_lb_function(optimization=optimization,
                                                        **method_parameters)]
-            elif time_step_order == 'collideStream':
+            elif time_step_order == 'collide_stream':
                 self._lbmKernels = [create_lb_function(optimization=optimization,
                                                        kernel_type='collide_only',
                                                        **method_parameters),
@@ -224,9 +224,9 @@ class LatticeBoltzmannStep:
             self._data_handling.to_cpu(self._pdf_arr_name)
         self._data_handling.run_kernel(self._getterKernel, **self.kernel_params)
 
-    def run(self, timeSteps):
+    def run(self, time_steps):
         self.pre_run()
-        for i in range(timeSteps):
+        for i in range(time_steps):
             self.time_step()
         self.post_run()
 
diff --git a/macroscopic_value_kernels.py b/macroscopic_value_kernels.py
index 5e997c90..e165a3a1 100644
--- a/macroscopic_value_kernels.py
+++ b/macroscopic_value_kernels.py
@@ -3,57 +3,59 @@ from pystencils.field import Field, get_layout_of_array
 from lbmpy.simplificationfactory import create_simplification_strategy
 
 
-def compileMacroscopicValuesGetter(lb_method, outputQuantities, pdfArr=None, field_layout='numpy', target='cpu'):
+def compile_macroscopic_values_getter(lb_method, output_quantities, pdf_arr=None, field_layout='numpy', target='cpu'):
     """
     Create kernel to compute macroscopic value(s) from a pdf field (e.g. density or velocity)
 
     :param lb_method: instance of :class:`lbmpy.methods.AbstractLbMethod`
-    :param outputQuantities: sequence of quantities to compute e.g. ['density', 'velocity']
-    :param pdfArr: optional numpy array for pdf field - used to get optimal loop structure for kernel
-    :param field_layout: layout for output field, also used for pdf field if pdfArr is not given
+    :param output_quantities: sequence of quantities to compute e.g. ['density', 'velocity']
+    :param pdf_arr: optional numpy array for pdf field - used to get optimal loop structure for kernel
+    :param field_layout: layout for output field, also used for pdf field if pdf_arr is not given
     :param target: 'cpu' or 'gpu'
     :return: a function to compute macroscopic values:
         - pdf_array
-        - keyword arguments from name of conserved quantity (as in outputQuantities) to numpy field
+        - keyword arguments from name of conserved quantity (as in output_quantities) to numpy field
     """
-    if not (isinstance(outputQuantities, list) or isinstance(outputQuantities, tuple)):
-        outputQuantities = [outputQuantities]
+    if not (isinstance(output_quantities, list) or isinstance(output_quantities, tuple)):
+        output_quantities = [output_quantities]
 
     cqc = lb_method.conserved_quantity_computation
-    unknownQuantities = [oq for oq in outputQuantities if oq not in cqc.conserved_quantities]
-    if unknownQuantities:
+    unknown_quantities = [oq for oq in output_quantities if oq not in cqc.conserved_quantities]
+    if unknown_quantities:
         raise ValueError("No such conserved quantity: %s, conserved quantities are %s" %
-                         (str(unknownQuantities), str(cqc.conserved_quantities.keys())))
+                         (str(unknown_quantities), str(cqc.conserved_quantities.keys())))
 
-    if pdfArr is None:
-        pdfField = Field.create_generic('pdfs', lb_method.dim, index_dimensions=1, layout=field_layout)
+    if pdf_arr is None:
+        pdf_field = Field.create_generic('pdfs', lb_method.dim, index_dimensions=1, layout=field_layout)
     else:
-        pdfField = Field.create_from_numpy_array('pdfs', pdfArr, index_dimensions=1)
+        pdf_field = Field.create_from_numpy_array('pdfs', pdf_arr, index_dimensions=1)
 
-    outputMapping = {}
-    for outputQuantity in outputQuantities:
-        numberOfElements = cqc.conserved_quantities[outputQuantity]
-        assert numberOfElements >= 1
+    output_mapping = {}
+    for output_quantity in output_quantities:
+        number_of_elements = cqc.conserved_quantities[output_quantity]
+        assert number_of_elements >= 1
 
-        indDims = 0 if numberOfElements <= 1 else 1
-        if pdfArr is None:
-            outputField = Field.create_generic(outputQuantity, lb_method.dim, layout=field_layout, index_dimensions=indDims)
+        ind_dims = 0 if number_of_elements <= 1 else 1
+        if pdf_arr is None:
+            output_field = Field.create_generic(output_quantity, lb_method.dim, layout=field_layout,
+                                                index_dimensions=ind_dims)
         else:
-            outputFieldShape = pdfArr.shape[:-1]
-            if indDims > 0:
-                outputFieldShape += (numberOfElements,)
-                field_layout = get_layout_of_array(pdfArr)
+            output_field_shape = pdf_arr.shape[:-1]
+            if ind_dims > 0:
+                output_field_shape += (number_of_elements,)
+                field_layout = get_layout_of_array(pdf_arr)
             else:
-                field_layout = get_layout_of_array(pdfArr, index_dimension_ids=[len(pdfField.shape) - 1])
-            outputField = Field.create_fixed_size(outputQuantity, outputFieldShape, indDims, pdfArr.dtype, field_layout)
+                field_layout = get_layout_of_array(pdf_arr, index_dimension_ids=[len(pdf_field.shape) - 1])
+            output_field = Field.create_fixed_size(output_quantity, output_field_shape, ind_dims, pdf_arr.dtype,
+                                                   field_layout)
 
-        outputMapping[outputQuantity] = [outputField(i) for i in range(numberOfElements)]
-        if len(outputMapping[outputQuantity]) == 1:
-            outputMapping[outputQuantity] = outputMapping[outputQuantity][0]
+        output_mapping[output_quantity] = [output_field(i) for i in range(number_of_elements)]
+        if len(output_mapping[output_quantity]) == 1:
+            output_mapping[output_quantity] = output_mapping[output_quantity][0]
 
     stencil = lb_method.stencil
-    pdfSymbols = [pdfField(i) for i in range(len(stencil))]
-    eqs = cqc.output_equations_from_pdfs(pdfSymbols, outputMapping).all_assignments
+    pdf_symbols = [pdf_field(i) for i in range(len(stencil))]
+    eqs = cqc.output_equations_from_pdfs(pdf_symbols, output_mapping).all_assignments
 
     if target == 'cpu':
         import pystencils.cpu as cpu
@@ -65,128 +67,128 @@ def compileMacroscopicValuesGetter(lb_method, outputQuantities, pdfArr=None, fie
         raise ValueError("Unknown target '%s'. Possible targets are 'cpu' and 'gpu'" % (target,))
 
     def getter(pdfs, **kwargs):
-        if pdfArr is not None:
-            assert pdfs.shape == pdfArr.shape and pdfs.strides == pdfArr.strides, \
-                "Pdf array not matching blueprint which was used to compile" + str(pdfs.shape) + str(pdfArr.shape)
-        if not set(outputQuantities).issubset(kwargs.keys()):
+        if pdf_arr is not None:
+            assert pdfs.shape == pdf_arr.shape and pdfs.strides == pdf_arr.strides, \
+                "Pdf array not matching blueprint which was used to compile" + str(pdfs.shape) + str(pdf_arr.shape)
+        if not set(output_quantities).issubset(kwargs.keys()):
             raise ValueError("You have to specify the output field for each of the following quantities: %s"
-                             % (str(outputQuantities),))
+                             % (str(output_quantities),))
         kernel(pdfs=pdfs, **kwargs)
 
     return getter
 
 
-def compileMacroscopicValuesSetter(lb_method, quantitiesToSet, pdfArr=None, field_layout='numpy', target='cpu'):
+def compile_macroscopic_values_setter(lb_method, quantities_to_set, pdf_arr=None, field_layout='numpy', target='cpu'):
     """
     Creates a function that sets a pdf field to specified macroscopic quantities
     The returned function can be called with the pdf field to set as single argument
 
     :param lb_method: instance of :class:`lbmpy.methods.AbstractLbMethod`
-    :param quantitiesToSet: map from conserved quantity name to fixed value or numpy array
-    :param pdfArr: optional numpy array for pdf field - used to get optimal loop structure for kernel
-    :param field_layout: layout of the pdf field if pdfArr was not given
+    :param quantities_to_set: map from conserved quantity name to fixed value or numpy array
+    :param pdf_arr: optional numpy array for pdf field - used to get optimal loop structure for kernel
+    :param field_layout: layout of the pdf field if pdf_arr was not given
     :param target: 'cpu' or 'gpu'
     :return: function taking pdf array as single argument and which sets the field to the given values
     """
-    if pdfArr is not None:
-        pdfField = Field.create_from_numpy_array('pdfs', pdfArr, index_dimensions=1)
+    if pdf_arr is not None:
+        pdf_field = Field.create_from_numpy_array('pdfs', pdf_arr, index_dimensions=1)
     else:
-        pdfField = Field.create_generic('pdfs', lb_method.dim, index_dimensions=1, layout=field_layout)
+        pdf_field = Field.create_generic('pdfs', lb_method.dim, index_dimensions=1, layout=field_layout)
 
-    fixedKernelParameters = {}
+    fixed_kernel_parameters = {}
     cqc = lb_method.conserved_quantity_computation
 
-    valueMap = {}
-    atLeastOneFieldInput = False
-    for quantityName, value in quantitiesToSet.items():
+    value_map = {}
+    at_least_one_field_input = False
+    for quantity_name, value in quantities_to_set.items():
         if hasattr(value, 'shape'):
-            fixedKernelParameters[quantityName] = value
-            atLeastOneFieldInput = True
-            numComponents = cqc.conserved_quantities[quantityName]
-            field = Field.create_from_numpy_array(quantityName, value, index_dimensions=0 if numComponents <= 1 else 1)
-            if numComponents == 1:
+            fixed_kernel_parameters[quantity_name] = value
+            at_least_one_field_input = True
+            num_components = cqc.conserved_quantities[quantity_name]
+            field = Field.create_from_numpy_array(quantity_name, value, index_dimensions=0 if num_components <= 1 else 1)
+            if num_components == 1:
                 value = field(0)
             else:
-                value = [field(i) for i in range(numComponents)]
+                value = [field(i) for i in range(num_components)]
 
-        valueMap[quantityName] = value
+        value_map[quantity_name] = value
 
-    cqEquations = cqc.equilibrium_input_equations_from_init_values(**valueMap)
+    cq_equations = cqc.equilibrium_input_equations_from_init_values(**value_map)
 
-    eq = lb_method.get_equilibrium(conserved_quantity_equations=cqEquations)
-    if atLeastOneFieldInput:
+    eq = lb_method.get_equilibrium(conserved_quantity_equations=cq_equations)
+    if at_least_one_field_input:
         simplification = create_simplification_strategy(lb_method)
         eq = simplification(eq)
     else:
         eq = eq.new_without_subexpressions()
 
-    substitutions = {sym: pdfField(i) for i, sym in enumerate(lb_method.post_collision_pdf_symbols)}
+    substitutions = {sym: pdf_field(i) for i, sym in enumerate(lb_method.post_collision_pdf_symbols)}
     eq = eq.new_with_substitutions(substitutions).all_assignments
 
     if target == 'cpu':
         import pystencils.cpu as cpu
-        kernel = cpu.make_python_function(cpu.create_kernel(eq), argument_dict=fixedKernelParameters)
+        kernel = cpu.make_python_function(cpu.create_kernel(eq), argument_dict=fixed_kernel_parameters)
     elif target == 'gpu':
         import pystencils.gpucuda as gpu
-        kernel = gpu.make_python_function(gpu.create_cuda_kernel(eq), argument_dict=fixedKernelParameters)
+        kernel = gpu.make_python_function(gpu.create_cuda_kernel(eq), argument_dict=fixed_kernel_parameters)
     else:
         raise ValueError("Unknown target '%s'. Possible targets are 'cpu' and 'gpu'" % (target,))
 
     def setter(pdfs, **kwargs):
-        if pdfArr is not None:
-            assert pdfs.shape == pdfArr.shape and pdfs.strides == pdfArr.strides, \
-                "Pdf array not matching blueprint which was used to compile" + str(pdfs.shape) + str(pdfArr.shape)
+        if pdf_arr is not None:
+            assert pdfs.shape == pdf_arr.shape and pdfs.strides == pdf_arr.strides, \
+                "Pdf array not matching blueprint which was used to compile" + str(pdfs.shape) + str(pdf_arr.shape)
         kernel(pdfs=pdfs, **kwargs)
 
     return setter
 
 
-def createAdvancedVelocitySetterCollisionRule(lb_method, velocityArray, velocityRelaxationRate=0.8):
+def create_advanced_velocity_setter_collision_rule(lb_method, velocity_array, velocity_relaxation_rate=0.8):
 
-    velocityField = Field.create_from_numpy_array('velInput', velocityArray, index_dimensions=1)
+    velocity_field = Field.create_from_numpy_array('vel_input', velocity_array, index_dimensions=1)
 
     cqc = lb_method.conserved_quantity_computation
-    densitySymbol = cqc.defined_symbols(order=0)[1]
-    velocitySymbols = cqc.defined_symbols(order=1)[1]
+    density_symbol = cqc.defined_symbols(order=0)[1]
+    velocity_symbols = cqc.defined_symbols(order=1)[1]
 
     # density is computed from pdfs
-    eqInputFromPdfs = cqc.equilibrium_input_equations_from_pdfs(lb_method.pre_collision_pdf_symbols)
-    eqInputFromPdfs = eqInputFromPdfs.new_filtered([densitySymbol])
+    eq_input_from_pdfs = cqc.equilibrium_input_equations_from_pdfs(lb_method.pre_collision_pdf_symbols)
+    eq_input_from_pdfs = eq_input_from_pdfs.new_filtered([density_symbol])
     # velocity is read from input field
-    velSymbols = [velocityField(i) for i in range(lb_method.dim)]
-    eqInputFromField = cqc.equilibrium_input_equations_from_init_values(velocity=velSymbols)
-    eqInputFromField = eqInputFromField.new_filtered(velocitySymbols)
+    vel_symbols = [velocity_field(i) for i in range(lb_method.dim)]
+    eq_input_from_field = cqc.equilibrium_input_equations_from_init_values(velocity=vel_symbols)
+    eq_input_from_field = eq_input_from_field.new_filtered(velocity_symbols)
     # then both are merged together
-    eqInput = eqInputFromPdfs.new_merged(eqInputFromField)
+    eq_input = eq_input_from_pdfs.new_merged(eq_input_from_field)
 
     # set first order relaxation rate
     lb_method = deepcopy(lb_method)
-    lb_method.set_first_moment_relaxation_rate(velocityRelaxationRate)
+    lb_method.set_first_moment_relaxation_rate(velocity_relaxation_rate)
 
-    simplificationStrategy = create_simplification_strategy(lb_method)
-    newCollisionRule = simplificationStrategy(lb_method.get_collision_rule(eqInput))
+    simplification_strategy = create_simplification_strategy(lb_method)
+    new_collision_rule = simplification_strategy(lb_method.get_collision_rule(eq_input))
 
-    return newCollisionRule
+    return new_collision_rule
 
 
-def compileAdvancedVelocitySetter(method, velocityArray, velocityRelaxationRate=0.8, pdfArr=None,
-                                  field_layout='numpy', optimization={}):
+def compile_advanced_velocity_setter(method, velocity_array, velocity_relaxation_rate=0.8, pdf_arr=None,
+                                     field_layout='numpy', optimization={}):
     """
     Advanced initialization of velocity field through iteration procedure according to
     Mei, Luo, Lallemand and Humieres: Consistent initial conditions for LBM simulations, 2005
 
     :param method:
-    :param velocityArray: array with velocity field
-    :param velocityRelaxationRate: relaxation rate for the velocity moments - determines convergence behaviour
+    :param velocity_array: array with velocity field
+    :param velocity_relaxation_rate: relaxation rate for the velocity moments - determines convergence behaviour
                                    of the initialization scheme
-    :param pdfArr: optional numpy array for pdf field - used to get optimal loop structure for kernel
-    :param field_layout: layout of the pdf field if pdfArr was not given
+    :param pdf_arr: optional numpy array for pdf field - used to get optimal loop structure for kernel
+    :param field_layout: layout of the pdf field if pdf_arr was not given
     :param optimization: dictionary with optimization hints
     :return: stream-collide update function
     """
     from lbmpy.updatekernels import create_stream_pull_collide_kernel
     from lbmpy.creationfunctions import create_lb_ast, create_lb_function
-    newCollisionRule = createAdvancedVelocitySetterCollisionRule(method, velocityArray, velocityRelaxationRate)
-    update_rule = create_stream_pull_collide_kernel(newCollisionRule, pdfArr, generic_layout=field_layout)
+    new_collision_rule = create_advanced_velocity_setter_collision_rule(method, velocity_array, velocity_relaxation_rate)
+    update_rule = create_stream_pull_collide_kernel(new_collision_rule, pdf_arr, generic_layout=field_layout)
     ast = create_lb_ast(update_rule, optimization)
     return create_lb_function(ast, optimization)
diff --git a/maxwellian_equilibrium.py b/maxwellian_equilibrium.py
index 07e0a7ed..7ac36b59 100644
--- a/maxwellian_equilibrium.py
+++ b/maxwellian_equilibrium.py
@@ -166,7 +166,7 @@ def get_moments_of_continuous_maxwellian_equilibrium(moments, dim, rho=sp.Symbol
 
     # trick to speed up sympy integration (otherwise it takes multiple minutes, or aborts):
     # use a positive, real symbol to represent c_s_sq -> then replace this symbol afterwards with the real c_s_sq
-    c_s_sq_helper = sp.Symbol("csqHelper", positive=True, real=True)
+    c_s_sq_helper = sp.Symbol("csq_helper", positive=True, real=True)
     mb = continuous_maxwellian_equilibrium(dim, rho, u, MOMENT_SYMBOLS[:dim], c_s_sq_helper)
     result = [continuous_moment(mb, moment, MOMENT_SYMBOLS[:dim]).subs(c_s_sq_helper, c_s_sq) for moment in moments]
     if order is not None:
@@ -227,7 +227,7 @@ def get_cumulants_of_continuous_maxwellian_equilibrium(cumulants, dim, rho=sp.Sy
 
     # trick to speed up sympy integration (otherwise it takes multiple minutes, or aborts):
     # use a positive, real symbol to represent c_s_sq -> then replace this symbol afterwards with the real c_s_sq
-    c_s_sq_helper = sp.Symbol("csqHelper", positive=True, real=True)
+    c_s_sq_helper = sp.Symbol("csq_helper", positive=True, real=True)
     mb = continuous_maxwellian_equilibrium(dim, rho, u, MOMENT_SYMBOLS[:dim], c_s_sq_helper)
     result = [continuous_cumulant(mb, cumulant, MOMENT_SYMBOLS[:dim]).subs(c_s_sq_helper, c_s_sq)
               for cumulant in cumulants]
diff --git a/methods/abstractlbmethod.py b/methods/abstractlbmethod.py
index c7d7bbad..12687314 100644
--- a/methods/abstractlbmethod.py
+++ b/methods/abstractlbmethod.py
@@ -4,7 +4,7 @@ from collections import namedtuple
 from pystencils.assignment_collection import AssignmentCollection
 
 
-RelaxationInfo = namedtuple('RelaxationInfo', ['equilibriumValue', 'relaxation_rate'])
+RelaxationInfo = namedtuple('RelaxationInfo', ['equilibrium_value', 'relaxation_rate'])
 
 
 class LbmCollisionRule(AssignmentCollection):
diff --git a/methods/conservedquantitycomputation.py b/methods/conservedquantitycomputation.py
index a40b1e9b..020548ab 100644
--- a/methods/conservedquantitycomputation.py
+++ b/methods/conservedquantitycomputation.py
@@ -135,7 +135,7 @@ class DensityVelocityComputation(AbstractConservedQuantityComputation):
         if self._compressible:
             eq_coll = divide_first_order_moments_by_rho(eq_coll, dim)
 
-        eq_coll = apply_force_model_shift('equilibriumVelocityShift', dim, eq_coll, self._forceModel, self._compressible)
+        eq_coll = apply_force_model_shift('equilibrium_velocity_shift', dim, eq_coll, self._forceModel, self._compressible)
         return eq_coll
 
     def equilibrium_input_equations_from_init_values(self, density=1, velocity=(0, 0, 0)):
@@ -202,7 +202,8 @@ class DensityVelocityComputation(AbstractConservedQuantityComputation):
             # but this is usually not the case
             momentum_density_output_symbols = output_quantity_names_to_symbols['momentum_density']
             mom_density_eq_coll = get_equations_for_zeroth_and_first_order_moment(self._stencil, pdfs,
-                                                                                  self._symbolOrder0, self._symbolsOrder1)
+                                                                                  self._symbolOrder0,
+                                                                                  self._symbolsOrder1)
             mom_density_eq_coll = apply_force_model_shift('macroscopic_velocity_shift', dim, mom_density_eq_coll,
                                                           self._forceModel, self._compressible)
             for sym, val in zip(momentum_density_output_symbols, mom_density_eq_coll.main_assignments[1:]):
@@ -311,8 +312,8 @@ def apply_force_model_shift(shift_member_name, dim, assignment_collection, force
         vel_offsets = shift_func(density)
         if reverse:
             vel_offsets = [-v for v in vel_offsets]
-        shifted_velocity_eqs = [Assignment(oldEq.lhs, oldEq.rhs + offset)
-                                for oldEq, offset in zip(old_vel_eqs, vel_offsets)]
+        shifted_velocity_eqs = [Assignment(old_eq.lhs, old_eq.rhs + offset)
+                                for old_eq, offset in zip(old_vel_eqs, vel_offsets)]
         new_eqs = [old_eqs[0]] + shifted_velocity_eqs + old_eqs[dim + 1:]
         return assignment_collection.copy(new_eqs)
     else:
diff --git a/methods/creationfunctions.py b/methods/creationfunctions.py
index 5c2399c9..7407da15 100644
--- a/methods/creationfunctions.py
+++ b/methods/creationfunctions.py
@@ -62,8 +62,8 @@ def create_with_discrete_maxwellian_eq_moments(stencil, moment_to_relaxation_rat
                                                                    c_s_sq=c_s_sq, compressible=compressible,
                                                                    order=equilibrium_order)
 
-    rr_dict = OrderedDict([(mom, RelaxationInfo(eqMom, rr))
-                           for mom, rr, eqMom in zip(mom_to_rr_dict.keys(), mom_to_rr_dict.values(), eq_values)])
+    rr_dict = OrderedDict([(mom, RelaxationInfo(eq_mom, rr))
+                           for mom, rr, eq_mom in zip(mom_to_rr_dict.keys(), mom_to_rr_dict.values(), eq_values)])
     if cumulant:
         return CumulantBasedLbMethod(stencil, rr_dict, density_velocity_computation, force_model)
     else:
@@ -100,8 +100,8 @@ def create_with_continuous_maxwellian_eq_moments(stencil, moment_to_relaxation_r
         u = density_velocity_computation.defined_symbols(order=1)[1]
         eq_values = [compressible_to_incompressible_moment_value(em, rho, u) for em in eq_values]
 
-    rr_dict = OrderedDict([(mom, RelaxationInfo(eqMom, rr))
-                          for mom, rr, eqMom in zip(mom_to_rr_dict.keys(), mom_to_rr_dict.values(), eq_values)])
+    rr_dict = OrderedDict([(mom, RelaxationInfo(eq_mom, rr))
+                          for mom, rr, eq_mom in zip(mom_to_rr_dict.keys(), mom_to_rr_dict.values(), eq_values)])
     if cumulant:
         return CumulantBasedLbMethod(stencil, rr_dict, density_velocity_computation, force_model)
     else:
@@ -123,9 +123,9 @@ def create_generic_mrt(stencil, moment_eq_value_relaxation_rate_tuples, compress
     density_velocity_computation = DensityVelocityComputation(stencil, compressible, force_model)
 
     rr_dict = OrderedDict()
-    for moment, eqValue, rr in moment_eq_value_relaxation_rate_tuples:
+    for moment, eq_value, rr in moment_eq_value_relaxation_rate_tuples:
         moment = sp.sympify(moment)
-        rr_dict[moment] = RelaxationInfo(eqValue, rr)
+        rr_dict[moment] = RelaxationInfo(eq_value, rr)
     if cumulant:
         return CumulantBasedLbMethod(stencil, rr_dict, density_velocity_computation, force_model)
     else:
@@ -248,7 +248,8 @@ def create_mrt3(stencil, relaxation_rates, maxwellian_moments=False, **kwargs):
     shear_tensor_trace = sum(shear_tensor_diagonal)
     shear_tensor_trace_free_diagonal = [dim * d - shear_tensor_trace for d in shear_tensor_diagonal]
 
-    rest = [defaultMoment for defaultMoment in get_default_moment_set_for_stencil(stencil) if get_order(defaultMoment) != 2]
+    rest = [default_moment for default_moment in get_default_moment_set_for_stencil(stencil)
+            if get_order(default_moment) != 2]
 
     d = shear_tensor_off_diagonal + shear_tensor_trace_free_diagonal[:-1]
     t = [shear_tensor_trace]
@@ -434,9 +435,9 @@ def create_mrt_orthogonal(stencil, relaxation_rate_getter=None, maxwellian_momen
         raise NotImplementedError("No MRT model is available (yet) for this stencil. "
                                   "Create a custom MRT using 'create_with_discrete_maxwellian_eq_moments'")
 
-    for momentList in nested_moments:
-        rr = relaxation_rate_getter(momentList)
-        for m in momentList:
+    for moment_list in nested_moments:
+        rr = relaxation_rate_getter(moment_list)
+        for m in moment_list:
             moment_to_relaxation_rate_dict[m] = rr
 
     if maxwellian_moments:
@@ -457,8 +458,8 @@ def compare_moment_based_lb_methods(reference, other, show_deviations_only=False
     reference_moments = set(reference.moments)
     other_moments = set(other.moments)
     for moment in reference_moments.intersection(other_moments):
-        reference_value = reference.relaxation_info_dict[moment].equilibriumValue
-        other_value = other.relaxation_info_dict[moment].equilibriumValue
+        reference_value = reference.relaxation_info_dict[moment].equilibrium_value
+        other_value = other.relaxation_info_dict[moment].equilibrium_value
         diff = sp.simplify(reference_value - other_value)
         if show_deviations_only and diff == 0:
             pass
@@ -473,7 +474,7 @@ def compare_moment_based_lb_methods(reference, other, show_deviations_only=False
         caption_rows.append(len(table))
         table.append(['Only in Ref', 'value', '', ''])
         for moment in only_in_ref:
-            val = reference.relaxation_info_dict[moment].equilibriumValue
+            val = reference.relaxation_info_dict[moment].equilibrium_value
             table.append(["$%s$" % (sp.latex(moment),),
                           "$%s$" % (sp.latex(val),),
                           " ", " "])
@@ -483,7 +484,7 @@ def compare_moment_based_lb_methods(reference, other, show_deviations_only=False
         caption_rows.append(len(table))
         table.append(['Only in Other', '', 'value', ''])
         for moment in only_in_other:
-            val = other.relaxation_info_dict[moment].equilibriumValue
+            val = other.relaxation_info_dict[moment].equilibrium_value
             table.append(["$%s$" % (sp.latex(moment),),
                           " ",
                           "$%s$" % (sp.latex(val),),
diff --git a/methods/cumulantbased.py b/methods/cumulantbased.py
index b340ffbf..bbe3bc84 100644
--- a/methods/cumulantbased.py
+++ b/methods/cumulantbased.py
@@ -50,7 +50,7 @@ class CumulantBasedLbMethod(AbstractLbMethod):
 
     @property
     def cumulant_equilibrium_values(self):
-        return tuple([e.equilibriumValue for e in self._cumulant_to_relaxation_info_dict.values()])
+        return tuple([e.equilibrium_value for e in self._cumulant_to_relaxation_info_dict.values()])
 
     @property
     def relaxation_rates(self):
@@ -82,16 +82,16 @@ class CumulantBasedLbMethod(AbstractLbMethod):
         </table>
         """
         content = ""
-        for cumulant, (eqValue, rr) in self._cumulant_to_relaxation_info_dict.items():
+        for cumulant, (eq_value, rr) in self._cumulant_to_relaxation_info_dict.items():
             vals = {
                 'rr': sp.latex(rr),
                 'cumulant': sp.latex(cumulant),
-                'eqValue': sp.latex(eqValue),
+                'eq_value': sp.latex(eq_value),
                 'nb': 'style="border:none"',
             }
             content += """<tr {nb}>
                             <td {nb}>${cumulant}$</td>
-                            <td {nb}>${eqValue}$</td>
+                            <td {nb}>${eq_value}$</td>
                             <td {nb}>${rr}$</td>
                          </tr>\n""".format(**vals)
         return table.format(content=content, nb='style="border:none"')
@@ -176,7 +176,7 @@ class CumulantBasedLbMethod(AbstractLbMethod):
         monomial_cumulants = [cumulant_as_function_of_raw_moments(idx, moments_dict) for idx in indices]
 
         if pre_collision_subexpressions:
-            symbols = [tuple_to_symbol(t, "preC") for t in higher_order_indices]
+            symbols = [tuple_to_symbol(t, "pre_c") for t in higher_order_indices]
             subexpressions += [Assignment(sym, c)
                                for sym, c in zip(symbols, monomial_cumulants[num_lower_order_indices:])]
             monomial_cumulants = monomial_cumulants[:num_lower_order_indices] + symbols
@@ -189,7 +189,7 @@ class CumulantBasedLbMethod(AbstractLbMethod):
         relaxed_monomial_cumulants = mon_to_poly.inv() * collided_poly_values
 
         if post_collision_subexpressions:
-            symbols = [tuple_to_symbol(t, "postC") for t in higher_order_indices]
+            symbols = [tuple_to_symbol(t, "post_c") for t in higher_order_indices]
             subexpressions += [Assignment(sym, c)
                                for sym, c in zip(symbols, relaxed_monomial_cumulants[num_lower_order_indices:])]
             relaxed_monomial_cumulants = relaxed_monomial_cumulants[:num_lower_order_indices] + symbols
@@ -204,11 +204,11 @@ class CumulantBasedLbMethod(AbstractLbMethod):
         if self._force_model is not None and include_force_terms:
             force_model_terms = self._force_model(self)
             force_term_symbols = sp.symbols("forceTerm_:%d" % (len(force_model_terms,)))
-            force_subexpressions = [Assignment(sym, forceModelTerm)
-                                    for sym, forceModelTerm in zip(force_term_symbols, force_model_terms)]
+            force_subexpressions = [Assignment(sym, force_model_term)
+                                    for sym, force_model_term in zip(force_term_symbols, force_model_terms)]
             subexpressions += force_subexpressions
-            main_assignments = [Assignment(eq.lhs, eq.rhs + forceTermSymbol)
-                                for eq, forceTermSymbol in zip(main_assignments, force_term_symbols)]
+            main_assignments = [Assignment(eq.lhs, eq.rhs + force_term_symbol)
+                                for eq, force_term_symbol in zip(main_assignments, force_term_symbols)]
 
         sh = {'relaxation_rates': list(self.relaxation_rates)}
         return LbmCollisionRule(self, main_assignments, subexpressions, simplification_hints=sh)
diff --git a/methods/entropic.py b/methods/entropic.py
index 58b1d0ed..7569848a 100644
--- a/methods/entropic.py
+++ b/methods/entropic.py
@@ -58,9 +58,9 @@ def add_entropy_condition(collision_rule, omega_output_field=None):
     new_update_equations = []
 
     const_part = decomposition.constant_exprs()
-    for updateEq in collision_rule.main_assignments:
-        index = collision_rule.method.post_collision_pdf_symbols.index(updateEq.lhs)
-        new_eq = Assignment(updateEq.lhs, const_part[index] + omega_s * ds_symbols[index] + omega_h * dh_symbols[index])
+    for update_eq in collision_rule.main_assignments:
+        index = collision_rule.method.post_collision_pdf_symbols.index(update_eq.lhs)
+        new_eq = Assignment(update_eq.lhs, const_part[index] + omega_s * ds_symbols[index] + omega_h * dh_symbols[index])
         new_update_equations.append(new_eq)
     new_collision_rule = collision_rule.copy(new_update_equations, collision_rule.subexpressions + subexpressions)
     new_collision_rule.simplification_hints['entropic'] = True
@@ -102,8 +102,8 @@ def add_iterative_entropy_condition(collision_rule, free_omega=None, newton_iter
     # 1) decompose into constant + free_omega * ent1 + free_omega**2 * ent2
     polynomial_subexpressions = []
     rr_polynomials = []
-    for i, constantExpr in enumerate(decomposition.constant_exprs()):
-        constant_expr_eq = Assignment(decomposition.symbolic_constant_expr(i), constantExpr)
+    for i, constant_expr in enumerate(decomposition.constant_exprs()):
+        constant_expr_eq = Assignment(decomposition.symbolic_constant_expr(i), constant_expr)
         polynomial_subexpressions.append(constant_expr_eq)
         rr_polynomial = constant_expr_eq.lhs
 
@@ -209,9 +209,9 @@ class RelaxationRatePolynomialDecomposition(object):
         update_equations = self._collisionRule.main_assignments
 
         result = []
-        for updateEquation in update_equations:
+        for update_equation in update_equations:
             factors = []
-            rhs = updateEquation.rhs
+            rhs = update_equation.rhs
             power = 0
             while True:
                 power += 1
diff --git a/methods/entropic_eq_srt.py b/methods/entropic_eq_srt.py
index 07967e3c..eb2116b5 100644
--- a/methods/entropic_eq_srt.py
+++ b/methods/entropic_eq_srt.py
@@ -13,7 +13,7 @@ class EntropicEquilibriumSRT(AbstractLbMethod):
         self._weights = get_weights(stencil, c_s_sq=sp.Rational(1, 3))
         self._relaxationRate = relaxation_rate
         self._forceModel = force_model
-        self.shearRelaxationRate = relaxation_rate
+        self.shear_relaxation_rate = relaxation_rate
 
     @property
     def conserved_quantity_computation(self):
@@ -60,11 +60,11 @@ class EntropicEquilibriumSRT(AbstractLbMethod):
         if (self._forceModel is not None) and include_force_terms:
             force_model_terms = self._forceModel(self)
             force_term_symbols = sp.symbols("forceTerm_:%d" % (len(force_model_terms, )))
-            force_subexpressions = [Assignment(sym, forceModelTerm)
-                                    for sym, forceModelTerm in zip(force_term_symbols, force_model_terms)]
+            force_subexpressions = [Assignment(sym, force_model_term)
+                                    for sym, force_model_term in zip(force_term_symbols, force_model_terms)]
             all_subexpressions += force_subexpressions
-            collision_eqs = [Assignment(eq.lhs, eq.rhs + forceTermSymbol)
-                             for eq, forceTermSymbol in zip(collision_eqs, force_term_symbols)]
+            collision_eqs = [Assignment(eq.lhs, eq.rhs + force_term_symbol)
+                             for eq, force_term_symbol in zip(collision_eqs, force_term_symbols)]
 
         return LbmCollisionRule(self, collision_eqs, all_subexpressions)
 
diff --git a/methods/momentbased.py b/methods/momentbased.py
index fe91f4bc..42c545c3 100644
--- a/methods/momentbased.py
+++ b/methods/momentbased.py
@@ -50,7 +50,7 @@ class MomentBasedLbMethod(AbstractLbMethod):
 
     @property
     def moment_equilibrium_values(self):
-        return tuple([e.equilibriumValue for e in self._momentToRelaxationInfoDict.values()])
+        return tuple([e.equilibrium_value for e in self._momentToRelaxationInfoDict.values()])
 
     @property
     def relaxation_rates(self):
@@ -138,16 +138,16 @@ class MomentBasedLbMethod(AbstractLbMethod):
         </table>
         """
         content = ""
-        for moment, (eqValue, rr) in self._momentToRelaxationInfoDict.items():
+        for moment, (eq_value, rr) in self._momentToRelaxationInfoDict.items():
             vals = {
                 'rr': sp.latex(rr),
                 'moment': sp.latex(moment),
-                'eqValue': sp.latex(eqValue),
+                'eq_value': sp.latex(eq_value),
                 'nb': 'style="border:none"',
             }
             content += """<tr {nb}>
                             <td {nb}>${moment}$</td>
-                            <td {nb}>${eqValue}$</td>
+                            <td {nb}>${eq_value}$</td>
                             <td {nb}>${rr}$</td>
                          </tr>\n""".format(**vals)
         return table.format(content=content, nb='style="border:none"')
@@ -191,12 +191,12 @@ class MomentBasedLbMethod(AbstractLbMethod):
         if self._forceModel is not None and include_force_terms:
             force_model_terms = self._forceModel(self)
             force_term_symbols = sp.symbols("forceTerm_:%d" % (len(force_model_terms,)))
-            force_subexpressions = [Assignment(sym, forceModelTerm)
-                                    for sym, forceModelTerm in zip(force_term_symbols, force_model_terms)]
+            force_subexpressions = [Assignment(sym, force_model_term)
+                                    for sym, force_model_term in zip(force_term_symbols, force_model_terms)]
             all_subexpressions += force_subexpressions
-            collision_eqs = [Assignment(eq.lhs, eq.rhs + forceTermSymbol)
-                             for eq, forceTermSymbol in zip(collision_eqs, force_term_symbols)]
-            simplification_hints['forceTerms'] = force_term_symbols
+            collision_eqs = [Assignment(eq.lhs, eq.rhs + force_term_symbol)
+                             for eq, force_term_symbol in zip(collision_eqs, force_term_symbols)]
+            simplification_hints['force_terms'] = force_term_symbols
 
         return LbmCollisionRule(self, collision_eqs, all_subexpressions,
                                 simplification_hints)
diff --git a/methods/momentbasedsimplifications.py b/methods/momentbasedsimplifications.py
index f283df62..10bbd488 100644
--- a/methods/momentbasedsimplifications.py
+++ b/methods/momentbasedsimplifications.py
@@ -55,7 +55,7 @@ def factor_relaxation_rates(cr: LbmCollisionRule):
 def factor_density_after_factoring_relaxation_times(cr: LbmCollisionRule):
     """
     Tries to factor out the density. This only works if previously
-    :func:`lbmpy.methods.momentbasedsimplifications.factorRelaxationTimes` was run.
+    :func:`lbmpy.methods.momentbasedsimplifications.factor_relaxation_rates` was run.
 
     This transformation only makes sense for compressible models - for incompressible models it does nothing
 
@@ -198,13 +198,13 @@ def cse_in_opposing_directions(cr: LbmCollisionRule):
                                                          order='None', optimizations=[])
                 substitutions += [Assignment(f[0], f[1]) for f in found_subexpressions]
 
-                update_rules = [Assignment(ur.lhs, ur.rhs.subs(relaxation_rate * oldTerm, new_coefficient * newTerm))
-                                for ur, newTerm, oldTerm in zip(update_rules, new_terms, terms)]
+                update_rules = [Assignment(ur.lhs, ur.rhs.subs(relaxation_rate * old_term, new_coefficient * new_term))
+                                for ur, new_term, old_term in zip(update_rules, new_terms, terms)]
 
         result += update_rules
 
-    for term, substitutedVar in new_coefficient_substitutions.items():
-        substitutions.append(Assignment(substitutedVar, term))
+    for term, substituted_var in new_coefficient_substitutions.items():
+        substitutions.append(Assignment(substituted_var, term))
 
     result.sort(key=lambda e: cr.method.post_collision_pdf_symbols.index(e.lhs))
     res = cr.copy(result)
@@ -233,8 +233,8 @@ def __get_common_quadratic_and_constant_terms(cr: LbmCollisionRule):
     for fa in pdf_symbols:
         t = t.subs(fa, 0)
 
-    if 'forceTerms' in sh:
-        t = t.subs({ft: 0 for ft in sh['forceTerms']})
+    if 'force_terms' in sh:
+        t = t.subs({ft: 0 for ft in sh['force_terms']})
 
     weight = t
 
diff --git a/moments.py b/moments.py
index a00e676b..db8f2dc5 100644
--- a/moments.py
+++ b/moments.py
@@ -27,7 +27,7 @@ Example ::
 
     >>> from lbmpy.moments import MOMENT_SYMBOLS
     >>> x, y, z = MOMENT_SYMBOLS
-    >>> secondOrderMoment = x*y + y*z
+    >>> second_order_moment = x*y + y*z
 
 
 Functions
@@ -122,8 +122,8 @@ def exponent_to_polynomial_representation(exponent_tuple):
         x**2*y*z**3
     """
     poly = 1
-    for sym, tupleEntry in zip(MOMENT_SYMBOLS[:len(exponent_tuple)], exponent_tuple):
-        poly *= sym ** tupleEntry
+    for sym, tuple_entry in zip(MOMENT_SYMBOLS[:len(exponent_tuple)], exponent_tuple):
+        poly *= sym ** tuple_entry
     return poly
 
 
@@ -277,10 +277,10 @@ def is_shear_moment(moment):
     """Shear moments in 3D are: x*y, x*z and y*z - in 2D its only x*y"""
     if type(moment) is tuple:
         moment = exponent_to_polynomial_representation(moment)
-    return moment in is_shear_moment.shearMoments
+    return moment in is_shear_moment.shear_moments
 
 
-is_shear_moment.shearMoments = set([c[0] * c[1] for c in itertools.combinations(MOMENT_SYMBOLS, 2)])
+is_shear_moment.shear_moments = set([c[0] * c[1] for c in itertools.combinations(MOMENT_SYMBOLS, 2)])
 
 
 @memorycache(maxsize=512)
@@ -325,14 +325,14 @@ def moment_matrix(moments, stencil):
     if type(moments[0]) is tuple:
         def generator(row, column):
             result = sp.Rational(1, 1)
-            for exponent, stencilEntry in zip(moments[row], stencil[column]):
-                result *= int(stencilEntry ** exponent)
+            for exponent, stencil_entry in zip(moments[row], stencil[column]):
+                result *= int(stencil_entry ** exponent)
             return result
     else:
         def generator(row, column):
             evaluated = moments[row]
-            for var, stencilEntry in zip(MOMENT_SYMBOLS, stencil[column]):
-                evaluated = evaluated.subs(var, stencilEntry)
+            for var, stencil_entry in zip(MOMENT_SYMBOLS, stencil[column]):
+                evaluated = evaluated.subs(var, stencil_entry)
             return evaluated
 
     return sp.Matrix(len(moments), len(stencil), generator)
@@ -425,8 +425,8 @@ def extract_monomials(sequence_of_polynomials, dim=3):
     """
     monomials = set()
     for polynomial in sequence_of_polynomials:
-        for factor, exponentTuple in polynomial_to_exponent_representation(polynomial):
-            monomials.add(exponentTuple[:dim])
+        for factor, exponent_tuple in polynomial_to_exponent_representation(polynomial):
+            monomials.add(exponent_tuple[:dim])
     return monomials
 
 
@@ -448,10 +448,10 @@ def monomial_to_polynomial_transformation_matrix(monomials, polynomials):
     dim = len(monomials[0])
 
     result = sp.zeros(len(polynomials), len(monomials))
-    for polynomialIdx, polynomial in enumerate(polynomials):
+    for polynomial_idx, polynomial in enumerate(polynomials):
         for factor, exponent_tuple in polynomial_to_exponent_representation(polynomial):
             exponent_tuple = exponent_tuple[:dim]
-            result[polynomialIdx, monomials.index(exponent_tuple)] = factor
+            result[polynomial_idx, monomials.index(exponent_tuple)] = factor
     return result
 
 
@@ -501,7 +501,7 @@ def moment_equality_table(stencil, discrete_equilibrium=None, continuous_equilib
     for order, moments in enumerate(moments_list):
         row = [' '] * nr_of_columns
         row[0] = '%d' % (order,)
-        for moment, colIdx in zip(moments, range(1, len(row))):
+        for moment, col_idx in zip(moments, range(1, len(row))):
             multiplicity = moment_multiplicity(moment)
             dm = discrete_moment(discrete_equilibrium, moment, stencil)
             cm = continuous_moment(continuous_equilibrium, moment, symbols=sp.symbols("v_0 v_1 v_2")[:dim])
@@ -509,20 +509,20 @@ def moment_equality_table(stencil, discrete_equilibrium=None, continuous_equilib
             if truncate_order:
                 difference = sp.simplify(remove_higher_order_terms(difference, symbols=u, order=truncate_order))
             if difference != 0:
-                colors[(order + 1, colIdx)] = 'Orange'
+                colors[(order + 1, col_idx)] = 'Orange'
                 non_matched_moments += multiplicity
             else:
-                colors[(order + 1, colIdx)] = 'lightGreen'
+                colors[(order + 1, col_idx)] = 'lightGreen'
                 matched_moments += multiplicity
 
-            row[colIdx] = '%s  x %d' % (moment, moment_multiplicity(moment))
+            row[col_idx] = '%s  x %d' % (moment, moment_multiplicity(moment))
 
         table.append(row)
 
     table_display = ipy_table.make_table(table)
     ipy_table.set_row_style(0, color='#ddd')
-    for cellIdx, color in colors.items():
-        ipy_table.set_cell_style(cellIdx[0], cellIdx[1], color=color)
+    for cell_idx, color in colors.items():
+        ipy_table.set_cell_style(cell_idx[0], cell_idx[1], color=color)
 
     print("Matched moments %d - non matched moments %d - total %d" %
           (matched_moments, non_matched_moments, matched_moments + non_matched_moments))
@@ -554,21 +554,21 @@ def moment_equality_table_by_stencil(name_to_stencil_dict, moments, truncate_ord
     moments = list(pick_representative_moments(moments))
 
     colors = {}
-    for stencilIdx, stencil in enumerate(stencils):
+    for stencil_idx, stencil in enumerate(stencils):
         dim = len(stencil[0])
         u = sp.symbols(f"u_:{dim}")
         discrete_equilibrium = discrete_maxwellian_equilibrium(stencil, c_s_sq=sp.Rational(1, 3), compressible=True,
                                                                u=u, order=truncate_order)
         continuous_equilibrium = continuous_maxwellian_equilibrium(dim=dim, u=u, c_s_sq=sp.Rational(1, 3))
 
-        for momentIdx, moment in enumerate(moments):
+        for moment_idx, moment in enumerate(moments):
             moment = moment[:dim]
             dm = discrete_moment(discrete_equilibrium, moment, stencil)
             cm = continuous_moment(continuous_equilibrium, moment, symbols=sp.symbols("v_0 v_1 v_2")[:dim])
             difference = sp.simplify(dm - cm)
             if truncate_order:
                 difference = sp.simplify(remove_higher_order_terms(difference, symbols=u, order=truncate_order))
-            colors[(momentIdx + 1, stencilIdx + 2)] = 'Orange' if difference != 0 else 'lightGreen'
+            colors[(moment_idx + 1, stencil_idx + 2)] = 'Orange' if difference != 0 else 'lightGreen'
 
     table = []
     header_row = [' ', '#'] + stencil_names
@@ -579,8 +579,8 @@ def moment_equality_table_by_stencil(name_to_stencil_dict, moments, truncate_ord
 
     table_display = ipy_table.make_table(table)
     ipy_table.set_row_style(0, color='#ddd')
-    for cellIdx, color in colors.items():
-        ipy_table.set_cell_style(cellIdx[0], cellIdx[1], color=color)
+    for cell_idx, color in colors.items():
+        ipy_table.set_cell_style(cell_idx[0], cell_idx[1], color=color)
 
     return table_display
 
diff --git a/parameterization.py b/parameterization.py
index aa43f280..5cb3ad25 100644
--- a/parameterization.py
+++ b/parameterization.py
@@ -6,9 +6,9 @@ from lbmpy.relaxationrates import relaxation_rate_from_lattice_viscosity, lattic
 
 class ScalingWidget:
     def __init__(self):
-        self.scalingType = Select(options=[r'diffusive (fixed relaxation rate)',
-                                           r'acoustic (fixed dt)',
-                                           r'fixed lattice velocity'])
+        self.scaling_type = Select(options=[r'diffusive (fixed relaxation rate)',
+                                            r'acoustic (fixed dt)',
+                                            r'fixed lattice velocity'])
         self.physical_length = FloatText(value=0.1)
         self.cells_per_length = FloatText(value=256.0)
         self.max_physical_velocity = FloatText(value=0.01)
@@ -19,7 +19,7 @@ class ScalingWidget:
         self.max_lattice_velocity = FloatText(disabled=True)
         self.re = FloatText(disabled=True)
 
-        self.processingUpdate = False
+        self.processing_update = False
 
         def make_label(text):
             label_layout = {'width': '200px'}
@@ -44,7 +44,7 @@ class ScalingWidget:
             return [half_btn, double_btn]
 
         self.form = VBox([
-            HBox([make_label(r'Scaling'), self.scalingType]),
+            HBox([make_label(r'Scaling'), self.scaling_type]),
             HBox([make_label(r"Physical Length $[m]$"), self.physical_length]),
             HBox([make_label(r"Max. velocity $[m/s]$"), self.max_physical_velocity]),
             HBox([make_label(r"Cells per length"), self.cells_per_length] + make_buttons(self.cells_per_length, True)),
@@ -64,7 +64,7 @@ class ScalingWidget:
         self.kinematic_viscosity.observe(self._update_re)
         self.max_physical_velocity.observe(self._update_re)
 
-        for obj in [self.scalingType, self.kinematic_viscosity, self.omega, self.dt, self.max_lattice_velocity]:
+        for obj in [self.scaling_type, self.kinematic_viscosity, self.omega, self.dt, self.max_lattice_velocity]:
             obj.observe(self._update_free_parameter, names='value')
 
         self._update_free_parameter()
@@ -95,15 +95,15 @@ class ScalingWidget:
         self.omega.disabled = True
         self.max_lattice_velocity.disabled = True
 
-        if self.scalingType.value == r'diffusive (fixed relaxation rate)':
+        if self.scaling_type.value == r'diffusive (fixed relaxation rate)':
             self.omega.disabled = False
             self._update_dt_from_relaxation_rate_viscosity_and_dx()
             self._update_lattice_velocity_from_dx_dt_and_physical_velocity()
-        elif self.scalingType.value == r'acoustic (fixed dt)':
+        elif self.scaling_type.value == r'acoustic (fixed dt)':
             self._update_omega_from_viscosity_and_dt_dx()
             self._update_dt_from_dx_and_lattice_velocity()
             self.max_lattice_velocity.disabled = False
-        elif self.scalingType.value == r'fixed lattice velocity':
+        elif self.scaling_type.value == r'fixed lattice velocity':
             self._update_omega_from_viscosity_and_dt_dx()
             self.dt.disabled = False
             self._update_lattice_velocity_from_dx_dt_and_physical_velocity()
@@ -124,24 +124,24 @@ class ScalingWidget:
         self.re.value = round(L * u / nu, 7)
 
     def _on_dx_change(self, _):
-        if self.processingUpdate:
+        if self.processing_update:
             return
         if self.dx.value == 0:
             return
-        self.processingUpdate = True
+        self.processing_update = True
         self.cells_per_length.value = self.physical_length.value / self.dx.value
         self._update_free_parameter()
-        self.processingUpdate = False
+        self.processing_update = False
 
     def _on_cells_per_length_change(self, _):
-        if self.processingUpdate:
+        if self.processing_update:
             return
         if self.cells_per_length.value == 0:
             return
-        self.processingUpdate = True
+        self.processing_update = True
         self.dx.value = self.physical_length.value / self.cells_per_length.value
         self._update_free_parameter()
-        self.processingUpdate = False
+        self.processing_update = False
 
     def _on_physical_length_change(self, _):
         if self.cells_per_length.value == 0:
diff --git a/phasefield/analytical.py b/phasefield/analytical.py
index d91600c7..0245b0ca 100644
--- a/phasefield/analytical.py
+++ b/phasefield/analytical.py
@@ -4,8 +4,8 @@ from collections import defaultdict
 from pystencils.sympyextensions import multidimensional_sum as multi_sum, normalize_product, prod
 from pystencils.derivative import functional_derivative, expand_using_linearity, Diff, full_diff_expand
 
-orderParameterSymbolName = "phi"
-surfaceTensionSymbolName = "tau"
+order_parameter_symbol_name = "phi"
+surface_tension_symbol_name = "tau"
 interface_width_symbol = sp.Symbol("alpha")
 
 
@@ -15,11 +15,11 @@ def symmetric_symbolic_surface_tension(i, j):
     if i == j:
         return 0
     index = (i, j) if i < j else (j, i)
-    return sp.Symbol("%s_%d_%d" % ((surfaceTensionSymbolName, ) + index))
+    return sp.Symbol("%s_%d_%d" % ((surface_tension_symbol_name,) + index))
 
 
 def symbolic_order_parameters(num_symbols):
-    return sp.symbols("%s_:%i" % (orderParameterSymbolName, num_symbols))
+    return sp.symbols("%s_:%i" % (order_parameter_symbol_name, num_symbols))
 
 
 def free_energy_functional_3_phases(order_parameters=None, interface_width=interface_width_symbol, transformed=True,
@@ -109,7 +109,7 @@ def free_energy_functional_n_phases(num_phases=None, surface_tensions=symmetric_
     :param include_bulk: if false no bulk term is added
     :param include_interface: if false no interface contribution is added
     :param symbolic_lambda: surface energy coefficient is represented by symbol, not in expanded form
-    :param symbolic_dependent_variable: last phase variable is defined as 1-otherPhaseVars, if this is set to True
+    :param symbolic_dependent_variable: last phase variable is defined as 1-other_phase_vars; if this is set to True
                                       it is represented by phi_A for better readability
     """
     assert not (num_phases is None and order_parameters is None)
@@ -177,14 +177,14 @@ def analytic_interface_profile(x, interface_width=interface_width_symbol):
     parameter, i.e. at a transition between two phases.
 
     Examples:
-        >>> numPhases = 4
-        >>> x, phi = sp.Symbol("x"), symbolic_order_parameters(numPhases-1)
+        >>> num_phases = 4
+        >>> x, phi = sp.Symbol("x"), symbolic_order_parameters(num_phases-1)
         >>> F = free_energy_functional_n_phases(order_parameters=phi)
         >>> mu = chemical_potentials_from_free_energy(F)
         >>> mu0 = mu[0].subs({p: 0 for p in phi[1:]})  # mu[0] as function of one order parameter only
         >>> solution = analytic_interface_profile(x)
-        >>> solutionSubstitution = {phi[0]: solution, Diff(Diff(phi[0])): sp.diff(solution, x, x) }
-        >>> sp.expand(mu0.subs(solutionSubstitution))  # inserting solution should solve the mu_0=0 equation
+        >>> solution_substitution = {phi[0]: solution, Diff(Diff(phi[0])): sp.diff(solution, x, x) }
+        >>> sp.expand(mu0.subs(solution_substitution))  # inserting solution should solve the mu_0=0 equation
         0
     """
     return (1 + sp.tanh(x / (2 * interface_width))) / 2
@@ -194,7 +194,7 @@ def chemical_potentials_from_free_energy(free_energy, order_parameters=None):
     """Computes chemical potentials as functional derivative of free energy."""
     symbols = free_energy.atoms(sp.Symbol)
     if order_parameters is None:
-        order_parameters = [s for s in symbols if s.name.startswith(orderParameterSymbolName)]
+        order_parameters = [s for s in symbols if s.name.startswith(order_parameter_symbol_name)]
         order_parameters.sort(key=lambda e: e.name)
         order_parameters = order_parameters[:-1]
     constants = [s for s in symbols if s not in order_parameters]
diff --git a/phasefield/kerneleqs.py b/phasefield/kerneleqs.py
index 41513167..8d84d7f2 100644
--- a/phasefield/kerneleqs.py
+++ b/phasefield/kerneleqs.py
@@ -35,8 +35,8 @@ def pressure_tensor_kernel(free_energy, order_parameters, phi_field, pressure_te
     index_map = symmetric_tensor_linearization(dim)
     discretize = Discretization2ndOrder(dx=dx)
     eqs = []
-    for index, linIndex in index_map.items():
-        eq = Assignment(pressure_tensor_field(linIndex), discretize(p[index]).expand())
+    for index, lin_index in index_map.items():
+        eq = Assignment(pressure_tensor_field(lin_index), discretize(p[index]).expand())
         eqs.append(eq)
     return eqs
 
@@ -67,14 +67,14 @@ class CahnHilliardFDStep:
     def __init__(self, data_handling, phi_field_name, mu_field_name, velocity_field_name, name='ch_fd', target='cpu',
                  dx=1, dt=1, mobilities=1, equation_modifier=lambda eqs: eqs):
         from pystencils import create_kernel
-        self.dataHandling = data_handling
+        self.data_handling = data_handling
 
-        mu_field = self.dataHandling.fields[mu_field_name]
-        vel_field = self.dataHandling.fields[velocity_field_name]
-        self.phi_field = self.dataHandling.fields[phi_field_name]
-        self.tmp_field = self.dataHandling.add_array_like(name + '_tmp', phi_field_name, latex_name='tmp')
+        mu_field = self.data_handling.fields[mu_field_name]
+        vel_field = self.data_handling.fields[velocity_field_name]
+        self.phi_field = self.data_handling.fields[phi_field_name]
+        self.tmp_field = self.data_handling.add_array_like(name + '_tmp', phi_field_name, latex_name='tmp')
 
-        num_phases = self.dataHandling.values_per_cell(phi_field_name)
+        num_phases = self.data_handling.values_per_cell(phi_field_name)
         if not hasattr(mobilities, '__len__'):
             mobilities = [mobilities] * num_phases
 
@@ -85,13 +85,13 @@ class CahnHilliardFDStep:
         self.update_eqs = update_eqs
         self.update_eqs = equation_modifier(update_eqs)
         self.kernel = create_kernel(self.update_eqs, target=target).compile()
-        self.sync = self.dataHandling.synchronization_function([phi_field_name, velocity_field_name, mu_field_name],
-                                                               target=target)
+        self.sync = self.data_handling.synchronization_function([phi_field_name, velocity_field_name, mu_field_name],
+                                                                target=target)
 
     def time_step(self, **kwargs):
         self.sync()
-        self.dataHandling.run_kernel(self.kernel, **kwargs)
-        self.dataHandling.swap(self.phi_field.name, self.tmp_field.name)
+        self.data_handling.run_kernel(self.kernel, **kwargs)
+        self.data_handling.swap(self.phi_field.name, self.tmp_field.name)
 
     def set_pdf_fields_from_macroscopic_values(self):
         pass
diff --git a/phasefield/phasefieldstep.py b/phasefield/phasefieldstep.py
index 809fca16..33b894ed 100644
--- a/phasefield/phasefieldstep.py
+++ b/phasefield/phasefieldstep.py
@@ -62,7 +62,7 @@ class PhaseFieldStep:
         self.force_field = dh.add_array(self.force_field_name, values_per_cell=dh.dim, gpu=gpu, latex_name="F")
         self.pressure_tensor_field = data_handling.add_array(self.pressure_tensor_field_name,
                                                              values_per_cell=pressure_tensor_size, latex_name='P')
-        self.flagInterface = FlagInterface(data_handling, 'flags')
+        self.flag_interface = FlagInterface(data_handling, 'flags')
 
         # ------------------ Creating kernels ------------------
         phi = tuple(self.phi_field(i) for i in range(len(order_parameters)))
@@ -73,33 +73,33 @@ class PhaseFieldStep:
                 fields = [data_handling.fields[self.phi_field_name],
                           data_handling.fields[self.pressure_tensor_field_name],
                           ]
-                flag_field = data_handling.fields[self.flagInterface.flag_field_name]
-                return add_neumann_boundary(eqs, fields, flag_field, "neumannFlag", inverse_flag=False)
+                flag_field = data_handling.fields[self.flag_interface.flag_field_name]
+                return add_neumann_boundary(eqs, fields, flag_field, "neumann_flag", inverse_flag=False)
         else:
             def apply_neumann_boundaries(eqs):
                 return eqs
 
         # μ and pressure tensor update
-        self.phiSync = data_handling.synchronization_function([self.phi_field_name], target=target)
-        self.muEqs = mu_kernel(F, phi, self.phi_field, self.mu_field, dx)
-        self.pressureTensorEqs = pressure_tensor_kernel(self.free_energy, order_parameters,
-                                                        self.phi_field, self.pressure_tensor_field, dx)
-        mu_and_pressure_tensor_eqs = self.muEqs + self.pressureTensorEqs
+        self.phi_sync = data_handling.synchronization_function([self.phi_field_name], target=target)
+        self.mu_eqs = mu_kernel(F, phi, self.phi_field, self.mu_field, dx)
+        self.pressure_tensor_eqs = pressure_tensor_kernel(self.free_energy, order_parameters,
+                                                          self.phi_field, self.pressure_tensor_field, dx)
+        mu_and_pressure_tensor_eqs = self.mu_eqs + self.pressure_tensor_eqs
         mu_and_pressure_tensor_eqs = apply_neumann_boundaries(mu_and_pressure_tensor_eqs)
-        self.muAndPressureTensorKernel = create_kernel(sympy_cse_on_assignment_list(mu_and_pressure_tensor_eqs),
-                                                       target=target, cpu_openmp=openmp).compile()
+        self.mu_and_pressure_tensor_kernel = create_kernel(sympy_cse_on_assignment_list(mu_and_pressure_tensor_eqs),
+                                                           target=target, cpu_openmp=openmp).compile()
 
         # F Kernel
         extra_force = sp.Matrix([0] * self.data_handling.dim)
         if order_parameter_force is not None:
-            for orderParameterIdx, force in order_parameter_force.items():
-                extra_force += self.phi_field(orderParameterIdx) * sp.Matrix(force)
-        self.forceEqs = force_kernel_using_pressure_tensor(self.force_field, self.pressure_tensor_field, dx=dx,
-                                                           extra_force=extra_force)
-        self.forceFromPressureTensorKernel = create_kernel(apply_neumann_boundaries(self.forceEqs),
-                                                           target=target, cpu_openmp=openmp).compile()
-        self.pressureTensorSync = data_handling.synchronization_function([self.pressure_tensor_field_name],
-                                                                         target=target)
+            for order_parameter_idx, force in order_parameter_force.items():
+                extra_force += self.phi_field(order_parameter_idx) * sp.Matrix(force)
+        self.force_eqs = force_kernel_using_pressure_tensor(self.force_field, self.pressure_tensor_field, dx=dx,
+                                                            extra_force=extra_force)
+        self.force_from_pressure_tensor_kernel = create_kernel(apply_neumann_boundaries(self.force_eqs),
+                                                               target=target, cpu_openmp=openmp).compile()
+        self.pressure_tensor_sync = data_handling.synchronization_function([self.pressure_tensor_field_name],
+                                                                           target=target)
 
         hydro_lbm_parameters = hydro_lbm_parameters.copy()
         # Hydrodynamic LBM
@@ -114,19 +114,19 @@ class PhaseFieldStep:
         else:
             hydro_lbm_parameters['optimization'].update(optimization)
 
-        self.hydroLbmStep = LatticeBoltzmannStep(data_handling=data_handling, name=name + '_hydroLBM',
-                                                 relaxation_rate=hydro_dynamic_relaxation_rate,
-                                                 compute_velocity_in_every_step=True, force=self.force_field,
-                                                 velocity_data_name=self.vel_field_name, kernel_params=kernel_params,
-                                                 flag_interface=self.flagInterface,
-                                                 time_step_order='collideStream',
-                                                 **hydro_lbm_parameters)
+        self.hydro_lbm_step = LatticeBoltzmannStep(data_handling=data_handling, name=name + '_hydroLBM',
+                                                   relaxation_rate=hydro_dynamic_relaxation_rate,
+                                                   compute_velocity_in_every_step=True, force=self.force_field,
+                                                   velocity_data_name=self.vel_field_name, kernel_params=kernel_params,
+                                                   flag_interface=self.flag_interface,
+                                                   time_step_order='collide_stream',
+                                                   **hydro_lbm_parameters)
 
         # Cahn-Hilliard LBMs
         if not hasattr(cahn_hilliard_relaxation_rates, '__len__'):
             cahn_hilliard_relaxation_rates = [cahn_hilliard_relaxation_rates] * len(order_parameters)
 
-        self.cahnHilliardSteps = []
+        self.cahn_hilliard_steps = []
 
         if solve_cahn_hilliard_with_finite_differences:
             if density_order_parameter is not None:
@@ -135,13 +135,13 @@ class PhaseFieldStep:
             ch_step = CahnHilliardFDStep(self.data_handling, self.phi_field_name, self.mu_field_name,
                                          self.vel_field_name, target=target, dx=dx, dt=dt, mobilities=1,
                                          equation_modifier=apply_neumann_boundaries)
-            self.cahnHilliardSteps.append(ch_step)
+            self.cahn_hilliard_steps.append(ch_step)
         else:
             for i, op in enumerate(order_parameters):
                 if op == density_order_parameter:
                     continue
 
-                ch_method = cahn_hilliard_lb_method(self.hydroLbmStep.method.stencil, self.mu_field(i),
+                ch_method = cahn_hilliard_lb_method(self.hydro_lbm_step.method.stencil, self.mu_field(i),
                                                     relaxation_rate=cahn_hilliard_relaxation_rates[i])
                 ch_step = LatticeBoltzmannStep(data_handling=data_handling, relaxation_rate=1, lb_method=ch_method,
                                                velocity_input_array_name=self.vel_field.name,
@@ -149,10 +149,10 @@ class PhaseFieldStep:
                                                stencil='D3Q19' if self.data_handling.dim == 3 else 'D2Q9',
                                                compute_density_in_every_step=True,
                                                density_data_index=i,
-                                               flag_interface=self.hydroLbmStep.boundary_handling.flag_interface,
+                                               flag_interface=self.hydro_lbm_step.boundary_handling.flag_interface,
                                                name=name + "_chLbm_%d" % (i,),
                                                optimization=optimization)
-                self.cahnHilliardSteps.append(ch_step)
+                self.cahn_hilliard_steps.append(ch_step)
 
         self._vtk_writer = None
         self.run_hydro_lbm = True
@@ -160,7 +160,7 @@ class PhaseFieldStep:
         self.time_steps_run = 0
         self.reset()
 
-        self.neumannFlag = 0
+        self.neumann_flag = 0
 
     @property
     def vtk_writer(self):
@@ -192,18 +192,18 @@ class PhaseFieldStep:
         self.time_steps_run = 0
 
     def set_pdf_fields_from_macroscopic_values(self):
-        self.hydroLbmStep.set_pdf_fields_from_macroscopic_values()
-        for chStep in self.cahnHilliardSteps:
-            chStep.set_pdf_fields_from_macroscopic_values()
+        self.hydro_lbm_step.set_pdf_fields_from_macroscopic_values()
+        for ch_step in self.cahn_hilliard_steps:
+            ch_step.set_pdf_fields_from_macroscopic_values()
 
     def pre_run(self):
         if self.gpu:
             self.data_handling.to_gpu(self.phi_field_name)
             self.data_handling.to_gpu(self.mu_field_name)
             self.data_handling.to_gpu(self.force_field_name)
-        self.hydroLbmStep.pre_run()
-        for chStep in self.cahnHilliardSteps:
-            chStep.pre_run()
+        self.hydro_lbm_step.pre_run()
+        for ch_step in self.cahn_hilliard_steps:
+            ch_step.pre_run()
 
     def post_run(self):
         if self.gpu:
@@ -211,29 +211,29 @@ class PhaseFieldStep:
             self.data_handling.to_cpu(self.mu_field_name)
             self.data_handling.to_cpu(self.force_field_name)
         if self.run_hydro_lbm:
-            self.hydroLbmStep.post_run()
-        for chStep in self.cahnHilliardSteps:
-            chStep.post_run()
+            self.hydro_lbm_step.post_run()
+        for ch_step in self.cahn_hilliard_steps:
+            ch_step.post_run()
 
     def time_step(self):
-        neumannFlag = self.neumannFlag
+        neumann_flag = self.neumann_flag
 
-        self.phiSync()
-        self.data_handling.run_kernel(self.muAndPressureTensorKernel, neumannFlag=neumannFlag)
-        self.pressureTensorSync()
-        self.data_handling.run_kernel(self.forceFromPressureTensorKernel, neumannFlag=neumannFlag)
+        self.phi_sync()
+        self.data_handling.run_kernel(self.mu_and_pressure_tensor_kernel, neumann_flag=neumann_flag)
+        self.pressure_tensor_sync()
+        self.data_handling.run_kernel(self.force_from_pressure_tensor_kernel, neumann_flag=neumann_flag)
 
         if self.run_hydro_lbm:
-            self.hydroLbmStep.time_step()
+            self.hydro_lbm_step.time_step()
 
-        for chLbm in self.cahnHilliardSteps:
-            chLbm.time_step()
+        for ch_lbm in self.cahn_hilliard_steps:
+            ch_lbm.time_step()
 
         self.time_steps_run += 1
 
     @property
     def boundary_handling(self):
-        return self.hydroLbmStep.boundary_handling
+        return self.hydro_lbm_step.boundary_handling
 
     def set_concentration(self, slice_obj, concentration):
         if self.concentration_to_order_parameter is not None:
@@ -248,7 +248,7 @@ class PhaseFieldStep:
     def set_density(self, slice_obj, value):
         for b in self.data_handling.iterate(slice_obj):
             for i in range(self.num_order_parameters):
-                b[self.hydroLbmStep.density_data_name].fill(value)
+                b[self.hydro_lbm_step.density_data_name].fill(value)
 
     def run(self, time_steps):
         self.pre_run()
diff --git a/plot2d.py b/plot2d.py
index bd7bf84e..1bc774ea 100644
--- a/plot2d.py
+++ b/plot2d.py
@@ -39,9 +39,9 @@ def boundary_handling(boundary_handling_obj, slice_obj=None, boundary_name_to_co
 
     boundary_names = []
     flag_values = []
-    for name, flagName in sorted(boundary_handling_obj.get_boundary_name_to_flag_dict().items(), key=lambda l: l[1]):
+    for name, flag_name in sorted(boundary_handling_obj.get_boundary_name_to_flag_dict().items(), key=lambda l: l[1]):
         boundary_names.append(name)
-        flag_values.append(flagName)
+        flag_values.append(flag_name)
     default_cycle = matplotlib.rcParams['axes.prop_cycle']
     color_values = [fixed_colors[name] if name in fixed_colors else c['color']
                     for c, name in zip(default_cycle, boundary_names)]
@@ -157,8 +157,8 @@ class LbGrid:
         for p in self._patches:
             ax.add_patch(p)
 
-        for arrowPatch in self._arrows.values():
-            ax.add_patch(arrowPatch)
+        for arrow_patch in self._arrows.values():
+            ax.add_patch(arrow_patch)
 
         offset = 0.1
         ax.set_xlim(-offset, self._xCells+offset)
diff --git a/quadratic_equilibrium_construction.py b/quadratic_equilibrium_construction.py
index 6f46d387..1b1a0455 100644
--- a/quadratic_equilibrium_construction.py
+++ b/quadratic_equilibrium_construction.py
@@ -45,13 +45,13 @@ def match_generic_equilibrium_ansatz(stencil, equilibrium, u=sp.symbols("u_:3"))
     u = u[:dim]
 
     result = dict()
-    for direction, actualEquilibrium in zip(stencil, equilibrium):
+    for direction, actual_equilibrium in zip(stencil, equilibrium):
         speed = np.abs(direction).sum()
         a, b, c, d = get_parameter_symbols(speed)
         u_times_d = scalar_product(u, direction)
         generic_equation = a + b * u_times_d + c * u_times_d ** 2 + d * scalar_product(u, u)
 
-        equations = sp.poly(actualEquilibrium - generic_equation, *u).coeffs()
+        equations = sp.poly(actual_equilibrium - generic_equation, *u).coeffs()
         solve_res = sp.solve(equations, [a, b, c, d])
         if not solve_res:
             raise ValueError("This equilibrium does not match the generic quadratic standard form")
@@ -70,9 +70,9 @@ def moment_constraint_equations(stencil, equilibrium, moment_to_value_dict, u=sp
     u = u[:dim]
     equilibrium = tuple(equilibrium)
     constraint_equations = set()
-    for moment, desiredValue in moment_to_value_dict.items():
+    for moment, desired_value in moment_to_value_dict.items():
         generic_moment = discrete_moment(equilibrium, moment, stencil)
-        equations = sp.poly(generic_moment - desiredValue, *u).coeffs()
+        equations = sp.poly(generic_moment - desired_value, *u).coeffs()
         constraint_equations.update(equations)
     return list(constraint_equations)
 
diff --git a/relaxationrates.py b/relaxationrates.py
index 7d898962..434b19ea 100644
--- a/relaxationrates.py
+++ b/relaxationrates.py
@@ -25,13 +25,13 @@ def get_shear_relaxation_rate(method):
     Shear moments in 3D are: x*y, x*z and y*z - in 2D it's only x*y
     The shear relaxation rate determines the viscosity in hydrodynamic LBM schemes
     """
-    if hasattr(method, 'shearRelaxationRate'):
-        return method.shearRelaxationRate
+    if hasattr(method, 'shear_relaxation_rate'):
+        return method.shear_relaxation_rate
 
     relaxation_rates = set()
-    for moment, relaxInfo in method.relaxation_info_dict.items():
+    for moment, relax_info in method.relaxation_info_dict.items():
         if is_shear_moment(moment):
-            relaxation_rates.add(relaxInfo.relaxation_rate)
+            relaxation_rates.add(relax_info.relaxation_rate)
     if len(relaxation_rates) == 1:
         return relaxation_rates.pop()
     else:
diff --git a/scenarios.py b/scenarios.py
index 83b14900..8055a79f 100644
--- a/scenarios.py
+++ b/scenarios.py
@@ -61,7 +61,7 @@ def create_fully_periodic_flow(initial_velocity, periodicity_in_kernel=False, lb
     if data_handling is None:
         data_handling = create_data_handling(parallel, domain_size, periodicity=not periodicity_in_kernel,
                                              default_ghost_layers=1)
-    step = LatticeBoltzmannStep(data_handling=data_handling, name="periodicScenario", lbm_kernel=lbm_kernel, **kwargs)
+    step = LatticeBoltzmannStep(data_handling=data_handling, name="periodic_scenario", lbm_kernel=lbm_kernel, **kwargs)
     for b in step.data_handling.iterate(ghost_layers=False):
         np.copyto(b[step.velocity_data_name], initial_velocity[b.global_slice])
     step.set_pdf_fields_from_macroscopic_values()
@@ -85,7 +85,7 @@ def create_lid_driven_cavity(domain_size=None, lid_velocity=0.005, lbm_kernel=No
     assert domain_size is not None or data_handling is not None
     if data_handling is None:
         data_handling = create_data_handling(parallel, domain_size, periodicity=False, default_ghost_layers=1)
-    step = LatticeBoltzmannStep(data_handling=data_handling, lbm_kernel=lbm_kernel, name="lidDrivenCavity", **kwargs)
+    step = LatticeBoltzmannStep(data_handling=data_handling, lbm_kernel=lbm_kernel, name="ldc", **kwargs)
 
     my_ubb = UBB(velocity=[lid_velocity, 0, 0][:step.method.dim])
     step.boundary_handling.set_boundary(my_ubb, slice_from_direction('N', step.dim))
@@ -132,17 +132,17 @@ def create_channel(domain_size=None, force=None, pressure_difference=None, u_max
     if force:
         kwargs['force'] = tuple([force, 0, 0][:dim])
         assert data_handling.periodicity[0]
-        step = LatticeBoltzmannStep(data_handling=data_handling, name="forceDrivenChannel", **kwargs)
+        step = LatticeBoltzmannStep(data_handling=data_handling, name="force_driven_channel", **kwargs)
     elif pressure_difference:
         inflow = FixedDensity(1.0 + pressure_difference)
         outflow = FixedDensity(1.0)
-        step = LatticeBoltzmannStep(data_handling=data_handling, name="pressureDrivenChannel", **kwargs)
+        step = LatticeBoltzmannStep(data_handling=data_handling, name="pressure_driven_channel", **kwargs)
         step.boundary_handling.set_boundary(inflow, slice_from_direction('W', dim))
         step.boundary_handling.set_boundary(outflow, slice_from_direction('E', dim))
     elif u_max:
         if duct:
             raise NotImplementedError("Velocity inflow for duct flows not yet implemented")
-        step = LatticeBoltzmannStep(data_handling=data_handling, name="velocityDrivenChannel", **kwargs)
+        step = LatticeBoltzmannStep(data_handling=data_handling, name="velocity_driven_channel", **kwargs)
         diameter = diameter_callback(np.array([0]), domain_size)[0] if diameter_callback else min(domain_size[1:])
         add_parabolic_velocity_inflow(step.boundary_handling, u_max, slice_from_direction('W', dim),
                                       vel_coord=0, diameter=diameter)
diff --git a/stencils.py b/stencils.py
index c663840d..0222d6b0 100644
--- a/stencils.py
+++ b/stencils.py
@@ -16,10 +16,11 @@ def get_stencil(name, ordering='walberla'):
         return get_stencil.data[name.upper()][ordering.lower()]
     except KeyError:
         err_msg = ""
-        for stencil, orderingNames in get_stencil.data.items():
-            err_msg += "  %s: %s\n" % (stencil, ", ".join(orderingNames.keys()))
+        for stencil, ordering_names in get_stencil.data.items():
+            err_msg += "  %s: %s\n" % (stencil, ", ".join(ordering_names.keys()))
 
-        raise ValueError("No such stencil available. Available stencils: <stencilName>( <orderingNames> )\n" + err_msg)
+        raise ValueError("No such stencil available. "
+                         "Available stencils: <stencil_name>( <ordering_names> )\n" + err_msg)
 
 
 get_stencil.data = {
diff --git a/turbulence_models.py b/turbulence_models.py
index 19a4e58b..6e742aa2 100644
--- a/turbulence_models.py
+++ b/turbulence_models.py
@@ -25,7 +25,7 @@ def add_smagorinsky_model(collision_rule, smagorinsky_constant, omega_output_fie
     tau_0 = sp.Symbol("tau_0_")
     second_order_neq_moments = sp.Symbol("Pi")
     rho = method.zeroth_order_equilibrium_moment_symbol if method.conserved_quantity_computation.compressible else 1
-    adapted_omega = sp.Symbol("smagorinskyOmega")
+    adapted_omega = sp.Symbol("smagorinsky_omega")
 
     collision_rule = collision_rule.new_with_substitutions({omega_s: adapted_omega})
     # for derivation see notebook demo_custom_LES_model.ipynb
diff --git a/updatekernels.py b/updatekernels.py
index 46f9241f..7c78c965 100644
--- a/updatekernels.py
+++ b/updatekernels.py
@@ -33,16 +33,16 @@ def create_lbm_kernel(collision_rule, input_field, output_field, accessor):
     input_accesses = accessor.read(input_field, method.stencil)
     output_accesses = accessor.write(output_field, method.stencil)
 
-    for (idx, offset), inputAccess, outputAccess in zip(enumerate(method.stencil), input_accesses, output_accesses):
-        substitutions[pre_collision_symbols[idx]] = inputAccess
-        substitutions[post_collision_symbols[idx]] = outputAccess
+    for (idx, offset), input_access, output_access in zip(enumerate(method.stencil), input_accesses, output_accesses):
+        substitutions[pre_collision_symbols[idx]] = input_access
+        substitutions[post_collision_symbols[idx]] = output_access
 
     result = collision_rule.new_with_substitutions(substitutions)
 
     if 'split_groups' in result.simplification_hints:
         new_split_groups = []
-        for splitGroup in result.simplification_hints['split_groups']:
-            new_split_groups.append([fast_subs(e, substitutions) for e in splitGroup])
+        for split_group in result.simplification_hints['split_groups']:
+            new_split_groups.append([fast_subs(e, substitutions) for e in split_group])
         result.simplification_hints['split_groups'] = new_split_groups
 
     return result
-- 
GitLab