diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a2ec00d16cc04af33a2d3e4e46183111e4cfbcda..5eb26c1acbf415b775537de385aa62bbb30b208d 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -313,14 +313,6 @@ typecheck:
     - docker
     - AVX
 
-"testsuite-experimental-jit-py3.10":
-  extends: .testsuite-base
-  image: i10git.cs.fau.de:5005/pycodegen/pycodegen/nox:alpine
-  script:
-    - nox -s "testsuite-3.10(cpu)" -- --experimental-cpu-jit
-  tags:
-    - docker
-    - AVX
 
 # -------------------- Documentation ---------------------------------------------------------------------
 
diff --git a/docs/source/_static/img/pystencils-logo-light-whitebox.svg b/docs/source/_static/img/pystencils-logo-light-whitebox.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2a8c5fbb309d092aed712427d64cd7d3ba3923ff
--- /dev/null
+++ b/docs/source/_static/img/pystencils-logo-light-whitebox.svg
@@ -0,0 +1,467 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   width="53.913792mm"
+   height="53.913792mm"
+   viewBox="0 0 53.913792 53.913791"
+   version="1.1"
+   id="svg1"
+   inkscape:version="1.4.1 (93de688d07, 2025-03-30)"
+   sodipodi:docname="pystencils-logo-light-whitebox.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview1"
+     pagecolor="#ffffff"
+     bordercolor="#111111"
+     borderopacity="1"
+     inkscape:showpageshadow="0"
+     inkscape:pageopacity="0"
+     inkscape:pagecheckerboard="1"
+     inkscape:deskcolor="#d1d1d1"
+     inkscape:document-units="mm"
+     showguides="true"
+     inkscape:lockguides="false"
+     showgrid="false"
+     inkscape:zoom="2.8284271"
+     inkscape:cx="74.246213"
+     inkscape:cy="110.30866"
+     inkscape:window-width="2560"
+     inkscape:window-height="1399"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="svg1">
+    <inkscape:grid
+       id="grid4"
+       units="mm"
+       originx="6.9568948"
+       originy="1.630949"
+       spacingx="0.99999998"
+       spacingy="1"
+       empcolor="#0099e5"
+       empopacity="0.30196078"
+       color="#0099e5"
+       opacity="0.14901961"
+       empspacing="5"
+       enabled="true"
+       visible="false" />
+    <sodipodi:guide
+       position="41.130717,-7.7435943"
+       orientation="1,0"
+       id="guide2"
+       inkscape:locked="false" />
+    <sodipodi:guide
+       position="12.782714,-7.7435943"
+       orientation="1,0"
+       id="guide3"
+       inkscape:locked="false" />
+    <sodipodi:guide
+       position="26.956716,6.4304046"
+       orientation="0,-1"
+       id="guide4"
+       inkscape:locked="false" />
+    <sodipodi:guide
+       position="26.956716,-21.917594"
+       orientation="0,-1"
+       id="guide5"
+       inkscape:locked="false" />
+  </sodipodi:namedview>
+  <defs
+     id="defs1">
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect5"
+       is_visible="true"
+       lpeversion="0" />
+    <filter
+       y="-0.17469697"
+       height="1.3493938"
+       inkscape:menu-tooltip="Darkens the edge with an inner blur and adds a flexible glow"
+       inkscape:menu="Shadows and Glows"
+       inkscape:label="Dark And Glow"
+       style="color-interpolation-filters:sRGB"
+       id="filter4608-0"
+       x="-0.17469697"
+       width="1.3493938">
+      <feGaussianBlur
+         stdDeviation="5"
+         result="result6"
+         id="feGaussianBlur4610-2" />
+      <feComposite
+         result="result8"
+         in="SourceGraphic"
+         operator="atop"
+         in2="result6"
+         id="feComposite4612-5" />
+      <feComposite
+         result="result9"
+         operator="over"
+         in2="SourceAlpha"
+         in="result8"
+         id="feComposite4614-7" />
+      <feColorMatrix
+         values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 "
+         result="result10"
+         id="feColorMatrix4616-6" />
+      <feBlend
+         in="result10"
+         mode="normal"
+         in2="result6"
+         id="feBlend4618-9" />
+    </filter>
+    <filter
+       y="-0.17469697"
+       height="1.3493938"
+       inkscape:menu-tooltip="Darkens the edge with an inner blur and adds a flexible glow"
+       inkscape:menu="Shadows and Glows"
+       inkscape:label="Dark And Glow"
+       style="color-interpolation-filters:sRGB"
+       id="filter4632-1"
+       x="-0.17469697"
+       width="1.3493938">
+      <feGaussianBlur
+         stdDeviation="5"
+         result="result6"
+         id="feGaussianBlur4634-9" />
+      <feComposite
+         result="result8"
+         in="SourceGraphic"
+         operator="atop"
+         in2="result6"
+         id="feComposite4636-8" />
+      <feComposite
+         result="result9"
+         operator="over"
+         in2="SourceAlpha"
+         in="result8"
+         id="feComposite4638-7" />
+      <feColorMatrix
+         values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 "
+         result="result10"
+         id="feColorMatrix4640-6" />
+      <feBlend
+         in="result10"
+         mode="normal"
+         in2="result6"
+         id="feBlend4642-5" />
+    </filter>
+    <filter
+       y="-0.17469697"
+       height="1.3493938"
+       inkscape:menu-tooltip="Darkens the edge with an inner blur and adds a flexible glow"
+       inkscape:menu="Shadows and Glows"
+       inkscape:label="Dark And Glow"
+       style="color-interpolation-filters:sRGB"
+       id="filter4620-1"
+       x="-0.17469697"
+       width="1.3493938">
+      <feGaussianBlur
+         stdDeviation="5"
+         result="result6"
+         id="feGaussianBlur4622-1" />
+      <feComposite
+         result="result8"
+         in="SourceGraphic"
+         operator="atop"
+         in2="result6"
+         id="feComposite4624-4" />
+      <feComposite
+         result="result9"
+         operator="over"
+         in2="SourceAlpha"
+         in="result8"
+         id="feComposite4626-8" />
+      <feColorMatrix
+         values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 "
+         result="result10"
+         id="feColorMatrix4628-5" />
+      <feBlend
+         in="result10"
+         mode="normal"
+         in2="result6"
+         id="feBlend4630-7" />
+    </filter>
+    <filter
+       y="-0.17469697"
+       height="1.3493938"
+       inkscape:menu-tooltip="Darkens the edge with an inner blur and adds a flexible glow"
+       inkscape:menu="Shadows and Glows"
+       inkscape:label="Dark And Glow"
+       style="color-interpolation-filters:sRGB"
+       id="filter4596-6"
+       x="-0.17469697"
+       width="1.3493938">
+      <feGaussianBlur
+         stdDeviation="5"
+         result="result6"
+         id="feGaussianBlur4598-6" />
+      <feComposite
+         result="result8"
+         in="SourceGraphic"
+         operator="atop"
+         in2="result6"
+         id="feComposite4600-9" />
+      <feComposite
+         result="result9"
+         operator="over"
+         in2="SourceAlpha"
+         in="result8"
+         id="feComposite4602-1" />
+      <feColorMatrix
+         values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 "
+         result="result10"
+         id="feColorMatrix4604-4" />
+      <feBlend
+         in="result10"
+         mode="normal"
+         in2="result6"
+         id="feBlend4606-3" />
+    </filter>
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4188-5-6"
+       is_visible="true"
+       lpeversion="0" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4188-7"
+       is_visible="true"
+       lpeversion="0" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4188-5-6-3"
+       is_visible="true"
+       lpeversion="0" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4188-7-2"
+       is_visible="true"
+       lpeversion="0" />
+    <filter
+       y="-0.17469696"
+       height="1.3493939"
+       inkscape:menu-tooltip="Darkens the edge with an inner blur and adds a flexible glow"
+       inkscape:menu="Shadows and Glows"
+       inkscape:label="Dark And Glow"
+       style="color-interpolation-filters:sRGB"
+       id="filter4608-0-5"
+       x="-0.17469696"
+       width="1.3493939">
+      <feGaussianBlur
+         stdDeviation="5"
+         result="result6"
+         id="feGaussianBlur4610-2-5" />
+      <feComposite
+         result="result8"
+         in="SourceGraphic"
+         operator="atop"
+         in2="result6"
+         id="feComposite4612-5-4" />
+      <feComposite
+         result="result9"
+         operator="over"
+         in2="SourceAlpha"
+         in="result8"
+         id="feComposite4614-7-7" />
+      <feColorMatrix
+         values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 "
+         result="result10"
+         id="feColorMatrix4616-6-6" />
+      <feBlend
+         in="result10"
+         mode="normal"
+         in2="result6"
+         id="feBlend4618-9-5" />
+    </filter>
+    <filter
+       y="-0.17469696"
+       height="1.3493939"
+       inkscape:menu-tooltip="Darkens the edge with an inner blur and adds a flexible glow"
+       inkscape:menu="Shadows and Glows"
+       inkscape:label="Dark And Glow"
+       style="color-interpolation-filters:sRGB"
+       id="filter4620-1-7"
+       x="-0.17469696"
+       width="1.3493939">
+      <feGaussianBlur
+         stdDeviation="5"
+         result="result6"
+         id="feGaussianBlur4622-1-4" />
+      <feComposite
+         result="result8"
+         in="SourceGraphic"
+         operator="atop"
+         in2="result6"
+         id="feComposite4624-4-5" />
+      <feComposite
+         result="result9"
+         operator="over"
+         in2="SourceAlpha"
+         in="result8"
+         id="feComposite4626-8-2" />
+      <feColorMatrix
+         values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 "
+         result="result10"
+         id="feColorMatrix4628-5-5" />
+      <feBlend
+         in="result10"
+         mode="normal"
+         in2="result6"
+         id="feBlend4630-7-4" />
+    </filter>
+    <filter
+       y="-0.17469696"
+       height="1.3493939"
+       inkscape:menu-tooltip="Darkens the edge with an inner blur and adds a flexible glow"
+       inkscape:menu="Shadows and Glows"
+       inkscape:label="Dark And Glow"
+       style="color-interpolation-filters:sRGB"
+       id="filter4596-6-3"
+       x="-0.17469696"
+       width="1.3493939">
+      <feGaussianBlur
+         stdDeviation="5"
+         result="result6"
+         id="feGaussianBlur4598-6-0" />
+      <feComposite
+         result="result8"
+         in="SourceGraphic"
+         operator="atop"
+         in2="result6"
+         id="feComposite4600-9-7" />
+      <feComposite
+         result="result9"
+         operator="over"
+         in2="SourceAlpha"
+         in="result8"
+         id="feComposite4602-1-8" />
+      <feColorMatrix
+         values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 "
+         result="result10"
+         id="feColorMatrix4604-4-6" />
+      <feBlend
+         in="result10"
+         mode="normal"
+         in2="result6"
+         id="feBlend4606-3-8" />
+    </filter>
+    <filter
+       y="-0.17469696"
+       height="1.3493939"
+       inkscape:menu-tooltip="Darkens the edge with an inner blur and adds a flexible glow"
+       inkscape:menu="Shadows and Glows"
+       inkscape:label="Dark And Glow"
+       style="color-interpolation-filters:sRGB"
+       id="filter4632-1-4"
+       x="-0.17469696"
+       width="1.3493939">
+      <feGaussianBlur
+         stdDeviation="5"
+         result="result6"
+         id="feGaussianBlur4634-9-9" />
+      <feComposite
+         result="result8"
+         in="SourceGraphic"
+         operator="atop"
+         in2="result6"
+         id="feComposite4636-8-2" />
+      <feComposite
+         result="result9"
+         operator="over"
+         in2="SourceAlpha"
+         in="result8"
+         id="feComposite4638-7-0" />
+      <feColorMatrix
+         values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 "
+         result="result10"
+         id="feColorMatrix4640-6-6" />
+      <feBlend
+         in="result10"
+         mode="normal"
+         in2="result6"
+         id="feBlend4642-5-8" />
+    </filter>
+  </defs>
+  <rect
+     style="fill:#ffffff;fill-opacity:1;stroke:#d2d2d2;stroke-width:0.509157;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+     id="rect1396-1"
+     width="53.404999"
+     height="53.404636"
+     x="0.2545785"
+     y="0.2545785"
+     ry="2.939965"
+     inkscape:export-xdpi="70.669998"
+     inkscape:export-ydpi="70.669998" />
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(6.9568948,1.6309454)">
+    <g
+       id="g25"
+       transform="translate(1.812973e-4,-8.5216229e-5)">
+      <g
+         id="g24">
+        <rect
+           style="display:inline;opacity:1;fill:#000000;fill-opacity:0.701961;stroke-width:0.409154"
+           id="rect1"
+           width="28.348"
+           height="1.396094"
+           x="5.825819"
+           y="19.277994"
+           inkscape:label="axis-H" />
+        <rect
+           style="opacity:1;fill:#000000;fill-opacity:0.701961;stroke-width:0.409154"
+           id="rect2"
+           width="28.348"
+           height="1.396094"
+           x="5.8020415"
+           y="-20.697866"
+           transform="rotate(90)"
+           inkscape:label="axis-V" />
+        <circle
+           transform="matrix(0.10711925,0,0,0.10711925,-10.838025,-5.3822253)"
+           r="34.345188"
+           cy="236.72931"
+           cx="155.56349"
+           id="path4136-7-0"
+           style="fill:#009e73;fill-opacity:1;stroke:none;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter4608-0-5)"
+           inkscape:label="circle-L" />
+        <circle
+           transform="matrix(0.10711925,0,0,0.10711925,-11.109448,-4.9954233)"
+           r="34.345188"
+           cy="365.43817"
+           cx="290.41885"
+           id="path4136-6-0"
+           style="fill:#0072b2;fill-opacity:1;stroke:none;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter4620-1-7)"
+           inkscape:label="circle-B" />
+        <circle
+           transform="matrix(0.10711925,0,0,0.10711925,-11.20464,-5.7690267)"
+           r="34.345188"
+           cy="108.02044"
+           cx="291.42902"
+           id="path4136-76"
+           style="fill:#e69f00;fill-opacity:1;stroke:none;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter4596-6-3)"
+           inkscape:label="circle-T" />
+        <circle
+           transform="matrix(0.10711925,0,0,0.10711925,-11.056616,-5.2185227)"
+           r="34.345188"
+           cy="236.72931"
+           cx="422.24377"
+           id="path4136-3-9"
+           style="fill:#999999;fill-opacity:1;stroke:none;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter4632-1-4)"
+           inkscape:label="circle-R" />
+      </g>
+      <path
+         style="font-weight:bold;font-size:9.525px;line-height:125%;font-family:'Latin Modern Mono Light';-inkscape-font-specification:'Latin Modern Mono Light, Bold';letter-spacing:0px;word-spacing:0px;fill:#252525;stroke-width:0.264583px"
+         d="m -0.08362661,45.623494 c 0,-1.190625 -0.86677497,-2.124075 -1.88594989,-2.124075 -0.409575,0 -0.8001,0.13335 -1.114425,0.371475 -0.00953,-0.219075 -0.09525,-0.32385 -0.43815,-0.32385 h -0.6762749 c -0.180975,0 -0.4572,0.01905 -0.4572,0.36195 0,0.3429 0.28575,0.352425 0.447675,0.352425 h 0.3428999 v 4.781549 h -0.3333749 c -0.180975,0 -0.4572,0.0095 -0.4572,0.36195 0,0.3429 0.28575,0.352425 0.447675,0.352425 h 1.4763749 c 0.161925,0 0.447675,-0.0095 0.447675,-0.352425 0,-0.352425 -0.276225,-0.36195 -0.4572,-0.36195 h -0.333375 v -1.666874 c 0.36195,0.314325 0.7239,0.381 1.000125,0.381 1.0477499,0 1.99072489,-0.9144 1.99072489,-2.1336 z m -0.78104997,0 c 0,0.8382 -0.60960002,1.419225 -1.22872492,1.419225 -0.676275,0 -0.97155,-0.771525 -0.97155,-1.190625 v -0.676275 c 0,-0.51435 0.504825,-0.962025 1.04775,-0.962025 0.6476999,0 1.15252492,0.6477 1.15252492,1.4097 z m 5.89597128,-1.7145 c 0,-0.352425 -0.2667,-0.36195 -0.447675,-0.36195 H 3.4977698 c -0.17145,0 -0.447675,0.01905 -0.447675,0.352425 0,0.352425 0.2667,0.36195 0.447675,0.36195 h 0.1524 l -0.81915,2.486025 -0.942975,-2.486025 h 0.123825 c 0.17145,0 0.447675,-0.0095 0.447675,-0.352425 0,-0.352425 -0.2667,-0.36195 -0.447675,-0.36195 H 0.92601987 c -0.180975,0 -0.44767499,0.0095 -0.44767499,0.36195 0,0.3429 0.27622499,0.352425 0.44767499,0.352425 H 1.1641449 l 1.3334999,3.362325 c -0.0381,0.104775 -0.238125,0.771525 -0.3429,0.981075 -0.1905,0.361949 -0.485775,0.581024 -0.66675,0.581024 0.00953,-0.0381 0.104775,-0.06667 0.104775,-0.20955 0,-0.276224 -0.200025,-0.476249 -0.4762499,-0.476249 -0.29527503,0 -0.47625002,0.200025 -0.47625002,0.476249 0,0.428625 0.34289998,0.8382 0.83819992,0.8382 0.9525,0 1.419225,-1.266824 1.457325,-1.371599 l 1.4192249,-4.181475 h 0.2286 c 0.17145,0 0.447675,-0.0095 0.447675,-0.352425 z m 4.6196213,2.5527 c 0,-1.00965 -1.2477749,-1.228725 -1.5620999,-1.27635 l -0.962025,-0.17145 c -0.24765,-0.05715 -0.5048249,-0.161925 -0.5048249,-0.381 0,-0.219075 0.2952749,-0.447675 
1.1048999,-0.447675 0.695325,0 0.828675,0.2286 0.85725,0.447675 0.00953,0.1905 0.0381,0.381 0.390525,0.381 0.3905249,0 0.4000499,-0.2286 0.4000499,-0.447675 v -0.6477 c 0,-0.180975 -0.01905,-0.447675 -0.3524249,-0.447675 -0.20955,0 -0.28575,0.08573 -0.32385,0.161925 -0.40005,-0.161925 -0.78105,-0.161925 -0.9525,-0.161925 -1.6287749,0 -1.8383249,0.81915 -1.8383249,1.16205 0,0.89535 1.0001249,1.08585 1.8954749,1.2192 0.4191,0.06667 1.133475,0.180975 1.133475,0.6096 0,0.32385 -0.333375,0.581025 -1.114425,0.581025 -0.409575,0 -0.89535,-0.09525 -1.1144249,-0.771525 -0.05715,-0.20955 -0.104775,-0.32385 -0.40005,-0.32385 -0.390525,0 -0.40005,0.2286 -0.40005,0.4572 v 0.89535 c 0,0.180975 0.00953,0.4572 0.352425,0.4572 0.09525,0 0.257175,0 0.381,-0.314325 0.4476749,0.295275 0.9143999,0.314325 1.1715749,0.314325 1.5335249,0 1.8383249,-0.828675 1.8383249,-1.2954 z m 4.895846,-0.0095 c 0,-0.200025 0,-0.428625 -0.40005,-0.428625 -0.371475,0 -0.381,0.2286 -0.381,0.4191 -0.0095,0.51435 -0.485775,0.600075 -0.6858,0.600075 -0.638175,0 -0.638175,-0.43815 -0.638175,-0.638175 v -2.143125 h 1.4478 c 0.17145,0 0.447675,-0.0095 0.447675,-0.352425 0,-0.352425 -0.2667,-0.36195 -0.447675,-0.36195 h -1.4478 v -0.6858 c 0,-0.20955 -0.0095,-0.447675 -0.390525,-0.447675 -0.390525,0 -0.40005,0.2286 -0.40005,0.447675 v 0.6858 h -0.733425 c -0.180975,0 -0.447675,0.01905 -0.447675,0.36195 0,0.3429 0.2667,0.352425 0.43815,0.352425 h 0.74295 v 2.19075 c 0,0.942975 0.66675,1.304925 1.39065,1.304925 0.7239,0 1.50495,-0.428625 1.50495,-1.304925 z m 5.153022,0.161925 c 0,-0.32385 -0.333375,-0.333375 -0.40005,-0.333375 -0.20955,0 -0.295275,0.0381 -0.381,0.24765 -0.1905,0.43815 -0.657225,0.51435 -0.89535,0.51435 -0.638175,0 -1.27635,-0.409575 -1.4478,-1.114425 h 2.676525 c 0.238125,0 0.447675,0 0.447675,-0.4191 0,-1.095375 -0.62865,-2.03835 -1.876425,-2.03835 -1.143,0 -2.07645,0.962025 -2.07645,2.143125 0,1.171575 0.97155,2.143125 2.2098,2.143125 1.2954,0 1.743075,-0.89535 1.743075,-1.143 z m 
-0.81915,-1.39065 h -2.295525 c 0.1524,-0.600075 0.6477,-1.038225 1.23825,-1.038225 0.43815,0 0.9525,0.20955 1.057275,1.038225 z m 6.257921,2.124075 c 0,-0.352425 -0.2667,-0.36195 -0.447675,-0.36195 h -0.333375 v -2.07645 c 0,-0.9906 -0.504824,-1.4097 -1.285874,-1.4097 -0.5334,0 -0.9144,0.219075 -1.143,0.40005 -0.0095,-0.238125 -0.0762,-0.352425 -0.447675,-0.352425 h -0.676275 c -0.180975,0 -0.447675,0.01905 -0.447675,0.36195 0,0.3429 0.28575,0.352425 0.43815,0.352425 h 0.3429 v 2.72415 h -0.333375 c -0.180975,0 -0.447675,0.0095 -0.447675,0.36195 0,0.3429 0.28575,0.352425 0.43815,0.352425 h 1.476375 c 0.161925,0 0.447675,-0.0095 0.447675,-0.352425 0,-0.352425 -0.276225,-0.36195 -0.4572,-0.36195 h -0.333375 v -1.571625 c 0,-0.866775 0.6477,-1.20015 1.08585,-1.20015 0.447675,0 0.55245,0.238125 0.55245,0.74295 v 2.028825 h -0.28575 c -0.180975,0 -0.4572,0.0095 -0.4572,0.36195 0,0.3429 0.295275,0.352425 0.4572,0.352425 h 1.419224 c 0.1524,0 0.43815,-0.0095 0.43815,-0.352425 z m 4.58152,-0.733425 c 0,-0.32385 -0.333375,-0.333375 -0.40005,-0.333375 -0.180975,0 -0.295275,0.01905 -0.381,0.24765 -0.04763,0.1143 -0.238125,0.51435 -0.847725,0.51435 -0.74295,0 -1.381125,-0.619125 -1.381125,-1.419225 0,-0.428625 0.24765,-1.438275 1.438275,-1.438275 l 0.485775,0.0095 c 0.0095,0.381 0.219075,0.523875 0.47625,0.523875 0.2667,0 0.4953,-0.180975 0.4953,-0.4953 0,-0.7239 -1.00965,-0.752475 -1.457325,-0.752475 -1.647825,0 -2.228849,1.304925 -2.228849,2.15265 0,1.152525 0.904874,2.1336 2.114549,2.1336 1.362075,0 1.685925,-0.981075 1.685925,-1.143 z m 4.895847,0.733425 c 0,-0.352425 -0.28575,-0.36195 -0.4572,-0.36195 h -0.89535 v -2.981325 c 0,-0.3429 -0.06667,-0.4572 -0.447675,-0.4572 h -1.31445 c -0.17145,0 -0.4572,0.01905 -0.4572,0.352425 0,0.352425 0.28575,0.36195 0.4572,0.36195 h 0.97155 v 2.72415 h -1.04775 c -0.180975,0 -0.4572,0.0095 -0.4572,0.36195 0,0.3429 0.28575,0.352425 0.4572,0.352425 h 2.733675 c 0.17145,0 0.4572,-0.0095 0.4572,-0.352425 z m -1.31445,-4.886325 c 
0,-0.295275 -0.238125,-0.5334 -0.542925,-0.5334 -0.3048,0 -0.542925,0.238125 -0.542925,0.5334 0,0.3048 0.238125,0.542925 0.542925,0.542925 0.3048,0 0.542925,-0.238125 0.542925,-0.542925 z m 6.429372,4.886325 c 0,-0.352425 -0.2667,-0.36195 -0.447675,-0.36195 h -1.133475 v -4.6482 c 0,-0.3429 -0.06667,-0.4572 -0.447675,-0.4572 h -1.46685 c -0.180975,0 -0.4572,0.0095 -0.4572,0.36195 0,0.3429 0.28575,0.352425 0.447675,0.352425 h 1.133475 v 4.391025 h -1.12395 c -0.180975,0 -0.4572,0.0095 -0.4572,0.36195 0,0.3429 0.28575,0.352425 0.447675,0.352425 h 3.057525 c 0.17145,0 0.447675,-0.0095 0.447675,-0.352425 z m 4.924422,-0.885825 c 0,-1.00965 -1.247775,-1.228725 -1.5621,-1.27635 l -0.962025,-0.17145 c -0.24765,-0.05715 -0.504825,-0.161925 -0.504825,-0.381 0,-0.219075 0.295275,-0.447675 1.1049,-0.447675 0.695325,0 0.828675,0.2286 0.85725,0.447675 0.0095,0.1905 0.0381,0.381 0.390525,0.381 0.390525,0 0.40005,-0.2286 0.40005,-0.447675 v -0.6477 c 0,-0.180975 -0.01905,-0.447675 -0.352425,-0.447675 -0.20955,0 -0.28575,0.08573 -0.32385,0.161925 -0.40005,-0.161925 -0.78105,-0.161925 -0.9525,-0.161925 -1.628775,0 -1.838325,0.81915 -1.838325,1.16205 0,0.89535 1.000125,1.08585 1.895475,1.2192 0.4191,0.06667 1.133475,0.180975 1.133475,0.6096 0,0.32385 -0.333375,0.581025 -1.114425,0.581025 -0.409575,0 -0.89535,-0.09525 -1.114425,-0.771525 -0.05715,-0.20955 -0.104775,-0.32385 -0.40005,-0.32385 -0.390525,0 -0.40005,0.2286 -0.40005,0.4572 v 0.89535 c 0,0.180975 0.0095,0.4572 0.352425,0.4572 0.09525,0 0.257175,0 0.381,-0.314325 0.447675,0.295275 0.9144,0.314325 1.171575,0.314325 1.533525,0 1.838325,-0.828675 1.838325,-1.2954 z"
+         id="text1392-1"
+         inkscape:label="text1392-1"
+         aria-label="pystencils" />
+    </g>
+  </g>
+</svg>
diff --git a/docs/source/_static/img/pystencils-logo-light.svg b/docs/source/_static/img/pystencils-logo-light.svg
index c59a25519edd719586d66f91b6c05ba14b3db9c3..d8ba09fed815f077c508cc52262cbdcf864efcf9 100644
--- a/docs/source/_static/img/pystencils-logo-light.svg
+++ b/docs/source/_static/img/pystencils-logo-light.svg
@@ -7,7 +7,7 @@
    viewBox="0 0 49.310894 48.976913"
    version="1.1"
    id="svg1"
-   inkscape:version="1.4 (e7c3feb100, 2024-10-09)"
+   inkscape:version="1.4.1 (93de688d07, 2025-03-30)"
    sodipodi:docname="pystencils-logo-light.svg"
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
    xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
@@ -27,14 +27,14 @@
      inkscape:lockguides="false"
      showgrid="false"
      inkscape:zoom="2.8284271"
-     inkscape:cx="33.410796"
-     inkscape:cy="139.65359"
+     inkscape:cx="74.246213"
+     inkscape:cy="110.30866"
      inkscape:window-width="2560"
      inkscape:window-height="1399"
      inkscape:window-x="0"
      inkscape:window-y="0"
      inkscape:window-maximized="1"
-     inkscape:current-layer="layer1">
+     inkscape:current-layer="svg1">
     <inkscape:grid
        id="grid4"
        units="mm"
diff --git a/docs/source/api/jit.md b/docs/source/api/jit.md
index 06ea0cbaf4e5df4c031a102b248b9c1fdc57efad..4f85639cf025d74f0d93fd0507a05662f38549be 100644
--- a/docs/source/api/jit.md
+++ b/docs/source/api/jit.md
@@ -17,47 +17,32 @@
 .. autodata:: no_jit
 ```
 
-## Legacy CPU JIT
+## CPU Just-In-Time Compiler
 
-The legacy CPU JIT Compiler is a leftover from pystencils 1.3
-which at the moment still drives most CPU JIT-compilation within the package,
-until the new JIT compiler is ready to take over.
+The CPU JIT compiler:
+- embeds a kernel's code into a prepared C++ frame, which includes routines
+  that map NumPy arrays and Python scalars to kernel arguments,
+  and perform shape and type checks on these arguments;
+- invokes a host C++ compiler to compile and link the generated code as a
+  Python extension module;
+- dynamically loads that module and exposes the compiled kernel to the user.
 
 ```{eval-rst}
+.. module:: pystencils.jit.cpu
+
 .. autosummary::
   :toctree: generated
   :nosignatures:
   :template: autosummary/entire_class.rst
 
-  LegacyCpuJit
-```
-
-## CPU Just-In-Time Compiler
-
-:::{note}
-The new CPU JIT compiler is still considered experimental and not yet adopted by most of pystencils.
-While the APIs described here will (probably) become the default for pystencils 2.0
-and can (and should) already be used for testing,
-the current implementation is still *very slow*.
-For more information, see [issue !120](https://i10git.cs.fau.de/pycodegen/pystencils/-/issues/120).
-:::
-
-To configure and create an instance of the CPU JIT compiler, use the `CpuJit.create` factory method:
-
-:::{card}
-```{eval-rst}
-.. autofunction:: pystencils.jit.CpuJit.create
-  :no-index:
+  CpuJit
 ```
-:::
 
 ### Compiler Infos
 
-The CPU JIT compiler invokes a host C++ compiler to compile and link a Python extension
-module containing the generated kernel.
 The properties of the host compiler are defined in a `CompilerInfo` object.
 To select a custom host compiler and customize its options, set up and pass
-a custom compiler info object to `CpuJit.create`.
+a custom compiler info object to `CpuJit`.
 
 ```{eval-rst}
 .. module:: pystencils.jit.cpu.compiler_info
@@ -70,20 +55,22 @@ a custom compiler info object to `CpuJit.create`.
   CompilerInfo
   GccInfo
   ClangInfo
+  AppleClangInfo
 ```
 
-### Implementation
+### Implementation Details
 
 ```{eval-rst}
-.. module:: pystencils.jit.cpu
+.. currentmodule:: pystencils.jit.cpu
 
 .. autosummary::
   :toctree: generated
   :nosignatures:
   :template: autosummary/entire_class.rst
 
-  CpuJit
   cpujit.ExtensionModuleBuilderBase
+  default_module_builder.DefaultExtensionModuleBuilder
+  default_module_builder.DefaultCpuKernelWrapper
 ```
 
 ## CuPy-based GPU JIT
diff --git a/docs/source/contributing/testing.md b/docs/source/contributing/testing.md
index b9c93b0d279032c6d82fad2c9d3ba0e134b8eb2e..c9c4318298549521e4b87366ab98d62ff8dc2f34 100644
--- a/docs/source/contributing/testing.md
+++ b/docs/source/contributing/testing.md
@@ -63,15 +63,3 @@ receives the overridden `target` parameter as input:
 def test_bogus(gen_config):
     assert gen_config.target.is_vector_cpu()
 ```
-
-## Testing with the Experimental CPU JIT
-
-Currently, the testsuite by default still uses the {any}`legacy CPU JIT compiler <LegacyCpuJit>`,
-since the new CPU JIT compiler is still in an experimental stage.
-To test your code against the new JIT compiler, pass the `--experimental-cpu-jit` option to pytest:
-
-```bash
-nox -s testsuite -- --experimental-cpu-jit
-```
-
-This will alter the `gen_config` fixture, activating the experimental CPU JIT for CPU targets.
diff --git a/pyproject.toml b/pyproject.toml
index 0c427bdae66e2ec8d5c0a86dadd7005b906ab521..fd67a442a1f73b6c71029f35b146490c7592ab77 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ authors = [
 ]
 license = { file = "COPYING.txt" }
 requires-python = ">=3.10"
-dependencies = ["sympy>=1.9,<=1.12.1", "numpy>=1.8.0", "appdirs", "joblib", "pyyaml", "pybind11", "fasteners"]
+dependencies = ["sympy>=1.9,<=1.12.1", "numpy>=1.8.0", "appdirs", "joblib", "pyyaml", "fasteners"]
 classifiers = [
     "Development Status :: 4 - Beta",
     "Framework :: Jupyter",
diff --git a/src/pystencils/codegen/config.py b/src/pystencils/codegen/config.py
index bb10c12bd338feb5414042ed342d066852d4939e..1af6014b206c23cd73757ddad6a9ab87f8402d7a 100644
--- a/src/pystencils/codegen/config.py
+++ b/src/pystencils/codegen/config.py
@@ -609,9 +609,9 @@ class CreateKernelConfig(ConfigBase):
 
         if jit is None:
             if target.is_cpu():
-                from ..jit import LegacyCpuJit
+                from ..jit import CpuJit
 
-                return LegacyCpuJit()
+                return CpuJit()
             elif target == Target.CUDA or target == Target.HIP:
                 try:
                     from ..jit.gpu_cupy import CupyJit
diff --git a/src/pystencils/display_utils.py b/src/pystencils/display_utils.py
index 919dea4a8b568143065e8361fc695a044c69d541..232db733a843dfaf48b53865275e123b7ef67259 100644
--- a/src/pystencils/display_utils.py
+++ b/src/pystencils/display_utils.py
@@ -48,7 +48,7 @@ def get_code_obj(ast: KernelWrapper | Kernel, custom_backend=None):
     Can either be displayed as HTML in Jupyter notebooks or printed as normal string.
     """
     if isinstance(ast, KernelWrapper):
-        func = ast.kernel_function
+        func = ast.kernel
     else:
         func = ast
 
diff --git a/src/pystencils/jit/__init__.py b/src/pystencils/jit/__init__.py
index 3ae63fa721a4a70340bf7dd88f5a203fb6c2da66..7c39cf9f9aba87272069d3cc22ae24e2ab497a8a 100644
--- a/src/pystencils/jit/__init__.py
+++ b/src/pystencils/jit/__init__.py
@@ -23,7 +23,6 @@ It is due to be replaced in the near future.
 """
 
 from .jit import JitBase, NoJit, KernelWrapper
-from .legacy_cpu import LegacyCpuJit
 from .cpu import CpuJit
 from .gpu_cupy import CupyJit, CupyKernelWrapper, LaunchGrid
 
@@ -33,7 +32,6 @@ no_jit = NoJit()
 __all__ = [
     "JitBase",
     "KernelWrapper",
-    "LegacyCpuJit",
     "CpuJit",
     "NoJit",
     "no_jit",
diff --git a/src/pystencils/jit/cpu/compiler_info.py b/src/pystencils/jit/cpu/compiler_info.py
index 061f37af50e9903707f823b98c390b89230efc33..64c6bbe7fef2776abfe4fdaba0006e63f8462327 100644
--- a/src/pystencils/jit/cpu/compiler_info.py
+++ b/src/pystencils/jit/cpu/compiler_info.py
@@ -16,14 +16,15 @@ class CompilerInfo(ABC):
     optlevel: str | None = "fast"
     """Compiler optimization level"""
 
-    cxx_standard: str = "c++11"
+    cxx_standard: str = "c++14"
     """C++ language standard to be compiled with"""
 
     target: Target = Target.CurrentCPU
     """Hardware target to compile for.
     
-    Here, `Target.CurrentCPU` represents the current hardware,
-    which is reflected by ``-march=native`` in GNU-like compilers.
+    The value of ``target`` is used to set the ``-march`` compiler
+    option (or equivalent).
+    `Target.CurrentCPU` translates to ``-march=native``.
     """
 
     @abstractmethod
@@ -46,6 +47,22 @@ class CompilerInfo(ABC):
     def restrict_qualifier(self) -> str:
         """*restrict* memory qualifier recognized by this compiler"""
 
+    @staticmethod
+    def get_default() -> CompilerInfo:
+        import platform
+
+        sysname = platform.system()
+        match sysname.lower():
+            case "linux":
+                #   Use GCC on Linux
+                return GccInfo()
+            case "darwin":
+                return AppleClangInfo()
+            case _:
+                raise RuntimeError(
+                    f"Cannot determine compiler information for platform {sysname}"
+                )
+
 
 class _GnuLikeCliCompiler(CompilerInfo):
     def cxxflags(self) -> list[str]:
@@ -70,7 +87,7 @@ class _GnuLikeCliCompiler(CompilerInfo):
                 flags += ["-march=x86-64-v4", "-mavx512fp16"]
 
         return flags
-    
+
     def linker_flags(self) -> list[str]:
         return ["-shared"]
 
@@ -91,15 +108,14 @@ class GccInfo(_GnuLikeCliCompiler):
 @dataclass
 class ClangInfo(_GnuLikeCliCompiler):
     """Compiler info for the LLVM C++ compiler (``clang``)."""
-    
-    llvm_version: int | None = None
-    """Major version number of the LLVM installation providing the compiler."""
 
     def cxx(self) -> str:
-        if self.llvm_version is None:
-            return "clang"
-        else:
-            return f"clang-{self.llvm_version}"
-        
-    def linker_flags(self) -> list[str]:
-        return super().linker_flags() + ["-lstdc++"]
+        return "clang++"
+
+
+@dataclass
+class AppleClangInfo(ClangInfo):
+    """Compiler info for the Apple Clang compiler."""
+
+    def cxxflags(self) -> list[str]:
+        return super().cxxflags() + ["-Xclang"]
diff --git a/src/pystencils/jit/cpu/cpu_kernel_module.tmpl.cpp b/src/pystencils/jit/cpu/cpu_kernel_module.tmpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..80b52109fc6d17d51b6512d9ea882ba293ac6d0f
--- /dev/null
+++ b/src/pystencils/jit/cpu/cpu_kernel_module.tmpl.cpp
@@ -0,0 +1,233 @@
+#define PY_SSIZE_T_CLEAN
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+
+#include <Python.h>
+#include <numpy/ndarrayobject.h>
+
+#include <array>
+#include <vector>
+#include <string>
+#include <sstream>
+#include <utility>
+
+${includes}
+
+#define RESTRICT ${restrict_qualifier}
+
+namespace internal {
+
+${kernel_definition}
+
+}
+
+struct PythonError {};
+
+struct KernelModuleError {
+    PyObject * exception;
+    std::string msg;
+};
+
+
+/**
+ * RAII proxy for a NumPy array object.
+ * Supports move, but not copy construction.
+ */
+template< typename T >
+struct ArrayProxy {
+private:
+    //  Don't forget to adapt move constructor / assignment!
+    PyArrayObject * arr_ { nullptr }; // owned by this instance -> decref in destructor
+    size_t itemsize_;
+
+    ArrayProxy(PyArrayObject * array, size_t itemsize) : arr_{array}, itemsize_{itemsize} {}
+
+public:
+    static ArrayProxy< T > fromPyObject(const std::string& name, PyObject * obj, int ndim, int typeno, size_t itemsize = sizeof(T)){
+        if(!PyArray_Check(obj)){
+            throw KernelModuleError { PyExc_TypeError, "Invalid array argument" };
+        }
+        auto array_object = reinterpret_cast< PyArrayObject * >(PyArray_FromArray(reinterpret_cast< PyArrayObject * >(obj), NULL, 0));
+
+        ArrayProxy< T > proxy { array_object, itemsize };
+
+        if( PyArray_TYPE(proxy.arr_) != typeno){
+            std::stringstream err;
+            err << "Invalid element type of array argument " << name;
+            throw KernelModuleError { PyExc_TypeError, err.str() };
+        }
+
+        return std::move(proxy);
+    }
+
+    ArrayProxy(const ArrayProxy &) = delete;
+    ArrayProxy(ArrayProxy && other) : arr_( std::exchange(other.arr_, nullptr) ), itemsize_{ other.itemsize_ } {}
+
+    ArrayProxy& operator=(const ArrayProxy &) = delete;
+    ArrayProxy& operator=(ArrayProxy && other) {
+        std::swap(arr_, other.arr_);
+        this->itemsize_ = other.itemsize_;
+        return *this;
+    }
+
+    ~ArrayProxy() {
+        Py_XDECREF(arr_);
+    }
+
+    T * data() {
+        T * ptr = (T*) PyArray_DATA(arr_);
+        return ptr;
+    }
+
+    size_t ndim() const {
+        return static_cast< size_t >(PyArray_NDIM(arr_));
+    } 
+
+    template< typename index_type = ssize_t >
+    index_type shape(size_t c) const {
+        return static_cast< index_type >(PyArray_DIM(arr_, c));
+    }
+
+    template< typename index_type = ssize_t >
+    index_type stride(size_t c) const {
+        return static_cast< index_type >(PyArray_STRIDE(arr_, c) / itemsize_);
+    }
+};
+
+
+template< typename T, int TYPENO >
+T scalarFromPyObject(PyObject * obj, std::string name = ""){
+    //  obj must be a NumPy or Python scalar
+    if(!PyArray_IsAnyScalar(obj)){
+        std::stringstream err;
+        err << "Invalid type of scalar kernel argument " << name;
+        throw KernelModuleError { PyExc_TypeError, err.str() };
+    }
+
+    //  Convert the given object to the desired NumPy array scalar type
+    PyArray_Descr * dtype = PyArray_DescrFromType(TYPENO);
+    PyObject * arrayScalar = PyObject_CallOneArg((PyObject *) dtype->typeobj, obj);
+    Py_DECREF(dtype);
+    
+    //  Check if cast was successful
+    if( arrayScalar == NULL ){
+        throw PythonError{};
+    }
+
+    //  Extract as C type
+    T val;
+    PyArray_ScalarAsCtype(arrayScalar, &val);
+    Py_DECREF(arrayScalar);
+    return val;
+}
+
+template< typename T >
+void checkFieldShape(const std::string& name, const std::string& expected, const ArrayProxy< T > & arr, size_t coord, ssize_t desired) {
+    if(arr.ndim() <= coord || arr.shape(coord) != desired){
+        std::stringstream err;
+        err << "Invalid shape of array argument '" << name
+            << "': expected " << expected << ".";
+        throw KernelModuleError{ PyExc_ValueError, err.str() };
+    }
+}
+
+template< typename T >
+void checkFieldStride(const std::string& name, const std::string& expected, const ArrayProxy< T > & arr, size_t coord, ssize_t desired) {
+    if(arr.ndim() <= coord || arr.stride(coord) != desired){
+        std::stringstream err;
+        err << "Invalid stride of array argument '" << name
+            << "': expected " << expected << ".";
+        throw KernelModuleError{ PyExc_ValueError, err.str() };
+    }
+}
+
+template< typename T >
+void checkTrivialIndexShape(const std::string& name, const std::string& expected, const ArrayProxy< T > & arr, size_t spatial_rank) {
+    const size_t ndim = arr.ndim();
+    if(ndim > spatial_rank){
+        for(size_t c = spatial_rank; c < ndim; ++c){
+            if(arr.shape(c) != 1) {
+                std::stringstream err;
+                err << "Invalid shape of array argument '" << name
+                    << "': expected " << expected << ".";
+                throw KernelModuleError{ PyExc_ValueError, err.str() };
+            }
+        }
+    }
+}
+
+
+PyObject * getKwarg(PyObject * kwargs, const std::string& key) {
+    PyObject * keyUnicode = PyUnicode_FromString(key.c_str());
+    PyObject * obj = PyDict_GetItemWithError(kwargs, keyUnicode);
+    Py_DECREF(keyUnicode);
+
+    if(obj == NULL) {
+        if( PyErr_Occurred() ){
+            throw PythonError {};
+        } else {
+            std::stringstream err;
+            err << "Missing kernel argument: " << key;
+            throw KernelModuleError{ PyExc_KeyError, err.str() };
+        }
+    }
+
+    return obj;
+}
+
+
+struct KernelArgs_${kernel_name} {
+${argstruct_members}
+
+    KernelArgs_${kernel_name} (PyObject * posargs, PyObject * kwargs){
+        //  Extract borrowed references to required kwargs
+${kernel_kwarg_refs}
+
+        //  Convert arrays to ArrayProxy
+${array_proxy_defs}
+
+        //  Extract scalar kernel arguments
+${extract_kernel_args}
+
+        //  Check preconditions
+${precondition_checks}
+    }
+};
+
+extern "C"
+{
+    static PyObject *
+    invoke(PyObject *module, PyObject *posargs, PyObject * kwargs)
+    {
+        try {
+            KernelArgs_${kernel_name} kernel_args { posargs, kwargs };
+            internal::${kernel_name} ( ${kernel_invocation_args} );
+        } catch (const KernelModuleError & err) {
+            PyErr_SetString(err.exception, err.msg.c_str());
+            return NULL;
+        } catch (const PythonError& err) {
+            //   Error condition was set by Python API - nothing to do
+            return NULL;
+        }
+
+        Py_RETURN_NONE;
+    }
+
+    static PyMethodDef ModuleMethods[] = {
+        {"invoke", (PyCFunction)(void(*)(void)) invoke, METH_VARARGS | METH_KEYWORDS, "Invoke the kernel"},
+        {NULL, NULL, 0, NULL}};
+
+    static PyModuleDef Module = {
+        PyModuleDef_HEAD_INIT,
+        "${module_name}",
+        NULL,
+        -1,
+        ModuleMethods
+    };
+
+    PyMODINIT_FUNC
+    PyInit_${module_name} (void){
+        import_array();
+
+        return PyModule_Create(&Module);
+    }
+}
\ No newline at end of file
diff --git a/src/pystencils/jit/cpu/cpujit.py b/src/pystencils/jit/cpu/cpujit.py
index bddcc0bd9884b050e6362d64e26420bcc72241a0..8472e32912817c97e2111b093af0a5fa641d0825 100644
--- a/src/pystencils/jit/cpu/cpujit.py
+++ b/src/pystencils/jit/cpu/cpujit.py
@@ -10,15 +10,15 @@ from ...codegen.config import _AUTO_TYPE, AUTO
 
 from ..jit import JitError, JitBase, KernelWrapper
 from ...codegen import Kernel
-from .compiler_info import CompilerInfo, GccInfo
+from .compiler_info import CompilerInfo
 
 
 class CpuJit(JitBase):
     """Just-in-time compiler for CPU kernels.
 
-    **Creation**
-    
-    To configure and create a CPU JIT compiler instance, use the `create` factory method.
+    The `CpuJit` turns pystencils `Kernel` objects into executable Python functions
+    by wrapping them in a C++ extension module with glue code to the Python and NumPy API.
+    That module is then compiled by a host compiler and dynamically loaded into the Python session.
 
     **Implementation Details**
 
@@ -29,32 +29,23 @@ class CpuJit(JitBase):
     - The *compiler info* describes the host compiler used to compile and link that extension module.
 
     Args:
-        compiler_info: The compiler info object defining the capabilities
-            and command-line interface of the host compiler
-        ext_module_builder: Extension module builder object used to generate the kernel extension module
-        objcache: Directory to cache the generated code files and compiled modules in.
-            If `None`, a temporary directory will be used, and compilation results will not be cached.
+        compiler_info: Compiler info object defining capabilities and interface of the host compiler.
+            If `None`, a default compiler configuration will be determined from the current OS and runtime
+            environment.
+        objcache: Directory used for caching compilation results.
+            If set to `AUTO`, a persistent cache directory in the current user's home will be used.
+            If set to `None`, compilation results will not be cached — this may impact performance.
+        module_builder: Optionally, an extension module builder to be used by the JIT compiler.
+            When left at `None`, the default implementation will be used.
     """
 
-    @staticmethod
-    def create(
+    def __init__(
+        self,
         compiler_info: CompilerInfo | None = None,
         objcache: str | Path | _AUTO_TYPE | None = AUTO,
-    ) -> CpuJit:
-        """Configure and create a CPU JIT compiler object.
-        
-        Args:
-            compiler_info: Compiler info object defining capabilities and interface of the host compiler.
-                If `None`, a default compiler configuration will be determined from the current OS and runtime
-                environment.
-            objcache: Directory used for caching compilation results.
-                If set to `AUTO`, a persistent cache directory in the current user's home will be used.
-                If set to `None`, compilation results will not be cached--this may impact performance.
-
-        Returns:
-            The CPU just-in-time compiler.
-        """
-        
+        *,
+        module_builder: ExtensionModuleBuilderBase | None = None
+    ):
         if objcache is AUTO:
             from appdirs import AppDirs
 
@@ -65,24 +56,15 @@ class CpuJit(JitBase):
             objcache = Path(objcache)
 
         if compiler_info is None:
-            compiler_info = GccInfo()
-
-        from .cpujit_pybind11 import Pybind11KernelModuleBuilder
-
-        modbuilder = Pybind11KernelModuleBuilder(compiler_info)
+            compiler_info = CompilerInfo.get_default()
 
-        return CpuJit(compiler_info, modbuilder, objcache)
+        if module_builder is None:
+            from .default_module_builder import DefaultExtensionModuleBuilder
+            module_builder = DefaultExtensionModuleBuilder(compiler_info)
 
-    def __init__(
-        self,
-        compiler_info: CompilerInfo,
-        ext_module_builder: ExtensionModuleBuilderBase,
-        objcache: Path | None,
-    ):
         self._compiler_info = copy(compiler_info)
-        self._ext_module_builder = ext_module_builder
-
         self._objcache = objcache
+        self._ext_module_builder = module_builder
 
         #   Include Directories
 
@@ -194,7 +176,11 @@ class ExtensionModuleBuilderBase(ABC):
     @abstractmethod
     def include_dirs() -> list[str]:
         """List of directories that must be on the include path when compiling
-        generated extension modules."""
+        generated extension modules.
+        
+        The Python runtime include directory and the pystencils include directory
+        need not be listed here.
+        """
 
     @abstractmethod
     def render_module(self, kernel: Kernel, module_name: str) -> str:
diff --git a/src/pystencils/jit/cpu/cpujit_pybind11.py b/src/pystencils/jit/cpu/cpujit_pybind11.py
deleted file mode 100644
index 90224b22b7405f7b48d4897071ee32b8fc7684c1..0000000000000000000000000000000000000000
--- a/src/pystencils/jit/cpu/cpujit_pybind11.py
+++ /dev/null
@@ -1,173 +0,0 @@
-from __future__ import annotations
-
-from types import ModuleType
-from typing import Sequence, cast
-from pathlib import Path
-from textwrap import indent
-
-from pystencils.jit.jit import KernelWrapper
-
-from ...types import PsPointerType, PsType
-from ...field import Field
-from ...sympyextensions import DynamicType
-from ...codegen import Kernel, Parameter
-from ...codegen.properties import FieldBasePtr, FieldShape, FieldStride
-
-from .compiler_info import CompilerInfo
-from .cpujit import ExtensionModuleBuilderBase
-
-
-_module_template = Path(__file__).parent / "pybind11_kernel_module.tmpl.cpp"
-
-
-class Pybind11KernelModuleBuilder(ExtensionModuleBuilderBase):
-    @staticmethod
-    def include_dirs() -> list[str]:
-        import pybind11 as pb11
-
-        pybind11_include = pb11.get_include()
-        return [pybind11_include]
-
-    def __init__(
-        self,
-        compiler_info: CompilerInfo,
-    ):
-        self._compiler_info = compiler_info
-
-        self._actual_field_types: dict[Field, PsType]
-        self._param_binds: list[str]
-        self._public_params: list[str]
-        self._param_check_lines: list[str]
-        self._extraction_lines: list[str]
-
-    def render_module(self, kernel: Kernel, module_name: str) -> str:
-        self._actual_field_types = dict()
-        self._param_binds = []
-        self._public_params = []
-        self._param_check_lines = []
-        self._extraction_lines = []
-
-        self._handle_params(kernel.parameters)
-
-        kernel_def = self._get_kernel_definition(kernel)
-        kernel_args = [param.name for param in kernel.parameters]
-        includes = [f"#include {h}" for h in sorted(kernel.required_headers)]
-
-        from string import Template
-
-        templ = Template(_module_template.read_text())
-        code_str = templ.substitute(
-            includes="\n".join(includes),
-            restrict_qualifier=self._compiler_info.restrict_qualifier(),
-            module_name=module_name,
-            kernel_name=kernel.name,
-            param_binds=", ".join(self._param_binds),
-            public_params=", ".join(self._public_params),
-            param_check_lines=indent("\n".join(self._param_check_lines), prefix="    "),
-            extraction_lines=indent("\n".join(self._extraction_lines), prefix="    "),
-            kernel_args=", ".join(kernel_args),
-            kernel_definition=kernel_def,
-        )
-        return code_str
-    
-    def get_wrapper(self, kernel: Kernel, extension_module: ModuleType) -> KernelWrapper:
-        return Pybind11KernelWrapper(kernel, extension_module)
-
-    def _get_kernel_definition(self, kernel: Kernel) -> str:
-        from ...backend.emission import CAstPrinter
-
-        printer = CAstPrinter()
-
-        return printer(kernel)
-
-    def _add_field_param(self, ptr_param: Parameter):
-        field: Field = ptr_param.fields[0]
-
-        ptr_type = ptr_param.dtype
-        assert isinstance(ptr_type, PsPointerType)
-
-        if isinstance(field.dtype, DynamicType):
-            elem_type = ptr_type.base_type
-        else:
-            elem_type = field.dtype
-
-        self._actual_field_types[field] = elem_type
-
-        param_bind = f'py::arg("{field.name}").noconvert()'
-        self._param_binds.append(param_bind)
-
-        kernel_param = f"py::array_t< {elem_type.c_string()} > & {field.name}"
-        self._public_params.append(kernel_param)
-
-        expect_shape = "(" + ", ".join((str(s) if isinstance(s, int) else "*") for s in field.shape) + ")"
-        for coord, size in enumerate(field.shape):
-            if isinstance(size, int):
-                self._param_check_lines.append(
-                    f"checkFieldShape(\"{field.name}\", \"{expect_shape}\", {field.name}, {coord}, {size});"
-                )
-
-        expect_strides = "(" + ", ".join((str(s) if isinstance(s, int) else "*") for s in field.strides) + ")"
-        for coord, stride in enumerate(field.strides):
-            if isinstance(stride, int):
-                self._param_check_lines.append(
-                    f"checkFieldStride(\"{field.name}\", \"{expect_strides}\", {field.name}, {coord}, {stride});"
-                )
-
-    def _add_scalar_param(self, sc_param: Parameter):
-        param_bind = f'py::arg("{sc_param.name}")'
-        self._param_binds.append(param_bind)
-
-        kernel_param = f"{sc_param.dtype.c_string()} {sc_param.name}"
-        self._public_params.append(kernel_param)
-
-    def _extract_base_ptr(self, ptr_param: Parameter, ptr_prop: FieldBasePtr):
-        field_name = ptr_prop.field.name
-        assert isinstance(ptr_param.dtype, PsPointerType)
-        data_method = "data()" if ptr_param.dtype.base_type.const else "mutable_data()"
-        extraction = f"{ptr_param.dtype.c_string()} {ptr_param.name} {{ {field_name}.{data_method} }};"
-        self._extraction_lines.append(extraction)
-
-    def _extract_shape(self, shape_param: Parameter, shape_prop: FieldShape):
-        field_name = shape_prop.field.name
-        coord = shape_prop.coordinate
-        extraction = f"{shape_param.dtype.c_string()} {shape_param.name} {{ {field_name}.shape({coord}) }};"
-        self._extraction_lines.append(extraction)
-
-    def _extract_stride(self, stride_param: Parameter, stride_prop: FieldStride):
-        field = stride_prop.field
-        field_name = field.name
-        coord = stride_prop.coordinate
-        field_type = self._actual_field_types[field]
-        assert field_type.itemsize is not None
-        extraction = (
-            f"{stride_param.dtype.c_string()} {stride_param.name} "
-            f"{{ {field_name}.strides({coord}) / {field_type.itemsize} }};"
-        )
-        self._extraction_lines.append(extraction)
-
-    def _handle_params(self, parameters: Sequence[Parameter]):
-        for param in parameters:
-            if param.get_properties(FieldBasePtr):
-                self._add_field_param(param)
-
-        for param in parameters:
-            if ptr_props := param.get_properties(FieldBasePtr):
-                self._extract_base_ptr(param, cast(FieldBasePtr, ptr_props.pop()))
-            elif shape_props := param.get_properties(FieldShape):
-                self._extract_shape(param, cast(FieldShape, shape_props.pop()))
-            elif stride_props := param.get_properties(FieldStride):
-                self._extract_stride(param, cast(FieldStride, stride_props.pop()))
-            else:
-                self._add_scalar_param(param)
-
-
-class Pybind11KernelWrapper(KernelWrapper):
-    def __init__(self, kernel: Kernel, jit_module: ModuleType):
-        super().__init__(kernel)
-        self._module = jit_module
-        self._check_params = getattr(jit_module, "check_params")
-        self._invoke = getattr(jit_module, "invoke")
-
-    def __call__(self, **kwargs) -> None:
-        self._check_params(**kwargs)
-        return self._invoke(**kwargs)
diff --git a/src/pystencils/jit/cpu/default_module_builder.py b/src/pystencils/jit/cpu/default_module_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..02fa0c9f8f0af20fe8793a9417f8d22f66e52a0d
--- /dev/null
+++ b/src/pystencils/jit/cpu/default_module_builder.py
@@ -0,0 +1,305 @@
+from __future__ import annotations
+
+from types import ModuleType
+from typing import cast
+from pathlib import Path
+from dataclasses import dataclass, field
+from textwrap import indent
+
+from pystencils.jit.jit import KernelWrapper
+
+from ...types import (
+    PsPointerType,
+    PsType,
+    deconstify,
+    PsStructType,
+    PsUnsignedIntegerType,
+)
+from ...field import Field
+from ...sympyextensions import DynamicType
+from ...codegen import Kernel, Parameter
+from ...codegen.properties import FieldBasePtr, FieldShape, FieldStride
+
+from .compiler_info import CompilerInfo
+from .cpujit import ExtensionModuleBuilderBase
+from ..error import JitError
+
+import numpy as np
+
+
+_module_template = Path(__file__).parent / "cpu_kernel_module.tmpl.cpp"
+
+
+class DefaultExtensionModuleBuilder(ExtensionModuleBuilderBase):
+
+    @dataclass
+    class ParamExtraction:
+        actual_field_types: dict[Field, PsType] = field(default_factory=dict)
+
+        argstruct_members: list[str] = field(default_factory=list)
+
+        kernel_kwarg_refs: list[str] = field(default_factory=list)
+        array_proxy_defs: list[str] = field(default_factory=list)
+
+        extract_kernel_args: list[str] = field(default_factory=list)
+        precondition_checks: list[str] = field(default_factory=list)
+        kernel_invocation_args: list[str] = field(default_factory=list)
+
+        def substitutions(self) -> dict[str, str]:
+            t = "    "
+            tt = 2 * t
+
+            return {
+                "argstruct_members": indent(
+                    "\n".join(self.argstruct_members), prefix=t
+                ),
+                "kernel_kwarg_refs": indent(
+                    "\n".join(self.kernel_kwarg_refs), prefix=tt
+                ),
+                "array_proxy_defs": indent("\n".join(self.array_proxy_defs), prefix=tt),
+                "extract_kernel_args": indent(
+                    "\n".join(self.extract_kernel_args), prefix=tt
+                ),
+                "precondition_checks": indent(
+                    "\n".join(self.precondition_checks), prefix=tt
+                ),
+                "kernel_invocation_args": ", ".join(self.kernel_invocation_args),
+            }
+
+        def add_array_for_field(self, ptr_param: Parameter):
+            field: Field = ptr_param.fields[0]
+
+            ptr_type = ptr_param.dtype
+            assert isinstance(ptr_type, PsPointerType)
+
+            if isinstance(field.dtype, DynamicType):
+                elem_type = ptr_type.base_type
+            else:
+                elem_type = field.dtype
+
+            self.actual_field_types[field] = elem_type
+
+            parg_name = self.add_kwarg(field.name)
+            self._init_array_proxy(field.name, elem_type, len(field.shape), parg_name)
+
+        def add_pointer_param(self, ptr_param: Parameter):
+            ptr_type = ptr_param.dtype
+            assert isinstance(ptr_type, PsPointerType)
+
+            parg_name = self.add_kwarg(ptr_param.name)
+            proxy_name = self._init_array_proxy(
+                ptr_param.name, ptr_type.base_type, 1, parg_name
+            )
+            self._add_kernel_argument(ptr_param, f"{proxy_name}.data()")
+
+        def _array_proxy_name(self, name: str) -> str:
+            return f"array_proxy_{name}"
+
+        def _init_array_proxy(
+            self,
+            name: str,
+            dtype: PsType,
+            ndim: int,
+            pyobj: str,
+            itemsize: int | None = None,
+        ) -> str:
+            proxy_name = self._array_proxy_name(name)
+            elem_type = deconstify(dtype)
+            typeno = self._typeno(elem_type)
+
+            if itemsize is None:
+                itemsize = dtype.itemsize
+
+            proxy_ctor_args = [f'"{name}"', pyobj, str(ndim), typeno]
+
+            if itemsize is not None:
+                proxy_ctor_args.append(str(itemsize))
+
+            #   Anonymous structs lowered to uint8
+            if isinstance(elem_type, PsStructType) and elem_type.anonymous:
+                elem_type = PsUnsignedIntegerType(8, const=elem_type.const)
+
+            self.array_proxy_defs.append(
+                f"ArrayProxy< {elem_type.c_string()} > {proxy_name} = "
+                f"ArrayProxy< {elem_type.c_string()} >::fromPyObject( {', '.join(proxy_ctor_args)} ) ;"
+            )
+            return proxy_name
+
+        def add_kwarg(self, name: str) -> str:
+            kwarg_name = f"_ref_{name}"
+            self.kernel_kwarg_refs.append(
+                f'PyObject * {kwarg_name} = getKwarg(kwargs, "{name}");'
+            )
+            return kwarg_name
+
+        def _add_kernel_argument(self, param: Parameter, extraction: str):
+            self.argstruct_members.append(
+                f"{deconstify(param.dtype).c_string()} {param.name};"
+            )
+            self.kernel_invocation_args.append(f"kernel_args.{param.name}")
+            self.extract_kernel_args.append(f"{param.name} = {extraction};")
+            # f'std::cout << "{param.name} = " << {param.name} << std::endl;')
+
+        def add_field_base_pointer(self, param: Parameter, ptr_prop: FieldBasePtr):
+            field_name = ptr_prop.field.name
+            proxy_name = self._array_proxy_name(field_name)
+
+            self._add_kernel_argument(param, f"{proxy_name}.data()")
+
+        def add_scalar_param(self, param: Parameter):
+            parg_name = self.add_kwarg(param.name)
+            stype = deconstify(param.dtype)
+            typeno = self._typeno(stype)
+
+            self._add_kernel_argument(
+                param,
+                f'scalarFromPyObject< {stype.c_string()}, {typeno} > ({parg_name}, "{param.name}")',
+            )
+
+        def add_shape_param(self, param: Parameter, shape_prop: FieldShape):
+            field_name = shape_prop.field.name
+            proxy_name = self._array_proxy_name(field_name)
+            stype = deconstify(param.dtype)
+
+            self._add_kernel_argument(
+                param,
+                f"{proxy_name}.shape< {stype.c_string()} >({shape_prop.coordinate})",
+            )
+
+        def add_stride_param(self, param: Parameter, stride_prop: FieldStride):
+            field_name = stride_prop.field.name
+            proxy_name = self._array_proxy_name(field_name)
+            stype = deconstify(param.dtype)
+            actual_type = self.actual_field_types[stride_prop.field]
+            itemsize = actual_type.itemsize
+            if itemsize is None:
+                raise JitError(
+                    f"Cannot compute array strides for element type of unknown width: {actual_type}"
+                )
+
+            self._add_kernel_argument(
+                param,
+                f"{proxy_name}.stride< {stype.c_string()} > ({stride_prop.coordinate})",
+            )
+
+        def check_fixed_shape_and_strides(self, field: Field):
+            proxy_name = self._array_proxy_name(field.name)
+            expect_shape = (
+                "("
+                + ", ".join(
+                    (str(s) if isinstance(s, int) else "*") for s in field.shape
+                )
+                + ")"
+            )
+            field_shape = field.spatial_shape
+            #   Scalar fields may omit their trivial index dimension
+            if field.index_shape not in ((), (1,)):
+                field_shape += field.index_shape
+                scalar_field = False
+            else:
+                scalar_field = True
+
+            for coord, size in enumerate(field_shape):
+                if isinstance(size, int):
+                    self.precondition_checks.append(
+                        f'checkFieldShape("{field.name}", "{expect_shape}", {proxy_name}, {coord}, {size});'
+                    )
+
+            if scalar_field:
+                self.precondition_checks.append(
+                    f'checkTrivialIndexShape("{field.name}", "{expect_shape}", {proxy_name}, {len(field_shape)});'
+                )
+
+            expect_strides = (
+                "("
+                + ", ".join(
+                    (str(s) if isinstance(s, int) else "*") for s in field.strides
+                )
+                + ")"
+            )
+            for coord, stride in enumerate(field.strides[: len(field_shape)]):
+                if isinstance(stride, int):
+                    self.precondition_checks.append(
+                        f'checkFieldStride("{field.name}", "{expect_strides}", {proxy_name}, {coord}, {stride});'
+                    )
+
+        @staticmethod
+        def _typeno(dtype: PsType):
+            if dtype.numpy_dtype is None:
+                raise JitError(f"Cannot get typeno for non-numpy type {dtype}")
+            npname = dtype.numpy_dtype.name.upper()
+            if npname.startswith("VOID"):
+                npname = "VOID"  # for struct types
+            return f"NPY_{npname}"
+
+    @staticmethod
+    def include_dirs() -> list[str]:
+        return [np.get_include()]
+
+    def __init__(self, compiler_info: CompilerInfo):
+        self._compiler_info = compiler_info
+
+    def render_module(self, kernel: Kernel, module_name: str) -> str:
+        extr = self._handle_params(kernel)
+        kernel_def = self._get_kernel_definition(kernel)
+        includes = [f"#include {h}" for h in sorted(kernel.required_headers)]
+
+        from string import Template
+
+        templ = Template(_module_template.read_text())
+        code_str = templ.substitute(
+            includes="\n".join(includes),
+            restrict_qualifier=self._compiler_info.restrict_qualifier(),
+            module_name=module_name,
+            kernel_name=kernel.name,
+            kernel_definition=kernel_def,
+            **extr.substitutions(),
+        )
+        return code_str
+
+    def get_wrapper(
+        self, kernel: Kernel, extension_module: ModuleType
+    ) -> KernelWrapper:
+        return DefaultCpuKernelWrapper(kernel, extension_module)
+
+    def _get_kernel_definition(self, kernel: Kernel) -> str:
+        from ...backend.emission import CAstPrinter
+
+        printer = CAstPrinter()
+
+        return printer(kernel)
+
+    def _handle_params(self, kernel: Kernel) -> ParamExtraction:
+        parameters = kernel.parameters
+        extr = self.ParamExtraction()
+
+        for param in parameters:
+            if param.get_properties(FieldBasePtr):
+                extr.add_array_for_field(param)
+
+        for param in parameters:
+            if ptr_props := param.get_properties(FieldBasePtr):
+                extr.add_field_base_pointer(param, cast(FieldBasePtr, ptr_props.pop()))
+            elif shape_props := param.get_properties(FieldShape):
+                extr.add_shape_param(param, cast(FieldShape, shape_props.pop()))
+            elif stride_props := param.get_properties(FieldStride):
+                extr.add_stride_param(param, cast(FieldStride, stride_props.pop()))
+            elif isinstance(param.dtype, PsPointerType):
+                extr.add_pointer_param(param)
+            else:
+                extr.add_scalar_param(param)
+
+        for f in kernel.get_fields():
+            extr.check_fixed_shape_and_strides(f)
+
+        return extr
+
+
+class DefaultCpuKernelWrapper(KernelWrapper):
+    def __init__(self, kernel: Kernel, jit_module: ModuleType):
+        super().__init__(kernel)
+        self._module = jit_module
+        self._invoke = getattr(jit_module, "invoke")
+
+    def __call__(self, **kwargs) -> None:
+        return self._invoke(**kwargs)
diff --git a/src/pystencils/jit/cpu/pybind11_kernel_module.tmpl.cpp b/src/pystencils/jit/cpu/pybind11_kernel_module.tmpl.cpp
deleted file mode 100644
index ef945586f460298395642776ced79991ed3e626b..0000000000000000000000000000000000000000
--- a/src/pystencils/jit/cpu/pybind11_kernel_module.tmpl.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-#include "pybind11/pybind11.h"
-#include "pybind11/numpy.h"
-
-#include <array>
-#include <string>
-#include <sstream>
-
-${includes}
-
-namespace py = pybind11;
-
-#define RESTRICT ${restrict_qualifier}
-
-namespace internal {
-
-${kernel_definition}
-
-}
-
-std::string tuple_to_str(const ssize_t * data, const size_t N){
-    std::stringstream acc;
-    acc << "(";
-    for(size_t i = 0; i < N; ++i){
-        acc << data[i];
-        if(i + 1 < N){
-            acc << ", ";
-        }
-    }
-    acc << ")";
-    return acc.str();
-}
-
-template< typename T >
-void checkFieldShape(const std::string& fieldName, const std::string& expected, const py::array_t< T > & arr, size_t coord, size_t desired) {
-    auto panic = [&](){
-        std::stringstream err;
-        err << "Invalid shape of argument " << fieldName
-            << ". Expected " << expected
-            << ", but got " << tuple_to_str(arr.shape(), arr.ndim())
-            << ".";
-        throw py::value_error{ err.str() };
-    };
-    
-    if(arr.ndim() <= coord){
-        panic();
-    }
-
-    if(arr.shape(coord) != desired){
-        panic();
-    }
-}
-
-template< typename T >
-void checkFieldStride(const std::string fieldName, const std::string& expected, const py::array_t< T > & arr, size_t coord, size_t desired) {
-    auto panic = [&](){
-        std::stringstream err;
-        err << "Invalid strides of argument " << fieldName 
-            << ". Expected " << expected
-            << ", but got " << tuple_to_str(arr.strides(), arr.ndim())
-            << ".";
-        throw py::value_error{ err.str() };
-    };
-    
-    if(arr.ndim() <= coord){
-        panic();
-    }
-
-    if(arr.strides(coord) / sizeof(T) != desired){
-        panic();
-    }
-}
-
-void check_params_${kernel_name} (${public_params}) {
-${param_check_lines}
-}
-
-void run_${kernel_name} (${public_params}) {
-${extraction_lines}
-    internal::${kernel_name}(${kernel_args});
-}
-
-PYBIND11_MODULE(${module_name}, m) {
-    m.def("check_params", &check_params_${kernel_name}, py::kw_only(), ${param_binds});
-    m.def("invoke", &run_${kernel_name}, py::kw_only(), ${param_binds});
-}
diff --git a/src/pystencils/jit/cpu_extension_module.py b/src/pystencils/jit/cpu_extension_module.py
deleted file mode 100644
index 4d76ea9ca1d75bc2b2ca2d77976b42c5f16b240c..0000000000000000000000000000000000000000
--- a/src/pystencils/jit/cpu_extension_module.py
+++ /dev/null
@@ -1,402 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, cast
-
-from os import path
-import hashlib
-from itertools import chain
-from textwrap import indent
-
-import numpy as np
-
-from ..codegen import (
-    Kernel,
-    Parameter,
-)
-from ..codegen.properties import FieldBasePtr, FieldShape, FieldStride
-from ..types import (
-    PsType,
-    PsUnsignedIntegerType,
-    PsSignedIntegerType,
-    PsIeeeFloatType,
-    PsPointerType,
-)
-from ..types.quick import Fp, SInt, UInt
-from ..field import Field
-
-
-class PsKernelExtensioNModule:
-    """Replacement for `pystencils.cpu.cpujit.ExtensionModuleCode`.
-    Conforms to its interface for plug-in to `compile_and_load`.
-    """
-
-    def __init__(
-        self, module_name: str = "generated", custom_backend: Any = None
-    ) -> None:
-        self._module_name = module_name
-
-        if custom_backend is not None:
-            raise Exception(
-                "The `custom_backend` parameter exists only for interface compatibility and cannot be set."
-            )
-
-        self._kernels: dict[str, Kernel] = dict()
-        self._code_string: str | None = None
-        self._code_hash: str | None = None
-
-    @property
-    def module_name(self) -> str:
-        return self._module_name
-
-    def add_function(self, kernel_function: Kernel, name: str | None = None):
-        if name is None:
-            name = kernel_function.name
-
-        self._kernels[name] = kernel_function
-
-    def create_code_string(self, restrict_qualifier: str, function_prefix: str):
-        code = ""
-
-        #   Collect headers
-        headers = {"<stdint.h>"}
-        for kernel in self._kernels.values():
-            headers |= kernel.required_headers
-
-        header_list = sorted(headers)
-        header_list.insert(0, '"Python.h"')
-
-        from pystencils.include import get_pystencils_include_path
-
-        ps_incl_path = get_pystencils_include_path()
-
-        ps_headers = []
-        for header in header_list:
-            header = header[1:-1]
-            header_path = path.join(ps_incl_path, header)
-            if path.exists(header_path):
-                ps_headers.append(header_path)
-
-        header_hash = b"".join(
-            [hashlib.sha256(open(h, "rb").read()).digest() for h in ps_headers]
-        )
-
-        #   Prelude: Includes and definitions
-
-        includes = "\n".join(f"#include {header}" for header in header_list)
-
-        code += includes
-        code += "\n"
-        code += f"#define RESTRICT {restrict_qualifier}\n"
-        code += f"#define FUNC_PREFIX {function_prefix}\n"
-        code += "\n"
-
-        #   Kernels and call wrappers
-        from ..backend.emission import CAstPrinter
-
-        printer = CAstPrinter(func_prefix="FUNC_PREFIX")
-
-        for name, kernel in self._kernels.items():
-            old_name = kernel.name
-            kernel.name = f"kernel_{name}"
-
-            code += printer(kernel)
-            code += "\n"
-            code += emit_call_wrapper(name, kernel)
-            code += "\n"
-
-            kernel.name = old_name
-
-        self._code_hash = (
-            "mod_" + hashlib.sha256(code.encode() + header_hash).hexdigest()
-        )
-
-        code += create_module_boilerplate_code(self._code_hash, self._kernels.keys())
-
-        self._code_string = code
-
-    def get_hash_of_code(self):
-        assert self._code_string is not None, "The code must be generated first"
-        return self._code_hash
-
-    def write_to_file(self, file):
-        assert self._code_string is not None, "The code must be generated first"
-        print(self._code_string, file=file)
-
-
-def emit_call_wrapper(function_name: str, kernel: Kernel) -> str:
-    builder = CallWrapperBuilder()
-    builder.extract_params(kernel.parameters)
-
-    # for c in kernel.constraints:
-    #     builder.check_constraint(c)
-
-    builder.call(kernel, kernel.parameters)
-
-    return builder.resolve(function_name)
-
-
-template_module_boilerplate = """
-static PyMethodDef method_definitions[] = {{
-    {method_definitions}
-    {{NULL, NULL, 0, NULL}}
-}};
-
-static struct PyModuleDef module_definition = {{
-    PyModuleDef_HEAD_INIT,
-    "{module_name}",   /* name of module */
-    NULL,     /* module documentation, may be NULL */
-    -1,       /* size of per-interpreter state of the module,
-                 or -1 if the module keeps state in global variables. */
-    method_definitions
-}};
-
-PyMODINIT_FUNC
-PyInit_{module_name}(void)
-{{
-    return PyModule_Create(&module_definition);
-}}
-"""
-
-
-def create_module_boilerplate_code(module_name, names):
-    method_definition = (
-        '{{"{name}", (PyCFunction){name}, METH_VARARGS | METH_KEYWORDS, ""}},'
-    )
-    method_definitions = "\n".join(
-        [method_definition.format(name=name) for name in names]
-    )
-    return template_module_boilerplate.format(
-        module_name=module_name, method_definitions=method_definitions
-    )
-
-
-class CallWrapperBuilder:
-    TMPL_EXTRACT_SCALAR = """
-PyObject * obj_{name} = PyDict_GetItemString(kwargs, "{name}");
-if( obj_{name} == NULL) {{  PyErr_SetString(PyExc_TypeError, "Keyword argument '{name}' missing"); return NULL; }};
-{target_type} {name} = ({target_type}) {extract_function}( obj_{name} );
-if( PyErr_Occurred() ) {{ return NULL; }}
-"""
-
-    TMPL_EXTRACT_ARRAY = """
-PyObject * obj_{name} = PyDict_GetItemString(kwargs, "{name}");
-if( obj_{name} == NULL) {{  PyErr_SetString(PyExc_TypeError, "Keyword argument '{name}' missing"); return NULL; }};
-Py_buffer buffer_{name};
-int buffer_{name}_res = PyObject_GetBuffer(obj_{name}, &buffer_{name}, PyBUF_STRIDES | PyBUF_WRITABLE | PyBUF_FORMAT);
-if (buffer_{name}_res == -1) {{ return NULL; }}
-"""
-
-    TMPL_CHECK_ARRAY_TYPE = """
-if(!({cond})) {{ 
-    PyErr_SetString(PyExc_TypeError, "Wrong {what} of array {name}. Expected {expected}"); 
-    return NULL; 
-}}
-"""
-
-    KWCHECK = """
-if( !kwargs || !PyDict_Check(kwargs) ) {{ 
-    PyErr_SetString(PyExc_TypeError, "No keyword arguments passed"); 
-    return NULL; 
-}}
-"""
-
-    def __init__(self) -> None:
-        self._buffer_types: dict[Any, PsType] = dict()
-        self._array_extractions: dict[Any, str] = dict()
-        self._array_frees: dict[Any, str] = dict()
-
-        self._array_assoc_var_extractions: dict[Parameter, str] = dict()
-        self._scalar_extractions: dict[Parameter, str] = dict()
-
-        self._pointer_extractions: dict[Parameter, str] = dict()
-
-        self._constraint_checks: list[str] = []
-
-        self._call: str | None = None
-
-    def _scalar_extractor(self, dtype: PsType) -> str:
-        match dtype:
-            case Fp(32) | Fp(64):
-                return "PyFloat_AsDouble"
-            case SInt():
-                return "PyLong_AsLong"
-            case UInt():
-                return "PyLong_AsUnsignedLong"
-
-            case _:
-                raise ValueError(f"Don't know how to cast Python objects to {dtype}")
-
-    def _type_char(self, dtype: PsType) -> str | None:
-        if isinstance(
-            dtype, (PsUnsignedIntegerType, PsSignedIntegerType, PsIeeeFloatType)
-        ):
-            np_dtype = dtype.NUMPY_TYPES[dtype.width]
-            return np.dtype(np_dtype).char
-        else:
-            return None
-
-    def get_buffer(self, buffer_name: str) -> str:
-        """Get the Python buffer object for a given buffer name."""
-        return f"buffer_{buffer_name}"
-
-    def get_field_buffer(self, field: Field) -> str:
-        """Get the Python buffer object for the given field."""
-        return self.get_buffer(field.name)
-
-    def extract_buffer(self, buffer: Any, name: str) -> None:
-        """Add the necessary code to extract the NumPy array for a given buffer"""
-        if buffer not in self._array_extractions:
-            extraction_code = self.TMPL_EXTRACT_ARRAY.format(name=name)
-            actual_dtype = self._buffer_types[buffer]
-
-            #   Check array type
-            type_char = self._type_char(actual_dtype)
-            if type_char is not None:
-                dtype_cond = f"buffer_{name}.format[0] == '{type_char}'"
-                extraction_code += self.TMPL_CHECK_ARRAY_TYPE.format(
-                    cond=dtype_cond,
-                    what="data type",
-                    name=name,
-                    expected=str(actual_dtype),
-                )
-
-            #   Check item size
-            itemsize = actual_dtype.itemsize
-            item_size_cond = f"buffer_{name}.itemsize == {itemsize}"
-            extraction_code += self.TMPL_CHECK_ARRAY_TYPE.format(
-                cond=item_size_cond, what="itemsize", name=name, expected=itemsize
-            )
-
-            self._array_extractions[buffer] = extraction_code
-
-            release_code = f"PyBuffer_Release(&buffer_{name});"
-            self._array_frees[buffer] = release_code
-
-    def extract_scalar(self, param: Parameter) -> str:
-        if param not in self._scalar_extractions:
-            extract_func = self._scalar_extractor(param.dtype)
-            code = self.TMPL_EXTRACT_SCALAR.format(
-                name=param.name,
-                target_type=param.dtype.c_string(),
-                extract_function=extract_func,
-            )
-            self._scalar_extractions[param] = code
-
-        return param.name
-
-    def extract_ptr(self, param: Parameter) -> str:
-        if param not in self._pointer_extractions:
-            ptr = param.symbol
-            ptr_dtype = ptr.dtype
-
-            assert isinstance(ptr_dtype, PsPointerType)
-
-            self._buffer_types[ptr] = ptr_dtype.base_type
-            self.extract_buffer(ptr, param.name)
-            buffer = self.get_buffer(param.name)
-            code = (
-                f"{param.dtype.c_string()} {param.name} = ({param.dtype}) {buffer}.buf;"
-            )
-
-            assert code is not None
-
-            self._array_assoc_var_extractions[param] = code
-
-        return param.name
-
-    def extract_array_assoc_var(self, param: Parameter) -> str:
-        if param not in self._array_assoc_var_extractions:
-            field = param.fields[0]
-            buffer = self.get_field_buffer(field)
-            buffer_dtype = self._buffer_types[field]
-            code: str | None = None
-
-            for prop in param.properties:
-                match prop:
-                    case FieldBasePtr():
-                        code = f"{param.dtype.c_string()} {param.name} = ({param.dtype}) {buffer}.buf;"
-                        break
-                    case FieldShape(_, coord):
-                        code = f"{param.dtype.c_string()} {param.name} = {buffer}.shape[{coord}];"
-                        break
-                    case FieldStride(_, coord):
-                        code = (
-                            f"{param.dtype.c_string()} {param.name} = "
-                            f"{buffer}.strides[{coord}] / {buffer_dtype.itemsize};"
-                        )
-                        break
-            assert code is not None
-
-            self._array_assoc_var_extractions[param] = code
-
-        return param.name
-
-    def extract_params(self, params: tuple[Parameter, ...]) -> None:
-        for param in params:
-            if ptr_props := param.get_properties(FieldBasePtr):
-                prop: FieldBasePtr = cast(FieldBasePtr, ptr_props.pop())
-                field = prop.field
-                actual_field_type: PsType
-
-                from .. import DynamicType
-
-                if isinstance(field.dtype, DynamicType):
-                    ptr_type = param.dtype
-                    assert isinstance(ptr_type, PsPointerType)
-                    actual_field_type = ptr_type.base_type
-                else:
-                    actual_field_type = field.dtype
-
-                self._buffer_types[prop.field] = actual_field_type
-                self.extract_buffer(prop.field, field.name)
-
-        for param in params:
-            if param.is_field_parameter:
-                self.extract_array_assoc_var(param)
-            elif isinstance(param.dtype, PsPointerType):
-                self.extract_ptr(param)
-            else:
-                self.extract_scalar(param)
-
-    #     def check_constraint(self, constraint: KernelParamsConstraint):
-    #         variables = constraint.get_parameters()
-
-    #         for var in variables:
-    #             self.extract_parameter(var)
-
-    #         cond = constraint.to_code()
-
-    #         code = f"""
-    # if(!({cond}))
-    # {{
-    #     PyErr_SetString(PyExc_ValueError, "Violated constraint: {constraint}");
-    #     return NULL;
-    # }}
-    # """
-
-    #         self._constraint_checks.append(code)
-
-    def call(self, kernel: Kernel, params: tuple[Parameter, ...]):
-        param_list = ", ".join(p.name for p in params)
-        self._call = f"{kernel.name} ({param_list});"
-
-    def resolve(self, function_name) -> str:
-        assert self._call is not None
-
-        body = "\n\n".join(
-            chain(
-                [self.KWCHECK],
-                self._scalar_extractions.values(),
-                self._array_extractions.values(),
-                self._array_assoc_var_extractions.values(),
-                self._constraint_checks,
-                [self._call],
-                self._array_frees.values(),
-                ["Py_RETURN_NONE;"],
-            )
-        )
-
-        code = f"static PyObject * {function_name}(PyObject * self, PyObject * args, PyObject * kwargs)\n"
-        code += "{\n" + indent(body, prefix="    ") + "\n}\n"
-
-        return code
diff --git a/src/pystencils/jit/error.py b/src/pystencils/jit/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..03bc6e7309bd4b9628bc081104bddafca06570db
--- /dev/null
+++ b/src/pystencils/jit/error.py
@@ -0,0 +1,3 @@
+
+class JitError(Exception):
+    """Indicates an error during just-in-time compilation"""
diff --git a/src/pystencils/jit/gpu_cupy.py b/src/pystencils/jit/gpu_cupy.py
index 89eb3547669991fa78d38e6ff38340465b3cc7de..9b5f9ef3363b508701798276e2df0c27397a4c7d 100644
--- a/src/pystencils/jit/gpu_cupy.py
+++ b/src/pystencils/jit/gpu_cupy.py
@@ -43,7 +43,7 @@ class CupyKernelWrapper(KernelWrapper):
         self._args_cache: dict[Any, tuple] = dict()
 
     @property
-    def kernel_function(self) -> GpuKernel:
+    def kernel(self) -> GpuKernel:
         return self._kfunc
 
     @property
diff --git a/src/pystencils/jit/jit.py b/src/pystencils/jit/jit.py
index 4998c14adfdc810a93d1a1f96cc310ac81c65f5d..49af7173dfae0d1a9b9c655130c069a5f79dff68 100644
--- a/src/pystencils/jit/jit.py
+++ b/src/pystencils/jit/jit.py
@@ -2,14 +2,12 @@ from __future__ import annotations
 from typing import Sequence, TYPE_CHECKING
 from abc import ABC, abstractmethod
 
+from .error import JitError
+
 if TYPE_CHECKING:
     from ..codegen import Kernel, Parameter, Target
 
 
-class JitError(Exception):
-    """Indicates an error during just-in-time compilation"""
-
-
 class KernelWrapper(ABC):
     """Wrapper around a compiled and executable pystencils kernel."""
 
@@ -21,7 +19,7 @@ class KernelWrapper(ABC):
         pass
 
     @property
-    def kernel_function(self) -> Kernel:
+    def kernel(self) -> Kernel:
         return self._kfunc
     
     @property
diff --git a/src/pystencils/jit/legacy_cpu.py b/src/pystencils/jit/legacy_cpu.py
deleted file mode 100644
index 514e9b60e4a5ae83a234be9f3cd514fdc7a0e555..0000000000000000000000000000000000000000
--- a/src/pystencils/jit/legacy_cpu.py
+++ /dev/null
@@ -1,474 +0,0 @@
-# mypy: ignore-errors
-r"""
-
-*pystencils* automatically searches for a compiler, so in most cases no explicit configuration is required.
-On Linux make sure that 'gcc' and 'g++' are installed and in your path.
-On Windows a recent Visual Studio installation is required.
-In case anything does not work as expected or a special compiler should be used, changes can be specified
-in a configuration file.
-
-*pystencils* looks for a configuration file in JSON format at the following locations in the listed order.
-
-1. at the path specified in the environment variable ``PYSTENCILS_CONFIG``
-2. in the current working direction for a file named ``pystencils.json``
-3. or in your home directory at ``~/.config/pystencils/config.json`` (Linux) or
-   ``%HOMEPATH%\.pystencils\config.json`` (Windows)
-
-If no configuration file is found, a default configuration is created at the above-mentioned location in your home.
-So run *pystencils* once, then edit the created configuration file.
-
-
-Compiler Config (Linux)
------------------------
-
-- **'os'**: should be detected automatically as 'linux'
-- **'command'**: path to C++ compiler (defaults to 'g++')
-- **'flags'**: space separated list of compiler flags. Make sure to activate OpenMP in your compiler
-- **'restrict_qualifier'**: the 'restrict' qualifier is not standardized across compilers.
-  For most Linux compilers the qualifier is ``__restrict__``
-
-
-Compiler Config (Windows)
--------------------------
-
-*pystencils* uses the mechanism of *setuptools.msvc* to search for a compilation environment.
-Then 'cl.exe' is used to compile.
-
-- **'os'**: should be detected automatically as 'windows'
-- **'msvc_version'**:  either a version number, year number, 'auto' or 'latest' for automatic detection of latest
-  installed version or 'setuptools' for setuptools-based detection. Alternatively path to folder
-  where Visual Studio is installed. This path has to contain a file called 'vcvarsall.bat'
-- **'arch'**: 'x86' or 'x64'
-- **'flags'**: flags passed to 'cl.exe', make sure OpenMP is activated
-- **'restrict_qualifier'**: the 'restrict' qualifier is not standardized across compilers.
-  For Windows compilers the qualifier should be ``__restrict``
-
-"""
-
-from appdirs import user_cache_dir, user_config_dir
-from collections import OrderedDict
-from typing import Callable
-
-import importlib.util
-import json
-import os
-import platform
-import shutil
-import subprocess
-import sysconfig
-import tempfile
-import time
-import warnings
-
-
-from ..codegen import Kernel
-from .jit import JitBase, KernelWrapper
-from .cpu_extension_module import PsKernelExtensioNModule
-
-from .msvc_detection import get_environment
-from pystencils.include import get_pystencils_include_path
-from pystencils.utils import atomic_file_write, recursive_dict_update
-
-
-class CpuKernelWrapper(KernelWrapper):
-    def __init__(self, kfunc: Kernel, compiled_kernel: Callable[..., None]) -> None:
-        super().__init__(kfunc)
-        self._compiled_kernel = compiled_kernel
-
-    def __call__(self, **kwargs) -> None:
-        self._compiled_kernel(**kwargs)
-
-    @property
-    def kernel(self) -> Callable[..., None]:
-        return self._compiled_kernel
-
-
-class LegacyCpuJit(JitBase):
-    """Wrapper around ``pystencils.cpu.cpujit``"""
-
-    def compile(self, kernel: Kernel) -> KernelWrapper:
-        return compile_and_load(kernel)
-
-
-def make_python_function(kernel_function_node, custom_backend=None):
-    """
-    Creates C code from the abstract syntax tree, compiles it and makes it accessible as Python function
-
-    The parameters of the kernel are:
-        - numpy arrays for each field used in the kernel. The keyword argument name is the name of the field
-        - all symbols which are not defined in the kernel itself are expected as parameters
-
-    :param kernel_function_node: the abstract syntax tree
-    :param custom_backend: use own custom printer for code generation
-    :return: kernel functor
-    """
-    result = compile_and_load(kernel_function_node, custom_backend)
-    return result
-
-
-def set_config(config):
-    """
-    Override the configuration provided in config file
-
-    Configuration of compiler parameters:
-    If this function is not called the configuration is taken from a config file in JSON format which
-    is searched in the following locations in the order specified:
-        - at location provided in environment variable PYSTENCILS_CONFIG (if this variable exists)
-        - a file called ".pystencils.json" in the current working directory
-        - ~/.pystencils.json in your home
-    If none of these files exist a file ~/.pystencils.json is created with a default configuration using
-    the GNU 'g++'
-
-    An example JSON file with all possible keys. If not all keys are specified, default values are used
-    ``
-    {
-        'compiler' :
-        {
-            "command": "/software/intel/2017/bin/icpc",
-            "flags": "-Ofast -DNDEBUG -fPIC -march=native -fopenmp",
-            "env": {
-                "LM_PROJECT": "iwia",
-            }
-        }
-    }
-    ``
-    """
-    global _config
-    _config = config.copy()
-
-
-def get_configuration_file_path():
-    config_path_in_home = os.path.join(user_config_dir("pystencils"), "config.json")
-
-    # 1) Read path from environment variable if found
-    if "PYSTENCILS_CONFIG" in os.environ:
-        return os.environ["PYSTENCILS_CONFIG"], True
-    # 2) Look in current directory for pystencils.json
-    elif os.path.exists("pystencils.json"):
-        return "pystencils.json", True
-    # 3) Try ~/.pystencils.json
-    elif os.path.exists(config_path_in_home):
-        return config_path_in_home, True
-    else:
-        return config_path_in_home, False
-
-
-def create_folder(path, is_file):
-    if is_file:
-        path = os.path.split(path)[0]
-    try:
-        os.makedirs(path)
-    except os.error:
-        pass
-
-
-def read_config():
-    if platform.system().lower() == "linux":
-        default_compiler_config = OrderedDict(
-            [
-                ("os", "linux"),
-                ("command", "g++"),
-                ("flags", "-Ofast -DNDEBUG -fPIC -march=native -fopenmp -std=c++11"),
-                ("restrict_qualifier", "__restrict__"),
-            ]
-        )
-        if platform.machine().startswith("ppc64") or platform.machine() == "arm64":
-            default_compiler_config["flags"] = default_compiler_config["flags"].replace(
-                "-march=native", "-mcpu=native"
-            )
-    elif platform.system().lower() == "windows":
-        default_compiler_config = OrderedDict(
-            [
-                ("os", "windows"),
-                ("msvc_version", "latest"),
-                ("arch", "x64"),
-                ("flags", "/Ox /fp:fast /OpenMP /arch:avx"),
-                ("restrict_qualifier", "__restrict"),
-            ]
-        )
-        if platform.machine() == "ARM64":
-            default_compiler_config["arch"] = "ARM64"
-            default_compiler_config["flags"] = default_compiler_config["flags"].replace(
-                " /arch:avx", ""
-            )
-    elif platform.system().lower() == "darwin":
-        default_compiler_config = OrderedDict(
-            [
-                ("os", "darwin"),
-                ("command", "clang++"),
-                (
-                    "flags",
-                    "-Ofast -DNDEBUG -fPIC -march=native -Xclang -fopenmp -std=c++11",
-                ),
-                ("restrict_qualifier", "__restrict__"),
-            ]
-        )
-        if platform.machine() == "arm64":
-            default_compiler_config["flags"] = default_compiler_config["flags"].replace(
-                "-march=native ", ""
-            )
-        for libomp in [
-            "/opt/local/lib/libomp/libomp.dylib",
-            "/usr/local/lib/libomp.dylib",
-            "/opt/homebrew/lib/libomp.dylib",
-        ]:
-            if os.path.exists(libomp):
-                default_compiler_config["flags"] += " " + libomp
-                break
-    else:
-        raise NotImplementedError(
-            "Generation of default compiler flags for %s is not implemented"
-            % (platform.system(),)
-        )
-
-    default_cache_config = OrderedDict(
-        [
-            ("object_cache", os.path.join(user_cache_dir("pystencils"), "objectcache")),
-            ("clear_cache_on_start", False),
-        ]
-    )
-
-    default_config = OrderedDict(
-        [("compiler", default_compiler_config), ("cache", default_cache_config)]
-    )
-
-    config_path, config_exists = get_configuration_file_path()
-    config = default_config.copy()
-    if config_exists:
-        with open(config_path, "r") as json_config_file:
-            loaded_config = json.load(json_config_file)
-        config = recursive_dict_update(config, loaded_config)
-    else:
-        create_folder(config_path, True)
-        with open(config_path, "w") as f:
-            json.dump(config, f, indent=4)
-
-    if config["cache"]["object_cache"] is not False:
-        config["cache"]["object_cache"] = os.path.expanduser(
-            config["cache"]["object_cache"]
-        ).format(pid=os.getpid())
-
-        clear_cache_on_start = False
-        cache_status_file = os.path.join(
-            config["cache"]["object_cache"], "last_config.json"
-        )
-        if os.path.exists(cache_status_file):
-            # check if compiler config has changed
-            last_config = json.load(open(cache_status_file, "r"))
-            if set(last_config.items()) != set(config["compiler"].items()):
-                clear_cache_on_start = True
-            else:
-                for key in last_config.keys():
-                    if last_config[key] != config["compiler"][key]:
-                        clear_cache_on_start = True
-
-        if config["cache"]["clear_cache_on_start"] or clear_cache_on_start:
-            shutil.rmtree(config["cache"]["object_cache"], ignore_errors=True)
-
-        create_folder(config["cache"]["object_cache"], False)
-        with tempfile.NamedTemporaryFile(
-            "w", dir=os.path.dirname(cache_status_file), delete=False
-        ) as f:
-            json.dump(config["compiler"], f, indent=4)
-        os.replace(f.name, cache_status_file)
-
-    if config["compiler"]["os"] == "windows":
-        msvc_env = get_environment(
-            config["compiler"]["msvc_version"], config["compiler"]["arch"]
-        )
-        if "env" not in config["compiler"]:
-            config["compiler"]["env"] = {}
-        config["compiler"]["env"].update(msvc_env)
-
-    return config
-
-
-_config = read_config()
-
-
-def get_compiler_config():
-    return _config["compiler"]
-
-
-def get_cache_config():
-    return _config["cache"]
-
-
-def add_or_change_compiler_flags(flags):
-    if not isinstance(flags, list) and not isinstance(flags, tuple):
-        flags = [flags]
-
-    compiler_config = get_compiler_config()
-    cache_config = get_cache_config()
-    cache_config["object_cache"] = False  # disable cache
-
-    for flag in flags:
-        flag = flag.strip()
-        if "=" in flag:
-            base = flag.split("=")[0].strip()
-        else:
-            base = flag
-
-        new_flags = [
-            c for c in compiler_config["flags"].split() if not c.startswith(base)
-        ]
-        new_flags.append(flag)
-        compiler_config["flags"] = " ".join(new_flags)
-
-
-def clear_cache():
-    cache_config = get_cache_config()
-    if cache_config["object_cache"] is not False:
-        shutil.rmtree(cache_config["object_cache"], ignore_errors=True)
-        create_folder(cache_config["object_cache"], False)
-
-
-def load_kernel_from_file(module_name, function_name, path):
-    try:
-        spec = importlib.util.spec_from_file_location(name=module_name, location=path)
-        mod = importlib.util.module_from_spec(spec)
-        spec.loader.exec_module(mod)
-    except ImportError:
-        warnings.warn(f"Could not load {path}, trying on more time in 5 seconds ...")
-        time.sleep(5)
-        spec = importlib.util.spec_from_file_location(name=module_name, location=path)
-        mod = importlib.util.module_from_spec(spec)
-        spec.loader.exec_module(mod)
-
-    return getattr(mod, function_name)
-
-
-def run_compile_step(command):
-    compiler_config = get_compiler_config()
-    config_env = compiler_config["env"] if "env" in compiler_config else {}
-    compile_environment = os.environ.copy()
-    compile_environment.update(config_env)
-    try:
-        shell = True if compiler_config["os"].lower() == "windows" else False
-        subprocess.check_output(
-            command, env=compile_environment, stderr=subprocess.STDOUT, shell=shell
-        )
-    except subprocess.CalledProcessError as e:
-        print(" ".join(command))
-        print(e.output.decode("utf8"))
-        raise e
-
-
-def compile_module(code, code_hash, base_dir, compile_flags=None):
-    if compile_flags is None:
-        compile_flags = []
-
-    compiler_config = get_compiler_config()
-    extra_flags = [
-        "-I" + sysconfig.get_paths()["include"],
-        "-I" + get_pystencils_include_path(),
-    ] + compile_flags
-
-    if compiler_config["os"].lower() == "windows":
-        lib_suffix = ".pyd"
-        object_suffix = ".obj"
-        windows = True
-    else:
-        lib_suffix = ".so"
-        object_suffix = ".o"
-        windows = False
-
-    src_file = os.path.join(base_dir, code_hash + ".cpp")
-    lib_file = os.path.join(base_dir, code_hash + lib_suffix)
-    object_file = os.path.join(base_dir, code_hash + object_suffix)
-
-    if not os.path.exists(object_file):
-        try:
-            with open(src_file, "x") as f:
-                code.write_to_file(f)
-        except FileExistsError:
-            pass
-
-        if windows:
-            compile_cmd = ["cl.exe", "/c", "/EHsc"] + compiler_config["flags"].split()
-            compile_cmd += [*extra_flags, src_file, "/Fo" + object_file]
-            run_compile_step(compile_cmd)
-        else:
-            with atomic_file_write(object_file) as file_name:
-                compile_cmd = [compiler_config["command"], "-c"] + compiler_config[
-                    "flags"
-                ].split()
-                compile_cmd += [*extra_flags, "-o", file_name, src_file]
-                run_compile_step(compile_cmd)
-
-        # Linking
-        if windows:
-            config_vars = sysconfig.get_config_vars()
-            py_lib = os.path.join(
-                config_vars["installed_base"],
-                "libs",
-                f"python{config_vars['py_version_nodot']}.lib",
-            )
-            run_compile_step(
-                ["link.exe", py_lib, "/DLL", "/out:" + lib_file, object_file]
-            )
-        elif platform.system().lower() == "darwin":
-            with atomic_file_write(lib_file) as file_name:
-                run_compile_step(
-                    [
-                        compiler_config["command"],
-                        "-shared",
-                        object_file,
-                        "-o",
-                        file_name,
-                        "-undefined",
-                        "dynamic_lookup",
-                    ]
-                    + compiler_config["flags"].split()
-                )
-        else:
-            with atomic_file_write(lib_file) as file_name:
-                run_compile_step(
-                    [
-                        compiler_config["command"],
-                        "-shared",
-                        object_file,
-                        "-o",
-                        file_name,
-                    ]
-                    + compiler_config["flags"].split()
-                )
-    return lib_file
-
-
-def compile_and_load(kernel: Kernel, custom_backend=None):
-    cache_config = get_cache_config()
-
-    compiler_config = get_compiler_config()
-    function_prefix = (
-        "__declspec(dllexport)" if compiler_config["os"].lower() == "windows" else ""
-    )
-
-    code = PsKernelExtensioNModule()
-
-    code.add_function(kernel, kernel.name)
-
-    code.create_code_string(compiler_config["restrict_qualifier"], function_prefix)
-    code_hash_str = code.get_hash_of_code()
-
-    compile_flags = []
-    #   TODO: replace
-    # if kernel.instruction_set and "compile_flags" in kernel.instruction_set:
-    #     compile_flags = kernel.instruction_set["compile_flags"]
-
-    if cache_config["object_cache"] is False:
-        with tempfile.TemporaryDirectory() as base_dir:
-            lib_file = compile_module(
-                code, code_hash_str, base_dir, compile_flags=compile_flags
-            )
-            result = load_kernel_from_file(code_hash_str, kernel.name, lib_file)
-    else:
-        lib_file = compile_module(
-            code,
-            code_hash_str,
-            base_dir=cache_config["object_cache"],
-            compile_flags=compile_flags,
-        )
-        result = load_kernel_from_file(code_hash_str, kernel.name, lib_file)
-
-    return CpuKernelWrapper(kernel, result)
diff --git a/src/pystencils/timeloop.py b/src/pystencils/timeloop.py
index 5c3438680c27ecf017511c5748ca3957039ba316..b92172fea9afaf63d698186a81fe47d1a9e62499 100644
--- a/src/pystencils/timeloop.py
+++ b/src/pystencils/timeloop.py
@@ -26,8 +26,6 @@ class TimeLoop:
         self._single_step_functions.append(f)
 
     def add_call(self, functor, argument_list):
-        if hasattr(functor, 'kernel'):
-            functor = functor.kernel
         if not isinstance(argument_list, list):
             argument_list = [argument_list]
 
diff --git a/tests/fixtures.py b/tests/fixtures.py
index a4c77f550e228ecfeb7b4e61b95bd2dce9739f9f..8f77a84cbef400036732024f117a66b931779a1a 100644
--- a/tests/fixtures.py
+++ b/tests/fixtures.py
@@ -22,14 +22,6 @@ AVAILABLE_TARGETS = ps.Target.available_targets()
 TARGET_IDS = [t.name for t in AVAILABLE_TARGETS]
 
 
-def pytest_addoption(parser: pytest.Parser):
-    parser.addoption(
-        "--experimental-cpu-jit",
-        dest="experimental_cpu_jit",
-        action="store_true"
-    )
-
-
 @pytest.fixture(params=AVAILABLE_TARGETS, ids=TARGET_IDS)
 def target(request) -> ps.Target:
     """Provides all code generation targets available on the current hardware"""
@@ -37,7 +29,7 @@ def target(request) -> ps.Target:
 
 
 @pytest.fixture
-def gen_config(request: pytest.FixtureRequest, target: ps.Target):
+def gen_config(request: pytest.FixtureRequest, target: ps.Target, tmp_path):
     """Default codegen configuration for the current target.
 
     For GPU targets, set default indexing options.
@@ -50,10 +42,16 @@ def gen_config(request: pytest.FixtureRequest, target: ps.Target):
         gen_config.cpu.vectorize.enable = True
         gen_config.cpu.vectorize.assume_inner_stride_one = True
 
-    if target.is_cpu() and request.config.getoption("experimental_cpu_jit"):
+    if target.is_cpu():
         from pystencils.jit.cpu import CpuJit, GccInfo
 
-        gen_config.jit = CpuJit.create(compiler_info=GccInfo(target=target))
+        #   Set target in compiler info such that `-march` is set accordingly
+        cinfo = GccInfo(target=target)
+
+        gen_config.jit = CpuJit(
+            compiler_info=cinfo,
+            objcache=tmp_path
+        )
 
     return gen_config
 
diff --git a/tests/jit/test_cpujit.py b/tests/jit/test_cpujit.py
index bfa4c98975cc3865429e734cb6c2997ec9074622..ac58af5a5b424e1352a728b2168bf2484fe35a34 100644
--- a/tests/jit/test_cpujit.py
+++ b/tests/jit/test_cpujit.py
@@ -1,5 +1,7 @@
 import pytest
 
+
+from itertools import product
 import sympy as sp
 import numpy as np
 from pystencils import create_kernel, Assignment, fields, Field
@@ -8,16 +10,16 @@ from pystencils.jit import CpuJit
 
 @pytest.fixture
 def cpu_jit(tmp_path) -> CpuJit:
-    return CpuJit.create(objcache=tmp_path)
+    return CpuJit(objcache=tmp_path)
 
 
 def test_basic_cpu_kernel(cpu_jit):
     f, g = fields("f, g: [2D]")
-    asm = Assignment(f.center(), 2.0 * g.center())
+    asm = Assignment(g.center(), 2.0 * f.center())
     ker = create_kernel(asm)
     kfunc = cpu_jit.compile(ker)
 
-    rng = np.random.default_rng()
+    rng = np.random.default_rng(0x5eed)
     f_arr = rng.random(size=(34, 26), dtype="float64")
     g_arr = np.zeros_like(f_arr)
 
@@ -26,6 +28,26 @@ def test_basic_cpu_kernel(cpu_jit):
     np.testing.assert_almost_equal(g_arr, 2.0 * f_arr)
 
 
+def test_invalid_args(cpu_jit):
+    f, g = fields("f, g: [2D]")
+    asm = Assignment(f.center(), 2.0 * g.center())
+    ker = create_kernel(asm)
+    kfunc = cpu_jit.compile(ker)
+
+    f_arr = np.zeros((34, 26), dtype="float64")
+    g_arr = np.zeros_like(f_arr)
+
+    #   Missing Arguments
+    with pytest.raises(KeyError):
+        kfunc(f=f_arr)
+
+    with pytest.raises(KeyError):
+        kfunc(g=g_arr)
+
+    #   Extra arguments are ignored
+    kfunc(f=f_arr, g=g_arr, x=2.1)
+
+
 def test_argument_type_error(cpu_jit):
     f, g = fields("f, g: [2D]")
     c = sp.Symbol("c")
@@ -46,6 +68,9 @@ def test_argument_type_error(cpu_jit):
     with pytest.raises(TypeError):
         kfunc(f=arr_fp16, g=arr_fp16, c=2.0)
 
+    with pytest.raises(TypeError):
+        kfunc(f=arr_fp64, g=arr_fp64, c=[2.0])
+
     #   Wrong scalar types are OK, though
     kfunc(f=arr_fp64, g=arr_fp64, c=np.float16(1.0))
 
@@ -95,3 +120,21 @@ def test_fixed_index_shape(cpu_jit):
         f_arr = np.zeros((12, 14, 3))
         g_arr = np.zeros((12, 14, 1, 3))
         kfunc(f=f_arr, g=g_arr)
+
+
+def test_scalar_field(cpu_jit):
+    f, g = fields("f(1), g: [2D]")
+    asm = Assignment(g(), f(0))
+    ker = create_kernel(asm)
+    kfunc = cpu_jit.compile(ker)
+
+    spatial_shape = (31, 29)
+    #   Both implicit and explicit scalar fields must be accepted
+    for ishape_f, ishape_g in product(((), (1,)), ((), (1,))):
+        rng = np.random.default_rng(0x5eed)
+        f_arr = rng.random(size=spatial_shape + ishape_f, dtype="float64")
+        g_arr = np.zeros(spatial_shape + ishape_g)
+        
+        kfunc(f=f_arr, g=g_arr)
+
+        np.testing.assert_allclose(f_arr.flatten(), g_arr.flatten())
diff --git a/tests/kernelcreation/test_buffer.py b/tests/kernelcreation/test_buffer.py
index 36306534fa07f51aa65d069febd518e4ff1a3476..63d5cb99f96f1cff1be690db0cfde01e637d31e8 100644
--- a/tests/kernelcreation/test_buffer.py
+++ b/tests/kernelcreation/test_buffer.py
@@ -7,241 +7,276 @@ import pystencils as ps
 from pystencils import Assignment, Field, FieldType, create_kernel
 from pystencils.field import create_numpy_array_with_layout, layout_string_to_tuple
 from pystencils.slicing import (
-    add_ghost_layers, get_ghost_region_slice, get_slice_before_ghost_layer)
+    get_ghost_region_slice,
+    get_slice_before_ghost_layer,
+)
 from pystencils.stencil import direction_string_to_offset
 
-FIELD_SIZES = [(32, 10), (10, 8, 6)]
 
-
-def _generate_fields(dt=np.uint64, num_directions=1, layout='numpy'):
-    field_sizes = FIELD_SIZES
-    if num_directions > 1:
-        field_sizes = [s + (num_directions,) for s in field_sizes]
-
-    fields = []
-    for size in field_sizes:
-        field_layout = layout_string_to_tuple(layout, len(size))
-        src_arr = create_numpy_array_with_layout(size, field_layout, dtype=dt)
-
-        array_data = np.reshape(np.arange(1, int(np.prod(size) + 1)), size)
-        # Use flat iterator to input data into the array
-        src_arr.flat = add_ghost_layers(array_data, index_dimensions=1 if num_directions > 1 else 0).astype(dt).flat
-        dst_arr = np.zeros(src_arr.shape, dtype=dt)
-        buffer_arr = np.zeros(np.prod(src_arr.shape), dtype=dt)
-        fields.append((src_arr, dst_arr, buffer_arr))
-    return fields
-
-
-def test_full_scalar_field():
+@pytest.mark.parametrize("shape", [(32, 10), (10, 8, 6)])
+@pytest.mark.parametrize("dtype", ["float32", "float64"])
+def test_full_scalar_field(shape, dtype):
     """Tests fully (un)packing a scalar field (from)to a buffer."""
-    fields = _generate_fields()
-    for (src_arr, dst_arr, buffer_arr) in fields:
-        src_field = Field.create_from_numpy_array("src_field", src_arr)
-        dst_field = Field.create_from_numpy_array("dst_field", dst_arr)
-        buffer = Field.create_generic("buffer", spatial_dimensions=1,
-                                      field_type=FieldType.BUFFER, dtype=src_arr.dtype)
-
-        pack_eqs = [Assignment(buffer.center(), src_field.center())]
-        pack_code = create_kernel(pack_eqs)
-        ps.show_code(pack_code)
+    rank = len(shape)
+    src_field, dst_field = ps.fields(f"src, dst: {dtype}[{rank}D]")
+    buffer = Field.create_generic(
+        "buffer", spatial_dimensions=1, field_type=FieldType.BUFFER, dtype=dtype
+    )
 
-        pack_kernel = pack_code.compile()
-        pack_kernel(buffer=buffer_arr, src_field=src_arr)
+    pack_eqs = [Assignment(buffer.center(), src_field.center())]
+    pack_code = create_kernel(pack_eqs)
+    ps.show_code(pack_code)
 
-        unpack_eqs = [Assignment(dst_field.center(), buffer.center())]
+    rng = np.random.default_rng(0x5EED)
+    src_arr = rng.random(shape, dtype=dtype)
+    dst_arr = np.zeros_like(src_arr)
+    buffer_arr = np.zeros(np.prod(dst_arr.shape), dtype=dtype)
 
-        unpack_code = create_kernel(unpack_eqs)
+    pack_kernel = pack_code.compile()
+    pack_kernel(buffer=buffer_arr, src=src_arr)
 
-        unpack_kernel = unpack_code.compile()
-        unpack_kernel(dst_field=dst_arr, buffer=buffer_arr)
+    unpack_eqs = [Assignment(dst_field.center(), buffer.center())]
 
-        np.testing.assert_equal(src_arr, dst_arr)
+    unpack_code = create_kernel(unpack_eqs)
 
+    unpack_kernel = unpack_code.compile()
+    unpack_kernel(dst=dst_arr, buffer=buffer_arr)
 
-def test_field_slice():
-    """Tests (un)packing slices of a scalar field (from)to a buffer."""
-    fields = _generate_fields()
-    for d in ['N', 'S', 'NW', 'SW', 'TNW', 'B']:
-        for (src_arr, dst_arr, bufferArr) in fields:
-            # Extract slice from N direction of the field
-            slice_dir = direction_string_to_offset(d, dim=len(src_arr.shape))
-            pack_slice = get_slice_before_ghost_layer(slice_dir)
-            unpack_slice = get_ghost_region_slice(slice_dir)
-
-            src_field = Field.create_from_numpy_array("src_field", src_arr[pack_slice])
-            dst_field = Field.create_from_numpy_array("dst_field", dst_arr[unpack_slice])
-            buffer = Field.create_generic("buffer", spatial_dimensions=1,
-                                          field_type=FieldType.BUFFER, dtype=src_arr.dtype)
+    np.testing.assert_equal(src_arr, dst_arr)
 
-            pack_eqs = [Assignment(buffer.center(), src_field.center())]
 
-            pack_code = create_kernel(pack_eqs)
-
-            pack_kernel = pack_code.compile()
-            pack_kernel(buffer=bufferArr, src_field=src_arr[pack_slice])
-
-            # Unpack into ghost layer of dst_field in N direction
-            unpack_eqs = [Assignment(dst_field.center(), buffer.center())]
-
-            unpack_code = create_kernel(unpack_eqs)
-
-            unpack_kernel = unpack_code.compile()
-            unpack_kernel(buffer=bufferArr, dst_field=dst_arr[unpack_slice])
+@pytest.mark.parametrize("shape", [(32, 10), (10, 8, 6)])
+@pytest.mark.parametrize("dtype", ["float32", "float64"])
+@pytest.mark.parametrize("direction", ["N", "S", "NW", "SW", "TNW", "B"])
+def test_field_slice(shape, dtype, direction):
+    """Tests (un)packing slices of a scalar field (from)to a buffer."""
+    rank = len(shape)
+    src_field, dst_field = ps.fields(f"src, dst: {dtype}[{rank}D]")
+    buffer = Field.create_generic(
+        "buffer", spatial_dimensions=1, field_type=FieldType.BUFFER, dtype=dtype
+    )
 
-            np.testing.assert_equal(src_arr[pack_slice], dst_arr[unpack_slice])
+    rng = np.random.default_rng(0x5EED)
+    src_arr = rng.random(shape, dtype=dtype)
+    dst_arr = np.zeros_like(src_arr)
+    buffer_arr = np.zeros(np.prod(dst_arr.shape), dtype=dtype)
 
+    slice_dir = direction_string_to_offset(direction, dim=rank)
+    pack_slice = get_slice_before_ghost_layer(slice_dir)
+    unpack_slice = get_ghost_region_slice(slice_dir)
 
-def test_all_cell_values():
-    """Tests (un)packing all cell values of the a field (from)to a buffer."""
-    num_cell_values = 19
-    fields = _generate_fields(num_directions=num_cell_values)
-    for (src_arr, dst_arr, bufferArr) in fields:
-        src_field = Field.create_from_numpy_array("src_field", src_arr, index_dimensions=1)
-        dst_field = Field.create_from_numpy_array("dst_field", dst_arr, index_dimensions=1)
-        buffer = Field.create_generic("buffer", spatial_dimensions=1, index_shape=(num_cell_values,),
-                                      field_type=FieldType.BUFFER, dtype=src_arr.dtype)
+    pack_eqs = [Assignment(buffer.center(), src_field.center())]
 
-        pack_eqs = []
-        # Since we are packing all cell values for all cells, then
-        # the buffer index is equivalent to the field index
-        for idx in range(num_cell_values):
-            eq = Assignment(buffer(idx), src_field(idx))
-            pack_eqs.append(eq)
+    pack_code = create_kernel(pack_eqs)
 
-        pack_code = create_kernel(pack_eqs)
-        pack_kernel = pack_code.compile()
-        pack_kernel(buffer=bufferArr, src_field=src_arr)
+    pack_kernel = pack_code.compile()
+    pack_kernel(buffer=buffer_arr, src=src_arr[pack_slice])
 
-        unpack_eqs = []
+    # Unpack into the ghost layer of dst_field in the parametrized direction
+    unpack_eqs = [Assignment(dst_field.center(), buffer.center())]
 
-        for idx in range(num_cell_values):
-            eq = Assignment(dst_field(idx), buffer(idx))
-            unpack_eqs.append(eq)
+    unpack_code = create_kernel(unpack_eqs)
 
-        unpack_code = create_kernel(unpack_eqs)
-        unpack_kernel = unpack_code.compile()
-        unpack_kernel(buffer=bufferArr, dst_field=dst_arr)
+    unpack_kernel = unpack_code.compile()
+    unpack_kernel(buffer=buffer_arr, dst=dst_arr[unpack_slice])
 
-        np.testing.assert_equal(src_arr, dst_arr)
+    np.testing.assert_equal(src_arr[pack_slice], dst_arr[unpack_slice])
 
 
-def test_subset_cell_values():
+@pytest.mark.parametrize("shape", [(32, 10), (10, 8, 6)])
+@pytest.mark.parametrize("dtype", ["float32", "float64"])
+def test_all_cell_values(shape, dtype):
+    """Tests (un)packing all cell values of a field (from/to) a buffer."""
+    values_per_cell = 19
+
+    rank = len(shape)
+    src_field, dst_field = ps.fields(
+        f"src({values_per_cell}), dst({values_per_cell}): {dtype}[{rank}D]"
+    )
+    buffer = Field.create_generic(
+        "buffer",
+        spatial_dimensions=1,
+        index_shape=(values_per_cell,),
+        field_type=FieldType.BUFFER,
+        dtype=dtype,
+    )
+
+    rng = np.random.default_rng(0x5EED)
+    src_arr = rng.random(shape + (values_per_cell,), dtype=dtype)
+    dst_arr = np.zeros_like(src_arr)
+    buffer_arr = np.zeros((np.prod(dst_arr.shape), values_per_cell), dtype=dtype)
+
+    pack_eqs = []
+    for idx in range(values_per_cell):
+        eq = Assignment(buffer(idx), src_field(idx))
+        pack_eqs.append(eq)
+
+    pack_code = create_kernel(pack_eqs)
+    pack_kernel = pack_code.compile()
+    pack_kernel(buffer=buffer_arr, src=src_arr)
+
+    unpack_eqs = []
+
+    for idx in range(values_per_cell):
+        eq = Assignment(dst_field(idx), buffer(idx))
+        unpack_eqs.append(eq)
+
+    unpack_code = create_kernel(unpack_eqs)
+    unpack_kernel = unpack_code.compile()
+    unpack_kernel(buffer=buffer_arr, dst=dst_arr)
+
+    np.testing.assert_equal(src_arr, dst_arr)
+
+
+@pytest.mark.parametrize("shape", [(32, 10), (10, 8, 6)])
+@pytest.mark.parametrize("dtype", ["float32", "float64"])
+def test_subset_cell_values(shape, dtype):
     """Tests (un)packing a subset of cell values of the a field (from)to a buffer."""
-    num_cell_values = 19
-    # Cell indices of the field to be (un)packed (from)to the buffer
-    cell_indices = [1, 5, 7, 8, 10, 12, 13]
-    fields = _generate_fields(num_directions=num_cell_values)
-    for (src_arr, dst_arr, bufferArr) in fields:
-        src_field = Field.create_from_numpy_array("src_field", src_arr, index_dimensions=1)
-        dst_field = Field.create_from_numpy_array("dst_field", dst_arr, index_dimensions=1)
-        buffer = Field.create_generic("buffer", spatial_dimensions=1, index_shape=(len(cell_indices),),
-                                      field_type=FieldType.BUFFER, dtype=src_arr.dtype)
-
-        pack_eqs = []
-        # Since we are packing all cell values for all cells, then
-        # the buffer index is equivalent to the field index
-        for buffer_idx, cell_idx in enumerate(cell_indices):
-            eq = Assignment(buffer(buffer_idx), src_field(cell_idx))
-            pack_eqs.append(eq)
-
-        pack_code = create_kernel(pack_eqs)
-        pack_kernel = pack_code.compile()
-        pack_kernel(buffer=bufferArr, src_field=src_arr)
-
-        unpack_eqs = []
-
-        for buffer_idx, cell_idx in enumerate(cell_indices):
-            eq = Assignment(dst_field(cell_idx), buffer(buffer_idx))
-            unpack_eqs.append(eq)
-
-        unpack_code = create_kernel(unpack_eqs)
-        unpack_kernel = unpack_code.compile()
-        unpack_kernel(buffer=bufferArr, dst_field=dst_arr)
-
-        mask_arr = np.ma.masked_where((src_arr - dst_arr) != 0, src_arr)
-        np.testing.assert_equal(dst_arr, mask_arr.filled(int(0)))
-
-
-def test_field_layouts():
-    num_cell_values = 27
-    for layout_str in ['numpy', 'fzyx', 'zyxf', 'reverse_numpy']:
-        fields = _generate_fields(num_directions=num_cell_values, layout=layout_str)
-        for (src_arr, dst_arr, bufferArr) in fields:
-            src_field = Field.create_from_numpy_array("src_field", src_arr, index_dimensions=1)
-            dst_field = Field.create_from_numpy_array("dst_field", dst_arr, index_dimensions=1)
-            buffer = Field.create_generic("buffer", spatial_dimensions=1, index_shape=(num_cell_values,),
-                                          field_type=FieldType.BUFFER, dtype=src_arr.dtype)
-
-            pack_eqs = []
-            # Since we are packing all cell values for all cells, then
-            # the buffer index is equivalent to the field index
-            for idx in range(num_cell_values):
-                eq = Assignment(buffer(idx), src_field(idx))
-                pack_eqs.append(eq)
-
-            pack_code = create_kernel(pack_eqs)
-            pack_kernel = pack_code.compile()
-            pack_kernel(buffer=bufferArr, src_field=src_arr)
-
-            unpack_eqs = []
-
-            for idx in range(num_cell_values):
-                eq = Assignment(dst_field(idx), buffer(idx))
-                unpack_eqs.append(eq)
-
-            unpack_code = create_kernel(unpack_eqs)
-            unpack_kernel = unpack_code.compile()
-            unpack_kernel(buffer=bufferArr, dst_field=dst_arr)
-
-
-def test_iteration_slices():
-    num_cell_values = 19
-    dt = np.uint64
-    fields = _generate_fields(dt=dt, num_directions=num_cell_values)
-    for (src_arr, dst_arr, bufferArr) in fields:
-        spatial_dimensions = len(src_arr.shape) - 1
-        # src_field = Field.create_from_numpy_array("src_field", src_arr, index_dimensions=1)
-        # dst_field = Field.create_from_numpy_array("dst_field", dst_arr, index_dimensions=1)
-        src_field = Field.create_generic("src_field", spatial_dimensions, index_shape=(num_cell_values,), dtype=dt)
-        dst_field = Field.create_generic("dst_field", spatial_dimensions, index_shape=(num_cell_values,), dtype=dt)
-        buffer = Field.create_generic("buffer", spatial_dimensions=1, index_shape=(num_cell_values,),
-                                      field_type=FieldType.BUFFER, dtype=src_arr.dtype)
-
-        pack_eqs = []
-        # Since we are packing all cell values for all cells, then
-        # the buffer index is equivalent to the field index
-        for idx in range(num_cell_values):
-            eq = Assignment(buffer(idx), src_field(idx))
-            pack_eqs.append(eq)
-
-        dim = src_field.spatial_dimensions
-
-        #   Pack only the leftmost slice, only every second cell
-        pack_slice = (slice(None, None, 2),) * (dim - 1) + (slice(0, 1, None),)
-
-        #   Fill the entire array with data
-        src_arr[(slice(None, None, 1),) * dim] = np.arange(num_cell_values)
-        dst_arr.fill(0)
-
-        config = ps.CreateKernelConfig(iteration_slice=pack_slice)
-
-        pack_code = create_kernel(pack_eqs, config=config)
-        pack_kernel = pack_code.compile()
-        pack_kernel(buffer=bufferArr, src_field=src_arr)
-
-        unpack_eqs = []
-
-        for idx in range(num_cell_values):
-            eq = Assignment(dst_field(idx), buffer(idx))
-            unpack_eqs.append(eq)
-
-        config = ps.CreateKernelConfig(iteration_slice=pack_slice)
-
-        unpack_code = create_kernel(unpack_eqs, config=config)
-        unpack_kernel = unpack_code.compile()
-        unpack_kernel(buffer=bufferArr, dst_field=dst_arr)
-
-        #   Check if only every second entry of the leftmost slice has been copied
-        np.testing.assert_equal(dst_arr[pack_slice], src_arr[pack_slice])
-        np.testing.assert_equal(dst_arr[(slice(1, None, 2),) * (dim - 1) + (0,)], 0)
-        np.testing.assert_equal(dst_arr[(slice(None, None, 1),) * (dim - 1) + (slice(1, None),)], 0)
+    values_per_cell = 19
+    indices_to_pack = [1, 5, 7, 8, 10, 12, 13]
+
+    rank = len(shape)
+    src_field, dst_field = ps.fields(
+        f"src({values_per_cell}), dst({values_per_cell}): {dtype}[{rank}D]"
+    )
+    buffer = Field.create_generic(
+        "buffer",
+        spatial_dimensions=1,
+        index_shape=(len(indices_to_pack),),
+        field_type=FieldType.BUFFER,
+        dtype=dtype,
+    )
+
+    rng = np.random.default_rng(0x5EED)
+    src_arr = rng.random(shape + (values_per_cell,), dtype=dtype)
+    dst_arr = np.zeros_like(src_arr)
+    buffer_arr = np.zeros((np.prod(dst_arr.shape), len(indices_to_pack)), dtype=dtype)
+
+    pack_eqs = []
+    for buffer_idx, cell_idx in enumerate(indices_to_pack):
+        eq = Assignment(buffer(buffer_idx), src_field(cell_idx))
+        pack_eqs.append(eq)
+
+    pack_code = create_kernel(pack_eqs)
+    pack_kernel = pack_code.compile()
+    pack_kernel(buffer=buffer_arr, src=src_arr)
+
+    unpack_eqs = []
+
+    for buffer_idx, cell_idx in enumerate(indices_to_pack):
+        eq = Assignment(dst_field(cell_idx), buffer(buffer_idx))
+        unpack_eqs.append(eq)
+
+    unpack_code = create_kernel(unpack_eqs)
+    unpack_kernel = unpack_code.compile()
+    unpack_kernel(buffer=buffer_arr, dst=dst_arr)
+
+    mask_arr = np.ma.masked_where((src_arr - dst_arr) != 0, src_arr)
+    np.testing.assert_equal(dst_arr, mask_arr.filled(int(0)))
+
+
+@pytest.mark.parametrize("shape", [(32, 10), (10, 8, 6)])
+@pytest.mark.parametrize("dtype", ["float32", "float64"])
+@pytest.mark.parametrize("layout", ["numpy", "fzyx", "zyxf", "reverse_numpy"])
+def test_field_layouts(shape, dtype, layout):
+    values_per_cell = 27
+    rank = len(shape)
+
+    rng = np.random.default_rng(0x5EED)
+    src_arr = create_numpy_array_with_layout(
+        shape + (values_per_cell,),
+        layout_string_to_tuple(layout, rank + 1),
+        dtype=dtype,
+    )
+    src_arr[:] = rng.random(shape + (values_per_cell,), dtype=dtype)
+    dst_arr = np.zeros_like(src_arr)
+    buffer_arr = np.zeros((np.prod(dst_arr.shape), values_per_cell), dtype=dtype)
+
+    src_field = Field.create_from_numpy_array("src", src_arr, index_dimensions=1)
+    dst_field = Field.create_from_numpy_array("dst", dst_arr, index_dimensions=1)
+    buffer = Field.create_generic(
+        "buffer",
+        spatial_dimensions=1,
+        index_shape=(values_per_cell,),
+        field_type=FieldType.BUFFER,
+        dtype=dtype,
+    )
+
+    pack_eqs = []
+    for idx in range(values_per_cell):
+        eq = Assignment(buffer(idx), src_field(idx))
+        pack_eqs.append(eq)
+
+    pack_code = create_kernel(pack_eqs)
+    pack_kernel = pack_code.compile()
+    pack_kernel(buffer=buffer_arr, src=src_arr)
+
+    unpack_eqs = []
+
+    for idx in range(values_per_cell):
+        eq = Assignment(dst_field(idx), buffer(idx))
+        unpack_eqs.append(eq)
+
+    unpack_code = create_kernel(unpack_eqs)
+    unpack_kernel = unpack_code.compile()
+    unpack_kernel(buffer=buffer_arr, dst=dst_arr)
+
+
+@pytest.mark.parametrize("shape", [(32, 10), (10, 8, 6)])
+@pytest.mark.parametrize("dtype", ["float32", "float64"])
+def test_iteration_slices(shape, dtype):
+    values_per_cell = 19
+
+    rank = len(shape)
+    src_field, dst_field = ps.fields(
+        f"src({values_per_cell}), dst({values_per_cell}): {dtype}[{rank}D]"
+    )
+    buffer = Field.create_generic(
+        "buffer",
+        spatial_dimensions=1,
+        index_shape=(values_per_cell,),
+        field_type=FieldType.BUFFER,
+        dtype=dtype,
+    )
+
+    rng = np.random.default_rng(0x5EED)
+    src_arr = rng.random(shape + (values_per_cell,), dtype=dtype)
+    dst_arr = np.zeros_like(src_arr)
+    buffer_arr = np.zeros((np.prod(dst_arr.shape), values_per_cell), dtype=dtype)
+
+    pack_eqs = []
+    for idx in range(values_per_cell):
+        eq = Assignment(buffer(idx), src_field(idx))
+        pack_eqs.append(eq)
+
+    #   Pack only the leftmost slice, only every second cell
+    pack_slice = (slice(None, None, 2),) * (rank - 1) + (slice(0, 1, None),)
+
+    #   Fill the entire array with data
+    src_arr[(slice(None, None, 1),) * rank] = np.arange(values_per_cell)
+    dst_arr.fill(0)
+
+    config = ps.CreateKernelConfig(iteration_slice=pack_slice)
+
+    pack_code = create_kernel(pack_eqs, config=config)
+    pack_kernel = pack_code.compile()
+    pack_kernel(buffer=buffer_arr, src=src_arr)
+
+    unpack_eqs = []
+
+    for idx in range(values_per_cell):
+        eq = Assignment(dst_field(idx), buffer(idx))
+        unpack_eqs.append(eq)
+
+    config = ps.CreateKernelConfig(iteration_slice=pack_slice)
+
+    unpack_code = create_kernel(unpack_eqs, config=config)
+    unpack_kernel = unpack_code.compile()
+    unpack_kernel(buffer=buffer_arr, dst=dst_arr)
+
+    #   Check if only every second entry of the leftmost slice has been copied
+    np.testing.assert_equal(dst_arr[pack_slice], src_arr[pack_slice])
+    np.testing.assert_equal(dst_arr[(slice(1, None, 2),) * (rank - 1) + (0,)], 0)
+    np.testing.assert_equal(
+        dst_arr[(slice(None, None, 1),) * (rank - 1) + (slice(1, None),)], 0
+    )
diff --git a/tests/nbackend/test_cpujit.py b/tests/nbackend/test_cpujit.py
index c053df9a9e0d381d5f92d129a3b9280a7d56f236..5e3e84c023d1f1fa204c75f074d86796d45b352d 100644
--- a/tests/nbackend/test_cpujit.py
+++ b/tests/nbackend/test_cpujit.py
@@ -10,7 +10,7 @@ from pystencils.backend.ast.expressions import PsBufferAcc, PsExpression
 from pystencils.backend.ast.structural import PsAssignment, PsBlock, PsLoop
 
 from pystencils.types.quick import SInt, Fp
-from pystencils.jit import LegacyCpuJit
+from pystencils.jit import CpuJit
 
 import numpy as np
 
@@ -53,7 +53,7 @@ def test_pairwise_addition():
 
     # func.add_constraints(sizes_constraint)
 
-    jit = LegacyCpuJit()
+    jit = CpuJit()
     kernel = jit.compile(func)
 
     #   Positive case
diff --git a/tests/nbackend/test_vectorization.py b/tests/nbackend/test_vectorization.py
index fecade65d97afcaae4382bcc2ced119b2a957bed..e23f369dc1e030eef26e5c4a9ef17711aa24c4a8 100644
--- a/tests/nbackend/test_vectorization.py
+++ b/tests/nbackend/test_vectorization.py
@@ -21,7 +21,8 @@ from pystencils.backend.transformations import (
 )
 from pystencils.backend.constants import PsConstant
 from pystencils.codegen.driver import KernelFactory
-from pystencils.jit import LegacyCpuJit
+from pystencils.jit import CpuJit
+from pystencils.jit.cpu import GccInfo
 from pystencils import Target, fields, Assignment, Field
 from pystencils.field import create_numpy_array_with_layout
 from pystencils.types import PsScalarType, PsIntegerType
@@ -135,13 +136,15 @@ def create_vector_kernel(
     lower = LowerToC(ctx)
     loop_nest = lower(loop_nest)
 
+    cinfo = GccInfo(target=setup.target)
+
     kfactory = KernelFactory(ctx)
     func = kfactory.create_generic_kernel(
         platform,
         PsBlock([loop_nest]),
         "vector_kernel",
         Target.CPU,
-        LegacyCpuJit(),
+        CpuJit(cinfo),
     )
 
     kernel = func.compile()
diff --git a/tests/runtime/test_cpu_jit.py b/tests/runtime/test_cpu_jit.py
deleted file mode 100644
index 063533703e5bc54b80197ed28affc2329dafae61..0000000000000000000000000000000000000000
--- a/tests/runtime/test_cpu_jit.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import numpy as np
-import pytest
-
-import pystencils as ps
-
-
-def test_dtype_check_wrong_type():
-    array = np.ones((10, 20)).astype(np.float32)
-    output = np.zeros_like(array)
-    x, y = ps.fields('x,y: [2D]')
-    stencil = [[1, 1, 1],
-               [1, 1, 1],
-               [1, 1, 1]]
-    assignment = ps.assignment_from_stencil(stencil, x, y, normalization_factor=1 / np.sum(stencil))
-    kernel = ps.create_kernel([assignment]).compile()
-
-    with pytest.raises(TypeError) as e:
-        kernel(x=array, y=output)
-    assert 'Wrong data type' in str(e.value)
-
-
-def test_dtype_check_correct_type():
-    array = np.ones((10, 20)).astype(np.float64)
-    output = np.zeros_like(array)
-    x, y = ps.fields('x,y: [2D]')
-    stencil = [[1, 1, 1],
-               [1, 1, 1],
-               [1, 1, 1]]
-    assignment = ps.assignment_from_stencil(stencil, x, y, normalization_factor=1 / np.sum(stencil))
-    kernel = ps.create_kernel([assignment]).compile()
-    kernel(x=array, y=output)
-    assert np.allclose(output[1:-1, 1:-1], np.ones_like(output[1:-1, 1:-1]))