diff --git a/src/pystencils_autodiff/__init__.py b/src/pystencils_autodiff/__init__.py
index c90c5b8106ff5d924ac07ee13fa4b7bb9e16caa7..3701bf97d42b666529b9492b7450dd8c03532404 100644
--- a/src/pystencils_autodiff/__init__.py
+++ b/src/pystencils_autodiff/__init__.py
@@ -1,8 +1,8 @@
 import sys
 
 import pystencils_autodiff._backport
-
 import pystencils_autodiff.backends  # NOQA
+import pystencils_autodiff.tensorflow_jit
 from pystencils_autodiff._adjoint_field import AdjointField
 from pystencils_autodiff._autodiff import (
     AutoDiffAstPair, AutoDiffOp, DiffModes, create_backward_assignments,
diff --git a/src/pystencils_autodiff/tensorflow_jit.py b/src/pystencils_autodiff/tensorflow_jit.py
new file mode 100644
index 0000000000000000000000000000000000000000..b68abd1a8b79345a857ddc49777d7a0099eadba6
--- /dev/null
+++ b/src/pystencils_autodiff/tensorflow_jit.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright © 2019 Stephan Seitz <stephan.seitz@fau.de>
+#
+# Distributed under terms of the GPLv3 license.
+
+"""
+Compile and load Tensorflow custom-op libraries from pystencils-generated sources.
+"""
+import subprocess
+import sysconfig
+from itertools import chain
+from os.path import exists, join
+
+from tqdm import tqdm
+
+import pystencils
+from pystencils.cpu.cpujit import get_cache_config, get_compiler_config, get_pystencils_include_path
+from pystencils_autodiff._file_io import read_file, write_file
+
+# TODO: msvc
+if get_compiler_config()['os'] != 'windows':  # GCC/Clang-style command-line flags
+    _shared_object_flag = '-shared'
+    _output_flag = '-o'
+    _include_flags = ['-I' + sysconfig.get_paths()['include'], '-I' + get_pystencils_include_path()]
+    _do_not_link_flag = "-c"
+else:  # MSVC-style flags (untested, see TODO above)
+    _do_not_link_flag = "/c"
+    _output_flag = '/OUT:'  # NOTE(review): /OUT: is a link.exe option; cl.exe compile output is /Fo -- confirm
+    _shared_object_flag = '/DLL'
+    _include_flags = ['/I' + sysconfig.get_paths()['include'], '/I' + get_pystencils_include_path()]
+
+
+try:
+    import tensorflow as tf
+
+    # Flags Tensorflow requires for building/linking compatible custom ops.
+    _tf_compile_flags = tf.sysconfig.get_compile_flags()
+    _tf_link_flags = tf.sysconfig.get_link_flags()
+except ImportError:
+    pass  # NOTE(review): _tf_*_flags stay undefined without tensorflow; link_and_load will NameError -- intentional?
+
+
+def link_and_load(object_files, destination_file=None, link_cudart=False, overwrite_destination_file=True):
+    """Link the given object files into a Tensorflow shared library and load it.
+
+    .. warning::
+
+        Reuse is keyed only on the destination path: an existing file is kept when
+        ``overwrite_destination_file`` is False, regardless of object-file contents.
+
+    :object_files: Object files to link together.
+    :destination_file: Optional destination path and filename for the shared object.
+    :returns: Module object containing all Tensorflow Ops in that shared library.
+    """
+
+    if not destination_file:
+        # Name derived from the object-file *names* only, not their contents.
+        destination_file = join(get_cache_config()['object_cache'], f"{abs(hash(tuple(object_files))):x}.so")
+
+    if not exists(destination_file) or overwrite_destination_file:
+        command = [get_compiler_config()['command'],
+                   *(get_compiler_config()['flags']).split(' '),
+                   *object_files,
+                   *_tf_link_flags,
+                   *_tf_compile_flags,
+                   *_include_flags,
+                   _shared_object_flag,
+                   _output_flag + destination_file]  # /out: for msvc???
+        if link_cudart:
+            command.append('-lcudart')
+
+        subprocess.check_call(command)  # raises CalledProcessError on link failure
+
+    lib = tf.load_op_library(destination_file)
+    return lib
+
+
+def try_get_cuda_arch_flag():
+    """Return an nvcc architecture flag for the current GPU, or None if undetectable."""
+    try:
+        from pycuda.driver import Context
+        arch = "sm_%d%d" % Context.get_device().compute_capability()
+    except Exception:  # no pycuda / no device / no active context -> fall back to nvcc default
+        return None
+    # Use the single-token '-arch=sm_XX' form: this string is appended to the command as
+    # ONE argv element and subprocess does no shell splitting, so '-arch sm_XX' would not parse.
+    return "-arch=" + arch if arch else None
+
+
+_cuda_arch_flag = try_get_cuda_arch_flag()
+
+
+def compile_file(file, use_nvcc=False, nvcc='nvcc', overwrite_destination_file=True):
+    """Compile *file* to an object file (``file + '.o'``) and return that path."""
+    destination_file = file + '.o'
+    if use_nvcc:
+        command = [nvcc,
+                   '--expt-relaxed-constexpr',
+                   '-ccbin',
+                   get_compiler_config()['command'],  # host compiler for nvcc
+                   *(get_compiler_config()['flags']).split(' '),
+                   file,
+                   '-x',
+                   'cu',
+                   '-Xcompiler',
+                   '-fPIC',  # TODO: msvc!
+                   _do_not_link_flag,
+                   *_tf_compile_flags,
+                   *_include_flags,
+                   _output_flag + destination_file]
+        if _cuda_arch_flag:
+            command.append(_cuda_arch_flag)
+    else:
+        command = [get_compiler_config()['command'],
+                   *(get_compiler_config()['flags']).split(' '),
+                   file,
+                   _do_not_link_flag,
+                   *_tf_compile_flags,
+                   *_include_flags,
+                   _output_flag + destination_file]
+    if not exists(destination_file) or overwrite_destination_file:  # skip recompilation when the .o is present
+        subprocess.check_call(command)
+    return destination_file
+
+
+def compile_sources_and_load(host_sources, cuda_sources=[]):  # default is read-only here, never mutated
+    """Compile sources (file paths or raw code strings) and load them as one TF op library."""
+    object_files = []
+
+    for source in tqdm(chain(host_sources, cuda_sources), desc='Compiling Tensorflow module...'):
+        is_cuda = source in cuda_sources
+
+        if exists(source):  # each entry is either a path to an existing file or the code itself
+            source_code = read_file(source)
+        else:
+            source_code = source
+
+        file_extension = '.cu' if is_cuda else '.cpp'
+        file_name = join(pystencils.cache.cache_dir, f'{abs(hash(source_code)):x}{file_extension}')
+        write_file(file_name, source_code)
+
+        object_file = compile_file(file_name, use_nvcc=is_cuda, overwrite_destination_file=False)
+        object_files.append(object_file)  # fix: link the produced .o, not the source file itself
+
+    print('Linking Tensorflow module...')
+    module = link_and_load(object_files, overwrite_destination_file=False, link_cudart=bool(cuda_sources))
+    if module:
+        print('Loaded Tensorflow module')
+    return module
diff --git a/tests/test_tensorflow_jit.py b/tests/test_tensorflow_jit.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bc183e8c729ccc3b7afcc6c9c99ba363fd62671
--- /dev/null
+++ b/tests/test_tensorflow_jit.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright © 2019 Stephan Seitz <stephan.seitz@fau.de>
+#
+# Distributed under terms of the GPLv3 license.
+
+"""
+Tests for JIT compilation and loading of Tensorflow custom-op modules.
+"""
+
+import pytest
+import sympy
+
+import pystencils
+import pystencils_autodiff
+from pystencils_autodiff import create_backward_assignments
+from pystencils_autodiff.backends.astnodes import TensorflowModule
+
+
+def test_tensorflow_jit_cpu():
+    """Compile a forward/backward kernel pair to a TF op library and check both ops load."""
+    pytest.importorskip('tensorflow')
+
+    module_name = "Ololol"
+
+    target = 'cpu'
+
+    z, y, x = pystencils.fields("z, y, x: [20,40]")
+    a = sympy.Symbol('a')
+
+    forward_assignments = pystencils.AssignmentCollection({
+        z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])
+    })
+
+    backward_assignments = create_backward_assignments(forward_assignments)
+
+    forward_ast = pystencils.create_kernel(forward_assignments, target)
+    forward_ast.function_name = 'forward'
+    backward_ast = pystencils.create_kernel(backward_assignments, target)
+    backward_ast.function_name = 'backward'
+    module = TensorflowModule(module_name, [forward_ast, backward_ast])
+    print(module)
+
+    # TensorflowModule prefixes kernel names with 'call_' -- see assertions below.
+    lib = pystencils_autodiff.tensorflow_jit.compile_sources_and_load([str(module)])
+    assert 'call_forward' in dir(lib)
+    assert 'call_backward' in dir(lib)