Commit 0bcd0156 authored by Stephan Seitz

Add autodiff feature

parent a3dc9ad2
Pipeline #16132 passed
from pystencils.autodiff._autodiff_astpair import AutoDiffAstPair
from pystencils.autodiff.adjoint_field import (ADJOINT_FIELD_LATEX_HIGHLIGHT,
                                               AdjointField)
from pystencils.autodiff.autodiff import (AutoDiffOp,
                                          create_backward_assignments,
                                          get_jacobian_of_assignments)

__all__ = [
    'AutoDiffAstPair',
    'ADJOINT_FIELD_LATEX_HIGHLIGHT',
    'AdjointField',
    'AutoDiffOp',
    'create_backward_assignments',
    'get_jacobian_of_assignments']
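A minimal sketch of the exported API (illustrative only: the field names and the forward assignment are made up; the tests further down show fuller examples):

import sympy as sp
import pystencils as ps
from pystencils.autodiff import create_backward_assignments

# forward kernel z = sin(x); the generated backward assignments update adjoint "diff" fields
z, x = ps.fields("z, x: [2d]")
forward = ps.AssignmentCollection([ps.Assignment(z[0, 0], sp.sin(x[0, 0]))], [])
print(create_backward_assignments(forward))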
# -*- coding: utf-8 -*-
#
# Copyright © 2019 Stephan Seitz <stephan.seitz@fau.de>
#
# Distributed under terms of the GPLv3 license.
"""
"""
import pystencils
class AutoDiffAstPair:
    """A pair of ASTs for a forward and a backward kernel.

    Only needed if compilation from an AssignmentCollection is not sufficient
    and you want to manipulate the ASTs directly."""

    def __init__(self, forward_ast, backward_ast, compilation_target='cpu'):
        self.forward_ast = forward_ast
        self.backward_ast = backward_ast
        self._target = compilation_target
        # The forward kernel is compiled eagerly, the backward kernel lazily on first use
        self._forward_kernel = pystencils.make_python_function(self.forward_ast, target=self._target)
        self._backward_kernel = None

    def backward(self, *args, **kwargs):
        if not self._backward_kernel:
            self._backward_kernel = pystencils.make_python_function(self.backward_ast, target=self._target)
        return self._backward_kernel(*args, **kwargs)

    def forward(self, *args, **kwargs):
        return self._forward_kernel(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)
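A hedged usage sketch for AutoDiffAstPair, assuming two kernel ASTs built with pystencils.create_kernel; the "backward" kernel here is just a second hand-written kernel for illustration, not a real adjoint:

import numpy as np
import pystencils as ps
from pystencils.autodiff import AutoDiffAstPair

f, g = ps.fields("f, g: double[2d]")
forward_ast = ps.create_kernel([ps.Assignment(g[0, 0], 2 * f[0, 0])])
backward_ast = ps.create_kernel([ps.Assignment(f[0, 0], g[0, 0] / 2)])
pair = AutoDiffAstPair(forward_ast, backward_ast)

f_arr, g_arr = np.ones((8, 8)), np.zeros((8, 8))
pair(f=f_arr, g=g_arr)           # forward kernel, compiled eagerly in __init__
pair.backward(f=f_arr, g=g_arr)  # backward kernel, compiled lazily on first call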
import pystencils
from pystencils.astnodes import FieldShapeSymbol, FieldStrideSymbol
"""
Determines how adjoint fields will be denoted in LaTeX output in terms of the forward field representation %s
Default: r"\\hat{%s}"
"""
ADJOINT_FIELD_LATEX_HIGHLIGHT = r"\hat{%s}"
class AdjointField(pystencils.Field):
    """Field representing the adjoint variables belonging to a Field of forward variables"""

    def __init__(self, forward_field, name_prefix='diff'):
        new_name = name_prefix + forward_field.name
        super(AdjointField, self).__init__(new_name, forward_field.field_type, forward_field._dtype,
                                           forward_field._layout, forward_field.shape, forward_field.strides)
        self.corresponding_forward_field = forward_field
        self.name_prefix = name_prefix

        # Eliminate references to forward fields that might not be present in backward kernels
        self.shape = tuple(FieldShapeSymbol([self.name], s.coordinate) if
                           isinstance(s, FieldShapeSymbol) else s for s in self.shape)
        self.strides = tuple(FieldStrideSymbol(self.name, s.coordinate) if
                             isinstance(s, FieldStrideSymbol) else s for s in self.strides)

        if forward_field.latex_name:
            self.latex_name = ADJOINT_FIELD_LATEX_HIGHLIGHT % forward_field.latex_name
        else:
            self.latex_name = ADJOINT_FIELD_LATEX_HIGHLIGHT % forward_field.name
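A short sketch of what the constructor produces (the printed values follow from the defaults above):

import pystencils as ps
from pystencils.autodiff import AdjointField

x = ps.fields("x: double[2d]")
diff_x = AdjointField(x)
print(diff_x.name)        # "diffx": the forward name with the default "diff" prefix
print(diff_x.latex_name)  # "\hat{x}": formatted via ADJOINT_FIELD_LATEX_HIGHLIGHT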
This diff is collapsed. (The main autodiff module, which defines AutoDiffOp, create_backward_assignments and get_jacobian_of_assignments, is not shown here.)
# -*- coding: utf-8 -*-
#
# Copyright © 2019 Stephan Seitz <stephan.seitz@fau.de>
#
# Distributed under terms of the GPLv3 license.
"""
Backends for generating auto-differentiable operations with pystencils.autodiff
This module will be populated by `pystencils_tensorflow`, `pystencils_torch` if available
"""
AVAILABLE_BACKENDS = []

try:
    import pystencils_tensorflow  # NOQA
except Exception:
    pass
try:
    import pystencils_torch  # NOQA
except Exception:
    pass

__all__ = ['pystencils_tensorflow', 'pystencils_torch']
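A hedged sketch of how calling code might guard on an optional backend; this assumes the optional packages register an identifier in AVAILABLE_BACKENDS when they import successfully, which is not shown in this diff:

from pystencils.autodiff.backends import AVAILABLE_BACKENDS

if 'torch' in AVAILABLE_BACKENDS:  # identifier is an assumption
    pass  # e.g. wrap an AutoDiffOp as a torch.autograd.Function
else:
    pass  # fall back to plain pystencils kernels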
import sympy as sp
import pystencils as ps
import pystencils.autodiff
def test_simple_2d_check_assignment_collection():
    # use a simple example
    # Note: the unpacking order differs from the declaration string, so `x` here
    # is the field named "y" and vice versa; the expected repr below reflects that.
    z, x, y = ps.fields("z, y, x: [2d]")
    forward_assignments = ps.AssignmentCollection([ps.Assignment(
        z[0, 0], x[0, 0] * sp.log(x[0, 0] * y[0, 0]))], [])

    jac = pystencils.autodiff.get_jacobian_of_assignments(
        forward_assignments, [x[0, 0], y[0, 0]])

    assert jac.shape == (len(forward_assignments.bound_symbols),
                         len(forward_assignments.free_symbols))
    print(repr(jac))
    assert repr(jac) == 'Matrix([[log(x_C*y_C) + 1, y_C/x_C]])'

    # Backward assignments can also be generated from backward assignments
    # (for second-order derivatives)
    pystencils.autodiff.create_backward_assignments(
        forward_assignments)
    pystencils.autodiff.create_backward_assignments(
        pystencils.autodiff.create_backward_assignments(forward_assignments))
def test_simple_2d_check_raw_assignments():
    # use a simple example (same field-name swap as above)
    z, x, y = ps.fields("z, y, x: [2d]")
    forward_assignments = \
        [ps.Assignment(z[0, 0], x[0, 0] * sp.log(x[0, 0] * y[0, 0]))]

    jac = pystencils.autodiff.get_jacobian_of_assignments(
        forward_assignments, [x[0, 0], y[0, 0]])

    assert jac.shape == (1, 2)
    assert repr(jac) == 'Matrix([[log(x_C*y_C) + 1, y_C/x_C]])'

    pystencils.autodiff.create_backward_assignments(
        forward_assignments)
def main():
    test_simple_2d_check_assignment_collection()
    test_simple_2d_check_raw_assignments()


if __name__ == '__main__':
    main()
import argparse

import numpy as np
import sympy as sp

import pystencils as ps
import pystencils.autodiff
def test_tfmad_stencil():
    f, out = ps.fields("f, out: double[2D]")

    cont = ps.fd.Diff(f, 0) - ps.fd.Diff(f, 1)
    discretize = ps.fd.Discretization2ndOrder(dx=1)
    discretization = discretize(cont)

    assignment = ps.Assignment(out.center(), discretization)
    assignment_collection = ps.AssignmentCollection([assignment], [])
    print('Forward')
    print(assignment_collection)

    print('Backward')
    backward = ps.autodiff.create_backward_assignments(
        assignment_collection, diff_mode='transposed-forward')
    print(backward)
def test_tfmad_two_stencils():
    a, b, out = ps.fields("a, b, out: double[2D]")

    cont = ps.fd.Diff(a, 0) - ps.fd.Diff(a, 1) - \
        ps.fd.Diff(b, 0) + ps.fd.Diff(b, 1)
    discretize = ps.fd.Discretization2ndOrder(dx=1)
    discretization = discretize(cont)

    assignment = ps.Assignment(out.center(), discretization)
    assignment_collection = ps.AssignmentCollection([assignment], [])
    print('Forward')
    print(assignment_collection)

    print('Backward')
    auto_diff = pystencils.autodiff.AutoDiffOp(
        assignment_collection, diff_mode='transposed-forward')
    backward = auto_diff.backward_assignments
    print(backward)

    print('Forward input fields (to check order)')
    print(auto_diff.forward_input_fields)
def check_tfmad_vector_input_data(args):
    dtype = args.dtype
    domain_shape = args.domain_shape
    ndim = len(domain_shape)

    # create arrays
    c_arr = np.zeros(domain_shape)
    v_arr = np.zeros(domain_shape + (ndim,))

    # create fields
    c, v, c_next = ps.fields("c, v(2), c_next: %s[%i,%i]"
                             % ("float" if dtype == np.float32 else "double",
                                domain_shape[0], domain_shape[1]),
                             c=c_arr, v=v_arr, c_next=c_arr)

    # write down the advection-diffusion PDE;
    # the equation is represented by a single term and an implicit "= 0" is assumed,
    # i.e. dc/dt - div(D * grad(c)) + div(v * c) = 0
    adv_diff_pde = ps.fd.transient(
        c) - ps.fd.diffusion(c, sp.Symbol("D")) + ps.fd.advection(c, v)

    discretize = ps.fd.Discretization2ndOrder(args.dx, args.dt)
    discretization = discretize(adv_diff_pde)
    discretization = discretization.subs(
        sp.Symbol("D"), args.diffusion_coefficient)

    forward_assignments = ps.AssignmentCollection(
        [ps.Assignment(c_next.center(), discretization)], [])

    autodiff = pystencils.autodiff.AutoDiffOp(
        forward_assignments, diff_mode='transposed-forward')  # , constant_fields=[v]

    print('Forward assignments:')
    print(autodiff.forward_assignments)
    print('Backward assignments:')
    print(autodiff.backward_assignments)
def test_tfmad_vector_input_data():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--domain_shape', default=(100, 30), nargs=2, type=int, help="size of the 2d domain")
    parser.add_argument(
        '--dx', default=1, type=float, help="spatial step size")
    parser.add_argument(
        '--dt', default=0.01, type=float, help="time step size")
    parser.add_argument(
        '--diffusion_coefficient', default=1, type=float, help="diffusion coefficient D")
    parser.add_argument(
        '--num_total_time_steps', default=100, type=int)
    parser.add_argument(
        '--num_time_steps_for_op', default=1, type=int)
    parser.add_argument(
        '--learning_rate', default=1e-2, type=float)
    parser.add_argument(
        '--dtype', default=np.float64, type=np.dtype)
    parser.add_argument(
        '--num_optimization_steps', default=2000, type=int)
    args = parser.parse_args()

    check_tfmad_vector_input_data(args)
# def test_tfmad_gradient_check():
#     a, b, out = ps.fields("a, b, out: double[21,13]")
#
#     cont = ps.fd.Diff(a, 0) - ps.fd.Diff(a, 1) - \
#         ps.fd.Diff(b, 0) + ps.fd.Diff(b, 1)
#     discretize = ps.fd.Discretization2ndOrder(dx=1)
#     discretization = discretize(cont)
#
#     assignment = ps.Assignment(out.center(), discretization)
#     assignment_collection = ps.AssignmentCollection([assignment], [])
#     print('Forward')
#     print(assignment_collection)
#
#     print('Backward')
#     auto_diff = pystencils.autodiff.AutoDiffOp(
#         assignment_collection, diff_mode='transposed-forward')
#     backward = auto_diff.backward_assignments
#     print(backward)
#     print('Forward input fields (to check order)')
#     print(auto_diff.forward_input_fields)
#
#     a_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
#     b_tensor = tf.Variable(np.zeros(b.shape, b.dtype.numpy_dtype))
#     out_tensor = auto_diff.create_tensorflow_op({a: a_tensor, b: b_tensor})
#
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         gradient_error = pystencils._tensorflow_utils.compute_gradient_error_without_border(
#             [a_tensor, b_tensor], [a.shape, b.shape], out_tensor, out.shape, num_border_pixels=2, ndim=2)
#         print('error: %s' % gradient_error.max_error)
#         assert gradient_error.max_error < 1e-4
# def test_tfmad_gradient_check_torch():
#     a, b, out = ps.fields("a, b, out: float[21,13]")
#
#     cont = ps.fd.Diff(a, 0) - ps.fd.Diff(a, 1) - \
#         ps.fd.Diff(b, 0) + ps.fd.Diff(b, 1)
#     discretize = ps.fd.Discretization2ndOrder(dx=1)
#     discretization = discretize(cont)
#
#     assignment = ps.Assignment(out.center(), discretization)
#     assignment_collection = ps.AssignmentCollection([assignment], [])
#     print('Forward')
#     print(assignment_collection)
#
#     print('Backward')
#     auto_diff = pystencils.autodiff.AutoDiffOp(
#         assignment_collection, diff_mode='transposed-forward')
#     backward = auto_diff.backward_assignments
#     print(backward)
#     print('Forward input fields (to check order)')
#     print(auto_diff.forward_input_fields)
#
#     a_tensor = torch.zeros(
#         *a.shape, dtype=torch.float64, requires_grad=True)
#     b_tensor = torch.zeros(
#         *b.shape, dtype=torch.float64, requires_grad=True)
#
#     function = auto_diff.create_tensorflow_op(
#         {a: a_tensor, b: b_tensor}, backend='torch')
#     torch.autograd.gradcheck(function.apply, [a_tensor, b_tensor])
def get_curl(input_field: ps.Field, curl_field: ps.Field):
    """Return a ps.AssignmentCollection describing the calculation of
    the curl given a 2d or 3d vector field [z,y,x](f) or [y,x](f)

    Note that the curl of a 2d vector field is defined in ℝ³!
    Only the non-zero z-component is returned.

    Arguments:
        input_field {ps.Field} -- A field with index_dimensions <= 1;
            scalar fields are interpreted as the z-component of a field in ℝ³
        curl_field {ps.Field} -- A vector field (index_dimensions == 1) that receives the curl

    Raises:
        NotImplementedError -- The 2d and 3d vector-field cases are not implemented yet;
            only 2d or 3d vector fields or scalar fields are supported

    Returns:
        ps.AssignmentCollection -- AssignmentCollection describing the calculation of the curl
    """
    assert input_field.index_dimensions <= 1, "Must be a vector or a scalar field"
    assert curl_field.index_dimensions == 1, "Must be a vector field"
    discretize = ps.fd.Discretization2ndOrder(dx=1)

    if input_field.index_dimensions == 0:
        # curl of (0, 0, f) is (df/dy, -df/dx, 0); coordinate 0 is y, coordinate 1 is x,
        # hence the minus sign on the second component
        dy = ps.fd.Diff(input_field, 0)
        dx = ps.fd.Diff(input_field, 1)
        f_x = ps.Assignment(curl_field.center(0), discretize(dy))
        f_y = ps.Assignment(curl_field.center(1), discretize(-dx))

        return ps.AssignmentCollection([f_x, f_y], [])
    else:
        if input_field.index_shape[0] == 2:
            raise NotImplementedError()
        elif input_field.index_shape[0] == 3:
            raise NotImplementedError()
        else:
            raise NotImplementedError()
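A hedged sketch of compiling and running the scalar branch of get_curl with the usual pystencils workflow (field names, shapes, and the create_kernel/compile calls are illustrative assumptions):

import numpy as np
import pystencils as ps

psi = ps.Field.create_fixed_size(field_name='psi', shape=(20, 30), index_dimensions=0)
u = ps.Field.create_fixed_size(field_name='u', shape=(20, 30, 2), index_dimensions=1)
kernel = ps.create_kernel(get_curl(psi, u)).compile()

psi_arr = np.random.rand(20, 30)
u_arr = np.zeros((20, 30, 2))
kernel(psi=psi_arr, u=u_arr)  # interior of u_arr now holds (dpsi/dy, -dpsi/dx)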
def test_tfmad_two_outputs():
    domain_shape = (20, 30)
    vector_shape = domain_shape + (2,)

    curl_input_for_u = ps.Field.create_fixed_size(
        field_name='curl_input', shape=domain_shape, index_dimensions=0)
    u_field = ps.Field.create_fixed_size(
        field_name='curl', shape=vector_shape, index_dimensions=1)

    curl_op = pystencils.autodiff.AutoDiffOp(get_curl(
        curl_input_for_u, u_field), diff_mode="transposed-forward")

    print('Forward')
    print(curl_op.forward_assignments)
    print('Backward')
    print(curl_op.backward_assignments)


def main():
    test_tfmad_stencil()
    test_tfmad_two_stencils()
    test_tfmad_vector_input_data()
    test_tfmad_two_outputs()


if __name__ == '__main__':
    main()