Commit f0d770c2 authored by Stephan Seitz

Sort fields by name to avoid confusion when returning gradients by order

parent 20f2ee40
@@ -246,12 +246,12 @@ Backward:
             self._forward_assignments = forward_assignments
             self._forward_read_accesses = None
             self._forward_write_accesses = None
-            self._forward_input_fields = list(forward_assignments.free_fields)
-            self._forward_output_fields = list(forward_assignments.bound_fields)
+            self._forward_input_fields = list(sorted(forward_assignments.free_fields, key=lambda x: str(x)))
+            self._forward_output_fields = list(sorted(forward_assignments.bound_fields, key=lambda x: str(x)))
             self._backward_assignments = backward_assignments
             self._backward_field_map = None
-            self._backward_input_fields = list(backward_assignments.free_fields)
-            self._backward_output_fields = list(backward_assignments.bound_fields)
+            self._backward_input_fields = list(sorted(backward_assignments.free_fields, key=lambda x: str(x)))
+            self._backward_output_fields = list(sorted(backward_assignments.bound_fields, key=lambda x: str(x)))
         else:
             # if no_caching:
             if diff_mode == 'transposed':
@@ -270,12 +270,12 @@ Backward:
             self._forward_assignments = forward_assignments
             self._forward_read_accesses = None
             self._forward_write_accesses = None
-            self._forward_input_fields = list(forward_assignments.free_fields)
-            self._forward_output_fields = list(forward_assignments.bound_fields)
+            self._forward_input_fields = list(sorted(forward_assignments.free_fields, key=lambda x: str(x)))
+            self._forward_output_fields = list(sorted(forward_assignments.bound_fields, key=lambda x: str(x)))
             self._backward_assignments = backward_assignments
             self._backward_field_map = None
-            self._backward_input_fields = list(backward_assignments.free_fields)
-            self._backward_output_fields = list(backward_assignments.bound_fields)
+            self._backward_input_fields = list(sorted(backward_assignments.free_fields, key=lambda x: str(x)))
+            self._backward_output_fields = list(sorted(backward_assignments.bound_fields, key=lambda x: str(x)))
             self._backward_field_map = None
         else:
             raise NotImplementedError()
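
Note on the change above: `free_fields` and `bound_fields` are sets, and Python guarantees no stable iteration order for sets across interpreter runs, while gradients are matched to fields by position. A minimal sketch of the problem and the fix, using plain strings as stand-ins for pystencils `Field` objects:

    # Sets have no stable iteration order across runs (string hashing is
    # randomized via PYTHONHASHSEED), so pairing gradients with fields
    # *by position* needs an explicit sort.
    fields = {'b', 'out1', 'a'}          # stand-ins for Field objects

    unordered = list(fields)             # order may differ between runs

    # Deterministic ordering by name, as in this commit:
    ordered = sorted(fields, key=lambda f: str(f))
    assert ordered == ['a', 'b', 'out1']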
...
@@ -64,7 +64,7 @@ def create_field_from_array_like(field_name, maybe_array, annotations=None):
     index_dimensions = 0
     field_type = FieldType.GENERIC
-    if 'tensorflow.python.' in str(type(maybe_array)) and 'Tensor' in str(type(maybe_array)):
+    if 'tensorflow' in str(type(maybe_array)) and 'Tensor' in str(type(maybe_array)):
         try:
             # This fails on eager execution
             return Field.create_fixed_size(maybe_array.name or field_name,
...@@ -96,7 +96,10 @@ def coerce_to_field(field_name, array_like): ...@@ -96,7 +96,10 @@ def coerce_to_field(field_name, array_like):
def is_array_like(a): def is_array_like(a):
import pycuda.gpuarray import pycuda.gpuarray
return (hasattr(a, '__array__') or isinstance(a, pycuda.gpuarray.GPUArray)) and not isinstance(a, sympy.Matrix) return (hasattr(a, '__array__')
or isinstance(a, pycuda.gpuarray.GPUArray)
or ('tensorflow' in str(type(a)) and 'Tensor' in str(type(a)))
or 'torch.Tensor' in str(type(a))) and not isinstance(a, sympy.Matrix)
def tf_constant_from_field(field, init_val=0): def tf_constant_from_field(field, init_val=0):
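
Aside on `is_array_like`: the `str(type(a))` checks recognize TensorFlow and PyTorch tensors without importing either framework, so both remain optional dependencies; loosening `'tensorflow.python.'` to `'tensorflow'` presumably lets the check match tensor classes whose type repr does not start with that exact module path. A sketch of the pattern (the helper names here are illustrative, not part of the package):

    def _looks_like_torch_tensor(a):
        # Checking the type's repr avoids a hard 'import torch'; it matches
        # "<class 'torch.Tensor'>" whenever torch is actually installed.
        return 'torch.Tensor' in str(type(a))

    def _looks_like_tf_tensor(a):
        # Matches graph-mode and eager tensor classes alike, as long as both
        # 'tensorflow' and 'Tensor' occur in the type's repr.
        type_name = str(type(a))
        return 'tensorflow' in type_name and 'Tensor' in type_name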
...
@@ -2,6 +2,7 @@ import os
 import numpy as np
 import pytest
+import sympy
 
 import pystencils as ps
 import pystencils_autodiff
@@ -230,6 +231,57 @@ def test_tfmad_gradient_check_torch_native(with_offsets, with_cuda):
         [dict[f] for f in auto_diff.forward_input_fields]), atol=1e-4, raise_exception=True)
 
 
+@pytest.mark.parametrize('with_cuda', (False, 'with_cuda'))
+def test_tfmad_gradient_check_two_outputs(with_cuda):
+    torch = pytest.importorskip('torch')
+
+    a, b, out1, out2, out3 = ps.fields("a, b, out1, out2, out3: float64[21,13]")
+
+    assignment_collection = ps.AssignmentCollection({
+        out1.center: a.center + b.center,
+        out2.center: a.center - b.center,
+        out3.center: sympy.exp(b[-1, 0])
+    })
+    print('Forward')
+    print(assignment_collection)
+
+    print('Backward')
+    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
+                                               boundary_handling='zeros',
+                                               diff_mode='transposed-forward')
+    print(auto_diff.backward_fields)
+    backward = auto_diff.backward_assignments
+    print(backward)
+    print('Forward input fields (to check order)')
+    print(auto_diff.forward_input_fields)
+
+    a_tensor = torch.zeros(*a.shape, dtype=torch.float64, requires_grad=True).contiguous()
+    b_tensor = torch.zeros(*b.shape, dtype=torch.float64, requires_grad=True).contiguous()
+    out1_tensor = torch.zeros(*a.shape, dtype=torch.float64, requires_grad=True).contiguous()
+    out2_tensor = torch.zeros(*b.shape, dtype=torch.float64, requires_grad=True).contiguous()
+    out3_tensor = torch.zeros(*b.shape, dtype=torch.float64, requires_grad=True).contiguous()
+
+    if with_cuda:
+        a_tensor = a_tensor.cuda()
+        b_tensor = b_tensor.cuda()
+        out1_tensor = out1_tensor.cuda()
+        out2_tensor = out2_tensor.cuda()
+        out3_tensor = out3_tensor.cuda()
+
+    function = auto_diff.create_tensorflow_op(use_cuda=with_cuda, backend='torch_native')
+
+    # Key the map by field (not by tensor) so the positional lookup below works;
+    # 'tensor_dict' also avoids shadowing the built-in 'dict'.
+    tensor_dict = {
+        a: a_tensor,
+        b: b_tensor,
+        out1: out1_tensor,
+        out2: out2_tensor,
+        out3: out3_tensor,
+    }
+
+    torch.autograd.gradcheck(function.apply, tuple(
+        [tensor_dict[f] for f in auto_diff.forward_input_fields]), atol=1e-4, raise_exception=True)
 
 
 @pytest.mark.parametrize('gradient_check', (False, 'with_gradient_check'))
 @pytest.mark.parametrize('with_cuda', (False, pytest.param('with_cuda', marks=pytest.mark.xfail)))
 @pytest.mark.parametrize('with_offsets', (False, 'with_offsets'))
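
For readers unfamiliar with the check used in the new test: `torch.autograd.gradcheck` perturbs each input entry, builds a numerical Jacobian via finite differences, and compares it with the Jacobian autograd reports; it needs double-precision inputs with `requires_grad=True` to be meaningful. A self-contained sketch, independent of pystencils:

    import torch

    def two_outputs(a, b):
        # Same shape of problem as the new test: several outputs from shared inputs.
        return a + b, a - b

    a = torch.randn(4, 3, dtype=torch.float64, requires_grad=True)
    b = torch.randn(4, 3, dtype=torch.float64, requires_grad=True)

    # Raises if the analytic and numeric Jacobians disagree beyond tolerance.
    assert torch.autograd.gradcheck(two_outputs, (a, b), atol=1e-4)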
@@ -266,27 +318,18 @@ def test_tfmad_gradient_check_tensorflow_native(with_offsets, with_cuda, gradient_check):
     # out_tensor = auto_diff.create_tensorflow_op(use_cuda=with_cuda, backend='tensorflow_native')
     # print(out_tensor)
 
-    with tf.Graph().as_default():
-        a_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
-        b_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
-        op = auto_diff.create_tensorflow_op(use_cuda=with_cuda, backend='tensorflow_native')
-        out_tensor = op(a=a_tensor, b=b_tensor)
-
-        with tf.compat.v1.Session() as sess:
-            sess.run(tf.compat.v1.global_variables_initializer())
-
-            if gradient_check:
-                gradient_error = compute_gradient_error_without_border(
-                    [a_tensor, b_tensor], [a.shape, b.shape],
-                    out_tensor,
-                    out.shape,
-                    num_border_pixels=0,
-                    ndim=2,
-                    debug=True)
-                print('error: %s' % gradient_error.max_error)
-                print('avg error: %s' % gradient_error.avg_error)
-                assert any(e < 1e-4 for e in gradient_error.values())
+    a_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
+    b_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
+    op = auto_diff.create_tensorflow_op(use_cuda=with_cuda, backend='tensorflow_native')
+
+    theoretical, numerical = tf.test.compute_gradient(
+        op,
+        [a_tensor, b_tensor],
+        delta=0.001
+    )
+    assert np.allclose(theoretical[0], numerical[0])
+    assert np.allclose(theoretical[1], numerical[1])
 
 
 def get_curl(input_field: ps.Field, curl_field: ps.Field):
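
The rewritten test relies on the TF 2.x form of `tf.test.compute_gradient`, which takes a callable plus a list of inputs and returns two tuples of Jacobians (autodiff vs. finite differences), one entry per input. A minimal sketch against a plain TensorFlow function standing in for the generated op:

    import numpy as np
    import tensorflow as tf

    def f(a, b):
        # Stand-in for the generated op: differentiable in both inputs.
        return a * a + b

    a = tf.constant(np.random.rand(3, 3))
    b = tf.constant(np.random.rand(3, 3))

    # theoretical[i] / numerical[i] are the Jacobians w.r.t. the i-th input.
    theoretical, numerical = tf.test.compute_gradient(f, [a, b], delta=1e-3)
    assert np.allclose(theoretical[0], numerical[0], atol=1e-4)
    assert np.allclose(theoretical[1], numerical[1], atol=1e-4)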
...