From cd642eb1fe0ce3b8ec191c1f0ae569cb171107ad Mon Sep 17 00:00:00 2001
From: Stephan Seitz <stephan.seitz@fau.de>
Date: Wed, 7 Aug 2019 14:20:44 +0200
Subject: [PATCH] Remove deprecation warning (again :fries:)

---
 .../backends/_tensorflow.py | 20 +++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/src/pystencils_autodiff/backends/_tensorflow.py b/src/pystencils_autodiff/backends/_tensorflow.py
index 1b7b3b7..58eb2c8 100644
--- a/src/pystencils_autodiff/backends/_tensorflow.py
+++ b/src/pystencils_autodiff/backends/_tensorflow.py
@@ -1,14 +1,14 @@
 import tensorflow as tf
+
 import pystencils_autodiff
-import numpy as np
-from pystencils.utils import DotDict
+from tensorflow.compat.v1 import get_default_graph

 _num_generated_ops = 0


 def _py_func(func, inp, Tout, stateful=False, name=None, grad=None):
     """
-    Copied from random internet forum. It seems to be important to give 
+    Copied from random internet forum. It seems to be important to give
     PyFunc to give an random name in override map to properly register gradients

     PyFunc defined as given by Tensorflow
@@ -29,14 +29,17 @@ def _py_func(func, inp, Tout, stateful=False, name=None, grad=None):
         tf.RegisterGradient(rnd_name)(grad)

     # Get current graph
-    g = tf.get_default_graph()
+    g = get_default_graph()

     # Add gradient override map
     with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
         return tf.py_func(func, inp, Tout, stateful=stateful, name=name)


-def tensorflowop_from_autodiffop(autodiffop: pystencils_autodiff.AutoDiffOp, inputfield_tensor_dict, forward_function, backward_function):
+def tensorflowop_from_autodiffop(autodiffop: pystencils_autodiff.AutoDiffOp,
+                                 inputfield_tensor_dict,
+                                 forward_function,
+                                 backward_function):

     def helper_forward(*args):
         kwargs = dict()
@@ -59,7 +62,12 @@ def tensorflowop_from_autodiffop(autodiffop: pystencils_autodiff.AutoDiffOp, inp
         return [rtn_dict[o.name] for o in autodiffop._backward_output_fields]

     def backward(op, *grad):
-        return tf.py_func(helper_backward, [*op.inputs, *grad], [f.dtype.numpy_dtype for f in autodiffop._backward_output_fields], name=autodiffop.op_name + '_backward', stateful=False)
+        return tf.py_func(helper_backward,
+                          [*op.inputs,
+                           *grad],
+                          [f.dtype.numpy_dtype for f in autodiffop._backward_output_fields],
+                          name=autodiffop.op_name + '_backward',
+                          stateful=False)

     output_tensors = _py_func(helper_forward,
                               [inputfield_tensor_dict[f]
--
GitLab
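
For illustration, here is a minimal standalone sketch of the migration this patch performs, assuming TensorFlow 1.14+ in graph mode, where tf.get_default_graph() and tf.py_func() emit deprecation warnings in favour of the tf.compat.v1 namespace. The gradient name and the toy function below are invented for the example and do not come from the repository.

    import tensorflow as tf
    from tensorflow.compat.v1 import get_default_graph  # compat alias for the deprecated tf.get_default_graph()

    # A gradient must be registered under a name before that name can be
    # used in an override map (illustrative gradient, not from the repo).
    @tf.RegisterGradient("ExampleDoubleGrad")
    def _example_double_grad(op, grad):
        return grad * 2.0

    # Fetch the active graph through the compat API instead of the deprecated top-level call.
    g = get_default_graph()

    # gradient_override_map behaves the same on the graph obtained this way.
    with g.gradient_override_map({"PyFunc": "ExampleDoubleGrad", "PyFuncStateless": "ExampleDoubleGrad"}):
        out = tf.compat.v1.py_func(lambda x: x * 2.0, [tf.constant([1.0, 2.0])], tf.float32, stateful=False)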