Support Simple For Range Loop in Dygraph to Static (#22867)

1. Add basic support for `for in range` loop (see the usage sketch below)
2. Move `test_dygraph_to_static_*` to `dygraph_to_static` dir and rename them
3. Add test case for dict in while_loop
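
A minimal usage sketch of the new `for in range` support (it mirrors `for_loop_dyfunc` and `TestTransformForLoop` from the tests below; the `paddle.fluid.dygraph.jit` import path for `dygraph_to_static_graph` is an assumption):

```python
import paddle.fluid as fluid
# Assumed import path; the tests below call dygraph_to_static_graph directly.
from paddle.fluid.dygraph.jit import dygraph_to_static_graph


def for_loop_dyfunc(max_len):
    # A plain Python `for ... in range(...)` loop, which the AST transformer
    # can now rewrite into static-graph loop ops.
    for i in range(max_len):
        ret = fluid.layers.zeros(shape=[1], dtype='float32')
        fluid.layers.increment(ret, value=2.0, in_place=True)
    return ret


main_program = fluid.Program()
with fluid.program_guard(main_program):
    static_func = dygraph_to_static_graph(for_loop_dyfunc)
    out = static_func(10)
    exe = fluid.Executor(fluid.CPUPlace())
    ret = exe.run(main_program, fetch_list=out)
# Each iteration recreates `ret` as zeros and increments it once, so the
# fetched value is 2.0 regardless of the loop length.
print(ret)
```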
Author: Huihuang Zheng (committed via GitHub) · parent f70f1cf122 · commit d7a7c5f0bf

@@ -215,6 +215,18 @@ def create_api_shape_node(tensor_shape_node):
     return api_shape_node
 
 
+def get_constant_variable_node(name, value, shape=[1], dtype='int64'):
+    return gast.parse('%s = fluid.layers.fill_constant(%s, "%s", %s)' %
+                      (name, str(shape), dtype, str(value)))
+
+
+def get_attribute_full_name(node):
+    assert isinstance(
+        node,
+        gast.Attribute), "Input non-Attribute node to get attribute full name"
+    return astor.to_source(gast.gast_to_ast(node)).strip()
+
+
 def generate_name_node(name_ids, ctx=gast.Load()):
     """
     Generate list or gast.Tuple of ast.Name for Return statement.
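
For reference, a quick sketch of what `get_constant_variable_node` yields when round-tripped back to source (using `astor` and `gast`, which the surrounding module already uses, e.g. in `get_attribute_full_name`):

```python
import astor
import gast


def get_constant_variable_node(name, value, shape=[1], dtype='int64'):
    # Same helper as in the hunk above: parses an assignment statement
    # into a gast AST node that the transformer can splice into a function.
    return gast.parse('%s = fluid.layers.fill_constant(%s, "%s", %s)' %
                      (name, str(shape), dtype, str(value)))


node = get_constant_variable_node("i", 0)
print(astor.to_source(gast.gast_to_ast(node)).strip())
# Prints roughly: i = fluid.layers.fill_constant([1], 'int64', 0)
# (exact quoting depends on the astor version)
```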

@@ -14,6 +14,7 @@
 from __future__ import print_function
 
+import six
 import gast
 
 from paddle.fluid.layers import fill_constant
@@ -39,8 +40,15 @@ def to_static_variable(x):
     '''
     if isinstance(x, bool):
         return fill_constant(shape=[1], dtype='bool', value=x)
-    if isinstance(x, int):
-        return fill_constant(shape=[1], dtype='int64', value=x)
     if isinstance(x, float):
         return fill_constant(shape=[1], dtype='float64', value=x)
+    if six.PY2:
+        if isinstance(x, int):
+            return fill_constant(shape=[1], dtype='int32', value=x)
+        if isinstance(x, long):
+            return fill_constant(shape=[1], dtype='int64', value=x)
+    else:
+        if isinstance(x, int):
+            return fill_constant(shape=[1], dtype='int64', value=x)
+
     return x

@ -59,7 +59,17 @@ class SubNetWithDict(fluid.dygraph.Layer):
cache_k, cache_v = cache["k"], cache["v"] cache_k, cache_v = cache["k"], cache["v"]
k = 0.1 * cache_k + k k = 0.1 * cache_k + k
v = 0.2 * cache_v + v v = 0.2 * cache_v + v
cache["k"], cache["v"] = k, v # TODO: currently while_loop can have a dict as loop_vars, but
# to change the value in a dict, you have to use layers.assign
# because cache["k"] = k is putting k in dict without building
# network. So we cannot write:
#
# cache["k"], cache["v"] = k, v
#
# we have to support this kind of dict in loop in the future.
# For example, automatically change = to assign in AutoTracer
fluid.layers.assign(k, cache["k"])
fluid.layers.assign(v, cache["v"])
weight = fluid.layers.matmul(x=q, y=k, transpose_y=True) weight = fluid.layers.matmul(x=q, y=k, transpose_y=True)
weight = fluid.layers.softmax(weight) weight = fluid.layers.softmax(weight)
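
The distinction that the TODO above describes, in isolation: a hedged sketch of a `while_loop` body that updates a dict entry (the names `cond`, `body`, and `cache` are illustrative; only `fluid.layers` calls that already appear in this diff are used):

```python
import paddle.fluid as fluid
import paddle.fluid.layers as layers


def cond(i, ten, cache):
    return layers.less_than(i, ten)


def body(i, ten, cache):
    new_v = layers.softmax(cache["v"])
    # Plain `cache["v"] = new_v` would only rebind the Python dict entry and
    # append no op to the loop block, so the update would be lost in the
    # static graph. layers.assign writes new_v into the existing Variable,
    # making the update part of the network.
    layers.assign(new_v, cache["v"])
    i = layers.increment(i)
    return [i, ten, cache]


main_program = fluid.Program()
with fluid.program_guard(main_program):
    i = layers.zeros(shape=[1], dtype='int64')
    ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
    cache = {"v": layers.fill_constant(shape=[3], dtype='float32', value=1.0)}
    i, ten, cache = layers.while_loop(cond, body, [i, ten, cache])
```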
@@ -94,12 +104,20 @@ class MainNetWithDict(fluid.dygraph.Layer):
         for i in range(max_len):
             out = self.sub_net(out, cache)
             cache = self.update_cache(cache)
         return out
 
     def update_cache(self, cache):
         for k, val in six.iteritems(cache):
-            cache[k] = fluid.layers.softmax(val)
+            # TODO: while_loop currently accepts a dict as loop_vars, but to
+            # change a value in the dict you have to use layers.assign,
+            # because `cache[k] = ...` just puts the value into the Python
+            # dict without building any network. So we cannot write:
+            #
+            #     cache[k] = fluid.layers.softmax(val)
+            #
+            # We should support this kind of dict-in-loop in the future, e.g.
+            # by automatically rewriting `=` to `assign` in AutoTracer.
+            fluid.layers.assign(fluid.layers.softmax(val), cache[k])
         return cache

@@ -35,20 +35,34 @@ def while_loop_dyfunc(x):
     return i
 
 
+def for_loop_dyfunc(max_len):
+    for i in range(max_len):
+        ret = fluid.layers.zeros(shape=[1], dtype='float32')
+        fluid.layers.increment(ret, value=2.0, in_place=True)
+    return ret
+
+
 class TestNameVisitor(unittest.TestCase):
+    def setUp(self):
+        self.loop_funcs = [while_loop_dyfunc, for_loop_dyfunc]
+        self.loop_var_names = [set(["i", "x"]), set(["i", "ret", "max_len"])]
+        self.create_var_names = [set(), set(["ret"])]
+
     def test_loop_vars(self):
-        test_func = inspect.getsource(while_loop_dyfunc)
-        gast_root = gast.parse(test_func)
-        name_visitor = NameVisitor(gast_root)
-        for node in gast.walk(gast_root):
-            if isinstance(node, gast.While):
-                loop_var_names, create_var_names = name_visitor.get_loop_var_names(
-                    node)
-                self.assertEqual(loop_var_names, set(["i", "x"]))
-                self.assertEqual(create_var_names, set())
+        for i in range(len(self.loop_funcs)):
+            func = self.loop_funcs[i]
+            test_func = inspect.getsource(func)
+            gast_root = gast.parse(test_func)
+            name_visitor = NameVisitor(gast_root)
+            for node in gast.walk(gast_root):
+                if isinstance(node, (gast.While, gast.For)):
+                    loop_var_names, create_var_names = name_visitor.get_loop_var_names(
+                        node)
+                    self.assertEqual(loop_var_names, self.loop_var_names[i])
+                    self.assertEqual(create_var_names, self.create_var_names[i])
 
 
-class TestTransformWhile(unittest.TestCase):
+class TestTransformWhileLoop(unittest.TestCase):
     def setUp(self):
         self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
@@ -83,5 +97,35 @@ class TestTransformWhile(unittest.TestCase):
         # self.assertTrue(np.allclose(self._run_dygraph(), self._run_static()))
 
 
+class TestTransformForLoop(unittest.TestCase):
+    def setUp(self):
+        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
+        ) else fluid.CPUPlace()
+        self.len = 100
+
+    def _run_static(self):
+        main_program = fluid.Program()
+        with fluid.program_guard(main_program):
+            static_func = dygraph_to_static_graph(for_loop_dyfunc)
+            out = static_func(self.len)
+            exe = fluid.Executor(self.place)
+            ret = exe.run(main_program, fetch_list=out)
+        return ret
+
+    def _run_dygraph(self):
+        with fluid.dygraph.guard(self.place):
+            ret = for_loop_dyfunc(self.len)
+        return ret.numpy()
+
+    def test_ast_to_func(self):
+        static_numpy = self._run_static()
+        self.assertTrue(
+            np.allclose(
+                np.full(
+                    shape=(1), fill_value=2, dtype=np.int32), static_numpy))
+        self._run_dygraph()
+        self.assertTrue(np.allclose(self._run_dygraph(), self._run_static()))
+
+
 if __name__ == '__main__':
     unittest.main()

@@ -77,6 +77,34 @@ class TestApiWhileLoop(unittest.TestCase):
             data = np.add(data, data_one)
         self.assertTrue(np.allclose(np.asarray(res[1]), data))
 
+    def test_var_dict(self):
+        def cond(i, ten, test_dict):
+            return layers.less_than(i, ten)
+
+        def body(i, ten, test_dict):
+            layers.assign(i, test_dict["test_key"])
+            i = layers.increment(i)
+            return [i, ten, test_dict]
+
+        main_program = Program()
+        startup_program = Program()
+        with program_guard(main_program, startup_program):
+            i = layers.zeros(shape=[1], dtype='int64')
+            ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
+            test_data = layers.fill_constant(shape=[1], dtype='int64', value=0)
+            test_dict = {"test_key": test_data}
+            i, ten, test_dict = layers.while_loop(cond, body,
+                                                  [i, ten, test_dict])
+        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        res = exe.run(main_program, fetch_list=[test_dict["test_key"]])
+        self.assertTrue(
+            np.allclose(
+                np.asarray(res[0]),
+                np.full(
+                    shape=(1), fill_value=9, dtype=np.int64)))
+
 
 class TestApiWhileLoop_Nested(unittest.TestCase):
     def test_nested_net(self):
