@@ -199,10 +199,16 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
         def cond(i, x):
             return layers.less_than(i, eleven)
 
-        def body(i, x):
+        def body(j, x):
+            # TODO: In the while block, if a var created in the parent block
+            # participates in the gradient calculation, the gradient result
+            # is incorrect, because each step scope always returns the same
+            # value generated by the last step.
+            # Here we call the `assign` op in the while block to avoid this bug; we are working on fixing it in the next PR.
+            i = layers.assign(j)
             x = layers.elementwise_mul(x=i, y=i)
-            i = layers.increment(i)
-            return [i, x]
+            j = layers.increment(j)
+            return [j, x]
 
         main_program = Program()
         startup_program = Program()
@@ -232,7 +238,53 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
                           'x': feed_x},
                       fetch_list=[mean.name, i.grad_name])
         self.assertTrue(np.allclose(np.asarray(res[0]), data))
-        self.assertTrue(np.allclose(np.asarray(res[1]), i_grad))
+        self.assertTrue(
+            np.allclose(np.asarray(res[1]), i_grad),
+            msg=" \nres = \n{} \n\n ans = \n{}".format(res[1], i_grad))
+
+    def test_while_loop_backward2(self):
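+        # a minimal scalar loop: four iterations, then check the value and the gradient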
+        def cond(i, x):
+            return i < 5
+
+        def body(i, x):
+            x = x + i
+            i = i + 1
+            return [i, x]
+
+        main_program = Program()
+        startup_program = Program()
+        with fluid.program_guard(main_program, startup_program):
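+            # fluid.data vars default to stop_gradient=True; clear it so gradients reach the feeds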
+            i = fluid.data(name='i', shape=[1], dtype='float32')
+            i.stop_gradient = False
+            x = fluid.data(name='x', shape=[1], dtype='float32')
+            x.stop_gradient = False
+
+            out = layers.while_loop(cond, body, [i, x])
+            mean = layers.mean(out[1])
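+            # append the backward (gradient) ops for mean to main_program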
+            append_backward(mean)
+
+        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        feed_i = np.ones(1).astype('float32')
+        feed_x = np.ones(1).astype('float32')
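+        # with i = x = 1, the loop adds i = 1, 2, 3, 4 to x, so x ends at 11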
+        data = np.asarray([11]).astype('float32')
+        i_grad = np.asarray([1]).astype('float32')
+
+        res = exe.run(main_program,
+                      feed={'i': feed_i,
+                            'x': feed_x},
+                      fetch_list=[mean.name, i.grad_name])
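+        # res[0] holds the fetched mean, res[1] the gradient of the feed var i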
+        self.assertTrue(np.allclose(np.asarray(res[0]), data))
+        self.assertTrue(
+            np.allclose(np.asarray(res[1]), i_grad),
+            msg=" \nres = \n{} \n\n ans = \n{}".format(res[1], i_grad))
 
 
 class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
@@ -410,7 +462,7 @@ class TestApiWhileLoop_Error(unittest.TestCase):
         ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
         ten_2d = layers.fill_constant(shape=[2, 2], dtype='int64', value=10)
 
-        # The type of `cond` in Op(while_loop) must be callable
+        # The type of `cond` in Op(while_loop) must be callable
         def type_error_cond():
             out = layers.while_loop(data, body, [data_1d])