|
|
|
@ -17,6 +17,7 @@ from __future__ import print_function
|
|
|
|
|
import unittest
|
|
|
|
|
import numpy as np
|
|
|
|
|
|
|
|
|
|
import paddle
|
|
|
|
|
import paddle.fluid as fluid
|
|
|
|
|
import paddle.fluid.layers as layers
|
|
|
|
|
import paddle.fluid.core as core
|
|
|
|
@ -200,5 +201,53 @@ class TestExpandDoubleGradCheck(unittest.TestCase):
|
|
|
|
|
self.func(p)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestTileDoubleGradCheck(unittest.TestCase):
    """Numerical double-gradient check for ``paddle.tile``.

    Tiles a [3, 12] float64 input by repeat factors [4, 9] and verifies
    second-order gradients via ``gradient_checker.double_grad_check``.
    """

    @prog_scope()
    def func(self, place):
        # Graph construction: a persistable data variable fed into tile.
        shape = [3, 12]
        reps = [4, 9]
        perturbation = 0.005  # finite-difference step for the checker
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True
        out = paddle.tile(x, reps)

        # Random initial values in [-1, 1) for the input variable.
        x_init = np.random.uniform(-1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x], out, x_init=x_init, place=place, eps=perturbation)

    def test_grad(self):
        # Always run on CPU; additionally on GPU when the build has CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestExpandV2DoubleGradCheck(unittest.TestCase):
    """Numerical double-gradient check for ``paddle.expand``.

    Broadcasts a [1, 12] float64 input up to shape [4, 12] and verifies
    second-order gradients via ``gradient_checker.double_grad_check``.
    """

    @prog_scope()
    def func(self, place):
        # Graph construction: a persistable data variable fed into expand.
        in_shape = [1, 12]
        target_shape = [4, 12]
        perturbation = 0.005  # finite-difference step for the checker
        dtype = np.float64

        x = layers.data('x', in_shape, False, dtype)
        x.persistable = True
        out = paddle.expand(x, target_shape)

        # Random initial values in [-1, 1) for the input variable.
        x_init = np.random.uniform(-1, 1, in_shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x], out, x_init=x_init, place=place, eps=perturbation)

    def test_grad(self):
        # Always run on CPU; additionally on GPU when the build has CUDA.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Run every gradient-check TestCase in this module when executed directly.
if __name__ == "__main__":
    unittest.main()
|
|
|
|
|