|
|
|
@ -45,14 +45,16 @@ custom_ops = load(
|
|
|
|
|
verbose=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def concat_dynamic(func, dtype, np_inputs, axis_v, with_attr=False):
    """Run a concat callable in dynamic-graph (eager) mode on CPU.

    Args:
        func: concat callable under test (custom op or ``paddle.concat``).
        dtype: tensor dtype name, e.g. ``'float32'``.
        np_inputs: sequence of numpy arrays to concatenate.
        axis_v: concatenation axis as a plain int.
        with_attr: if True, pass the axis as a plain python int (op
            attribute); otherwise wrap it in a 1-element int64 tensor
            (op input).

    Returns:
        Tuple of (output ndarray, list of per-input gradients).
    """
    paddle.set_device("cpu")
    inputs = [
        paddle.to_tensor(
            x, dtype=dtype, stop_gradient=False) for x in np_inputs
    ]
    if with_attr:
        # Axis handed over as a compile-time attribute.
        axis = axis_v
    else:
        # Axis handed over as a runtime tensor input.
        axis = paddle.full(shape=[1], dtype='int64', fill_value=axis_v)
    out = func(inputs, axis)
    out.stop_gradient = False
    out.backward()
    # NOTE(review): this assignment falls inside a hunk that is hidden in
    # the source diff; collecting each input's gradient is the standard
    # pattern here -- confirm against the upstream file.
    grad_inputs = [x.grad for x in inputs]
    return out.numpy(), grad_inputs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def concat_static(func, dtype, np_inputs, axis_v, with_attr=False):
    """Run a concat callable in static-graph mode on CPU.

    Args:
        func: concat callable under test (custom op or ``paddle.concat``).
        dtype: tensor dtype name, e.g. ``'float32'``.
        np_inputs: sequence of two numpy arrays of shape [2, 3].
        axis_v: concatenation axis as a plain int.
        with_attr: if True, pass the axis as a plain python int (op
            attribute); otherwise wrap it in a 1-element int64 tensor and
            feed it by name.

    Returns:
        Tuple of (output value, x1 gradient value, x2 gradient value).
    """
    paddle.enable_static()
    paddle.set_device("cpu")
    with static.scope_guard(static.Scope()):
        with static.program_guard(static.Program()):
            x1 = static.data(name="x1", shape=[2, 3], dtype=dtype)
            x2 = static.data(name="x2", shape=[2, 3], dtype=dtype)
            if with_attr:
                # Axis handed over as a compile-time attribute.
                axis = axis_v
            else:
                # Axis handed over as a runtime tensor input.
                axis = paddle.full(shape=[1], dtype='int64', fill_value=axis_v)
            x1.stop_gradient = False
            x2.stop_gradient = False
            out = func([x1, x2], axis)
            # NOTE(review): the source diff hides a hunk here; it must build
            # the backward graph so the "@GRAD" fetches below resolve.
            # Reconstructed as a sum-reduction + append_backward -- confirm
            # against the upstream file.
            static.append_backward(paddle.sum(out))

            exe = static.Executor()
            exe.run(static.default_startup_program())

            if with_attr:
                feed_dict = {
                    "x1": np_inputs[0].astype(dtype),
                    "x2": np_inputs[1].astype(dtype)
                }
            else:
                feed_dict = {
                    "x1": np_inputs[0].astype(dtype),
                    "x2": np_inputs[1].astype(dtype),
                    "axis": axis
                }
            out_v, x1_grad_v, x2_grad_v = exe.run(
                static.default_main_program(),
                feed=feed_dict,
                fetch_list=[out.name, x1.name + "@GRAD", x2.name + "@GRAD"])
    paddle.disable_static()
    return out_v, x1_grad_v, x2_grad_v
|
|
|
|
@ -93,55 +105,67 @@ def concat_static(func, device, dtype, np_inputs, axis_v):
|
|
|
|
|
class TestCustomConcatDynamicAxisJit(unittest.TestCase):
|
|
|
|
|
def setUp(self):
|
|
|
|
|
self.dtypes = ['float32', 'float64', 'int32', 'int64']
|
|
|
|
|
self.devices = ['cpu']
|
|
|
|
|
self.np_inputs = [
|
|
|
|
|
np.array([[1, 2, 3], [4, 5, 6]]),
|
|
|
|
|
np.array([[11, 12, 13], [14, 15, 16]])
|
|
|
|
|
]
|
|
|
|
|
self.axises = [0, 1]
|
|
|
|
|
|
|
|
|
|
def check_output(self, out, pd_out, name):
|
|
|
|
|
self.assertTrue(
|
|
|
|
|
np.array_equal(out, pd_out),
|
|
|
|
|
"custom op {}: {},\n paddle api {}: {}".format(name, out, name,
|
|
|
|
|
pd_out))
|
|
|
|
|
|
|
|
|
|
def test_dynamic(self):
|
|
|
|
|
for device in self.devices:
|
|
|
|
|
for dtype in self.dtypes:
|
|
|
|
|
for axis in self.axises:
|
|
|
|
|
out, grad_inputs = concat_dynamic(custom_ops.custom_concat,
|
|
|
|
|
device, dtype,
|
|
|
|
|
self.np_inputs, axis)
|
|
|
|
|
pd_out, pd_grad_inputs = concat_dynamic(
|
|
|
|
|
paddle.concat, device, dtype, self.np_inputs, axis)
|
|
|
|
|
|
|
|
|
|
self.assertTrue(
|
|
|
|
|
np.array_equal(out, pd_out),
|
|
|
|
|
"custom op out: {},\n paddle api out: {}".format(
|
|
|
|
|
out, pd_out))
|
|
|
|
|
for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs):
|
|
|
|
|
self.assertTrue(
|
|
|
|
|
np.array_equal(x_grad, pd_x_grad),
|
|
|
|
|
"custom op x grad: {},\n paddle api x grad: {}".
|
|
|
|
|
format(x_grad, pd_x_grad))
|
|
|
|
|
for dtype in self.dtypes:
|
|
|
|
|
for axis in self.axises:
|
|
|
|
|
out, grad_inputs = concat_dynamic(custom_ops.custom_concat,
|
|
|
|
|
dtype, self.np_inputs, axis)
|
|
|
|
|
pd_out, pd_grad_inputs = concat_dynamic(paddle.concat, dtype,
|
|
|
|
|
self.np_inputs, axis)
|
|
|
|
|
|
|
|
|
|
self.check_output(out, pd_out, "out")
|
|
|
|
|
for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs):
|
|
|
|
|
self.check_output(x_grad, pd_x_grad, "x_grad")
|
|
|
|
|
|
|
|
|
|
def test_static(self):
|
|
|
|
|
for device in self.devices:
|
|
|
|
|
for dtype in self.dtypes:
|
|
|
|
|
for axis in self.axises:
|
|
|
|
|
out, x1_grad, x2_grad = concat_static(
|
|
|
|
|
custom_ops.custom_concat, device, dtype, self.np_inputs,
|
|
|
|
|
axis)
|
|
|
|
|
pd_out, pd_x1_grad, pd_x2_grad = concat_static(
|
|
|
|
|
paddle.concat, device, dtype, self.np_inputs, axis)
|
|
|
|
|
|
|
|
|
|
self.assertTrue(
|
|
|
|
|
np.array_equal(out, pd_out),
|
|
|
|
|
"custom op out: {},\n paddle api out: {}".format(
|
|
|
|
|
out, pd_out))
|
|
|
|
|
self.assertTrue(
|
|
|
|
|
np.array_equal(x1_grad, pd_x1_grad),
|
|
|
|
|
"custom op x1_grad: {},\n paddle api x1_grad: {}".
|
|
|
|
|
format(x1_grad, pd_x1_grad))
|
|
|
|
|
self.assertTrue(
|
|
|
|
|
np.array_equal(x2_grad, pd_x2_grad),
|
|
|
|
|
"custom op x2_grad: {},\n paddle api x2_grad: {}".
|
|
|
|
|
format(x2_grad, pd_x2_grad))
|
|
|
|
|
for dtype in self.dtypes:
|
|
|
|
|
for axis in self.axises:
|
|
|
|
|
out, x1_grad, x2_grad = concat_static(
|
|
|
|
|
custom_ops.custom_concat, dtype, self.np_inputs, axis)
|
|
|
|
|
pd_out, pd_x1_grad, pd_x2_grad = concat_static(
|
|
|
|
|
paddle.concat, dtype, self.np_inputs, axis)
|
|
|
|
|
|
|
|
|
|
self.check_output(out, pd_out, "out")
|
|
|
|
|
self.check_output(x1_grad, pd_x1_grad, "x1_grad")
|
|
|
|
|
self.check_output(x2_grad, pd_x2_grad, "x2_grad")
|
|
|
|
|
|
|
|
|
|
def test_dynamic_with_attr(self):
|
|
|
|
|
for dtype in self.dtypes:
|
|
|
|
|
for axis in self.axises:
|
|
|
|
|
out, grad_inputs = concat_dynamic(
|
|
|
|
|
custom_ops.custom_concat_with_attr, dtype, self.np_inputs,
|
|
|
|
|
axis, True)
|
|
|
|
|
pd_out, pd_grad_inputs = concat_dynamic(
|
|
|
|
|
paddle.concat, dtype, self.np_inputs, axis, True)
|
|
|
|
|
|
|
|
|
|
self.check_output(out, pd_out, "out")
|
|
|
|
|
for x_grad, pd_x_grad in zip(grad_inputs, pd_grad_inputs):
|
|
|
|
|
self.check_output(x_grad, pd_x_grad, "x_grad")
|
|
|
|
|
|
|
|
|
|
def test_static_with_attr(self):
|
|
|
|
|
for dtype in self.dtypes:
|
|
|
|
|
for axis in self.axises:
|
|
|
|
|
out, x1_grad, x2_grad = concat_static(
|
|
|
|
|
custom_ops.custom_concat_with_attr, dtype, self.np_inputs,
|
|
|
|
|
axis, True)
|
|
|
|
|
pd_out, pd_x1_grad, pd_x2_grad = concat_static(
|
|
|
|
|
paddle.concat, dtype, self.np_inputs, axis, True)
|
|
|
|
|
|
|
|
|
|
self.check_output(out, pd_out, "out")
|
|
|
|
|
self.check_output(x1_grad, pd_x1_grad, "x1_grad")
|
|
|
|
|
self.check_output(x2_grad, pd_x2_grad, "x2_grad")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Standard unittest entry point; the body was lost in the mangled diff,
    # but unittest.main() is the conventional guard body for a test module.
    unittest.main()