import unittest
import paddle.v2.fluid as fluid
import paddle.v2.fluid.core as core
import numpy
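

# This test builds a small fluid program that reorders the sequences of an
# input LoDTensor to the descending-length order of the sequences in a
# reference LoDTensor (via lod_rank_table), then checks the forward output
# and the gradient w.r.t. the input against the pure-NumPy reference
# implementation in TestReorderLoDTensor.reorder() below.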
class TestReorderLoDTensor(unittest.TestCase):
    num_seq = 5
    # [name, dim, lod_level] descriptors for the source (input) and target
    # (ref) data; the tests below mutate the input's lod_level
    data_desc = (['input', 9, 0], ['ref', 5, 1])

    @classmethod
    def setUpClass(cls):
        # build the fluid program once and share it across the tests
        cls.set_program()

    @classmethod
    def set_program(cls):
        dat = fluid.layers.data(
            name=cls.data_desc[0][0], shape=[cls.data_desc[0][1]])
        dat.stop_gradient = False
        rank_dat = fluid.layers.data(
            name=cls.data_desc[1][0], shape=[cls.data_desc[1][1]])
        table = fluid.layers.lod_rank_table(rank_dat)
        new_dat = fluid.layers.reorder_lod_tensor_by_rank(
            x=dat, rank_table=table)
        # sum-reduce so that every element of the reordered output, and hence
        # of the input, receives a gradient of exactly 1
        loss = fluid.layers.reduce_sum(new_dat)
        fluid.backward.append_backward(loss=loss)
        cls.fetch_list = [new_dat, cls.data_desc[0][0] + '@GRAD']

    def run_program(self):
        outputs = []
        input_grads = []
        # run on CPU and, if PaddlePaddle was built with GPU support, on
        # CUDA device 0 as well
        places = [core.CPUPlace()]
        if core.is_compile_gpu():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.set_inputs(place)
            exe = fluid.Executor(place)
            output, input_grad = exe.run(fluid.default_main_program(),
                                         feed=self.inputs,
                                         fetch_list=self.fetch_list,
                                         return_numpy=False)
            outputs.append(output)
            input_grads.append(input_grad)
        self.actual_outputs = outputs
        self.actual_grads = input_grads
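
    # LoD is kept in offset form throughout: each level is a list of
    # begin/end offsets into the next level (or, at the deepest level, into
    # the rows of the tensor). E.g. [0, 3, 4] describes two sequences of
    # lengths 3 and 1.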

    def set_data(self):
        self.data = {}
        for desc in self.data_desc:
            data_name = desc[0]
            data_dim = desc[1]
            data_lod_level = desc[2]
            data_lod = []
            for i in range(data_lod_level):
                # random sequence lengths in [1, 4]: num_seq entries at the
                # top level, and at deeper levels as many entries as the
                # total length recorded by the level above (its last offset)
                lod_level_i = numpy.random.randint(
                    low=1,
                    high=5,
                    size=self.num_seq if i == 0 else lod_level_i[-1])
                # accumulate the lengths into offsets
                lod_level_i = [0] + numpy.cumsum(lod_level_i).tolist()
                data_lod.append(lod_level_i)
            # the number of rows is the last offset of the deepest level, or
            # num_seq when there is no LoD
            data_value = numpy.random.random(size=[
                data_lod[-1][-1] if data_lod else self.num_seq, data_dim
            ]).astype('float32')
            self.data[data_name] = (data_value, data_lod)

    def set_inputs(self, place):
        self.inputs = {}
        for desc in self.data_desc:
            tensor = fluid.Tensor()
            tensor.set(self.data[desc[0]][0], place)
            if self.data[desc[0]][1]:
                tensor.set_lod(self.data[desc[0]][1])
            self.inputs[desc[0]] = tensor
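
    # NumPy reference implementation of reorder_lod_tensor_by_rank: build a
    # rank table from the reference LoD, slice the input into top-level
    # sequences, and concatenate the slices in rank-table order.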

    def reorder(self):
        level = 0

        # compute the rank_table according to ref_lod
        ref_lod = self.data[self.data_desc[1][0]][1][level]
        rank_table = []  # list of (index, length)
        for i in range(len(ref_lod) - 1):
            rank_table.append((i, ref_lod[i + 1] - ref_lod[i]))
        # sort by length, longest first; sorted() is stable, so equal-length
        # sequences keep their original relative order
        rank_table = sorted(rank_table, key=lambda x: x[1], reverse=True)
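
        # For example, the offsets ref_lod = [0, 3, 4, 7, 8, 14] encode
        # sequence lengths [3, 1, 3, 1, 6], so the sorted rank_table is
        # [(4, 6), (0, 3), (2, 3), (1, 1), (3, 1)].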

        # compute the input sequence info according to input_lod
        input_value, input_lod = self.data[self.data_desc[0][0]]
        input_table = []  # list of (offset, length, sub_lod)
        if input_lod:
            for i in range(len(input_lod[level]) - 1):
                start_idx = i
                end_idx = i + 1
                sub_lod = []
                # walk down the LoD levels, recording the sequence's lengths
                # at every level and translating (start_idx, end_idx) into
                # indices of the next level
                for lod_level_i in input_lod[level:]:
                    sub_lod_i = []
                    for idx in range(start_idx, end_idx):
                        sub_lod_i.append(lod_level_i[idx + 1] -
                                         lod_level_i[idx])
                    sub_lod.append(sub_lod_i)
                    start_idx = lod_level_i[start_idx]
                    end_idx = lod_level_i[end_idx]
                input_table.append((start_idx, end_idx - start_idx, sub_lod))
        else:
            # no LoD: treat each row as a sequence of length 1
            input_table = [(i, 1, []) for i in range(len(rank_table))]
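
        # For example, with a hypothetical 2-level input_lod of
        # [[0, 2, 5], [0, 3, 4, 6, 8, 9]], the first top-level sequence
        # spans sub-sequences 0..1 and rows 0..3, so
        # input_table[0] == (0, 4, [[2], [3, 1]]).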

        # reorder by rank_table
        output_value = numpy.zeros_like(input_value)
        output_lod = []
        offset = 0
        for index, length in rank_table:
            # copy the rows of the index-th input sequence into the next
            # free rows of the output
            input_seq_start = input_table[index][0]
            input_seq_len = input_table[index][1]
            input_seq_end = input_seq_start + input_seq_len
            output_value[offset:offset + input_seq_len] = input_value[
                input_seq_start:input_seq_end]
            offset += input_seq_len
            # append the sequence's lengths at every LoD level to the output
            # LoD, again in offset form
            input_seq_sub_lod = input_table[index][2]
            if len(output_lod) == 0:
                output_lod = [[0] for i in input_seq_sub_lod]
            for i, sub_lod_i in enumerate(input_seq_sub_lod):
                for idx_sub in sub_lod_i:
                    output_lod[i].append(output_lod[i][-1] + idx_sub)
        return output_value, output_lod
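
    # Both tests compare the fluid op against the reference: outputs must
    # match elementwise and carry the reordered LoD, and, because the loss
    # is a plain sum, every input element's gradient must be 1 with the
    # gradient LoD equal to the input LoD.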

    def test_reorder_lod_tensor(self):
        self.data_desc[0][-1] = 2  # input is a 2-level LoDTensor
        self.set_data()
        self.run_program()
        # check output
        expect_output, expect_output_lod = self.reorder()
        for actual_output in self.actual_outputs:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_output), expect_output, atol=0.001))
            self.assertEqual(expect_output_lod, actual_output.lod())
        # check gradient
        expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0])
        expect_grad_lod = self.data[self.data_desc[0][0]][1]
        for actual_grad in self.actual_grads:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_grad), expect_grad, atol=0.001))
            self.assertEqual(expect_grad_lod, actual_grad.lod())
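
    # With a plain tensor input the op should behave as if every row were a
    # length-1 sequence; the reference covers this in the else branch of
    # reorder() above.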

    def test_reorder_tensor(self):
        self.data_desc[0][-1] = 0  # input is a plain tensor
        self.set_data()
        self.run_program()
        # check output
        expect_output, expect_output_lod = self.reorder()
        for actual_output in self.actual_outputs:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_output), expect_output, atol=0.001))
            self.assertEqual(expect_output_lod, actual_output.lod())
        # check gradient
        expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0])
        expect_grad_lod = self.data[self.data_desc[0][0]][1]
        for actual_grad in self.actual_grads:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_grad), expect_grad, atol=0.001))
            self.assertEqual(expect_grad_lod, actual_grad.lod())
        global outputs_from_tensor_implicit_lod
        outputs_from_tensor_implicit_lod = self.actual_outputs

        # compare outputs between LoDTensors with explicit and implicit lod:
        # rerun on the same data, but set a LoD that marks every row as its
        # own length-1 sequence, which should reproduce the implicit behavior
        input_lod = [[
            i for i in range(len(self.data[self.data_desc[0][0]][0]) + 1)
        ]]
        self.inputs[self.data_desc[0][0]].set_lod(input_lod)
        # preserve the output of the LoDTensor with implicit lod to compare
        expect_output = [
            numpy.array(actual_output) for actual_output in self.actual_outputs
        ]
        self.run_program()
        for actual_output in self.actual_outputs:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_output), expect_output, atol=0.001))


if __name__ == '__main__':
    unittest.main()