@@ -54,92 +54,6 @@ class LoDTensorTester : public ::testing::Test {
  LoDTensor lod_tensor_;
};
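
// The fixture builds a LoDTensor whose LoD has three levels.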
TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor_.NumLevels(), 3UL); }
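
// NumElements(level) returns the number of elements in the given LoD level.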
TEST_F(LoDTensorTester, NumElements) {
  ASSERT_EQ(lod_tensor_.NumElements(0), 2UL);
  ASSERT_EQ(lod_tensor_.NumElements(1), 3UL);
  ASSERT_EQ(lod_tensor_.NumElements(2), 8UL);
}
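
// NumElements(level, idx) counts the next-level elements covered by the
// idx-th element of the given level.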
TEST_F(LoDTensorTester, NumElements2) {
  ASSERT_EQ(lod_tensor_.NumElements(0, 0), 2UL);
  ASSERT_EQ(lod_tensor_.NumElements(0, 1), 1UL);
  ASSERT_EQ(lod_tensor_.NumElements(1, 1), 3UL);
}
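
// ShrinkLevels keeps only a sub-range of LoD levels; the underlying data is
// still shared with the source tensor.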
TEST_F(LoDTensorTester, ShrinkLevels) {
  // shrink to 1 level
  for (size_t level = 0; level < 3UL; ++level) {
    LoDTensor new_lod_tensor = lod_tensor_;
    new_lod_tensor.ShrinkLevels(level, level + 1);
    ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
    ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
  }
  // shrink to 2 levels
  for (size_t level = 0; level < 2UL; ++level) {
    LoDTensor new_lod_tensor = lod_tensor_;
    new_lod_tensor.ShrinkLevels(level, level + 2);
    // the lowest level's last element should be the tensor's batch_size.
    ASSERT_EQ(new_lod_tensor.lod().back().back(),
              lod_tensor_.lod().back().back());
    ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
    ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
  }
}
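
// ShrinkInLevel slices a range of elements within one level, narrowing both
// the LoD and the rows of data that are visible.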
TEST_F(LoDTensorTester, ShrinkInLevel) {
  size_t level = 0;
  LoDTensor new_lod_tensor = lod_tensor_;
  new_lod_tensor.ShrinkInLevel(level, 0, 1);
  ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL);
  ASSERT_EQ(new_lod_tensor.NumElements(0), 1UL);
  ASSERT_EQ(new_lod_tensor.NumElements(1), 2UL);
  ASSERT_EQ(new_lod_tensor.NumElements(2), 5UL);
  ASSERT_EQ(new_lod_tensor.dims()[0], 12);
  for (int i = 0; i < 12 * 128; i++) {
    ASSERT_EQ(new_lod_tensor.data<float>()[i], i);
  }
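
  // Shrink to the second element of level 1: two levels remain and the data
  // view starts five rows in.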
  level = 1;
  new_lod_tensor = lod_tensor_;
  new_lod_tensor.ShrinkInLevel(level, 1, 2);
  ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
  ASSERT_EQ(new_lod_tensor.NumElements(0), 1UL);
  ASSERT_EQ(new_lod_tensor.NumElements(1), 3UL);
  ASSERT_EQ(new_lod_tensor.dims()[0], 7);
  for (int i = 5 * 128; i < 12 * 128; i++) {
    ASSERT_EQ(new_lod_tensor.data<float>()[i - 5 * 128], i);
  }
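
  // Two copies that share the source data but are shrunk to different ranges
  // must expose different data pointers.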
  LoDTensor t1;
  t1.set_lod(lod_tensor_.lod());
  t1.ShareDataWith(lod_tensor_);

  LoDTensor t2;
  t2.set_lod(lod_tensor_.lod());
  t2.ShareDataWith(lod_tensor_);

  t1.ShrinkInLevel(0, 1, 2);
  t2.ShrinkInLevel(0, 0, 1);
  EXPECT_NE(t1.data<float>(), t2.data<float>());
  EXPECT_NE(t1.data<float>(), lod_tensor_.data<float>());
}
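
// Serializing and then deserializing through a stream should preserve both
// the tensor contents and the LoD.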
TEST_F(LoDTensorTester, SerializeAndDeserialize) {
  LoDTensor dst_tensor;
  platform::CPUDeviceContext cpu_ctx((platform::CPUPlace()));
  std::ostringstream oss;
  SerializeToStream(oss, lod_tensor_, cpu_ctx);
  std::istringstream iss(oss.str());
  DeserializeFromStream(iss, &dst_tensor, cpu_ctx);
  float* dst_ptr = dst_tensor.mutable_data<float>(platform::CPUPlace());
  for (int i = 0; i < kLodTensorSize; ++i) {
    EXPECT_EQ(dst_ptr[i], i);
  }
  EXPECT_EQ(dst_tensor.lod(), lod_tensor_.lod());
}
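
// Exercises LoD-based expansion of a tensor.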
TEST(LodExpand, test) {
  LoD lod{{0, 2}};
  LoDTensor tensor;