@@ -37,36 +37,6 @@ namespace framework {

const int kLodTensorSize = 20 * 128;

class LoDTensorTester : public ::testing::Test {
 public:
  virtual void SetUp() override {
    // tensor's batch_size: 20
    // 3 levels of LoD, stored as relative offsets
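    // Assuming the usual relative-offset convention (each entry indexes into
    // the level below, and the lowest level indexes tensor rows), these levels
    // correspond to the absolute offsets
    //   {0, 12, 20}, {0, 5, 12, 20}, {0, 2, 5, 7, 10, 12, 15, 17, 20}.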
    LoD lod;
    lod.push_back(std::vector<size_t>{0, 2, 3});
    lod.push_back(std::vector<size_t>{0, 2, 5, 8});
    lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});

    ASSERT_EQ(lod.size(), 3UL);

    lod_tensor_.Resize({20 /*batch size*/, 128 /*dim*/});
    // allocate memory
    float* dst_ptr = lod_tensor_.mutable_data<float>(place);
    for (int i = 0; i < kLodTensorSize; ++i) {
      dst_ptr[i] = i;
    }

    lod_tensor_.set_lod(lod);
  }

 protected:
  platform::CPUPlace place;
  LoDTensor lod_tensor_;
};

TEST(LodExpand, test) {
  LoD lod{{0, 2}};
  LoDTensor tensor;

@@ -144,5 +114,53 @@ TEST(LoD, ToAbsOffset) {
  EXPECT_EQ(abs_lod, expected);
}
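
// CheckLoD validates a LoD in relative-offset form. Judging from the
// assertions below: adjacent levels must be compatible, an empty LoD is
// accepted, a level with fewer than two offsets is rejected, and an optional
// tensor height must match the end of the lowest level.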
TEST(LoD, CheckLoD) {
  LoD relative_lod;
  relative_lod.push_back(std::vector<size_t>({0, 2}));
  relative_lod.push_back(std::vector<size_t>({0, 1, 3}));
  relative_lod.push_back(std::vector<size_t>({0, 2, 4, 5}));

  // check compatible
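  // (Compatibility presumably requires level 1's last offset, 3, to index the
  // final offset of level 2, which has offsets at indices 0..3; bumping it to
  // 4 below should therefore fail the check.)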
  ASSERT_TRUE(CheckLoD(relative_lod));
  relative_lod[1].back()++;
  ASSERT_FALSE(CheckLoD(relative_lod));
  relative_lod[1].back()--;  // recover it

  // check empty
  LoD empty_lod;
  ASSERT_TRUE(CheckLoD(empty_lod));

  // check less than 2 offsets in a level
  LoD some_lod0;
  some_lod0.push_back(std::vector<size_t>({0}));
  ASSERT_FALSE(CheckLoD(some_lod0));

  // check with underlying tensor storage.
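  // (The lowest level ends at offset 5, so a tensor height of 5 presumably
  // matches while 9 does not.)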
  ASSERT_TRUE(CheckLoD(relative_lod, 5));
  ASSERT_FALSE(CheckLoD(relative_lod, 9));
}
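
// CheckAbsLoD works on the absolute-offset form produced by ToAbsOffset.
// Judging from the assertions below, the last offset of every level must agree
// with the tensor height, and a level needs at least two offsets.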
TEST(LoD, CheckAbsLoD) {
  LoD relative_lod;
  relative_lod.push_back(std::vector<size_t>({0, 2}));
  relative_lod.push_back(std::vector<size_t>({0, 1, 3}));
  relative_lod.push_back(std::vector<size_t>({0, 2, 4, 5}));

  auto abs_lod = ToAbsOffset(relative_lod);
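  // presumably {{0, 5}, {0, 2, 5}, {0, 2, 4, 5}}: each entry is resolved
  // against the level below, and the lowest level is left unchanged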

  ASSERT_TRUE(CheckAbsLoD(abs_lod));

  // check that the last offset is compatible with the tensor height
  abs_lod.back().back()++;
  ASSERT_FALSE(CheckAbsLoD(abs_lod));
  abs_lod.back().back()--;  // restore

  // check less than 2 offsets in a level
  LoD abs_lod0;
  abs_lod0.push_back(std::vector<size_t>({0}));
  ASSERT_FALSE(CheckAbsLoD(abs_lod0));
}

}  // namespace framework
}  // namespace paddle