@@ -72,29 +72,29 @@ TEST(Tensor, MutableData) {
    p2 = src_tensor.mutable_data<float>(make_ddim({2, 2}), CPUPlace());
    EXPECT_EQ(p1, p2);
  }

#ifdef __CUDACC__
  {
    Tensor src_tensor;
    float* p1 = nullptr;
    float* p2 = nullptr;
    // initialization
    p1 = src_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
    EXPECT_NE(p1, nullptr);
    // set src_tensor a new dim with a larger size
    // memory is supposed to be re-allocated
    p2 = src_tensor.mutable_data<float>(make_ddim({3, 4}), GPUPlace());
    EXPECT_NE(p2, nullptr);
    EXPECT_NE(p1, p2);
    // set src_tensor a new dim with the same size
    // memory block is supposed to be unchanged
    p1 = src_tensor.mutable_data<float>(make_ddim({2, 2, 3}), GPUPlace());
    EXPECT_EQ(p1, p2);
    // set src_tensor a new dim with a smaller size
    // memory block is supposed to be unchanged
    p2 = src_tensor.mutable_data<float>(make_ddim({2, 2}), GPUPlace());
    EXPECT_EQ(p1, p2);
  }
#endif
}

TEST(Tensor, ShareDataFrom) {
@@ -123,15 +123,15 @@ TEST(Tensor, ShareDataFrom) {
    ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
  }

#ifdef __CUDACC__
  {
    Tensor src_tensor;
    Tensor dst_tensor;
    src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), GPUPlace());
    dst_tensor.ShareDataFrom<int>(src_tensor);
    ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
  }
#endif
}

TEST(Tensor, Slice) {
@@ -160,29 +160,29 @@ TEST(Tensor, Slice) {
    EXPECT_EQ(src_data_address + 3 * 4 * 1 * sizeof(int), slice_data_address);
  }

#ifdef __CUDACC__
  {
    Tensor src_tensor;
    src_tensor.mutable_data<double>(make_ddim({6, 9}), GPUPlace());
    Tensor slice_tensor = src_tensor.Slice<double>(2, 6);
    DDim slice_dims = slice_tensor.dims();
    ASSERT_EQ(arity(slice_dims), 2);
    EXPECT_EQ(slice_dims[0], 4);
    EXPECT_EQ(slice_dims[1], 9);

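    // The slice must alias the source buffer rather than copy it: mutable_data
    // with an unchanged dim should return the original address, and the slice
    // [2, 6) of a {6, 9} tensor should start 2 * 9 doubles into the source.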
    uintptr_t src_data_address =
        reinterpret_cast<uintptr_t>(src_tensor.data<double>());
    uintptr_t src_mutable_data_address = reinterpret_cast<uintptr_t>(
        src_tensor.mutable_data<double>(src_tensor.dims(), GPUPlace()));
    uintptr_t slice_data_address =
        reinterpret_cast<uintptr_t>(slice_tensor.data<double>());
    uintptr_t slice_mutable_data_address = reinterpret_cast<uintptr_t>(
        slice_tensor.mutable_data<double>(slice_tensor.dims(), GPUPlace()));
    EXPECT_EQ(src_data_address, src_mutable_data_address);
    EXPECT_EQ(slice_data_address, slice_mutable_data_address);
    EXPECT_EQ(src_data_address + 9 * 2 * sizeof(double), slice_data_address);
  }
#endif
}

TEST(Tensor, CopyFrom) {