@@ -19,17 +19,18 @@ limitations under the License. */
 template <typename DeviceContext, typename Place, typename T>
 void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
   paddle::framework::LoDTensor cpu_out_grad;
+  paddle::framework::LoDTensor cpu_in_grad;
   paddle::framework::LoDTensor out_grad;
   paddle::framework::LoDTensor in_grad;
   const size_t second_dim = 128u;
 
   // construct out_grad's tensor in cpu
-  const size_t out_first_dim = lod.size() - 1;
+  const size_t out_first_dim = lod[0].size() - 1;
   auto out_dims = paddle::framework::make_ddim(
       {static_cast<int64_t>(out_first_dim), static_cast<int64_t>(second_dim)});
 
   cpu_out_grad.mutable_data<T>(out_dims, paddle::platform::CPUPlace());
-  for (int64_t i = 0; i < out_grad.numel(); ++i) {
+  for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
     cpu_out_grad.data<T>()[i] = static_cast<T>(i);
   }
 
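// Why the first hunk indexes lod[0]: a paddle::framework::LoD is a vector of
// offset levels, so lod.size() counts levels while lod[0].size() - 1 counts
// the sequences described by the level-0 offsets. A minimal sketch of that
// layout (the values here are illustrative, not taken from this test):
//
//   paddle::framework::LoD lod;
//   lod.push_back(std::vector<size_t>{0, 10, 30});   // one level, two sequences
//   const size_t num_sequences = lod[0].size() - 1;  // == 2
//   const size_t total_steps = lod[0].back();        // == 30 rows pooled over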
@@ -58,16 +59,38 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
   paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
       *context, "SUM", out_grad, &in_grad);
 
+  if (paddle::platform::is_cpu_place(*place)) {
+    cpu_in_grad = in_grad;
+  } else {
+    TensorCopySync(in_grad, paddle::platform::CPUPlace(), &cpu_in_grad);
+    cpu_in_grad.set_lod(in_grad.lod());
+  }
+
   EXPECT_EQ(in_grad.numel(), lod[0].back() * second_dim);
   EXPECT_EQ(in_grad.lod(), lod);
-  for (int64_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
-    int64_t begin = in_grad.lod()[0][i];
-    int64_t end = in_grad.lod()[0][i + 1];
-    paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
-    for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
-      for (int64_t m = 0; m != second_dim; ++m) {
-        EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
-                  out_grad.data<T>()[m + i * second_dim]);
+
+  if (paddle::platform::is_cpu_place(*place)) {
+    for (int64_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
+      int64_t begin = in_grad.lod()[0][i];
+      int64_t end = in_grad.lod()[0][i + 1];
+      paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
+      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
+        for (int64_t m = 0; m != second_dim; ++m) {
+          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
+                    out_grad.data<T>()[m + i * second_dim]);
+        }
+      }
+    }
+  } else {
+    for (int64_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
+      int64_t begin = cpu_in_grad.lod()[0][i];
+      int64_t end = cpu_in_grad.lod()[0][i + 1];
+      paddle::framework::Tensor tmp = cpu_in_grad.Slice(begin, end);
+      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
+        for (int64_t m = 0; m != second_dim; ++m) {
+          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
+                    cpu_out_grad.data<T>()[m + i * second_dim]);
+        }
       }
     }
   }
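// A sketch of how a gtest case could drive the helper above on CPU; the test
// name and LoD contents are illustrative assumptions, not part of this diff:

TEST(SequencePoolingGrad, CPU_SUM) {
  paddle::framework::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});  // one 10-step sequence
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext,
                         paddle::platform::CPUPlace, float>(lod1);

  paddle::framework::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});  // three variable-length sequences
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext,
                         paddle::platform::CPUPlace, float>(lod2);
}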