@@ -46,8 +46,19 @@ class ShrinkRNNMemoryOp : public ArrayOp {
     auto *out_var = scope.FindVar(Output("Out"));
     PADDLE_ENFORCE(out_var != nullptr, "Output Out must be set");
     auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>();
+
+    // should consider multiple levels
+    size_t height = dst_num_rows;
+    auto lod_level = lod_rank_table.level();
+    if (x_tensor.lod().size() > lod_level &&
+        x_tensor.lod()[lod_level].size() < dst_num_rows) {
+      auto lod_offset = framework::GetSubLoDAndAbsoluteOffset(
+          x_tensor.lod(), 0, dst_num_rows + 1, lod_level);
+      height = lod_offset.second.second;
+    }
+
     if (dst_num_rows != 0) {
-      out_tensor.ShareDataWith(x_tensor.Slice(0, dst_num_rows));
+      out_tensor.ShareDataWith(x_tensor.Slice(0, height));
     }
   }
 };
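
The added block converts a sequence count into a row count before slicing: GetSubLoDAndAbsoluteOffset maps an index range at lod_level down through the lower LoD levels, and lod_offset.second.second is the absolute end row, used as the slice height. This matters when X carries a multi-level LoD, since dst_num_rows counts entries at lod_level while the tensor's rows are counted at the bottom level. Below is a minimal standalone sketch of just the offset walk; the AbsoluteOffset helper is hypothetical (not Paddle's API), it omits the sub-LoD collection the real function also performs, and it assumes LoD is stored as per-level vectors of cumulative offsets.

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// One vector of cumulative offsets per LoD level.
using LoD = std::vector<std::vector<size_t>>;

// Map the index range [start, end) at `level` down to absolute
// bottom-level row offsets by chaining each level's offset table.
// (Hypothetical sketch, not Paddle's GetSubLoDAndAbsoluteOffset.)
std::pair<size_t, size_t> AbsoluteOffset(const LoD &lod, size_t start,
                                         size_t end, size_t level) {
  for (size_t l = level; l < lod.size(); ++l) {
    start = lod[l][start];
    end = lod[l][end];
  }
  return {start, end};
}

int main() {
  // Level 0: two sequences made of {2, 1} sub-sequences -> offsets {0, 2, 3}.
  // Level 1: three sub-sequences of lengths {2, 3, 4} -> offsets {0, 2, 5, 9}.
  LoD lod = {{0, 2, 3}, {0, 2, 5, 9}};
  // Keep only the first top-level sequence: indices [0, 1) at level 0.
  // Level 0 maps [0, 1) to [0, 2); level 1 maps [0, 2) to [0, 5).
  auto off = AbsoluteOffset(lod, 0, 1, 0);
  std::cout << "height = " << off.second << "\n";  // prints 5
  return 0;
}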
@@ -64,11 +75,11 @@ class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out", "(LoDTensor) The shrinked RNN step memory.");
     AddComment(
         R"DOC(
-In dynamic RNN, we are able to handle sequences of different lengths.
-Because of the multiple lengths, the size of each step input can be
+In dynamic RNN, we are able to handle sequences of different lengths.
+Because of the multiple lengths, the size of each step input can be
 different, which may lead to a mismatching between the input of
-the current step and the memory generated by the previous one. This
-operator shrinks memory according to the size of the next step input,
+the current step and the memory generated by the previous one. This
+operator shrinks memory according to the size of the next step input,
 to make sure that they can match each other.
 )DOC");
   }
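
As a concrete picture of the shrinking that the DOC text describes, here is a minimal sketch under an assumed setup: three sequences of lengths {3, 2, 1}, already sorted in descending order as a LoDRankTable would sort them. Step 0 consumes input from all three sequences, step 1 from two, step 2 from one, so before each step the previous memory must be shrunk to the rows of the still-active sequences.

#include <iostream>
#include <vector>

int main() {
  // Sequence lengths sorted descending, as in a rank table.
  std::vector<int> lengths = {3, 2, 1};
  for (int step = 0; step < 3; ++step) {
    // A sequence is still active at `step` if it has an element there.
    int active = 0;
    for (int len : lengths) {
      if (len > step) ++active;
    }
    std::cout << "step " << step << ": shrink memory to " << active
              << " row(s)\n";
  }
  return 0;  // prints 3, 2, 1 active rows for steps 0, 1, 2
}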