Removing length dims constraints of seq_pad and seq_unpad (#19497)

* Removing last dims constraints of seq_pad and seq_unpad test=develop

* fix test_layer api code test=develop

* fix sequence_pad_op.cc conflict test=develop

* remove test_analyzer_mm_dnn test=develop

* fix vectorize bug test=develop

* fix vectorize<int> test=develop
Aurelius84 committed 5 years ago (via GitHub)
parent cca26f5c42
commit 99a9615a4b
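In user-facing terms, the `Length` output of `sequence_pad` is now 1-D (shape `[batch_size]`) instead of `[batch_size, 1]`, and `sequence_unpad` consumes that 1-D tensor directly. A minimal sketch of the new contract, assuming the Paddle 1.x `fluid` static-graph API (variable names are illustrative, mirroring the docstring example further down):

    import numpy as np
    import paddle.fluid as fluid

    # Variable-length input: each time step is a 5-vector, LoD level 1.
    x = fluid.layers.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
    pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))

    # After this commit, `length` has shape [batch_size], not [batch_size, 1].
    padded, length = fluid.layers.sequence_pad(x=x, pad_value=pad_value)

    # `sequence_unpad` now accepts the 1-D length tensor as-is.
    unpadded = fluid.layers.sequence_unpad(x=padded, length=length)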

@@ -146,8 +146,8 @@ paddle.fluid.layers.conv2d_transpose (ArgSpec(args=['input', 'num_filters', 'out
 paddle.fluid.layers.conv3d_transpose (ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)), ('document', 'fb08f59141971b11f5f03bba06e9fc5a'))
 paddle.fluid.layers.sequence_expand (ArgSpec(args=['x', 'y', 'ref_level', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '10e122eb755c2bd1f78ef2332b28f1a0'))
 paddle.fluid.layers.sequence_expand_as (ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '858c432e7cbd8bb952cc2eb555457d50'))
-paddle.fluid.layers.sequence_pad (ArgSpec(args=['x', 'pad_value', 'maxlen', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '1ba3ccfe13ed5091e113c09c13dc3a20'))
-paddle.fluid.layers.sequence_unpad (ArgSpec(args=['x', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7f5ce36fb0016621e6bc001f4236d978'))
+paddle.fluid.layers.sequence_pad (ArgSpec(args=['x', 'pad_value', 'maxlen', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'df08b9c499ab3a90f95d08ab5b6c6c62'))
+paddle.fluid.layers.sequence_unpad (ArgSpec(args=['x', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e478180d5bc010a84f35af958cafa62c'))
 paddle.fluid.layers.lstm_unit (ArgSpec(args=['x_t', 'hidden_t_prev', 'cell_t_prev', 'forget_bias', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(0.0, None, None, None)), ('document', 'fe126c58e4339410e875ab1eba246d21'))
 paddle.fluid.layers.reduce_sum (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'dd5f06fb7cf39ca06cbab4abd03e6893'))
 paddle.fluid.layers.reduce_mean (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'a3024789eba11a70c2ef27c358173400'))

@@ -124,11 +124,6 @@ set(LAC_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/lac")
 download_model_and_data(${LAC_INSTALL_DIR} "lac_model.tar.gz" "lac_data.txt.tar.gz")
 inference_analysis_api_test(test_analyzer_lac ${LAC_INSTALL_DIR} analyzer_lac_tester.cc)
 
-# MM DNN
-set(MM_DNN_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mm_dnn")
-download_model_and_data(${MM_DNN_INSTALL_DIR} "MM_DNN_model.tar.gz" "MM_DNN_data.txt.tar.gz")
-inference_analysis_api_test(test_analyzer_mm_dnn ${MM_DNN_INSTALL_DIR} analyzer_mm_dnn_tester.cc)
-
 # Pyramid DNN
 set(PYRAMID_DNN_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/pyramid_dnn")
 download_model_and_data(${PYRAMID_DNN_INSTALL_DIR} "PyramidDNN_model.tar.gz" "PyramidDNN_data.txt.tar.gz")

File diff suppressed because it is too large.

@@ -25,13 +25,13 @@ class SequencePadOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("PadValue"),
-                   "Input(PadValue) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Length"),
-                   "Output(Length) of SequencePadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      "Input(X) of SequencePadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("PadValue"), true,
+                      "Input(PadValue) of SequencePadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      "Output(Out) of SequencePadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Length"), true,
+                      "Output(Length) of SequencePadOp should not be null.");
 
    auto x_dims = ctx->GetInputDim("X");
@@ -39,8 +39,9 @@ class SequencePadOp : public framework::OperatorWithKernel {
                      "The rank of Input(X) can't be less than 2.");
    auto time_step_dims = framework::slice_ddim(x_dims, 1, x_dims.size());
    auto pad_value_dims = ctx->GetInputDim("PadValue");
-    PADDLE_ENFORCE(pad_value_dims == framework::make_ddim({1}) ||
-                       pad_value_dims == time_step_dims,
-                   "The Input(PadValue) must be a scalar or a tensor whose "
-                   "shape equals to time steps in sequences");
+    PADDLE_ENFORCE_EQ(pad_value_dims == framework::make_ddim({1}) ||
+                          pad_value_dims == time_step_dims,
+                      true,
+                      "The Input(PadValue) must be a scalar or a tensor whose "
+                      "shape equals to time steps in sequences");
@@ -52,7 +53,8 @@ class SequencePadOp : public framework::OperatorWithKernel {
      framework::Variable* x_var =
          boost::get<framework::Variable*>(ctx->GetInputVarPtrs("X")[0]);
      const auto& x_lod = x_var->Get<LoDTensor>().lod();
-      PADDLE_ENFORCE(!x_lod.empty(), "The Input(X) must hold lod info.");
+      PADDLE_ENFORCE_EQ(x_lod.empty(), false,
+                        "The Input(X) must hold lod info.");
      const auto& x_lod_0 = x_lod[0];
      PADDLE_ENFORCE_GE(x_lod_0.size(), 2,
                        "The Input(X)'s lod info is corrupted.");
@@ -80,7 +82,7 @@ class SequencePadOp : public framework::OperatorWithKernel {
    }
    std::vector<int> out_dims_vec{out_dim_0, padded_length};
-    std::vector<int> len_dims_vec{out_dim_0, 1};
+    std::vector<int> len_dims_vec{out_dim_0};
    auto time_step_dims_vec = framework::vectorize<int>(time_step_dims);
    out_dims_vec.insert(out_dims_vec.end(), time_step_dims_vec.begin(),
                        time_step_dims_vec.end());
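The shape arithmetic after this hunk: Out keeps [batch, padded_length] plus the time-step dims, while Length drops its trailing 1. A pure-Python sketch of that InferShape logic (illustrative only; the function name is hypothetical):

    # Illustrative restatement of the C++ InferShape above.
    def infer_pad_shapes(batch, padded_length, time_step_dims):
        out_dims = [batch, padded_length] + list(time_step_dims)
        len_dims = [batch]  # previously [batch, 1]
        return out_dims, len_dims

    # e.g. 2 sequences padded to 3 steps of 2-vectors:
    assert infer_pad_shapes(2, 3, [2]) == ([2, 3, 2], [2])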
@@ -143,7 +145,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
      then we get LoDTensor:
          Out.data = [[a, b, 0, 0],
                      [c, d, e, 0]]
-          Length.data = [[2], [3]]
+          Length.data = [2, 3]
 
      Case 2:
@@ -157,7 +159,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
      then we get LoDTensor:
          Out.data = [[[a1, a2], [b1, b2], [0, 0]],
                      [[c1, c2], [d1, d2], [e1, e2]]]
-          Length.data = [[2], [3]]
+          Length.data = [2, 3]
 
      Case 3:
@@ -171,7 +173,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
      then we get LoDTensor:
          Out.data = [[[a1, a2], [b1, b2], [p1, p2]],
                      [[c1, c2], [d1, d2], [e1, e2]]]
-          Length.data = [[2], [3]]
+          Length.data = [2, 3]
 
    )DOC");
  }
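The docstring cases can be checked by hand. A small numpy sketch of Case 1's padding rule (illustrative only, not the op's kernel; letters replaced by numbers):

    import numpy as np

    # Case 1 by hand: sequences [a, b] and [c, d, e], scalar pad 0, maxlen 4.
    seqs = [np.array([1.0, 2.0]), np.array([3.0, 4.0, 5.0])]
    maxlen, pad = 4, 0.0

    out = np.full((len(seqs), maxlen), pad)
    for i, s in enumerate(seqs):
        out[i, :len(s)] = s

    # 1-D lengths: [2, 3], no longer [[2], [3]].
    length = np.array([len(s) for s in seqs])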
@@ -182,9 +184,10 @@ class SequencePadGradOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;
 
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequencePadGradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) of SequencePadGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      "Input(X) of SequencePadGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        "Input(Out@GRAD) of SequencePadGradOp should not be null.");
 
    if (ctx->HasOutput(framework::GradVarName("X"))) {

@@ -25,11 +25,11 @@ class SequenceUnpadOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceUnpadOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Length"),
-                   "Input(Length) of SequenceUnpadOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceUnpadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      "Input(X) of SequenceUnpadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Length"), true,
+                      "Input(Length) of SequenceUnpadOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      "Output(Out) of SequenceUnpadOp should not be null.");
 
    auto x_dims = ctx->GetInputDim("X");
@@ -37,10 +37,10 @@ class SequenceUnpadOp : public framework::OperatorWithKernel {
                      "The rank of Input(X) can't be less than 2.");
 
    auto len_dims = ctx->GetInputDim("Length");
-    PADDLE_ENFORCE(len_dims.size() == 2 && len_dims[1] == 1,
-                   "The shape of Input(Length) should be [batch_size, 1].");
-    PADDLE_ENFORCE(
-        len_dims[0] == x_dims[0],
+    PADDLE_ENFORCE_EQ(len_dims.size(), 1,
+                      "The shape of Input(Length) should be [batch_size].");
+    PADDLE_ENFORCE_EQ(
+        len_dims[0], x_dims[0],
        "Input(X) and Input(Length) should have the same first dimension.");
 
    int64_t out_dim_0 = -1;
@@ -96,7 +96,7 @@ class SequenceUnpadOpMaker : public framework::OpProtoAndCheckerMaker {
      in which there are 3 sequences padded to length 5, and the actual length
      specified by Input(Length):
 
-          Length.data = [[2], [3], [4]],
+          Length.data = [2, 3, 4],
 
      after unpadding, Output(Out) will be:
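Unpadding by hand for this docstring example, as a numpy sketch (illustrative only, not the op's kernel):

    import numpy as np

    padded = np.arange(15.0).reshape(3, 5)   # 3 sequences padded to length 5
    length = np.array([2, 3, 4])             # now 1-D after this commit

    # Truncate each row to its true length, then flatten into one LoD stream.
    unpadded = np.concatenate([padded[i, :n] for i, n in enumerate(length)])
    # -> [0, 1, 5, 6, 7, 10, 11, 12, 13]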
@@ -112,10 +112,10 @@ class SequenceUnpadGradOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;
 
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceUnpadGradOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasInput(framework::GradVarName("Out")),
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      "Input(X) of SequenceUnpadGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
        "Input(Out@GRAD) of SequenceUnpadGradOp should not be null.");
 
    if (ctx->HasOutput(framework::GradVarName("X"))) {

@@ -4588,7 +4588,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
            import paddle.fluid as fluid
            import numpy
 
-            x = fluid.layers.data(name='y', shape=[10, 5],
+            x = fluid.layers.data(name='x', shape=[10, 5],
                             dtype='float32', lod_level=1)
            pad_value = fluid.layers.assign(
                input=numpy.array([0.0], dtype=numpy.float32))
@@ -4637,7 +4637,7 @@ def sequence_unpad(x, length, name=None):
        in which there are 3 sequences padded to length 5, and the actual length
        specified by input Variable **length**:
 
-            length.data = [[2], [3], [4]],
+            length.data = [2, 3, 4],
 
        after unpadding, the output Variable will be:
@@ -4659,9 +4659,15 @@ def sequence_unpad(x, length, name=None):
        .. code-block:: python
 
            import paddle.fluid as fluid
-            x = fluid.layers.data(name='x', shape=[10, 5], dtype='float32')
-            len = fluid.layers.data(name='length', shape=[1], dtype='int64')
-            out = fluid.layers.sequence_unpad(x=x, length=len)
+            import numpy
+
+            # pad data
+            x = fluid.layers.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
+            pad_value = fluid.layers.assign(input=numpy.array([0.0], dtype=numpy.float32))
+            pad_data, len = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
+
+            # unpad data
+            unpad_data = fluid.layers.sequence_unpad(x=pad_data, length=len)
    """
    assert not in_dygraph_mode(), (
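For completeness, a hedged end-to-end version of the new docstring example, run under an executor so the 1-D Length can be observed. This assumes a CPU build of Paddle 1.x; the feed data and names are illustrative:

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
    pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
    pad_data, length = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
    unpad_data = fluid.layers.sequence_unpad(x=pad_data, length=length)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # Two sequences of lengths 2 and 3, each step a 5-vector.
    feed_x = fluid.create_lod_tensor(
        np.random.rand(5, 5).astype('float32'), [[2, 3]], place)
    out = exe.run(feed={'x': feed_x}, fetch_list=[length])
    # out[0].shape is (2,) -- one entry per sequence, no trailing 1.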

@@ -2176,7 +2176,7 @@ class TestBook(LayerTest):
        # TODO(minqiyang): dygraph do not support lod now
        with self.static_graph():
            x = layers.data(name='x', shape=[10, 5], dtype='float32')
-            length = layers.data(name='length', shape=[1], dtype='int64')
+            length = layers.data(name='length', shape=[], dtype='int64')
            return (layers.sequence_unpad(x=x, length=length))
 
    def test_sequence_softmax(self):

@@ -62,7 +62,7 @@ class TestSequencePadOp(OpTest):
            start_idx = end_idx
 
        out_data = np.array(padded_sequences)
-        length = np.array(self.x_len_lod[0]).reshape((-1, 1))
+        length = np.array(self.x_len_lod[0]).reshape((-1))
        self.outputs = {'Out': out_data, 'Length': length}
 
    def setUp(self):
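The reshape change in this test is the whole commit in miniature; a tiny numpy illustration:

    import numpy as np

    lens = [2, 3]
    np.array(lens).reshape((-1, 1)).shape   # (2, 1) -- old expected Length
    np.array(lens).reshape((-1)).shape      # (2,)   -- new expected Length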

@@ -39,10 +39,7 @@ class TestSequenceUnpadOp(OpTest):
        else:
            out_shape = out_shape + self.x_shape[2:]
 
-        self.inputs = {
-            'X': x,
-            'Length': np.array(self.length).astype('int64').reshape(-1, 1)
-        }
+        self.inputs = {'X': x, 'Length': np.array(self.length).astype('int64')}
        self.outputs = {'Out': (out.reshape(out_shape), out_lod)}
 
    def setUp(self):
