diff --git a/paddle/fluid/operators/sequence_ops/sequence_concat_op.h b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
index dd31f9f172..dc0acba122 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
@@ -61,6 +61,9 @@ class SeqConcatKernel : public framework::OpKernel<T> {
     size_t lod_size = 0;
     for (auto &x : xs) {
       if (lod_size == 0) {
+        PADDLE_ENFORCE_EQ(x.get().lod().empty(), false,
+                          "Input(X) Tensor of SequenceConcatOp does not "
+                          "contain LoD information.");
         lod_size = x.get().lod()[0].size();
       } else {
         PADDLE_ENFORCE_EQ(
diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
index 3a2c9e3f73..e35412e31d 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
@@ -39,6 +39,9 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     int context_stride = context.Attr<int>("contextStride");
     bool padding_trainable = context.Attr<bool>("paddingTrainable");
 
+    PADDLE_ENFORCE_EQ(
+        in->lod().empty(), false,
+        "Input(X) Tensor of SequenceConvOp does not contain LoD information.");
     PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
                       "Only support one level sequence now.");
 
diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h
index 6c5a2e9680..4807521bc0 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h
@@ -29,6 +29,10 @@ class SequenceEnumerateKernel : public framework::OpKernel<T> {
     int win_size = context.Attr<int>("win_size");
     auto pad_value = static_cast<T>(context.Attr<int>("pad_value"));
 
+    PADDLE_ENFORCE_EQ(in->lod().empty(), false,
+                      "Input(X) Tensor of SequenceEnumerateOp does not contain "
+                      "LoD information.");
+
     auto in_dims = in->dims();
     auto lod0 = in->lod()[0];
     PADDLE_ENFORCE_EQ(
diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.h b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
index af5a64dce5..0c2d289417 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
@@ -28,6 +28,9 @@ class SequenceEraseKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<framework::LoDTensor>("Out");
 
     auto lod = in->lod();
+    PADDLE_ENFORCE_EQ(
+        lod.empty(), false,
+        "Input(X) Tensor of SequenceEraseOp does not contain LoD information.");
     PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
                       "The actual size mismatches with the LoD information.");
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h
index 42c90d01c0..0b228170e2 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h
@@ -75,6 +75,10 @@ class SequenceExpandAsKernel : public framework::OpKernel<T> {
     auto *y = context.Input<framework::LoDTensor>("Y");
     auto *out = context.Output<framework::LoDTensor>("Out");
 
+    PADDLE_ENFORCE_EQ(y->lod().empty(), false,
+                      "Input(Y) Tensor of SequenceExpandAsOp does not contain "
+                      "LoD information.");
+
     auto &y_lod = y->lod();
     PADDLE_ENFORCE_EQ(y_lod.size(), 1, "LoD of Y should be 1.");
     PADDLE_ENFORCE_GT(y_lod[0].size(), 1, ".");
diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_op.h b/paddle/fluid/operators/sequence_ops/sequence_expand_op.h
index fac63f3fa0..013170199a 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_expand_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_expand_op.h
@@ -92,6 +92,10 @@ class SequenceExpandKernel : public framework::OpKernel<T> {
 
     auto& x_lod = x->lod();
     auto& y_lod = y->lod();
+    PADDLE_ENFORCE_EQ(y_lod.empty(), false,
+                      "Input(Y) Tensor of SequenceExpandOp does not contain "
+                      "LoD information.");
+
     if (ref_level == -1) ref_level = y_lod.size() - 1;
 
     out->mutable_data<T>(context.GetPlace());
diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.h b/paddle/fluid/operators/sequence_ops/sequence_pad_op.h
index 840bd39a7f..701cdc496f 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.h
@@ -35,6 +35,10 @@ class SequencePadOpKernel : public framework::OpKernel<T> {
     auto* len_t = ctx.Output<LoDTensor>("Length");
     out->mutable_data<T>(ctx.GetPlace());
 
+    PADDLE_ENFORCE_EQ(
+        x->lod().empty(), false,
+        "Input(X) Tensor of SequencePadOp does not contain LoD information.");
+
     const auto* pad_value = ctx.Input<LoDTensor>("PadValue");
 
     int padded_length = ctx.Attr<int>("padded_length");
diff --git a/paddle/fluid/operators/sequence_ops/sequence_pool_op.h b/paddle/fluid/operators/sequence_ops/sequence_pool_op.h
index 3eec4df121..2d924585b4 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_pool_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_pool_op.h
@@ -38,8 +38,9 @@ class SequencePoolKernel : public framework::OpKernel<T> {
     auto lod = in->lod();
     auto lod_level = lod.size();
     // InferShape by lod
-    PADDLE_ENFORCE_GE(lod_level, 1UL,
-                      "The lod level of input shall be 1 at least.");
+    PADDLE_ENFORCE_GT(
+        lod_level, 0,
+        "Input(X) Tensor of SequencePoolOp does not contain LoD information.");
     PADDLE_ENFORCE_LE(lod_level, 2UL,
                       "The lod level of input shall be no more than 2.");
     PADDLE_ENFORCE_GE(
diff --git a/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h
index 2893808ee9..7512a0ac24 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h
@@ -32,6 +32,9 @@ class SequenceReshapeKernel : public framework::OpKernel<T> {
     int64_t in_width = in_dims[1];
 
     auto& in_lod = in->lod();
+    PADDLE_ENFORCE_EQ(in_lod.empty(), false,
+                      "Input(X) Tensor of SequenceReshapeOp does not contain "
+                      "LoD information.");
     PADDLE_ENFORCE_EQ(in_lod.size(), 1UL,
                       "Only support one level sequence now.");
     PADDLE_ENFORCE_EQ(
diff --git a/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h
index 14e4fc9b0d..12d0f8095e 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h
@@ -107,6 +107,9 @@ class SequenceReverseOpKernel : public framework::OpKernel<T> {
     auto &x = *ctx.Input<LoDTensor>("X");
     auto *y = ctx.Output<LoDTensor>("Y");
 
+    PADDLE_ENFORCE_EQ(x.lod().empty(), false,
+                      "Input(X) Tensor of SequenceReverseOp does not contain "
+                      "LoD information.");
     PADDLE_ENFORCE_EQ(x.lod().size(), 1,
                       "SequenceReverse Op only support one level lod.");
 
diff --git a/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h
index d9b681b7aa..917a3ed49c 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h
@@ -34,6 +34,9 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<Tensor>("Out");
 
     auto& ids_lod = ids->lod();
+    PADDLE_ENFORCE_EQ(ids_lod.empty(), false,
+                      "Input(Ids) Tensor of SequenceScatterOp does not contain "
+                      "LoD information.");
 
     // Initialize out as same as x
     out->mutable_data<T>(ctx.GetPlace());
diff --git a/paddle/fluid/operators/sequence_ops/sequence_slice_op.h b/paddle/fluid/operators/sequence_ops/sequence_slice_op.h
index a07fc54090..e2ddffa54a 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_slice_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_slice_op.h
@@ -49,8 +49,11 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<LoDTensor>("Out");
 
     auto lod = in->lod();
-    auto n = lod[0].size() - 1;
 
+    PADDLE_ENFORCE_EQ(
+        lod.empty(), false,
+        "Input(X) Tensor of SequenceSliceOp does not contain LoD information.");
+    auto n = lod[0].size() - 1;
     PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now.");
     PADDLE_ENFORCE_EQ(
         n, static_cast<size_t>(length->dims()[0]),
diff --git a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h
index 0555e4ee00..d0b584cfd7 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h
@@ -95,6 +95,9 @@ class SequenceSoftmaxKernel : public framework::OpKernel<T> {
     auto lod = x->lod();
     auto dims = x->dims();
 
+    PADDLE_ENFORCE_EQ(lod.empty(), false,
+                      "Input(X) Tensor of SequenceSoftmaxOp does not contain "
+                      "LoD information.");
     const size_t level = lod.size() - 1;
 
     PADDLE_ENFORCE_GT(
diff --git a/paddle/fluid/operators/sequence_ops/sequence_topk_avg_pooling_op.h b/paddle/fluid/operators/sequence_ops/sequence_topk_avg_pooling_op.h
index c6bfdea8be..2cb70ee736 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_topk_avg_pooling_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_topk_avg_pooling_op.h
@@ -70,6 +70,16 @@ class SequenceTopkAvgPoolingKernel : public framework::OpKernel<T> {
     auto* out = context.Output<LoDTensor>("Out");
     auto* pos = context.Output<Tensor>("pos");
 
+    PADDLE_ENFORCE_EQ(in->lod().empty(), false,
+                      "Input(X) Tensor of SequenceTopkAvgPoolingOp does not "
+                      "contain LoD information.");
+    PADDLE_ENFORCE_EQ(row->lod().empty(), false,
+                      "Input(ROW) Tensor of SequenceTopkAvgPoolingOp does not "
+                      "contain LoD information.");
+    PADDLE_ENFORCE_EQ(col->lod().empty(), false,
+                      "Input(COLUMN) Tensor of SequenceTopkAvgPoolingOp does "
+                      "not contain LoD information.");
+
     auto channel_num = context.Attr<int>("channel_num");
     auto topks = context.Attr<std::vector<int>>("topks");
     auto k_num = topks.size();
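All fourteen hunks apply the same guard: verify that LoD metadata exists before the kernel touches `lod()[0]`, so a tensor fed without LoD fails with a clear message instead of undefined behavior. The sketch below is a minimal standalone illustration of that pattern, not Paddle source; the `LoD` alias and the `ENFORCE_EQ` macro are simplified stand-ins for `framework::LoD` and `PADDLE_ENFORCE_EQ`.

```cpp
#include <cstdio>
#include <cstdlib>
#include <vector>

// Stand-in for framework::LoD: one vector of offsets per LoD level.
using LoD = std::vector<std::vector<size_t>>;

// Stand-in for PADDLE_ENFORCE_EQ: abort with a message when the check fails.
#define ENFORCE_EQ(a, b, msg)                             \
  do {                                                    \
    if ((a) != (b)) {                                     \
      std::fprintf(stderr, "EnforceNotMet: %s\n", (msg)); \
      std::abort();                                       \
    }                                                     \
  } while (0)

// Mirrors the kernels above: check for missing LoD before indexing level 0.
size_t FirstLevelSize(const LoD &lod) {
  ENFORCE_EQ(lod.empty(), false,
             "Input Tensor does not contain LoD information.");
  return lod[0].size();
}

int main() {
  LoD with_lod = {{0, 2, 5}};                       // one level, two sequences
  std::printf("%zu\n", FirstLevelSize(with_lod));   // prints 3

  LoD without_lod;                                  // tensor fed without LoD
  FirstLevelSize(without_lod);                      // aborts with the message
  return 0;
}
```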