Remove unused DefaultGradOpDescMaker in REGISTER_OPERATOR() (#19166)

* remove unused DefaultGradOpDescMaker in REGISTER_OPERATOR(), test=develop

* remove SplitIdsOpGradMaker since it is buggy and not tested, update spec file, test=develop
Leo Chen authored 6 years ago, committed by Zeng Jinle
parent c70a97f46e
commit 80eab822c1
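Note on the change: REGISTER_OPERATOR() accepts a variadic list of component classes, and paddle::framework::DefaultGradOpDescMaker<true> is the component that auto-generates a "<op>_grad" description. The fused operators touched here register no grad kernels, so that component was dead weight; dropping it also shrinks the spec file trimmed in the first hunk below. A minimal, self-contained sketch of the variadic-registration idea (toy types and a toy Register<>() function, not Paddle's real internals):

#include <iostream>
#include <string>
#include <typeinfo>
#include <vector>

// Toy registry: each template argument contributes one component to the
// op's registration record, mimicking REGISTER_OPERATOR's variadic form.
struct OpInfo {
  std::vector<std::string> components;
};

template <typename... Components>
OpInfo Register(const std::string& op_type) {
  OpInfo info;
  (info.components.push_back(typeid(Components).name()), ...);  // C++17 fold
  std::cout << op_type << ": " << info.components.size() << " component(s)\n";
  return info;
}

struct FusionGRUOp {};
struct FusionGRUOpMaker {};
template <bool DropEmptyIG>
struct DefaultGradOpDescMaker {};

int main() {
  // Before this commit: a grad maker was registered although fusion_gru
  // has no grad kernel, so the generated grad op could never actually run.
  Register<FusionGRUOp, FusionGRUOpMaker, DefaultGradOpDescMaker<true>>(
      "fusion_gru");
  // After: forward-only registration, one fewer template argument.
  Register<FusionGRUOp, FusionGRUOpMaker>("fusion_gru");
}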

@@ -1,19 +1,10 @@
-attention_lstm
 conv_shift
 cos_sim
 dequantize
 fc
 flatten
 fsp
-fused_embedding_fc_lstm
 fused_embedding_seq_pool
-fusion_gru
-fusion_lstm
-fusion_repeated_fc_relu
-fusion_seqconv_eltadd_relu
-fusion_seqexpand_concat_fc
-fusion_seqpool_concat
-fusion_squared_mat_sub
 gru
 lrn
 lstm_unit

@@ -419,8 +419,7 @@ class AttentionLSTMKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(attention_lstm, ops::AttentionLSTMOp,
-                  ops::AttentionLSTMOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::AttentionLSTMOpMaker);
 REGISTER_OP_CPU_KERNEL(attention_lstm, ops::AttentionLSTMKernel<float>,
                        ops::AttentionLSTMKernel<double>);
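The removed argument would only have mattered if something requested attention_lstm's gradient. As a rough illustration of what a default grad maker produces, a "<type>_grad" description wired from the forward op's inputs, outputs, and "@GRAD"-suffixed gradients (the suffix is Paddle's convention; everything else here is a simplified toy, not the framework's actual classes):

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Toy op description plus a simplified "default" grad maker, to show what
// the deleted DefaultGradOpDescMaker<true> argument stood for.
struct OpDesc {
  std::string type;
  std::map<std::string, std::vector<std::string>> inputs, outputs;
};

std::unique_ptr<OpDesc> MakeDefaultGrad(const OpDesc& fwd) {
  auto grad = std::make_unique<OpDesc>();
  grad->type = fwd.type + "_grad";
  for (const auto& [name, args] : fwd.inputs) {
    grad->inputs[name] = args;  // forward inputs are reused
    std::vector<std::string> ig;
    for (const auto& a : args) ig.push_back(a + "@GRAD");
    grad->outputs[name + "@GRAD"] = ig;  // one gradient per forward input
  }
  for (const auto& [name, args] : fwd.outputs) {
    grad->inputs[name] = args;  // forward outputs are reused
    std::vector<std::string> og;
    for (const auto& a : args) og.push_back(a + "@GRAD");
    grad->inputs[name + "@GRAD"] = og;  // incoming output gradients
  }
  return grad;
}

int main() {
  OpDesc fwd{"attention_lstm", {{"X", {"x0"}}}, {{"Hidden", {"h0"}}}};
  auto grad = MakeDefaultGrad(fwd);
  // Prints "attention_lstm_grad"; without a registered grad kernel this
  // description could never be executed, hence the removal.
  std::cout << grad->type << "\n";
}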

@@ -81,27 +81,12 @@ class SplitIdsOpInferVarType : public framework::VarTypeInference {
   }
 };
-
-class SplitIdsOpGradMaker : public framework::SingleGradOpDescMaker {
- public:
-  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
-
- protected:
-  std::unique_ptr<framework::OpDesc> Apply() const override {
-    auto grad = new framework::OpDesc();
-    grad->SetType("concat");
-    grad->SetInput("X", OutputGrad("Out"));
-    grad->SetOutput("Out", InputGrad("Ids"));
-    grad->SetAttr("axis", 0);
-    return std::unique_ptr<framework::OpDesc>(grad);
-  }
-};
-
 }  // namespace operators
 }  // namespace paddle
 
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(split_ids, ops::SplitIdsOp, ops::SplitIdsOpMaker,
-                  ops::SplitIdsOpGradMaker, ops::SplitIdsOpInferVarType);
+                  ops::SplitIdsOpInferVarType);
 REGISTER_OP_CPU_KERNEL(
     split_ids, ops::SplitIdsOpKernel<paddle::platform::CPUPlace, int64_t>,
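Why the deleted SplitIdsOpGradMaker was buggy rather than merely unused: split_ids shards ids across its outputs by value (roughly id % num_parts in the forward kernel), so the parts are not contiguous slices of the input. Concatenating the output gradients along axis 0, as the removed Apply() did, therefore scatters gradients back in the wrong row order. A standalone toy demonstration (SplitIds here is a stand-in for the kernel's routing, not the real implementation):

#include <cstdint>
#include <iostream>
#include <vector>

// Toy stand-in for split_ids' routing: each id goes to part (id % num_parts).
std::vector<std::vector<int64_t>> SplitIds(const std::vector<int64_t>& ids,
                                           size_t num_parts) {
  std::vector<std::vector<int64_t>> parts(num_parts);
  for (int64_t id : ids) {
    parts[static_cast<size_t>(id) % num_parts].push_back(id);
  }
  return parts;
}

int main() {
  const std::vector<int64_t> ids = {5, 2, 8, 3};
  auto parts = SplitIds(ids, 2);  // part 0: {2, 8}, part 1: {5, 3}

  // The removed grad maker emitted a concat over the parts on axis 0:
  std::vector<int64_t> concat;
  for (const auto& p : parts) concat.insert(concat.end(), p.begin(), p.end());

  // Prints "2 8 5 3", not the original "5 2 8 3": row order is lost, so the
  // "gradient" would pair values with the wrong ids.
  for (int64_t v : concat) std::cout << v << ' ';
  std::cout << '\n';
}

Deleting the maker, rather than fixing it, matches the commit message: the path was untested, and a wrong gradient is worse than a loud "no gradient registered" error.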

@@ -589,8 +589,7 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fused_embedding_fc_lstm, ops::FusedEmbeddingFCLSTMOp,
-                  ops::FusedEmbeddingFCLSTMOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::FusedEmbeddingFCLSTMOpMaker);
 REGISTER_OP_CPU_KERNEL(fused_embedding_fc_lstm,
                        ops::FusedEmbeddingFCLSTMKernel<float>,

@@ -396,7 +396,7 @@ class FusionGRUKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(fusion_gru, ops::FusionGRUOp, ops::FusionGRUOpMaker);
 REGISTER_OP_CPU_KERNEL(fusion_gru, ops::FusionGRUKernel<float>,
                        ops::FusionGRUKernel<double>);

@@ -474,8 +474,7 @@ class FuisonLSTMKernel : public framework::OpKernel<T> {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OPERATOR(fusion_lstm, ops::FusionLSTMOp, ops::FusionLSTMOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(fusion_lstm, ops::FusionLSTMOp, ops::FusionLSTMOpMaker);
 REGISTER_OP_CPU_KERNEL(fusion_lstm, ops::FuisonLSTMKernel<float>,
                        ops::FuisonLSTMKernel<double>);

@@ -144,8 +144,7 @@ class FusionRepeatedFCReluKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fusion_repeated_fc_relu, ops::FusionRepeatedFCReluOp,
-                  ops::FusionRepeatedFCReluOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::FusionRepeatedFCReluOpMaker);
 REGISTER_OP_CPU_KERNEL(fusion_repeated_fc_relu,
                        ops::FusionRepeatedFCReluKernel<float>,

@@ -220,8 +220,7 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fusion_seqconv_eltadd_relu, ops::FusionSeqConvEltAddReluOp,
-                  ops::FusionSeqConvEltAddReluOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::FusionSeqConvEltAddReluOpMaker);
 REGISTER_OP_CPU_KERNEL(fusion_seqconv_eltadd_relu,
                        ops::FusionSeqConvEltAddReluKernel<float>,

@@ -197,8 +197,7 @@ class FusionSeqExpandConcatFCOpKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fusion_seqexpand_concat_fc, ops::FusionSeqExpandConcatFCOp,
-                  ops::FusionSeqExpandConcatFCOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::FusionSeqExpandConcatFCOpMaker);
 REGISTER_OP_CPU_KERNEL(fusion_seqexpand_concat_fc,
                        ops::FusionSeqExpandConcatFCOpKernel<float>,

@@ -126,8 +126,7 @@ class FusionSeqPoolConcatKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fusion_seqpool_concat, ops::FusionSeqPoolConcatOp,
-                  ops::FusionSeqPoolConcatOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::FusionSeqPoolConcatOpMaker);
 REGISTER_OP_CPU_KERNEL(fusion_seqpool_concat,
                        ops::FusionSeqPoolConcatKernel<float>,

@@ -136,8 +136,7 @@ class FusionSquaredMatSubKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(fusion_squared_mat_sub, ops::FusionSquaredMatSubOp,
-                  ops::FusionSquaredMatSubOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::FusionSquaredMatSubOpMaker);
 REGISTER_OP_CPU_KERNEL(fusion_squared_mat_sub,
                        ops::FusionSquaredMatSubKernel<float>,
