Merge pull request #9911 from tonyyang-svail/unify_op_registry

Unify REGISTER_OP and REGISTER_OPERATOR
Branch: wangkuiyi-patch-2
Committed by Tao Luo via GitHub
Commit: d84cdb7b59

@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
+#include <algorithm>
 #include <string>
 #include <unordered_set>
 #include <vector>
@@ -69,8 +70,7 @@ class GradOpDescMakerBase {
           " for input argument with a list of variables, "
           " drop_empty_grad is not allowed because it makes"
           " the correspondence bewteen a variable and its gradient"
-          " ambiguous. Use REGISTER_OP_EX to register the op"
-          " or call InputGrad(?,false) in GradOpDescMaker."
+          " ambiguous."
           " Op type %s",
           fwd_op_.Type());
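
The guidance removed from the message above still applies in code: when a forward input is duplicable, a hand-written grad-op-desc maker keeps the pairing between each input and its gradient by passing drop_empty_grad = false to InputGrad(). A minimal sketch, assuming the SingleGradOpDescMaker interface at this revision; the concat names are only an illustration:

// Minimal sketch of a hand-written grad-op-desc maker (assumes the
// SingleGradOpDescMaker interface at this revision; names are illustrative).
class ConcatGradOpDescMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

 protected:
  std::unique_ptr<framework::OpDesc> Apply() const override {
    auto* grad_op = new framework::OpDesc();
    grad_op->SetType("concat_grad");
    grad_op->SetInput("X", Input("X"));
    grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
    // drop_empty_grad = false: keep a placeholder for every element of the
    // duplicable input "X", so the i-th gradient matches the i-th input.
    grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X", false));
    grad_op->SetAttrMap(Attrs());
    return std::unique_ptr<framework::OpDesc>(grad_op);
  }
};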

@@ -16,6 +16,8 @@ limitations under the License. */
 #include <algorithm>
 #include <atomic>
+#include <string>
+#include <tuple>
 #include <type_traits>
 #include <typeinfo>
 #include <unordered_map>
@@ -141,36 +143,6 @@ class OpKernelRegistrar : public Registrar {
     return 0;                                                \
   }

-/**
- * Macro to register Operator. When the input is duplicable, you should
- * use REGISTER_OP_EX with drop_empty_grad=false instead.
- */
-#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \
-                    grad_op_class)                                   \
-  REGISTER_OP_EX(op_type, op_class, op_maker_class, grad_op_type,    \
-                 grad_op_class, true)
-
-// When an argument is duplicable, we need to use this version.
-// Perhaps we can omit DropEmptyIG template parameter and
-// only have one version of REGISTER_OP.
-#define REGISTER_OP_EX(op_type, op_class, op_maker_class, grad_op_type,       \
-                       grad_op_class, drop_empty_grad)                        \
-  REGISTER_OPERATOR(grad_op_type, grad_op_class);                             \
-  class _GradOpDescMaker_##grad_op_type##_                                    \
-      : public ::paddle::framework::DefaultGradOpDescMaker<drop_empty_grad> { \
-    using ::paddle::framework::DefaultGradOpDescMaker<                        \
-        drop_empty_grad>::DefaultGradOpDescMaker;                             \
-                                                                              \
-   protected:                                                                 \
-    virtual std::string GradOpType() const { return #grad_op_type; }          \
-  };                                                                          \
-  REGISTER_OPERATOR(op_type, op_class, _GradOpDescMaker_##grad_op_type##_,    \
-                    op_maker_class);
-
-#define REGISTER_OP_WITH_KERNEL(op_type, ...)                         \
-  REGISTER_OPERATOR(op_type, ::paddle::framework::OperatorWithKernel, \
-                    ##__VA_ARGS__)
-
 #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \
   REGISTER_OPERATOR(op_type, op_class, op_maker_class)
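
Reading the removed REGISTER_OP_EX body makes the migration mechanical: the macro registered the grad op, generated a DefaultGradOpDescMaker<drop_empty_grad> subclass whose only job was to pin GradOpType() to the grad op's name, and then registered the forward op with that maker. Spelled out for a hypothetical pair my_op / my_op_grad (illustrative names, not part of this diff), the old call and its replacement are:

// Old style, removed by this PR: one macro call registered both ops and
// synthesized the grad-op-desc maker behind the scenes.
REGISTER_OP(my_op, ops::MyOp, ops::MyOpMaker, my_op_grad, ops::MyOpGrad);

// New style: the same two registrations written explicitly; the macro's
// drop_empty_grad flag becomes the template argument of
// DefaultGradOpDescMaker.
REGISTER_OPERATOR(my_op, ops::MyOp, ops::MyOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>)
REGISTER_OPERATOR(my_op_grad, ops::MyOpGrad)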

@@ -110,12 +110,12 @@ function(op_library TARGET)
     # Note that it's enough to just adding one operator to pybind in a *_op.cc file.
     # And for detail pybind information, please see generated paddle/pybind/pybind.h.
     file(READ ${TARGET}.cc TARGET_CONTENT)
-    string(REGEX MATCH "REGISTER_OP\\(.*REGISTER_OP\\(" multi_register "${TARGET_CONTENT}")
-    string(REGEX MATCH "REGISTER_OP\\([a-z0-9_]*," one_register "${multi_register}")
+    string(REGEX MATCH "REGISTER_OPERATOR\\(.*REGISTER_OPERATOR\\(" multi_register "${TARGET_CONTENT}")
+    string(REGEX MATCH "REGISTER_OPERATOR\\([a-z0-9_]*," one_register "${multi_register}")
     if (one_register STREQUAL "")
         string(REPLACE "_op" "" TARGET "${TARGET}")
     else ()
-        string(REPLACE "REGISTER_OP(" "" TARGET "${one_register}")
+        string(REPLACE "REGISTER_OPERATOR(" "" TARGET "${one_register}")
         string(REPLACE "," "" TARGET "${TARGET}")
     endif()

@@ -558,95 +558,126 @@ $$out = \frac{x}{1 + e^{- \beta x}}$$
 namespace ops = paddle::operators;

-REGISTER_OP(sigmoid, ops::ActivationOp, ops::SigmoidOpMaker, sigmoid_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(sigmoid, ops::ActivationOp, ops::SigmoidOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(sigmoid_grad, ops::ActivationOpGrad)

-REGISTER_OP(logsigmoid, ops::ActivationOp, ops::LogSigmoidOpMaker,
-            logsigmoid_grad, ops::ActivationOpGrad);
+REGISTER_OPERATOR(logsigmoid, ops::ActivationOp, ops::LogSigmoidOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(logsigmoid_grad, ops::ActivationOpGrad)

-REGISTER_OP(exp, ops::ActivationOp, ops::ExpOpMaker, exp_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(exp, ops::ActivationOp, ops::ExpOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(exp_grad, ops::ActivationOpGrad)

-REGISTER_OP(relu, ops::ActivationWithMKLDNNOp, ops::ReluOpMaker, relu_grad,
-            ops::ActivationWithMKLDNNOpGrad);
+REGISTER_OPERATOR(relu, ops::ActivationWithMKLDNNOp, ops::ReluOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(relu_grad, ops::ActivationWithMKLDNNOpGrad)

-REGISTER_OP(tanh, ops::ActivationWithMKLDNNOp, ops::TanhOpMaker, tanh_grad,
-            ops::ActivationWithMKLDNNOpGrad);
+REGISTER_OPERATOR(tanh, ops::ActivationWithMKLDNNOp, ops::TanhOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(tanh_grad, ops::ActivationWithMKLDNNOpGrad)

-REGISTER_OP(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker,
-            tanh_shrink_grad, ops::ActivationOpGrad);
+REGISTER_OPERATOR(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(tanh_shrink_grad, ops::ActivationOpGrad)

-REGISTER_OP(softshrink, ops::ActivationOp, ops::SoftShrinkOpMaker,
-            softshrink_grad, ops::ActivationOpGrad);
+REGISTER_OPERATOR(softshrink, ops::ActivationOp, ops::SoftShrinkOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(softshrink_grad, ops::ActivationOpGrad)

-REGISTER_OP(sqrt, ops::ActivationWithMKLDNNOp, ops::SqrtOpMaker, sqrt_grad,
-            ops::ActivationWithMKLDNNOpGrad);
+REGISTER_OPERATOR(sqrt, ops::ActivationWithMKLDNNOp, ops::SqrtOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(sqrt_grad, ops::ActivationWithMKLDNNOpGrad)

-REGISTER_OP(abs, ops::ActivationWithMKLDNNOp, ops::AbsOpMaker, abs_grad,
-            ops::ActivationWithMKLDNNOpGrad);
+REGISTER_OPERATOR(abs, ops::ActivationWithMKLDNNOp, ops::AbsOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(abs_grad, ops::ActivationWithMKLDNNOpGrad)

-REGISTER_OP(ceil, ops::ActivationOp, ops::CeilOpMaker, ceil_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(ceil, ops::ActivationOp, ops::CeilOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(ceil_grad, ops::ActivationOpGrad)

-REGISTER_OP(floor, ops::ActivationOp, ops::FloorOpMaker, floor_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(floor, ops::ActivationOp, ops::FloorOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(floor_grad, ops::ActivationOpGrad)

-REGISTER_OP(cos, ops::ActivationOp, ops::CosOpMaker, cos_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(cos, ops::ActivationOp, ops::CosOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(cos_grad, ops::ActivationOpGrad)

-REGISTER_OP(sin, ops::ActivationOp, ops::SinOpMaker, sin_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(sin, ops::ActivationOp, ops::SinOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(sin_grad, ops::ActivationOpGrad)

-REGISTER_OP(round, ops::ActivationOp, ops::RoundOpMaker, round_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(round, ops::ActivationOp, ops::RoundOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(round_grad, ops::ActivationOpGrad)

-REGISTER_OP(reciprocal, ops::ActivationOp, ops::ReciprocalOpMaker,
-            reciprocal_grad, ops::ActivationOpGrad);
+REGISTER_OPERATOR(reciprocal, ops::ActivationOp, ops::ReciprocalOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(reciprocal_grad, ops::ActivationOpGrad)

-REGISTER_OP(log, ops::ActivationOp, ops::LogOpMaker, log_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(log, ops::ActivationOp, ops::LogOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(log_grad, ops::ActivationOpGrad)

-REGISTER_OP(square, ops::ActivationOp, ops::SquareOpMaker, square_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(square, ops::ActivationOp, ops::SquareOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(square_grad, ops::ActivationOpGrad)

-REGISTER_OP(softplus, ops::ActivationOp, ops::SoftplusOpMaker, softplus_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(softplus, ops::ActivationOp, ops::SoftplusOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(softplus_grad, ops::ActivationOpGrad)

-REGISTER_OP(softsign, ops::ActivationOp, ops::SoftsignOpMaker, softsign_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(softsign, ops::ActivationOp, ops::SoftsignOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(softsign_grad, ops::ActivationOpGrad)

-REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker, brelu_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(brelu, ops::ActivationOp, ops::BReluOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(brelu_grad, ops::ActivationOpGrad)

-REGISTER_OP(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker,
-            leaky_relu_grad, ops::ActivationOpGrad);
+REGISTER_OPERATOR(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(leaky_relu_grad, ops::ActivationOpGrad)

-REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker, soft_relu_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(soft_relu_grad, ops::ActivationOpGrad)

-REGISTER_OP(elu, ops::ActivationOp, ops::ELUOpMaker, elu_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(elu, ops::ActivationOp, ops::ELUOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(elu_grad, ops::ActivationOpGrad)

-REGISTER_OP(relu6, ops::ActivationOp, ops::Relu6OpMaker, relu6_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(relu6, ops::ActivationOp, ops::Relu6OpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(relu6_grad, ops::ActivationOpGrad)

-REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker, pow_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(pow, ops::ActivationOp, ops::PowOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(pow_grad, ops::ActivationOpGrad)

-REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker, stanh_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(stanh, ops::ActivationOp, ops::STanhOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(stanh_grad, ops::ActivationOpGrad)

-REGISTER_OP(hard_shrink, ops::ActivationOp, ops::HardShrinkOpMaker,
-            hard_shrink_grad, ops::ActivationOpGrad);
+REGISTER_OPERATOR(hard_shrink, ops::ActivationOp, ops::HardShrinkOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(hard_shrink_grad, ops::ActivationOpGrad)

-REGISTER_OP(thresholded_relu, ops::ActivationOp, ops::ThresholdedReluOpMaker,
-            thresholded_relu_grad, ops::ActivationOpGrad);
+REGISTER_OPERATOR(thresholded_relu, ops::ActivationOp,
+                  ops::ThresholdedReluOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(thresholded_relu_grad, ops::ActivationOpGrad)

-REGISTER_OP(hard_sigmoid, ops::ActivationOp, ops::HardSigmoidOpMaker,
-            hard_sigmoid_grad, ops::ActivationOpGrad);
+REGISTER_OPERATOR(hard_sigmoid, ops::ActivationOp, ops::HardSigmoidOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(hard_sigmoid_grad, ops::ActivationOpGrad)

-REGISTER_OP(swish, ops::ActivationOp, ops::SwishOpMaker, swish_grad,
-            ops::ActivationOpGrad);
+REGISTER_OPERATOR(swish, ops::ActivationOp, ops::SwishOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(swish_grad, ops::ActivationOpGrad)

 #define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \
   REGISTER_OP_CPU_KERNEL(                                               \
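
None of the rewritten registrations above override GradOpType(), so the change relies on DefaultGradOpDescMaker's fallback, which is assumed here to derive the grad op type from the forward op type plus a _grad suffix; every pair in this file follows that naming convention:

// Assumed fallback in DefaultGradOpDescMaker (a sketch, not the verbatim
// framework source): with no override, the grad op type is the forward op
// type with "_grad" appended, e.g. "sigmoid" -> "sigmoid_grad".
virtual std::string GradOpType() const {
  return this->ForwardOpType() + "_grad";
}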

@@ -153,9 +153,11 @@ class BilinearTensorProductOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(bilinear_tensor_product, ops::BilinearTensorProductOp,
-            ops::BilinearTensorProductOpMaker, bilinear_tensor_product_grad,
-            ops::BilinearTensorProductOpGrad);
+REGISTER_OPERATOR(bilinear_tensor_product, ops::BilinearTensorProductOp,
+                  ops::BilinearTensorProductOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(bilinear_tensor_product_grad,
+                  ops::BilinearTensorProductOpGrad)
 REGISTER_OP_CPU_KERNEL(
     bilinear_tensor_product,
     ops::BilinearTensorProductKernel<paddle::platform::CPUDeviceContext, float>,

@@ -81,8 +81,9 @@ class ClipOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(clip, ops::ClipOp, ops::ClipOpMaker<float>, clip_grad,
-            ops::ClipOpGrad);
+REGISTER_OPERATOR(clip, ops::ClipOp, ops::ClipOpMaker<float>,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(clip_grad, ops::ClipOpGrad)
 REGISTER_OP_CPU_KERNEL(
     clip, ops::ClipKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -103,8 +103,10 @@ class ConcatOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_EX(concat, ops::ConcatOp, ops::ConcatOpMaker, concat_grad,
-               ops::ConcatOpGrad, false)
+REGISTER_OPERATOR(concat, ops::ConcatOp, ops::ConcatOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<
+                      false> /* set false to disable empty grad */)
+REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad)
 REGISTER_OP_CPU_KERNEL(
     concat, ops::ConcatKernel<paddle::platform::CPUDeviceContext, float>)
 REGISTER_OP_CPU_KERNEL(
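
concat is the one migration in this diff that uses DefaultGradOpDescMaker<false>: its input X is duplicable, and dropping empty gradients would break the positional pairing that the GradOpDescMakerBase message above warns about. A hedged illustration (variable names invented for the example):

// With DefaultGradOpDescMaker<false>, every slot of X@GRAD survives; a slot
// that needs no gradient is filled with the framework's empty-variable
// placeholder (kEmptyVarName, i.e. "@EMPTY@"):
//   forward:  concat(X = {x0, x1, x2}) -> Out
//   backward: X@GRAD = {x0@GRAD, @EMPTY@, x2@GRAD}   // positions line up
// With DefaultGradOpDescMaker<true>, the empty slot would be dropped,
// x2@GRAD would shift into position 1, and the input-to-gradient
// correspondence would become ambiguous.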

@@ -335,14 +335,17 @@ framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(conv2d, ops::ConvOp, ops::Conv2DOpMaker, conv2d_grad,
-            ops::ConvOpGrad);
+REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad)

 // depthwise convolution op
-REGISTER_OP(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
-            depthwise_conv2d_grad, ops::ConvOpGrad);
-REGISTER_OP(conv3d, ops::ConvOp, ops::Conv3DOpMaker, conv3d_grad,
-            ops::ConvOpGrad);
+REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad)
+REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad)

 // depthwise conv kernel
 // TODO(xingzhaolong): neon kernel for mobile

@@ -193,8 +193,9 @@ class ConvShiftGradKernel<platform::CPUPlace, T>
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(conv_shift, ops::ConvShiftOp, ops::ConvShiftOpMaker,
-            conv_shift_grad, ops::ConvShiftGradOp);
+REGISTER_OPERATOR(conv_shift, ops::ConvShiftOp, ops::ConvShiftOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(conv_shift_grad, ops::ConvShiftGradOp)
 REGISTER_OP_CPU_KERNEL(conv_shift,
                        ops::ConvShiftKernel<paddle::platform::CPUPlace, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -298,8 +298,10 @@ framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
 namespace ops = paddle::operators;

-REGISTER_OP(conv2d_transpose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker,
-            conv2d_transpose_grad, ops::ConvTransposeOpGrad);
+REGISTER_OPERATOR(conv2d_transpose, ops::ConvTransposeOp,
+                  ops::Conv2DTransposeOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(conv2d_transpose_grad, ops::ConvTransposeOpGrad)

 REGISTER_OP_CPU_KERNEL(
     conv2d_transpose,
@@ -311,8 +313,10 @@ REGISTER_OP_CPU_KERNEL(
     ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
                                      double>);

-REGISTER_OP(conv3d_transpose, ops::ConvTransposeOp, ops::Conv3DTransposeOpMaker,
-            conv3d_transpose_grad, ops::ConvTransposeOpGrad);
+REGISTER_OPERATOR(conv3d_transpose, ops::ConvTransposeOp,
+                  ops::Conv3DTransposeOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad)

 REGISTER_OP_CPU_KERNEL(
     conv3d_transpose,

@@ -153,8 +153,9 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(cos_sim, ops::CosSimOp, ops::CosSimOpMaker, cos_sim_grad,
-            ops::CosSimOpGrad);
+REGISTER_OPERATOR(cos_sim, ops::CosSimOp, ops::CosSimOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(cos_sim_grad, ops::CosSimOpGrad)
 REGISTER_OP_CPU_KERNEL(
     cos_sim, ops::CosSimKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -153,7 +153,9 @@ class CropOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(crop, ops::CropOp, ops::CropOpMaker, crop_grad, ops::CropOpGrad);
+REGISTER_OPERATOR(crop, ops::CropOp, ops::CropOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(crop_grad, ops::CropOpGrad);
 REGISTER_OP_CPU_KERNEL(crop, ops::CropKernel<float>);
 REGISTER_OP_CPU_KERNEL(
     crop_grad, ops::CropGradKernel<paddle::platform::CPUDeviceContext, float>);

@@ -164,8 +164,9 @@ or not. But the output only shares the LoD information with input X.
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(cross_entropy, ops::CrossEntropyOp, ops::CrossEntropyOpMaker,
-            cross_entropy_grad, ops::CrossEntropyGradientOp);
+REGISTER_OPERATOR(cross_entropy, ops::CrossEntropyOp, ops::CrossEntropyOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(cross_entropy_grad, ops::CrossEntropyGradientOp)
 REGISTER_OP_CPU_KERNEL(cross_entropy, ops::CrossEntropyOpKernel<float>,
                        ops::CrossEntropyOpKernel<double>);
 REGISTER_OP_CPU_KERNEL(cross_entropy_grad,

@@ -101,8 +101,9 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(dropout, ops::DropoutOp, ops::DropoutOpMaker, dropout_grad,
-            ops::DropoutOpGrad);
+REGISTER_OPERATOR(dropout, ops::DropoutOp, ops::DropoutOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(dropout_grad, ops::DropoutOpGrad)
 REGISTER_OP_CPU_KERNEL(
     dropout, ops::CPUDropoutKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -30,8 +30,10 @@ class ElementwiseDivOpMaker : public ElementwiseOpMaker {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(elementwise_div, ops::ElementwiseOp, ops::ElementwiseDivOpMaker,
-            elementwise_div_grad, ops::ElementwiseOpGrad);
+REGISTER_OPERATOR(elementwise_div, ops::ElementwiseOp,
+                  ops::ElementwiseDivOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(elementwise_div_grad, ops::ElementwiseOpGrad)
 REGISTER_OP_CPU_KERNEL(
     elementwise_div,
     ops::ElementwiseDivKernel<paddle::platform::CPUDeviceContext, float>,

@@ -29,8 +29,10 @@ class ElementwiseMaxOpMaker : public ElementwiseOpMaker {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(elementwise_max, ops::ElementwiseOp, ops::ElementwiseMaxOpMaker,
-            elementwise_max_grad, ops::ElementwiseOpGrad);
+REGISTER_OPERATOR(elementwise_max, ops::ElementwiseOp,
+                  ops::ElementwiseMaxOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(elementwise_max_grad, ops::ElementwiseOpGrad)
 REGISTER_OP_CPU_KERNEL(
     elementwise_max,
     ops::ElementwiseMaxKernel<paddle::platform::CPUDeviceContext, float>,

@@ -29,8 +29,10 @@ class ElementwiseMinOpMaker : public ElementwiseOpMaker {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(elementwise_min, ops::ElementwiseOp, ops::ElementwiseMinOpMaker,
-            elementwise_min_grad, ops::ElementwiseOpGrad);
+REGISTER_OPERATOR(elementwise_min, ops::ElementwiseOp,
+                  ops::ElementwiseMinOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(elementwise_min_grad, ops::ElementwiseOpGrad)
 REGISTER_OP_CPU_KERNEL(
     elementwise_min,
     ops::ElementwiseMinKernel<paddle::platform::CPUDeviceContext, float>,

@@ -31,8 +31,10 @@ class ElementwiseMulOpMaker : public ElementwiseOpMaker {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(elementwise_mul, ops::ElementwiseOp, ops::ElementwiseMulOpMaker,
-            elementwise_mul_grad, ops::ElementwiseOpGrad);
+REGISTER_OPERATOR(elementwise_mul, ops::ElementwiseOp,
+                  ops::ElementwiseMulOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(elementwise_mul_grad, ops::ElementwiseOpGrad)
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul,
     ops::ElementwiseMulKernel<paddle::platform::CPUDeviceContext, float>,

@@ -29,8 +29,10 @@ class ElementwiseSubOpMaker : public ElementwiseOpMaker {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(elementwise_sub, ops::ElementwiseOp, ops::ElementwiseSubOpMaker,
-            elementwise_sub_grad, ops::ElementwiseOpGrad);
+REGISTER_OPERATOR(elementwise_sub, ops::ElementwiseOp,
+                  ops::ElementwiseSubOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(elementwise_sub_grad, ops::ElementwiseOpGrad)
 REGISTER_OP_CPU_KERNEL(
     elementwise_sub,
     ops::ElementwiseSubKernel<paddle::platform::CPUDeviceContext, float>,

@@ -14,6 +14,8 @@ limitations under the License. */
 #include "paddle/fluid/operators/expand_op.h"

+#include <vector>
+
 namespace paddle {
 namespace operators {

@@ -128,8 +130,9 @@ class ExpandGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(expand, ops::ExpandOp, ops::ExpandOpMaker, expand_grad,
-            ops::ExpandGradOp);
+REGISTER_OPERATOR(expand, ops::ExpandOp, ops::ExpandOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(expand_grad, ops::ExpandGradOp)
 REGISTER_OP_CPU_KERNEL(
     expand, ops::ExpandKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -98,5 +98,6 @@ FCOpMaker::FCOpMaker(OpProto* proto, OpAttrChecker* op_checker)
 }  // namespace operators
 }  // namespace paddle

-REGISTER_OP(fc, paddle::operators::FCOp, paddle::operators::FCOpMaker, fc_grad,
-            paddle::operators::FCOpGrad);
+REGISTER_OPERATOR(fc, paddle::operators::FCOp, paddle::operators::FCOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(fc_grad, paddle::operators::FCOpGrad)

@@ -100,7 +100,8 @@ Out = [[3, 4],
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(gather, ops::GatherOp, ops::GatherOpMaker, gather_grad,
-            ops::GatherGradOp);
+REGISTER_OPERATOR(gather, ops::GatherOp, ops::GatherOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(gather_grad, ops::GatherGradOp)
 REGISTER_OP_CPU_KERNEL(gather, ops::GatherOpKernel<float>);
 REGISTER_OP_CPU_KERNEL(gather_grad, ops::GatherGradientOpKernel<float>);

@@ -216,7 +216,9 @@ class GRUGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(gru, ops::GRUOp, ops::GRUOpMaker, gru_grad, ops::GRUGradOp);
+REGISTER_OPERATOR(gru, ops::GRUOp, ops::GRUOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(gru_grad, ops::GRUGradOp)
 REGISTER_OP_CPU_KERNEL(
     gru, ops::GRUKernel<paddle::platform::CPUDeviceContext, float>,
     ops::GRUKernel<paddle::platform::CPUDeviceContext, double>);

@@ -198,8 +198,9 @@ class GRUUnitGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(gru_unit, ops::GRUUnitOp, ops::GRUUnitOpMaker, gru_unit_grad,
-            ops::GRUUnitGradOp);
+REGISTER_OPERATOR(gru_unit, ops::GRUUnitOp, ops::GRUUnitOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(gru_unit_grad, ops::GRUUnitGradOp)
 REGISTER_OP_CPU_KERNEL(
     gru_unit, ops::GRUUnitKernel<paddle::platform::CPUDeviceContext, float>,
     ops::GRUUnitKernel<paddle::platform::CPUDeviceContext, double>);

@@ -103,8 +103,9 @@ class HingeLossGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP(hinge_loss, ops::HingeLossOp, ops::HingeLossOpMaker<float>,
-            hinge_loss_grad, ops::HingeLossGradOp);
+REGISTER_OPERATOR(hinge_loss, ops::HingeLossOp, ops::HingeLossOpMaker<float>,
+                  paddle::framework::DefaultGradOpDescMaker<true>)
+REGISTER_OPERATOR(hinge_loss_grad, ops::HingeLossGradOp)
 REGISTER_OP_CPU_KERNEL(
     hinge_loss,
     ops::HingeLossKernel<paddle::platform::CPUDeviceContext, float>);

Some files were not shown because too many files have changed in this diff.