add semicolon to op registry (#10034)

* script to add semicolon (a sketch of such a script follows below)

* fix typo
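
The script itself is not part of this commit, so the following is only a hypothetical stand-in with the same effect: it tracks parenthesis depth from any line mentioning REGISTER_ and appends a ';' where a macro call closes bare. File handling, the heuristic, and all names here are invented for illustration.

// add_semicolon.cc -- hypothetical reconstruction, not the PR's script.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char** argv) {
  if (argc != 2) {
    std::cerr << "usage: add_semicolon <file.cc>\n";
    return 1;
  }
  std::ifstream in(argv[1]);
  std::ostringstream out;
  std::string line;
  bool in_call = false;  // inside a REGISTER_* macro call
  int depth = 0;         // unbalanced '(' count since the call began
  while (std::getline(in, line)) {
    if (!in_call && line.find("REGISTER_") != std::string::npos) in_call = true;
    if (in_call) {
      for (char c : line) depth += (c == '(') - (c == ')');
      if (depth == 0) {  // the call closes on this line
        if (!line.empty() && line.back() == ')') line += ';';
        in_call = false;
      }
    }
    out << line << '\n';
  }
  in.close();
  std::ofstream(argv[1]) << out.str();  // rewrite the file in place
  return 0;
}

Note that a #define such as REGISTER_ACTIVATION_CPU_KERNEL(...) \ ends in a backslash rather than ')', so this heuristic correctly leaves macro definitions alone.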
Branch: wangkuiyi-patch-2
Author: Yang Yang (Tony), committed via GitHub 7 years ago
parent c5c7dc2e82
commit e04c43d543
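
Every hunk below makes the same mechanical change: the removed lines call REGISTER_OPERATOR or REGISTER_OP_*_KERNEL without a trailing semicolon, and the added lines supply one. Both forms compiled, because a registration macro of this family typically expands to definitions that already end in ';' or '}', leaving the call-site semicolon an empty declaration (legal at namespace scope since C++11). A toy sketch of the pattern follows; the names are invented and this is not the real expansion in Paddle's op_registry.h.

#include <iostream>
#include <string>

// Constructing one global registrar per operator name is enough to
// record it in a registry during static initialization.
struct OpRegistrar {
  explicit OpRegistrar(const std::string& name) {
    std::cout << "registered op: " << name << "\n";  // runs at start-up
  }
};

// The macro ends in a function DEFINITION ('}'), so the caller's ';'
// is optional -- which is exactly why both styles compiled before.
#define REGISTER_OP(op_name)                         \
  static OpRegistrar registrar_##op_name(#op_name);  \
  int TouchOpRegistrar_##op_name() { return 0; }

REGISTER_OP(sigmoid);  // new style: explicit semicolon
REGISTER_OP(tanh)      // old style: also compiled, hence this cleanup

int main() { return 0; }

Had the macro instead ended with a bare declaration such as int TouchOpRegistrar_##op_name(), the caller's semicolon would be mandatory; that is a common way to enforce this style mechanically rather than by a one-off script.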

@@ -559,125 +559,125 @@ $$out = \frac{x}{1 + e^{- \beta x}}$$
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(sigmoid, ops::ActivationOp, ops::SigmoidOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(sigmoid_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(sigmoid_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(logsigmoid, ops::ActivationOp, ops::LogSigmoidOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(logsigmoid_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(logsigmoid_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(exp, ops::ActivationOp, ops::ExpOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(exp_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(exp_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(relu, ops::ActivationWithMKLDNNOp, ops::ReluOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(relu_grad, ops::ActivationWithMKLDNNOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(relu_grad, ops::ActivationWithMKLDNNOpGrad);
 REGISTER_OPERATOR(tanh, ops::ActivationWithMKLDNNOp, ops::TanhOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(tanh_grad, ops::ActivationWithMKLDNNOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(tanh_grad, ops::ActivationWithMKLDNNOpGrad);
 REGISTER_OPERATOR(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(tanh_shrink_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(tanh_shrink_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(softshrink, ops::ActivationOp, ops::SoftShrinkOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(softshrink_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(softshrink_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(sqrt, ops::ActivationWithMKLDNNOp, ops::SqrtOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(sqrt_grad, ops::ActivationWithMKLDNNOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(sqrt_grad, ops::ActivationWithMKLDNNOpGrad);
 REGISTER_OPERATOR(abs, ops::ActivationWithMKLDNNOp, ops::AbsOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(abs_grad, ops::ActivationWithMKLDNNOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(abs_grad, ops::ActivationWithMKLDNNOpGrad);
 REGISTER_OPERATOR(ceil, ops::ActivationOp, ops::CeilOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(ceil_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(ceil_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(floor, ops::ActivationOp, ops::FloorOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(floor_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(floor_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(cos, ops::ActivationOp, ops::CosOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(cos_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(cos_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(sin, ops::ActivationOp, ops::SinOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(sin_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(sin_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(round, ops::ActivationOp, ops::RoundOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(round_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(round_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(reciprocal, ops::ActivationOp, ops::ReciprocalOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(reciprocal_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(reciprocal_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(log, ops::ActivationOp, ops::LogOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(log_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(log_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(square, ops::ActivationOp, ops::SquareOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(square_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(square_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(softplus, ops::ActivationOp, ops::SoftplusOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(softplus_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(softplus_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(softsign, ops::ActivationOp, ops::SoftsignOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(softsign_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(softsign_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(brelu, ops::ActivationOp, ops::BReluOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(brelu_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(brelu_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(leaky_relu_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(leaky_relu_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(soft_relu_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(soft_relu_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(elu, ops::ActivationOp, ops::ELUOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(elu_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(elu_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(relu6, ops::ActivationOp, ops::Relu6OpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(relu6_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(relu6_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(pow, ops::ActivationOp, ops::PowOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(pow_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(pow_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(stanh, ops::ActivationOp, ops::STanhOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(stanh_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(stanh_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(hard_shrink, ops::ActivationOp, ops::HardShrinkOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(hard_shrink_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(hard_shrink_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(thresholded_relu, ops::ActivationOp,
                   ops::ThresholdedReluOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(thresholded_relu_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(thresholded_relu_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(hard_sigmoid, ops::ActivationOp, ops::HardSigmoidOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(hard_sigmoid_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(hard_sigmoid_grad, ops::ActivationOpGrad);
 REGISTER_OPERATOR(swish, ops::ActivationOp, ops::SwishOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(swish_grad, ops::ActivationOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(swish_grad, ops::ActivationOpGrad);
 #define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \
   REGISTER_OP_CPU_KERNEL(                                               \

@@ -155,9 +155,9 @@ class BilinearTensorProductOpGrad : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(bilinear_tensor_product, ops::BilinearTensorProductOp,
                   ops::BilinearTensorProductOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
 REGISTER_OPERATOR(bilinear_tensor_product_grad,
-                  ops::BilinearTensorProductOpGrad)
+                  ops::BilinearTensorProductOpGrad);
 REGISTER_OP_CPU_KERNEL(
     bilinear_tensor_product,
     ops::BilinearTensorProductKernel<paddle::platform::CPUDeviceContext, float>,

@@ -82,8 +82,8 @@ class ClipOpGrad : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(clip, ops::ClipOp, ops::ClipOpMaker<float>,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(clip_grad, ops::ClipOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(clip_grad, ops::ClipOpGrad);
 REGISTER_OP_CPU_KERNEL(
     clip, ops::ClipKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -105,10 +105,10 @@ class ConcatOpGrad : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(concat, ops::ConcatOp, ops::ConcatOpMaker,
                   paddle::framework::DefaultGradOpDescMaker<
-                      false> /* set false to disable empty grad */)
-REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad)
+                      false> /* set false to disable empty grad */);
+REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad);
 REGISTER_OP_CPU_KERNEL(
-    concat, ops::ConcatKernel<paddle::platform::CPUDeviceContext, float>)
+    concat, ops::ConcatKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(
     concat_grad,
-    ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, float>)
+    ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, float>);

@@ -336,16 +336,16 @@ framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad);
 // depthwise convolution op
 REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad);
 REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad);
 // depthwise conv kernel
 // TODO(xingzhaolong): neon kernel for mobile

@@ -194,8 +194,8 @@ class ConvShiftGradKernel<platform::CPUPlace, T>
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(conv_shift, ops::ConvShiftOp, ops::ConvShiftOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(conv_shift_grad, ops::ConvShiftGradOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(conv_shift_grad, ops::ConvShiftGradOp);
 REGISTER_OP_CPU_KERNEL(conv_shift,
                        ops::ConvShiftKernel<paddle::platform::CPUPlace, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -300,8 +300,8 @@ namespace ops = paddle::operators;
 REGISTER_OPERATOR(conv2d_transpose, ops::ConvTransposeOp,
                   ops::Conv2DTransposeOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(conv2d_transpose_grad, ops::ConvTransposeOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(conv2d_transpose_grad, ops::ConvTransposeOpGrad);
 REGISTER_OP_CPU_KERNEL(
     conv2d_transpose,
@@ -315,8 +315,8 @@ REGISTER_OP_CPU_KERNEL(
 REGISTER_OPERATOR(conv3d_transpose, ops::ConvTransposeOp,
                   ops::Conv3DTransposeOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad);
 REGISTER_OP_CPU_KERNEL(
     conv3d_transpose,

@@ -154,8 +154,8 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(cos_sim, ops::CosSimOp, ops::CosSimOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(cos_sim_grad, ops::CosSimOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(cos_sim_grad, ops::CosSimOpGrad);
 REGISTER_OP_CPU_KERNEL(
     cos_sim, ops::CosSimKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -165,8 +165,8 @@ or not. But the output only shares the LoD information with input X.
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(cross_entropy, ops::CrossEntropyOp, ops::CrossEntropyOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(cross_entropy_grad, ops::CrossEntropyGradientOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(cross_entropy_grad, ops::CrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(cross_entropy, ops::CrossEntropyOpKernel<float>,
                        ops::CrossEntropyOpKernel<double>);
 REGISTER_OP_CPU_KERNEL(cross_entropy_grad,

@@ -79,4 +79,4 @@ using CPU = paddle::platform::CPUDeviceContext;
 REGISTER_OPERATOR(cumsum, ops::CumOp, ops::CumsumOpMaker, ops::CumsumGradMaker);
 REGISTER_OP_CPU_KERNEL(cumsum, ops::CumKernel<CPU, ops::CumsumFunctor<float>>,
                        ops::CumKernel<CPU, ops::CumsumFunctor<double>>,
-                       ops::CumKernel<CPU, ops::CumsumFunctor<int>>)
+                       ops::CumKernel<CPU, ops::CumsumFunctor<int>>);

@@ -19,4 +19,4 @@ using CUDA = paddle::platform::CUDADeviceContext;
 REGISTER_OP_CUDA_KERNEL(cumsum, ops::CumKernel<CUDA, ops::CumsumFunctor<float>>,
                         ops::CumKernel<CUDA, ops::CumsumFunctor<double>>,
-                        ops::CumKernel<CUDA, ops::CumsumFunctor<int>>)
+                        ops::CumKernel<CUDA, ops::CumsumFunctor<int>>);

@@ -102,8 +102,8 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(dropout, ops::DropoutOp, ops::DropoutOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(dropout_grad, ops::DropoutOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(dropout_grad, ops::DropoutOpGrad);
 REGISTER_OP_CPU_KERNEL(
     dropout, ops::CPUDropoutKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -32,8 +32,8 @@ class ElementwiseDivOpMaker : public ElementwiseOpMaker {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(elementwise_div, ops::ElementwiseOp,
                   ops::ElementwiseDivOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(elementwise_div_grad, ops::ElementwiseOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(elementwise_div_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_div,
     ops::ElementwiseDivKernel<paddle::platform::CPUDeviceContext, float>,

@@ -31,8 +31,8 @@ class ElementwiseMaxOpMaker : public ElementwiseOpMaker {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(elementwise_max, ops::ElementwiseOp,
                   ops::ElementwiseMaxOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(elementwise_max_grad, ops::ElementwiseOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(elementwise_max_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_max,
     ops::ElementwiseMaxKernel<paddle::platform::CPUDeviceContext, float>,

@@ -31,8 +31,8 @@ class ElementwiseMinOpMaker : public ElementwiseOpMaker {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(elementwise_min, ops::ElementwiseOp,
                   ops::ElementwiseMinOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(elementwise_min_grad, ops::ElementwiseOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(elementwise_min_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_min,
     ops::ElementwiseMinKernel<paddle::platform::CPUDeviceContext, float>,

@@ -33,8 +33,8 @@ class ElementwiseMulOpMaker : public ElementwiseOpMaker {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(elementwise_mul, ops::ElementwiseOp,
                   ops::ElementwiseMulOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(elementwise_mul_grad, ops::ElementwiseOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(elementwise_mul_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul,
     ops::ElementwiseMulKernel<paddle::platform::CPUDeviceContext, float>,

@@ -31,8 +31,8 @@ class ElementwiseSubOpMaker : public ElementwiseOpMaker {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(elementwise_sub, ops::ElementwiseOp,
                   ops::ElementwiseSubOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(elementwise_sub_grad, ops::ElementwiseOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(elementwise_sub_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_sub,
     ops::ElementwiseSubKernel<paddle::platform::CPUDeviceContext, float>,

@@ -132,8 +132,8 @@ class ExpandGradOp : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(expand, ops::ExpandOp, ops::ExpandOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(expand_grad, ops::ExpandGradOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(expand_grad, ops::ExpandGradOp);
 REGISTER_OP_CPU_KERNEL(
     expand, ops::ExpandKernel<paddle::platform::CPUDeviceContext, float>);
 REGISTER_OP_CPU_KERNEL(

@@ -99,5 +99,5 @@ FCOpMaker::FCOpMaker(OpProto* proto, OpAttrChecker* op_checker)
 }  // namespace paddle
 REGISTER_OPERATOR(fc, paddle::operators::FCOp, paddle::operators::FCOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(fc_grad, paddle::operators::FCOpGrad)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(fc_grad, paddle::operators::FCOpGrad);

@@ -101,7 +101,7 @@ Out = [[3, 4],
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(gather, ops::GatherOp, ops::GatherOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(gather_grad, ops::GatherGradOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(gather_grad, ops::GatherGradOp);
 REGISTER_OP_CPU_KERNEL(gather, ops::GatherOpKernel<float>);
 REGISTER_OP_CPU_KERNEL(gather_grad, ops::GatherGradientOpKernel<float>);

@@ -217,8 +217,8 @@ class GRUGradOp : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(gru, ops::GRUOp, ops::GRUOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(gru_grad, ops::GRUGradOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(gru_grad, ops::GRUGradOp);
 REGISTER_OP_CPU_KERNEL(
     gru, ops::GRUKernel<paddle::platform::CPUDeviceContext, float>,
     ops::GRUKernel<paddle::platform::CPUDeviceContext, double>);

@@ -199,8 +199,8 @@ class GRUUnitGradOp : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(gru_unit, ops::GRUUnitOp, ops::GRUUnitOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(gru_unit_grad, ops::GRUUnitGradOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(gru_unit_grad, ops::GRUUnitGradOp);
 REGISTER_OP_CPU_KERNEL(
     gru_unit, ops::GRUUnitKernel<paddle::platform::CPUDeviceContext, float>,
     ops::GRUUnitKernel<paddle::platform::CPUDeviceContext, double>);

@@ -104,8 +104,8 @@ class HingeLossGradOp : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(hinge_loss, ops::HingeLossOp, ops::HingeLossOpMaker<float>,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(hinge_loss_grad, ops::HingeLossGradOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(hinge_loss_grad, ops::HingeLossGradOp);
 REGISTER_OP_CPU_KERNEL(
     hinge_loss,
     ops::HingeLossKernel<paddle::platform::CPUDeviceContext, float>);

@@ -122,8 +122,8 @@ class HuberLossGradOp : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(huber_loss, ops::HuberLossOp, ops::HuberLossOpMaker<float>,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(huber_loss_grad, ops::HuberLossGradOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(huber_loss_grad, ops::HuberLossGradOp);
 REGISTER_OP_CPU_KERNEL(
     huber_loss,
     ops::HuberLossKernel<paddle::platform::CPUDeviceContext, float>);

@@ -149,8 +149,8 @@ class Im2SequenceGradOp : public framework::OperatorWithKernel {
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(im2sequence, ops::Im2SequenceOp, ops::Im2SequenceOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>)
-REGISTER_OPERATOR(im2sequence_grad, ops::Im2SequenceGradOp)
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(im2sequence_grad, ops::Im2SequenceGradOp);
 REGISTER_OP_CPU_KERNEL(
     im2sequence,
     ops::Im2SequenceKernel<paddle::platform::CPUDeviceContext, float>);

Some files were not shown because too many files have changed in this diff.
