Polish English docs of elementwise_add/sub/mul/div (#20027)

Polish English docs of elementwise_add/sub/mul/div
fix-python-transpose
danleifeng 6 years ago committed by gongweibao
parent b916335025
commit 5cef7a2f25

@@ -236,10 +236,10 @@ paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=Non
paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7b97042c3ba55fb5fec6a06308523b73'))
paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b992616c1afbd6b0c2a897ac23036381'))
paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453'))
paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7fa4f12d3dad010f3862df271b31e4de'))
paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '39ee2e90c1ede44e47f279fc466f3151'))
paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '890017540bd2f982f80da81a98832609'))
paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '7994818219805a2ec34a37cd9baceeb7'))
paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '77ab8a79746ce9b96625c6195c27dfbd'))
paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '128d140ac78c610c35fc38663baf9654'))
paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '061219cf5a710c090eb5b31d0a0d841d'))
paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '57d99bd329b8ea842802a7ea52724163'))
paddle.fluid.layers.elementwise_max (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '3b3c2e528712552f6f44aef88796321d'))
paddle.fluid.layers.elementwise_min (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '817e8ce2b39de9b4a94b1b6d592144e0'))
paddle.fluid.layers.elementwise_pow (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'b5e3964c8711058634cf5b57b4884258'))

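For reference, the signatures listed above take two input Variables plus axis, act and name. Below is a minimal usage sketch, assuming a fluid 1.x environment; the variable names, shapes and values are illustrative only and are not part of this change. The same call pattern applies to elementwise_sub, elementwise_mul and elementwise_div.

import numpy as np
import paddle.fluid as fluid

# Build a small program that adds two tensors of identical shape: Out = X + Y.
x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32', append_batch_size=False)
y = fluid.layers.data(name='y', shape=[2, 3], dtype='float32', append_batch_size=False)
out = fluid.layers.elementwise_add(x, y)  # axis=-1, act=None by default

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res, = exe.run(feed={'x': np.ones((2, 3), dtype='float32'),
                     'y': np.full((2, 3), 2.0, dtype='float32')},
               fetch_list=[out])
# res is a 2x3 float32 array filled with 3.0
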
@@ -20,6 +20,28 @@ limitations under the License. */
namespace paddle {
namespace operators {

class ElementwiseAddOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Add"; }
  std::string GetEquation() const override { return "Out = X + Y"; }

  void AddInputX() override {
    AddInput("X",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  std::string GetOpFuntionality() const override {
    return "Add two tensors element-wise";
  }
};

class ElementwiseAddDoubleGradDescMaker
    : public framework::SingleGradOpDescMaker {
 public:
@@ -45,10 +67,10 @@ class ElementwiseAddDoubleGradDescMaker
}  // namespace paddle

REGISTER_ELEMWISE_GRAD_MAKER(elementwise_add, Add);
REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_add, "Add",
                                           "Out = X + Y");
REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_add, Add);

namespace ops = paddle::operators;
REGISTER_OPERATOR(elementwise_add_grad, ops::ElementwiseOpExplicitGrad,
                  ops::ElementwiseGradOpInplace,
                  ops::ElementwiseGradNoBufVarsInference,

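The "Add two tensors element-wise" wording above also covers the broadcast case steered by the axis argument: when Y's shape matches a contiguous slice of X's shape, axis names the first X dimension that Y lines up with. A minimal numpy sketch of that rule (shapes and axis value chosen only for illustration):

import numpy as np

x = np.random.rand(2, 3, 4).astype('float32')  # rank-3 X
y = np.random.rand(3, 4).astype('float32')     # Y matches X's dims starting at axis=1

# Roughly what elementwise_add(x, y, axis=1) computes:
# Y is viewed as shape (1, 3, 4) and broadcast against X, then Out = X + Y.
out = x + y.reshape(1, 3, 4)
assert out.shape == (2, 3, 4)
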
@@ -24,6 +24,22 @@ class ElementwiseDivOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Div"; }
  std::string GetEquation() const override { return "Out = X / Y"; }

  void AddInputX() override {
    AddInput("X",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  std::string GetOpFuntionality() const override {
    return "Divide two tensors element-wise";
  }
};
class ElementwiseDivGradOpDescMaker : public framework::SingleGradOpDescMaker {

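A short numpy sketch of the "Out = X / Y" equation documented above; the point is only that X and Y are paired element by element (values are illustrative):

import numpy as np

x = np.array([[2.0, 3.0, 4.0]], dtype='float32')
y = np.array([[1.0, 5.0, 2.0]], dtype='float32')

# Each output element divides the matching elements of X and Y.
out = x / y  # [[2.0, 0.6, 2.0]]
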
@@ -20,6 +20,28 @@ limitations under the License. */
namespace paddle {
namespace operators {

class ElementwiseMulOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Mul"; }
  std::string GetEquation() const override { return "Out = X \\\\odot Y"; }

  void AddInputX() override {
    AddInput("X",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  std::string GetOpFuntionality() const override {
    return "Multiply two tensors element-wise";
  }
};

class ElementwiseMulOpGradDescMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
@@ -38,12 +60,6 @@ class ElementwiseMulOpGradDescMaker : public framework::SingleGradOpDescMaker {
  }
};

class ElementwiseMulOpMaker : public ElementwiseOpMaker {
 protected:
  virtual std::string GetName() const { return "Mul"; }
  virtual std::string GetEquation() const { return "Out = X \\\\odot Y"; }
};

class ElementwiseMulDoubleGradDescMaker
    : public framework::SingleGradOpDescMaker {
 public:

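In the equation string above, \odot denotes the element-wise (Hadamard) product, not a matrix multiplication; a minimal numpy sketch of the difference:

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype='float32')
y = np.array([[10.0, 20.0], [30.0, 40.0]], dtype='float32')

out = x * y  # element-wise: [[10., 40.], [90., 160.]]
# np.matmul(x, y) would give the matrix product [[70., 100.], [150., 220.]] instead.
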
@@ -413,15 +413,9 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(ElementwiseDoubleGradNoBufVarsInference,
                    ::paddle::operators::ElementwiseGradOpInplace,      \
                    ::paddle::operators::ElementwiseGradNoBufVarsInference)

#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name, equation) \
  class __ElemwiseOp##op_type##Maker__                                         \
      : public ::paddle::operators::ElementwiseOpMaker {                       \
   protected:                                                                  \
    virtual std::string GetName() const { return op_name; }                    \
    virtual std::string GetEquation() const { return equation; }               \
  };                                                                           \
  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,               \
                    __ElemwiseOp##op_type##Maker__,                            \
                    ::paddle::operators::ElementwiseOpInferVarType,            \
                    op_type##GradMaker,                                        \
#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name)    \
  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,        \
                    ::paddle::operators::Elementwise##op_name##OpMaker, \
                    ::paddle::operators::ElementwiseOpInferVarType,     \
                    op_type##GradMaker,                                 \
                    ::paddle::operators::ElementwiseOpInplace);

@@ -20,6 +20,28 @@ limitations under the License. */
namespace paddle {
namespace operators {

class ElementwiseSubOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Sub"; }
  std::string GetEquation() const override { return "Out = X - Y"; }

  void AddInputX() override {
    AddInput("X",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Variable), Tensor or LoDTensor of any dimensions. Its dtype "
             "should be int32, int64, float32, float64.");
  }

  std::string GetOpFuntionality() const override {
    return "Subtract two tensors element-wise";
  }
};

class ElementwiseSubDoubleGradDescMaker
    : public framework::SingleGradOpDescMaker {
 public:
@@ -46,8 +68,7 @@ class ElementwiseSubDoubleGradDescMaker
namespace ops = paddle::operators;
REGISTER_ELEMWISE_GRAD_MAKER(elementwise_sub, Sub);
REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, "Sub",
                                           "Out = X - Y");
REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, Sub);

REGISTER_OPERATOR(elementwise_sub_grad, ops::ElementwiseOpExplicitGrad,
                  ops::ElementwiseGradOpInplace,

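Likewise for the "Out = X - Y" equation above, a minimal numpy sketch with a lower-rank Y broadcast across X (shapes and values are illustrative only):

import numpy as np

x = np.full((2, 3), 5.0, dtype='float32')
y = np.array([1.0, 2.0, 3.0], dtype='float32')  # broadcast along the last dimension

out = x - y  # [[4., 3., 2.], [4., 3., 2.]]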