Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop
commit fabfe17a42
@@ -0,0 +1,87 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/minus_op.h"
#include "paddle/operators/net_op.h"

namespace paddle {
namespace operators {

class MinusOp : public framework::OperatorWithKernel {
 public:
  MinusOp(const std::string &type, const framework::VariableNameMap &inputs,
          const framework::VariableNameMap &outputs,
          const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
    auto *left_tensor = ctx.Input<framework::Tensor>("X");
    auto *right_tensor = ctx.Input<framework::Tensor>("Y");

    PADDLE_ENFORCE_EQ(
        framework::product(left_tensor->dims()),
        framework::product(right_tensor->dims()),
        "Minus operator must take two tensors with the same number of elements");
    ctx.Output<framework::Tensor>("Out")->Resize(left_tensor->dims());
  }
};

class MinusOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  MinusOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "The left tensor of minus operator.").NotInGradient();
    AddInput("Y", "The right tensor of minus operator.").NotInGradient();
    AddOutput("Out", "The output tensor of minus operator.").NotInGradient();

    AddComment(R"DOC(Minus Operator

Equation: Out = X - Y
)DOC");
  }
};

template <typename AttrType>
class MinusGradOp : public NetOp {
 public:
  MinusGradOp(const std::string &type, const framework::VariableNameMap &inputs,
              const framework::VariableNameMap &outputs,
              const framework::AttributeMap &attrs)
      : NetOp(type, inputs, outputs, attrs) {
    auto out_grad = Input(framework::GradVarName("Out"));
    auto x_grad = Output(framework::GradVarName("X"));
    auto y_grad = Output(framework::GradVarName("Y"));

    // x_grad = out_grad
    AppendOp(framework::OpRegistry::CreateOp("identity", {{"X", {out_grad}}},
                                             {{"Out", {x_grad}}}, {}));

    // y_grad = -1 * out_grad
    framework::AttributeMap scale_attr;
    scale_attr["scale"] = static_cast<AttrType>(-1);
    AppendOp(framework::OpRegistry::CreateOp("scale", {{"X", {out_grad}}},
                                             {{"Out", {y_grad}}}, scale_attr));
    CompleteAddOp(false);
  }
};

}  // namespace operators
}  // namespace paddle

USE_OP(scale);
USE_OP_ITSELF(identity);
namespace ops = paddle::operators;
REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad,
            ops::MinusGradOp<float>);
REGISTER_OP_CPU_KERNEL(minus,
                       ops::MinusKernel<paddle::platform::CPUPlace, float>);
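Note how MinusGradOp builds the backward pass as a small NetOp rather than a dedicated kernel: the gradient of X is the upstream gradient passed through the existing "identity" op, and the gradient of Y is the upstream gradient passed through "scale" with scale = -1. The NumPy sketch below only illustrates what that composed net computes; the shapes and variable names are made up for the example.

import numpy as np

# Forward: Out = X - Y.
x = np.random.random((2, 3)).astype("float32")
y = np.random.random((2, 3)).astype("float32")
out = x - y

# Backward, mirroring the two ops appended by MinusGradOp.
out_grad = np.ones_like(out)   # upstream gradient dLoss/dOut
x_grad = out_grad              # "identity": X@GRAD = Out@GRAD
y_grad = -1.0 * out_grad       # "scale" with scale = -1: Y@GRAD = -Out@GRAD

assert np.allclose(x_grad, out_grad)
assert np.allclose(y_grad, -out_grad)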
@@ -0,0 +1,18 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/minus_op.h"

REGISTER_OP_GPU_KERNEL(
    minus, paddle::operators::MinusKernel<paddle::platform::GPUPlace, float>);
@@ -0,0 +1,39 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename Place, typename T>
class MinusKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* left_tensor = context.Input<framework::Tensor>("X");
    auto* right_tensor = context.Input<framework::Tensor>("Y");
    auto* out_tensor = context.Output<framework::Tensor>("Out");

    out_tensor->mutable_data<T>(context.GetPlace());
    auto& dev = context.GetEigenDevice<Place>();
    // Element-wise subtraction over the flattened tensors: Out = X - Y.
    framework::EigenVector<T>::Flatten(*out_tensor).device(dev) =
        framework::EigenVector<T>::Flatten(*left_tensor) -
        framework::EigenVector<T>::Flatten(*right_tensor);
  }
};

}  // namespace operators
}  // namespace paddle
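MinusKernel works on flattened Eigen views, which is why InferShape only enforces that X and Y hold the same number of elements; the output simply reuses X's shape. A rough NumPy equivalent of that computation, with illustrative shapes and names (minus_forward is a hypothetical helper, not a Paddle API):

import numpy as np

def minus_forward(x, y):
    # Mirrors the PADDLE_ENFORCE_EQ check in MinusOp::InferShape.
    assert x.size == y.size, "inputs must have the same number of elements"
    # Flatten, subtract element-wise, then restore X's shape, as the
    # EigenVector<T>::Flatten expression does.
    return (x.ravel() - y.ravel()).reshape(x.shape)

x = np.random.random((4, 5)).astype("float32")
y = np.random.random((4, 5)).astype("float32")
np.testing.assert_allclose(minus_forward(x, y), x - y)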
@@ -0,0 +1,30 @@
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta


class MinusOpTest(unittest.TestCase):
    __metaclass__ = OpTestMeta

    def setUp(self):
        self.type = "minus"
        self.inputs = {
            'X': np.random.random((32, 84)).astype("float32"),
            'Y': np.random.random((32, 84)).astype("float32")
        }
        self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}


class MinusGradTest(GradientChecker):
    def test_left(self):
        op = create_op("minus")
        inputs = {
            "X": np.random.random((10, 10)).astype("float32"),
            "Y": np.random.random((10, 10)).astype("float32")
        }
        self.check_grad(op, inputs, ["X", "Y"], "Out")


if __name__ == '__main__':
    unittest.main()
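MinusGradTest delegates to the framework's GradientChecker, which compares the analytic gradients produced by minus_grad against numeric finite differences. The standalone sketch below only illustrates that idea for Out = X - Y; the numeric_grad helper, delta, and tolerance values are illustrative and not part of Paddle's checker.

import numpy as np

def numeric_grad(f, x, delta=1e-3):
    # Central finite differences of sum(f()) with respect to each entry of x.
    grad = np.zeros_like(x)
    flat_x, flat_g = x.ravel(), grad.ravel()
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + delta
        plus = f().sum()
        flat_x[i] = orig - delta
        minus = f().sum()
        flat_x[i] = orig
        flat_g[i] = (plus - minus) / (2.0 * delta)
    return grad

x = np.random.random((3, 3)).astype("float64")
y = np.random.random((3, 3)).astype("float64")
# For sum(X - Y): dX should be all ones, dY all minus ones.
np.testing.assert_allclose(numeric_grad(lambda: x - y, x), np.ones_like(x), atol=1e-4)
np.testing.assert_allclose(numeric_grad(lambda: x - y, y), -np.ones_like(y), atol=1e-4)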