tonyyang-svail-feed-op-desgin
commit 29ae410704
@@ -1,68 +0,0 @@ (deleted file; evidently paddle/operators/add_op.cc, given its include and contents)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/add_op.h"

namespace paddle {
namespace operators {

class AddOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  // Validate that both inputs and the output exist, then propagate the
  // (identical) input shape to the output.
  void InferShape(framework::InferShapeContextBase* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of AddOp should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of AddOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of AddOp should not be null.");

    auto x_dims = ctx->GetInputDim("X");
    auto y_dims = ctx->GetInputDim("Y");
    PADDLE_ENFORCE_EQ(x_dims, y_dims,
                      "The two inputs of AddOp must have the same dimensions.");
    ctx->SetOutputDim("Out", x_dims);
  }
};

class AddOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  AddOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "The first input of the add op");
    AddInput("Y", "The second input of the add op");
    AddOutput("Out", "The output of the add op");
    AddComment(R"DOC(
Two-Element Add Operator.

The equation is: Out = X + Y
)DOC");
  }
};

class AddOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  // Shape inference for the gradient op is intentionally left empty here.
  void InferShape(framework::InferShapeContextBase* ctx) const override {}
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad);

REGISTER_OP_CPU_KERNEL(add, ops::AddKernel<paddle::platform::CPUPlace, float>);
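REGISTER_OP wires the forward op, its proto/attribute maker, and its gradient op into a global registry keyed by the op-type string, so the framework can later instantiate ops from that string alone. A minimal Python sketch of that registry pattern, assuming nothing about Paddle's actual internals (OP_REGISTRY and register_op are illustrative names, not Paddle APIs):

# Illustrative sketch only; OP_REGISTRY / register_op are not Paddle APIs.
OP_REGISTRY = {}

def register_op(op_type, op_class, maker_class,
                grad_op_type=None, grad_op_class=None):
    # Mirrors REGISTER_OP(add, AddOp, AddOpMaker, add_grad, AddOpGrad):
    # both the forward and the gradient op become look-up-able by name.
    OP_REGISTRY[op_type] = {"op": op_class, "maker": maker_class,
                            "grad": grad_op_type}
    if grad_op_type is not None:
        OP_REGISTRY[grad_op_type] = {"op": grad_op_class, "maker": None,
                                     "grad": None}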
@@ -1,18 +0,0 @@ (deleted file; evidently paddle/operators/add_op.cu)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/add_op.h"

namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(add, ops::AddKernel<paddle::platform::GPUPlace, float>);
@@ -1,48 +0,0 @@ (deleted file; evidently paddle/operators/add_op.h)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

// Device-agnostic kernel: the same code serves CPU and GPU, selected by
// the Place template parameter at registration time.
template <typename Place, typename T>
class AddKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* input0 = context.Input<Tensor>("X");
    auto* input1 = context.Input<Tensor>("Y");
    auto* output = context.Output<Tensor>("Out");

    // Allocate the output buffer on the target device before writing.
    output->mutable_data<T>(context.GetPlace());

    // View all three tensors as flat 1-D Eigen vectors; the shapes were
    // already verified equal in AddOp::InferShape.
    auto X = EigenVector<T>::Flatten(*input0);
    auto Y = EigenVector<T>::Flatten(*input1);
    auto Z = EigenVector<T>::Flatten(*output);

    auto place = context.GetEigenDevice<Place>();

    // Element-wise addition, evaluated on the chosen Eigen device.
    Z.device(place) = X + Y;
  }
};

}  // namespace operators
}  // namespace paddle
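The flatten-then-add pattern above is easiest to see in NumPy terms: both tensors are viewed as flat vectors, added element-wise, and the result is written into a pre-allocated output. A minimal sketch (add_kernel is a hypothetical name, not part of the codebase):

import numpy as np

def add_kernel(x, y):
    # NumPy analogue of AddKernel::Compute: treat both tensors as flat
    # vectors and add element-wise. Equal shapes are assumed here, since
    # AddOp::InferShape enforces them before the kernel runs.
    assert x.shape == y.shape
    out = np.empty_like(x)            # mirrors output->mutable_data<T>()
    out.ravel()[:] = x.ravel() + y.ravel()
    return out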
@@ -1,20 +0,0 @@ (deleted Python unit test for the forward add op)

import unittest

import numpy as np
from op_test import OpTest


class TestAddOp(OpTest):
    def setUp(self):
        self.op_type = "add"
        self.inputs = {
            'X': np.random.random((102, 105)).astype("float32"),
            'Y': np.random.random((102, 105)).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}

    def test_check_output(self):
        self.check_output()


if __name__ == "__main__":
    unittest.main()
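OpTest.check_output compares the operator's actual outputs against the NumPy reference stored in self.outputs. A rough sketch of that comparison, assuming a typical tolerance-based check (run_op, check_output_sketch, and atol are illustrative names, not OpTest's real internals):

import numpy as np

def check_output_sketch(run_op, inputs, expected, atol=1e-5):
    # run_op stands in for executing the registered "add" kernel.
    actual = run_op(inputs)
    for name, want in expected.items():
        np.testing.assert_allclose(actual[name], want, atol=atol)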
@@ -1,46 +0,0 @@ (deleted Python unit test for the numeric gradient checker)

import unittest

import numpy as np
import paddle.v2.framework.core as core
from op_test import get_numeric_gradient
from op_test import create_op


class GetNumericGradientTest(unittest.TestCase):
    def test_add_op(self):
        x = np.random.random((10, 1)).astype("float32")
        y = np.random.random((10, 1)).astype("float32")
        z = x + y
        scope = core.Scope()
        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
        arr = get_numeric_gradient(scope, add_op, {'X': x,
                                                   'Y': y}, 'X', ['Out'])
        # d(Out)/dX is the identity map, so every entry of the numeric
        # gradient should be close to 1.
        self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)

    def test_softmax_op(self):
        def stable_softmax(x):
            """Compute the softmax of vector x in a numerically stable way."""
            shiftx = x - np.max(x)
            exps = np.exp(shiftx)
            return exps / np.sum(exps)

        def label_softmax_grad(Y, dY):
            # Analytic softmax backward pass, row by row:
            # dX_i = Y_i * (dY_i - dot(Y_i, dY_i)).
            dX = Y * 0.0
            for i in range(Y.shape[0]):
                d = np.dot(Y[i, :], dY[i, :])
                dX[i, :] = Y[i, :] * (dY[i, :] - d)
            return dX

        X = np.random.random((2, 2)).astype("float32")
        Y = np.apply_along_axis(stable_softmax, 1, X)
        dY = np.ones(Y.shape)
        dX = label_softmax_grad(Y, dY)

        scope = core.Scope()
        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())

        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
        # `decimal` must be an integer count of decimal places; the original
        # `decimal=1e-2` made the tolerance so loose the assertion could
        # never fail.
        np.testing.assert_almost_equal(arr, dX, decimal=2)


if __name__ == "__main__":
    unittest.main()
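get_numeric_gradient presumably estimates gradients by finite differences: perturb one input entry at a time and measure the change in the summed output. A self-contained central-difference sketch under that assumption (numeric_gradient is an illustrative name, not the op_test helper itself):

import numpy as np

def numeric_gradient(f, x, eps=1e-4):
    # Central-difference estimate of d(sum(f(x)))/dx, one entry at a time.
    grad = np.zeros(x.shape, dtype="float64")
    flat_x, flat_g = x.ravel(), grad.ravel()   # views into x and grad
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + eps
        plus = float(np.sum(f(x)))
        flat_x[i] = orig - eps
        minus = float(np.sum(f(x)))
        flat_x[i] = orig                       # restore the input
        flat_g[i] = (plus - minus) / (2.0 * eps)
    return grad

# For add, d(sum(X + Y))/dX is all ones, matching the test's expectation
# that arr.mean() is close to 1.0:
x = np.random.random((10, 1)).astype("float32")
y = np.random.random((10, 1)).astype("float32")
g = numeric_gradient(lambda v: v + y, x)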