commit 794117bb93
@@ -0,0 +1,91 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "L2DistanceLayer.h"
|
||||
#include "paddle/utils/Logging.h"
|
||||
#include "paddle/utils/Stat.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
REGISTER_LAYER(l2_distance, L2DistanceLayer);
|
||||
|
||||
bool L2DistanceLayer::init(const LayerMap& layerMap,
|
||||
const ParameterMap& parameterMap) {
|
||||
/* Initialize the basic parent class */
|
||||
Layer::init(layerMap, parameterMap);
|
||||
|
||||
CHECK_EQ(inputLayers_.size(), 2UL) << "The L2DistanceLayer accepts two and "
|
||||
<< "only two inputs.";
|
||||
CHECK_EQ(getSize(), 1UL) << "The output dimensionality of L2DistanceLayer "
|
||||
<< "is fixed to be 1.";
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void L2DistanceLayer::forward(PassType passType) {
  Layer::forward(passType);

  const auto inV1 = getInputValue(0);
  const auto inV2 = getInputValue(1);

  CHECK(inV1 && inV2);
  CHECK_EQ(inV1->getHeight(), inV2->getHeight())
      << "The height of two inputs of this layer must be the same.";
  CHECK_EQ(inV1->getWidth(), inV2->getWidth())
      << "The width of two inputs of this layer must be the same.";

  int batchSize = inV1->getHeight();
  int output_dim = getSize();
  {
    REGISTER_TIMER_INFO("L2DistanceFwAtvTimer", getName().c_str());
    reserveOutput(batchSize, output_dim);
    auto outV = getOutputValue();
    CHECK(outV) << "The output matrix should not be null.";

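    // Cache x - y in inputSub_ so that backward() can reuse it; the output
    // is sqrt(rowwise_sum((x - y) .* (x - y))).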
    Matrix::resizeOrCreate(
        inputSub_, inV1->getHeight(), inV1->getWidth(), false, useGpu_);

    inputSub_->assign(*inV1);
    inputSub_->sub(*inV2);
    outV->sumOfProducts(*inputSub_, *inputSub_, 1, 0);
    outV->sqrt2(*outV);
  }
}

void L2DistanceLayer::backward(const UpdateCallback& callback) {
  const auto outG = getOutputGrad();
  const auto outV = getOutputValue();
  CHECK(outG && outV);

  auto inGrad1 = getInputGrad(0);
  auto inGrad2 = getInputGrad(1);

  {
    REGISTER_TIMER_INFO("L2DistanceBpAtvTimer", getName().c_str());

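    // For dist(x, y) = sqrt(sum_i (x_i - y_i)^2), the gradients are
    //   d dist / dx = (x - y) / dist,   d dist / dy = -(x - y) / dist,
    // so each input gradient adds the cached inputSub_ scaled row-wise by
    // outG / dist.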
    if (inGrad1 || inGrad2) {
      // Overwrite outV in place with the common scale factor outG / dist:
      // scalarDiv turns outV into 1 / outV (this assumes a non-zero
      // distance), then dotMul multiplies element-wise by outG.
      outV->scalarDiv(*outV, 1.);
      outV->dotMul(*outG, *outV);
    }

    if (inGrad1) inGrad1->addRowScale(0, *inputSub_, *outV);

    if (inGrad2) {
      // The gradient w.r.t. the second input has the opposite sign.
      inputSub_->mulScalar(-1.);
      inGrad2->addRowScale(0, *inputSub_, *outV);
    }
  }
}

}  // namespace paddle
@@ -0,0 +1,52 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "Layer.h"
#include "paddle/math/Matrix.h"

namespace paddle {

/**
 * @brief The layer calculates the l2 distance between two input vectors.
 * \f[
 * f(\bf{x}, \bf{y}) = \sqrt{\sum_{i=1}^D (x_i - y_i)^2}
 * \f]
 *
 * - Input1: A vector (batchSize * dataDim)
 * - Input2: A vector (batchSize * dataDim)
 * - Output: A vector (batchSize * 1)
 *
 * The configuration api is: l2_distance_layer.
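 *
 * A minimal usage sketch (mirroring the test config added later in this
 * commit):
 * \code{.py}
 * dist = l2_distance_layer(x=data_layer(name='x', size=128),
 *                          y=data_layer(name='y', size=128))
 * \endcode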
 */

class L2DistanceLayer : public Layer {
 public:
  explicit L2DistanceLayer(const LayerConfig& config) : Layer(config) {}
  ~L2DistanceLayer() {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;

 private:
  // Store the result of subtracting Input2 from Input1 in forward computation,
  // which will be reused in backward computation.
  MatrixPtr inputSub_;
};

}  // namespace paddle
@@ -0,0 +1,153 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/logical_op.h"
|
||||
#include "paddle/framework/op_registry.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace operators {
|
||||
template <typename OpComment>
|
||||
class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
|
||||
public:
|
||||
BinaryLogicalOpProtoMaker(framework::OpProto *proto,
|
||||
framework::OpAttrChecker *op_checker)
|
||||
: OpProtoAndCheckerMaker(proto, op_checker) {
|
||||
OpComment comment;
|
||||
AddInput("X",
|
||||
string::Sprintf("(LoDTensor) Left hand operand of %s operator",
|
||||
comment.type));
|
||||
AddInput("Y",
|
||||
string::Sprintf("(LoDTensor) Right hand operand of %s operator",
|
||||
comment.type));
|
||||
AddOutput("Out", string::Sprintf(
|
||||
"(LoDTensor) n-dim bool tensor. Each element is %s",
|
||||
comment.equation));
|
||||
AddComment(string::Sprintf(R"DOC(%s Operator
|
||||
|
||||
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean tensors.
|
||||
Each element of Out is calculated by %s
|
||||
)DOC",
|
||||
comment.type, comment.equation));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename OpComment>
class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 public:
  UnaryLogicalOpProtoMaker(framework::OpProto *proto,
                           framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    OpComment comment;
    AddInput("X", string::Sprintf("(LoDTensor) Operand of %s operator",
                                  comment.type));
    AddOutput("Out", string::Sprintf(
                         "(LoDTensor) n-dim bool tensor. Each element is %s",
                         comment.equation));
    AddComment(string::Sprintf(R"DOC(%s Operator

It operates element-wise on X, and returns Out. X and Out are N-dim boolean tensors.
Each element of Out is calculated by %s
)DOC",
                               comment.type, comment.equation));
  }
};

template <typename OpComment>
class BinaryLogicalOpInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {
    OpComment comment;
    PADDLE_ENFORCE(context->HasInput("X"),
                   "Input(X) of %s operator must not be null", comment.type);
    PADDLE_ENFORCE(context->HasInput("Y"),
                   "Input(Y) of %s operator must not be null", comment.type);
    auto dim_x = context->GetInputDim("X");
    auto dim_y = context->GetInputDim("Y");
    PADDLE_ENFORCE_EQ(framework::product(dim_x), framework::product(dim_y),
                      "The number of elements in X and Y should be the same");

    context->SetOutputDim("Out", context->GetInputDim("X"));
    context->ShareLoD("X", "Out");
  }
};

template <typename OpComment>
class UnaryLogicalOpInferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *context) const override {
    OpComment comment;
    PADDLE_ENFORCE(context->HasInput("X"),
                   "Input(X) of %s operator must not be null", comment.type);

    context->SetOutputDim("Out", context->GetInputDim("X"));
    context->ShareLoD("X", "Out");
  }
};

class LogicalOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext &ctx) const override {
    framework::OpKernelType kt = OperatorWithKernel::GetKernelType(ctx);
    // LogicalOp kernel's device type is decided by input tensor place
    kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
    return kt;
  }
};

}  // namespace operators
}  // namespace paddle

#define REGISTER_BINARY_LOGICAL_OP(op_type, _equation)                     \
  struct _##op_type##Comment {                                             \
    static char type[];                                                    \
    static char equation[];                                                \
  };                                                                       \
  char _##op_type##Comment::type[]{#op_type};                              \
  char _##op_type##Comment::equation[]{_equation};                         \
  REGISTER_OPERATOR(                                                       \
      op_type, ::paddle::operators::LogicalOp,                             \
      ::paddle::operators::BinaryLogicalOpProtoMaker<_##op_type##Comment>, \
      ::paddle::operators::BinaryLogicalOpInferShape<_##op_type##Comment>, \
      ::paddle::framework::EmptyGradOpMaker);

#define REGISTER_UNARY_LOGICAL_OP(op_type, _equation)                     \
  struct _##op_type##Comment {                                            \
    static char type[];                                                   \
    static char equation[];                                               \
  };                                                                      \
  char _##op_type##Comment::type[]{#op_type};                             \
  char _##op_type##Comment::equation[]{_equation};                        \
  REGISTER_OPERATOR(                                                      \
      op_type, ::paddle::operators::LogicalOp,                            \
      ::paddle::operators::UnaryLogicalOpProtoMaker<_##op_type##Comment>, \
      ::paddle::operators::UnaryLogicalOpInferShape<_##op_type##Comment>, \
      ::paddle::framework::EmptyGradOpMaker);

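// Illustrative expansion: REGISTER_BINARY_LOGICAL_OP(logical_and,
// "Out = X && Y") defines a _logical_andComment struct carrying the op name
// and equation strings, then registers `logical_and` with LogicalOp, the
// binary proto maker and shape inference specialized on that comment type,
// and EmptyGradOpMaker (these ops have no gradient).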
REGISTER_BINARY_LOGICAL_OP(logical_and, "Out = X && Y");
REGISTER_BINARY_LOGICAL_KERNEL(logical_and, CPU,
                               paddle::operators::LogicalAndFunctor);
REGISTER_BINARY_LOGICAL_OP(logical_or, "Out = X || Y");
REGISTER_BINARY_LOGICAL_KERNEL(logical_or, CPU,
                               paddle::operators::LogicalOrFunctor);
REGISTER_UNARY_LOGICAL_OP(logical_not, "Out = !X");
REGISTER_UNARY_LOGICAL_KERNEL(logical_not, CPU,
                              paddle::operators::LogicalNotFunctor);
REGISTER_BINARY_LOGICAL_OP(logical_xor, "Out = (X || Y) && !(X && Y)");
REGISTER_BINARY_LOGICAL_KERNEL(logical_xor, CPU,
                               paddle::operators::LogicalXorFunctor);
@@ -0,0 +1,24 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/logical_op.h"

REGISTER_BINARY_LOGICAL_KERNEL(logical_and, GPU,
                               paddle::operators::LogicalAndFunctor);
REGISTER_BINARY_LOGICAL_KERNEL(logical_or, GPU,
                               paddle::operators::LogicalOrFunctor);
REGISTER_UNARY_LOGICAL_KERNEL(logical_not, GPU,
                              paddle::operators::LogicalNotFunctor);
REGISTER_BINARY_LOGICAL_KERNEL(logical_xor, GPU,
                               paddle::operators::LogicalXorFunctor);
@@ -0,0 +1,93 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <math.h>
#include <type_traits>
#include "paddle/framework/op_registry.h"
#include "paddle/platform/transform.h"

namespace paddle {
namespace operators {

// Each functor exposes ELEM_TYPE so the kernel classes below can recover
// the input element type T.
template <typename T>
struct LogicalAndFunctor {
  using ELEM_TYPE = T;
  HOSTDEVICE bool operator()(const T& a, const T& b) const { return a && b; }
};

template <typename T>
struct LogicalOrFunctor {
  using ELEM_TYPE = T;
  HOSTDEVICE bool operator()(const T& a, const T& b) const { return a || b; }
};

template <typename T>
struct LogicalNotFunctor {
  using ELEM_TYPE = T;
  HOSTDEVICE bool operator()(const T& a) const { return !a; }
};

template <typename T>
struct LogicalXorFunctor {
  using ELEM_TYPE = T;
  HOSTDEVICE bool operator()(const T& a, const T& b) const {
    return (a || b) && !(a && b);
  }
};

template <typename Place, typename Functor>
class BinaryLogicalOpKernel
    : public framework::OpKernel<typename Functor::ELEM_TYPE> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    using T = typename Functor::ELEM_TYPE;
    auto* x = context.Input<framework::Tensor>("X");
    auto* y = context.Input<framework::Tensor>("Y");
    auto* out = context.Output<framework::Tensor>("Out");
    Functor binary_func;
    // Apply the functor element-wise over the two inputs, writing bool
    // results into Out.
    platform::Transform<Place> trans;
    trans(context.device_context(), x->data<T>(), x->data<T>() + x->numel(),
          y->data<T>(), out->mutable_data<bool>(context.GetPlace()),
          binary_func);
  }
};

template <typename Place, typename Functor>
class UnaryLogicalOpKernel
    : public framework::OpKernel<typename Functor::ELEM_TYPE> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    using T = typename Functor::ELEM_TYPE;
    auto* x = context.Input<framework::Tensor>("X");
    auto* out = context.Output<framework::Tensor>("Out");
    Functor unary_func;
    platform::Transform<Place> trans;
    trans(context.device_context(), x->data<T>(), x->data<T>() + x->numel(),
          out->mutable_data<bool>(context.GetPlace()), unary_func);
  }
};

}  // namespace operators
}  // namespace paddle

#define REGISTER_BINARY_LOGICAL_KERNEL(op_type, dev, functor) \
  REGISTER_OP_##dev##_KERNEL(                                 \
      op_type, ::paddle::operators::BinaryLogicalOpKernel<    \
                   ::paddle::platform::dev##Place, functor<bool>>);

#define REGISTER_UNARY_LOGICAL_KERNEL(op_type, dev, functor) \
  REGISTER_OP_##dev##_KERNEL(                                \
      op_type, ::paddle::operators::UnaryLogicalOpKernel<    \
                   ::paddle::platform::dev##Place, functor<bool>>);
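// Illustrative expansion: REGISTER_BINARY_LOGICAL_KERNEL(logical_and, CPU,
// LogicalAndFunctor) becomes a REGISTER_OP_CPU_KERNEL(logical_and, ...) call
// that instantiates BinaryLogicalOpKernel<platform::CPUPlace,
// LogicalAndFunctor<bool>>.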
@@ -0,0 +1,39 @@
type: "nn"
layers {
  name: "x"
  type: "data"
  size: 128
  active_type: ""
}
layers {
  name: "y"
  type: "data"
  size: 128
  active_type: ""
}
layers {
  name: "__l2_distance_layer_0__"
  type: "l2_distance"
  size: 1
  active_type: ""
  inputs {
    input_layer_name: "x"
  }
  inputs {
    input_layer_name: "y"
  }
}
input_layer_names: "x"
input_layer_names: "y"
output_layer_names: "__l2_distance_layer_0__"
sub_models {
  name: "root"
  layer_names: "x"
  layer_names: "y"
  layer_names: "__l2_distance_layer_0__"
  input_layer_names: "x"
  input_layer_names: "y"
  output_layer_names: "__l2_distance_layer_0__"
  is_recurrent_layer_group: false
}
@@ -0,0 +1,7 @@
from paddle.trainer_config_helpers import *

outputs(
    l2_distance_layer(
        x=data_layer(
            name='x', size=128), y=data_layer(
            name='y', size=128)))
@@ -0,0 +1,35 @@
import op_test
import unittest
import numpy as np


def create_test_class(op_type, callback, binary_op=True):
    class Cls(op_test.OpTest):
        def setUp(self):
            a = np.random.choice(a=[True, False], size=(10, 7)).astype(bool)
            if binary_op:
                b = np.random.choice(a=[True, False], size=(10, 7)).astype(bool)
                c = callback(a, b)
            else:
                c = callback(a)
            self.outputs = {'Out': c}
            self.op_type = op_type
            if binary_op:
                self.inputs = {'X': a, 'Y': b}
            else:
                self.inputs = {'X': a}

        def test_output(self):
            self.check_output()

    Cls.__name__ = op_type
    globals()[op_type] = Cls

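# Each call below creates an OpTest subclass named after the op and registers
# it in the module namespace so unittest discovery picks it up; the lambda is
# the numpy reference implementation the op's output is checked against.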
create_test_class('logical_and', lambda _a, _b: np.logical_and(_a, _b))
create_test_class('logical_or', lambda _a, _b: np.logical_or(_a, _b))
create_test_class('logical_not', lambda _a: np.logical_not(_a), False)
create_test_class('logical_xor', lambda _a, _b: np.logical_xor(_a, _b))

if __name__ == '__main__':
    unittest.main()