parent e3701ad71f
commit ba22624d7e
@@ -0,0 +1,97 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/add_position_encoding_op.h"
|
||||||
|
|
||||||
|
namespace paddle {
|
||||||
|
namespace operators {
|
||||||
|
|
||||||
|
class AddPositionEncodingOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "X(Input) of add_position_encoding_op should not be null.");
    PADDLE_ENFORCE(
        ctx->HasOutput("Out"),
        "Out(Output) of add_position_encoding_op should not be null.");

    auto x_dims = ctx->GetInputDim("X");
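    // The output has the same shape and LoD as the input X.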
    ctx->SetOutputDim("Out", x_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};

class AddPositionEncodingOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "X(Input) must not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Out"), "Out must not be null.");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Out@GRAD must not be null.");

    auto out_dims = ctx->GetInputDim("Out");
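    // X and Out share the same shape, so dX can reuse Out's dimensions.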
    if (ctx->HasOutput(framework::GradVarName("X"))) {
      ctx->SetOutputDim(framework::GradVarName("X"), out_dims);
    }
  }
};

class AddPositionEncodingOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of AddPositionEncoding operator");
    AddOutput("Out", "Output of AddPositionEncoding operator");
    AddAttr<float>("alpha", "The scale of the original embedding.")
        .SetDefault(1.0f)
        .AddCustomChecker([](const float& alpha) {
          PADDLE_ENFORCE(alpha >= 0.0f,
                         "'alpha' must be greater than or equal to 0.0.");
        });
    AddAttr<float>("beta", "The scale of the position embedding.")
        .SetDefault(1.0f)
        .AddCustomChecker([](const float& beta) {
          PADDLE_ENFORCE(beta >= 0.0f,
                         "'beta' must be greater than or equal to 0.0.");
        });
    AddComment(R"DOC(
Add Position Encoding Operator.

This operator computes Out = alpha * X + beta * PositionEncoding, where the
position encoding is built from sine and cosine functions of each token's
position. The sizes of the input dimensions are checked during infer-shape.
)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plt = paddle::platform;

REGISTER_OPERATOR(add_position_encoding, ops::AddPositionEncodingOp,
                  ops::AddPositionEncodingOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(add_position_encoding_grad, ops::AddPositionEncodingOpGrad);

REGISTER_OP_CPU_KERNEL(
    add_position_encoding,
    ops::AddPositionEncodingKernel<plt::CPUDeviceContext, float>,
    ops::AddPositionEncodingKernel<plt::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    add_position_encoding_grad,
    ops::AddPositionEncodingGradKernel<plt::CPUDeviceContext, float>,
    ops::AddPositionEncodingGradKernel<plt::CPUDeviceContext, double>);

@@ -0,0 +1,105 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cmath>  // for sin, cos, and pow used in the kernel below

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detail/safe_ref.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class AddPositionEncodingKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* X = context.Input<framework::LoDTensor>("X");
    auto& x_lod = X->lod();
    auto* src_ptr = X->data<T>();

    auto* Out = context.Output<framework::LoDTensor>("Out");
    auto* dst_ptr = Out->mutable_data<T>(context.GetPlace());

    float alpha = context.Attr<float>("alpha");
    float beta = context.Attr<float>("beta");

    auto x_dim = X->dims();
    int batch_size = 0;
    int max_seq_len = 0;
    int enc_size = 0;

    if (x_lod.empty()) {
      PADDLE_ENFORCE(
          x_dim.size() == 3UL,
          "The input X of Add Position Encoding should be a 3-D Tensor!");
      batch_size = x_dim[0];
      max_seq_len = x_dim[1];
      enc_size = x_dim[2];
    } else {
      PADDLE_ENFORCE(
          x_dim.size() == 2UL,
          "The input X of Add Position Encoding should be a 2-D LoDTensor!");
      PADDLE_ENFORCE(
          x_lod.size() == 1UL,
          "The Add Position Encoding Op only supports lod_level == 1!");
      batch_size = x_lod[0].size() - 1;
      max_seq_len = -1;
      enc_size = x_dim[1];
    }

    PADDLE_ENFORCE(enc_size % 2 == 0,
                   "Only even encoding sizes are supported!");
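
    // Sinusoidal position encoding: for position j and channel k in
    // [0, half_size), the encoding is sin(j / 10000^(k / (half_size - 1)))
    // for the first half of the channels and the matching cosine for the
    // second half; the input is scaled by alpha and the encoding by beta.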
    const int half_size = enc_size / 2;
    for (int i = 0; i < batch_size; ++i) {
      const int max_length =
          x_lod.empty() ? max_seq_len : x_lod[0][i + 1] - x_lod[0][i];
      for (int j = 0; j < max_length; ++j) {
        for (int k = 0; k < half_size; ++k) {
          const double val =
              (half_size > 1)
                  ? j / pow(10000.0, static_cast<double>(k) / (half_size - 1))
                  : j / 10000.0;
          dst_ptr[k] = src_ptr[k] * alpha + sin(val) * beta;
          dst_ptr[half_size + k] =
              src_ptr[half_size + k] * alpha + cos(val) * beta;
        }
        src_ptr += enc_size;
        dst_ptr += enc_size;
      }
    }
  }
};

template <typename DeviceContext, typename T>
class AddPositionEncodingGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* dOut =
        context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
    auto dout = framework::EigenVector<T>::Flatten(*dOut);

    auto* dX =
        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
    dX->mutable_data<T>(context.GetPlace());
    auto dx = framework::EigenVector<T>::Flatten(*dX);

    float alpha = context.Attr<float>("alpha");

    auto* place =
        context.template device_context<DeviceContext>().eigen_device();
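    // The forward pass computes Out = alpha * X + beta * PE, and the position
    // encoding PE does not depend on X, so dX is simply alpha * dOut.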
    dx.device(*place) = dout * static_cast<T>(alpha);
  }
};

}  // namespace operators
}  // namespace paddle
@@ -0,0 +1,134 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import math
import paddle.fluid.core as core
from op_test import OpTest


class TestAddPositionEncodingTensorOp(OpTest):
    """
    Test the add_position_encoding op with a plain 3-D Tensor input.
    """

    def setUp(self):
        """
        Prepare inputs, outputs, and attributes for the op test.
        """
        self.op_type = "add_position_encoding"
        self.dtype = np.float32
        self.init_input_output()

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x), }
        self.outputs = {'Out': self.out}
        self.attrs = {'alpha': self.alpha, 'beta': self.beta}

    def test_check_output(self):
        """
        Check the correctness of the forward output.
        """
        self.check_output()

    def test_check_grad(self):
        """
        Check the correctness of the gradient.
        """
        self.check_grad(['X'], 'Out', max_relative_error=0.005)

    def init_input_output(self):
        """
        Initialize the input and expected output for the test case.
        """
        self.alpha = 0.6
        self.beta = 0.5
        self.x = np.random.uniform(0.1, 1, [2, 4, 4]).astype(self.dtype)
        self.out = np.copy(self.x)

        batch_size = self.x.shape[0]
        max_length = self.x.shape[1]
        enc_size = self.x.shape[2]

        half_shape = int(enc_size / 2)
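        # Reference implementation: mirror the C++ kernel, scaling the input
        # by alpha and the sinusoidal encoding by beta at each position j.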
        for i in range(batch_size):
            for j in range(max_length):
                for k in range(half_shape):
                    val = (j / pow(10000.0, float(k) / (half_shape - 1))
                           if half_shape > 1 else j / 10000.0)
                    self.out[i, j, k] = \
                        self.x[i, j, k] * self.alpha + math.sin(val) * self.beta
                    self.out[i, j, half_shape + k] = \
                        self.x[i, j, half_shape + k] * self.alpha + \
                        math.cos(val) * self.beta


class TestAddPositionEncodingLoDTensorOp(OpTest):
    """
    Test the add_position_encoding op with a 2-D LoDTensor input.
    """

    def setUp(self):
        """
        Prepare inputs, outputs, and attributes for the LoDTensor op test.
        """
        self.op_type = "add_position_encoding"
        self.dtype = np.float32
        self.init_input_output()

        self.inputs = {'X': (self.x, self.lod), }
        self.outputs = {'Out': (self.out, self.lod)}
        self.attrs = {'alpha': self.alpha, 'beta': self.beta}

    def test_check_output(self):
        """
        Check the correctness of the forward output.
        """
        self.check_output()

    def test_check_grad(self):
        """
        Check the correctness of the gradient.
        """
        self.check_grad(['X'], 'Out', max_relative_error=0.005)

    def init_input_output(self):
        """
        Initialize the input and expected output for the test case.
        """
        self.alpha = 0.6
        self.beta = 0.5
        self.x = np.random.uniform(0.1, 1, [10, 4]).astype(self.dtype)
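        # One LoD level with two sequences of lengths 3 and 7 (3 + 7 = 10 rows).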
        self.lod = [[3, 7]]
        self.out = np.copy(self.x)

        batch_size = len(self.lod[0])
        enc_size = self.x.shape[1]

        start = 0
        half_shape = int(enc_size / 2)
        for i in range(batch_size):
            max_length = self.lod[0][i]
            for j in range(max_length):
                for k in range(half_shape):
                    val = (j / pow(10000.0, float(k) / (half_shape - 1))
                           if half_shape > 1 else j / 10000.0)
                    pos = start + j
                    self.out[pos, k] = \
                        self.x[pos, k] * self.alpha + math.sin(val) * self.beta
                    self.out[pos, half_shape + k] = \
                        self.x[pos, half_shape + k] * self.alpha + \
                        math.cos(val) * self.beta
            start += max_length


if __name__ == '__main__':
    unittest.main()