parent e602c707f8
commit 9bc71087b6
paddle/operators/lod_reset_op.cc
@@ -0,0 +1,120 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/lod_reset_op.h"

namespace paddle {
namespace operators {

class LoDResetOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    // Input check.
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of LoDResetOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of LoDResetOp should not be null.");
    // If the target LoD is not set from Input(), it must be set from Attr().
    if (!ctx->HasInput("TargetLoD")) {
      auto level0 = ctx->Attrs().Get<std::vector<int>>("target_lod");
      PADDLE_ENFORCE(level0.size() > 1,
                     "Target LoD is not found, should be set to be a valid one "
                     "through Input() or Attr().");
    }
    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
        ctx.device_context());
  }
};

class LoDResetOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  LoDResetOpMaker(framework::OpProto *proto,
                  framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "(LoDTensor) The input tensor of lod_reset operator.");
    AddInput("TargetLoD",
             "(Tensor, optional) The target level 0 LoD from Input().")
        .AsDispensable();
    AddOutput("Out", "(LoDTensor) The output tensor of lod_reset operator.");
    AddAttr<std::vector<int>>("target_lod",
                              "The target level 0 LoD from Attr().")
        .SetDefault(std::vector<int>{});
    AddComment(R"DOC(LoDReset operator

Reset the LoD of Input(X) to a new one specified by Input(TargetLoD) or
Attr(target_lod), or set a LoD for Input(X) if it does not have one.
Currently the lod_reset operator only supports resetting the level 0 LoD.
At least one of Input(TargetLoD) and Attr(target_lod) must be set; if both
are set, Input(TargetLoD) is chosen as the target LoD.

An example:
Given a float LoDTensor X with shape (6, 1), whose transpose is

    [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],

with LoD = [[0, 2, 5, 6]], the three (transposed) sequences are

    [1.0, 2.0], [3.0, 4.0, 5.0], [6.0].

If the target LoD is [0, 4, 6], the lod_reset operator resets the LoD so
that the sequences contained in the LoDTensor Output(Out) become

    [1.0, 2.0, 3.0, 4.0], [5.0, 6.0].

)DOC");
  }
};

class LoDResetGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null.");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) shouldn't be null.");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
        ctx.device_context());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(lod_reset, ops::LoDResetOp, ops::LoDResetOpMaker, lod_reset_grad,
            ops::LoDResetGradOp);
REGISTER_OP_CPU_KERNEL(lod_reset,
                       ops::LoDResetKernel<paddle::platform::CPUPlace, float>,
                       ops::LoDResetKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(
    lod_reset_grad, ops::LoDResetGradKernel<paddle::platform::CPUPlace, float>,
    ops::LoDResetGradKernel<paddle::platform::CPUPlace, double>);
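The worked example in the DOC string above can be replayed with plain Python lists. A minimal illustrative sketch (split_by_lod is a hypothetical helper written for this note, not a Paddle API):

def split_by_lod(data, level0):
    # Slice a flat sequence into sub-sequences using level 0 LoD offsets;
    # offset pair (level0[i], level0[i+1]) delimits the i-th sub-sequence.
    return [data[level0[i]:level0[i + 1]] for i in range(len(level0) - 1)]

x = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
print(split_by_lod(x, [0, 2, 5, 6]))  # [[1.0, 2.0], [3.0, 4.0, 5.0], [6.0]]
print(split_by_lod(x, [0, 4, 6]))     # [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0]]

The data never moves; only the offsets that partition it change, which is why the operator can share the input's memory with the output.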
paddle/operators/lod_reset_op.cu
@@ -0,0 +1,24 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/lod_reset_op.h"

namespace ops = paddle::operators;

REGISTER_OP_GPU_KERNEL(lod_reset,
                       ops::LoDResetKernel<paddle::platform::GPUPlace, float>,
                       ops::LoDResetKernel<paddle::platform::GPUPlace, double>);
REGISTER_OP_GPU_KERNEL(
    lod_reset_grad, ops::LoDResetGradKernel<paddle::platform::GPUPlace, float>,
    ops::LoDResetGradKernel<paddle::platform::GPUPlace, double>);
paddle/operators/lod_reset_op.h
@@ -0,0 +1,78 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <vector>

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename Place, typename T>
class LoDResetKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const {
    auto* out = ctx.Output<framework::LoDTensor>("Out");
    auto* in = ctx.Input<framework::LoDTensor>("X");
    auto* lod_t = ctx.Input<framework::Tensor>("TargetLoD");

    // The target LoD comes from Input(TargetLoD) if given, else from Attr().
    std::vector<int> level0;
    if (lod_t) {
      auto* lod = lod_t->data<int>();
      if (platform::is_gpu_place(ctx.GetPlace())) {
        // The LoD offsets are needed on the host, so copy them back to CPU.
        framework::Tensor lod_cpu;
        lod_cpu.CopyFrom(*lod_t, platform::CPUPlace(), ctx.device_context());
        lod = lod_cpu.data<int>();
      }
      level0 = std::vector<int>(lod, lod + lod_t->numel());
    } else {
      level0 = ctx.Attr<std::vector<int>>("target_lod");
    }

    PADDLE_ENFORCE(level0.size() > 1UL,
                   "The size of target LoD should be greater than 1.");
    PADDLE_ENFORCE(level0[0] == 0,
                   "Target LoD should be a vector starting from 0.");
    PADDLE_ENFORCE(level0.back() == in->dims()[0],
                   "Target LoD should be a vector ending with the first "
                   "dimension of Input(X).");
    for (size_t i = 0; i < level0.size() - 1; ++i) {
      PADDLE_ENFORCE(level0[i + 1] > level0[i],
                     "Target LoD should be an ascending vector.");
    }

    // Resetting the LoD leaves the data untouched, so share it with the input.
    out->ShareDataWith(*in);

    std::vector<size_t> ulevel0(level0.size(), 0);
    std::transform(level0.begin(), level0.end(), ulevel0.begin(),
                   [](int a) { return static_cast<size_t>(a); });
    framework::LoD target_lod;
    target_lod.push_back(ulevel0);
    out->set_lod(target_lod);
  }
};

template <typename Place, typename T>
class LoDResetGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const {
    auto* d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));

    // lod_reset does not change the data, so the gradient passes through.
    d_x->ShareDataWith(*d_out);
  }
};
}  // namespace operators
}  // namespace paddle
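The constraints that LoDResetKernel::Compute enforces on the target LoD (size greater than 1, starts at 0, ends at the first dimension of X, strictly ascending) can be mirrored in a few lines of Python. A hedged sketch for illustration only (validate_target_lod is a made-up name, not a Paddle API):

def validate_target_lod(level0, x_first_dim):
    # Mirrors the PADDLE_ENFORCE checks in LoDResetKernel::Compute.
    assert len(level0) > 1, "The size of target LoD should be greater than 1."
    assert level0[0] == 0, "Target LoD should be a vector starting from 0."
    assert level0[-1] == x_first_dim, \
        "Target LoD should end with the first dimension of Input(X)."
    assert all(a < b for a, b in zip(level0, level0[1:])), \
        "Target LoD should be an ascending vector."

validate_target_lod([0, 4, 6], x_first_dim=6)  # passes silently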
test_lod_reset_op.py
@@ -0,0 +1,64 @@
import unittest
import numpy as np
from op_test import OpTest


class TestLodResetOpByAttr(OpTest):
    def setUp(self):
        self.op_type = "lod_reset"
        x = np.random.random((10, 20)).astype("float32")
        lod = [[0, 3, 5, 10]]
        target_lod_0 = [0, 7, 10]
        self.inputs = {'X': (x, lod)}
        self.attrs = {'target_lod': target_lod_0}
        self.outputs = {'Out': (x, [target_lod_0])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")


class TestLodResetOpByInput(OpTest):
    def setUp(self):
        self.op_type = "lod_reset"
        x = np.random.random((10, 20)).astype("float32")
        lod = [[0, 3, 5, 10]]
        target_lod_0 = [0, 4, 7, 10]
        self.inputs = {
            'X': (x, lod),
            'TargetLoD': np.array(target_lod_0).astype('int32')
        }
        self.outputs = {'Out': (x, [target_lod_0])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # TargetLoD carries no gradient.
        self.check_grad(["X"], "Out", no_grad_set=set(["TargetLoD"]))


class TestLodResetOpBoth(OpTest):
    def setUp(self):
        self.op_type = "lod_reset"
        x = np.random.random((10, 20)).astype("float32")
        lod = [[0, 3, 5, 10]]
        target_lod_0_attr = [0, 7, 10]
        target_lod_0_in = [0, 4, 7, 10]
        self.inputs = {
            'X': (x, lod),
            'TargetLoD': np.array(target_lod_0_in).astype('int32')
        }
        self.attrs = {'target_lod': target_lod_0_attr}
        # Input(TargetLoD) takes precedence over Attr(target_lod).
        self.outputs = {'Out': (x, [target_lod_0_in])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out", no_grad_set=set(["TargetLoD"]))


if __name__ == '__main__':
    unittest.main()