Merge pull request #5501 from reyoung/feature/lod_array_length

Add `lod_array_length` operator
mobile_baidu
Yu Yang committed 8 years ago via GitHub
commit c88f98cf9e

@ -0,0 +1,71 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
class LoDArrayLengthOp : public framework::OperatorBase {
public:
LoDArrayLengthOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorBase(type, inputs, outputs, attrs) {}
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensorArray>();
auto &out =
*scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
out.Resize({1});
auto cpu = platform::CPUPlace();
*out.mutable_data<int64_t>(cpu) = static_cast<int64_t>(x.size());
}
};
class LoDArrayLengthProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
LoDArrayLengthProtoMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "(LoDTensorArray) The input tensor array.");
AddOutput("Out", "(Tensor) 1x1 CPU Tensor of length, int64_t");
AddComment(R"DOC(Get the length of lod tensor array
Out = len(X)
NOTE: The output is a CPU Tensor since the control variable should be only in
CPU and the length of LoDTensorArray should be used as control variables.
)DOC");
}
};
class LoDArrayLengthInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
PADDLE_ENFORCE(context->HasInput("X"));
PADDLE_ENFORCE(context->HasOutput("Out"));
context->SetOutputDim("Out", {1});
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(lod_array_length, ops::LoDArrayLengthOp,
ops::LoDArrayLengthInferShape, ops::LoDArrayLengthProtoMaker,
paddle::framework::EmptyGradOpMaker);
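
For intuition, here is a minimal Python sketch (plain NumPy, not Paddle code) of what LoDArrayLengthOp::Run computes: treat the input as a list of tensors and emit a single-element int64 result that stays on CPU.

import numpy as np

def lod_array_length(tensor_array):
    # Sketch of the operator's semantics: Out = len(X), shape (1,), int64, on CPU.
    return np.array([len(tensor_array)], dtype=np.int64)

# A stand-in "LoDTensorArray" holding three tensors has length 3.
print(lod_array_length([np.zeros(2), np.ones(4), np.arange(5)]))  # -> [3]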

@ -947,3 +947,12 @@ def shrink_memory(x, i, table, main_program=None):
        outputs={'Out': [out]},
        attrs={})
    return out


def array_length(array, main_program=None):
    """Return the length of a LoDTensorArray as a one-element int64 CPU tensor."""
    helper = LayerHelper('array_length', **locals())
    tmp = helper.create_tmp_variable(dtype='int64')
    tmp.stop_gradient = True
    helper.append_op(
        type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]})
    return tmp
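
A quick usage sketch for the new layer (it assumes the array_write and fill_constant layers exercised in the unit test below; this only mirrors that test, it is not an additional API):

import paddle.v2.framework.layers as layers

# Build an array with one tensor written at index 0, then ask for its length.
x = layers.zeros(shape=[10], dtype='int32')
i = layers.fill_constant(shape=[1], dtype='int64', value=0)
arr = layers.array_write(x, i=i)
n = layers.array_length(arr)  # one-element int64 CPU tensor; stop_gradient is True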

@ -0,0 +1,21 @@
import unittest
import paddle.v2.framework.layers as layers
from paddle.v2.framework.executor import Executor
import paddle.v2.framework.core as core
import numpy


class TestLoDArrayLength(unittest.TestCase):
    def test_array_length(self):
        tmp = layers.zeros(shape=[10], dtype='int32')
        i = layers.fill_constant(shape=[1], dtype='int64', value=10)
        arr = layers.array_write(tmp, i=i)
        arr_len = layers.array_length(arr)
        cpu = core.CPUPlace()
        exe = Executor(cpu)
        result = numpy.array(exe.run(fetch_list=[arr_len])[0])
        # Writing at index 10 grows the array to 11 elements.
        self.assertEqual(11, result[0])


if __name__ == '__main__':
    unittest.main()