Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add-merge-splited-ids
commit f031555cfb
paddle/fluid/operators/slice_op.cc
@@ -0,0 +1,130 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/slice_op.h"
#include <algorithm>
#include <vector>

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

class SliceOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("Input"),
                   "Input (Input) of slice op should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output (Out) of slice op should not be null.");

    auto in_dims = ctx->GetInputDim("Input");
    PADDLE_ENFORCE(in_dims.size() < 7,
                   "The rank of input should be less than 7.");
    framework::DDim out_dims(in_dims);
    auto axes = ctx->Attrs().Get<std::vector<int>>("axes");
    auto starts = ctx->Attrs().Get<std::vector<int>>("starts");
    auto ends = ctx->Attrs().Get<std::vector<int>>("ends");

    PADDLE_ENFORCE_EQ(starts.size(), ends.size());
    PADDLE_ENFORCE_EQ(starts.size(), axes.size());
    int dim_value, start, end;
    for (size_t i = 0; i < axes.size(); ++i) {
      dim_value = out_dims[axes[i]];
      start = starts[i] < 0 ? (starts[i] + dim_value) : starts[i];
      end = ends[i] < 0 ? (ends[i] + dim_value) : ends[i];
      start = std::max(start, 0);
      end = std::max(end, 0);
      start = std::min(start, dim_value);
      end = std::min(end, dim_value);
      start = std::min(start, end);
      out_dims[axes[i]] = end - start;
    }
    ctx->SetOutputDim("Out", out_dims);
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<Tensor>("Input")->type()),
        ctx.GetPlace());
  }
};

class SliceOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Input", "Tensor of data to extract slices from.");
    AddOutput("Out", "Sliced data tensor.");

    AddAttr<std::vector<int>>(
        "axes",
        "(list<int>) Axes that `starts` and `ends` apply to. It's optional. "
        "If not present, will be treated as [0, 1, ..., len(`starts`) - 1].");
    AddAttr<std::vector<int>>(
        "starts",
        "(list<int>) Starting indices of corresponding axis in `axes`.");
    AddAttr<std::vector<int>>(
        "ends",
        "(list<int>) Ending indices of corresponding axis in `axes`.");

    AddComment(R"DOC(
Slice Operator.

Produces a slice of the input tensor along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the `axes`, `starts` and `ends` attributes to specify the start and
end indices for each axis in the list of axes; it uses this information
to slice the input data tensor. If a negative value is passed for any of
the start or end indices, it is counted from the end of that dimension.
If the value passed to start or end is larger than n (the number of
elements in this dimension), it is treated as n. For slicing to the end of
a dimension of unknown size, it is recommended to pass in INT_MAX. If
`axes` is omitted, it is set to [0, ..., ndim-1].

    Example 1:
        Given:
            data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
            axes = [0, 1]
            starts = [1, 0]
            ends = [2, 3]
        Then:
            result = [ [5, 6, 7], ]

    Example 2:
        Given:
            data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
            starts = [0, 1]
            ends = [-1, 1000]
        Then:
            result = [ [2, 3, 4], ]
)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(slice, ops::SliceOp, ops::SliceOpMaker,
                  paddle::framework::EmptyGradOpMaker);

REGISTER_OP_CPU_KERNEL(
    slice, ops::SliceKernel<paddle::platform::CPUDeviceContext, int>,
    ops::SliceKernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::SliceKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SliceKernel<paddle::platform::CPUDeviceContext, double>);
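As a quick sanity check on the shape inference above, here is a minimal standalone sketch, not part of the diff: the helper name InferSliceShape is made up for illustration, and it simply mirrors the start/end normalization and clamping in SliceOp::InferShape, reproducing Example 2 from the operator comment.

// Standalone sketch, not part of the diff. InferSliceShape is a hypothetical
// helper mirroring the normalization/clamping done in SliceOp::InferShape.
#include <algorithm>
#include <cassert>
#include <vector>

std::vector<int> InferSliceShape(std::vector<int> dims,
                                 const std::vector<int> &axes,
                                 const std::vector<int> &starts,
                                 const std::vector<int> &ends) {
  for (size_t i = 0; i < axes.size(); ++i) {
    int dim_value = dims[axes[i]];
    // Negative indices are counted from the end of the dimension.
    int start = starts[i] < 0 ? starts[i] + dim_value : starts[i];
    int end = ends[i] < 0 ? ends[i] + dim_value : ends[i];
    // Clamp to [0, dim_value] and force start <= end.
    start = std::min(std::max(start, 0), dim_value);
    end = std::min(std::max(end, 0), dim_value);
    start = std::min(start, end);
    dims[axes[i]] = end - start;
  }
  return dims;
}

int main() {
  // Example 2 from the operator comment: data is 2 x 4, axes default to
  // [0, 1], starts = [0, 1], ends = [-1, 1000]  ->  result shape is 1 x 3.
  std::vector<int> out = InferSliceShape({2, 4}, {0, 1}, {0, 1}, {-1, 1000});
  assert(out[0] == 1 && out[1] == 3);  // result = [ [2, 3, 4], ]
  return 0;
}

The ordering of the clamps (negative-index shift, clamp to [0, dim], then start = min(start, end)) is what guarantees a non-negative output extent even for out-of-range attribute values.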
paddle/fluid/operators/slice_op.cu
@@ -0,0 +1,22 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/slice_op.h"

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    slice, ops::SliceKernel<paddle::platform::CUDADeviceContext, float>,
    ops::SliceKernel<paddle::platform::CUDADeviceContext, double>,
    ops::SliceKernel<paddle::platform::CUDADeviceContext, int>,
    ops::SliceKernel<paddle::platform::CUDADeviceContext, int64_t>);
paddle/fluid/operators/slice_op.h
@@ -0,0 +1,88 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class SliceKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // Dispatch to a rank-specific instantiation; SliceOp::InferShape
    // guarantees the input rank is less than 7.
    int rank = ctx.Input<framework::Tensor>("Input")->dims().size();
    switch (rank) {
      case 1:
        SliceCompute<1>(ctx);
        break;
      case 2:
        SliceCompute<2>(ctx);
        break;
      case 3:
        SliceCompute<3>(ctx);
        break;
      case 4:
        SliceCompute<4>(ctx);
        break;
      case 5:
        SliceCompute<5>(ctx);
        break;
      case 6:
        SliceCompute<6>(ctx);
        break;
    }
  }

 private:
  template <size_t D>
  void SliceCompute(const framework::ExecutionContext& context) const {
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto in = context.Input<framework::Tensor>("Input");
    auto out = context.Output<framework::Tensor>("Out");
    out->mutable_data<T>(context.GetPlace());
    auto out_dims = out->dims();
    auto in_dims = in->dims();
    auto axes = context.Attr<std::vector<int>>("axes");
    auto starts = context.Attr<std::vector<int>>("starts");

    // Default: keep every dimension in full, i.e. offset 0 with an extent
    // equal to the output extent already fixed by InferShape.
    auto offsets = Eigen::array<int, D>();
    auto extents = Eigen::array<int, D>();
    for (size_t i = 0; i < D; ++i) {
      offsets[i] = 0;
      extents[i] = out_dims[i];
    }
    // For the sliced axes, convert negative starts to absolute indices
    // (counted from the end) and clamp at 0 to obtain the offsets.
    int start;
    for (size_t i = 0; i < axes.size(); ++i) {
      start = starts[i];
      if (start < 0) {
        start = (start + in_dims[axes[i]]);
      }
      start = std::max(start, 0);
      offsets[axes[i]] = start;
    }
    auto in_t =
        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
            *in);
    auto out_t =
        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
            *out);
    out_t.device(place) = in_t.slice(offsets, extents);
  }
};
}  // namespace operators
}  // namespace paddle
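For reference, the Eigen slice call that SliceCompute ultimately delegates to can be exercised on its own. The following standalone sketch is not part of the diff; it assumes Eigen's unsupported CXX11 Tensor module is available on the include path (it is what framework::EigenTensor maps onto). It builds the 2 x 4 tensor from Example 1 and applies the same offsets/extents the kernel would compute.

// Standalone sketch, not part of the diff. Assumes Eigen's unsupported
// CXX11 Tensor module is on the include path.
#include <unsupported/Eigen/CXX11/Tensor>

#include <cassert>

int main() {
  // Input from Example 1 in slice_op.cc: a 2 x 4 row-major tensor.
  Eigen::Tensor<float, 2, Eigen::RowMajor> data(2, 4);
  data.setValues({{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}});

  // axes = [0, 1], starts = [1, 0], ends = [2, 3]
  //   -> offsets = {1, 0}, extents = {2 - 1, 3 - 0} = {1, 3}.
  Eigen::array<int, 2> offsets = {1, 0};
  Eigen::array<int, 2> extents = {1, 3};
  Eigen::Tensor<float, 2, Eigen::RowMajor> result =
      data.slice(offsets, extents);

  // Expected: [ [5, 6, 7] ], matching "result = [ [5, 6, 7], ]" in the doc.
  assert(result.dimension(0) == 1 && result.dimension(1) == 3);
  assert(result(0, 0) == 5 && result(0, 1) == 6 && result(0, 2) == 7);
  return 0;
}

This also shows the division of labor in the operator: InferShape fixes the extents through the output shape, so the kernel only has to translate `starts` into offsets before handing the work to Eigen.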