parent 5fc305220c
commit 5e52dafda5

@@ -0,0 +1,153 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/roi_align_op.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;

class ROIAlignOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of ROIAlignOp should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("ROIs"),
                   "Input(ROIs) of ROIAlignOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of ROIAlignOp should not be null.");
    auto input_dims = ctx->GetInputDim("X");
    auto rois_dims = ctx->GetInputDim("ROIs");

    PADDLE_ENFORCE(input_dims.size() == 4,
                   "The format of input tensor is NCHW.");
    PADDLE_ENFORCE(rois_dims.size() == 2,
                   "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
                   "given as [[x1, y1, x2, y2], ...].");
    PADDLE_ENFORCE(rois_dims[1] == 4,
                   "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) "
                   "given as [[x1, y1, x2, y2], ...].");
    int pooled_height = ctx->Attrs().Get<int>("pooled_height");
    int pooled_width = ctx->Attrs().Get<int>("pooled_width");
    float spatial_scale = ctx->Attrs().Get<float>("spatial_scale");

    PADDLE_ENFORCE_GT(pooled_height, 0,
                      "The pooled output height must be greater than 0.");
    PADDLE_ENFORCE_GT(pooled_width, 0,
                      "The pooled output width must be greater than 0.");
    PADDLE_ENFORCE_GT(spatial_scale, 0.0f,
                      "The spatial scale must be greater than 0.");

    // Each ROI produces one (channels, pooled_height, pooled_width) map.
    auto out_dims = input_dims;
    out_dims[0] = rois_dims[0];
    out_dims[1] = input_dims[1];
    out_dims[2] = pooled_height;
    out_dims[3] = pooled_width;

    ctx->SetOutputDim("Out", out_dims);
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
        ctx.device_context());
  }
};

class ROIAlignGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "The GRAD@Out of ROIAlignGradOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
                   "The GRAD@X of ROIAlignGradOp should not be null.");
    ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
        ctx.device_context());
  }
};

class ROIAlignOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(Tensor), "
             "the input of ROIAlignOp. "
             "The format of the input tensor is NCHW, where N is the batch "
             "size, C is the number of input channels, H is the height of "
             "the feature, and W is the width of the feature.");
    AddInput("ROIs",
             "(LoDTensor), "
             "ROIs (Regions of Interest) to pool over. "
             "Should be a 2-D LoDTensor of shape (num_rois, 4) "
             "given as [[x1, y1, x2, y2], ...], where (x1, y1) is the "
             "top-left coordinate and (x2, y2) is the bottom-right "
             "coordinate of an ROI. The batch index of each ROI is carried "
             "by the LoD of this LoDTensor.");
    AddOutput("Out",
              "(Tensor), "
              "the output of ROIAlignOp, a 4-D tensor with shape "
              "(num_rois, channels, pooled_h, pooled_w).");
    AddAttr<float>("spatial_scale",
                   "(float, default 1.0), "
                   "multiplicative spatial scale factor used "
                   "to translate ROI coords from their input scale "
                   "to the scale used when pooling.")
        .SetDefault(1.0);
    AddAttr<int>("pooled_height",
                 "(int, default 1), "
                 "the pooled output height.")
        .SetDefault(1);
    AddAttr<int>("pooled_width",
                 "(int, default 1), "
                 "the pooled output width.")
        .SetDefault(1);
    AddAttr<int>("sampling_ratio",
                 "(int, default -1), "
                 "the number of sampling points in the interpolation grid. "
                 "If <= 0, the grid points are chosen adaptively from "
                 "roi_width and pooled_w, and likewise for the height.")
        .SetDefault(-1);
    AddComment(R"DOC(
ROIAlign operator.

For each ROI, the region (rescaled by spatial_scale) is divided into a
pooled_height x pooled_width grid of bins; each bin is sampled at regularly
spaced points via bilinear interpolation of the input feature map and the
samples are averaged, avoiding the coordinate quantization used by ROIPool.
)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(roi_align, ops::ROIAlignOp, ops::ROIAlignOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(roi_align_grad, ops::ROIAlignGradOp);
REGISTER_OP_CPU_KERNEL(
    roi_align,
    ops::CPUROIAlignOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::CPUROIAlignOpKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    roi_align_grad,
    ops::CPUROIAlignGradOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::CPUROIAlignGradOpKernel<paddle::platform::CPUDeviceContext, double>);
(File diff suppressed because it is too large.)

@@ -0,0 +1,169 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import math
import sys
from op_test import OpTest


class TestROIAlignOp(OpTest):
    def set_data(self):
        self.init_test_case()
        self.make_rois()
        self.calc_roi_align()
        # ROIs are fed as a LoDTensor: the values are the (x1, y1, x2, y2)
        # columns and the LoD groups the ROIs per batch image.
        self.inputs = {'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod)}
        self.attrs = {
            'spatial_scale': self.spatial_scale,
            'pooled_height': self.pooled_height,
            'pooled_width': self.pooled_width,
            'sampling_ratio': self.sampling_ratio
        }

        self.outputs = {'Out': self.out_data}

    def init_test_case(self):
        self.batch_size = 1
        self.channels = 3
        self.height = 8
        self.width = 6

        # n, c, h, w
        self.x_dim = (self.batch_size, self.channels, self.height, self.width)

        self.spatial_scale = 1.0 / 1.0
        self.pooled_height = 2
        self.pooled_width = 2
        self.sampling_ratio = 2

        self.x = np.random.random(self.x_dim).astype('float32')

    def pre_calc(self, x_i, roi_xmin, roi_ymin, roi_bin_grid_h, roi_bin_grid_w,
                 bin_size_h, bin_size_w):
        # For every output bin, record the four neighbouring pixel values
        # (bilinear_pos) and the matching bilinear weights (bilinear_w) of
        # each of the `count` sampling points.
        count = roi_bin_grid_h * roi_bin_grid_w
        bilinear_pos = np.zeros(
            [self.channels, self.pooled_height, self.pooled_width, count, 4],
            np.float32)
        bilinear_w = np.zeros(
            [self.pooled_height, self.pooled_width, count, 4], np.float32)
        for ph in range(self.pooled_height):
            for pw in range(self.pooled_width):
                c = 0
                for iy in range(roi_bin_grid_h):
                    y = roi_ymin + ph * bin_size_h + (iy + 0.5) * \
                        bin_size_h / roi_bin_grid_h
                    for ix in range(roi_bin_grid_w):
                        x = roi_xmin + pw * bin_size_w + (ix + 0.5) * \
                            bin_size_w / roi_bin_grid_w
                        if y < -1.0 or y > self.height or \
                                x < -1.0 or x > self.width:
                            continue
                        if y <= 0:
                            y = 0
                        if x <= 0:
                            x = 0
                        y_low = int(y)
                        x_low = int(x)
                        if y_low >= self.height - 1:
                            y = y_high = y_low = self.height - 1
                        else:
                            y_high = y_low + 1
                        if x_low >= self.width - 1:
                            x = x_high = x_low = self.width - 1
                        else:
                            x_high = x_low + 1
                        ly = y - y_low
                        lx = x - x_low
                        hy = 1 - ly
                        hx = 1 - lx
                        for ch in range(self.channels):
                            bilinear_pos[ch, ph, pw, c, 0] = x_i[ch, y_low,
                                                                 x_low]
                            bilinear_pos[ch, ph, pw, c, 1] = x_i[ch, y_low,
                                                                 x_high]
                            bilinear_pos[ch, ph, pw, c, 2] = x_i[ch, y_high,
                                                                 x_low]
                            bilinear_pos[ch, ph, pw, c, 3] = x_i[ch, y_high,
                                                                 x_high]
                        bilinear_w[ph, pw, c, 0] = hy * hx
                        bilinear_w[ph, pw, c, 1] = hy * lx
                        bilinear_w[ph, pw, c, 2] = ly * hx
                        bilinear_w[ph, pw, c, 3] = ly * lx
                        c = c + 1
        return bilinear_pos, bilinear_w

    def calc_roi_align(self):
        self.out_data = np.zeros((self.rois_num, self.channels,
                                  self.pooled_height, self.pooled_width))

        for i in range(self.rois_num):
            roi = self.rois[i]
            roi_batch_id = int(roi[0])
            x_i = self.x[roi_batch_id]
            # Rescale the ROI from input coordinates to feature-map coordinates.
            roi_xmin = roi[1] * self.spatial_scale
            roi_ymin = roi[2] * self.spatial_scale
            roi_xmax = roi[3] * self.spatial_scale
            roi_ymax = roi[4] * self.spatial_scale
            roi_width = int(max(roi_xmax - roi_xmin, 1))
            roi_height = int(max(roi_ymax - roi_ymin, 1))
            bin_size_h = float(roi_height) / float(self.pooled_height)
            bin_size_w = float(roi_width) / float(self.pooled_width)
            roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else \
                math.ceil(roi_height / self.pooled_height)
            roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else \
                math.ceil(roi_width / self.pooled_width)
            count = int(roi_bin_grid_h * roi_bin_grid_w)
            pre_size = count * self.pooled_width * self.pooled_height
            bilinear_pos, bilinear_w = self.pre_calc(x_i, roi_xmin, roi_ymin,
                                                     int(roi_bin_grid_h),
                                                     int(roi_bin_grid_w),
                                                     bin_size_h, bin_size_w)
            for ch in range(self.channels):
                # Interpolate every sampling point, then average the points
                # that fall inside each output bin.
                align_per_bin = (bilinear_pos[ch] * bilinear_w).sum(axis=-1)
                output_val = align_per_bin.mean(axis=-1)
                self.out_data[i, ch, :, :] = output_val

    def make_rois(self):
        rois = []
        self.rois_lod = [[0]]
        for bno in range(self.batch_size):
            self.rois_lod[0].append(bno + 1)
            for i in range(bno + 1):
                x1 = np.random.random_integers(
                    0, self.width // self.spatial_scale - self.pooled_width)
                y1 = np.random.random_integers(
                    0, self.height // self.spatial_scale - self.pooled_height)

                x2 = np.random.random_integers(x1 + self.pooled_width,
                                               self.width // self.spatial_scale)
                y2 = np.random.random_integers(
                    y1 + self.pooled_height, self.height // self.spatial_scale)

                roi = [bno, x1, y1, x2, y2]
                rois.append(roi)
        self.rois_num = len(rois)
        self.rois = np.array(rois).astype("float32")

    def setUp(self):
        self.op_type = "roi_align"
        self.set_data()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
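With the configuration in init_test_case (X of shape (1, 3, 8, 6), one ROI, 2x2 bins sampled at 2x2 points each), the reference computation produces out_data of shape (1, 3, 2, 2), matching the Out shape inferred by the C++ op. The captured diff ends here; the file presumably closes with the standard unittest entry point, sketched below as an assumption rather than taken from the patch.

if __name__ == '__main__':
    unittest.main()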