Merge pull request #12355 from NHZlX/add_tensorrt_pooling_converter
Add tensorrt pooling converter
commit 85c4912755
@ -0,0 +1,80 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"

namespace paddle {
namespace inference {
namespace tensorrt {

/*
 * Pool2dOp, IPoolingLayer in TRT. This layer doesn't have weights.
 */
class Pool2dOpConverter : public OpConverter {
 public:
  void operator()(const framework::proto::OpDesc& op,
                  const framework::Scope& scope, bool test_mode) override {
    VLOG(4)
        << "convert a fluid pool2d op to tensorrt pool2d layer without bias";
    framework::OpDesc op_desc(op, nullptr);
    // Declare inputs
    PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1);
    PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1);
    auto* input1 = engine_->GetITensor(op_desc.Input("X")[0]);

    std::string pool_type =
        boost::get<std::string>(op_desc.GetAttr("pooling_type"));
    std::vector<int> ksize =
        boost::get<std::vector<int>>(op_desc.GetAttr("ksize"));
    std::vector<int> strides =
        boost::get<std::vector<int>>(op_desc.GetAttr("strides"));
    std::vector<int> paddings =
        boost::get<std::vector<int>>(op_desc.GetAttr("paddings"));

    const nvinfer1::DimsHW nv_ksize(ksize[0], ksize[1]);
    const nvinfer1::DimsHW nv_strides(strides[0], strides[1]);
    const nvinfer1::DimsHW nv_paddings(paddings[0], paddings[1]);

    PADDLE_ENFORCE_EQ(input1->getDimensions().nbDims, 3UL);

    nvinfer1::PoolingType nv_pool_type = nvinfer1::PoolingType::kMAX;
    if (pool_type == "max") {
      nv_pool_type = nvinfer1::PoolingType::kMAX;
    } else if (pool_type == "avg") {
      nv_pool_type = nvinfer1::PoolingType::kAVERAGE;
    } else {
      PADDLE_THROW("TensorRT unsupported pooling type!");
    }

    auto* layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling,
                                       *const_cast<nvinfer1::ITensor*>(input1),
                                       nv_pool_type, nv_ksize);
    PADDLE_ENFORCE_NOT_NULL(layer, "pool layer could not be created.");
    layer->setStride(nv_strides);
    layer->setPadding(nv_paddings);

    auto output_name = op_desc.Output("Out")[0];
    engine_->SetITensor(output_name, layer->getOutput(0));
    if (test_mode) {
      engine_->DeclareOutput(output_name);
    }
  }
};

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle

USE_OP(pool2d);
REGISTER_TRT_OP_CONVERTER(pool2d, Pool2dOpConverter);
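For readers unfamiliar with the `TRT_ENGINE_ADD_LAYER(engine_, Pooling, ...)` macro above: it ultimately goes through TensorRT's network-definition API. Below is a minimal, hypothetical sketch (not part of this patch) of the equivalent raw TensorRT calls for the attribute values the unit test further down uses (max pooling, 2x2 window, stride 2, zero padding). The helper name `AddMaxPool2x2` is made up for illustration, and it assumes an already-constructed `nvinfer1::INetworkDefinition`.

```cpp
#include <NvInfer.h>

// Hypothetical illustration only: roughly the pooling layer the converter
// asks TensorRT to build when pooling_type = "max", ksize = {2, 2},
// strides = {2, 2}, paddings = {0, 0}.
nvinfer1::ITensor* AddMaxPool2x2(nvinfer1::INetworkDefinition* network,
                                 nvinfer1::ITensor* input) {
  // addPooling takes the pooling type and the window size (H, W).
  nvinfer1::IPoolingLayer* layer = network->addPooling(
      *input, nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW(2, 2));
  // Stride and padding are configured separately, mirroring the
  // setStride/setPadding calls in the converter above.
  layer->setStride(nvinfer1::DimsHW(2, 2));
  layer->setPadding(nvinfer1::DimsHW(0, 0));
  return layer->getOutput(0);
}
```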
@ -0,0 +1,60 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <fstream>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h"

namespace paddle {
namespace inference {
namespace tensorrt {

TEST(Pool2dOpConverter, main) {
  framework::Scope scope;
  std::unordered_set<std::string> parameters;
  TRTConvertValidation validator(5, parameters, scope, 1 << 15);

  // The ITensor's Dims should not contain the batch size,
  // so the input and output ITensor Dims here are C * H * W.
  validator.DeclInputVar("pool2d-X", nvinfer1::Dims3(3, 4, 4));
  validator.DeclOutputVar("pool2d-Out", nvinfer1::Dims3(3, 2, 2));

  // Prepare Op description
  framework::OpDesc desc;
  desc.SetType("pool2d");
  desc.SetInput("X", {"pool2d-X"});
  desc.SetOutput("Out", {"pool2d-Out"});

  std::vector<int> ksize({2, 2});
  std::vector<int> strides({2, 2});
  std::vector<int> paddings({0, 0});
  std::string pooling_t = "max";

  desc.SetAttr("pooling_type", pooling_t);
  desc.SetAttr("ksize", ksize);
  desc.SetAttr("strides", strides);
  desc.SetAttr("paddings", paddings);

  LOG(INFO) << "set OP";
  validator.SetOp(*desc.Proto());
  LOG(INFO) << "execute";

  validator.Execute(3);
}

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle

USE_OP(pool2d);
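A quick sanity check (not part of the patch) on the shapes the test hard-codes: with the standard pooling output-size arithmetic, a 4x4 spatial input pooled with a 2x2 window, stride 2, and zero padding yields a 2x2 output, which is why `pool2d-X` is declared as `Dims3(3, 4, 4)` and `pool2d-Out` as `Dims3(3, 2, 2)` (the channel count, 3, is untouched by pooling).

```cpp
#include <cassert>

// Standard pooling output size: floor((in + 2 * pad - ksize) / stride) + 1.
int PooledSize(int in, int ksize, int stride, int pad) {
  return (in + 2 * pad - ksize) / stride + 1;
}

int main() {
  // H = W = 4, ksize = 2, stride = 2, pad = 0  ->  2, matching Dims3(3, 2, 2).
  assert(PooledSize(4, 2, 2, 0) == 2);
  return 0;
}
```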