commit
a275fd6e0c
@ -0,0 +1,239 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "paddle/fluid/framework/ir/cpu_quantize_pass.h"
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include "paddle/fluid/framework/eigen.h"
|
||||
#include "paddle/fluid/string/pretty_log.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
namespace ir {
|
||||
|
||||
namespace {
|
||||
|
||||
// Removes the direct edge a -> b from the graph's adjacency lists: b is
// erased from a's outputs and a from b's inputs. The nodes themselves stay
// in the graph.
void UnlinkNodes(ir::Node* a, ir::Node* b) {
  auto& outs = a->outputs;
  auto& ins = b->inputs;
  outs.erase(std::remove(outs.begin(), outs.end(), b), outs.end());
  ins.erase(std::remove(ins.begin(), ins.end(), a), ins.end());
}
|
||||
|
||||
} // namespace
|
||||
|
||||
enum { U8_MAX = 255, S8_MAX = 127 };
|
||||
|
||||
using EigenVectorArrayMap = Eigen::Map<Eigen::Array<double, Eigen::Dynamic, 1>>;
|
||||
using string::PrettyLogDetail;
|
||||
|
||||
// Inserts a "quantize" op between `input` and `op`:
//   input -> quantize -> quantize_out -> op
// The quantize op scales values by scale_to_one * (U8_MAX or S8_MAX).
// If scale_attr_name is non-empty, the computed scale is also stored as an
// attribute on `op` (e.g. "Scale_in" for conv2d).
void CPUQuantizePass::QuantizeInput(Graph* g, Node* op, Node* input,
                                    std::string input_name, double scale_to_one,
                                    bool is_unsigned,
                                    std::string scale_attr_name) const {
  unsigned max = is_unsigned ? U8_MAX : S8_MAX;
  float scale = scale_to_one * max;

  // Create quantize output variable
  VarDesc quantize_out_desc(patterns::PDNodeName("quantize", "out"));
  auto* quantize_out_node = g->CreateVarNode(&quantize_out_desc);

  // create a quantize op node
  OpDesc q_desc;
  q_desc.SetType("quantize");
  q_desc.SetInput("Input", std::vector<std::string>({input->Name()}));
  q_desc.SetOutput("Output",
                   std::vector<std::string>({quantize_out_node->Name()}));
  q_desc.SetAttr("Scale", scale);
  // Signed quantization is flagged to the quantize kernel via this attribute.
  q_desc.SetAttr("is_negative_input", !is_unsigned);
  auto quantize_op = g->CreateOpNode(&q_desc);  // OpDesc will be copied.

  // update op's input: it now reads the quantized variable instead.
  op->Op()->SetInput(input_name,
                     std::vector<std::string>({quantize_out_node->Name()}));

  // link quantize op: drop the old input->op edge, then wire the chain.
  UnlinkNodes(input, op);
  IR_NODE_LINK_TO(input, quantize_op);
  IR_NODE_LINK_TO(quantize_op, quantize_out_node);
  IR_NODE_LINK_TO(quantize_out_node, op);

  if (!scale_attr_name.empty()) op->Op()->SetAttr(scale_attr_name, scale);
}
|
||||
|
||||
// Inserts a "dequantize" op between `op` and `output`:
//   op -> dequantize_in -> dequantize -> output
// Mirrors QuantizeInput: the dequantize op divides by
// scale_to_one * (U8_MAX or S8_MAX), and the scale is optionally stored on
// `op` under scale_attr_name (e.g. "Scale_out").
void CPUQuantizePass::DequantizeOutput(Graph* g, Node* op, Node* output,
                                       std::string output_name,
                                       double scale_to_one, bool is_unsigned,
                                       std::string scale_attr_name) const {
  unsigned max = is_unsigned ? U8_MAX : S8_MAX;
  float scale = scale_to_one * max;

  // Create dequantize input variable
  VarDesc dequantize_in_desc(patterns::PDNodeName("dequantize", "in"));
  auto* dequantize_in_node = g->CreateVarNode(&dequantize_in_desc);

  // create a dequantize op node for output.
  OpDesc deq_desc;
  deq_desc.SetType("dequantize");
  deq_desc.SetInput("Input",
                    std::vector<std::string>({dequantize_in_node->Name()}));
  deq_desc.SetOutput("Output", std::vector<std::string>({output->Name()}));
  deq_desc.SetAttr("Scale", scale);
  auto dequantize_op = g->CreateOpNode(&deq_desc);  // OpDesc will be copied.

  // update op's output: it now writes the intermediate int variable.
  op->Op()->SetOutput(output_name,
                      std::vector<std::string>({dequantize_in_node->Name()}));

  // link dequantize op: drop the old op->output edge, then wire the chain.
  UnlinkNodes(op, output);
  IR_NODE_LINK_TO(op, dequantize_in_node);
  IR_NODE_LINK_TO(dequantize_in_node, dequantize_op);
  IR_NODE_LINK_TO(dequantize_op, output);

  if (!scale_attr_name.empty()) op->Op()->SetAttr(scale_attr_name, scale);
}
|
||||
|
||||
// Quantizes every conv2d matched by the ConvResidual pattern that carries
// use_quantizer == true: its Input (and ResidualData, when present) gets a
// quantize op, its Output gets a dequantize op, and per-channel weight
// scales are stored as the "Scale_weights" attribute.
void CPUQuantizePass::QuantizeConv(Graph* graph,
                                   bool with_residual_data) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::ConvResidual conv_pattern{pattern, name_scope_};
  conv_pattern(with_residual_data);

  int quantize_conv_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize conv2d op";
    GET_IR_NODE_FROM_SUBGRAPH(conv_op, conv_op, conv_pattern);
    auto* conv_op_desc = conv_op->Op();

    // skip if should not be quantized
    if (!conv_op_desc->HasAttr("use_quantizer") ||
        !boost::get<bool>(conv_op_desc->GetAttr("use_quantizer")))
      return;

    GET_IR_NODE_FROM_SUBGRAPH(conv_filter, conv_filter, conv_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(conv_input, conv_input, conv_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(conv_output, conv_output, conv_pattern);

    // get scales calculated after warmup, they scale variables to MAX=1.0
    // NOTE(review): `auto` copies the whole scale map on every matched
    // subgraph; a (const) reference would avoid that — confirm Get<>'s
    // return semantics before changing.
    auto scales = Get<VarQuantScale>("quant_var_scales");

    auto input_scale = scales[conv_input->Name()].second.data<double>()[0];
    bool is_input_unsigned = scales[conv_input->Name()].first;
    QuantizeInput(g, conv_op, conv_input, "Input", input_scale,
                  is_input_unsigned, "Scale_in");

    // Weight scales are per-channel: scale the whole tensor by S8_MAX and
    // export it as a float vector attribute.
    auto filter_scale_tensor = scales[conv_filter->Name()].second;
    EigenVectorArrayMap eigen_tensor{filter_scale_tensor.data<double>(),
                                     filter_scale_tensor.numel(), 1};
    eigen_tensor *= static_cast<double>(S8_MAX);
    std::vector<float> filter_scale{
        filter_scale_tensor.data<double>(),
        filter_scale_tensor.data<double>() + filter_scale_tensor.numel()};

    conv_op->Op()->SetAttr("Scale_weights", filter_scale);

    if (with_residual_data) {
      GET_IR_NODE_FROM_SUBGRAPH(conv_residual_data, conv_residual_data,
                                conv_pattern);
      auto residual_scale =
          scales[conv_residual_data->Name()].second.data<double>()[0];
      bool is_residual_unsigned = scales[conv_residual_data->Name()].first;

      QuantizeInput(g, conv_op, conv_residual_data, "ResidualData",
                    residual_scale, is_residual_unsigned, "Scale_in_eltwise");
    }

    auto output_scale = scales[conv_output->Name()].second.data<double>()[0];
    bool is_output_unsigned = scales[conv_output->Name()].first;
    DequantizeOutput(g, conv_op, conv_output, "Output", output_scale,
                     is_output_unsigned, "Scale_out");

    ++quantize_conv_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_conv_count);

  std::stringstream msg_ss;
  msg_ss << "---    quantized " << quantize_conv_count << " conv2d ops";
  if (with_residual_data) msg_ss << " with residual connection";
  PrettyLogDetail(msg_ss.str().c_str());
}
|
||||
|
||||
// Quantizes every pool2d matched by the Pool pattern that carries
// use_quantizer == true: its X input gets a quantize op and its Out output
// gets a dequantize op. No scale attributes are written on the pool op.
void CPUQuantizePass::QuantizePool(Graph* graph) const {
  GraphPatternDetector gpd;
  auto pattern = gpd.mutable_pattern();
  patterns::Pool pool_pattern{pattern, name_scope_};
  pool_pattern();

  int quantize_pool_count = 0;
  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    VLOG(4) << "Quantize pool2d op";
    GET_IR_NODE_FROM_SUBGRAPH(pool_op, pool_op, pool_pattern);
    auto* pool_op_desc = pool_op->Op();

    // skip if should not be quantized
    if (!pool_op_desc->HasAttr("use_quantizer") ||
        !boost::get<bool>(pool_op_desc->GetAttr("use_quantizer")))
      return;

    GET_IR_NODE_FROM_SUBGRAPH(pool_input, pool_input, pool_pattern);
    GET_IR_NODE_FROM_SUBGRAPH(pool_output, pool_output, pool_pattern);

    // get scales calculated after warmup, they scale variables to MAX=1.0
    auto scales = Get<VarQuantScale>("quant_var_scales");

    auto input_scale = scales[pool_input->Name()].second.data<double>()[0];
    bool is_input_unsigned = scales[pool_input->Name()].first;
    QuantizeInput(g, pool_op, pool_input, "X", input_scale, is_input_unsigned);

    auto output_scale = scales[pool_output->Name()].second.data<double>()[0];
    bool is_output_unsigned = scales[pool_output->Name()].first;
    DequantizeOutput(g, pool_op, pool_output, "Out", output_scale,
                     is_output_unsigned);

    ++quantize_pool_count;
  };

  gpd(graph, handler);
  AddStatis(quantize_pool_count);

  PrettyLogDetail("---    quantized %d pool2d ops", quantize_pool_count);
}
|
||||
|
||||
// Pass entry point: quantizes conv2d (with and without residual data) and
// pool2d ops. Requires a non-null graph and an attached parameter scope.
std::unique_ptr<ir::Graph> CPUQuantizePass::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  VLOG(3) << "Quantizing the graph.";
  PADDLE_ENFORCE(graph.get());
  FusePassBase::Init(name_scope_, graph.get());

  PADDLE_ENFORCE(param_scope());

  // Convs with residual connections are matched first; the plain-conv run
  // then picks up the remaining use_quantizer convs.
  QuantizeConv(graph.get(), true /* with_residual_data */);
  QuantizeConv(graph.get());
  QuantizePool(graph.get());

  return graph;
}
|
||||
|
||||
} // namespace ir
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
||||
|
||||
REGISTER_PASS(cpu_quantize_pass, paddle::framework::ir::CPUQuantizePass)
|
||||
.RequirePassAttr("quant_var_scales");
|
@ -0,0 +1,66 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
|
||||
#include "paddle/fluid/framework/ir/graph.h"
|
||||
#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
namespace ir {
|
||||
|
||||
/*
|
||||
* Map variable name to tensor of scaling factors scaling it to MAX=1.0.
|
||||
* bool denotes whether quantization of the variable should be done to unsigned
|
||||
* type.
|
||||
*/
|
||||
using VarQuantScale =
|
||||
std::unordered_map<std::string, std::pair<bool, LoDTensor>>;
|
||||
|
||||
/*
|
||||
* Quantize all supported operators.
|
||||
*/
|
||||
class CPUQuantizePass : public FusePassBase {
 public:
  virtual ~CPUQuantizePass() {}

 protected:
  // Entry point: quantizes all supported operators in the graph. Requires
  // the "quant_var_scales" pass attribute (a VarQuantScale map).
  std::unique_ptr<ir::Graph> ApplyImpl(
      std::unique_ptr<ir::Graph> graph) const override;

  // Quantizes conv2d ops; when with_residual_data is true, matches convs
  // fused with a residual connection and quantizes ResidualData as well.
  void QuantizeConv(Graph* graph, bool with_residual_data = false) const;

  // Quantizes pool2d ops.
  void QuantizePool(Graph* graph) const;

  // Inserts a quantize op feeding `op`'s input slot `input_name` and rewires
  // the graph; optionally stores the resulting scale on `op` under
  // scale_attr_name.
  void QuantizeInput(Graph* g, Node* op, Node* input, std::string input_name,
                     double scale_to_one, bool is_unsigned,
                     std::string scale_attr_name = "") const;

  // Inserts a dequantize op consuming `op`'s output slot `output_name` and
  // rewires the graph; optionally stores the resulting scale on `op` under
  // scale_attr_name.
  void DequantizeOutput(Graph* g, Node* op, Node* output,
                        std::string output_name, double scale_to_one,
                        bool is_unsigned,
                        std::string scale_attr_name = "") const;

  // Name scope used for pattern-detector node naming.
  const std::string name_scope_{"quantize"};
};
|
||||
|
||||
} // namespace ir
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
@ -0,0 +1,211 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "paddle/fluid/framework/ir/cpu_quantize_pass.h"
|
||||
#include <gtest/gtest.h>
|
||||
#include "paddle/fluid/framework/naive_executor.h"
|
||||
#include "paddle/fluid/platform/place.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
namespace ir {
|
||||
|
||||
// Appends an operator of the given type to block 0 of `prog` and wires its
// inputs/outputs. Only the op kinds used by these tests are handled; conv2d
// additionally gets the scale attributes the quantize pass reads/writes.
void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
           const std::vector<std::string>& inputs,
           const std::vector<std::string>& outputs, bool use_mkldnn,
           bool use_quantizer = false) {
  auto* op = prog->MutableBlock(0)->AppendOp();
  op->SetType(type);
  op->SetAttr("use_mkldnn", use_mkldnn);
  op->SetAttr("name", name);
  if (type == "conv2d") {
    op->SetInput("Input", {inputs[0]});
    op->SetInput("Filter", {inputs[1]});
    // Bias and ResidualData are optional; absent slots are set empty.
    op->SetInput("Bias", inputs.size() > 2
                             ? std::vector<std::string>{inputs[2]}
                             : std::vector<std::string>{});
    const bool has_residual = inputs.size() > 3;
    op->SetInput("ResidualData", has_residual
                                     ? std::vector<std::string>{inputs[3]}
                                     : std::vector<std::string>{});
    op->SetAttr("fuse_residual_connection", has_residual);
    op->SetOutput("Output", {outputs[0]});
    op->SetAttr("use_quantizer", use_quantizer);
    op->SetAttr("Scale_in", 1.0f);
    op->SetAttr("Scale_out", 1.0f);
    op->SetAttr("Scale_weights", std::vector<float>{1.0f});
  } else if (type == "pool2d") {
    op->SetInput("X", {inputs[0]});
    op->SetOutput("Out", {outputs[0]});
    op->SetAttr("use_quantizer", use_quantizer);
  } else if (type == "dropout") {
    op->SetInput("X", {inputs[0]});
    op->SetOutput("Out", {outputs[0]});
  } else if (type == "fc") {
    op->SetInput("Input", {inputs[0]});
    if (inputs.size() > 1) op->SetInput("W", {inputs[1]});
    if (inputs.size() > 2) op->SetInput("Bias", {inputs[2]});
    op->SetOutput("Out", {outputs[0]});
  }
}
|
||||
|
||||
static const std::initializer_list<std::string> variable_names{
|
||||
"a", "w1", "c", "d", "w2", "e", "f", "g",
|
||||
"h", "w3", "b1", "i", "j", "w4", "b2"};
|
||||
// (a,w1)->Conv1->c and c->Pool1->d
|
||||
//
|
||||
// (d,w2)->Conv2->e and e->Pool2->f
|
||||
//
|
||||
// d->Dropout1->g and g->Fc1->h and (h,w3,b1,i)->Conv3->j
|
||||
//
|
||||
// (c,w4, b2)->Conv4->i
|
||||
// Builds the test program (topology sketched in the comment above).
// Variables whose names start with "w" or "b" (weights/biases) are marked
// persistable.
ProgramDesc BuildProgramDesc(bool use_mkldnn, bool use_quantizer) {
  ProgramDesc prog;
  for (auto& v : variable_names) {
    auto* var = prog.MutableBlock(0)->Var(v);
    if (v.find("w") == 0 || v.find("b") == 0) {
      var->SetPersistable(true);
    }
  }

  SetOp(&prog, "conv2d", "Conv1", {"a", "w1"}, {"c"}, use_mkldnn,
        use_quantizer);
  SetOp(&prog, "pool2d", "Pool1", {"c"}, {"d"}, use_mkldnn, use_quantizer);

  SetOp(&prog, "conv2d", "Conv2", {"d", "w2"}, {"e"}, use_mkldnn,
        use_quantizer);
  SetOp(&prog, "pool2d", "Pool2", {"e"}, {"f"}, use_mkldnn, use_quantizer);

  // Dropout1 and Fc1 are built without use_quantizer — they must stay
  // untouched by the pass.
  SetOp(&prog, "dropout", "Dropout1", {"d"}, {"g"}, use_mkldnn);
  SetOp(&prog, "fc", "Fc1", {"g"}, {"h"}, use_mkldnn);
  SetOp(&prog, "conv2d", "Conv3", {"h", "w3", "b1", "i"}, {"j"}, use_mkldnn,
        use_quantizer);

  // Conv4 reads "c" (with weights w4 and bias b2) and produces "i".
  SetOp(&prog, "conv2d", "Conv4", {"c", "w4", "b2"}, {"i"}, use_mkldnn,
        use_quantizer);

  return prog;
}
|
||||
|
||||
// Creates variable `var_name` in `scope` holding a 1-element FP32 LoDTensor,
// so the pass can look it up without hitting an uninitialized holder.
void InitTensorHolder(Scope* scope, const paddle::platform::Place& place,
                      const char* var_name) {
  auto x = scope->Var(var_name);
  auto tensor = x->GetMutable<LoDTensor>();
  tensor->mutable_data(place, proto::VarType::FP32,
                       ::paddle::memory::Allocator::kDefault, 1);
}
|
||||
|
||||
// Runs cpu_quantize_pass on `prog` and verifies: the op counts per type, the
// number of inserted quantize/dequantize ops, the total node-count delta,
// and the scale attributes written onto conv2d ops.
void MainTest(const ProgramDesc& prog, int conv_count, int pool_count,
              int quant_count, int dequant_count, int added_nodes_count,
              float scale) {
  std::unique_ptr<ir::Graph> graph(new ir::Graph(prog));

  // Init scope, as it is used in pass
  auto place = paddle::platform::CPUPlace();
  NaiveExecutor exe{place};
  Scope scope;
  exe.CreateVariables(prog, 0, true, &scope);

  // Heap-allocated: ownership is handed to the pass via pass->Set below.
  auto* scales = new VarQuantScale();

  // Every variable gets scale-to-one factor 2.0 and first == false
  // (the pass reads `first` as is_unsigned, so this is signed quantization).
  for (auto& v : variable_names) {
    InitTensorHolder(&scope, place, v.c_str());
    LoDTensor tensor;
    tensor.Resize({1});
    auto* ptr = tensor.mutable_data<double>(place);
    ptr[0] = 2.0;

    (*scales)[v] = std::make_pair(false, std::move(tensor));
  }

  graph->Set(kParamScopeAttr, new framework::Scope*(&scope));

  auto pass = PassRegistry::Instance().Get("cpu_quantize_pass");
  pass->Set("quant_var_scales", scales);

  int original_nodes_num = graph->Nodes().size();

  graph = pass->Apply(std::move(graph));

  int current_nodes_num = graph->Nodes().size();

  // Walk the transformed graph: count op kinds and check that every conv2d
  // carries the expected Scale_in / Scale_out / Scale_weights values.
  int quantize_nodes_count = 0;
  int dequantize_nodes_count = 0;
  int conv2d_nodes_count = 0;
  int pool2d_nodes_count = 0;
  for (auto* node : graph->Nodes()) {
    if (node->IsOp()) {
      auto* op = node->Op();
      if (op->Type() == "conv2d") {
        conv2d_nodes_count++;
        auto op_name = boost::get<std::string>(op->GetAttr("name"));
        EXPECT_EQ(boost::get<float>(op->GetAttr("Scale_in")), scale)
            << "Scale_in for node '" + op_name + "'.";
        EXPECT_EQ(boost::get<float>(op->GetAttr("Scale_out")), scale)
            << "Scale_out for node '" + op_name + "'.";
        EXPECT_EQ(
            boost::get<std::vector<float>>(op->GetAttr("Scale_weights"))[0],
            scale)
            << "Scale_weights for node '" + op_name + "'.";
      } else if (op->Type() == "pool2d") {
        pool2d_nodes_count++;
      } else if (op->Type() == "quantize") {
        quantize_nodes_count++;
      } else if (op->Type() == "dequantize") {
        dequantize_nodes_count++;
      }
    }
  }
  EXPECT_EQ(conv2d_nodes_count, conv_count);
  EXPECT_EQ(pool2d_nodes_count, pool_count);
  EXPECT_EQ(quantize_nodes_count, quant_count);
  EXPECT_EQ(dequantize_nodes_count, dequant_count);
  EXPECT_EQ(original_nodes_num + added_nodes_count, current_nodes_num);
}
|
||||
|
||||
TEST(CpuQuantizePass, quantize) {
  bool use_mkldnn = true;
  bool use_quantizer = true;
  // (a->QUANT1->IN1,w1)->Conv1->OUT1->DEQUANT1->c and
  // c->QUANT2->IN2->Pool1->OUT2->DEQUANT2->d
  //
  // (d->QUANT3->IN3,w2)->Conv2->OUT3->DEQUANT3->e and
  // e->QUANT4->IN4->Pool2->OUT4->DEQUANT4->f
  //
  // d->Dropout1->g and g->Fc1->h and
  // (h->QUANT5->IN5,w3,b1,i->QUANT6->IN6)->Conv3->OUT5->DEQUANT5->j
  //
  // Conv4 reads "c" per BuildProgramDesc:
  // (c->QUANT7->IN7,w4, b2)->Conv4->DEQUANT6->OUT6->i
  // Insert nodes: 7 Quant + 7 IN + 6 OUT + 6 DEQUANT
  int added_nodes = 7 + 7 + 6 + 6;
  // Expected scale attribute value: scale_to_one (2.0) * S8_MAX (127).
  MainTest(BuildProgramDesc(use_mkldnn, use_quantizer), 4, 2, 7, 6, added_nodes,
           2.0f * 127);
}
|
||||
|
||||
// With use_quantizer == false nothing is quantized: no quantize/dequantize
// nodes are inserted and conv2d scale attributes keep their default 1.0f.
TEST(CpuQuantizePass, do_not_quantize) {
  bool use_mkldnn = true;
  bool use_quantizer = false;
  int added_nodes = 0;
  MainTest(BuildProgramDesc(use_mkldnn, use_quantizer), 4, 2, 0, 0, added_nodes,
           1.0f);
}
|
||||
|
||||
} // namespace ir
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
||||
|
||||
USE_PASS(cpu_quantize_pass);
|
@ -0,0 +1,167 @@
|
||||
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "paddle/fluid/operators/detection/yolo_box_op.h"
|
||||
#include "paddle/fluid/framework/op_registry.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace operators {
|
||||
|
||||
using framework::Tensor;
|
||||
|
||||
class YoloBoxOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  // Validates inputs/attributes and sets output shapes:
  //   Boxes:  [N, H * W * anchor_num, 4]
  //   Scores: [N, H * W * anchor_num, class_num]
  // where anchor_num = anchors.size() / 2 (anchors are width/height pairs).
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of YoloBoxOp should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("ImgSize"),
                   "Input(ImgSize) of YoloBoxOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Boxes"),
                   "Output(Boxes) of YoloBoxOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Scores"),
                   "Output(Scores) of YoloBoxOp should not be null.");

    auto dim_x = ctx->GetInputDim("X");
    auto dim_imgsize = ctx->GetInputDim("ImgSize");
    auto anchors = ctx->Attrs().Get<std::vector<int>>("anchors");
    int anchor_num = anchors.size() / 2;
    auto class_num = ctx->Attrs().Get<int>("class_num");

    // X is [N, anchor_num * (5 + class_num), H, W]; ImgSize is [N, 2].
    PADDLE_ENFORCE_EQ(dim_x.size(), 4, "Input(X) should be a 4-D tensor.");
    PADDLE_ENFORCE_EQ(
        dim_x[1], anchor_num * (5 + class_num),
        "Input(X) dim[1] should be equal to (anchor_mask_number * (5 "
        "+ class_num)).");
    PADDLE_ENFORCE_EQ(dim_imgsize.size(), 2,
                      "Input(ImgSize) should be a 2-D tensor.");
    PADDLE_ENFORCE_EQ(
        dim_imgsize[0], dim_x[0],
        "Input(ImgSize) dim[0] and Input(X) dim[0] should be same.");
    PADDLE_ENFORCE_EQ(dim_imgsize[1], 2, "Input(ImgSize) dim[1] should be 2.");
    PADDLE_ENFORCE_GT(anchors.size(), 0,
                      "Attr(anchors) length should be greater than 0.");
    PADDLE_ENFORCE_EQ(anchors.size() % 2, 0,
                      "Attr(anchors) length should be even integer.");
    PADDLE_ENFORCE_GT(class_num, 0,
                      "Attr(class_num) should be an integer greater than 0.");

    // One box per (grid cell, anchor) pair.
    int box_num = dim_x[2] * dim_x[3] * anchor_num;
    std::vector<int64_t> dim_boxes({dim_x[0], box_num, 4});
    ctx->SetOutputDim("Boxes", framework::make_ddim(dim_boxes));

    std::vector<int64_t> dim_scores({dim_x[0], box_num, class_num});
    ctx->SetOutputDim("Scores", framework::make_ddim(dim_scores));
  }

 protected:
  // Kernel type follows the data type of X on the op's place.
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(ctx.Input<Tensor>("X")->type(),
                                   ctx.GetPlace());
  }
};
|
||||
|
||||
// Declares the yolo_box op's inputs, outputs, attributes and user-facing
// documentation. Fixes typos in the emitted doc strings ("thrid" -> "third",
// "classifcation" -> "classification"); behavior is otherwise unchanged.
class YoloBoxOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input tensor of YoloBox operator is a 4-D tensor with "
             "shape of [N, C, H, W]. The second dimension(C) stores "
             "box locations, confidence score and classification one-hot "
             "keys of each anchor box. Generally, X should be the output "
             "of YOLOv3 network.");
    AddInput("ImgSize",
             "The image size tensor of YoloBox operator, "
             "This is a 2-D tensor with shape of [N, 2]. This tensor holds "
             "height and width of each input image used for resizing output "
             "box in input image scale.");
    AddOutput("Boxes",
              "The output tensor of detection boxes of YoloBox operator, "
              "This is a 3-D tensor with shape of [N, M, 4], N is the "
              "batch num, M is output box number, and the 3rd dimension "
              "stores [xmin, ymin, xmax, ymax] coordinates of boxes.");
    AddOutput("Scores",
              "The output tensor of detection boxes scores of YoloBox "
              "operator, This is a 3-D tensor with shape of "
              "[N, M, :attr:`class_num`], N is the batch num, M is "
              "output box number.");

    AddAttr<int>("class_num", "The number of classes to predict.");
    AddAttr<std::vector<int>>("anchors",
                              "The anchor width and height, "
                              "it will be parsed pair by pair.")
        .SetDefault(std::vector<int>{});
    AddAttr<int>("downsample_ratio",
                 "The downsample ratio from network input to YoloBox operator "
                 "input, so 32, 16, 8 should be set for the first, second, "
                 "and third YoloBox operators.")
        .SetDefault(32);
    AddAttr<float>("conf_thresh",
                   "The confidence scores threshold of detection boxes. "
                   "Boxes with confidence scores under threshold should "
                   "be ignored.")
        .SetDefault(0.01);
    AddComment(R"DOC(
         This operator generates YOLO detection boxes from output of YOLOv3 network.

         The output of previous network is in shape [N, C, H, W], while H and W
         should be the same, H and W specify the grid size, each grid point predict
         given number boxes, this given number, which following will be represented as S,
         is specified by the number of anchors. In the second dimension(the channel
         dimension), C should be equal to S * (5 + class_num), class_num is the object
         category number of source dataset(such as 80 in coco dataset), so the
         second(channel) dimension, apart from 4 box location coordinates x, y, w, h,
         also includes confidence score of the box and class one-hot key of each anchor
         box.

         Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box
         predictions should be as follows:

         $$
         b_x = \\sigma(t_x) + c_x
         $$
         $$
         b_y = \\sigma(t_y) + c_y
         $$
         $$
         b_w = p_w e^{t_w}
         $$
         $$
         b_h = p_h e^{t_h}
         $$

         in the equation above, :math:`c_x, c_y` is the left top corner of current grid
         and :math:`p_w, p_h` is specified by anchors.

         The logistic regression value of the 5th channel of each anchor prediction boxes
         represents the confidence score of each prediction box, and the logistic
         regression value of the last :attr:`class_num` channels of each anchor prediction
         boxes represents the classification scores. Boxes with confidence scores less than
         :attr:`conf_thresh` should be ignored, and box final scores is the product of
         confidence scores and classification scores.

         $$
         score_{pred} = score_{conf} * score_{class}
         $$

         )DOC");
  }
};
|
||||
|
||||
} // namespace operators
|
||||
} // namespace paddle
|
||||
|
||||
namespace ops = paddle::operators;
|
||||
REGISTER_OPERATOR(yolo_box, ops::YoloBoxOp, ops::YoloBoxOpMaker,
|
||||
paddle::framework::EmptyGradOpMaker);
|
||||
REGISTER_OP_CPU_KERNEL(yolo_box, ops::YoloBoxKernel<float>,
|
||||
ops::YoloBoxKernel<double>);
|
@ -0,0 +1,120 @@
|
||||
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "paddle/fluid/operators/detection/yolo_box_op.h"
|
||||
#include "paddle/fluid/operators/math/math_function.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace operators {
|
||||
|
||||
using Tensor = framework::Tensor;
|
||||
|
||||
// CUDA kernel computing YOLO detection boxes and per-class scores.
// One logical work item per (image, anchor, grid-row, grid-col) tuple;
// a grid-stride loop covers all n * box_num items regardless of launch size.
template <typename T>
__global__ void KeYoloBoxFw(const T* input, const int* imgsize, T* boxes,
                            T* scores, const float conf_thresh,
                            const int* anchors, const int n, const int h,
                            const int w, const int an_num, const int class_num,
                            const int box_num, int input_size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  T box[4];
  for (; tid < n * box_num; tid += stride) {
    int grid_num = h * w;
    // Decompose the flat index: i = image, j = anchor, (k, l) = grid cell.
    int i = tid / box_num;
    int j = (tid % box_num) / grid_num;
    int k = (tid % grid_num) / w;
    int l = tid % w;

    int an_stride = (5 + class_num) * grid_num;
    // imgsize holds [height, width] per image.
    int img_height = imgsize[2 * i];
    int img_width = imgsize[2 * i + 1];

    // Objectness confidence lives at channel offset 4 of each anchor slot.
    int obj_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 4);
    T conf = sigmoid<T>(input[obj_idx]);
    // Low-confidence boxes keep their zero-initialized outputs (the caller
    // zero-fills both output buffers before launching this kernel).
    if (conf < conf_thresh) {
      continue;
    }

    int box_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 0);
    GetYoloBox<T>(box, input, anchors, l, k, j, h, input_size, box_idx,
                  grid_num, img_height, img_width);
    box_idx = (i * box_num + j * grid_num + k * w + l) * 4;
    CalcDetectionBox<T>(boxes, box, box_idx, img_height, img_width);

    // Class scores start at channel offset 5; scaled by the confidence.
    int label_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 5);
    int score_idx = (i * box_num + j * grid_num + k * w + l) * class_num;
    CalcLabelScore<T>(scores, input, label_idx, score_idx, class_num, conf,
                      grid_num);
  }
}
|
||||
|
||||
template <typename T>
|
||||
class YoloBoxOpCUDAKernel : public framework::OpKernel<T> {
|
||||
public:
|
||||
void Compute(const framework::ExecutionContext& ctx) const override {
|
||||
auto* input = ctx.Input<Tensor>("X");
|
||||
auto* img_size = ctx.Input<Tensor>("ImgSize");
|
||||
auto* boxes = ctx.Output<Tensor>("Boxes");
|
||||
auto* scores = ctx.Output<Tensor>("Scores");
|
||||
|
||||
auto anchors = ctx.Attr<std::vector<int>>("anchors");
|
||||
int class_num = ctx.Attr<int>("class_num");
|
||||
float conf_thresh = ctx.Attr<float>("conf_thresh");
|
||||
int downsample_ratio = ctx.Attr<int>("downsample_ratio");
|
||||
|
||||
const int n = input->dims()[0];
|
||||
const int h = input->dims()[2];
|
||||
const int w = input->dims()[3];
|
||||
const int box_num = boxes->dims()[1];
|
||||
const int an_num = anchors.size() / 2;
|
||||
int input_size = downsample_ratio * h;
|
||||
|
||||
auto& dev_ctx = ctx.cuda_device_context();
|
||||
auto& allocator =
|
||||
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
|
||||
int bytes = sizeof(int) * anchors.size();
|
||||
auto anchors_ptr = allocator.Allocate(sizeof(int) * anchors.size());
|
||||
int* anchors_data = reinterpret_cast<int*>(anchors_ptr->ptr());
|
||||
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
|
||||
const auto cplace = platform::CPUPlace();
|
||||
memory::Copy(gplace, anchors_data, cplace, anchors.data(), bytes,
|
||||
dev_ctx.stream());
|
||||
|
||||
const T* input_data = input->data<T>();
|
||||
const int* imgsize_data = img_size->data<int>();
|
||||
T* boxes_data = boxes->mutable_data<T>({n, box_num, 4}, ctx.GetPlace());
|
||||
T* scores_data =
|
||||
scores->mutable_data<T>({n, box_num, class_num}, ctx.GetPlace());
|
||||
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
|
||||
set_zero(dev_ctx, boxes, static_cast<T>(0));
|
||||
set_zero(dev_ctx, scores, static_cast<T>(0));
|
||||
|
||||
int grid_dim = (n * box_num + 512 - 1) / 512;
|
||||
grid_dim = grid_dim > 8 ? 8 : grid_dim;
|
||||
|
||||
KeYoloBoxFw<T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
|
||||
input_data, imgsize_data, boxes_data, scores_data, conf_thresh,
|
||||
anchors_data, n, h, w, an_num, class_num, box_num, input_size);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace operators
|
||||
} // namespace paddle
|
||||
|
||||
namespace ops = paddle::operators;
// Register the CUDA compute kernels for yolo_box with float and double
// element types.
REGISTER_OP_CUDA_KERNEL(yolo_box, ops::YoloBoxOpCUDAKernel<float>,
                        ops::YoloBoxOpCUDAKernel<double>);
|
@ -0,0 +1,149 @@
|
||||
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#pragma once
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
#include "paddle/fluid/framework/op_registry.h"
|
||||
#include "paddle/fluid/platform/hostdevice.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace operators {
|
||||
|
||||
using Tensor = framework::Tensor;
|
||||
|
||||
// Logistic function: maps any real x into the open interval (0, 1).
template <typename T>
HOSTDEVICE inline T sigmoid(T x) {
  const T e = std::exp(-x);
  return 1.0 / (1.0 + e);
}
|
||||
|
||||
template <typename T>
|
||||
HOSTDEVICE inline void GetYoloBox(T* box, const T* x, const int* anchors, int i,
|
||||
int j, int an_idx, int grid_size,
|
||||
int input_size, int index, int stride,
|
||||
int img_height, int img_width) {
|
||||
box[0] = (i + sigmoid<T>(x[index])) * img_width / grid_size;
|
||||
box[1] = (j + sigmoid<T>(x[index + stride])) * img_height / grid_size;
|
||||
box[2] = std::exp(x[index + 2 * stride]) * anchors[2 * an_idx] * img_width /
|
||||
input_size;
|
||||
box[3] = std::exp(x[index + 3 * stride]) * anchors[2 * an_idx + 1] *
|
||||
img_height / input_size;
|
||||
}
|
||||
|
||||
// Flattened offset of entry `entry` (0..class_num+4) for anchor `an_idx` at
// spatial position `hw_idx` of sample `batch`. Memory layout is
// [batch][anchor][entry][h*w], so anchors are `an_stride` apart and entries
// are `stride` apart.
HOSTDEVICE inline int GetEntryIndex(int batch, int an_idx, int hw_idx,
                                    int an_num, int an_stride, int stride,
                                    int entry) {
  const int anchor_base = (batch * an_num + an_idx) * an_stride;
  return anchor_base + entry * stride + hw_idx;
}
|
||||
|
||||
// Convert a center-form box (cx, cy, w, h) into corner form
// (x_min, y_min, x_max, y_max) at boxes[box_idx..box_idx+3], clipping the
// corners to the image rectangle [0, img_width-1] x [0, img_height-1].
template <typename T>
HOSTDEVICE inline void CalcDetectionBox(T* boxes, T* box, const int box_idx,
                                        const int img_height,
                                        const int img_width) {
  const T half_w = box[2] / 2;
  const T half_h = box[3] / 2;
  T x_min = box[0] - half_w;
  T y_min = box[1] - half_h;
  T x_max = box[0] + half_w;
  T y_max = box[1] + half_h;

  // Clip to the image bounds (ternaries kept so the comparison semantics
  // match the original exactly).
  x_min = x_min > 0 ? x_min : static_cast<T>(0);
  y_min = y_min > 0 ? y_min : static_cast<T>(0);
  x_max = x_max < img_width - 1 ? x_max : static_cast<T>(img_width - 1);
  y_max = y_max < img_height - 1 ? y_max : static_cast<T>(img_height - 1);

  boxes[box_idx] = x_min;
  boxes[box_idx + 1] = y_min;
  boxes[box_idx + 2] = x_max;
  boxes[box_idx + 3] = y_max;
}
|
||||
|
||||
template <typename T>
|
||||
HOSTDEVICE inline void CalcLabelScore(T* scores, const T* input,
|
||||
const int label_idx, const int score_idx,
|
||||
const int class_num, const T conf,
|
||||
const int stride) {
|
||||
for (int i = 0; i < class_num; i++) {
|
||||
scores[score_idx + i] = conf * sigmoid<T>(input[label_idx + i * stride]);
|
||||
}
|
||||
}
|
||||
|
||||
// CPU kernel for the yolo_box operator: decodes the raw YOLOv3 head output
// "X" (presumably [N, an_num*(5+class_num), H, W] -- the channel dim is not
// read here; confirm against the op's infer-shape) into detection boxes and
// per-class scores scaled to each sample's original image size ("ImgSize").
template <typename T>
class YoloBoxKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* imgsize = ctx.Input<Tensor>("ImgSize");
    auto* boxes = ctx.Output<Tensor>("Boxes");
    auto* scores = ctx.Output<Tensor>("Scores");
    auto anchors = ctx.Attr<std::vector<int>>("anchors");
    int class_num = ctx.Attr<int>("class_num");
    float conf_thresh = ctx.Attr<float>("conf_thresh");
    int downsample_ratio = ctx.Attr<int>("downsample_ratio");

    const int n = input->dims()[0];
    const int h = input->dims()[2];
    const int w = input->dims()[3];
    const int box_num = boxes->dims()[1];
    const int an_num = anchors.size() / 2;  // anchors come in (w, h) pairs
    // Network input resolution implied by this feature map.
    int input_size = downsample_ratio * h;

    // Distance (in elements) between consecutive entries of one anchor's
    // prediction, and between consecutive anchors, in the input tensor.
    const int stride = h * w;
    const int an_stride = (class_num + 5) * stride;

    // Copy the anchor attribute into a tensor so a raw int* can be handed to
    // the decode helpers.
    Tensor anchors_;
    auto anchors_data =
        anchors_.mutable_data<int>({an_num * 2}, ctx.GetPlace());
    std::copy(anchors.begin(), anchors.end(), anchors_data);

    const T* input_data = input->data<T>();
    const int* imgsize_data = imgsize->data<int>();
    // Outputs are zeroed up front; predictions below the confidence
    // threshold simply keep their zero entries.
    T* boxes_data = boxes->mutable_data<T>({n, box_num, 4}, ctx.GetPlace());
    memset(boxes_data, 0, boxes->numel() * sizeof(T));
    T* scores_data =
        scores->mutable_data<T>({n, box_num, class_num}, ctx.GetPlace());
    memset(scores_data, 0, scores->numel() * sizeof(T));

    // Scratch box reused for every prediction: (cx, cy, w, h).
    T box[4];
    for (int i = 0; i < n; i++) {
      // Each ImgSize row holds (height, width) for one sample.
      int img_height = imgsize_data[2 * i];
      int img_width = imgsize_data[2 * i + 1];

      for (int j = 0; j < an_num; j++) {
        for (int k = 0; k < h; k++) {
          for (int l = 0; l < w; l++) {
            // Entry 4 is the objectness logit.
            int obj_idx =
                GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 4);
            T conf = sigmoid<T>(input_data[obj_idx]);
            if (conf < conf_thresh) {
              continue;
            }

            // Entries 0..3 are the raw (x, y, w, h) prediction; decode it
            // into image-pixel coordinates.
            int box_idx =
                GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 0);
            GetYoloBox<T>(box, input_data, anchors_data, l, k, j, h, input_size,
                          box_idx, stride, img_height, img_width);
            // box_idx is reused here as the OUTPUT offset into boxes_data
            // ([n, box_num, 4] layout).
            box_idx = (i * box_num + j * stride + k * w + l) * 4;
            CalcDetectionBox<T>(boxes_data, box, box_idx, img_height,
                                img_width);

            // Entries 5.. are the per-class logits.
            int label_idx =
                GetEntryIndex(i, j, k * w + l, an_num, an_stride, stride, 5);
            int score_idx = (i * box_num + j * stride + k * w + l) * class_num;
            CalcLabelScore<T>(scores_data, input_data, label_idx, score_idx,
                              class_num, conf, stride);
          }
        }
      }
    }
  }
};
|
||||
|
||||
} // namespace operators
|
||||
} // namespace paddle
|
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue