!5785 clean redundant code

Merge pull request !5785 from hangq/master
pull/5785/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit c866996e81
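
Most of the hunks below follow one pattern: commented-out printf-style log calls (MS_LOGE / MS_LOGW / MS_LOG(ERROR)("...")) are deleted or replaced with the stream-style MS_LOG(ERROR) / MS_LOG(WARNING) macros declared in "utils/log_adapter.h". The snippet below is a minimal, self-contained sketch of that stream-logging pattern; SimpleLogStream and SIMPLE_LOG are illustrative stand-ins, not MindSpore's actual macro, which presumably also tracks file/line and severity.

```cpp
// Illustrative approximation of a stream-style logging macro. A temporary
// stream object collects the message via operator<< and flushes it when the
// statement ends, which is why printf format specifiers like %d are no longer needed.
#include <iostream>
#include <sstream>
#include <string>

class SimpleLogStream {
 public:
  explicit SimpleLogStream(const std::string &level) { buffer_ << "[" << level << "] "; }
  ~SimpleLogStream() { std::cerr << buffer_.str() << std::endl; }  // flush at end of statement
  template <typename T>
  SimpleLogStream &operator<<(const T &value) {
    buffer_ << value;
    return *this;
  }

 private:
  std::ostringstream buffer_;
};

#define SIMPLE_LOG(level) SimpleLogStream(#level)

int main() {
  int status = -1;
  // Old style, previously left commented out across the tree:
  //   // MS_LOGE("AddFullConnectionBiasTensor failed, %d", status);
  // New style, as introduced throughout this commit:
  SIMPLE_LOG(ERROR) << "AddFullConnectionBiasTensor failed, " << status;
  return 0;
}
```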

@ -15,6 +15,8 @@
*/
#include <utility>
#include <memory>
#include <vector>
#include "tools/anf_importer/anf_importer.h"
#include "schema/model_generated.h"
#include "ir/dtype.h"

@ -108,7 +108,6 @@ class MS_API Benchmark {
int Init();
int RunBenchmark(const std::string &deviceType = "NPU");
// int RunNPUBenchmark();
private:
// call GenerateInputData or ReadInputFile to init inputTensors
@ -131,7 +130,6 @@ class MS_API Benchmark {
static int i = 0;
auto inData = reinterpret_cast<T *>(input->MutableData());
std::cout << "InData" << i++ << ": ";
// int printSize = std::min(20, input->ElementsNum());
for (size_t j = 0; j < 20; j++) {
std::cout << static_cast<float >(inData[j]) << " ";
}
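
Note that the removed comment above pointed at bounding the dump by the tensor's element count instead of always reading 20 values. A standalone sketch of that clamped dump follows; DumpLeadingValues and the plain std::vector input are hypothetical stand-ins for the benchmark's tensor accessors.

```cpp
// Illustrative only: print at most 20 leading values, clamped to the actual
// element count so that short tensors are not read out of bounds.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

void DumpLeadingValues(const std::vector<float> &data) {
  const size_t printSize = std::min<size_t>(20, data.size());
  for (size_t j = 0; j < printSize; ++j) {
    std::cout << data[j] << " ";
  }
  std::cout << std::endl;
}

int main() {
  DumpLeadingValues({1.0f, 2.0f, 3.0f});  // prints 3 values, not 20
  return 0;
}
```
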
@ -217,7 +215,6 @@ class MS_API Benchmark {
std::unordered_map<std::string, CheckTensor *> calibData;
std::unordered_map<std::string, TypeId> dataTypeMap{
{"FLOAT", TypeId::kNumberTypeFloat}, {"INT8", TypeId::kNumberTypeInt8}, {"INT32", TypeId::kNumberTypeInt32}};
// TypeId msInputBinDataType = TypeId::kNumberTypeFloat;
TypeId msCalibDataType = TypeId::kNumberTypeFloat;
};
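
dataTypeMap above is a plain string-to-enum lookup used to resolve the user-supplied calibration data type, with msCalibDataType keeping a float default. A minimal sketch of that lookup-with-default pattern follows; the DataType enum and map contents are illustrative, not the benchmark tool's actual definitions.

```cpp
// Sketch of a string-to-type lookup with a float fallback.
#include <iostream>
#include <string>
#include <unordered_map>

enum class DataType { kFloat, kInt8, kInt32 };

DataType ResolveCalibDataType(const std::string &name) {
  static const std::unordered_map<std::string, DataType> kTypeMap{
      {"FLOAT", DataType::kFloat}, {"INT8", DataType::kInt8}, {"INT32", DataType::kInt32}};
  auto it = kTypeMap.find(name);
  return it == kTypeMap.end() ? DataType::kFloat : it->second;  // unknown names fall back to float
}

int main() {
  std::cout << static_cast<int>(ResolveCalibDataType("INT8")) << "\n";     // 1
  std::cout << static_cast<int>(ResolveCalibDataType("UNKNOWN")) << "\n";  // 0 (float default)
  return 0;
}
```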

@ -15,6 +15,7 @@
*/
#include "tools/common/flag_parser.h"
#include "utils/log_adapter.h"
namespace mindspore {
namespace lite {
@ -134,7 +135,7 @@ Option<std::string> FlagParser::InnerParseFlags(std::multimap<std::string, Optio
void Replaceall(std::string *str, const std::string &oldValue, const std::string &newValue) {
if (str == nullptr) {
// MS_LOG(ERROR)("Input str is nullptr");
MS_LOG(ERROR) << "Input str is nullptr";
return;
}
while (true) {

@ -114,7 +114,7 @@ template <typename Flags, typename T>
void FlagParser::ConstructFlag(Option<T> Flags::*t1, const std::string &flagName, const std::string &helpInfo,
FlagInfo *flag) {
if (flag == nullptr) {
// MS_LOGE("FlagInfo is nullptr");
MS_LOG(ERROR) << "FlagInfo is nullptr";
return;
}
flag->flagName = flagName;
@ -128,11 +128,11 @@ void FlagParser::ConstructFlag(Option<T> Flags::*t1, const std::string &flagName
template <typename Flags, typename T>
void FlagParser::ConstructFlag(T Flags::*t1, const std::string &flagName, const std::string &helpInfo, FlagInfo *flag) {
if (flag == nullptr) {
// MS_LOGE("FlagInfo is nullptr");
MS_LOG(ERROR) << "FlagInfo is nullptr";
return;
}
if (t1 == nullptr) {
// MS_LOGE("t1 is nullptr");
MS_LOG(ERROR) << "t1 is nullptr";
return;
}
flag->flagName = flagName;
@ -146,7 +146,7 @@ inline void FlagParser::AddFlag(const FlagInfo &flagItem) { flags[flagItem.flagN
template <typename Flags, typename T>
void FlagParser::AddFlag(T Flags::*t, const std::string &flagName, const std::string &helpInfo) {
if (t == nullptr) {
// MS_LOGE("t1 is nullptr");
MS_LOG(ERROR) << "t1 is nullptr";
return;
}
AddFlag(t, flagName, helpInfo, static_cast<const T *>(nullptr));
@ -155,7 +155,7 @@ void FlagParser::AddFlag(T Flags::*t, const std::string &flagName, const std::st
template <typename Flags, typename T1, typename T2>
void FlagParser::AddFlag(T1 Flags::*t1, const std::string &flagName, const std::string &helpInfo, const T2 &t2) {
if (t1 == nullptr) {
// MS_LOGE("t1 is nullptr");
MS_LOG(ERROR) << "t1 is nullptr";
return;
}
AddFlag(t1, flagName, helpInfo, &t2);
@ -165,7 +165,7 @@ void FlagParser::AddFlag(T1 Flags::*t1, const std::string &flagName, const std::
template <typename Flags, typename T1, typename T2>
void AddFlag(T1 *t1, const std::string &flagName, const std::string &helpInfo, const T2 &t2) {
if (t1 == nullptr) {
// MS_LOGE("t1 is nullptr");
MS_LOG(ERROR) << "t1 is nullptr";
return;
}
AddFlag(t1, flagName, helpInfo, &t2);
@ -174,7 +174,7 @@ void AddFlag(T1 *t1, const std::string &flagName, const std::string &helpInfo, c
template <typename Flags, typename T1, typename T2>
void FlagParser::AddFlag(T1 *t1, const std::string &flagName, const std::string &helpInfo, const T2 *t2) {
if (t1 == nullptr) {
// MS_LOGE("t1 is nullptr");
MS_LOG(ERROR) << "t1 is nullptr";
return;
}
@ -214,7 +214,7 @@ void FlagParser::AddFlag(T1 *t1, const std::string &flagName, const std::string
template <typename Flags, typename T1, typename T2>
void FlagParser::AddFlag(T1 Flags::*t1, const std::string &flagName, const std::string &helpInfo, const T2 *t2) {
if (t1 == nullptr) {
// MS_LOGE("t1 is nullptr");
MS_LOG(ERROR) << "t1 is nullptr";
return;
}
@ -263,13 +263,13 @@ void FlagParser::AddFlag(T1 Flags::*t1, const std::string &flagName, const std::
template <typename Flags, typename T>
void FlagParser::AddFlag(Option<T> Flags::*t, const std::string &flagName, const std::string &helpInfo) {
if (t == nullptr) {
// MS_LOGE("t is nullptr");
MS_LOG(ERROR) << "t is nullptr";
return;
}
Flags *flag = dynamic_cast<Flags *>(this);
if (flag == nullptr) {
// MS_LOGE("dynamic_cast failed");
MS_LOG(ERROR) << "dynamic_cast failed";
return;
}

@ -133,7 +133,7 @@ STATUS IsolateNode(schema::MetaGraphT *graphT, CNodeT *node) {
return RET_ERROR;
}
if (outputTensorIdxes.size() != 1) {
MS_LOG(ERROR) << "FakeQuantNode " << node->name.c_str() \
MS_LOG(ERROR) << "FakeQuantNode " << node->name.c_str()
<< "should has 1 output, in fact: " << outputTensorIdxes.size();
return RET_ERROR;
}
@ -376,7 +376,7 @@ NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPl
} else if (place == kAfter) {
return InsertNodeAfter(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, opDefCopyer);
} else {
// MS_LOG(ERROR)("Invalid InsertPlace : %d", place);
MS_LOG(ERROR) << "Invalid InsertPlace : " << place;
return graphT->nodes.end();
}
}

@ -47,19 +47,18 @@ schema::MetaGraphT *Storage::Load(const std::string &inputPath) {
size_t size;
auto buf = ReadFile(inputPath.c_str(), &size);
if (buf == nullptr) {
// MS_LOG(ERROR)("the file buffer is nullptr");
MS_LOG(ERROR) << "the file buffer is nullptr";
return nullptr;
}
flatbuffers::Verifier verify((const uint8_t *)buf, size);
// if (false == VerifyGraphDefBuffer(verify)) {
// //MS_LOG(ERROR)("the buffer is invalid and fail to create graph");
// return nullptr;
// }
if (false == schema::VerifyMetaGraphBuffer(verify)) {
MS_LOG(ERROR) << "the buffer is invalid and fail to create meta graph";
return nullptr;
}
auto graphDefT = schema::UnPackMetaGraph(buf);
return graphDefT.release();
}
} // namespace lite
} // namespace mindspore

@ -41,18 +41,18 @@ FusionPattern &FusionPattern::AddPatternOp(const std::string &id,
FusionPattern &FusionPattern::AddPatternOp(const std::string &id, const std::vector<schema::PrimitiveType> &types) {
if (id.empty()) {
// MS_LOG(ERROR) << "Id cannot be empty");
MS_LOG(ERROR) << "Id cannot be empty";
hasError = true;
}
if (GetPatternOp(id) != nullptr) {
// MS_LOG(ERROR) << "Id repeated. (id:%s)", id.c_str());
MS_LOG(ERROR) << "Id repeated. id: " << id;
hasError = true;
}
std::shared_ptr<PatternOp> op(new PatternOp());
if (op == nullptr) {
// MS_LOG(ERROR) << "new an object failed");
MS_LOG(ERROR) << "new an object failed";
hasError = true;
} else {
op->id = id;
@ -78,12 +78,12 @@ FusionPattern &FusionPattern::RemovePatternOp(const std::string &id) {
bool FusionPattern::Check() {
if (hasError) {
// MS_LOG(ERROR) << "Has Error in previous Func");
MS_LOG(ERROR) << "Has Error in previous Func";
return false;
}
if (GetPatternOp(this->outputOpId) == nullptr) {
// MS_LOG(ERROR) << "Can not find the output of the pattern");
MS_LOG(ERROR) << "Can not find the output of the pattern";
return false;
}
@ -132,7 +132,7 @@ FusionPattern &FusionPattern::Finish() {
std::vector<std::string> inputNodeIds;
for (auto patternOp : ops) {
if (IsContain(ids, patternOp->id)) {
// MS_LOG(ERROR) << "Duplicate id find: %s", patternOp->id.c_str());
MS_LOG(ERROR) << "Duplicate id find: " << patternOp->id;
hasError = true;
return *this;
}
@ -155,12 +155,12 @@ FusionPattern &FusionPattern::Finish() {
}
}
if (ids.size() > 1) {
// MS_LOG(ERROR) << "Multi-output node find, only support pattern with one output");
MS_LOG(ERROR) << "Multi-output node find, only support pattern with one output";
hasError = true;
return *this;
}
if (ids.empty()) {
// MS_LOG(ERROR) << "No output node find, only support pattern with one output");
MS_LOG(ERROR) << "No output node find, only support pattern with one output";
hasError = true;
return *this;
}

@ -23,7 +23,6 @@
#include <utility>
#include <vector>
#include <map>
// #include <memory>
#include "utils/log_adapter.h"
#include "schema/inner/model_generated.h"

@ -22,7 +22,6 @@
#include "tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h"
#include "utils/log_adapter.h"
#include "securec/include/securec.h"
// #include "utils/log_adapter.h"
#include "tools/common/graph_util.h"
#include "include/errorcode.h"
#include "schema/inner/model_generated.h"
@ -202,9 +201,8 @@ STATUS MatMulBiasAddFusionPass::AddFullConnectionBiasTensor(const std::shared_pt
return RET_ERROR;
}
if (!biasDims.empty() && biasDims.size() != BIASADD_WEIGHT_SHAPE_SIZE) {
MS_LOG(ERROR)
<< "BiasAdd bias tensor should has one dimension, current number of dimension %zu. or bias tensor is a scaler";
// biasDims.size());
MS_LOG(ERROR) << "BiasAdd bias tensor should has one dimension, current number of dimension " << biasDims.size()
<< ". or bias tensor is a scaler";
return RET_ERROR;
}
// add biasTensor to matmul

@ -22,7 +22,6 @@
#include "tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h"
#include "utils/log_adapter.h"
#include "securec/include/securec.h"
// #include "utils/log_adapter.h"
#include "tools/common/graph_util.h"
#include "include/errorcode.h"
#include "schema/inner/model_generated.h"
@ -47,7 +46,7 @@ STATUS MulAddFusionPass::DefinePattern() {
baOp->types = {schema::PrimitiveType_Add};
baOp->left = mulOp;
std::unique_ptr<FusionPattern> fusionPattern(new(std::nothrow) FusionPattern("MulAddFusion"));
std::unique_ptr<FusionPattern> fusionPattern(new (std::nothrow) FusionPattern("MulAddFusion"));
if (fusionPattern == nullptr) {
MS_LOG(ERROR) << "new fusionPattern failed";
return RET_ERROR;
@ -101,15 +100,15 @@ STATUS MulAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &patternN
// convert mul and add to scale
auto status = AddNewScaleNode(graph, mulNode, addNode.get(), addNodeInputIndex.at(ADD_OP_BIAS_INDEX));
if (RET_OK != status) {
MS_LOG(ERROR) << "AddFullConnectionBiasTensor failed, %d"; // status);
MS_LOG(ERROR) << "AddFullConnectionBiasTensor failed, " << status;
return status;
}
return RET_OK;
}
STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_ptr<CNodeT> &mulNode,
CNodeT* addNode, uint32_t addBiasIndex) {
STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_ptr<CNodeT> &mulNode, CNodeT *addNode,
uint32_t addBiasIndex) {
MS_ASSERT(graph != nullptr);
MS_ASSERT(mulNode != nullptr);
MS_ASSERT(addNode != nullptr);
@ -129,7 +128,6 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt
// repace addnode as activation
std::unique_ptr<ActivationT> activationParam(new ActivationT());
activationParam->type = addNode->primitive->value.AsAdd()->activationType;
// activationParam->alpha = 0.0;
addNode->primitive->value.type = schema::PrimitiveType_Activation;
addNode->primitive->value.value = activationParam.release();
addNode->inputIndex.pop_back();
@ -138,8 +136,7 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt
// delete addnode
auto status = IsolateOneWayNode(graph, addNode);
if (status != RET_OK) {
MS_LOG(ERROR) << "IsolateOneWayNode failed, subGraph: %zu, node: %zu, error: %d";
// baPath->subGraphIdx, baPath->nodeIdx, status);
MS_LOG(ERROR) << "IsolateOneWayNode failed";
return status;
}
return RET_OK;
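
The MulAddFusion pass above rewrites an elementwise Mul followed by an Add into a single Scale node (and, when the Add carries an activation type, repurposes the leftover Add as an Activation node). The fusion is valid because x * w + b is exactly what Scale computes; a toy sketch of that equivalence with plain arrays instead of the converter's tensor types:

```cpp
// Toy illustration (not converter code): an elementwise Mul node followed by an
// Add node produces the same result as a single fused Scale pass over the data.
#include <cassert>
#include <vector>

std::vector<float> MulThenAdd(const std::vector<float> &x, const std::vector<float> &w,
                              const std::vector<float> &b) {
  std::vector<float> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) y[i] = x[i] * w[i];  // Mul node
  for (size_t i = 0; i < x.size(); ++i) y[i] += b[i];        // Add node
  return y;
}

std::vector<float> FusedScale(const std::vector<float> &x, const std::vector<float> &w,
                              const std::vector<float> &b) {
  std::vector<float> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) y[i] = x[i] * w[i] + b[i];  // single Scale node
  return y;
}

int main() {
  const std::vector<float> x{1.f, 2.f, 3.f}, w{0.5f, 0.5f, 0.5f}, b{1.f, 1.f, 1.f};
  assert(MulThenAdd(x, w, b) == FusedScale(x, w, b));
  return 0;
}
```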

@ -103,30 +103,8 @@ STATUS FormatTransPass::DoNodeInoutFormatTrans(schema::MetaGraphT *graph) {
for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) {
FormatTransNodeType beforeNodeType, afterNodeType;
if (fmkType == converter::FmkType_TFLITE) { // inference by nhwc
// if (quantType == QuantType_AwareTraining) { // AwareTraining op use
// nhwc
// if (IsContain(GetUint8NhwcOpList(), GetCNodeTType(**iter))) { // uint8NhwcOp only
// support nhwc
// continue;
// }
// if (!IsContain(GetNhwcOpList(), GetCNodeTType(**iter))) {
// continue;
// }
// } else {
// if (!IsContain(GetNhwcOpList(), GetCNodeTType(**iter))) {
continue;
// }
// }
// beforeNodeType = kNCHW2NHWC;
// afterNodeType = kNHWC2NCHW;
} else if (fmkType == converter::FmkType_CAFFE) { // inference by nchw
// if (quantType == QuantType_AwareTraining) { // AwareTraining op use nhwc
// if (!IsContain(GetUint8NhwcOpList(), GetCNodeTType(**iter))) { // uint8NhwcOp only support nhwc
// continue;
// }
// } else {
// continue;
// }
if (!IsContain(GetNhwcOpList(), GetCNodeTType(**iter))) {
continue;
}

@ -71,7 +71,6 @@ STATUS WeightFormatTransformPass::QuantDataFormatTrans(MetaGraphT *graph) {
}
status = TransFilterFormat(weightTensor.get(), curDstFormat);
if (status == RET_OK) {
// node->primitive->value.AsConv2D()->format = schema::Format_NHWC;
weightTensor->format = curDstFormat;
} else {
MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To"

@ -557,7 +557,6 @@ OnnxNodeRegistrar g_onnxInt8AddParser("Int8Add", new OnnxAddParser());
OnnxNodeRegistrar g_onnxSubParser("Sub", new OnnxSubParser());
OnnxNodeRegistrar g_onnxMulParser("Mul", new OnnxMulParser());
OnnxNodeRegistrar g_onnxDivParser("Div", new OnnxDivParser());
// OnnxNodeRegistrar g_onnxMeanParser("Mean", new OnnxMeanParser()); // onnx's Mean is different from mslite's
OnnxNodeRegistrar g_onnxPowParser("Power", new OnnxPowParser());
OnnxNodeRegistrar g_onnxEqualParser("Equal", new OnnxEqualParser());
OnnxNodeRegistrar g_onnxLessParser("Less", new OnnxLessParser());

@ -323,7 +323,6 @@ void OnnxModelParser::SetOpQuantParams(const onnx::GraphProto &onnx_graph, const
quant_param->min = FLT_MAX;
quant_param->max = FLT_MAX;
}
// quant_param_array->param.emplace_back(std::move(quant_param));
dst_tensor->quantParams.emplace_back(std::move(quant_param));
if (argNum == 2) {
findQuantParams++;

@ -16,6 +16,7 @@
#include "tools/converter/parser/onnx/onnx_slice_parser.h"
#include <memory>
#include <vector>
namespace mindspore {
namespace lite {

@ -141,7 +141,6 @@ schema::ActivationType GetActivationFunctionType(tflite::ActivationFunctionType
std::string GetMSOpType(tflite::BuiltinOperator tfliteOpType) {
auto iter = tfMsOpTypeMap.find(tfliteOpType);
if (iter == tfMsOpTypeMap.end()) {
// return "unsupported_op_type";
return tflite::EnumNameBuiltinOperator(tfliteOpType);
}
return iter->second;

@ -103,62 +103,7 @@ STATUS AwareQuantizer::GenerateDefaultQuantParam(const schema::MetaGraphT *subGr
return RET_OK;
}
STATUS AwareQuantizer::SetAttrToConvolution(const schema::MetaGraphT *subGraph, schema::CNodeT *node) {
// MS_ASSERT(subGraph != nullptr);
// MS_ASSERT(node != nullptr);
// auto inputIndexes = node->inputIndex;
// MS_ASSERT(GetCNodeTType(*node) == OpT_Conv2D || GetCNodeTType(*node) ==
// OpT_DepthwiseConv2D ||
// GetCNodeTType(*node) == OpT_DeConv2D || GetCNodeTType(*node) ==
// OpT_DeDepthwiseConv2D);
// if (inputIndexes.size() < 2) {
// MS_LOGE("in aware quant %s node's input tensors is invalid(%zu)!",
// node->name.c_str(), inputIndexes.size()); return RET_ERROR;
// }
// TensorDefT *filterTensor = subGraph->allTensors.at(inputIndexes[1]).get();
// MS_ASSERT(filterTensor != nullptr);
// auto filterDims = filterTensor->dims;
// MS_ASSERT(filterDims.size() == 4);
// if (GetCNodeTType(*node) == OpT_Conv2D) {
// if (node->fmkType == FmkType_MS) {
// node->attr.AsConv2D()->channelOut = (int32_t)filterDims[0];
// node->attr.AsConv2D()->channelIn = (int32_t)filterDims[1];
// node->attr.AsConv2D()->kernelH = (int32_t)filterDims[2];
// node->attr.AsConv2D()->kernelW = (int32_t)filterDims[3];
// } else if (node->fmkType == FmkType_TF) {
// node->attr.AsConv2D()->kernelH = (int32_t)filterDims[0];
// node->attr.AsConv2D()->kernelW = (int32_t)filterDims[1];
// node->attr.AsConv2D()->channelIn = (int32_t)filterDims[2];
// node->attr.AsConv2D()->channelOut = (int32_t)filterDims[3];
// } else {
// MS_LOGE("Unsupport");
// }
// }
// if (GetCNodeTType(*node) == OpT_DepthwiseConv2D) {
// if (node->fmkType == FmkType_MS) {
// node->attr.AsDepthwiseConv2D()->channelIn = (int32_t)filterDims[0];
// node->attr.AsDepthwiseConv2D()->channelMultiplier =
// (int32_t)filterDims[1]; node->attr.AsDepthwiseConv2D()->kernelH =
// (int32_t)filterDims[2]; node->attr.AsDepthwiseConv2D()->kernelW =
// (int32_t)filterDims[3];
// } else if (node->fmkType == FmkType_TF) {
// node->attr.AsDepthwiseConv2D()->kernelH = (int32_t)filterDims[0];
// node->attr.AsDepthwiseConv2D()->kernelW = (int32_t)filterDims[1];
// node->attr.AsDepthwiseConv2D()->channelIn = (int32_t)filterDims[2];
// node->attr.AsDepthwiseConv2D()->channelMultiplier =
// (int32_t)filterDims[3];
// } else {
// MS_LOGE("Unsupport");
// }
// }
// if (GetCNodeTType(*node) == OpT_DeConv2D) {
// MS_ASSERT(false);
// }
// if (GetCNodeTType(*node) == OpT_DeDepthwiseConv2D) {
// MS_ASSERT(false);
// }
return RET_OK;
}
STATUS AwareQuantizer::SetAttrToConvolution(const schema::MetaGraphT *subGraph, schema::CNodeT *node) { return RET_OK; }
STATUS AwareQuantizer::GenerateQuantParam() {
MS_ASSERT(graph->inputIndex.size() == 1);
@ -288,7 +233,7 @@ STATUS AwareQuantizer::QuantAddConstTensor(const schema::MetaGraphT *graph, sche
case kNumberTypeUInt8:
break;
default:
// MS_LOGE("Unsupported dataType: %d", inTensor->dataType);
MS_LOG(ERROR) << "Unsupported dataType: " << inTensor->dataType;
return RET_ERROR;
}
}
@ -307,7 +252,7 @@ STATUS AwareQuantizer::QuantDetectionPostProcessConstTensor(const schema::MetaGr
size_t constTensorShapeSize = GetShapeSize(*constTensor);
std::unique_ptr<QuantParamT> quantParam = GetTensorQuantParam(constTensor);
if (quantParam == nullptr) {
// MS_LOGE("new QuantParamT failed");
MS_LOG(ERROR) << "new QuantParamT failed";
return RET_NULL_PTR;
}
vector<uint8_t> qDatas(constTensorShapeSize);
@ -335,7 +280,7 @@ STATUS AwareQuantizer::QuantConvBias(const mindspore::schema::MetaGraphT *graph,
return RET_OK;
}
if (biasTensor->dataType != TypeId::kNumberTypeFloat && biasTensor->dataType != TypeId::kNumberTypeFloat32) {
// MS_LOGE("conv %s's bias data is not float", node->name.c_str());
MS_LOG(ERROR) << "conv " << node->name << "'s bias data is not float";
return RET_ERROR;
}
auto &inputTensor = graph->allTensors.at(inputIndexes.at(0));
@ -349,7 +294,7 @@ STATUS AwareQuantizer::QuantConvBias(const mindspore::schema::MetaGraphT *graph,
// set bias quant param
std::unique_ptr<QuantParamT> biasQuantParam = GetTensorQuantParam(biasTensor);
if (biasQuantParam == nullptr) {
// MS_LOGE("new QuantParamT failed");
MS_LOG(ERROR) << "new QuantParamT failed";
return RET_ERROR;
}
biasQuantParam->inited = true;

@ -33,7 +33,7 @@ STATUS QuantParamCalcer::ComputeConstQuantParam(const schema::TensorT &tensor, Q
return RET_OK;
}
if (tensor.dataType != TypeId::kNumberTypeFloat) {
// MS_LOGW("Const Tensor without quantParam should has float dataType, in fact: %d", tensor.dataType);
MS_LOG(WARNING) << "Const Tensor without quantParam should has float dataType, in fact: " << tensor.dataType;
return RET_ERROR;
}
const auto *constData = reinterpret_cast<const float *>(tensor.data.data());
@ -53,7 +53,7 @@ STATUS QuantParamCalcer::ComputeConstQuantParam(const schema::TensorT &tensor, Q
isQuantExact &= (constData[i] == min || constData[i] == max);
}
if (!isQuantExact) {
// //MS_LOGD("compute quantParam for const tensor may be a cause of poor inference accuracy");
MS_LOG(DEBUG) << "compute quantParam for const tensor may be a cause of poor inference accuracy";
}
return quant::CalQuantizationParams(quantParam, min, max);
}
@ -80,7 +80,7 @@ int QuantParamCalcer::Calc(MetaGraphT *graph, const CNodeT &node) {
if (tensor->refCount == schema::NodeType_ValueNode && !IsContain(graph->inputIndex, node.inputIndex.at(i))) {
auto status = ComputeConstQuantParam((*tensor), quantParam.get());
if (status != RET_OK) {
// MS_LOGW("ComputeConstQuantParam failed: %d", status);
MS_LOG(WARNING) << "ComputeConstQuantParam failed: " << status;
return status;
}
tensor->quantParams.front() = std::move(quantParam);
@ -110,15 +110,15 @@ int QuantParamCalcer::Calc(MetaGraphT *graph, const CNodeT &node) {
int CommonCalcer::Calc(MetaGraphT *subGraph, const CNodeT &node) {
auto status = QuantParamCalcer::Calc(subGraph, node);
if (status != RET_OK) {
// MS_LOGW("Call QuantParamCalcer::Calc failed: %d", status);
MS_LOG(WARNING) << "Call QuantParamCalcer::Calc failed: " << status;
return status;
}
if (inputParamDone != node.inputIndex.size()) {
MS_LOG(ERROR) << "Can not determine inputTensor quantParam, node " << node.name.c_str();
MS_LOG(ERROR) << "Can not determine inputTensor quantParam, node " << node.name;
return RET_ERROR;
}
if (outputParamDone != node.outputIndex.size()) {
MS_LOG(ERROR) << "Can not determine outputTensor quantParam, node " << node.name.c_str();
MS_LOG(ERROR) << "Can not determine outputTensor quantParam, node " << node.name;
return RET_ERROR;
}
return RET_OK;
@ -127,7 +127,7 @@ int CommonCalcer::Calc(MetaGraphT *subGraph, const CNodeT &node) {
int LinearCalcer::Calc(MetaGraphT *graph, const CNodeT &node) {
auto status = QuantParamCalcer::Calc(graph, node);
if (status != RET_OK) {
// MS_LOGW("Call QuantParamCalcer::Calc failed: %d", status);
MS_LOG(WARNING) << "Call QuantParamCalcer::Calc failed: " << status;
return status;
}
if (inputParamDone != node.inputIndex.size()) {
@ -137,7 +137,7 @@ int LinearCalcer::Calc(MetaGraphT *graph, const CNodeT &node) {
auto outputQuantParam = GetTensorQuantParam(outTensor);
MS_ASSERT(outputQuantParam != nullptr);
if (!outputQuantParam->inited) {
// MS_LOGW("Can not determine inputTensor quantParam from outputTensor for node %s", node.name.c_str());
MS_LOG(WARNING) << "Can not determine inputTensor quantParam from outputTensor for node " << node.name;
return RET_ERROR;
}
for (unsigned int i : node.inputIndex) {
@ -157,7 +157,7 @@ int LinearCalcer::Calc(MetaGraphT *graph, const CNodeT &node) {
MS_ASSERT(inTensor != nullptr);
auto inQuantParam = GetTensorQuantParam(inTensor);
if (!inQuantParam->inited) {
// MS_LOGW("Can not determine outputTensor quantParam from inputTensor for node %s", node.name.c_str());
MS_LOG(WARNING) << "Can not determine outputTensor quantParam from inputTensor for node %s" << node.name;
return RET_ERROR;
}
for (size_t i = 0; i < node.outputIndex.size(); i++) {
@ -182,12 +182,12 @@ class CalcConcat : public QuantParamCalcer {
MS_ASSERT(node.outputIndex.size() == 1);
auto status = QuantParamCalcer::Calc(graph, node);
if (status != RET_OK) {
// MS_LOGW("Call QuantParamCalcer::Calc failed: %d", status);
MS_LOG(WARNING) << "Call QuantParamCalcer::Calc failed: " << status;
return status;
}
if (inputParamDone != node.inputIndex.size()) {
// MS_LOGW("Can not determine concat inputTensor quantParam, node %s", node.name.c_str());
MS_LOG(WARNING) << "Can not determine concat inputTensor quantParam, node " << node.name;
return RET_ERROR;
}
@ -228,7 +228,7 @@ class CalcConcat : public QuantParamCalcer {
status = quant::CalQuantizationParams(outQuantParam.get(), minMin, maxMax, narrowRange, numBits);
if (status != RET_OK) {
// MS_LOGW("in aware quantization run CalQuantizationParams failed!");
MS_LOG(WARNING) << "in aware quantization run CalQuantizationParams failed!";
return RET_ERROR;
}
outputParamDone++;
@ -247,12 +247,12 @@ class CalcAdd : public QuantParamCalcer {
MS_ASSERT(node.outputIndex.size() == 1);
auto status = QuantParamCalcer::Calc(graph, node);
if (status != RET_OK) {
// MS_LOGW("Call QuantParamCalcer::Calc failed: %d", status);
MS_LOG(WARNING) << "Call QuantParamCalcer::Calc failed: " << status;
return status;
}
if (inputParamDone != 2) {
// MS_LOGW("Can not determine add inputTensor quantParam, node %s", node.name.c_str());
MS_LOG(WARNING) << "Can not determine add inputTensor quantParam, node " << node.name;
return RET_ERROR;
}
if (outputParamDone != 1) {
@ -277,7 +277,7 @@ class CalcAdd : public QuantParamCalcer {
biasTensor = &tensor1;
paramTensor = &tensor0;
} else {
// MS_LOGW("Can not determine add outputTensor quantParam, node %s", node.name.c_str());
MS_LOG(WARNING) << "Can not determine add outputTensor quantParam, node " << node.name;
return RET_ERROR;
}
auto quantParam = GetTensorQuantParam(*paramTensor);
@ -292,7 +292,7 @@ class CalcAdd : public QuantParamCalcer {
auto *bias = static_cast<float *>(oriTensorData);
status = quant::CalQuantizationParams(outQuantParam.get(), min + (*bias), max + (*bias));
if (status != RET_OK) {
// MS_LOGW("in aware quantization run CalQuantizationParams failed!");
MS_LOG(WARNING) << "in aware quantization run CalQuantizationParams failed!";
return RET_ERROR;
}
} else if ((*biasTensor)->dataType == TypeId::kNumberTypeUInt8) {
@ -301,11 +301,11 @@ class CalcAdd : public QuantParamCalcer {
auto *bias = static_cast<uint8_t *>(oriTensorData);
status = quant::CalQuantizationParams(outQuantParam.get(), min + (*bias), max + (*bias));
if (status != RET_OK) {
// MS_LOGW("in aware quantization run CalQuantizationParams failed!");
MS_LOG(WARNING) << "in aware quantization run CalQuantizationParams failed!";
return RET_ERROR;
}
} else {
// MS_LOGW("Unsupported tensor dataType: %d", (*biasTensor)->dataType);
MS_LOG(WARNING) << "Unsupported tensor dataType: " << (*biasTensor)->dataType;
return RET_ERROR;
}
}
@ -323,12 +323,12 @@ class CalcRealDiv : public QuantParamCalcer {
MS_ASSERT(node.outputIndex.size() == 1);
auto status = QuantParamCalcer::Calc(graph, node);
if (status != RET_OK) {
// MS_LOGW("Call QuantParamCalcer::Calc failed: %d", status);
MS_LOG(WARNING) << "Call QuantParamCalcer::Calc failed: " << status;
return status;
}
if (inputParamDone != 2) {
// MS_LOGW("Can not determine realdiv inputTensor quantParam, node %s", node.name.c_str());
MS_LOG(WARNING) << "Can not determine realdiv inputTensor quantParam, node " << node.name;
return RET_ERROR;
}
if (outputParamDone != 1) {
@ -354,7 +354,7 @@ class CalcRealDiv : public QuantParamCalcer {
MS_ASSERT(*div != 0);
status = quant::CalQuantizationParams(outQuantParam.get(), min / (*div), max / (*div));
if (status != RET_OK) {
// MS_LOGW("in aware quantization run CalQuantizationParams failed!");
MS_LOG(WARNING) << "in aware quantization run CalQuantizationParams failed!";
return RET_ERROR;
}
} else if (tensor1->dataType == TypeId::kNumberTypeUInt8) {
@ -363,16 +363,16 @@ class CalcRealDiv : public QuantParamCalcer {
auto *div = static_cast<uint8_t *>(oriTensorData);
status = quant::CalQuantizationParams(outQuantParam.get(), min / (*div), max + (*div));
if (status != RET_OK) {
// MS_LOGW("in aware quantization run CalQuantizationParams failed!");
MS_LOG(WARNING) << "in aware quantization run CalQuantizationParams failed!";
return RET_ERROR;
}
} else {
// MS_LOGW("Unsupported tensor dataType: %d", tensor1->dataType);
MS_LOG(WARNING) << "Unsupported tensor dataType: " << tensor1->dataType;
return RET_ERROR;
}
}
} else {
// MS_LOGW("Can not determine realDiv outputTensor quantParam, node %s", node.name.c_str());
MS_LOG(WARNING) << "Can not determine realDiv outputTensor quantParam, node " << node.name;
return RET_ERROR;
}
}
@ -389,18 +389,18 @@ class CalcToSet : public QuantParamCalcer {
MS_ASSERT(node.outputIndex.size() == 1);
auto status = QuantParamCalcer::Calc(graph, node);
if (status != RET_OK) {
// MS_LOGW("Call QuantParamCalcer::Calc failed: %d", status);
MS_LOG(WARNING) << "Call QuantParamCalcer::Calc failed: %d" << status;
return status;
}
// input
if (inputParamDone != node.inputIndex.size()) {
// MS_LOGW("Can not determine inputTensor quantParam, node %s", node.name.c_str());
MS_LOG(WARNING) << "Can not determine inputTensor quantParam, node " << node.name;
return RET_ERROR;
}
// output
std::unique_ptr<QuantParamT> quantParam(new (std::nothrow) QuantParamT());
if (quantParam == nullptr) {
// MS_LOGW("new QuantParamT failed");
MS_LOG(WARNING) << "new QuantParamT failed";
return RET_ERROR;
}
quantParam->scale = (max - min) / 256;
@ -486,7 +486,6 @@ QuantParamCalcRegister::QuantParamCalcRegister() {
_registerMap[schema::PrimitiveType_FullConnection] = commonCalcer.get();
_registerMap[schema::PrimitiveType_Nchw2Nhwc] = linearCalcer.get();
_registerMap[schema::PrimitiveType_Nhwc2Nchw] = linearCalcer.get();
// todo
// detection_postprocess op's quant param will not infer only fetch from preNode or postNode
// because we will not insert quantTransNode after this node in tflite_graph_8bit model if input data is float.
// if quantTransNode is inserted after detection_postprocess node, there will be some errors

@ -513,7 +513,6 @@ STATUS PostTrainingQuantizer::DoQuantOutput(double scale, int zeropoint, struct
STATUS PostTrainingQuantizer::DoWeightQuant(AnfNodePtr weight, std::shared_ptr<PrimitiveC> primitive_c, bool perchanel,
bool depthwise) {
// const vector<int> dims = filter->dims;
// perlayer
if (!weight->isa<Parameter>()) {
MS_LOG(ERROR) << "not a parameter";

@ -86,8 +86,6 @@ class PostTrainingQuantizer : public Quantizer {
STATUS QuantNode();
// STATUS reformatConvWeight(GraphDefT *graph);
STATUS DoQuantInput(double scale, int32_t zeropoint, struct MaxMin *max_min, std::shared_ptr<PrimitiveC>);
STATUS DoQuantOutput(double scale, int32_t zeropoint, struct MaxMin *max_min, std::shared_ptr<PrimitiveC>);

@ -198,7 +198,6 @@ STATUS CalQuantizationParams(schema::QuantParamT *quantParam, double mMin, doubl
auto quantMaxFloat = static_cast<double>(quant_max);
double scale = (mMax - mMin) / (quantMaxFloat - quantMinFloat);
const double zeroPointFromMin = quantMinFloat - mMin / scale;
// const double zeroPointFromMax = quantMaxFloat - mMax / scale;
int zeroPoint = static_cast<int32_t>(std::round(zeroPointFromMin));
// The zero point should always be in the range of quantized value,
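
The snippet above is the standard affine-quantization parameter derivation: scale maps the observed float range onto the integer range, and the zero point is the rounded quantized value that represents 0.0, which is then kept inside the quantized range. A standalone worked example, assuming an unsigned 8-bit target range of [0, 255] and omitting the clamping the surrounding code performs:

```cpp
// Worked example of the scale / zero-point formulas above (illustrative names).
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  const double mMin = -1.0, mMax = 3.0;           // observed float range
  const double quantMin = 0.0, quantMax = 255.0;  // assumed uint8 target range
  const double scale = (mMax - mMin) / (quantMax - quantMin);
  const double zeroPointFromMin = quantMin - mMin / scale;
  const auto zeroPoint = static_cast<int32_t>(std::round(zeroPointFromMin));

  // Round-trip a sample value through the affine mapping.
  const double real = 1.5;
  const auto q = static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
  const double dequant = (q - zeroPoint) * scale;
  std::cout << "scale=" << scale << " zeroPoint=" << zeroPoint
            << " q=" << q << " dequant=" << dequant << std::endl;  // dequant ~= 1.5
  return 0;
}
```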
