!8726 modify static check to master

From: @lyvette
Reviewed-by: @hangangqiang,@zhanghaibo5
Signed-off-by: @hangangqiang
pull/8726/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit 88fa121734

@@ -36,13 +36,11 @@ class KernelRegistry {
   static KernelRegistry *GetInstance();
   int Init();
-  void FreeCreatorArray();
   virtual kernel::KernelCreator GetCreator(const kernel::KernelKey &desc);
   const kernel::KernelCreator *GetCreatorArrays();
-  int GetCreatorFuncIndex(const kernel::KernelKey desc);
-  void RegKernel(const kernel::KernelKey desc, kernel::KernelCreator creator);
-  void RegKernel(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType type,
-                 kernel::KernelCreator creator);
+  int GetCreatorFuncIndex(kernel::KernelKey desc);
+  void RegKernel(kernel::KernelKey desc, kernel::KernelCreator creator);
+  void RegKernel(kernel::KERNEL_ARCH arch, TypeId data_type, schema::PrimitiveType type, kernel::KernelCreator creator);
   bool Merge(const std::unordered_map<kernel::KernelKey, kernel::KernelCreator> &newCreators);
   kernel::LiteKernel *GetKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                                 const PrimitiveC *primitive, const InnerContext *ctx, const kernel::KernelKey &key);
@@ -61,6 +59,7 @@ class KernelRegistrar {
   KernelRegistrar(const kernel::KernelKey &desc, kernel::KernelCreator creator) {
     KernelRegistry::GetInstance()->RegKernel(desc, creator);
   }
+  ~KernelRegistrar() = default;
   KernelRegistrar(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType op_type,
                   kernel::KernelCreator creator) {
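
The hunk above relies on static registration: a file-scope KernelRegistrar object registers a creator with the KernelRegistry singleton at program start. A minimal, self-contained sketch of that pattern (simplified types; the Registry/Registrar names here are illustrative, not the MindSpore API):

#include <functional>
#include <iostream>
#include <map>
#include <string>

using KernelCreator = std::function<void()>;

class Registry {
 public:
  static Registry *GetInstance() {
    static Registry instance;  // constructed once, on first use
    return &instance;
  }
  void RegKernel(const std::string &key, KernelCreator creator) { creators_[key] = std::move(creator); }
  void Run(const std::string &key) {
    auto it = creators_.find(key);
    if (it != creators_.end()) it->second();
  }

 private:
  std::map<std::string, KernelCreator> creators_;
};

class Registrar {
 public:
  Registrar(const std::string &key, KernelCreator creator) {
    Registry::GetInstance()->RegKernel(key, std::move(creator));
  }
  ~Registrar() = default;  // explicitly defaulted, mirroring the destructor added above
};

static Registrar g_demoKernel("demo", [] { std::cout << "demo kernel created\n"; });

int main() {
  Registry::GetInstance()->Run("demo");  // prints "demo kernel created"
  return 0;
}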

@@ -23,6 +23,7 @@ int ConvertSubGraph(const schema::SubGraph &sub_graph, Model *model) {
     MS_LOG(ERROR) << "new subGraph fail!";
     return RET_ERROR;
   }
+  MS_ASSERT(sub_graph.name() != nullptr);
   subgraph->name_ = sub_graph.name()->c_str();
   MS_ASSERT(sub_graph.inputIndices() != nullptr);
   auto in_count = sub_graph.inputIndices()->size();
@@ -68,6 +69,7 @@ const void *GetMetaGraphByVerison(const char *buf, const int &schema_version) {
 }
 int GenerateModelByVersion(const void *meta_graph, Model *model, const int &schema_version) {
+  MS_ASSERT(meta_graph != nullptr);
   MS_ASSERT(model != nullptr);
   int status = RET_ERROR;
   if (schema_version == SCHEMA_VERSION::SCHEMA_CUR) {

@@ -27,7 +27,7 @@ namespace mindspore {
 class ParamValueLite : public Value {
  public:
   ParamValueLite() : tensor_addr_(nullptr), tensor_size_(0) {}
-  virtual ~ParamValueLite() {
+  ~ParamValueLite() override {
     if (tensor_addr_ != nullptr) {
       auto tensor_mem = reinterpret_cast<char *>(tensor_addr_);
       delete[](tensor_mem);
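
The ParamValueLite hunk above swaps `virtual ~ParamValueLite()` for `~ParamValueLite() override`. A short sketch of why the static check prefers `override` on a derived class's destructor (Value/ParamLike are stand-ins here, not the real classes):

#include <cstddef>

class Value {
 public:
  virtual ~Value() = default;  // base destructor must be virtual for safe polymorphic delete
};

class ParamLike : public Value {
 public:
  ParamLike() : data_(new char[16]), size_(16) {}
  ~ParamLike() override {  // 'override' makes the compiler verify the base destructor is virtual
    delete[] data_;
    data_ = nullptr;
  }

 private:
  char *data_;
  std::size_t size_;
};

int main() {
  Value *v = new ParamLike();
  delete v;  // dispatches to ~ParamLike() through the virtual base destructor
  return 0;
}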

@@ -58,7 +58,7 @@ class DequantUtil {
       }
     }
   } else if (input_tensor->GetQuantParams().size() != kPerTensor) {
-    size_t channels = static_cast<size_t>(input_tensor->Batch());
+    auto channels = static_cast<size_t>(input_tensor->Batch());
     if (input_tensor->GetQuantParams().size() != channels) {
       MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels;
       free(dequant_datas);
@@ -136,6 +136,10 @@ class DequantUtil {
   template <typename T1, typename T2>
   static void UnPackUtil(const schema::Tensor *input_tensor, int origin_bit, void *unpack_int_data) {
+    if (input_tensor == nullptr || input_tensor->data() == nullptr) {
+      MS_LOG(ERROR) << "tensor data is null";
+      return;
+    }
     auto weight_data = input_tensor->data()->data();
     int pack_size =
       input_tensor->dataType() == kNumberTypeInt8 ? input_tensor->data()->size() : input_tensor->data()->size() / 2;
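
The UnPackUtil hunk above adds a guard before dereferencing the tensor's data buffer. A small sketch of the same defensive pattern (TensorLike is a hypothetical stand-in for schema::Tensor, not its real API):

#include <iostream>
#include <vector>

struct TensorLike {
  const std::vector<unsigned char> *data = nullptr;  // the buffer may legitimately be absent
};

void UnpackSketch(const TensorLike *tensor) {
  if (tensor == nullptr || tensor->data == nullptr) {
    std::cerr << "tensor data is null\n";  // fail loudly instead of crashing on a null dereference
    return;
  }
  std::cout << "unpacking " << tensor->data->size() << " bytes\n";
}

int main() {
  UnpackSketch(nullptr);  // rejected by the guard
  std::vector<unsigned char> buf(8);
  TensorLike t;
  t.data = &buf;
  UnpackSketch(&t);  // proceeds normally
  return 0;
}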

@@ -848,6 +848,7 @@ ThreadPool *CreateThreadPool(int thread_num, int mode) {
   if (thread_pool->thread_list == NULL) {
     LOG_ERROR("create thread list failed");
     DestroyThreadPool(thread_pool);
+    thread_pool = NULL;
     return NULL;
   }
   thread_pool->thread_list->head = NULL;
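
The thread-pool hunk above resets the local pointer to NULL right after DestroyThreadPool on the error path, so no later statement can touch the freed object. A compact sketch of that pattern with hypothetical names (PoolLike/CreatePool are not the real thread_pool API):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct PoolLike {
  int *slots;
};

static void DestroyPool(PoolLike *pool) {
  if (pool == nullptr) return;
  free(pool->slots);
  free(pool);
}

static PoolLike *CreatePool(std::size_t n) {
  auto *pool = static_cast<PoolLike *>(malloc(sizeof(PoolLike)));
  if (pool == nullptr) return nullptr;
  pool->slots = static_cast<int *>(malloc(n * sizeof(int)));
  if (pool->slots == nullptr) {
    fprintf(stderr, "create slots failed\n");
    DestroyPool(pool);
    pool = nullptr;  // mirrors `thread_pool = NULL;` added above: the dangling pointer cannot be reused
    return nullptr;
  }
  return pool;
}

int main() {
  PoolLike *pool = CreatePool(4);
  DestroyPool(pool);
  return 0;
}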

@@ -15,11 +15,7 @@
  */
 #include "tools/anf_importer/import_from_protobuf.h"
-#include <fcntl.h>
 #include <unistd.h>
-#include <fstream>
 #include <map>
 #include <memory>
 #include <stack>
@@ -243,7 +239,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
   node->set_abstract(abstract_tensor);
   if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) {
-    Tensor *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
+    auto *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
     if (tensor_info == nullptr) {
       return RET_MEMORY_FAILED;
     }
@@ -435,7 +431,7 @@ bool AnfImporterFromProtobuf::GetAttrValueForCNode(const PrimitivePtr &prim, con
   }
   if (kParseTypeSwitchMap[type] == FORM_PARSE_SCALAR) {
     if (kv.size() == 1) {
-      std::unordered_map<std::string, ValuePtr>::iterator iter = kv.begin();
+      auto iter = kv.begin();
       prim->AddAttr(attr_name, iter->second);
     } else {
       auto res = ParserScalarAttrValue(ref_attr_name, kv);
@@ -459,7 +455,7 @@ bool AnfImporterFromProtobuf::ObtainValueNodeInTensorForm(const std::string &val
   param_value->set_tensor_shape(shape_vector);
   param_value->set_tensor_type(kDefaultValueSwitchMap[attr_tensor_type]);
   const std::string &tensor_buf = attr_tensor.raw_data();
-  auto tensor_data = new (std::nothrow) char[tensor_buf.size()];
+  auto tensor_data = new (std::nothrow) char[tensor_buf.size() + 1];
   if (tensor_data == nullptr) {
     MS_LOG(ERROR) << "Tensor_data is nullptr";
     return false;
@@ -648,14 +644,14 @@ CNodePtr AnfImporterFromProtobuf::BuildCNodeForFuncGraph(const FuncGraphPtr &out
     MS_LOG(ERROR) << "funcgraph new cnode failed";
     return nullptr;
   }
-  if (0 == kv.size()) {
+  if (kv.empty()) {
     AbstractBasePtrList elem;
     for (size_t index = 1; index < cnode_ptr->inputs().size(); ++index) {
       elem.push_back(cnode_ptr->input(index)->abstract());
     }
     cnode_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem));
   } else if (1 == kv.size()) {
-    std::unordered_map<std::string, abstract::AbstractTensorPtr>::iterator iter = kv.begin();
+    auto iter = kv.begin();
     cnode_ptr->set_abstract(iter->second);
   } else {
     auto abstract = ParserAttrShape(shape_ref_attr_name, kv);

@@ -57,17 +57,17 @@ class AnfImporterFromProtobuf : public AnfImporter {
                         const schema::QuantType &quantType);
   bool BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto,
                                const CNodePtr &cnode_ptr);
-  bool GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto);
+  static bool GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto);
-  bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name,
+  static bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name,
                                  const onnx::TensorProto &attr_tensor);
-  ValuePtr ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor);
+  static ValuePtr ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor);
-  bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name,
+  static bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name,
                                    const onnx::TensorProto &attr_tensor);
   bool BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto);
   bool ObtainValueNodeInTensorForm(const string &value_node_name, const onnx::TensorProto &attr_tensor);
   bool GetAttrValueForValueNode(const std::string &value_node_name, const onnx::AttributeProto &attr_proto);
   bool ObtainValueNodeInTypeForm(const string &value_node_name, const onnx::TensorProto &attr_tensor);
-  std::unordered_map<std::string, abstract::AbstractTensorPtr> GetAbstractForCNode(
+  static std::unordered_map<std::string, abstract::AbstractTensorPtr> GetAbstractForCNode(
     const onnx::AttributeProto &attr_proto);
  private:

@@ -147,7 +147,6 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt
   scaleParam->axis = 0 - shape_size;
   mulNode->inputIndex.push_back(addBiasIndex);
   MS_ASSERT(addNode->primitive != nullptr);
-  MS_ASSERT(addNode->primitive->value != nullptr);
   MS_ASSERT(addNode->primitive->value.AsAdd() != nullptr);
   auto activationType = addNode->primitive->value.AsAdd()->activationType;
   if (activationType == ActivationType_RELU || activationType == ActivationType_RELU6 ||
@@ -163,7 +162,6 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt
   // repace addnode as activation
   std::unique_ptr<ActivationT> activationParam(new ActivationT());
   MS_ASSERT(addNode->primitive != nullptr);
-  MS_ASSERT(addNode->primitive->value != nullptr);
   MS_ASSERT(addNode->primitive->value.AsAdd() != nullptr);
   activationParam->type = addNode->primitive->value.AsAdd()->activationType;
   addNode->primitive->value.type = schema::PrimitiveType_Activation;

@@ -19,8 +19,7 @@
 #include <memory>
 #include <vector>
-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 constexpr int32_t kSingleGroup = 1;
 bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx DepthwiseConvParser";
@@ -140,6 +139,7 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
   }
   std::vector<int> weight_shape;
   auto size = (*nodeIter).dims_size();
+  weight_shape.reserve(size);
   for (int i = 0; i < size; ++i) {
     weight_shape.emplace_back((*nodeIter).dims(i));
   }
@@ -157,7 +157,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
     auto iter = std::find_if((*nodeIter).attribute().begin(), (*nodeIter).attribute().end(),
                              [](const onnx::AttributeProto &attr) { return attr.name() == "shape"; });
     if (iter != (*nodeIter).attribute().end()) {
-      MS_ASSERT(iter->ints() != nullptr);
       MS_ASSERT(iter->ints().begin() != nullptr);
       MS_ASSERT(iter->ints().end() != nullptr);
       dims.insert(dims.begin(), iter->ints().begin(), iter->ints().end());
@@ -188,5 +187,4 @@ OnnxNodeRegistrar g_onnxConvParser("Conv", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxInt8ConvParser("Int8Conv", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxConvReluParser("ConvRelu", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxInt8ConvReluParser("Int8ConvRelu", new OnnxConvParser());
-} // namespace lite
-} // namespace mindspore
+} // namespace mindspore::lite

@@ -31,7 +31,7 @@ class OnnxConvParser : public OnnxNodeParser {
   STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
  private:
-  bool ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op);
+  static bool ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op);
 };
 } // namespace lite
 } // namespace mindspore

@@ -17,8 +17,7 @@
 #include "tools/converter/parser/onnx/onnx_lp_norm_parser.h"
 #include <memory>
-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                                schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx LpNormParser";
@@ -38,13 +37,12 @@ STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
     return RET_NULL_PTR;
   }
-  auto onnx_node_attr = onnx_node.attribute();
-  for (int i = 0; i < onnx_node_attr.size(); ++i) {
-    MS_ASSERT(onnx_node_attr.at(i) != nullptr);
-    if (onnx_node_attr.at(i).name() == "axis") {
-      attr->axis = onnx_node_attr.at(i).i();
-    } else if (onnx_node_attr.at(i).name() == "p") {
-      attr->p = onnx_node_attr.at(i).i();
+  for (const auto &onnx_node_attr : onnx_node.attribute()) {
+    const auto &attribute_name = onnx_node_attr.name();
+    if (attribute_name == "axis") {
+      attr->axis = onnx_node_attr.i();
+    } else if (attribute_name == "p") {
+      attr->p = onnx_node_attr.i();
     }
   }
@@ -54,5 +52,4 @@ STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
 }
 OnnxNodeRegistrar g_onnxLpNormParser("LpNormalization", new OnnxLpNormParser());
-} // namespace lite
-} // namespace mindspore
+} // namespace mindspore::lite
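
The LpNorm parser hunk above (and the LRN parser hunk below) replaces the indexed loop over onnx_node.attribute() with a range-based for and binds the attribute name once. A sketch of the same loop shape, with a plain std::vector of a hypothetical Attribute struct standing in for the protobuf repeated field:

#include <iostream>
#include <string>
#include <vector>

struct Attribute {
  std::string name;
  long long i = 0;
};

int main() {
  std::vector<Attribute> attributes = {{"axis", -1}, {"p", 2}};
  long long axis = 0;
  long long p = 0;
  for (const auto &attr : attributes) {      // no index, no repeated .at(i) calls
    const auto &attribute_name = attr.name;  // name looked up once per attribute
    if (attribute_name == "axis") {
      axis = attr.i;
    } else if (attribute_name == "p") {
      p = attr.i;
    }
  }
  std::cout << "axis=" << axis << " p=" << p << "\n";
  return 0;
}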

@@ -17,8 +17,7 @@
 #include "tools/converter/parser/onnx/onnx_lrn_parser.h"
 #include <memory>
-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx LrnParser";
   if (op == nullptr) {
@@ -37,18 +36,17 @@ STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
     return RET_NULL_PTR;
   }
-  auto onnx_node_attr = onnx_node.attribute();
   int32_t size = 0;
-  for (int i = 0; i < onnx_node_attr.size(); ++i) {
-    MS_ASSERT(onnx_node_attr.at(i) != nullptr);
-    if (onnx_node_attr.at(i).name() == "alpha") {
-      attr->alpha = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "beta") {
-      attr->beta = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "bias") {
-      attr->bias = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "size") {
-      size = static_cast<int32_t>(onnx_node_attr.at(i).i());
+  for (const auto &onnx_node_attr : onnx_node.attribute()) {
+    const auto &attribute_name = onnx_node_attr.name();
+    if (attribute_name == "alpha") {
+      attr->alpha = onnx_node_attr.f();
+    } else if (attribute_name == "beta") {
+      attr->beta = onnx_node_attr.f();
+    } else if (attribute_name == "bias") {
+      attr->bias = onnx_node_attr.f();
+    } else if (attribute_name == "size") {
+      size = static_cast<int32_t>(onnx_node_attr.i());
       attr->depth_radius = size / 2;
     }
   }
@@ -66,5 +64,4 @@ STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
 OnnxNodeRegistrar g_onnxLrnxParser("Lrn", new OnnxLrnParser());
 OnnxNodeRegistrar g_onnxLRNxParser("LRN", new OnnxLrnParser());
-} // namespace lite
-} // namespace mindspore
+} // namespace mindspore::lite

@@ -192,22 +192,24 @@ class Calibrator {
   STATUS AddQuantizedOp(const CNodePtr &node);
-  STATUS RecordMaxValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
-  STATUS UpdateDivergInverval(std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *diverg_info);
-  STATUS UpdateDataFrequency(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
+  static STATUS RecordMaxValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
+  static STATUS UpdateDivergInverval(
+    std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *diverg_info);
+  static STATUS UpdateDataFrequency(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
   void Dump();
   STATUS ComputeThreshold();
-  std::unordered_map<CNodePtr, float> GetScale(
+  static std::unordered_map<CNodePtr, float> GetScale(
     std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);
-  std::unordered_map<CNodePtr, int32_t> GetZeropoint(
+  static std::unordered_map<CNodePtr, int32_t> GetZeropoint(
    std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);
-  std::map<CNodePtr, MaxMin> GetMinMax(std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);
+  static std::map<CNodePtr, MaxMin> GetMinMax(
+    std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);
   std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *GetInputDivergInfo();

@@ -110,8 +110,9 @@ ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) {
   parameter->set_default_param(param_value);
   return parameter;
 }
-kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs, OpParameter *parameter,
-                                  lite::InnerContext *context, mindspore::lite::PrimitiveC *primitive) {
+kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, const std::vector<Tensor *> &outputs,
+                                  OpParameter *parameter, lite::InnerContext *context,
+                                  mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(nullptr != lite_primitive);
   auto data_type = inputs.front()->data_type();
   kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, (schema::PrimitiveType)primitive->Type()};
@@ -163,15 +164,15 @@ lite::STATUS ReplaceCNode(const FuncGraphPtr &func_graph, const CNodePtr &any_no
 }  // namespace
 void FreeTensors(std::vector<Tensor *> *input_tensor, std::vector<Tensor *> *output_tensor) {
   if (input_tensor != nullptr) {
-    for (size_t i = 0; i < input_tensor->size(); i++) {
-      delete (*input_tensor)[i];
-      (*input_tensor)[i] = nullptr;
+    for (auto &i : *input_tensor) {
+      delete i;
+      i = nullptr;
     }
   }
   if (output_tensor != nullptr) {
-    for (size_t i = 0; i < output_tensor->size(); i++) {
-      delete (*output_tensor)[i];
-      (*output_tensor)[i] = nullptr;
+    for (auto &i : *output_tensor) {
+      delete i;
+      i = nullptr;
     }
   }
 }
@@ -231,9 +232,9 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
     // here, input_tensor's format need to be transposed nhwc according to fmkType,
     // but for the time being, we only transpose the tensor with 0/1/2/3D.
     // Others should be added in future.
-    for (size_t j = 0; j < input_tensors.size(); ++j) {
-      input_tensors[j]->SetFormat(schema::Format::Format_NHWC);
-      if (input_tensors[j]->shape().size() == 4) {
+    for (auto &input_tensor : input_tensors) {
+      input_tensor->SetFormat(schema::Format::Format_NHWC);
+      if (input_tensor->shape().size() == 4) {
         MS_LOG(INFO) << "init input_tensor format to nhwc";
       }
     }
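
Both loops rewritten above (FreeTensors and the input_tensors format loop) iterate by reference so each element can be modified in place. A sketch of the FreeTensors shape with a stub tensor type (TensorStub is illustrative only, not the lite::Tensor class):

#include <vector>

struct TensorStub {
  float value = 0.0f;
};

void FreeAll(std::vector<TensorStub *> *tensors) {
  if (tensors == nullptr) {
    return;
  }
  for (auto &t : *tensors) {  // reference binding: assigning t writes back into the vector
    delete t;
    t = nullptr;
  }
}

int main() {
  std::vector<TensorStub *> tensors = {new TensorStub(), new TensorStub()};
  FreeAll(&tensors);  // vector now holds two null entries; nothing leaks and nothing dangles
  return 0;
}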

@@ -233,14 +233,11 @@ const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kerne
     delete[] tmp_weight_data;
     return;
   }
-  if (tmp_weight_data != nullptr) {
-    delete[] tmp_weight_data;
-  }
+  delete[] tmp_weight_data;
 }
 const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_num, bool bias_flag,
-                                                 const float *trans_scale, const float *trans_bias) const {
+                                                 const float *trans_scale, const float *trans_bias) {
   MS_ASSERT(bias_data != nullptr);
   if (bias_flag) {
     auto tmp_bias_data = new (std::nothrow) float[kernel_num];
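
The CalNewWeightTensor hunk above drops the `if (tmp_weight_data != nullptr)` guard because `delete[]` on a null pointer is already a no-op in standard C++. A short demonstration:

#include <new>

int main() {
  float *tmp = new (std::nothrow) float[8];
  delete[] tmp;    // fine whether or not the allocation succeeded
  float *empty = nullptr;
  delete[] empty;  // also fine: deleting a null pointer does nothing
  return 0;
}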

@@ -31,7 +31,7 @@ class ConvTransformFusion : public PatternProcessPass {
   virtual const void InitTransParam(const CNodePtr &, int, float *, float *) const = 0;
   const void GenNewConvTensor(const FuncGraphPtr &, const CNodePtr &, int, const float *, const float *) const;
   const void CalNewWeightTensor(float *, int, int, const float *) const;
-  const void CalNewBiasTensor(float *, int, bool, const float *, const float *) const;
+  static const void CalNewBiasTensor(float *, int, bool, const float *, const float *);
 };
 }  // namespace mindspore::opt
 #endif // MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TRANSFORM_FUSION_H_

@@ -79,7 +79,6 @@ bool RemoveUnusedTransposeOpPass::Run(const FuncGraphPtr &func_graph) {
     MS_LOG(ERROR) << "Transpose node of onnx need to removed which has not primitiveT";
     return RET_ERROR;
   }
-  MS_ASSERT(primT->value != nullptr);
   MS_ASSERT(primT->value.AsTranspose() != nullptr);
   std::vector<int32_t> perm = primT->value.AsTranspose()->perm;
   if (perm == kPermNHWC) {
