!8777 modify static check to Master

From: @lyvette
Reviewed-by: @hangangqiang,@hangangqiang
Signed-off-by: @hangangqiang,@hangangqiang
pull/8777/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit 44cb24204d

@ -89,7 +89,7 @@ void AnfExporter::RemoveIfDepend(const CNodePtr &cnode) {
}
int AnfExporter::ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &meta_graph,
const std::shared_ptr<PrimitiveC> primitive,
const std::shared_ptr<PrimitiveC> &primitive,
const std::unique_ptr<schema::CNodeT> &dst_node) {
MS_ASSERT(meta_graph != nullptr);
MS_ASSERT(primitive != nullptr);
@ -173,7 +173,7 @@ void AnfExporter::SetGraphInputIndex(const std::unique_ptr<schema::MetaGraphT> &
int AnfExporter::SetGraphoutputIndex(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
schema::CNodeT *return_node) {
MS_ASSERT(nullptr != meta_graph);
MS_ASSERT(nullptr != meta_graphT);
MS_ASSERT(nullptr != return_node);
for (size_t i = 1; i < cnode->inputs().size(); i++) {
auto input_node = cnode->input(i);
@ -191,8 +191,8 @@ int AnfExporter::SetGraphoutputIndex(const CNodePtr &cnode, const std::unique_pt
return RET_ERROR;
}
}
for (size_t i = 0; i < return_node->inputIndex.size(); ++i) {
meta_graphT->outputIndex.push_back(return_node->inputIndex[i]);
for (unsigned int &i : return_node->inputIndex) {
meta_graphT->outputIndex.push_back(i);
}
return RET_OK;
}
@ -272,7 +272,7 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph, bool kee
return meta_graphT.release();
}
int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode) {
int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> &input_anode, schema::CNodeT *output_cnode) {
std::string input_name = input_anode->fullname_with_scope();
auto input_cnode = utils::cast<CNodePtr>(input_anode);
@ -336,7 +336,7 @@ int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, s
return RET_OK;
}
int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> &input_anode,
const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
schema::CNodeT *output_cnode) {
auto paramNode = input_anode->cast<ParameterPtr>();
@ -382,7 +382,7 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anod
return RET_OK;
}
int AnfExporter::ConvertInputValueNode(std::shared_ptr<AnfNode> input_anode,
int AnfExporter::ConvertInputValueNode(const std::shared_ptr<AnfNode> &input_anode,
const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
schema::CNodeT *output_cnode) {
auto valueNode = input_anode->cast<ValueNodePtr>();
@ -478,7 +478,7 @@ int AnfExporter::ConvertInputValueNode(std::shared_ptr<AnfNode> input_anode,
int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
schema::CNodeT *fb_node) {
MS_ASSERT(nullptr != meta_graph);
MS_ASSERT(nullptr != meta_graphT);
MS_ASSERT(nullptr != fb_node);
if (cnode->inputs().size() <= 1) {
return RET_OK;
@ -518,14 +518,14 @@ int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr<sch
void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
schema::CNodeT *fb_node) {
MS_ASSERT(nullptr != graph);
MS_ASSERT(nullptr != meta_graphT);
MS_ASSERT(nullptr != fb_node);
std::string cnode_name = fb_node->name;
if (utils::isa<abstract::AbstractTuple>(cnode->abstract())) {
auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(cnode->abstract());
for (size_t i = 0; i < tuple->size(); i++) {
auto msTensor = new schema::TensorT();
auto msTensor = new (std::nothrow) schema::TensorT();
msTensor->nodeType = schema::NodeType_CNode;
fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size());
#ifdef SUPPORT_TRAIN
@ -552,7 +552,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s
#endif
}
} else {
auto ms_tensor = new schema::TensorT();
auto ms_tensor = new (std::nothrow) schema::TensorT();
ms_tensor->nodeType = schema::NodeType_CNode;
ms_tensor->dataType = TypeId::kNumberTypeFloat32;
fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size());
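Note: the exporter hunks above switch bare new schema::TensorT() to new (std::nothrow) schema::TensorT(). With std::nothrow a failed allocation returns nullptr instead of throwing std::bad_alloc, which only pays off when the caller checks the pointer before use. A minimal standalone sketch of the pattern (the TensorT struct below is a hypothetical stand-in, not the schema type):

#include <iostream>
#include <new>

// Hypothetical stand-in for schema::TensorT, only to make the sketch compile.
struct TensorT {
  int nodeType = 0;
};

int main() {
  // new (std::nothrow) yields nullptr on allocation failure instead of throwing.
  auto *tensor = new (std::nothrow) TensorT();
  if (tensor == nullptr) {
    std::cerr << "new TensorT failed" << std::endl;
    return 1;
  }
  tensor->nodeType = 1;
  delete tensor;
  return 0;
}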

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_ANF_EXPORTER_ANF_EXPORTER_H_
#define MINDSPORE_LITE_SRC_ANF_EXPORTER_ANF_EXPORTER_H_
#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_EXPORTER_ANF_EXPORTER_H_
#define MINDSPORE_LITE_TOOLS_COMMON_ANF_EXPORTER_ANF_EXPORTER_H_
#include <map>
#include <string>
@ -36,21 +36,22 @@ class AnfExporter {
schema::CNodeT *fb_node);
int SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
schema::CNodeT *fb_node);
void RemoveIfMakeTuple(const CNodePtr &cnode);
void RemoveIfDepend(const CNodePtr &cnode);
static void RemoveIfMakeTuple(const CNodePtr &cnode);
static void RemoveIfDepend(const CNodePtr &cnode);
protected:
int ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode);
int ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
int ConvertInputCNode(const std::shared_ptr<AnfNode> &input_anode, schema::CNodeT *output_cnode);
int ConvertInputParameter(const std::shared_ptr<AnfNode> &input_anode,
const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode);
int ConvertInputValueNode(std::shared_ptr<AnfNode> input_anode,
int ConvertInputValueNode(const std::shared_ptr<AnfNode> &input_anode,
const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode);
void SetGraphInputIndex(const std::unique_ptr<schema::MetaGraphT> &meta_graphT);
int SetGraphoutputIndex(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
schema::CNodeT *return_node);
bool IsPrimitiveCNode(const AnfNodePtr &node, schema::PrimitiveType type);
int ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &meta_graph,
const std::shared_ptr<PrimitiveC> primitive, const std::unique_ptr<schema::CNodeT> &dst_node);
static bool IsPrimitiveCNode(const AnfNodePtr &node, schema::PrimitiveType type);
static int ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &meta_graph,
const std::shared_ptr<PrimitiveC> &primitive,
const std::unique_ptr<schema::CNodeT> &dst_node);
private:
std::map<std::string, int> node_id_map_;
@ -62,4 +63,4 @@ class AnfExporter {
// and clear.
schema::MetaGraphT *Export(const FuncGraphPtr &func_graph, bool keep_graph = false, bool copy_primitive = false);
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_SRC_ANF_EXPORTER_ANF_EXPORTER_H_
#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_EXPORTER_ANF_EXPORTER_H_
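Note: the header changes above apply two recurring static-check fixes: member functions that never touch instance state (RemoveIfMakeTuple, IsPrimitiveCNode, ConvertQuantParam, ...) become static, and std::shared_ptr parameters are taken by const reference so each call stops copying the pointer and bumping its atomic reference count. A rough illustration of both, using a hypothetical Node type rather than AnfNode:

#include <iostream>
#include <memory>
#include <string>

// Hypothetical node type standing in for AnfNode.
struct Node {
  std::string name;
};

class Exporter {
 public:
  // const reference: no shared_ptr copy, no ref-count traffic per call.
  static std::string NodeName(const std::shared_ptr<Node> &node) {
    return node ? node->name : "<null>";
  }
};

int main() {
  auto node = std::make_shared<Node>();
  node->name = "conv_1";
  // A static member needs no Exporter instance.
  std::cout << Exporter::NodeName(node) << std::endl;
  return 0;
}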

@ -15,8 +15,6 @@
*/
#include <utility>
#include <memory>
#include <vector>
#include "tools/anf_importer/anf_importer.h"
#include "schema/model_generated.h"
#include "ir/dtype.h"

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_ANF_IMPORTER_ANF_IMPORTER_H_
#define MINDSPORE_LITE_SRC_ANF_IMPORTER_ANF_IMPORTER_H_
#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_
#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_
#include <unordered_map>
#include "ir/func_graph.h"
@ -51,4 +51,4 @@ class AnfImporter {
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_SRC_ANF_IMPORTER_ANF_IMPORTER_H_
#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_
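Note: the include guards are renamed so that they track the files' new location under tools/common instead of the old src path. A hedged sketch of the resulting guard layout for a hypothetical header (EXAMPLE is not a real file in the tree):

#ifndef MINDSPORE_LITE_TOOLS_COMMON_EXAMPLE_H_
#define MINDSPORE_LITE_TOOLS_COMMON_EXAMPLE_H_

namespace mindspore::lite {
// declarations would go here
}  // namespace mindspore::lite

#endif  // MINDSPORE_LITE_TOOLS_COMMON_EXAMPLE_H_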

@ -22,7 +22,6 @@
#include "src/param_value_lite.h"
#include "src/common/log_adapter.h"
#include "include/errorcode.h"
#include "tools/common/tensor_util.h"
namespace mindspore::lite {
int AnfImporterFromMetaGraphT::ConverterConstTensor() {
@ -31,11 +30,9 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
for (size_t i = 0; i < meta_graph_->allTensors.size(); i++) {
auto &tensor = meta_graph_->allTensors.at(i);
MS_ASSERT(tensor != nullptr);
// converter weight and graph input into parameter node
if (tensor->nodeType != schema::NodeType::NodeType_ValueNode) {
continue;
}
MS_ASSERT(tensor->dims() != nullptr);
auto parameter = func_graph_->add_parameter();
std::vector<int> shape(tensor->dims.size());
std::copy(tensor->dims.begin(), tensor->dims.end(), shape.begin());
@ -45,11 +42,12 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
(void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector),
[](const int32_t &value) { return static_cast<int64_t>(value); });
auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector);
MS_ASSERT(nullptr != abstract_tensor);
parameter->set_abstract(abstract_tensor);
parameter->set_name("const_" + std::to_string(i) + "_parameter");
ParamValueLitePtr param_value = std::make_shared<ParamValueLite>();
MS_ASSERT(param_value != nullptr);
MS_ASSERT(nullptr != param_value);
param_value->set_tensor_shape(shape);
param_value->set_tensor_type(type_id);
param_value->set_format(tensor->format);
@ -123,7 +121,9 @@ abstract::AbstractTensorPtr AnfImporterFromMetaGraphT::ConvertTensorToAbstractTe
std::vector<int64_t> shape_vector;
(void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector),
[](const int32_t &value) { return static_cast<int64_t>(value); });
return std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector);
auto ptr = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector);
MS_ASSERT(nullptr != ptr);
return ptr;
}
int AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode,
@ -175,15 +175,16 @@ int AnfImporterFromMetaGraphT::ConverterCNode() {
return RET_NULL_PTR;
}
std::vector<AnfNodePtr> op_inputs = {anf_primitive};
for (unsigned int j : cNode->inputIndex) {
for (int j : cNode->inputIndex) {
auto node = GetNode(j);
if (nullptr == node) {
MS_LOG(ERROR) << "Can't find input node.";
return RET_ERROR;
return RET_NULL_PTR;
}
op_inputs.push_back(node);
}
auto new_cnode = func_graph_->NewCNode(op_inputs);
MS_ASSERT(nullptr != new_cnode);
new_cnode->set_fullname_with_scope(cNode->name);
auto status = ConvertAbstract(cNode, new_cnode);
if (status != RET_OK) {
@ -195,10 +196,8 @@ int AnfImporterFromMetaGraphT::ConverterCNode() {
}
int AnfImporterFromMetaGraphT::AddReturnCNode() {
if (meta_graph_ == nullptr || func_graph_ == nullptr) {
MS_LOG(ERROR) << "meta_graph or func_graph is nullptr";
return RET_NULL_PTR;
}
MS_ASSERT(nullptr != meta_graph_);
MS_ASSERT(nullptr != func_graph_);
if (meta_graph_->outputIndex.size() > 1) {
std::vector<AnfNodePtr> make_tuple_inputs;
auto make_tuple_prim_ptr = GetMakeTuplePrim();
@ -229,6 +228,7 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
op_inputs.emplace_back(value_node);
op_inputs.emplace_back(make_tuple_cnode);
auto cnode = func_graph_->NewCNode(op_inputs);
MS_ASSERT(nullptr != cnode);
cnode->set_fullname_with_scope("return");
func_graph_->set_return(cnode);
} else {
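Note: several hunks in this file and in the exporter replace explicit index loops with range-based for over the index vectors (return_node->inputIndex, cNode->inputIndex). A small self-contained sketch of the equivalence, on a plain std::vector<uint32_t> rather than the schema types:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<uint32_t> input_index = {3, 5, 8};
  std::vector<uint32_t> output_index;

  // Index-based form the diff removes:
  //   for (size_t i = 0; i < input_index.size(); ++i)
  //     output_index.push_back(input_index[i]);

  // Range-based form the diff adds:
  for (uint32_t idx : input_index) {
    output_index.push_back(idx);
  }

  for (uint32_t idx : output_index) {
    std::cout << idx << " ";
  }
  std::cout << std::endl;
  return 0;
}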

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_
#define MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_
#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_
#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_
#include <utility>
#include <memory>
@ -40,7 +40,9 @@ class AnfImporterFromMetaGraphT : public AnfImporter {
int ConverterCNode() override;
ValueNodePtr ConvertPrimitive(const std::unique_ptr<schema::CNodeT> &cNode);
abstract::AbstractTensorPtr ConvertTensorToAbstractTensor(const std::unique_ptr<schema::TensorT> &tensor);
static abstract::AbstractTensorPtr ConvertTensorToAbstractTensor(const std::unique_ptr<schema::TensorT> &tensor);
int ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, const CNodePtr &dst_cnode);
int AddReturnCNode() override;
@ -51,4 +53,4 @@ class AnfImporterFromMetaGraphT : public AnfImporter {
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_
#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_

@ -239,7 +239,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
node->set_abstract(abstract_tensor);
if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) {
auto *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
auto *tensor_info = new (std::nothrow) Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
if (tensor_info == nullptr) {
return RET_MEMORY_FAILED;
}
@ -345,7 +345,6 @@ ValuePtr AnfImporterFromProtobuf::ObtainCNodeAttrInScalarForm(const onnx::Tensor
MS_LOG(ERROR) << "Obtain attr in scalar-form has not support input type: " << attr_tensor_type;
return {};
}
return {};
}
bool AnfImporterFromProtobuf::ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name,
@ -871,7 +870,7 @@ int AnfImporterFromProtobuf::Import(const schema::QuantType &quantType) {
}
onnx::ModelProto *AnfImporterFromProtobuf::ReadOnnxFromBinary(const std::string &model_path) {
auto onnx_model = new onnx::ModelProto;
auto onnx_model = new (std::nothrow) onnx::ModelProto;
if (RET_OK != ValidateFileStr(model_path, ".mindir")) {
MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.mindir";
ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_INPUT_PARAM_INVALID);

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_
#define MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_
#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_
#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_
#include <map>
#include <string>
@ -81,4 +81,4 @@ class AnfImporterFromProtobuf : public AnfImporter {
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_
#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_

@ -24,7 +24,6 @@ Option<std::string> FlagParser::ParseFlags(int argc, const char *const *argv, bo
bool supportDuplicate) {
MS_ASSERT(argv != nullptr);
const int FLAG_PREFIX_LEN = 2;
// Get binary name
binName = GetFileName(argv[0]);
std::multimap<std::string, Option<std::string>> keyValues;
@ -45,9 +44,7 @@ Option<std::string> FlagParser::ParseFlags(int argc, const char *const *argv, bo
Option<std::string> value = Option<std::string>(None());
size_t pos = flagItem.find_first_of('=');
if (pos == std::string::npos && flagItem.find("--no-") != std::string::npos) {
key = flagItem.substr(FLAG_PREFIX_LEN);
} else if (pos == std::string::npos) {
if (pos == std::string::npos) {
key = flagItem.substr(FLAG_PREFIX_LEN);
} else {
key = flagItem.substr(FLAG_PREFIX_LEN, pos - FLAG_PREFIX_LEN);
@ -81,10 +78,10 @@ bool FlagParser::GetRealFlagName(std::string *flagName, const std::string &oriFl
// Inner parse function
Option<std::string> FlagParser::InnerParseFlags(std::multimap<std::string, Option<std::string>> *keyValues) {
MS_ASSERT(keyValues != nullptr);
for (auto it = keyValues->begin(); it != keyValues->end(); ++it) {
for (auto &keyValue : *keyValues) {
std::string flagName;
bool opaque = GetRealFlagName(&flagName, (*it).first);
Option<std::string> flagValue = (*it).second;
bool opaque = GetRealFlagName(&flagName, keyValue.first);
Option<std::string> flagValue = keyValue.second;
auto item = flags.find(flagName);
if (item == flags.end()) {
@ -133,7 +130,7 @@ Option<std::string> FlagParser::InnerParseFlags(std::multimap<std::string, Optio
return Option<std::string>(None());
}
void Replaceall(std::string *str, const std::string &oldValue, const std::string &newValue) {
void ReplaceAll(std::string *str, const std::string &oldValue, const std::string &newValue) {
if (str == nullptr) {
MS_LOG(ERROR) << "Input str is nullptr";
return;
@ -153,9 +150,9 @@ std::string FlagParser::Usage(const Option<std::string> &usgMsg) const {
std::string usageString = usgMsg.IsSome() ? usgMsg.Get() + "\n" : "";
// usage of bin name
usageString += usageMsg.IsNone() ? "\nusage: " + binName + " [options]\n" : usageMsg.Get() + "\n";
// help line of help message, usageLine:message of parametors
std::string helpLine = "";
std::string usageLine = "";
// help line of help message, usageLine:message of parameters
std::string helpLine;
std::string usageLine;
uint32_t i = 0;
for (auto flag = flags.begin(); flag != flags.end(); flag++) {
std::string flagName = flag->second.flagName;
@ -165,7 +162,7 @@ std::string FlagParser::Usage(const Option<std::string> &usgMsg) const {
if (++i <= flags.size()) {
// add parameter help message of each line
thisLine += " " + helpInfo;
Replaceall(&helpInfo, "\n\r", "\n");
ReplaceAll(&helpInfo, "\n\r", "\n");
usageLine += thisLine + "\n";
} else {
// breif help message
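Note: InnerParseFlags above moves from iterator arithmetic to a range-based loop over the std::multimap, where each element is a std::pair of key and value, so keyValue.first and keyValue.second replace (*it).first and (*it).second. A compact, self-contained version of that iteration pattern (the flag names below are made up):

#include <iostream>
#include <map>
#include <string>

int main() {
  std::multimap<std::string, std::string> keyValues = {
      {"configFile", "a.cfg"}, {"outputFile", "b.ms"}, {"configFile", "c.cfg"}};

  // Range-based iteration; duplicate keys ("configFile") are visited in order.
  for (const auto &keyValue : keyValues) {
    std::cout << keyValue.first << " = " << keyValue.second << std::endl;
  }
  return 0;
}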

@ -14,21 +14,18 @@
* limitations under the License.
*/
#ifndef PREDICT_COMMON_FLAG_PARSER_H_
#define PREDICT_COMMON_FLAG_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_COMMON_FLAG_PARSER_H
#define MINDSPORE_LITE_TOOLS_COMMON_FLAG_PARSER_H
#include <functional>
#include <map>
#include <utility>
#include <string>
#include "src/common/utils.h"
#include "tools/common/option.h"
namespace mindspore {
namespace lite {
struct FlagInfo;
struct Nothing {};
class FlagParser {
@ -44,6 +41,7 @@ class FlagParser {
template <typename Flags, typename T1, typename T2>
void AddFlag(T1 *t1, const std::string &flagName, const std::string &helpInfo, const T2 *t2);
template <typename Flags, typename T1, typename T2>
void AddFlag(T1 *t1, const std::string &flagName, const std::string &helpInfo, const T2 &t2);
@ -94,7 +92,7 @@ class FlagParser {
Option<std::string> InnerParseFlags(std::multimap<std::string, Option<std::string>> *values);
bool GetRealFlagName(std::string *flagName, const std::string &oriFlagName);
static bool GetRealFlagName(std::string *flagName, const std::string &oriFlagName);
std::map<std::string, FlagInfo> flags;
};
@ -181,7 +179,7 @@ void FlagParser::AddFlag(T1 *t1, const std::string &flagName, const std::string
FlagInfo flagItem;
// flagItem is as a output parameter
// flagItem is as an output parameter
ConstructFlag(t1, flagName, helpInfo, flagItem);
flagItem.parse = [t1](FlagParser *base, const std::string &value) -> Option<Nothing> {
if (base != nullptr) {
@ -301,4 +299,4 @@ void FlagParser::AddFlag(Option<T> Flags::*t, const std::string &flagName, const
} // namespace lite
} // namespace mindspore
#endif // PREDICT_COMMON_FLAG_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_COMMON_FLAG_PARSER_H

@ -15,8 +15,7 @@
*/
#include "tools/common/graph_util.h"
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <utility>
#include <set>
#include "schema/inner/model_generated.h"
@ -29,7 +28,10 @@ namespace mindspore {
namespace lite {
OpDefCopyer GetSimpleOpCopyer() {
return [](CNodeT *inCNode) -> std::unique_ptr<CNodeT> {
std::unique_ptr<CNodeT> newCNode(new CNodeT);
std::unique_ptr<CNodeT> newCNode = std::make_unique<CNodeT>();
if (newCNode == nullptr) {
return nullptr;
}
newCNode->name = inCNode->name;
newCNode->quantType = inCNode->quantType;
@ -163,8 +165,6 @@ STATUS IsolateNode(schema::MetaGraphT *graphT, CNodeT *node) {
}
}
// whether need to remove weightInputTensores
// remove all node's outputTensors
RemoveTensor(graphT, outputTensorIdxes);
node->inputIndex.clear();
node->outputIndex.clear();
@ -183,8 +183,11 @@ STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, size_t nodeIdx, bool remove
MS_LOG(ERROR) << "nodeIdx out of range: " << nodeIdx;
return RET_PARAM_INVALID;
}
CNodeT *node = graphT->nodes.at(nodeIdx).get();
if (node == nullptr) {
MS_LOG(ERROR) << "node is null";
return RET_NULL_PTR;
}
auto inputTensorIdxes = node->inputIndex;
auto outputTensorIdxes = node->outputIndex;
auto preNodeIdxes = GetInputNodeIdx(*graphT, nodeIdx);
@ -244,6 +247,7 @@ STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, CNodeT *node, bool removeTe
size_t nodeIdx = 0;
for (size_t i = 0; i < graphT->nodes.size(); i++) {
auto &inNode = graphT->nodes.at(i);
MS_ASSERT(inNode != nullptr);
if (inNode->name == node->name) {
isSubNode = true;
nodeIdx = i;
@ -259,6 +263,7 @@ STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, CNodeT *node, bool removeTe
}
STATUS RemoveTensor(schema::MetaGraphT *graphT, std::vector<uint32_t> toDeleteTensorIdxes, bool forceDelete) {
MS_ASSERT(graphT != nullptr);
for (auto iter = toDeleteTensorIdxes.begin(); iter != toDeleteTensorIdxes.end();) {
uint32_t deleteIdx = *iter;
if (!forceDelete) {
@ -297,6 +302,7 @@ STATUS RemoveTensor(schema::MetaGraphT *graphT, std::vector<uint32_t> toDeleteTe
}
STATUS UpdateNodeIndex(CNodeT *node, uint32_t deleteIdx) {
MS_ASSERT(node != nullptr);
for (auto inIdxIt = node->inputIndex.begin(); inIdxIt != node->inputIndex.end();) {
if (*inIdxIt == deleteIdx) {
inIdxIt = node->inputIndex.erase(inIdxIt);
@ -330,6 +336,7 @@ STATUS AddTensor2Node(schema::MetaGraphT *graphT, uint32_t nodeIdx, std::unique_
graphT->allTensors.emplace_back(std::move(tensor));
uint32_t newTensorIdx = graphT->allTensors.size() - 1;
auto node = graphT->nodes.at(nodeIdx).get();
MS_ASSERT(node != nullptr);
if (place == kBefore) {
node->inputIndex.emplace_back(newTensorIdx);
} else {
@ -340,11 +347,13 @@ STATUS AddTensor2Node(schema::MetaGraphT *graphT, uint32_t nodeIdx, std::unique_
STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_t inTensorIdx,
std::unique_ptr<TensorT> tensor) {
MS_ASSERT(graphT != nullptr);
if (nodeIdx >= graphT->nodes.size()) {
MS_LOG(ERROR) << "nodeIdx out of range: " << nodeIdx;
return RET_PARAM_INVALID;
}
auto node = graphT->nodes.at(nodeIdx).get();
MS_ASSERT(node != nullptr);
if (inTensorIdx >= graphT->allTensors.size()) {
MS_LOG(ERROR) << "inTensorIdx out of range: " << nodeIdx;
return RET_PARAM_INVALID;
@ -358,7 +367,9 @@ STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_
}
NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPlace place, size_t inoutIndex,
std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, OpDefCopyer opDefCopyer) {
std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer) {
MS_ASSERT(graphT != nullptr);
MS_ASSERT(errorCode != nullptr);
if (existNodeIdx >= graphT->nodes.size()) {
MS_LOG(ERROR) << "nodeIdx out of range: " << existNodeIdx;
return graphT->nodes.end();
@ -370,7 +381,9 @@ NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPla
}
NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPlace place, size_t inoutIndexIdx,
std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, OpDefCopyer opDefCopyer) {
std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer) {
MS_ASSERT(graphT != nullptr);
MS_ASSERT(errorCode != nullptr);
if (place == kBefore) {
return InsertNodeBefore(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, opDefCopyer);
} else if (place == kAfter) {
@ -382,7 +395,9 @@ NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPl
}
NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t inputIndexIdx,
std::unique_ptr<CNodeT> toAddNodeIn, STATUS *errorCode, OpDefCopyer opDefCopyer) {
std::unique_ptr<CNodeT> toAddNodeIn, STATUS *errorCode, const OpDefCopyer &opDefCopyer) {
MS_ASSERT(graphT != nullptr);
MS_ASSERT(errorCode != nullptr);
auto &existNode = *existNodeIter;
MS_ASSERT(existNode != nullptr);
MS_ASSERT(existNode->inputIndex.size() > inputIndexIdx);
@ -390,7 +405,7 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si
auto preTensorIdx = existNode->inputIndex.at(inputIndexIdx);
MS_ASSERT(graphT->allTensors.size() > preTensorIdx);
auto preNodeIdxes = GetInputNodeIdx(*graphT, *(existNode.get()), inputIndexIdx);
auto preNodeIdxes = GetInputNodeIdx(*graphT, *(existNode), inputIndexIdx);
if (preNodeIdxes.empty()) {
auto &preTensor = graphT->allTensors.at(preTensorIdx);
MS_ASSERT(preTensor != nullptr);
@ -402,9 +417,12 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si
}
preTensor->refCount = 0;
preTensor->data.clear();
MS_ASSERT(toAddNodeIn->primitive != nullptr);
if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) {
preTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT;
toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT;
auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast();
MS_ASSERT(prim != nullptr);
preTensor->dataType = prim->srcT;
toAddTensor->dataType = prim->dstT;
}
graphT->allTensors.emplace_back(std::move(toAddTensor));
size_t toAddTensorIdx = graphT->allTensors.size() - 1;
@ -438,9 +456,12 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si
MS_LOG(ERROR) << "Copy TensorT failed";
return graphT->nodes.end();
}
MS_ASSERT(toAddNodeIn->primitive != nullptr);
if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) {
preTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT;
toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT;
auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast();
MS_ASSERT(prim != nullptr);
preTensor->dataType = prim->srcT;
toAddTensor->dataType = prim->dstT;
}
graphT->allTensors.emplace_back(std::move(toAddTensor));
size_t toAddTensorIdx = graphT->allTensors.size() - 1;
@ -473,7 +494,10 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si
}
NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t outputIndexIdx,
std::unique_ptr<schema::CNodeT> toAddNodeIn, STATUS *errorCode, OpDefCopyer opDefCopyer) {
std::unique_ptr<schema::CNodeT> toAddNodeIn, STATUS *errorCode,
const OpDefCopyer &opDefCopyer) {
MS_ASSERT(graphT != nullptr);
MS_ASSERT(errorCode != nullptr);
auto &existNode = *existNodeIter;
MS_ASSERT(existNode != nullptr);
MS_ASSERT(existNode->outputIndex.size() > outputIndexIdx);
@ -481,7 +505,7 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz
auto postTensorIdx = existNode->outputIndex.at(outputIndexIdx);
MS_ASSERT(graphT->allTensors.size() > postTensorIdx);
auto postNodeIdxes = GetOutputNodeIdx(*graphT, *(existNode.get()), outputIndexIdx);
auto postNodeIdxes = GetOutputNodeIdx(*graphT, *(existNode), outputIndexIdx);
if (postNodeIdxes.empty()) {
auto &postTensor = graphT->allTensors.at(postTensorIdx);
MS_ASSERT(postTensor != nullptr);
@ -491,9 +515,12 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz
*errorCode = RET_NULL_PTR;
return graphT->nodes.end();
}
MS_ASSERT(toAddNodeIn->primitive != nullptr);
if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) {
postTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT;
toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT;
auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast();
MS_ASSERT(prim != nullptr);
postTensor->dataType = prim->srcT;
toAddTensor->dataType = prim->dstT;
}
graphT->allTensors.emplace_back(std::move(toAddTensor));
size_t toAddTensorIdx = graphT->allTensors.size() - 1;
@ -554,9 +581,12 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz
*errorCode = RET_NULL_PTR;
return graphT->nodes.end();
}
MS_ASSERT(toAddNodeIn->primitive != nullptr);
if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) {
postTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT;
toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT;
auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast();
MS_ASSERT(prim != nullptr);
postTensor->dataType = prim->srcT;
toAddTensor->dataType = prim->dstT;
}
graphT->allTensors.emplace_back(std::move(toAddTensor));
size_t toAddTensorIdx = graphT->allTensors.size() - 1;
@ -589,13 +619,9 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz
return existNodeIter;
}
STATUS ValidateFileStr(const std::string &modelFile, std::string fileType) {
if (modelFile.size() > fileType.size()) {
if (modelFile.substr(modelFile.size() - fileType.size()) == fileType) {
return RET_OK;
} else {
return RET_ERROR;
}
STATUS ValidateFileStr(const std::string &modelFile, const std::string &fileType) {
if (modelFile.size() > fileType.size() && modelFile.substr(modelFile.size() - fileType.size()) == fileType) {
return RET_OK;
} else {
return RET_ERROR;
}
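Note: two cleanups dominate graph_util.cc: a raw new CNodeT assigned into a unique_ptr becomes std::make_unique<CNodeT>(), and the nested if/else in ValidateFileStr collapses into a single suffix check on the file name. A hedged standalone sketch of that suffix check (the helper name HasSuffix is illustrative; the real function returns STATUS):

#include <iostream>
#include <string>

// Returns true when modelFile ends with fileType, e.g. ".mindir".
bool HasSuffix(const std::string &modelFile, const std::string &fileType) {
  return modelFile.size() > fileType.size() &&
         modelFile.substr(modelFile.size() - fileType.size()) == fileType;
}

int main() {
  std::cout << std::boolalpha
            << HasSuffix("lenet.mindir", ".mindir") << " "       // true
            << HasSuffix("lenet.onnx", ".mindir") << std::endl;  // false
  return 0;
}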

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_PREDICT_GRAPH_UTIL_H
#define MINDSPORE_PREDICT_GRAPH_UTIL_H
#ifndef MINDSPORE_LITE_TOOLS_COMMON_GRAPH_UTIL_H
#define MINDSPORE_LITE_TOOLS_COMMON_GRAPH_UTIL_H
#include <cstdlib>
#include <unordered_map>
@ -23,7 +23,6 @@
#include <string>
#include <memory>
#include <vector>
#include "include/errorcode.h"
#include "schema/inner/model_generated.h"
#include "src/common/graph_util.h"
@ -73,19 +72,19 @@ STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_
NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPlace place, size_t inoutIndex,
std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode,
OpDefCopyer opDefCopyer = GetSimpleOpCopyer());
const OpDefCopyer &opDefCopyer = GetSimpleOpCopyer());
NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPlace place, size_t inoutIndexIdx,
std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode,
OpDefCopyer opDefCopyer = GetSimpleOpCopyer());
const OpDefCopyer &opDefCopyer = GetSimpleOpCopyer());
NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t inputIndexIdx,
std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, OpDefCopyer opDefCopyer);
std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer);
NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t outputIndexIdx,
std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, OpDefCopyer opDefCopyer);
std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer);
STATUS ValidateFileStr(const std::string &modelFile, std::string fileType);
STATUS ValidateFileStr(const std::string &modelFile, const std::string &fileType);
void TransformAttrByAxes(int *origin_attr, int *axes, int element_size);
@ -97,4 +96,4 @@ std::string GetModelName(const std::string &modelFile);
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_PREDICT_GRAPH_UTIL_H
#endif // MINDSPORE_LITE_TOOLS_COMMON_GRAPH_UTIL_H

@ -160,6 +160,7 @@ std::vector<schema::PrimitiveType> GetInt8OpList() { return int8OpList; }
STATUS NodeUtils::ConvertDims(mindspore::schema::Format src_format, const std::vector<int32_t> &src_dims,
mindspore::schema::Format dst_format, std::vector<int32_t> *dst_dims) {
MS_ASSERT(nullptr != dst_dims);
if ((src_dims.size() != DIM_DEFAULT_SIZE && src_dims.size() != 3) || src_format == dst_format) {
MS_LOG(ERROR) << "Convert format , src size " << src_dims.size()
<< " <3 or src format is equal to dst format,not need convert";
@ -189,7 +190,7 @@ STATUS NodeUtils::ConvertDims(mindspore::schema::Format src_format, const std::v
return RET_ERROR;
}
if (nchw_dim.size() == 0) {
if (nchw_dim.empty()) {
MS_LOG(ERROR) << "Param nchw_dim is empty!";
return RET_ERROR;
}
@ -215,6 +216,10 @@ STATUS NodeUtils::ConvertDims(mindspore::schema::Format src_format, const std::v
STATUS GetFilterDim(const std::vector<int32_t> &oriDims, kTransFilterType type, int32_t *filterK, int32_t *filterC,
int32_t *filterH, int32_t *filterW) {
if (filterK == nullptr || filterC == nullptr || filterH == nullptr || filterW == nullptr) {
MS_LOG(ERROR) << "null input";
return RET_NULL_PTR;
}
MS_ASSERT(oriDims.size() == 4);
if (type == kKCHW2HWCK || type == kKCHW2HWKC || type == kKCHW2KHWC || type == kKCHW2CKHW) {
*filterK = oriDims.at(KCHW_K);
@ -282,6 +287,7 @@ STATUS SetFilterDim(schema::TensorT *tensor, kTransFilterType type, int32_t filt
STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) {
if (tensor == nullptr) {
MS_LOG(ERROR) << "tensor is null";
return RET_NULL_PTR;
}
std::vector<int32_t> oriDims = tensor->dims;

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_PREDICT_NODE_UTIL_H
#define MINDSPORE_PREDICT_NODE_UTIL_H
#ifndef MINDSPORE_LITE_TOOLS_COMMON_NODE_UTIL_H
#define MINDSPORE_LITE_TOOLS_COMMON_NODE_UTIL_H
#include <memory>
#include <vector>
@ -60,13 +60,6 @@ class NodeUtils {
public:
static STATUS ConvertDims(schema::Format src_format, const std::vector<int32_t> &src_dims, schema::Format dst_format,
std::vector<int32_t> *dst_dims);
static void SliceData(std::vector<char *> &input, int64_t chunk_size, std::vector<char *> &output, int64_t begin,
int64_t out_dim, int64_t stride);
static STATUS SetOutputSliceData(void *data, int64_t data_size, int32_t data_type, std::vector<int32_t> &input_dims,
std::vector<int32_t> &begin, std::vector<int32_t> &output_dims,
schema::TensorT *output, std::vector<int32_t> &stride);
};
enum kTransFilterType {
@ -133,7 +126,7 @@ static STATUS TransFilterData(schema::TensorT *tensor, kTransFilterType type, in
if (type == kCHWK2HWCK) {
p2Buff =
buf.get() + ((h * filterW * filterC * filterK) + (w * filterC * filterK) + (c * filterK) + (k));
} else if (type == kCHWK2KHWC) {
} else {
p2Buff =
buf.get() + ((k * filterH * filterW * filterC) + (h * filterW * filterC) + (w * filterC) + (c));
}
@ -334,4 +327,4 @@ static STATUS TransFilterFormat(schema::TensorT *tensor, kTransFilterType type)
STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat);
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_PREDICT_NODE_UTIL_H
#endif // MINDSPORE_LITE_TOOLS_COMMON_NODE_UTIL_H

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef PREDICT_COMMON_OPTION_H_
#define PREDICT_COMMON_OPTION_H_
#ifndef MINDSPORE_LITE_TOOLS_COMMON_OPTION_H
#define MINDSPORE_LITE_TOOLS_COMMON_OPTION_H
#include <type_traits>
#include <utility>
@ -56,7 +56,7 @@ class Option {
}
}
virtual ~Option() {}
virtual ~Option() = default;
bool IsNone() const { return state == NONE; }
@ -116,4 +116,4 @@ class Option {
} // namespace lite
} // namespace mindspore
#endif // PREDICT_COMMON_OPTION_H_
#endif // MINDSPORE_LITE_TOOLS_COMMON_OPTION_H

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
#ifndef MINDSPORE_LITE_TOOLS_COMMON_PROTOBUF_UTILS_H
#define MINDSPORE_LITE_TOOLS_COMMON_PROTOBUF_UTILS_H
#include <string>
#include <vector>
@ -35,4 +35,4 @@ STATUS ReadProtoFromBinaryFile(const char *file, google::protobuf::Message *mess
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
#endif // MINDSPORE_LITE_TOOLS_COMMON_PROTOBUF_UTILS_H

@ -50,7 +50,7 @@ int Storage::Save(const schema::MetaGraphT &graph, const std::string &outputPath
}
schema::MetaGraphT *Storage::Load(const std::string &inputPath) {
size_t size;
size_t size = 0;
auto buf = ReadFile(inputPath.c_str(), &size);
if (buf == nullptr) {
MS_LOG(ERROR) << "the file buffer is nullptr";
@ -58,7 +58,7 @@ schema::MetaGraphT *Storage::Load(const std::string &inputPath) {
}
flatbuffers::Verifier verify((const uint8_t *)buf, size);
if (false == schema::VerifyMetaGraphBuffer(verify)) {
if (!schema::VerifyMetaGraphBuffer(verify)) {
MS_LOG(ERROR) << "the buffer is invalid and fail to create meta graph";
return nullptr;
}

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef PREDICT_COMMON_STORAGE_H_
#define PREDICT_COMMON_STORAGE_H_
#ifndef MINDSPORE_LITE_TOOLS_COMMON_STORAGE_H
#define MINDSPORE_LITE_TOOLS_COMMON_STORAGE_H
#include <fstream>
#include <string>
@ -27,11 +27,11 @@ namespace mindspore {
namespace lite {
class Storage {
public:
int Save(const schema::MetaGraphT &graph, const std::string &outputPath);
static int Save(const schema::MetaGraphT &graph, const std::string &outputPath);
schema::MetaGraphT *Load(const std::string &inputPath);
static schema::MetaGraphT *Load(const std::string &inputPath);
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_COMMON_STORAGE_H_
#endif // MINDSPORE_LITE_TOOLS_COMMON_STORAGE_H

@ -14,7 +14,6 @@
* limitations under the License.
*/
#include <cfloat>
#include "src/common/utils.h"
#include "tools/common/tensor_util.h"
#include "tools/common/graph_util.h"

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_PREDICT_TENSOR_UTIL_H
#define MINDSPORE_PREDICT_TENSOR_UTIL_H
#ifndef MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H
#define MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H
#include <cmath>
#include <unordered_map>
@ -58,13 +58,11 @@ std::unique_ptr<schema::QuantParamT> CopyQuantParamT(const std::unique_ptr<schem
std::unique_ptr<schema::QuantParamT> CopyQuantParamArrayT(
const std::unique_ptr<schema::QuantParamT> &srcQuantParamArray);
using MSGraphDefTPtr = std::shared_ptr<schema::MetaGraphT>;
enum Category { CONST = 0, GRAPH_INPUT = 1, OP_OUTPUT = 2, TF_CONST = 3 };
class TensorCache {
public:
TensorCache() {}
TensorCache() = default;
~TensorCache() { tensors.clear(); }
@ -97,12 +95,12 @@ class TensorCache {
return -1;
}
void UpdateTensorIndex(const std::string &name, int index) {
void UpdateTensorIndex(const std::string &name, int idx) {
auto iter = tensorIndex.find(name);
if (iter != tensorIndex.end()) {
tensorIndex[name] = index;
tensorIndex[name] = idx;
} else {
tensorIndex.insert(make_pair(name, index));
tensorIndex.insert(make_pair(name, idx));
}
}
@ -120,4 +118,4 @@ class TensorCache {
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_PREDICT_TENSOR_UTIL_H
#endif // MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H

@ -38,17 +38,16 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto, const caffe:
return RET_NULL_PTR;
}
// set default params
attr->outMaxValue = false;
attr->topK = 1;
const caffe::ArgMaxParameter argmaxParam = proto.argmax_param();
const caffe::ArgMaxParameter &argmaxParam = proto.argmax_param();
if (argmaxParam.has_out_max_val()) {
attr->outMaxValue = argmaxParam.out_max_val();
}
if (argmaxParam.has_top_k()) {
attr->topK = argmaxParam.top_k();
}
int32_t axisType;
int32_t axisType = 0;
int32_t axis = 0;
if (!argmaxParam.has_axis()) {
axisType = 2;

@ -26,7 +26,8 @@ namespace lite {
class CaffeArgMaxParser : public CaffeNodeParser {
public:
CaffeArgMaxParser() : CaffeNodeParser("argmax") {}
~CaffeArgMaxParser() = default;
~CaffeArgMaxParser() override = default;
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};

@ -19,12 +19,6 @@
#include <memory>
#include "tools/common/tensor_util.h"
#define CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT 0.00001
#define CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT 0.000000001
static const int CAFFE_BATCHNORMAL_BOTTOM_SIZE = 1;
static const int CAFFE_BATCHNORMAL_TOP_SIZE = 1;
namespace mindspore {
namespace lite {
using STATUS = int;
@ -32,6 +26,10 @@ using STATUS = int;
STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeBatchNormParser";
if (weightVec == nullptr) {
MS_LOG(ERROR) << "weightVec is null";
return RET_NULL_PTR;
}
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@ -48,43 +46,38 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
return RET_NULL_PTR;
}
const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param();
// check bottom size
if (proto.bottom_size() != CAFFE_BATCHNORMAL_BOTTOM_SIZE) {
MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be "
<< CAFFE_BATCHNORMAL_BOTTOM_SIZE << "but is " << proto.bottom_size();
const caffe::BatchNormParameter &batchNormParam = proto.batch_norm_param();
if (proto.bottom_size() != 1) {
MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be 1, but is "
<< proto.bottom_size();
return RET_ERROR;
}
// check top size
if (proto.top_size() != CAFFE_BATCHNORMAL_TOP_SIZE) {
MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be "
<< CAFFE_BATCHNORMAL_TOP_SIZE << "but is " << proto.top_size();
if (proto.top_size() != 1) {
MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be 1, but is "
<< proto.top_size();
return RET_ERROR;
}
if (batchNormParam.has_eps()) {
if (fabs(CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT - batchNormParam.eps()) < CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT) {
attr->epsilon = CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT;
if (std::fabs(1e-5 - batchNormParam.eps()) < 1e-9) {
attr->epsilon = 1e-5;
} else {
auto tmpAuto = batchNormParam.eps();
attr->epsilon = tmpAuto;
}
} else {
attr->epsilon = CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT;
attr->epsilon = 1e-5;
}
const float blob2Data =
(weight.blobs(2).double_data_size() > 0) ? weight.blobs(2).double_data(0) : weight.blobs(2).data(0);
const float scaleFactor = blob2Data == 0 ? 0 : 1 / blob2Data;
// parse weight gamma
auto gamma = ConvertWeight(weight.blobs(0));
if (gamma == nullptr) {
MS_LOG(ERROR) << "Convert blobs(0) for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
auto estimatedMean = reinterpret_cast<float *>(gamma->data.data());
auto estimatedMeanShapeSize = GetShapeSize(*gamma);
for (size_t i = 0; i < estimatedMeanShapeSize; i++) {
@ -93,13 +86,11 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
estimatedMean = nullptr;
weightVec->push_back(gamma);
// parse weight beta
auto beta = ConvertWeight(weight.blobs(1));
if (beta == nullptr) {
MS_LOG(ERROR) << "Convert blobs(1) for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
auto estimatedVariance = reinterpret_cast<float *>(beta->data.data());
size_t estimatedVarianceShapeSize = GetShapeSize(*beta);
for (size_t i = 0; i < estimatedVarianceShapeSize; i++) {
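Note: the batch-norm parser drops the CAFFE_BATCH_NORM_ESP_DEFAULT_* macros in favour of the literals 1e-5 and 1e-9: when the proto carries an eps within 1e-9 of 1e-5 it is snapped back to exactly 1e-5, otherwise the proto value is kept, and 1e-5 is the fallback when eps is absent. A standalone sketch of that selection logic (the function name and signature are illustrative only):

#include <cmath>
#include <iostream>

// Mirrors the epsilon handling shown above: values that are effectively the
// Caffe default (1e-5) are snapped back to exactly 1e-5.
float SelectEpsilon(bool has_eps, float eps_from_proto) {
  if (!has_eps) {
    return 1e-5f;
  }
  if (std::fabs(1e-5f - eps_from_proto) < 1e-9f) {
    return 1e-5f;
  }
  return eps_from_proto;
}

int main() {
  std::cout << SelectEpsilon(false, 0.0f) << " "          // 1e-05
            << SelectEpsilon(true, 1e-5f) << " "          // 1e-05
            << SelectEpsilon(true, 0.001f) << std::endl;  // 0.001
  return 0;
}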

@ -26,6 +26,7 @@ namespace lite {
class CaffeBatchNormParser : public CaffeNodeParser {
public:
CaffeBatchNormParser() : CaffeNodeParser("batchnorm") {}
~CaffeBatchNormParser() override = default;
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;

@ -17,8 +17,6 @@
#include "tools/converter/parser/caffe/caffe_concat_parser.h"
#include <memory>
const int32_t CONCAT_DEFAULT_AXIS = 1;
namespace mindspore {
namespace lite {
STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
@ -40,7 +38,7 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, const caffe:
return RET_NULL_PTR;
}
const caffe::ConcatParameter concatParam = proto.concat_param();
const caffe::ConcatParameter &concatParam = proto.concat_param();
if (concatParam.has_axis() && concatParam.has_concat_dim()) {
MS_LOG(ERROR) << "Concat param in caffe have concat_dim and axis simultaneously, return fail";
return RET_ERROR;
@ -48,19 +46,19 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, const caffe:
if (concatParam.has_concat_dim()) {
MS_LOG(DEBUG) << "Concat dim , set axis: " << concatParam.concat_dim();
int32_t concat_dim_value = (int32_t)concatParam.concat_dim();
auto concat_dim_value = (int32_t)concatParam.concat_dim();
if (concat_dim_value < 0) {
MS_LOG(ERROR) << "concat_dim value in model is smaller than 0:" << concat_dim_value;
return RET_ERROR;
}
attr->axis = concat_dim_value;
} else if (concatParam.has_axis()) {
MS_LOG(DEBUG) << "axis , set axis: " << concatParam.axis();
int32_t tmpInt = (int32_t)concatParam.axis();
MS_LOG(DEBUG) << "set axis: " << concatParam.axis();
auto tmpInt = (int32_t)concatParam.axis();
attr->axis = tmpInt;
} else {
MS_LOG(DEBUG) << "default , set axis: " << CONCAT_DEFAULT_AXIS;
attr->axis = CONCAT_DEFAULT_AXIS;
MS_LOG(DEBUG) << "by default, set axis = 1";
attr->axis = 1;
}
attr->n = proto.bottom_size();

Some files were not shown because too many files have changed in this diff.