diff --git a/ge/client/proto/insert_op.proto b/ge/client/proto/insert_op.proto
index c635ca14..bf918b20 100644
--- a/ge/client/proto/insert_op.proto
+++ b/ge/client/proto/insert_op.proto
@@ -45,6 +45,9 @@ message AippOpParams {
     // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second one, set related_input_rank to 1.
     uint32 related_input_rank = 2;

+    // related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
+    string related_input_name = 6;
+
     // input_edge_idx is optional, of integer type, and its valid range is >= 0.
     // It allows different AIPP processing on different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
     // The configured value must be <= the number of output edges of the Data operator.
diff --git a/ge/common/proto/insert_op.proto b/ge/common/proto/insert_op.proto
index c635ca14..bf918b20 100644
--- a/ge/common/proto/insert_op.proto
+++ b/ge/common/proto/insert_op.proto
@@ -45,6 +45,9 @@ message AippOpParams {
     // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second one, set related_input_rank to 1.
     uint32 related_input_rank = 2;

+    // related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
+    string related_input_name = 6;
+
     // input_edge_idx is optional, of integer type, and its valid range is >= 0.
     // It allows different AIPP processing on different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
     // The configured value must be <= the number of output edges of the Data operator.
diff --git a/ge/executor/proto/insert_op.proto b/ge/executor/proto/insert_op.proto
index c635ca14..bf918b20 100644
--- a/ge/executor/proto/insert_op.proto
+++ b/ge/executor/proto/insert_op.proto
@@ -45,6 +45,9 @@ message AippOpParams {
     // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second one, set related_input_rank to 1.
     uint32 related_input_rank = 2;

+    // related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
+    string related_input_name = 6;
+
     // input_edge_idx is optional, of integer type, and its valid range is >= 0.
     // It allows different AIPP processing on different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
     // The configured value must be <= the number of output edges of the Data operator.
diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.cc b/ge/graph/preprocess/insert_op/ge_aipp_op.cc
index a511dca0..729c47de 100755
--- a/ge/graph/preprocess/insert_op/ge_aipp_op.cc
+++ b/ge/graph/preprocess/insert_op/ge_aipp_op.cc
@@ -183,6 +183,11 @@ Status AippOp::InsertAippToGraph(ComputeGraphPtr &graph, std::string &aippConfig
   GE_CHECK_NOTNULL(graph);
   NodePtr target_input = nullptr;
   std::vector<std::pair<OutDataAnchorPtr, InDataAnchorPtr>> target_edges;
+
+  if (this->ConvertRelatedInputNameToRank() != SUCCESS) {
+    GELOGE(FAILED, "AippOp: convert related input name to rank failed.");
+    return FAILED;
+  }
   GE_CHK_STATUS_RET(this->GetTargetPosition(graph, target_input, target_edges), "Get data nodes position failed");

   std::map<OutDataAnchorPtr, NodePtr> out_anchors_to_aipp;
@@ -410,6 +415,38 @@ Status AippOp::GetStaticTargetNode(const ComputeGraphPtr &graph, NodePtr &data_n
   return SUCCESS;
 }

+Status AippOp::ConvertRelatedInputNameToRank() {
+  GE_CHECK_NOTNULL(aipp_params_);
+
+  string related_input_name = aipp_params_->related_input_name();
+  if (related_input_name.empty()) {
+    return SUCCESS;
+  }
+
+  std::vector<std::string> data_top_names = domi::GetContext().data_top_names;
+  GELOGI("Convert name to rank start: data size[%zu]", data_top_names.size());
+  uint32_t index = 0;
+  bool convert_flag = false;
+  for (const auto &data_top_name : data_top_names) {
+    if (related_input_name == data_top_name) {
+      aipp_params_->set_related_input_rank(index);
+      convert_flag = true;
+      GELOGI("AippOp: rank: %u, top name: %s.", index, data_top_name.c_str());
+      break;
+    }
+    index++;
+  }
+  if (!convert_flag) {
+    string error_msg = "Top name " + related_input_name + " convert rank failed. Please"
+                       " ensure the top name in aipp config is the top name of a Data node.";
+    ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+    GELOGE(PARAM_INVALID, "Top name[%s] convert to rank failed.", related_input_name.c_str());
+    return PARAM_INVALID;
+  }
+
+  return SUCCESS;
+}
+
 Status AippOp::GetTargetPosition(ComputeGraphPtr graph, NodePtr &target_input,
                                  std::vector<std::pair<OutDataAnchorPtr, InDataAnchorPtr>> &target_edges) {
diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.h b/ge/graph/preprocess/insert_op/ge_aipp_op.h
index 22ae0cea..5e509dda 100755
--- a/ge/graph/preprocess/insert_op/ge_aipp_op.h
+++ b/ge/graph/preprocess/insert_op/ge_aipp_op.h
@@ -82,6 +82,7 @@ class AippOp : public InsertOpBase {
   Status AddNodeToGraph(const NodePtr &aipp_node, int64_t max_dynamic_aipp_size);
   Status AddAippAttrbutes(const OpDescPtr &op_desc, const std::string &aipp_cfg_path, const uint32_t &index);
   Status AddAttrToAippData(const OpDescPtr &aipp_data_op_desc);
+  Status ConvertRelatedInputNameToRank();

   domi::AippOpParams *aipp_params_ = nullptr;
   ge::NodePtr aipp_node_ = nullptr;
diff --git a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc
index 8274ce8c..57929f83 100755
--- a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc
+++ b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc
@@ -34,6 +34,7 @@
 #include "graph/utils/op_desc_utils.h"
 #include "graph/utils/tensor_utils.h"
 #include "graph/utils/type_utils.h"
+#include "util_insert_aipp_op.h"

 using domi::AippOpParams;

@@ -115,22 +116,94 @@ void InsertNewOpUtil::ClearNewOps() {
   }
 }

-Status InsertNewOpUtil::CheckPositionNotRepeat() {
+Status InsertNewOpUtil::CheckInputNamePositionNotRepeat() {
+  for (int i = 0; i < insert_op_conf_->aipp_op_size(); i++) {
+    const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(i);
+    GE_CHECK_NOTNULL(item);
+
+    for (int j = i + 1; j < insert_op_conf_->aipp_op_size(); j++) {
+      const domi::AippOpParams *another_item = insert_op_conf_->mutable_aipp_op(j);
+      GE_CHECK_NOTNULL(another_item);
+      if (another_item->related_input_name().empty()) {
+        string error_msg = "Cannot set both related_input_name and related_input_rank!"
+                           " Please ensure the param is the same as in the first aipp config (related_input_name).";
+        ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+        GELOGE(PARAM_INVALID,
+               "Cannot set both related_input_rank and related_input_name!"
+               " Please ensure the param is the same as in the first aipp config (related_input_name).");
+        return PARAM_INVALID;
+      }
+      if (item->related_input_name() == another_item->related_input_name()) {
+        string error_msg = "Cannot insert aipp to the same position! Please ensure the related_input_name"
+                           " param is different in different aipp config.";
+        ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+        GELOGE(PARAM_INVALID,
+               "Cannot insert aipp op to the same position! Please ensure the related_input_name param "
+               "is different in different aipp config.");
+        return PARAM_INVALID;
+      }
+    }
+  }
+
+  return SUCCESS;
+}
+
+Status InsertNewOpUtil::CheckInputRankPositionNoRepeat() {
   for (int i = 0; i < insert_op_conf_->aipp_op_size(); i++) {
     const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(i);
+    GE_CHECK_NOTNULL(item);

     for (int j = i + 1; j < insert_op_conf_->aipp_op_size(); j++) {
       const domi::AippOpParams *another_item = insert_op_conf_->mutable_aipp_op(j);
-      GE_IF_BOOL_EXEC(item->related_input_rank() == another_item->related_input_rank(),
-                      string errormsg = "Can not insert aipp to the same postion! Please ensure related_input_rank"
-                                        " param is different in different aipp config.";
-                      ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {errormsg});
-                      GELOGE(PARAM_INVALID,
-                             "Can not insert aipp op to the same postion! Please ensure related_input_rank param "
-                             "is different in different aipp config.");
-                      return PARAM_INVALID;);
+      GE_CHECK_NOTNULL(another_item);
+      if (!another_item->related_input_name().empty()) {
+        string error_msg = "Cannot set both related_input_rank and related_input_name!"
+                           " Please ensure the param is the same as in the first aipp config (related_input_rank).";
+        ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+        GELOGE(PARAM_INVALID,
+               "Cannot set both related_input_rank and related_input_name!"
+               " Please ensure the param is the same as in the first aipp config (related_input_rank).");
+        return PARAM_INVALID;
+      }
+      if (item->related_input_rank() == another_item->related_input_rank()) {
+        string error_msg = "Cannot insert aipp to the same position! Please ensure the related_input_rank"
+                           " param is different in different aipp config.";
+        ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+        GELOGE(PARAM_INVALID,
+               "Cannot insert aipp op to the same position! Please ensure the related_input_rank param "
+               "is different in different aipp config.");
+        return PARAM_INVALID;
+      }
     }
   }
+
+  return SUCCESS;
+
+}
+
+Status InsertNewOpUtil::CheckPositionNotRepeat() {
+  GE_CHECK_NOTNULL(insert_op_conf_);
+
+  if (insert_op_conf_->aipp_op_size() <= 1) {
+    GELOGI("Aipp op size[%d] is less than 2, no need to check position repeat.", insert_op_conf_->aipp_op_size());
+    return SUCCESS;
+  }
+
+  const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(0);
+  GE_CHECK_NOTNULL(item);
+
+  string related_input_name = item->related_input_name();
+  Status ret = FAILED;
+  if (related_input_name.empty()) {
+    ret = CheckInputRankPositionNoRepeat();
+  } else {
+    ret = CheckInputNamePositionNotRepeat();
+  }
+  if (ret != SUCCESS) {
+    GELOGE(FAILED, "Check position not repeat failed.");
+    return FAILED;
+  }
+

   return SUCCESS;
 }
diff --git a/ge/graph/preprocess/insert_op/util_insert_aipp_op.h b/ge/graph/preprocess/insert_op/util_insert_aipp_op.h
index e785da98..52e7ed5d 100644
--- a/ge/graph/preprocess/insert_op/util_insert_aipp_op.h
+++ b/ge/graph/preprocess/insert_op/util_insert_aipp_op.h
@@ -51,6 +51,10 @@ class InsertNewOpUtil {

   Status GetAippParams(const std::unique_ptr<domi::AippOpParams> &aippParams, const ge::NodePtr &aipp_node);

+  Status CheckInputNamePositionNotRepeat();
+
+  Status CheckInputRankPositionNoRepeat();
+
   Status CheckGraph(const ge::ComputeGraphPtr &graph);

   InsertNewOpUtil() = default;
diff --git a/ge/offline/proto/insert_op.proto b/ge/offline/proto/insert_op.proto
index c635ca14..bf918b20 100644
--- a/ge/offline/proto/insert_op.proto
+++ b/ge/offline/proto/insert_op.proto
@@ -45,6 +45,9 @@ message AippOpParams {
     // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second one, set related_input_rank to 1.
     uint32 related_input_rank = 2;

+    // related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
+    string related_input_name = 6;
+
     // input_edge_idx is optional, of integer type, and its valid range is >= 0.
     // It allows different AIPP processing on different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
     // The configured value must be <= the number of output edges of the Data operator.
diff --git a/ge/proto/insert_op.proto b/ge/proto/insert_op.proto
index c635ca14..bf918b20 100644
--- a/ge/proto/insert_op.proto
+++ b/ge/proto/insert_op.proto
@@ -45,6 +45,9 @@ message AippOpParams {
     // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second one, set related_input_rank to 1.
     uint32 related_input_rank = 2;

+    // related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
+    string related_input_name = 6;
+
     // input_edge_idx is optional, of integer type, and its valid range is >= 0.
     // It allows different AIPP processing on different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
     // The configured value must be <= the number of output edges of the Data operator.
diff --git a/ge/session/omg.cc b/ge/session/omg.cc
index f7c681db..0d8e084e 100755
--- a/ge/session/omg.cc
+++ b/ge/session/omg.cc
@@ -1038,6 +1038,7 @@ void UpdateOmgCtxWithParserCtx() {
   domi::GetContext().out_top_names = GetParserContext().out_top_names;
   domi::GetContext().user_out_nodes_top_vec = GetParserContext().user_out_nodes_top_vec;
   domi::GetContext().default_out_nodes = GetParserContext().default_out_nodes;
+  domi::GetContext().data_top_names = GetParserContext().data_top_names;
 }

 void UpdateParserCtxWithOmgCtx() {
@@ -1054,5 +1055,6 @@ void UpdateParserCtxWithOmgCtx() {
   GetParserContext().input_nodes_format_map = domi::GetContext().input_nodes_format_map;
   GetParserContext().out_top_names = domi::GetContext().out_top_names;
   GetParserContext().user_out_nodes_top_vec = domi::GetContext().user_out_nodes_top_vec;
+  GetParserContext().data_top_names = domi::GetContext().data_top_names;
 }
 }  // namespace ge
diff --git a/inc/framework/omg/omg_inner_types.h b/inc/framework/omg/omg_inner_types.h
index c48d1649..6cc4afd3 100644
--- a/inc/framework/omg/omg_inner_types.h
+++ b/inc/framework/omg/omg_inner_types.h
@@ -100,6 +100,8 @@ struct OmgContext {
   std::vector<std::string> net_out_nodes;
   // net out nodes top names(only caffe has top)
   std::vector<std::string> out_top_names;
+  // net data nodes top names(only caffe has top)
+  std::vector<std::string> data_top_names;
   // preferential format used by the entire network
   domiTensorFormat_t net_format = DOMI_TENSOR_RESERVED;
   domi::FrameworkType type = domi::FRAMEWORK_RESERVED;
diff --git a/inc/framework/omg/parser/parser_inner_ctx.h b/inc/framework/omg/parser/parser_inner_ctx.h
index b57420eb..b92c6155 100644
--- a/inc/framework/omg/parser/parser_inner_ctx.h
+++ b/inc/framework/omg/parser/parser_inner_ctx.h
@@ -49,6 +49,8 @@ struct ParserContext {
   std::vector<std::string> user_out_nodes_top_vec;
   // net out nodes (where user_out_nodes or leaf nodes)
   std::vector<std::string> net_out_nodes;
+  // net data nodes top names(only caffe has top)
+  std::vector<std::string> data_top_names;
   // net out nodes top names(only caffe has top)
   std::vector<std::string> out_top_names;
   // Whether to use dynamic batch size or dynamic image size
@@ -57,9 +59,12 @@
   domi::domiTensorFormat_t format = domi::DOMI_TENSOR_ND;
   domi::FrameworkType type = domi::FRAMEWORK_RESERVED;
   RunMode run_mode = ONLY_PRE_CHECK;
-  std::string custom_proto_path;  // save caffe custom proto path, used by caffe parse
-  std::string caffe_proto_path;  // save caffe proto path, used by caffe parse
-  std::string enable_scope_fusion_passes;  // name of the pass that needs to take effect
+  // save caffe custom proto path, used by caffe parse
+  std::string custom_proto_path;
+  // save caffe proto path, used by caffe parse
+  std::string caffe_proto_path;
+  // name of the pass that needs to take effect
+  std::string enable_scope_fusion_passes;
 };

 ParserContext &GetParserContext();
diff --git a/metadef b/metadef
index 1cc55bca..5d06bc75 160000
--- a/metadef
+++ b/metadef
@@ -1 +1 @@
-Subproject commit 1cc55bcae09902b3d158993dd57bfbd1d3337066
+Subproject commit 5d06bc7547189f24195b3cedcb0bfc3d787c80a5
diff --git a/parser b/parser
index db4e6070..5af5c72f 160000
--- a/parser
+++ b/parser
@@ -1 +1 @@
-Subproject commit db4e6070bb2cec01cead264a44ceae07e7f3048e
+Subproject commit 5af5c72fba1315f3d52113a5e88dc618d68e7dbc
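
For context, the new field is consumed from the AIPP insert_op config passed to ATC via --insert_op_conf: related_input_name selects the target input by the Data node's top name (Caffe only, since only Caffe models carry top names), and ConvertRelatedInputNameToRank() maps it back onto related_input_rank against domi::GetContext().data_top_names before the existing rank-based insertion flow runs. A minimal config sketch follows, assuming a Caffe model whose input Data layer has the top name "data"; the other field names come from the existing AippOpParams message and the values are illustrative placeholders, not part of this change:

    aipp_op {
        aipp_mode : static
        related_input_name : "data"    # new in this change; takes the place of related_input_rank : 0 for this entry
        input_format : YUV420SP_U8
        src_image_size_w : 224
        src_image_size_h : 224
    }

Note that CheckPositionNotRepeat() keys off the first aipp_op entry: all entries in one config must use the same addressing style (all related_input_name or all related_input_rank), and no two entries may target the same input.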