From 655c23ef71422c42888e0ff1ebc7e5249dc46a0a Mon Sep 17 00:00:00 2001
From: wangxiaotian22
Date: Sat, 27 Feb 2021 16:24:57 +0800
Subject: [PATCH] for ut cov

---
 ge/graph/load/model_manager/davinci_model.cc |  4 ++--
 ge/graph/manager/graph_manager.cc            | 20 ++++++++++----------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc
index 740a86f5..1da5af48 100755
--- a/ge/graph/load/model_manager/davinci_model.cc
+++ b/ge/graph/load/model_manager/davinci_model.cc
@@ -337,7 +337,7 @@ Status DavinciModel::InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weigh
 
 Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) {
   if (is_feature_map_mem_has_inited_) {
-    GELOGE(PARAM_INVALID, "call InitFeatureMapMem more than once");
+    GELOGE(PARAM_INVALID, "call InitFeatureMapMem more than once.");
     return PARAM_INVALID;
   }
   is_feature_map_mem_has_inited_ = true;
@@ -360,7 +360,7 @@ Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) {
     GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc feature map memory failed. size: %zu", data_size);
     return ACL_ERROR_GE_MEMORY_ALLOCATION;
   }
-  GEEVENT("[IMAS]InitFeatureMapAndP2PMem graph_%u MallocMemory type[F] memaddr[%p] mem_size[%zu]",
+  GEEVENT("[IMAS]InitFeatureMapAndP2PMem graph_%u MallocMemory type[F] memaddr[%p] mem_size[%zu].",
           runtime_param_.graph_id, mem_base_, data_size);
 
   if (!is_inner_weight_base_) {
diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc
index 92951a89..6d1dcb04 100755
--- a/ge/graph/manager/graph_manager.cc
+++ b/ge/graph/manager/graph_manager.cc
@@ -293,7 +293,7 @@ Status GraphManager::InitDynamicParams(ComputeGraphPtr &compute_graph) {
       return FAILED;
     }
     if ((op_desc->GetType() == DATA) || (op_type == kGetNextName)) {
-      GELOGI("Need to process multi batch for compute graph.");
+      GELOGI("Need to process multi batch for compute graph. op_type:%s", op_desc->GetType().c_str());
op_type:%s", op_desc->GetType(),c_str()); GetLocalOmgContext().need_multi_batch = true; break; } @@ -348,7 +348,7 @@ Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph, for (auto &subgraph : compute_graph->GetAllSubgraphs()) { (void)AttrUtils::SetStr(*subgraph, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id); } - GELOGD("Get graph session_graph_id attr failed, set session id to default value: [0]"); + GELOGD("Get graph session_graph_id attr failed, set session id to default value: [0]."); } GraphNodePtr graph_node = MakeShared(graph_id); @@ -734,8 +734,8 @@ Status GraphManager::PreRunAfterOptimizeSubGraph(const GraphNodePtr &graph_node, } Status GraphManager::SetRtContext(rtContext_t rt_context, rtCtxMode_t mode, uint64_t session_id, uint32_t graph_id) { - GELOGD("set rt_context, session id: %lu, graph id: %u, mode %d, device id:%u.", session_id, graph_id, - static_cast(mode), ge::GetContext().DeviceId()); + GELOGD("set rt_context, session id: %lu, graph id: %u, mode %d, device id:%u.", + session_id, graph_id, static_cast(mode), ge::GetContext().DeviceId()); rtError_t rt_ret = rtCtxCreate(&rt_context, mode, ge::GetContext().DeviceId()); if (rt_ret != RT_ERROR_NONE) { @@ -758,7 +758,7 @@ Status GraphManager::RunCustomPass(const GraphNodePtr &graph_node) { GE_TIMESTAMP_START(RunCustomPass); GraphPtr graph = std::const_pointer_cast(const_graph); - GE_CHK_STATUS_RET(CustomPassHelper::Instance().Run(graph), "Graph[%s] run custom pass fail.", + GE_CHK_STATUS_RET(CustomPassHelper::Instance().Run(graph), "Graph[%s] run custom pass fail", comp_graph->GetName().c_str()); GE_TIMESTAMP_END(RunCustomPass, "GraphBuilder::RunCustomPass"); return SUCCESS; @@ -776,7 +776,7 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vectorBuildJsonObject(session_id, compute_graph->GetGraphID()), "BuildJsonObject Failed") - GEEVENT("PreRun start, graph node size %zu, session id %lu, graph id %u, graph name %s", + GEEVENT("PreRun start, graph node size %zu, session id %lu, graph id %u, graph name %s.", compute_graph->GetDirectNodesSize(), session_id, compute_graph->GetGraphID(), compute_graph->GetName().c_str()); GE_DUMP(compute_graph, "PreRunBegin"); @@ -797,7 +797,7 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vectorGetName().c_str()); + GELOGE(ret, "Run PreRunOptimizeOriginalGraph failed for graph:%s", compute_graph->GetName().c_str()); return ret; } } @@ -869,7 +869,7 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std: // release rts generate context RtContextUtil::GetInstance().DestroyRtContexts(session_id, graph_node->GetGraphId()); if (ret != SUCCESS) { - GELOGE(ret, "PreRun Failed."); + GELOGE(ret, "PreRun Failed. 
graph_id:%u", graph_node->GetGraphId()); return ret; } } @@ -1209,7 +1209,7 @@ Status GraphManager::BuildGraphForUnregisteredOp(const GraphId &graph_id, const Status GraphManager::BuildGraph(const GraphId &graph_id, const std::vector &inputs, GeRootModelPtr &ge_root_model, uint64_t session_id, bool async) { - GELOGD("[BuildGraph] start to build graph, graph_id=%u.", graph_id); + GELOGD("[BuildGraph] start to build graph, graph_id:%u.", graph_id); if (inputs.empty()) { GELOGW("[BuildGraph] BuildGraph warning: empty GeTensor inputs"); } @@ -1241,7 +1241,7 @@ Status GraphManager::BuildGraph(const GraphId &graph_id, const std::vectorSetRunFlag(false); if (ret != SUCCESS) { - GELOGE(GE_GRAPH_PRERUN_FAILED, "[BuildGraph] StartForRunGraph failed!"); + GELOGE(GE_GRAPH_PRERUN_FAILED, "[BuildGraph] StartForRunGraph failed! graph_id:%u", graph_id); return GE_GRAPH_PRERUN_FAILED; }