From 50552c3631cf8889ea3b7a7489249c4b6795742c Mon Sep 17 00:00:00 2001 From: zhaoxinxin Date: Mon, 29 Mar 2021 14:05:55 +0800 Subject: [PATCH 01/14] modified: ge/graph/passes/base_pass.cc modified: ge/graph/passes/base_pass.h modified: ge/graph/passes/infershape_pass.cc modified: ge/hybrid/executor/hybrid_model_async_executor.cc modified: ge/hybrid/executor/subgraph_executor.cc modified: ge/hybrid/node_executor/aicore/aicore_op_task.cc --- ge/graph/passes/base_pass.cc | 103 +++++++++++------- ge/graph/passes/base_pass.h | 11 ++ ge/graph/passes/infershape_pass.cc | 15 +++ .../executor/hybrid_model_async_executor.cc | 2 + ge/hybrid/executor/subgraph_executor.cc | 4 +- .../node_executor/aicore/aicore_op_task.cc | 2 - 6 files changed, 91 insertions(+), 46 deletions(-) diff --git a/ge/graph/passes/base_pass.cc b/ge/graph/passes/base_pass.cc index 3b854c18..0868b729 100755 --- a/ge/graph/passes/base_pass.cc +++ b/ge/graph/passes/base_pass.cc @@ -30,8 +30,15 @@ constexpr int kMaxRePassTimes = 10000; constexpr size_t kMaxOneInNodes = 1000; // Each iteration, we take about 0.3k memory on the stack, we should change the recursion to loop later constexpr int kMaxRecursiveDepth = 20; +struct DuringPassNodeSets { + std::unordered_set nodes_seen; + std::unordered_set nodes_deleted; + std::unordered_set nodes_re_pass; + std::unordered_set nodes_re_pass_immediately; + std::unordered_set nodes_last; +}; -void GetAllNodesNoInputEdge(const ComputeGraphPtr &graph, std::queue &input_edge_nodes, +void GetAllNodesNoInputEdge(const ComputeGraphPtr &graph, std::deque &input_edge_nodes, std::unordered_set &nodes_seen, std::unordered_set &nodes_last) { nodes_last.clear(); for (auto &node : graph->GetDirectNode()) { @@ -40,7 +47,7 @@ void GetAllNodesNoInputEdge(const ComputeGraphPtr &graph, std::queue &i } size_t in_nums = node->GetInNodes().size(); if (in_nums == 0) { - input_edge_nodes.push(node); + input_edge_nodes.push_back(node); nodes_seen.insert(node.get()); } else if (in_nums > kMaxOneInNodes) { nodes_last.insert(node); @@ -48,7 +55,7 @@ void GetAllNodesNoInputEdge(const ComputeGraphPtr &graph, std::queue &i } } -void AddNextIterNodes(const Node::Vistor &nodes, std::queue &nodes_to_pass, +void AddNextIterNodes(const Node::Vistor &nodes, std::deque &nodes_to_pass, std::unordered_set &nodes_seen, std::unordered_set &nodes_last) { for (auto &node : nodes) { if (node == nullptr) { @@ -60,13 +67,30 @@ void AddNextIterNodes(const Node::Vistor &nodes, std::queue &n bool all_in_nodes_seen = node->IsAllInNodesSeen(nodes_seen); if (all_in_nodes_seen && nodes_seen.insert(node.get()).second) { - nodes_to_pass.push(node); + nodes_to_pass.push_back(node); } } } -Status RunPasses(NodePtr &node, const NamesToPass &names_to_passes, std::unordered_set &nodes_re_pass, - std::unordered_set &nodes_deleted, std::unordered_set &nodes_seen) { +void PushToRePassIfSeen(NodePtr &node, const std::pair &name_to_pass, + std::unordered_set &nodes_seen, std::unordered_set &nodes_to_re_pass, + std::unordered_set &nodes_re_pass) { + for (const auto &node_to_re_pass : nodes_to_re_pass) { + if (node_to_re_pass == nullptr) { + GELOGW("Found null re-pass node when executing %s on node %s type %s", name_to_pass.first.c_str(), + node->GetName().c_str(), node->GetType().c_str()); + continue; + } + if (nodes_seen.count(node_to_re_pass.get()) > 0 || node_to_re_pass->IsAllInNodesSeen(nodes_seen)) { + GELOGD("The node %s will be re-pass.", node_to_re_pass->GetName().c_str()); + nodes_re_pass.insert(node_to_re_pass); + } else { + GELOGD("The node %s are 
not all seen, don't set repass this time", node_to_re_pass->GetName().c_str()); + } + } +} + +Status RunPasses(NodePtr &node, const NamesToPass &names_to_passes, DuringPassNodeSets &during_pass_node_set) { if (node == nullptr) { GELOGE(FAILED, "parameter is null."); return FAILED; @@ -90,22 +114,15 @@ Status RunPasses(NodePtr &node, const NamesToPass &names_to_passes, std::unorder } auto nodes_to_re_pass = name_to_pass.second->GetNodesNeedRePass(); - for (const auto &node_to_re_pass : nodes_to_re_pass) { - if (node_to_re_pass == nullptr) { - GELOGW("Found null re-pass node when executing %s on node %s type %s", name_to_pass.first.c_str(), - node->GetName().c_str(), node->GetType().c_str()); - continue; - } - if (nodes_seen.count(node_to_re_pass.get()) > 0 || node_to_re_pass->IsAllInNodesSeen(nodes_seen)) { - GELOGD("The node %s will be re-pass later", node_to_re_pass->GetName().c_str()); - nodes_re_pass.insert(node_to_re_pass); - } else { - GELOGD("The node %s are not all seen, don't set repass this time", node_to_re_pass->GetName().c_str()); - } - } + PushToRePassIfSeen(node, name_to_pass, during_pass_node_set.nodes_seen, nodes_to_re_pass, + during_pass_node_set.nodes_re_pass); + + auto nodes_to_re_pass_immediately = name_to_pass.second->GetNodesNeedRePassImmediately(); + PushToRePassIfSeen(node, name_to_pass, during_pass_node_set.nodes_seen, nodes_to_re_pass_immediately, + during_pass_node_set.nodes_re_pass_immediately); auto nodes_deleted_by_pass = name_to_pass.second->GetNodesDeleted(); - nodes_deleted.insert(nodes_deleted_by_pass.begin(), nodes_deleted_by_pass.end()); + during_pass_node_set.nodes_deleted.insert(nodes_deleted_by_pass.begin(), nodes_deleted_by_pass.end()); if (nodes_deleted_by_pass.count(node) > 0) { GELOGD("The node %s was deleted by pass %s, stop the remain passes", node->GetName().c_str(), name_to_pass.first.c_str()); @@ -181,36 +198,33 @@ Status GEPass::Run(const NamesToPass &names_to_passes) { Status GEPass::RunPassesOneGraph(const NamesToPass &names_to_passes) { GELOGD("Begin to run pass on graph, passes count %zu", names_to_passes.size()); - std::queue nodes; - std::unordered_set nodes_seen; - std::unordered_set nodes_deleted; - std::unordered_set nodes_re_pass; - std::unordered_set nodes_last; - GetAllNodesNoInputEdge(graph_, nodes, nodes_seen, nodes_last); + std::deque nodes; + DuringPassNodeSets during_pass_node_set; + GetAllNodesNoInputEdge(graph_, nodes, during_pass_node_set.nodes_seen, during_pass_node_set.nodes_last); GELOGD("Start points count %zu", nodes.size()); int re_pass_times = 0; do { - for (auto &node : nodes_re_pass) { - nodes.push(node); - nodes_seen.insert(node.get()); + for (auto &node : during_pass_node_set.nodes_re_pass) { + nodes.push_back(node); + during_pass_node_set.nodes_seen.insert(node.get()); } - nodes_re_pass.clear(); + during_pass_node_set.nodes_re_pass.clear(); while (!nodes.empty()) { NodePtr node = nodes.front(); - nodes.pop(); + nodes.pop_front(); - (void)nodes_re_pass.erase(node); + (void)during_pass_node_set.nodes_re_pass.erase(node); GE_IF_BOOL_EXEC(node == nullptr, GELOGW("node is null"); continue); - if (nodes_deleted.count(node) > 0) { + if (during_pass_node_set.nodes_deleted.count(node) > 0) { GELOGD("The node %s was deleted before, skip it.", node->GetName().c_str()); continue; } - AddNextIterNodes(node->GetOutNodes(), nodes, nodes_seen, nodes_last); + AddNextIterNodes(node->GetOutNodes(), nodes, during_pass_node_set.nodes_seen, during_pass_node_set.nodes_last); - auto ret = RunPasses(node, names_to_passes, 
nodes_re_pass, nodes_deleted, nodes_seen); + auto ret = RunPasses(node, names_to_passes, during_pass_node_set); if (ret != SUCCESS) { GELOGE(ret, "Failed to process passes on node %s type %s, error code: %u", node->GetName().c_str(), node->GetType().c_str(), ret); @@ -227,7 +241,7 @@ Status GEPass::RunPassesOneGraph(const NamesToPass &names_to_passes) { if (has_sub_graph) { GELOGD("There are subgraphs on node %s, run passes for for the second time", node->GetName().c_str()); SetFlagOption(kOptimizeAfterSubGraph, names_to_passes); - ret = RunPasses(node, names_to_passes, nodes_re_pass, nodes_deleted, nodes_seen); + ret = RunPasses(node, names_to_passes, during_pass_node_set); if (ret != SUCCESS) { GELOGE(ret, "Failed to process passes on node %s type %s, error code: %u", node->GetName().c_str(), node->GetType().c_str(), ret); @@ -239,16 +253,21 @@ Status GEPass::RunPassesOneGraph(const NamesToPass &names_to_passes) { // should be called each time at the begin of the iteration ClearOption(names_to_passes); } + for (const auto &node : during_pass_node_set.nodes_re_pass_immediately) { + GELOGD("The node %s will be re-pass immediately.", node->GetName().c_str()); + nodes.push_front(node); + } + during_pass_node_set.nodes_re_pass_immediately.clear(); } - for (auto &node : nodes_last) { - bool all_in_nodes_seen = node->IsAllInNodesSeen(nodes_seen); - if (all_in_nodes_seen && nodes_seen.insert(node.get()).second) { - nodes.push(node); + for (auto &node : during_pass_node_set.nodes_last) { + bool all_in_nodes_seen = node->IsAllInNodesSeen(during_pass_node_set.nodes_seen); + if (all_in_nodes_seen && during_pass_node_set.nodes_seen.insert(node.get()).second) { + nodes.push_back(node); } } - nodes_last.clear(); - } while ((!nodes_re_pass.empty() || !nodes.empty()) && ++re_pass_times < kMaxRePassTimes); + during_pass_node_set.nodes_last.clear(); + } while ((!during_pass_node_set.nodes_re_pass.empty() || !nodes.empty()) && ++re_pass_times < kMaxRePassTimes); if (re_pass_times == kMaxRePassTimes) { GELOGW("re_pass_times should not come to %d", kMaxRePassTimes); diff --git a/ge/graph/passes/base_pass.h b/ge/graph/passes/base_pass.h index bb41691d..89a364a9 100644 --- a/ge/graph/passes/base_pass.h +++ b/ge/graph/passes/base_pass.h @@ -53,6 +53,8 @@ class BaseNodePass { std::unordered_set GetNodesNeedRePass() { return nodes_need_re_pass_; } + std::unordered_set GetNodesNeedRePassImmediately() { return nodes_need_re_pass_immediately_; } + std::unordered_set GetNodesDeleted() { return nodes_deleted_; } void SetOption(NodePassOption option, const std::string &value) { options_[option] = value; } @@ -79,6 +81,14 @@ class BaseNodePass { /// void AddRePassNode(NodePtr &node) { nodes_need_re_pass_.insert(node); } + /// + /// Add a node to be optimized immediately again. If you add a new node to the graph, or + /// change a node connections, and you want to make sure the node will be + /// optimized by other passes, call this function. + /// @param node + /// + void AddImmediateRePassNode(NodePtr &node) { nodes_need_re_pass_immediately_.insert(node); } + /// /// Add a node and it's input/output data nodes to be optimized again. 
/// @param node @@ -109,6 +119,7 @@ class BaseNodePass { private: std::unordered_set nodes_need_re_pass_; + std::unordered_set nodes_need_re_pass_immediately_; std::unordered_set nodes_deleted_; std::map options_; }; diff --git a/ge/graph/passes/infershape_pass.cc b/ge/graph/passes/infershape_pass.cc index 7b8f7b50..a54a15c1 100755 --- a/ge/graph/passes/infershape_pass.cc +++ b/ge/graph/passes/infershape_pass.cc @@ -25,6 +25,7 @@ namespace ge { Status InferShapePass::Run(NodePtr &node) { + // kOptimizeAfterSubGraph exist means after subgraph auto ret = ShapeRefiner::InferShapeAndType(node, !OptionExists(kOptimizeAfterSubGraph)); if (ret != GRAPH_SUCCESS) { // select INFERSHAPE failed info @@ -41,6 +42,20 @@ Status InferShapePass::Run(NodePtr &node) { GELOGE(GE_GRAPH_INFERSHAPE_FAILED, "infershape failed. node: %s", node->GetName().c_str()); return GE_GRAPH_INFERSHAPE_FAILED; } + bool need_repass = false; + auto has_attr = AttrUtils::GetBool(node->GetOpDesc(), "need_infer_again_", need_repass); + if (has_attr) { + if (!OptionExists(kOptimizeAfterSubGraph)) { + return SUCCESS; + } + if (need_repass) { + AddImmediateRePassNode(node); + GELOGD("Node %s need repass immediately.", node->GetName().c_str()); + } else { + // clear attr on while + node->GetOpDesc()->DelAttr("need_infer_again_"); + } + } return SUCCESS; } } // namespace ge diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc index 9f37e7d5..0194a492 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.cc +++ b/ge/hybrid/executor/hybrid_model_async_executor.cc @@ -67,6 +67,7 @@ Status HybridModelAsyncExecutor::Start(const std::shared_ptr &lis future_ = std::async(std::launch::async, [&]() -> Status { GetThreadLocalContext() = *executor_->GetContext()->ge_context; GetContext().SetSessionId(executor_->GetContext()->session_id); + GetContext().SetContextId(executor_->GetContext()->context_id); return RunInternal(); }); @@ -166,6 +167,7 @@ Status HybridModelAsyncExecutor::RunInternal() { } else { GELOGI("HybridModel will execute in singleline mode"); ge::GetContext().SetSessionId(executor_->GetContext()->session_id); + ge::GetContext().SetContextId(executor_->GetContext()->context_id); ret = executor_->Execute(args); } ret = HandleResult(ret, current_data.index, args, data_wrapper->GetOutput()); diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc index 45db9936..57e4052d 100644 --- a/ge/hybrid/executor/subgraph_executor.cc +++ b/ge/hybrid/executor/subgraph_executor.cc @@ -227,6 +227,7 @@ Status SubgraphExecutor::PrepareNodes(int group) { if (node_item.is_dynamic) { auto prepare_future = pre_run_pool_.commit([this, p_node_state]() -> Status { GetContext().SetSessionId(context_->session_id); + GetContext().SetContextId(context_->context_id); GE_CHK_STATUS_RET_NOLOG(InferShape(shape_inference_engine_.get(), *p_node_state)); return PrepareForExecution(context_, *p_node_state); }); @@ -273,10 +274,8 @@ Status SubgraphExecutor::PrepareNodes(int group) { } Status SubgraphExecutor::InferShape(ShapeInferenceEngine *shape_inference_engine, NodeState &node_state) const { - GetContext().SetSessionId(context_->context_id); HYBRID_CHK_STATUS_RET(shape_inference_engine->InferShape(node_state), "[%s] Failed to InferShape.", node_state.GetName().c_str()); - GetContext().SetSessionId(context_->session_id); HYBRID_CHK_STATUS_RET(shape_inference_engine->PropagateOutputShapes(node_state), "[%s] Failed to PropagateOutputShapes.", 
node_state.GetName().c_str()); return SUCCESS; @@ -345,6 +344,7 @@ Status SubgraphExecutor::ScheduleTasks(int group) { GELOGD("[%s] Start to schedule prepare workers.", graph_item_->GetName().c_str()); auto prepare_future = std::async(std::launch::async, [&]() -> Status { GetContext().SetSessionId(context_->session_id); + GetContext().SetContextId(context_->context_id); auto ret = PrepareNodes(group); ready_queue_.Push(nullptr); return ret; diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index 07c2ddb5..6af2fd4a 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -307,11 +307,9 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { auto execution_context = context.GetExecutionContext(); - GetContext().SetSessionId(execution_context->context_id); RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] Start"); GE_CHK_STATUS_RET(CalcTilingInfo(node, tiling_info)); RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] End"); - GetContext().SetSessionId(execution_context->session_id); // update op args by tiling info block_dim_ = static_cast(tiling_info.block_dim); From 8e0634323d69d4f0aa7ffe478e583269057f072d Mon Sep 17 00:00:00 2001 From: zhaoxinxin Date: Mon, 29 Mar 2021 20:43:27 +0800 Subject: [PATCH 02/14] modified: ge/graph/passes/base_pass.h --- ge/graph/passes/base_pass.h | 1 + 1 file changed, 1 insertion(+) diff --git a/ge/graph/passes/base_pass.h b/ge/graph/passes/base_pass.h index 89a364a9..a9f4f000 100644 --- a/ge/graph/passes/base_pass.h +++ b/ge/graph/passes/base_pass.h @@ -64,6 +64,7 @@ class BaseNodePass { void init() { nodes_need_re_pass_.clear(); nodes_deleted_.clear(); + nodes_need_re_pass_immediately_.clear(); } protected: From 4a7f623b1210d8ca6bf9ed7f83145598d2bb7e06 Mon Sep 17 00:00:00 2001 From: chuxing Date: Tue, 30 Mar 2021 10:20:58 +0800 Subject: [PATCH 03/14] while loop failed to restore input desc --- ge/hybrid/executor/node_state.cc | 10 ++-- ge/hybrid/model/node_item.cc | 36 +++++++++++- ge/hybrid/model/node_item.h | 13 ++++- .../controlop/control_op_executor.cc | 56 ++++++++----------- .../controlop/control_op_executor.h | 1 - ge/hybrid/node_executor/task_context.cc | 11 ++++ ge/hybrid/node_executor/task_context.h | 3 + 7 files changed, 88 insertions(+), 42 deletions(-) diff --git a/ge/hybrid/executor/node_state.cc b/ge/hybrid/executor/node_state.cc index 3834478c..99fe8593 100644 --- a/ge/hybrid/executor/node_state.cc +++ b/ge/hybrid/executor/node_state.cc @@ -35,12 +35,14 @@ ShapeInferenceState::ShapeInferenceState(const NodeItem &node_item) : node_item( node_item.NodeName().c_str(), this->num_pending_shapes_); - for (int i = 0; i < node_item.num_inputs; ++i){ - input_tensor_desc.emplace_back(*node_item.MutableInputDesc(i)); + input_tensor_desc.resize(node_item.num_inputs); + for (int i = 0; i < node_item.num_inputs; ++i) { + node_item.GetInputDesc(i, input_tensor_desc[i]); } - for (int i = 0; i < node_item.num_outputs; ++i){ - output_tensor_desc.emplace_back(*node_item.MutableOutputDesc(i)); + output_tensor_desc.resize(node_item.num_outputs); + for (int i = 0; i < node_item.num_outputs; ++i) { + node_item.GetOutputDesc(i, output_tensor_desc[i]); } } diff --git a/ge/hybrid/model/node_item.cc b/ge/hybrid/model/node_item.cc index 06d654cf..f14e9a21 100644 --- a/ge/hybrid/model/node_item.cc +++ b/ge/hybrid/model/node_item.cc @@ -297,7 +297,7 @@ void 
NodeItem::SetToDynamic() { } } -GeTensorDescPtr NodeItem::MutableInputDesc(int index) const { +GeTensorDescPtr NodeItem::DoGetInputDesc(int index) const { if (!has_optional_inputs) { return op_desc->MutableInputDesc(static_cast(index)); } @@ -314,6 +314,40 @@ GeTensorDescPtr NodeItem::MutableInputDesc(int index) const { return op_desc->MutableInputDesc(input_desc_indices_[index]); } +GeTensorDescPtr NodeItem::MutableInputDesc(int index) const { + std::lock_guard lk(mu_); + return DoGetInputDesc(index); +} + +Status NodeItem::GetInputDesc(int index, GeTensorDesc &tensor_desc) const { + std::lock_guard lk(mu_); + auto input_desc = DoGetInputDesc(index); + GE_CHECK_NOTNULL(input_desc); + tensor_desc = *input_desc; + return SUCCESS; +} + +Status NodeItem::GetOutputDesc(int index, GeTensorDesc &tensor_desc) const { + std::lock_guard lk(mu_); + auto output_desc = op_desc->MutableOutputDesc(static_cast(index)); + GE_CHECK_NOTNULL(output_desc); + tensor_desc = *output_desc; + return SUCCESS; +} + +GeTensorDescPtr NodeItem::MutableOutputDesc(int index) const { + std::lock_guard lk(mu_); + return op_desc->MutableOutputDesc(static_cast(index)); +} + +Status NodeItem::UpdateInputDesc(int index, const GeTensorDesc &tensor_desc) { + std::lock_guard lk(mu_); + auto input_desc = DoGetInputDesc(index); + GE_CHECK_NOTNULL(input_desc); + *input_desc = tensor_desc; + return SUCCESS; +} + Status NodeItem::GetCanonicalInputIndex(uint32_t index, int &canonical_index) const { if (!has_optional_inputs) { canonical_index = index; diff --git a/ge/hybrid/model/node_item.h b/ge/hybrid/model/node_item.h index 474a1da4..54c5e938 100644 --- a/ge/hybrid/model/node_item.h +++ b/ge/hybrid/model/node_item.h @@ -17,6 +17,7 @@ #ifndef GE_HYBRID_MODEL_NODE_ITEM_H_ #define GE_HYBRID_MODEL_NODE_ITEM_H_ +#include #include #include "external/ge/ge_api_error_codes.h" #include "graph/node.h" @@ -57,12 +58,16 @@ struct NodeItem { bool IsInputShapeStatic(int index) const; - GeTensorDescPtr MutableOutputDesc(int index) const { - return op_desc->MutableOutputDesc(static_cast(index)); - } + GeTensorDescPtr MutableOutputDesc(int index) const; + + Status UpdateInputDesc(int index, const GeTensorDesc &tensor_desc); GeTensorDescPtr MutableInputDesc(int index) const; + Status GetInputDesc(int index, GeTensorDesc &tensor_desc) const; + + Status GetOutputDesc(int index, GeTensorDesc &tensor_desc) const; + Status GetCanonicalInputIndex(uint32_t index, int &canonical_index) const; bool IsControlOp() const; @@ -113,9 +118,11 @@ struct NodeItem { Status ResolveDynamicState(); Status ResolveStaticInputsAndOutputs(); void ResolveUnknownShapeType(); + GeTensorDescPtr DoGetInputDesc(int index) const; std::vector is_input_shape_static_; std::vector input_desc_indices_; + mutable std::mutex mu_; }; } // namespace hybrid } // namespace ge diff --git a/ge/hybrid/node_executor/controlop/control_op_executor.cc b/ge/hybrid/node_executor/controlop/control_op_executor.cc index 74920b22..4e7e71f1 100644 --- a/ge/hybrid/node_executor/controlop/control_op_executor.cc +++ b/ge/hybrid/node_executor/controlop/control_op_executor.cc @@ -237,8 +237,8 @@ Status WhileOpNodeTask::DoExecuteAsync(TaskContext &task_context, const std::fun } bool is_continue = false; - GE_CHK_STATUS_RET(ExecuteOneLoop(task_context, is_continue), - "[%s] Failed to execute iteration 0.", + GE_CHK_STATUS_RET(ExecuteCond(task_context, is_continue), + "[%s] Failed to execute cond-subgraph", task_context.GetNodeName()); if (!is_continue) { for (int i = 0; i < task_context.NumInputs(); ++i) { @@ 
-259,42 +259,28 @@ Status WhileOpNodeTask::DoExecuteAsync(TaskContext &task_context, const std::fun } // backup original input tensor desc - std::vector ori_input_desc; + std::vector ori_input_desc(task_context.NumInputs()); for (int i = 0; i < task_context.NumInputs(); ++i) { - auto tensor_desc = task_context.GetInputDesc(i); - GE_CHECK_NOTNULL(tensor_desc); - ori_input_desc.emplace_back(*tensor_desc); + GE_CHK_STATUS_RET_NOLOG(task_context.GetInputDesc(i, ori_input_desc[i])); } - int iteration = 1; - while (true) { + int iteration = 0; + while (is_continue) { + ++iteration; GELOGD("[%s] Start to execute, iteration = %d", task_context.GetNodeName(), iteration); GE_CHK_STATUS_RET(ExecuteOneLoop(task_context, is_continue), "[%s] Failed to execute iteration %d.", task_context.GetNodeName(), iteration); - - if (!is_continue) { - GELOGD("[%s] Quit from loop. current iteration = %d", task_context.GetNodeName(), iteration); - break; - } - - ++iteration; } - - for (int i = 0; i < task_context.NumInputs(); ++i) { - auto input_tensor = task_context.GetInput(i); - auto tensor_desc = task_context.MutableInputDesc(i); - GE_CHECK_NOTNULL(input_tensor); - GE_CHECK_NOTNULL(tensor_desc); - // restore original input tensor desc - *tensor_desc = std::move(ori_input_desc[i]); - GE_CHK_STATUS_RET_NOLOG(task_context.SetOutput(i, *input_tensor)); - } - + GELOGD("[%s] Quit from loop. current iteration = %d", task_context.GetNodeName(), iteration); if (done_callback) { done_callback(); } + + for (int i = 0; i < task_context.NumInputs(); ++i) { + GE_CHK_STATUS_RET_NOLOG(task_context.UpdateInputDesc(i, ori_input_desc[i])); + } return SUCCESS; } @@ -379,13 +365,6 @@ Status WhileOpNodeTask::MoveOutputs2Inputs(TaskContext &task_context) { } Status WhileOpNodeTask::ExecuteOneLoop(TaskContext &task_context, bool &is_continue) const { - GE_CHK_STATUS_RET(ExecuteCond(task_context, is_continue), - "[%s] Failed to execute cond-subgraph", - task_context.GetNodeName()); - if (!is_continue) { - return SUCCESS; - } - GELOGD("[%s] Start to execute body-subgraph.", task_context.GetNodeName()); GE_CHK_STATUS_RET(ExecuteSubgraph(body_, task_context, nullptr), "[%s] Failed to execute cond-subgraph", task_context.GetNodeName()); @@ -396,6 +375,17 @@ Status WhileOpNodeTask::ExecuteOneLoop(TaskContext &task_context, bool &is_conti "[%s] Failed to move outputs to inputs", task_context.GetNodeName()); + GE_CHK_STATUS_RET(ExecuteCond(task_context, is_continue), + "[%s] Failed to execute cond-subgraph", + task_context.GetNodeName()); + + if (!is_continue) { + for (int i = 0; i < task_context.NumInputs(); ++i) { + auto input_desc = task_context.GetInput(i); + GE_CHECK_NOTNULL(input_desc); + GE_CHK_STATUS_RET_NOLOG(task_context.SetOutput(i, *input_desc)); + } + } return SUCCESS; } diff --git a/ge/hybrid/node_executor/controlop/control_op_executor.h b/ge/hybrid/node_executor/controlop/control_op_executor.h index 3becfaaa..fd02bd25 100644 --- a/ge/hybrid/node_executor/controlop/control_op_executor.h +++ b/ge/hybrid/node_executor/controlop/control_op_executor.h @@ -80,7 +80,6 @@ class WhileOpNodeTask : public ControlOpNodeTask { Status ExecuteCond(TaskContext &task_context, bool &is_continue) const; static Status MoveOutputs2Inputs(TaskContext &task_context); - Status ExecuteOneLoop(TaskContext &task_context, bool &is_continue) const; private: diff --git a/ge/hybrid/node_executor/task_context.cc b/ge/hybrid/node_executor/task_context.cc index f4271551..4e1b367b 100644 --- a/ge/hybrid/node_executor/task_context.cc +++ 
b/ge/hybrid/node_executor/task_context.cc @@ -554,5 +554,16 @@ NodeState *TaskContext::GetNodeState() const { return node_state_; } +Status TaskContext::GetInputDesc(int index, GeTensorDesc &tensor_desc) const { + return node_item_->GetInputDesc(index, tensor_desc); +} + +Status TaskContext::UpdateInputDesc(int index, const GeTensorDesc &tensor_desc) { + return const_cast(node_item_)->UpdateInputDesc(index, tensor_desc); +} + +Status TaskContext::GetOutputDesc(int index, GeTensorDesc &tensor_desc) const { + return node_item_->GetOutputDesc(index, tensor_desc); +} } // namespace hybrid } // namespace ge diff --git a/ge/hybrid/node_executor/task_context.h b/ge/hybrid/node_executor/task_context.h index e00c5048..ba4c62e6 100644 --- a/ge/hybrid/node_executor/task_context.h +++ b/ge/hybrid/node_executor/task_context.h @@ -50,9 +50,12 @@ class TaskContext { const char *GetNodeName() const; TensorValue *MutableInput(int index); ConstGeTensorDescPtr GetInputDesc(int index) const; + Status GetInputDesc(int index, GeTensorDesc &tensor_desc) const; ConstGeTensorDescPtr GetOutputDesc(int index) const; + Status GetOutputDesc(int index, GeTensorDesc &tensor_desc) const; GeTensorDescPtr MutableInputDesc(int index) const; GeTensorDescPtr MutableOutputDesc(int index) const; + Status UpdateInputDesc(int index, const GeTensorDesc &tensor_desc); void ReleaseInputsAndOutputs(); bool NeedCallback(); void ReleaseInput(int index); From 59a3e2e0ff248027784b76cd042cdc06a129b5b0 Mon Sep 17 00:00:00 2001 From: yangwei Date: Tue, 30 Mar 2021 10:33:37 +0800 Subject: [PATCH 04/14] fix import --- inc/framework/common/debug/ge_log.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inc/framework/common/debug/ge_log.h b/inc/framework/common/debug/ge_log.h index 45db7e93..754712f3 100644 --- a/inc/framework/common/debug/ge_log.h +++ b/inc/framework/common/debug/ge_log.h @@ -42,9 +42,9 @@ class GE_FUNC_VISIBILITY GeLog { public: static uint64_t GetTid() { #ifdef __GNUC__ - thread_local static uint64_t tid = static_cast(syscall(__NR_gettid)); + uint64_t tid = static_cast(syscall(__NR_gettid)); #else - thread_local static uint64_t tid = static_cast(GetCurrentThreadId()); + uint64_t tid = static_cast(GetCurrentThreadId()); #endif return tid; } From 4fe73f77bc2dc1172d3b4398e97ed476992895c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E7=A3=8A?= Date: Mon, 22 Mar 2021 20:10:16 +0800 Subject: [PATCH 05/14] fixed sc warning --- ge/analyzer/analyzer.cc | 17 ++++-- .../format_transfers/datatype_transfer.cc | 3 +- .../format_transfer_c1hwncoc0_hwcn.cc | 3 +- .../format_transfer_dhwcn_fracz3D.cc | 6 +- ...format_transfer_dhwnc_fracz3D_transpose.cc | 6 +- .../format_transfer_fractal_nz.cc | 30 ++++++---- .../format_transfer_fractal_z.cc | 26 +++++--- .../format_transfer_fractal_zz.cc | 27 ++++++--- .../format_transfer_fracz_hwcn.cc | 5 +- .../format_transfer_fracz_nchw.cc | 12 ++-- .../format_transfer_fracz_nhwc.cc | 54 +++++++++-------- .../format_transfer_hwcn_c1hwncoc0.cc | 3 +- .../format_transfer_nc1hwc0_nchw.cc | 51 ++++++++-------- .../format_transfer_nc1hwc0_nhwc.cc | 6 +- .../format_transfer_nchw_fz_c04.cc | 15 +++-- .../format_transfer_nhwc_nc1hwc0.cc | 3 +- ge/graph/build/memory/block_mem_assigner.cc | 60 ++++++++++--------- ge/graph/build/memory/graph_mem_assigner.cc | 6 +- ge/graph/load/model_manager/model_utils.cc | 3 +- ge/graph/preprocess/graph_preprocess.cc | 4 +- ge/host_kernels/gather_v2_kernel.cc | 3 +- .../executor/hybrid_model_async_executor.cc | 2 +- ge/ir_build/atc_ir_common.cc | 16 
+++-- ge/session/omg.cc | 2 +- .../format_transfer_5d_nchw_unittest.cc | 18 +++--- .../format_transfer_fracz_nhwc_unittest.cc | 20 +++---- .../ut/ge/graph/load/model_utils_unittest.cc | 18 ++++++ 27 files changed, 251 insertions(+), 168 deletions(-) diff --git a/ge/analyzer/analyzer.cc b/ge/analyzer/analyzer.cc index 1f733f28..b2f30db7 100755 --- a/ge/analyzer/analyzer.cc +++ b/ge/analyzer/analyzer.cc @@ -155,12 +155,12 @@ std::shared_ptr Analyzer::GetJsonObject(uint64_t session_id, uint64_t std::lock_guard lg(mutex_); auto iter = graph_infos_.find(session_id); if (iter == graph_infos_.end()) { - GELOGE(PARAM_INVALID, "[Check][Session_id]session_id:%lu does not exist! graph_id:%lu.", session_id, graph_id); + GELOGE(PARAM_INVALID, "[Check][SessionId]session_id:%lu does not exist! graph_id:%lu", session_id, graph_id); return nullptr; } else { auto iter1 = (iter->second).find(graph_id); if (iter1 == (iter->second).end()) { - GELOGE(PARAM_INVALID, "[Check][Graph_id]graph_id:%lu does not exist! session_id:%lu.", graph_id, session_id); + GELOGE(PARAM_INVALID, "[Check][GraphId]graph_id:%lu does not exist! session_id:%lu.", graph_id, session_id); return nullptr; } GELOGI("GetJsonObject Success!session_id:%lu graph_id:%lu", session_id, graph_id); @@ -200,7 +200,7 @@ ge::Status Analyzer::CreateAnalyzerFile() { } ge::Status Analyzer::SaveAnalyzerDataToFile(uint64_t session_id, uint64_t graph_id) { - GELOGD("start to save analyze file."); + GELOGD("start to save analyze file"); auto graph_info = GetJsonObject(session_id, graph_id); GE_CHECK_NOTNULL(graph_info); @@ -221,7 +221,10 @@ ge::Status Analyzer::SaveAnalyzerDataToFile(uint64_t session_id, uint64_t graph_ try { json_file_ << jsn.dump(kJsonDumpLevel) << std::endl; } catch (nlohmann::detail::type_error &e) { - GELOGE(FAILED, "[Json.dump][GraphInfo]json.dump to analyze file [%s] failed because [%s], session_id:%lu, graph_id:%lu", json_file_name_.c_str(), e.what(), session_id, graph_id); + GELOGE(FAILED, + "[Json.dump][GraphInfo]json.dump to analyze file [%s] failed because [%s]," + "session_id:%lu, graph_id:%lu", + json_file_name_.c_str(), e.what(), session_id, graph_id); ret_failed = true; } json_file_.close(); @@ -229,7 +232,7 @@ ge::Status Analyzer::SaveAnalyzerDataToFile(uint64_t session_id, uint64_t graph_ } ge::Status Analyzer::DoAnalyze(DataInfo &data_info) { - GELOGD("start to do analyzer process!"); + GELOGD("start to do analyzer process"); auto pnode = data_info.node_ptr; GE_CHECK_NOTNULL(pnode); @@ -241,7 +244,9 @@ ge::Status Analyzer::DoAnalyze(DataInfo &data_info) { GE_CHECK_NOTNULL(graph_info); auto status = SaveOpInfo(desc, data_info, graph_info); if (status != SUCCESS) { - GELOGE(status, "[Check][SaveOpInfo]save op info: desc_name [%s] desc_type [%s] failed!", desc->GetName().c_str(), desc->GetType().c_str()); + GELOGE(status, + "[Check][SaveOpInfo]save op info: desc_name [%s] desc_type [%s] failed!", + desc->GetName().c_str(), desc->GetType().c_str()); return FAILED; } // create json file diff --git a/ge/common/formats/format_transfers/datatype_transfer.cc b/ge/common/formats/format_transfers/datatype_transfer.cc index 4ef866f5..b1df4f53 100644 --- a/ge/common/formats/format_transfers/datatype_transfer.cc +++ b/ge/common/formats/format_transfers/datatype_transfer.cc @@ -154,7 +154,8 @@ Status DataTypeTransfer::TransDataType(const CastArgs &args, TransResult &result std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to alloc the 
memory for dst buf %zu, data size %zu", total_size, args.src_data_size); + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to alloc the memory for dst buf %zu, data size %zu", total_size, args.src_data_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; } diff --git a/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc b/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc index 706f401e..20f493d7 100644 --- a/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc +++ b/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc @@ -73,7 +73,8 @@ Status CheckArgsForC1hwncoc0ToHwcn(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size, int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); return ACL_ERROR_GE_MEMORY_ALLOCATION; diff --git a/ge/common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc b/ge/common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc index 57574856..0508a1a5 100644 --- a/ge/common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc +++ b/ge/common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc @@ -94,7 +94,8 @@ Status TransFormatDhwckToFz3D(const TransArgs &args, TransResult &result) { std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; @@ -122,7 +123,8 @@ Status TransFormatDhwckToFz3D(const TransArgs &args, TransResult &result) { args.data + src_idx * data_size, static_cast(data_size)); } if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d, pad mode %d", + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d, pad mode %d", dst_offset, ret, pad_zero); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } diff --git a/ge/common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc b/ge/common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc index 6e1e47ed..8dd1757b 100644 --- a/ge/common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc +++ b/ge/common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc @@ -95,7 +95,8 @@ Status TransFormatDhwncToFz3DTranspose(const TransArgs &args, TransResult &resul std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format 
from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; @@ -123,7 +124,8 @@ Status TransFormatDhwncToFz3DTranspose(const TransArgs &args, TransResult &resul args.data + src_idx * data_size, static_cast(data_size)); } if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d, pad mode %d", + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d, pad mode %d", dst_offset, ret, pad_zero); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc b/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc index bb9b71de..fccdb57b 100755 --- a/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc +++ b/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc @@ -139,7 +139,8 @@ Status TransFormatFromNdToFracNz(const TransArgs &args, TransResult &result, con std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size](), std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; @@ -175,7 +176,8 @@ Status TransFormatFromNdToFracNz(const TransArgs &args, TransResult &result, con auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size * w0)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } @@ -189,7 +191,8 @@ Status TransFormatFromNdToFracNz(const TransArgs &args, TransResult &result, con auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } @@ -210,7 +213,8 @@ Status TransFormatFromFracNzToNd(const TransArgs &args, TransResult &result, con std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; @@ -246,7 +250,8 @@ Status TransFormatFromFracNzToNd(const TransArgs &args, TransResult &result, con ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + 
src_offset, static_cast(size * w0)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } @@ -260,7 +265,8 @@ Status TransFormatFromFracNzToNd(const TransArgs &args, TransResult &result, con ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } @@ -274,14 +280,16 @@ Status TransFormatFromFracNzToNd(const TransArgs &args, TransResult &result, con Status FormatTransferFractalNz::TransFormat(const TransArgs &args, TransResult &result) { if (!IsDataTypeSupport(args.src_data_type)) { - GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, "Trans format from %s to %s, src shape %s, dst shape %s, data type %s is not supported", + GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, + "Trans format from %s to %s, src shape %s, dst shape %s, data type %s is not supported", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), ShapeToString(args.src_shape).c_str(), ShapeToString(args.dst_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); return ACL_ERROR_GE_DATATYPE_INVALID; } if (!CheckShape(args.src_format, args.src_shape) || !IsShapeValid(args.dst_shape)) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Trans format from %s to %s, src shape %s, dst shape %s, data type %s is not supported", + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, + "Trans format from %s to %s, src shape %s, dst shape %s, data type %s is not supported", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), ShapeToString(args.src_shape).c_str(), ShapeToString(args.dst_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); @@ -325,7 +333,8 @@ Status FormatTransferFractalNz::TransShape(Format src_format, const ShapeVector Status FormatTransferFractalNzND::TransFormat(const TransArgs &args, TransResult &result) { if (!IsDataTypeSupport(args.src_data_type)) { - GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, "Trans format from %s to %s, src shape %s, dst shape %s, data type %s is not supported", + GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, + "Trans format from %s to %s, src shape %s, dst shape %s, data type %s is not supported", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), ShapeToString(args.src_shape).c_str(), ShapeToString(args.dst_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); @@ -333,7 +342,8 @@ Status FormatTransferFractalNzND::TransFormat(const TransArgs &args, TransResult } if (!IsShapeValid(args.src_shape) || !CheckShape(args.dst_format, args.dst_shape)) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Trans format from %s to %s, src shape %s, dst shape %s, data type %s is not supported", + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, + "Trans format from %s to %s, src shape %s, dst shape %s, data type %s is not supported", TypeUtils::FormatToSerialString(args.src_format).c_str(), 
TypeUtils::FormatToSerialString(args.dst_format).c_str(), ShapeToString(args.src_shape).c_str(), ShapeToString(args.dst_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_z.cc b/ge/common/formats/format_transfers/format_transfer_fractal_z.cc index 712f7c61..f5d35d2a 100644 --- a/ge/common/formats/format_transfers/format_transfer_fractal_z.cc +++ b/ge/common/formats/format_transfers/format_transfer_fractal_z.cc @@ -127,7 +127,8 @@ Status TransFormatFromNchwToFz(const TransArgs &args, TransResult &result) { std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size], std::default_delete()); GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( dst == nullptr, - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION;); @@ -173,8 +174,9 @@ Status TransFormatFromNchwToFz(const TransArgs &args, TransResult &result) { } } if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d pad mode %d", offset, - ret, need_pad_zero); + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d pad mode %d", + offset, ret, need_pad_zero); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } @@ -213,7 +215,8 @@ Status TransFormatHwcnToFz(const TransArgs &args, TransResult &result) { std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size], std::default_delete()); GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( dst == nullptr, - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION;); @@ -235,7 +238,8 @@ Status TransFormatHwcnToFz(const TransArgs &args, TransResult &result) { static_cast(data_size)); } else { if (protected_size < data_size) { - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Failed to operate the dst memory, protected_size is %ld and size is %ld", + GELOGE(ACL_ERROR_GE_PARAM_INVALID, + "Failed to operate the dst memory, protected_size is %ld and size is %ld", protected_size, data_size); return ACL_ERROR_GE_PARAM_INVALID; } @@ -247,7 +251,8 @@ Status TransFormatHwcnToFz(const TransArgs &args, TransResult &result) { } } if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d, pad mode %d", + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d, pad mode %d", dst_offset, ret, pad_zero); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } @@ -288,7 +293,8 @@ Status TransFormatNhwcToFz(const TransArgs &args, TransResult &result) { std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size], std::default_delete()); GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( dst == nullptr, - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + 
"Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION;); @@ -310,7 +316,8 @@ Status TransFormatNhwcToFz(const TransArgs &args, TransResult &result) { static_cast(data_size)); } else { if (protected_size < data_size) { - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Failed to operate the dst memory, protected_size is %ld and size is %ld", + GELOGE(ACL_ERROR_GE_PARAM_INVALID, + "Failed to operate the dst memory, protected_size is %ld and size is %ld", protected_size, data_size); return ACL_ERROR_GE_PARAM_INVALID; } @@ -322,7 +329,8 @@ Status TransFormatNhwcToFz(const TransArgs &args, TransResult &result) { } } if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d, pad mode %d", + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d, pad mode %d", dst_offset, ret, pad_zero); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc b/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc index 7093aff2..3cccc664 100755 --- a/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc +++ b/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc @@ -140,7 +140,8 @@ Status TransFormatFromNdToFracZz(const TransArgs &args, TransResult &result, con std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size](), std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; @@ -179,7 +180,8 @@ Status TransFormatFromNdToFracZz(const TransArgs &args, TransResult &result, con auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size * w0)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } @@ -217,7 +219,8 @@ Status TransFormatFromFracZzToNd(const TransArgs &args, TransResult &result, con std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size](), std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; @@ -257,7 +260,8 @@ Status TransFormatFromFracZzToNd(const TransArgs &args, TransResult &result, con auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size * w0)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the 
dst memory at offset %ld, error-code %d", dst_offset, ret); + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } @@ -273,7 +277,8 @@ Status TransFormatFromFracZzToNd(const TransArgs &args, TransResult &result, con auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret); return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } @@ -288,14 +293,16 @@ Status TransFormatFromFracZzToNd(const TransArgs &args, TransResult &result, con Status FormatTransferFractalZz::TransFormat(const TransArgs &args, TransResult &result) { if (!IsDataTypeSupport(args.src_data_type)) { - GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, "Not support trans format from %s to %s, src shape %s, dst shape %s, data type %s", + GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, + "Not support trans format from %s to %s, src shape %s, dst shape %s, data type %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), ShapeToString(args.src_shape).c_str(), ShapeToString(args.dst_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); return ACL_ERROR_GE_DATATYPE_INVALID; } if (!CheckShape(args.src_format, args.src_shape) || !IsShapeValid(args.dst_shape)) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Not support trans format from %s to %s, src shape %s, dst shape %s, data type %s", + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, + "Not support trans format from %s to %s, src shape %s, dst shape %s, data type %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), ShapeToString(args.src_shape).c_str(), ShapeToString(args.dst_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); @@ -339,7 +346,8 @@ Status FormatTransferFractalZz::TransShape(Format src_format, const ShapeVector Status FormatTransferFractalZzND::TransFormat(const TransArgs &args, TransResult &result) { if (!IsDataTypeSupport(args.src_data_type)) { - GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, "Not support trans format from %s to %s, src shape %s, dst shape %s, data type %s", + GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, + "Not support trans format from %s to %s, src shape %s, dst shape %s, data type %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), ShapeToString(args.src_shape).c_str(), ShapeToString(args.dst_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); @@ -347,7 +355,8 @@ Status FormatTransferFractalZzND::TransFormat(const TransArgs &args, TransResult } if (!IsShapeValid(args.src_shape) || !CheckShape(args.dst_format, args.dst_shape)) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Not support trans format from %s to %s, src shape %s, dst shape %s, data type %s", + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, + "Not support trans format from %s to %s, src shape %s, dst shape %s, data type %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), ShapeToString(args.src_shape).c_str(), ShapeToString(args.dst_shape).c_str(), 
TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc b/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc index e84033ed..6d9e559c 100755 --- a/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc @@ -66,7 +66,7 @@ Status CheckArgsForFracZToHwcn(const TransArgs &args) { FmtToStr(ShapeToString(dst_shape)); GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_SHAPE_INVALID, error.c_str()); return ACL_ERROR_GE_SHAPE_INVALID; - } + } return SUCCESS; } @@ -74,7 +74,8 @@ Status CheckArgsForFracZToHwcn(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const int size, const int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); return ACL_ERROR_GE_MEMORY_ALLOCATION; diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc b/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc index 3795208d..5233a72e 100755 --- a/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc @@ -37,7 +37,7 @@ Status CheckArgsForFracZToNchw(const TransArgs &args) { std::string error = "Dose not support trans format from " + FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); - GE_ERRORLOG_AND_ERRORMSG(UNSUPPORTED, error.c_str()); + GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); return ACL_ERROR_GE_FORMAT_INVALID; } if (!CheckDataTypeSupported(args.src_data_type)) { @@ -59,9 +59,10 @@ Status CheckArgsForFracZToNchw(const TransArgs &args) { } int64_t c1 = Ceil(dst_shape.at(kNchwC), c0); int64_t n0 = Ceil(dst_shape.at(kNchwN), static_cast(kNiSize)); - if (src_shape.at(kFracZHWC1) != dst_shape.at(kNchwH) * dst_shape.at(kNchwW) * c1 || src_shape.at(kFracZC0) != c0 || - src_shape.at(kFracZNi) != kNiSize || src_shape.at(kFracZN0) != n0) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", + if (src_shape.at(kFracZHWC1) != dst_shape.at(kNchwH) * dst_shape.at(kNchwW) * c1 || + src_shape.at(kFracZC0) != c0 || src_shape.at(kFracZNi) != kNiSize || src_shape.at(kFracZN0) != n0) { + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, + "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", ShapeToString(src_shape).c_str(), ShapeToString(dst_shape).c_str()); return ACL_ERROR_GE_SHAPE_INVALID; } @@ -72,7 +73,8 @@ Status CheckArgsForFracZToNchw(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const int size, const int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", + 
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); return ACL_ERROR_GE_MEMORY_ALLOCATION; diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc b/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc index a2c86300..1aed4a74 100755 --- a/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc @@ -37,33 +37,34 @@ Status CheckArgsForFracZToNhwc(const TransArgs &args) { std::string error = "Dose not support trans format from " + FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); - GE_ERRORLOG_AND_ERRORMSG(UNSUPPORTED, error.c_str()); - return UNSUPPORTED; + GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); + return ACL_ERROR_GE_FORMAT_INVALID; } if (!CheckDataTypeSupported(args.src_data_type)) { - GELOGE(UNSUPPORTED, "Failed to trans shape from FORMAT_FRACTAL_Z to NHWC, invalid data type %s", + GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, "Failed to trans shape from FORMAT_FRACTAL_Z to NHWC, invalid data type %s", TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); - return UNSUPPORTED; + return ACL_ERROR_GE_DATATYPE_INVALID; } if (!CheckShapeValid(src_shape, kFracZDimsNum)) { - GELOGE(PARAM_INVALID, "Failed to check src shape %s", ShapeToString(src_shape).c_str()); - return PARAM_INVALID; + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check src shape %s", ShapeToString(src_shape).c_str()); + return ACL_ERROR_GE_SHAPE_INVALID; } if (!CheckShapeValid(dst_shape, kNhwcDimsNum)) { - GELOGE(PARAM_INVALID, "Failed to check dst shape %s", ShapeToString(dst_shape).c_str()); - return PARAM_INVALID; + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check dst shape %s", ShapeToString(dst_shape).c_str()); + return ACL_ERROR_GE_SHAPE_INVALID; } int64_t c0 = GetCubeSizeByDataType(args.src_data_type); if (c0 < 0) { - return PARAM_INVALID; + return ACL_ERROR_GE_DATATYPE_INVALID; } int64_t c1 = Ceil(dst_shape.at(kNhwcC), c0); int64_t n0 = Ceil(dst_shape.at(kNhwcN), static_cast(kNiSize)); - if (src_shape.at(kFracZHWC1) != dst_shape.at(kNhwcH) * dst_shape.at(kNhwcW) * c1 || src_shape.at(kFracZC0) != c0 || - src_shape.at(kFracZNi) != kNiSize || src_shape.at(kFracZN0) != n0) { - GELOGE(PARAM_INVALID, "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", + if (src_shape.at(kFracZHWC1) != dst_shape.at(kNhwcH) * dst_shape.at(kNhwcW) * c1 || + src_shape.at(kFracZC0) != c0 || src_shape.at(kFracZNi) != kNiSize || src_shape.at(kFracZN0) != n0) { + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, + "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", ShapeToString(src_shape).c_str(), ShapeToString(dst_shape).c_str()); - return PARAM_INVALID; + return ACL_ERROR_GE_SHAPE_INVALID; } return SUCCESS; @@ -72,10 +73,11 @@ Status CheckArgsForFracZToNhwc(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size, int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(OUT_OF_MEMORY, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", + 
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); - return OUT_OF_MEMORY; + return ACL_ERROR_GE_MEMORY_ALLOCATION; } auto n0 = args.src_shape.at(kFracZN0); @@ -111,10 +113,10 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { - GELOGE(INTERNAL_ERROR, + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to copy data from FracZ offset %ld to HHWC[%ld, %ld, %ld, %ld] offset %ld, err-code %d", src_offset, n_idx, h_idx, w_idx, c_idx, dst_offset, ret); - return INTERNAL_ERROR; + return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } } @@ -127,8 +129,9 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size } // namespace Status FormatTransferFracZNhwc::TransFormat(const TransArgs &args, TransResult &result) { - if (CheckArgsForFracZToNhwc(args) != SUCCESS) { - return PARAM_INVALID; + Status ret = CheckArgsForFracZToNhwc(args); + if (ret != SUCCESS) { + return ret; } int size = GetSizeByDataType(args.src_data_type); auto total_size = GetItemNumByShape(args.dst_shape) * size; @@ -139,18 +142,19 @@ Status FormatTransferFracZNhwc::TransFormat(const TransArgs &args, TransResult & return SUCCESS; } - GELOGE(INTERNAL_ERROR, "Get %ld total size from dst shape %s, src shape %s", total_size, + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Get %ld total size from dst shape %s, src shape %s", total_size, ShapeToString(args.dst_shape).c_str(), ShapeToString(args.src_shape).c_str()); - return PARAM_INVALID; + return ACL_ERROR_GE_PARAM_INVALID; } GELOGD("Begin to trans format from FracZ to NHWC, src shape %s, data type %s, dst shape %s, memory size %ld", ShapeToString(args.src_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str(), ShapeToString(args.dst_shape).c_str(), total_size); - if (GetDstDataAfterTrans(args, result, size, total_size) != SUCCESS) { - GELOGE(INTERNAL_ERROR, "Failed to get data after trans, src shape %s, data type %s, dst shape %s, memory size %ld", + ret = GetDstDataAfterTrans(args, result, size, total_size); + if (ret != SUCCESS) { + GELOGE(ret, "Failed to get data after trans, src shape %s, data type %s, dst shape %s, memory size %ld", ShapeToString(args.src_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str(), ShapeToString(args.dst_shape).c_str(), total_size); - return INTERNAL_ERROR; + return ret; } return SUCCESS; } @@ -158,7 +162,7 @@ Status FormatTransferFracZNhwc::TransFormat(const TransArgs &args, TransResult & Status FormatTransferFracZNhwc::TransShape(Format src_format, const std::vector &src_shape, DataType data_type, Format dst_format, std::vector &dst_shape) { GELOGD("The shape derivation from FracZ to NHWC is not unique. 
Trans shape in this direction is not supported"); - return UNSUPPORTED; + return ACL_ERROR_GE_FORMAT_INVALID; } REGISTER_FORMAT_TRANSFER(FormatTransferFracZNhwc, FORMAT_FRACTAL_Z, FORMAT_NHWC) diff --git a/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc b/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc index 16aa26f8..1f2477fd 100755 --- a/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc +++ b/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc @@ -91,7 +91,8 @@ Status CheckArgsForHwcnToC1hwncoc0(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const int size, const int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); return ACL_ERROR_GE_MEMORY_ALLOCATION; diff --git a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc index df8e5a29..4c1e896f 100755 --- a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc +++ b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc @@ -37,33 +37,33 @@ Status CheckArgsForNc1hwc0ToNchw(const TransArgs &args) { std::string error = "Dose not support trans format from " + FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); - GE_ERRORLOG_AND_ERRORMSG(UNSUPPORTED, error.c_str()); - return UNSUPPORTED; + GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); + return ACL_ERROR_GE_FORMAT_INVALID; } if (!CheckDataTypeSupported(args.src_data_type)) { - GELOGE(UNSUPPORTED, "Failed to trans shape from NC1HWC0 to NCHW, invalid data type %s", + GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, "Failed to trans shape from NC1HWC0 to NCHW, invalid data type %s", TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); - return UNSUPPORTED; + return ACL_ERROR_GE_DATATYPE_INVALID; } if (!CheckShapeValid(args.src_shape, kNc1hwc0DimsNum)) { - GELOGE(PARAM_INVALID, "Failed to check src shape %s", ShapeToString(args.src_shape).c_str()); - return PARAM_INVALID; + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check src shape %s", ShapeToString(args.src_shape).c_str()); + return ACL_ERROR_GE_SHAPE_INVALID; } if (!CheckShapeValid(args.dst_shape, kNchwDimsNum)) { - GELOGE(PARAM_INVALID, "Failed to check dst shape %s", ShapeToString(args.dst_shape).c_str()); - return PARAM_INVALID; + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check dst shape %s", ShapeToString(args.dst_shape).c_str()); + return ACL_ERROR_GE_SHAPE_INVALID; } int64_t c0 = GetCubeSizeByDataType(args.src_data_type); if (c0 <= 0) { - GELOGE(PARAM_INVALID, "Failed to get cube size, the data type is invalid"); - return PARAM_INVALID; + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to get cube size, the data type is invalid"); + return ACL_ERROR_GE_SHAPE_INVALID; } if (src_shape.at(kNc1hwc0H) != dst_shape.at(kNchwH) || src_shape.at(kNc1hwc0W) != dst_shape.at(kNchwW) || src_shape.at(kNc1hwc0N) != 
dst_shape.at(kNchwN) || src_shape.at(kNc1hwc0C0) != c0 || src_shape.at(kNc1hwc0C1) != (Ceil(dst_shape.at(kNchwC), c0))) { - GELOGE(PARAM_INVALID, "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", ShapeToString(src_shape).c_str(), ShapeToString(dst_shape).c_str()); - return PARAM_INVALID; + return ACL_ERROR_GE_SHAPE_INVALID; } return SUCCESS; @@ -72,10 +72,11 @@ Status CheckArgsForNc1hwc0ToNchw(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const int size, const int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(OUT_OF_MEMORY, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); - return OUT_OF_MEMORY; + return ACL_ERROR_GE_MEMORY_ALLOCATION; } auto h = args.src_shape.at(kNc1hwc0H); @@ -109,11 +110,11 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { - GELOGE(INTERNAL_ERROR, + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to copy data from NC1HWC0[%ld, %ld, %ld, %ld, %ld] offset %ld to NCHW[%ld, %ld, %ld, %ld]" " offset %ld, err-code %d", n_idx, c1_idx, h_idx, w_idx, c0_idx, src_offset, n_idx, c_idx, h_idx, w_idx, dst_offset, ret); - return INTERNAL_ERROR; + return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } } } @@ -126,8 +127,9 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in } // namespace Status FormatTransferNc1hwc0Nchw::TransFormat(const TransArgs &args, TransResult &result) { - if (CheckArgsForNc1hwc0ToNchw(args) != SUCCESS) { - return PARAM_INVALID; + Status ret = CheckArgsForNc1hwc0ToNchw(args); + if (ret != SUCCESS) { + return ret; } int size = GetSizeByDataType(args.src_data_type); auto total_size = GetItemNumByShape(args.dst_shape) * size; @@ -138,18 +140,19 @@ Status FormatTransferNc1hwc0Nchw::TransFormat(const TransArgs &args, TransResult return SUCCESS; } - GELOGE(INTERNAL_ERROR, "Get %ld total size from dst shape %s, src shape %s", total_size, + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Get %ld total size from dst shape %s, src shape %s", total_size, ShapeToString(args.dst_shape).c_str(), ShapeToString(args.src_shape).c_str()); - return PARAM_INVALID; + return ACL_ERROR_GE_PARAM_INVALID; } GELOGD("Begin to trans format from NC1HWC0 to NCHW, src shape %s, data type %s, dst shape %s, memory size %ld", ShapeToString(args.src_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str(), ShapeToString(args.dst_shape).c_str(), total_size); - if (GetDstDataAfterTrans(args, result, size, total_size) != SUCCESS) { - GELOGE(INTERNAL_ERROR, "Failed to get data after trans, src shape %s, data type %s, dst shape %s, memory size %ld", + ret = GetDstDataAfterTrans(args, result, size, total_size); + if (ret != SUCCESS) { + GELOGE(ret, "Failed to get data after trans, src shape %s, data type %s, dst shape %s, memory size %ld", 
ShapeToString(args.src_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str(), ShapeToString(args.dst_shape).c_str(), total_size); - return INTERNAL_ERROR; + return ret; } return SUCCESS; } @@ -157,7 +160,7 @@ Status FormatTransferNc1hwc0Nchw::TransFormat(const TransArgs &args, TransResult Status FormatTransferNc1hwc0Nchw::TransShape(Format src_format, const std::vector &src_shape, DataType data_type, Format dst_format, std::vector &dst_shape) { GELOGD("The shape derivation from NC1HWC0 to NCHW is not unique. Trans shape in this direction is not supported"); - return UNSUPPORTED; + return ACL_ERROR_GE_FORMAT_INVALID; } REGISTER_FORMAT_TRANSFER(FormatTransferNc1hwc0Nchw, FORMAT_NC1HWC0, FORMAT_NCHW) diff --git a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc index 2234bf05..53b96fd3 100755 --- a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc +++ b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc @@ -61,7 +61,8 @@ Status CheckArgsForNc1hwc0ToNhwc(const TransArgs &args) { if (src_shape.at(kNc1hwc0H) != dst_shape.at(kNhwcH) || src_shape.at(kNc1hwc0W) != dst_shape.at(kNhwcW) || src_shape.at(kNc1hwc0N) != dst_shape.at(kNhwcN) || src_shape.at(kNc1hwc0C0) != c0 || src_shape.at(kNc1hwc0C1) != (Ceil(dst_shape.at(kNhwcC), c0))) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", + GELOGE(ACL_ERROR_GE_SHAPE_INVALID, + "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", ShapeToString(src_shape).c_str(), ShapeToString(dst_shape).c_str()); return ACL_ERROR_GE_SHAPE_INVALID; } @@ -72,7 +73,8 @@ Status CheckArgsForNc1hwc0ToNhwc(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const int size, const int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); return ACL_ERROR_GE_MEMORY_ALLOCATION; diff --git a/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc b/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc index 795f8ff5..bb9e8762 100644 --- a/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc +++ b/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc @@ -125,7 +125,8 @@ Status TransFormatFromNchwToFzC04(const TransArgs &args, TransResult &result) { return ACL_ERROR_GE_INTERNAL_ERROR); auto t1 = h_o * w_o; auto t2 = n_o * c_o; - GE_IF_BOOL_EXEC(!CheckInt64MulOverflow(t1, t2), GELOGE(INTERNAL_ERROR, "int64 mul overflow.A[%ld], B[%ld]", t1, t2); + GE_IF_BOOL_EXEC(!CheckInt64MulOverflow(t1, t2), + GELOGE(INTERNAL_ERROR, "int64 mul overflow.A[%ld], B[%ld]", t1, t2); return ACL_ERROR_GE_INTERNAL_ERROR); int64_t total_ele_cnt = n_o * c_o * h_o * w_o; @@ -140,7 +141,8 @@ Status TransFormatFromNchwToFzC04(const TransArgs &args, TransResult &result) { std::shared_ptr dst(new (std::nothrow) uint8_t[dst_size], std::default_delete()); if (dst == nullptr) { - 
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; @@ -212,7 +214,8 @@ Status PaddingNC(const TransArgs &args, TransArgs &args_tmp, std::shared_ptr()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), dst_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; @@ -275,7 +279,8 @@ Status FormatTransferNchwToFZC04::TransFormat(const TransArgs &args, TransResult } std::vector expect_shape; - ret = TransShape(args_tmp.src_format, args_tmp.src_shape, args_tmp.src_data_type, args_tmp.dst_format, expect_shape); + ret = TransShape(args_tmp.src_format, args_tmp.src_shape, args_tmp.src_data_type, + args_tmp.dst_format, expect_shape); if (ret != SUCCESS) { return ret; } diff --git a/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc b/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc index b09fd168..6817713a 100755 --- a/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc +++ b/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc @@ -92,7 +92,8 @@ Status CheckArgsForNhwcToNc1hwc0(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const int size, const int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); return ACL_ERROR_GE_MEMORY_ALLOCATION; diff --git a/ge/graph/build/memory/block_mem_assigner.cc b/ge/graph/build/memory/block_mem_assigner.cc index 288b7b29..4612f319 100755 --- a/ge/graph/build/memory/block_mem_assigner.cc +++ b/ge/graph/build/memory/block_mem_assigner.cc @@ -598,9 +598,9 @@ void BlockMemAssigner::GetOutAndWorkSpaceMem(vector &all_memory_size) { GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(output_desc, size) != SUCCESS, GELOGI("Get size failed")); GE_IF_BOOL_EXEC(size < 0, GELOGE(FAILED, "[Check][TensorSize]tensor_size:%ld is invalid, maybe it is unknown shape node, Node_name:%s", - size, node_op_desc->GetName().c_str()); + size, node_op_desc->GetName().c_str()); REPORT_INNER_ERROR("E19999", "tensor_size:%ld is invalid, maybe it is unknown shape node, Node_name:%s", - size, node_op_desc->GetName().c_str()); + size, node_op_desc->GetName().c_str()); return;); batch_all_memory_size[batch_label].emplace_back(size); if (batch_total_size.find(batch_label) == batch_total_size.end()) { @@ -692,23 +692,23 @@ bool BlockMemAssigner::IsOutNodeSetContinuousInput(const NodePtr &n, uint32_t ou auto out_anchor = 
n->GetOutDataAnchor(out_index); GE_IF_BOOL_EXEC(out_anchor == nullptr, GELOGE(FAILED, "[Check][Anchor]Node[%s] output[%u] anchor is null.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); REPORT_INNER_ERROR("E19999", "output anchor is null, node_name: %s output_index: %u.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); return false;); for (auto const &peer_in_anchor : out_anchor->GetPeerInDataAnchors()) { GE_IF_BOOL_EXEC(peer_in_anchor == nullptr, GELOGE(FAILED, "[Check][Anchor]Node[%s] output[%u] peer_in_anchor 0 is null.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); REPORT_INNER_ERROR("E19999", "output anchor peer is null, node_name: %s output_index: %u.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); return false;); auto peer_node = peer_in_anchor->GetOwnerNode(); GE_IF_BOOL_EXEC(peer_node == nullptr, GELOGE(FAILED, "[Check][Node]Node[%s] output[%u] peer node is null.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); REPORT_INNER_ERROR("E19999", "output anchor peer node is null, node_name: %s output_index: %u.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); return false;); // Get the continuous input type of the node, default is false @@ -716,9 +716,9 @@ bool BlockMemAssigner::IsOutNodeSetContinuousInput(const NodePtr &n, uint32_t ou auto peer_in_node_desc = peer_node->GetOpDesc(); GE_IF_BOOL_EXEC(peer_in_node_desc == nullptr, GELOGE(FAILED, "[Check][OpDesc]Node[%s] output[%u] nodedesc is null.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); REPORT_INNER_ERROR("E19999", "output anchor peer op_desc is null, node_name:%s output_index:%u.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); return false;); // If GetBool fail, is_input_continuous is false. 
@@ -819,7 +819,7 @@ bool BlockMemAssigner::IsContinuousMemoryReuse(const NodePtr &n, const NodePtr & (in_anchor->GetPeerOutAnchor()->GetOwnerNode() == nullptr) || (in_anchor->GetPeerOutAnchor()->GetOwnerNode()->GetOpDesc() == nullptr)) { GELOGE(FAILED, "[Check][OpDesc]Node[%s] output[%u] peer input node desc is null.", - n->GetName().c_str(), out_index); + n->GetName().c_str(), out_index); REPORT_INNER_ERROR("E19999", "get output anchor peer op_desc fail, node_name: %s output_index: %u.", n->GetName().c_str(), out_index); return false; @@ -1242,7 +1242,7 @@ Status BlockMemAssigner::ApplyContinuousMemory(const NodePtr &n, const vectorGetName().c_str()); + n->GetName().c_str()); GELOGE(INTERNAL_ERROR, "[Check][OutRefStatus]continuous output node ref part input, not support, node_name:%s", n->GetName().c_str()); return INTERNAL_ERROR; @@ -1255,7 +1255,7 @@ Status BlockMemAssigner::ApplyContinuousMemory(const NodePtr &n, const vectorGetOutputDescPtr(index); if (output_op_desc == nullptr) { REPORT_INNER_ERROR("E19999", "get output_desc failed, node_name:%s, output_index:%u", - n->GetName().c_str(), index); + n->GetName().c_str(), index); GELOGE(INTERNAL_ERROR, "[Get][OutputDesc]node_name:%s, output_index:%u", n->GetName().c_str(), index); return INTERNAL_ERROR; } @@ -1268,7 +1268,7 @@ Status BlockMemAssigner::ApplyContinuousMemory(const NodePtr &n, const vectorGetName().c_str(), index); + n->GetName().c_str(), index); GELOGE(INTERNAL_ERROR, "[Get][TensorSize]node_name:%s, output_index:%u", n->GetName().c_str(), index); return INTERNAL_ERROR; } @@ -1310,7 +1310,7 @@ Status BlockMemAssigner::ApplyContinuousMemory(const NodePtr &n, const vectorref_count_); } else { REPORT_CALL_ERROR("E19999", "apply continuousMemory failed, node_name:%s, total_size:%ld", - n->GetName().c_str(), total_size); + n->GetName().c_str(), total_size); GELOGE(INTERNAL_ERROR, "[Apply][ContinuousMemory]node_name:%s, total_size:%ld", n->GetName().c_str(), total_size); return INTERNAL_ERROR; } @@ -1346,9 +1346,9 @@ MemoryBlock *BlockMemAssigner::ApplyOutMemory(const NodePtr &n, uint32_t index, block = symbol_blocks_[symbol]; GE_IF_BOOL_EXEC(block == nullptr, REPORT_INNER_ERROR("E19999", "get ref block failed, node_name:%s, symbol:%s", - node_op_desc->GetName().c_str(), node_index_io.ToString().c_str()); + node_op_desc->GetName().c_str(), node_index_io.ToString().c_str()); GELOGE(FAILED, "[Get][RefBlock]node_name:%s, symbol:%s", - node_op_desc->GetName().c_str(), node_index_io.ToString().c_str()); + node_op_desc->GetName().c_str(), node_index_io.ToString().c_str()); return nullptr); // reduce old size size_t align_size = block->Size(); @@ -1401,9 +1401,9 @@ MemoryBlock *BlockMemAssigner::ApplyOutMemory(const NodePtr &n, uint32_t index, int out_count = 0; GE_IF_BOOL_EXEC(index >= n->GetAllOutDataAnchors().size(), REPORT_INNER_ERROR("E19999", "out index:%u exceed out_size:%lu, node_name:%s", - index, n->GetAllOutDataAnchors().size(), n->GetName().c_str()); + index, n->GetAllOutDataAnchors().size(), n->GetName().c_str()); GELOGE(FAILED, "[Check][OutIndex]index:%u exceed out_size:%lu, node_name:%s", - index, n->GetAllOutDataAnchors().size(), n->GetName().c_str()); + index, n->GetAllOutDataAnchors().size(), n->GetName().c_str()); return nullptr); auto out_data_anchor = n->GetOutDataAnchor(index); GE_IF_BOOL_EXEC(out_data_anchor == nullptr, @@ -1616,12 +1616,12 @@ Status BlockMemAssigner::AssignOutputMemoryWithReuse(const NodePtr &node, vector op_desc->GetOutputsSize(), memorys_type.size()); if (has_mem_type_attr && 
(memorys_type.size() != op_desc->GetOutputsSize())) { REPORT_INNER_ERROR("E19999", "Attr[%s] size:%zu not equal to node output size:%zu, node_name:%s", - ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), memorys_type.size(), - op_desc->GetOutputsSize(), op_desc->GetName().c_str()); + ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), memorys_type.size(), + op_desc->GetOutputsSize(), op_desc->GetName().c_str()); GELOGE(INTERNAL_ERROR, - "[Check][MemTypeAttr]Attr %s size:%zu not equal to node output size:%zu, node_name:%s", - ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), memorys_type.size(), - op_desc->GetOutputsSize(), op_desc->GetName().c_str()); + "[Check][MemTypeAttr]Attr %s size:%zu not equal to node output size:%zu, node_name:%s", + ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), memorys_type.size(), + op_desc->GetOutputsSize(), op_desc->GetName().c_str()); return INTERNAL_ERROR; } @@ -1748,9 +1748,10 @@ void BlockMemAssigner::AssignMemoryWithReuse(vector &ranges) { if (has_tvm_workspace_mem_type_attr && (temp.size() != tvm_workspace_memory_type.size())) { REPORT_INNER_ERROR("E19999", "Attr[%s]size:%zu is not equal to workspace size:%zu, node_name:%s", - TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), tvm_workspace_memory_type.size(), temp.size(), n->GetName().c_str()); + TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), tvm_workspace_memory_type.size(), + temp.size(), n->GetName().c_str()); GELOGE(INTERNAL_ERROR, "[Check][Attr]Attr %s size:%zu is not equal to workspace size:%zu, node_name:%s", - TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), tvm_workspace_memory_type.size(), temp.size(), n->GetName().c_str()); + TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), tvm_workspace_memory_type.size(), temp.size(), n->GetName().c_str()); return; } for (size_t i = 0; i < temp.size(); i++) { @@ -2160,10 +2161,11 @@ bool BlockMemAssigner::GetWorkSpaceMemoryType(const NodePtr &node, size_t index, ge::AttrUtils::GetListInt(op_desc, TVM_ATTR_NAME_WORKSPACE_TYPE, workspace_memory_type); if (has_workspace_mem_type_attr && (workspace_memory_type.size() <= index)) { REPORT_INNER_ERROR("E19999", "get workspace mem_type failed, " - "index %zu invalid, bigger than attr %s size:%zu, node_name:%s", - index, TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), workspace_memory_type.size(), node->GetName().c_str()); + "index %zu invalid, bigger than attr %s size:%zu, node_name:%s", + index, TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), + workspace_memory_type.size(), node->GetName().c_str()); GELOGE(INTERNAL_ERROR, "[Get][WorkspaceMemType]index %zu invalid, bigger than attr %s size:%zu, node_name:%s", - index, TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), workspace_memory_type.size(), node->GetName().c_str()); + index, TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), workspace_memory_type.size(), node->GetName().c_str()); return false; } memory_type = has_workspace_mem_type_attr ? 
workspace_memory_type[index] : RT_MEMORY_HBM; diff --git a/ge/graph/build/memory/graph_mem_assigner.cc b/ge/graph/build/memory/graph_mem_assigner.cc index b433ad02..e97d343d 100755 --- a/ge/graph/build/memory/graph_mem_assigner.cc +++ b/ge/graph/build/memory/graph_mem_assigner.cc @@ -496,7 +496,7 @@ Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node, REPORT_INNER_ERROR("E19999", "find memory offset fail for mem_type:%ld, " "when assign continuous input memory for node:%s, ", memory_type, node->GetName().c_str()); GELOGE(FAILED, "[Find][MemOffset]fail for mem_type:%ld, when AssignContinuousInputMemory for node:%s", - memory_type, node->GetName().c_str()); + memory_type, node->GetName().c_str()); return FAILED; } // The head and tail of hcom continuous input should be added 512 @@ -929,8 +929,8 @@ Status GraphMemoryAssigner::AssignReferenceMemory() { if (out_op_desc->GetOutputsSize() > output_list.size()) { REPORT_INNER_ERROR("E19999", "Output size:%zu more than output offset size:%zu, judge invalid in node:%s " - "when AssignReferenceMemory", - out_op_desc->GetOutputsSize(), output_list.size(), node->GetName().c_str()); + "when AssignReferenceMemory", + out_op_desc->GetOutputsSize(), output_list.size(), node->GetName().c_str()); GELOGE(ge::FAILED, "[Check][InnerData]Output size:%zu more than output offset size:%zu, invalid in node:%s", out_op_desc->GetOutputsSize(), output_list.size(), node->GetName().c_str()); return ge::FAILED; diff --git a/ge/graph/load/model_manager/model_utils.cc b/ge/graph/load/model_manager/model_utils.cc index 8648d892..015fefcc 100755 --- a/ge/graph/load/model_manager/model_utils.cc +++ b/ge/graph/load/model_manager/model_utils.cc @@ -384,7 +384,8 @@ Status ModelUtils::GetVarAddr(const RuntimeParam &model_param, const ConstOpDesc switch (mem_type) { case RT_MEMORY_RDMA_HBM: if (offset < 0) { - GELOGE(PARAM_INVALID, "rdma var addr is invalid, addr=%p", reinterpret_cast(offset)); + GELOGE(PARAM_INVALID, "rdma var addr is invalid, addr=%p", + reinterpret_cast(static_cast(offset))); return PARAM_INVALID; } var_addr = reinterpret_cast(static_cast(offset)); diff --git a/ge/graph/preprocess/graph_preprocess.cc b/ge/graph/preprocess/graph_preprocess.cc index 26c37a1d..01e95f84 100644 --- a/ge/graph/preprocess/graph_preprocess.cc +++ b/ge/graph/preprocess/graph_preprocess.cc @@ -1772,8 +1772,8 @@ Status GraphPrepare::CheckUserInput(const std::vector &user_input) { if (dim < UNKNOWN_DIM_NUM) { std::string situation = "data dim[" + std::to_string(i) + "][" + std::to_string(dim) + "]" ; std::string reason = "it need >= -2"; - REPORT_INPUT_ERROR( - "E19025", std::vector({"situation", "reason"}),std::vector({situation, reason})); + REPORT_INPUT_ERROR("E19025", std::vector({"situation", "reason"}), + std::vector({situation, reason})); GELOGE(GE_GRAPH_INIT_FAILED, "[Check][InputDim]data dim %zu is not supported, need >= -2, real:%ld.", i, dim); return GE_GRAPH_INIT_FAILED; } diff --git a/ge/host_kernels/gather_v2_kernel.cc b/ge/host_kernels/gather_v2_kernel.cc index 326bfbd1..65a51b13 100644 --- a/ge/host_kernels/gather_v2_kernel.cc +++ b/ge/host_kernels/gather_v2_kernel.cc @@ -407,7 +407,8 @@ Status GatherV2Kernel::Compute(const OpDescPtr op_desc_ptr, const vectorGetTensorDesc().GetDataType(); if (supported_type.find(x_data_type) == supported_type.end()) { - GELOGI("GatherV2Kernel does not support this Data type:%s.", TypeUtils::DataTypeToSerialString(x_data_type).c_str()); + GELOGI("GatherV2Kernel does not support this Data type:%s.", + 
TypeUtils::DataTypeToSerialString(x_data_type).c_str()); return NOT_CHANGED; } // calc output shape diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc index 9f37e7d5..f56aba0a 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.cc +++ b/ge/hybrid/executor/hybrid_model_async_executor.cc @@ -105,7 +105,7 @@ Status HybridModelAsyncExecutor::Init() { executor_ = std::unique_ptr(new(std::nothrow) HybridModelExecutor(model_, device_id_, stream_)); GE_CHECK_NOTNULL(executor_); GE_CHK_STATUS_RET(executor_->Init(), "Failed to init hybrid engine"); - GE_CHK_STATUS_RET(DumpOpDebug(),"Dump op debug failed in hybrid engine"); + GE_CHK_STATUS_RET(DumpOpDebug(), "Dump op debug failed in hybrid engine"); GELOGI("HybridModel stage nums:%zu", model_->GetRootGraphItem()->NumGroups()); if (model_->GetRootGraphItem()->NumGroups() >= kMinimumPiplineStages) { diff --git a/ge/ir_build/atc_ir_common.cc b/ge/ir_build/atc_ir_common.cc index ff156c75..3511a113 100755 --- a/ge/ir_build/atc_ir_common.cc +++ b/ge/ir_build/atc_ir_common.cc @@ -34,6 +34,8 @@ const int64_t kDynamicImageSizeNum = 2; const size_t kMaxDynamicDimNum = 100; const size_t kMaxNDDimNum = 4; const size_t kMinNDDimNum = 1; +const size_t kSquareBracketsSize = 2; +const size_t kRangePairSize = 2; // datatype/formats from user to GE, Unified to util interface file later const std::map kOutputTypeSupportDatatype = { {"FP32", ge::DT_FLOAT}, {"FP16", ge::DT_FLOAT16}, {"UINT8", ge::DT_UINT8}}; @@ -292,7 +294,8 @@ bool ParseSingleShapeRange(std::string &shape_range, vector>> shape_range_map; - if(!ParseInputShapeRange(input_shape_range, shape_range_map)) { + if (!ParseInputShapeRange(input_shape_range, shape_range_map)) { GELOGE(ge::PARAM_INVALID, "Failed to parse input shape range: %s", input_shape_range.c_str()); return ge::PARAM_INVALID; } diff --git a/ge/session/omg.cc b/ge/session/omg.cc index f7072c7d..63be4913 100755 --- a/ge/session/omg.cc +++ b/ge/session/omg.cc @@ -793,7 +793,7 @@ FMK_FUNC_HOST_VISIBILITY Status ParseGraph(ge::Graph &graph, const std::map(data), FORMAT_RESERVED, FORMAT_NHWC, {16, 1, 16, 16}, {1, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_FORMAT_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_format_reserved) { @@ -61,7 +61,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_format_reserved) reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_RESERVED, {16, 1, 16, 16}, {1, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_FORMAT_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_shape) { @@ -71,7 +71,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_shape) { TransArgs args{reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16}, {1, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_shape2) { @@ -82,7 +82,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_shape2) { reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, -1, 16, 16}, {1, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, 
result), PARAM_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_shape) { @@ -93,7 +93,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_shape) { reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16, 16}, {1, 4, 4}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_shape2) { @@ -104,7 +104,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_shape2) { reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16, 16}, {1, 4, 4, -1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_dst_shape_relation1) { @@ -115,7 +115,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_dst_shape_relatio reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16, 16}, {17, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_dst_shape_relation2) { @@ -126,7 +126,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_dst_shape_relatio reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16, 16}, {1, 4, 4, 17}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_fp16_success_lt_cube) { @@ -301,7 +301,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_fp16_success_eq_cube) { } Status status = transfer.TransShape(args.src_format, args.src_shape, args.src_data_type, args.dst_format, args.dst_shape); - EXPECT_EQ(status, UNSUPPORTED); + EXPECT_EQ(status, ACL_ERROR_GE_FORMAT_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_fp16_success_gt_cube) { diff --git a/tests/ut/ge/graph/load/model_utils_unittest.cc b/tests/ut/ge/graph/load/model_utils_unittest.cc index ac886cea..630a75aa 100644 --- a/tests/ut/ge/graph/load/model_utils_unittest.cc +++ b/tests/ut/ge/graph/load/model_utils_unittest.cc @@ -67,4 +67,22 @@ TEST_F(UtestModelUtils, get_var_addr_rdma_hbm) { EXPECT_EQ(reinterpret_cast(offset), var_addr); VarManager::Instance(runtime_param.session_id)->Destory(); } + +TEST_F(UtestModelUtils, get_var_addr_rdma_hbm_negative_offset) { + uint8_t test = 2; + uint8_t *pf = &test; + RuntimeParam runtime_param; + runtime_param.session_id = 0; + runtime_param.logic_var_base = 0; + runtime_param.var_base = pf; + + int64_t offset = -1; + EXPECT_EQ(VarManager::Instance(runtime_param.session_id)->Init(0, 0, 0, 0), SUCCESS); + EXPECT_NE(VarManager::Instance(runtime_param.session_id)->var_resource_, nullptr); + VarManager::Instance(runtime_param.session_id)->var_resource_->var_offset_map_[offset] = RT_MEMORY_RDMA_HBM; + std::shared_ptr op_desc = std::make_shared("test", "test"); + uint8_t *var_addr = nullptr; + EXPECT_NE(ModelUtils::GetVarAddr(runtime_param, op_desc, offset, var_addr), SUCCESS); + VarManager::Instance(runtime_param.session_id)->Destory(); +} } // namespace ge From 
e9868abe29781b7d55d04b7a217ca7154334cd3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E7=A3=8A?= Date: Tue, 23 Mar 2021 11:17:43 +0800 Subject: [PATCH 06/14] fixed sc warning by wangxiaotian --- ge/generator/ge_generator.cc | 2 +- ge/graph/build/logical_stream_allocator.cc | 43 +++--- ge/graph/build/memory/block_mem_assigner.cc | 124 ++++++++++-------- ge/graph/build/memory/graph_mem_assigner.cc | 16 +-- ge/graph/preprocess/insert_op/ge_aipp_op.cc | 3 +- .../insert_op/util_insert_aipp_op.cc | 14 +- ge/plugin/engine/CMakeLists.txt | 1 + .../format_transfer_fractal_nz_unittest.cc | 34 ++--- .../format_transfer_nhwc_fractalz_unittest.cc | 16 +-- .../ut/ge/common/format_transfer_unittest.cc | 58 ++++---- 10 files changed, 169 insertions(+), 142 deletions(-) diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index aa40f6ba..b5f184c5 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -592,7 +592,7 @@ Status GeGenerator::SetModelNameForDump(const GeRootModelPtr &ge_root_model) { ErrorManager::GetInstance().ATCReportErrMessage("E10000", {"parameter"}, {"output"}); GELOGE(FAILED, "[Check][GetModelNameStep]Get model_name failed. Param --output is invalid, root graph name: %s", ge_root_model->GetRootGraph()->GetName().c_str()); - REPORT_CALL_ERROR("E19999", "Get model_name failed. Param --output is invalid,", + REPORT_CALL_ERROR("E19999", "Get model_name failed. Param --output is invalid," "root graph name: %s", ge_root_model->GetRootGraph()->GetName().c_str()); return PARAM_INVALID; } diff --git a/ge/graph/build/logical_stream_allocator.cc b/ge/graph/build/logical_stream_allocator.cc index 3bc29b70..1f054841 100644 --- a/ge/graph/build/logical_stream_allocator.cc +++ b/ge/graph/build/logical_stream_allocator.cc @@ -70,7 +70,7 @@ Status AssignByLabelPass::Run(ComputeGraphPtr graph, const vector & auto iter = label_streams.find(stream_label); if (iter == label_streams.end()) { subgraph->stream_id = next_stream; - GELOGI("Assign new stream %ld for label %s.", next_stream, stream_label.c_str()); + GELOGI("[Assign][NewStreamId] %ld for label %s.", next_stream, stream_label.c_str()); label_streams.emplace(stream_label, next_stream); next_stream++; @@ -102,7 +102,7 @@ Status IndependentStreamPass::Run(ComputeGraphPtr graph, const vectorstream_id = next_stream; - GELOGI("Assign new independent stream %ld for engine %s (label: %s).", next_stream, engine.c_str(), + GELOGI("[Assign][NewStreamId:independent] %ld for engine %s (label: %s).", next_stream, engine.c_str(), stream_label.c_str()); label_streams.emplace(stream_label, next_stream); @@ -137,8 +137,8 @@ Status AssignByDependencyPass::Run(ComputeGraphPtr graph, const vectorstream_id = stream_id; - GELOGI("Reusable subgraph %s has not been assigned a stream, now assign new stream %ld.", - reusable_subgraph->name.c_str(), stream_id); + GELOGI("[Assign][NewStreamId] %ld for Reusable subgraph %s cause has not been assigned before.", + stream_id, reusable_subgraph->name.c_str()); } if (reusable_subgraph->reused_subgraph != nullptr) { @@ -147,7 +147,8 @@ Status AssignByDependencyPass::Run(ComputeGraphPtr graph, const vectorreused_subgraph = reusable_subgraph; reused_subgraphs_.emplace_back(subgraph, reusable_subgraph); - GELOGI("Subgraph %s of engine %s reuses stream of subgraph %s of engine %s.", subgraph->name.c_str(), + GELOGI("[Reuse][Stream]Subgraph %s of engine %s reuses stream of subgraph %s of engine %s.", + subgraph->name.c_str(), subgraph->engine_conf.id.c_str(), reusable_subgraph->name.c_str(), 
reusable_subgraph->engine_conf.id.c_str()); } @@ -259,7 +260,7 @@ int64_t AssignByDependencyPass::AssignNewStream(SubgraphPtr subgraph) { engine_stream_num_[engine_name] = stream_id + 1; } - GELOGI("Subgraph %s assigns new temp stream %ld (engine: %s).", subgraph->name.c_str(), stream_id, + GELOGI("[Assign][NewStreamId:temp]id:%ld for Subgraph %s (engine: %s).", stream_id, subgraph->name.c_str(), engine_name.c_str()); return stream_id; @@ -292,7 +293,7 @@ void AssignByDependencyPass::UpdateAssignedSubgraphs(Context &context) { GELOGI("Subgraph %s of engine %s reuses default stream %ld.", subgraph->name.c_str(), subgraph->engine_conf.id.c_str(), context.default_stream); } else { - GELOGI("Stream of subgraph %s has been updated to %ld.", subgraph->name.c_str(), subgraph->stream_id); + GELOGI("[Update][StreamId]id:%ld for subgraph %s.", subgraph->stream_id, subgraph->name.c_str()); } } } @@ -303,7 +304,7 @@ void AssignByDependencyPass::UpdateReusedSubgraphs() { auto &cur_subgraph = item.first; auto &reused_graph = item.second; cur_subgraph->stream_id = reused_graph->stream_id; - GELOGI("Stream of subgraph %s has been updated to %ld.", cur_subgraph->name.c_str(), cur_subgraph->stream_id); + GELOGI("[Update][StreamId]id:%ld for subgraph %s.", cur_subgraph->stream_id, cur_subgraph->name.c_str()); } } @@ -340,7 +341,7 @@ Status NodeStreamUpdatePass::Run(ComputeGraphPtr graph, const vectorname.c_str(), subgraph->stream_id, + GELOGI("[Assign][StreamId] %ld for Subgraph %s (engine: %s).", subgraph->stream_id, subgraph->name.c_str(), engine_name.c_str()); } } @@ -363,12 +364,12 @@ Status NodeStreamUpdatePass::Run(ComputeGraphPtr graph, const vectorGetName().c_str(), node->GetType().c_str(), subgraph->name.c_str(), context.default_stream, engine_name.c_str()); } else if (IsEngineSkip(*subgraph) && node->GetInNodes().empty()) { - GELOGD("Node %s of type %s in subgraph %s doesn't need to assign a stream (engine: %s).", + GELOGD("[Skip][StreamIdAssign]Node %s of type %s in subgraph %s doesn't need (engine: %s).", node->GetName().c_str(), node->GetType().c_str(), subgraph->name.c_str(), engine_name.c_str()); } else { node->GetOpDesc()->SetStreamId(stream_id); - GELOGD("Node %s of type %s in subgraph %s is assigned stream %ld (engine: %s).", node->GetName().c_str(), - node->GetType().c_str(), subgraph->name.c_str(), stream_id, engine_name.c_str()); + GELOGD("[Assign][StreamId]id:%ld for Node %s of type %s in subgraph %s (engine: %s).", stream_id, + node->GetName().c_str(), node->GetType().c_str(), subgraph->name.c_str(), engine_name.c_str()); } } } @@ -397,8 +398,8 @@ int64_t UpdateForSkippedEnginePass::GetSingleInoutStream(const NodePtr &node) co if (stream_ids.size() == 1) { int64_t stream_id = *(stream_ids.begin()); - GELOGI("The stream of all input and output nodes of node %s (type: %s) is %ld.", node->GetName().c_str(), - node->GetType().c_str(), stream_id); + GELOGI("[Get][SingleStreamId]The stream of all input and output nodes of node %s (type: %s) is %ld.", + node->GetName().c_str(), node->GetType().c_str(), stream_id); return stream_id; } @@ -437,8 +438,8 @@ Status UpdateForSkippedEnginePass::Run(ComputeGraphPtr graph, const vectorSetStreamId(inout_stream); - GELOGI("Node %s of type %s reassign to stream %ld from stream %ld.", node->GetName().c_str(), - node->GetType().c_str(), inout_stream, stream_id); + GELOGI("[Reassign][StreamId]%ld for Node %s of type %s from stream %ld.", + inout_stream, node->GetName().c_str(), node->GetType().c_str(), stream_id); } } } @@ -465,7 +466,7 @@ Status 
AllReduceParallelPass::Run(ComputeGraphPtr graph, const vectorGetName().c_str()); + GELOGD("[Show][Subgraphs] in graph %s", graph->GetName().c_str()); for (const auto &subgraph : subgraphs) { if (subgraph != nullptr) { GELOGD("subgraph: %s", subgraph->name.c_str()); @@ -674,9 +675,9 @@ Status LogicalStreamAllocator::RunPasses(const ComputeGraphPtr &graph, const vec Status status = pass->Run(graph, subgraphs, context_); if (status == SUCCESS) { - GELOGD("Stream pass %s return SUCCESS.", pass->GetName().c_str()); + GELOGD("[Show][Status]Stream pass %s return SUCCESS.", pass->GetName().c_str()); } else if (status == NOT_CHANGED) { - GELOGD("Stream pass %s return NOT_CHANGED.", pass->GetName().c_str()); + GELOGD("[Show][Status]Stream pass %s return NOT_CHANGED.", pass->GetName().c_str()); } else { GELOGE(status, "Stream pass %s failed.", pass->GetName().c_str()); return status; diff --git a/ge/graph/build/memory/block_mem_assigner.cc b/ge/graph/build/memory/block_mem_assigner.cc index 4612f319..ae0c6e0d 100755 --- a/ge/graph/build/memory/block_mem_assigner.cc +++ b/ge/graph/build/memory/block_mem_assigner.cc @@ -508,7 +508,7 @@ BlockMemAssigner::BlockMemAssigner(ComputeGraphPtr compute_graph, const map &all_memory_size) { int64_t size = 0; GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(output_desc, size) != SUCCESS, GELOGI("Get size failed")); GE_IF_BOOL_EXEC(size < 0, - GELOGE(FAILED, "[Check][TensorSize]tensor_size:%ld is invalid, maybe it is unknown shape node, Node_name:%s", - size, node_op_desc->GetName().c_str()); - REPORT_INNER_ERROR("E19999", "tensor_size:%ld is invalid, maybe it is unknown shape node, Node_name:%s", - size, node_op_desc->GetName().c_str()); - return;); + GELOGE(FAILED, "[Check][TensorSize]tensor_size:%ld is invalid, " + "maybe it is unknown shape node, Node_name:%s", + size, node_op_desc->GetName().c_str()); + REPORT_INNER_ERROR("E19999", "tensor_size:%ld is invalid, " + "maybe it is unknown shape node, Node_name:%s", + size, node_op_desc->GetName().c_str()); + return;); batch_all_memory_size[batch_label].emplace_back(size); if (batch_total_size.find(batch_label) == batch_total_size.end()) { batch_total_size[batch_label] = size; @@ -1105,9 +1107,10 @@ MemoryBlock *BlockMemAssigner::ApplyMemory(size_t block_size, size_t real_size, OpMemoryType mem_type, const NodePtr &n, uint32_t out_index, const vector &workspace_reuse_flag, const bool is_op_reuse_mem, const bool continuous, int64_t memory_type) { - GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(n == nullptr, - REPORT_INNER_ERROR("E19999", "Input parameter n(type:node_ptr) is null, apply memory failed"); - return nullptr, "[Check][Param]Input parameter n(type:node_ptr) is null."); + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( + n == nullptr, + REPORT_INNER_ERROR("E19999", "Input parameter n(type:node_ptr) is null, apply memory failed"); + return nullptr, "[Check][Param]Input parameter n(type:node_ptr) is null."); auto node_op_desc = n->GetOpDesc(); GE_IF_BOOL_EXEC(node_op_desc == nullptr, return nullptr); std::string batch_label; @@ -1159,10 +1162,12 @@ MemoryBlock *BlockMemAssigner::ApplyMemory(size_t block_size, size_t real_size, } auto block = new (std::nothrow) MemoryBlock(block_size, node_op_desc->GetStreamId(), is_reuse_memory, memory_type); - GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(block == nullptr, - REPORT_INNER_ERROR("E19999", "new a memoryblock object failed. 
node_name:%s out_index:%u", - n->GetName().c_str(), out_index); - return nullptr, "[New][Object]new MemoryBlock failed, node_name:%s out_index:%u", n->GetName().c_str(), out_index); + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( + block == nullptr, + REPORT_INNER_ERROR("E19999", "new a memoryblock object failed. node_name:%s out_index:%u", + n->GetName().c_str(), out_index); + return nullptr, + "[New][Object]new MemoryBlock failed, node_name:%s out_index:%u", n->GetName().c_str(), out_index); // Data and netoutput need zero copy block block->is_zero_copy_ = IsZeroCopyBlock(n, continuous); @@ -1221,13 +1226,15 @@ void BlockMemAssigner::ContinuousOutRefCheck(bool &isAllOutputRef, bool &isOutpu Status BlockMemAssigner::ApplyContinuousMemory(const NodePtr &n, const vector &ranges, const bool is_op_reuse_mem) { - GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(n == nullptr, - REPORT_INNER_ERROR("E19999", "Input parameter n(type:node_ptr) is null"); - return INTERNAL_ERROR, "[check][param]Input parameter n(type:NodePtr) is null."); + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( + n == nullptr, + REPORT_INNER_ERROR("E19999", "Input parameter n(type:node_ptr) is null"); + return INTERNAL_ERROR, "[check][param]Input parameter n(type:NodePtr) is null."); auto node_op_desc = n->GetOpDesc(); - GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(node_op_desc == nullptr, - REPORT_INNER_ERROR("E19999", "Input parameter n(type:OpDescPtr) is null"); - return INTERNAL_ERROR, "[Check][Param]Input parameter n(type:OpDescPtr) is null"); + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( + node_op_desc == nullptr, + REPORT_INNER_ERROR("E19999", "Input parameter n(type:OpDescPtr) is null"); + return INTERNAL_ERROR, "[Check][Param]Input parameter n(type:OpDescPtr) is null"); // continuous output support ref only when all output ref input bool isAllOutputRef = true; @@ -1319,26 +1326,33 @@ Status BlockMemAssigner::ApplyContinuousMemory(const NodePtr &n, const vector &ranges, const bool is_op_reuse_mem, const bool continuous) { - GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(n == nullptr, - REPORT_INNER_ERROR("E19999", "Input parameter n(type:NodePtr) is null"); - return nullptr, "[Check][Param]Input parameter n(type:NodePtr) is null"); + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( + n == nullptr, + REPORT_INNER_ERROR("E19999", "Input parameter n(type:NodePtr) is null"); + return nullptr, "[Check][Param]Input parameter n(type:NodePtr) is null"); auto node_op_desc = n->GetOpDesc(); - GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(node_op_desc == nullptr, - REPORT_INNER_ERROR("E19999", "Input parameter n(type:OpDescPtr) is null"); - return nullptr, "[Check][Param]Input parameter n(type:OpDescPtr) is null"); + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( + node_op_desc == nullptr, + REPORT_INNER_ERROR("E19999", "Input parameter n(type:OpDescPtr) is null"); + return nullptr, "[Check][Param]Input parameter n(type:OpDescPtr) is null"); MemoryBlock *block = nullptr; NodeIndexIO node_index_io(n, index, kOut); int64_t size = 0; auto output_op_desc = node_op_desc->GetOutputDescPtr(index); - GE_IF_BOOL_EXEC(output_op_desc == nullptr, - REPORT_INNER_ERROR("E19999", "get output_desc failed, node_name:%s, output_index:%u", n->GetName().c_str(), index); - GELOGE(FAILED, "[Get][OutputDesc]node_name:%s, output_index:%u", n->GetName().c_str(), index); - return nullptr); + GE_IF_BOOL_EXEC( + output_op_desc == nullptr, + REPORT_INNER_ERROR("E19999", "get output_desc failed, node_name:%s, output_index:%u", + n->GetName().c_str(), index); + GELOGE(FAILED, "[Get][OutputDesc]node_name:%s, output_index:%u", n->GetName().c_str(), index); + return nullptr); 
GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(*output_op_desc, size) != SUCCESS, GELOGI("Get size failed")); size_t no_align_size = 0; - GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(GetNoAlignSize(*node_op_desc, index, no_align_size) != SUCCESS, - REPORT_CALL_ERROR("E19999", "Get no align size failed, node_name:%s, output_index:%u", n->GetName().c_str(), index); - return nullptr, "[Get][TensorSize]Get no align size, node_name:%s, output_index:%u", n->GetName().c_str(), index); + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( + GetNoAlignSize(*node_op_desc, index, no_align_size) != SUCCESS, + REPORT_CALL_ERROR("E19999", "Get no align size failed, node_name:%s, output_index:%u", + n->GetName().c_str(), index); + return nullptr, + "[Get][TensorSize]Get no align size, node_name:%s, output_index:%u", n->GetName().c_str(), index); std::string symbol; bool reuse_input = false; @@ -1392,24 +1406,28 @@ MemoryBlock *BlockMemAssigner::ApplyOutMemory(const NodePtr &n, uint32_t index, vector workspace_reuse_flag; block = ApplyMemory(block_size, size, no_align_size, kOutput, n, index, workspace_reuse_flag, is_op_reuse_mem, continuous, memory_type); - GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(block == nullptr, - REPORT_CALL_ERROR("E19999", "apply out Memory failed, node_name:%s, block_size:%ld, out_index:%u", - n->GetName().c_str(), block_size, index); - return nullptr, "[Apply][Memory]node_name:%s, block_size:%ld, out_index:%u", + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( + block == nullptr, + REPORT_CALL_ERROR("E19999", "apply out Memory failed, node_name:%s, block_size:%ld, out_index:%u", + n->GetName().c_str(), block_size, index); + return nullptr, + "[Apply][Memory]node_name:%s, block_size:%ld, out_index:%u", n->GetName().c_str(), block_size, index); } int out_count = 0; - GE_IF_BOOL_EXEC(index >= n->GetAllOutDataAnchors().size(), - REPORT_INNER_ERROR("E19999", "out index:%u exceed out_size:%lu, node_name:%s", - index, n->GetAllOutDataAnchors().size(), n->GetName().c_str()); - GELOGE(FAILED, "[Check][OutIndex]index:%u exceed out_size:%lu, node_name:%s", - index, n->GetAllOutDataAnchors().size(), n->GetName().c_str()); - return nullptr); + GE_IF_BOOL_EXEC( + index >= n->GetAllOutDataAnchors().size(), + REPORT_INNER_ERROR("E19999", "out index:%u exceed out_size:%lu, node_name:%s", + index, n->GetAllOutDataAnchors().size(), n->GetName().c_str()); + GELOGE(FAILED, "[Check][OutIndex]index:%u exceed out_size:%lu, node_name:%s", + index, n->GetAllOutDataAnchors().size(), n->GetName().c_str()); + return nullptr); auto out_data_anchor = n->GetOutDataAnchor(index); - GE_IF_BOOL_EXEC(out_data_anchor == nullptr, - REPORT_INNER_ERROR("E19999", "out anchor is null, index:%u, node_name:%s", index, n->GetName().c_str()); - GELOGE(FAILED, "[Check][OutAnchor]is null, index:%u, node_name:%s", index, n->GetName().c_str()); - return nullptr); + GE_IF_BOOL_EXEC( + out_data_anchor == nullptr, + REPORT_INNER_ERROR("E19999", "out anchor is null, index:%u, node_name:%s", index, n->GetName().c_str()); + GELOGE(FAILED, "[Check][OutAnchor]is null, index:%u, node_name:%s", index, n->GetName().c_str()); + return nullptr); for (const auto &in_anchor : out_data_anchor->GetPeerInDataAnchors()) { auto owner_node = in_anchor->GetOwnerNode(); auto op_desc = owner_node->GetOpDesc(); @@ -1618,10 +1636,11 @@ Status BlockMemAssigner::AssignOutputMemoryWithReuse(const NodePtr &node, vector REPORT_INNER_ERROR("E19999", "Attr[%s] size:%zu not equal to node output size:%zu, node_name:%s", ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), memorys_type.size(), op_desc->GetOutputsSize(), 
op_desc->GetName().c_str()); - GELOGE(INTERNAL_ERROR, - "[Check][MemTypeAttr]Attr %s size:%zu not equal to node output size:%zu, node_name:%s", - ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), memorys_type.size(), - op_desc->GetOutputsSize(), op_desc->GetName().c_str()); + GELOGE( + INTERNAL_ERROR, + "[Check][MemTypeAttr]Attr %s size:%zu not equal to node output size:%zu, node_name:%s", + ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), memorys_type.size(), + op_desc->GetOutputsSize(), op_desc->GetName().c_str()); return INTERNAL_ERROR; } @@ -1751,7 +1770,8 @@ void BlockMemAssigner::AssignMemoryWithReuse(vector &ranges) { TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), tvm_workspace_memory_type.size(), temp.size(), n->GetName().c_str()); GELOGE(INTERNAL_ERROR, "[Check][Attr]Attr %s size:%zu is not equal to workspace size:%zu, node_name:%s", - TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), tvm_workspace_memory_type.size(), temp.size(), n->GetName().c_str()); + TVM_ATTR_NAME_WORKSPACE_TYPE.c_str(), tvm_workspace_memory_type.size(), + temp.size(), n->GetName().c_str()); return; } for (size_t i = 0; i < temp.size(); i++) { @@ -2136,7 +2156,7 @@ void BlockMemAssigner::SetOpMemOffset(bool is_zero_copy) { Status BlockMemAssigner::Assign() { vector ranges; if (GetMemoryRanges(ranges) != SUCCESS) { - GELOGE(FAILED, "GetMemoryRanges Fail!"); + GELOGE(FAILED, "[Get][MemoryRanges] Fail!"); return FAILED; } GE_IF_BOOL_EXEC(ranges.empty(), return SUCCESS); diff --git a/ge/graph/build/memory/graph_mem_assigner.cc b/ge/graph/build/memory/graph_mem_assigner.cc index e97d343d..9d5b9d08 100755 --- a/ge/graph/build/memory/graph_mem_assigner.cc +++ b/ge/graph/build/memory/graph_mem_assigner.cc @@ -337,7 +337,7 @@ uint32_t GetContinuousMemoryType(const OpDescPtr &op_desc) { } if (continuous_type != 0) { - GELOGI("Current node %s continuous type %d", op_desc->GetName().c_str(), continuous_type); + GELOGI("[Get][MemType:Continuous]Current node %s, value is %d", op_desc->GetName().c_str(), continuous_type); } return continuous_type; } @@ -482,7 +482,7 @@ Status GraphMemoryAssigner::ReAssignContinuousMemory(bool is_loop_graph) { "[Assign][Memory:Continuous:Input]fail for node:%s.", node->GetName().c_str()) } for (auto pair : memory_offset_) { - GELOGD("After reassign continuous memory, memory type = %ld, mem offset = %zu.", pair.first, + GELOGD("[Reassign][Memory:Continuous]At last, memory type = %ld, mem offset = %zu.", pair.first, pair.second.mem_offset_); } return ge::SUCCESS; @@ -490,7 +490,7 @@ Status GraphMemoryAssigner::ReAssignContinuousMemory(bool is_loop_graph) { Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node, int64_t &continuous_mem_start, int64_t &continuous_mem_size, int64_t memory_type, uint32_t continuous_type, bool reverse_refresh) { - GELOGI("Current node %s needs continuous input", node->GetName().c_str()); + GELOGI("[Assign][Memory:Input:Continuous]start for Current node %s", node->GetName().c_str()); auto iter = memory_offset_.find(memory_type); if (iter == memory_offset_.end()) { REPORT_INNER_ERROR("E19999", "find memory offset fail for mem_type:%ld, " @@ -566,9 +566,9 @@ Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node, auto peer_output_offset = output_list.at(peer_out_data_anchor->GetIdx()); output_list.at(peer_out_data_anchor->GetIdx()) = output_list_this.at(out2ins.begin()->first); peer_op_desc->SetOutputOffset(output_list); - GELOGI("Node %s out %d ref in %d input node %s, use output offset %ld update %ld", node->GetName().c_str(), - out2ins.begin()->first, 
out2ins.begin()->second, peer_op_desc->GetName().c_str(), - output_list_this.at(out2ins.begin()->first), peer_output_offset); + GELOGI("[Update][Offset]Node %s out %d ref in %d input node %s, use output offset %ld update %ld", + node->GetName().c_str(), out2ins.begin()->first, out2ins.begin()->second, + peer_op_desc->GetName().c_str(), output_list_this.at(out2ins.begin()->first), peer_output_offset); } else { GELOGD("Node %s out %d ref in %d input node %s with total ref numbers %zu.", node->GetName().c_str(), out2ins.begin()->first, out2ins.begin()->second, peer_op_desc->GetName().c_str(), out2ins.size()); @@ -1671,7 +1671,7 @@ bool GraphMemoryAssigner::AssignContinuousInputMemoryWithAtomicProcessDirectly( auto continuous_type = iter->second; bool continuous_input = ((continuous_type & kTypeInput) != 0) || ((continuous_type & kTypeInputNoPadding) != 0); if (continuous_input) { - GELOGI("Node %s 's precursor node %s need assign continuous input memory, store node firstly", + GELOGI("[Store][Node] of %s cause it's precursor node %s need assign continuous input memory", input_continuous_node->GetName().c_str(), in_node->GetName().c_str()); return false; } @@ -1681,7 +1681,7 @@ bool GraphMemoryAssigner::AssignContinuousInputMemoryWithAtomicProcessDirectly( node_2_continuous_type.emplace(out_node, continuous_type); bool continuous_input = ((continuous_type & kTypeInput) != 0) || ((continuous_type & kTypeInputNoPadding) != 0); if (continuous_input) { - GELOGI("Node %s 's succeed node %s need assign continuous input memory, store node firstly", + GELOGI("[Store][Node] of %s cause it's succeed node %s need assign continuous input memory", input_continuous_node->GetName().c_str(), out_node->GetName().c_str()); return false; } diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.cc b/ge/graph/preprocess/insert_op/ge_aipp_op.cc index 7c8d9073..25af98b8 100755 --- a/ge/graph/preprocess/insert_op/ge_aipp_op.cc +++ b/ge/graph/preprocess/insert_op/ge_aipp_op.cc @@ -428,7 +428,8 @@ Status AippOp::ConvertRelatedInputNameToRank() { if (!convert_flag) { string error_msg = "Top name " + related_input_name + "convert rank failed, Please" " ensure top name in aipp config is the top name of data node."; - GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); + GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); + REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); return PARAM_INVALID; } diff --git a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc index b1534eb4..41a32173 100755 --- a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc +++ b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc @@ -124,13 +124,15 @@ Status InsertNewOpUtil::CheckInputNamePositionNotRepeat() { if (another_item->related_input_name().empty()) { string error_msg = "Can not both set related_input_name and related_input_rank!" " Please ensure param is the same with the first aipp config(related_input_name)."; - GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); + GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); + REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); return PARAM_INVALID; } if (item->related_input_name() == another_item->related_input_name()) { string error_msg = "Can not insert aipp to the same postion! 
Please ensure related_input_name" " param is different in different aipp config."; - GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); + GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); + REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); return PARAM_INVALID; } } @@ -150,13 +152,15 @@ Status InsertNewOpUtil::CheckInputRankPositionNoRepeat() { if (!another_item->related_input_name().empty()) { string error_msg = "Can not both set related_input_rank and related_input_name!" " Please ensure param is the same with the first aipp config(related_input_rank)."; - GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); + GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); + REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); return PARAM_INVALID; } if (item->related_input_rank() == another_item->related_input_rank()) { string error_msg = "Can not insert aipp to the same postion! Please ensure related_input_rank" " param is different in different aipp config."; - GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); + GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); + REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); return PARAM_INVALID; } } @@ -212,7 +216,7 @@ Status InsertNewOpUtil::CheckGraph(const ComputeGraphPtr &graph) { } } } - GE_CHK_LOG_AND_ERRORMSG((aippNodes.size() == 0) || (aippNodes.size() == next_nodes_cnt), + GE_CHK_LOG_AND_ERRORMSG((aippNodes.size() == 0) || (aippNodes.size() == next_nodes_cnt), PARAM_INVALID, "Can not config part of outputs of Data node to support AIPP, config all " "of the outputs of Data to support AIPP, or config none of them"); diff --git a/ge/plugin/engine/CMakeLists.txt b/ge/plugin/engine/CMakeLists.txt index e5736b51..3aace4ac 100644 --- a/ge/plugin/engine/CMakeLists.txt +++ b/ge/plugin/engine/CMakeLists.txt @@ -41,6 +41,7 @@ target_link_options(engine PRIVATE target_link_libraries(engine PRIVATE $ -Wl,--no-as-needed + c_sec slog -Wl,--as-needed -lrt diff --git a/tests/ut/ge/common/format_transfer_fractal_nz_unittest.cc b/tests/ut/ge/common/format_transfer_fractal_nz_unittest.cc index 5bbc5776..02f8251a 100644 --- a/tests/ut/ge/common/format_transfer_fractal_nz_unittest.cc +++ b/tests/ut/ge/common/format_transfer_fractal_nz_unittest.cc @@ -9136,23 +9136,23 @@ TEST_F(UtestFormatTransferNdFractNz, invalid_src_data_type2) { EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_DATATYPE_INVALID); } -// TEST_F(UtestFormatTransferNdFractNz, invalid_src_data_type3) { -// uint16_t data[1 * 1 * 1 * 16 * 16] = {0}; -// TransArgs args{reinterpret_cast(data), -// FORMAT_FRACTAL_NZ, -// FORMAT_NHWC, -// {1, 1, 1, 16, 16}, -// { -// 1, -// 1, -// 4, -// 4, -// }, -// DT_VARIANT}; -// TransResult result; -// FormatTransferFractalNzND transfer; -// EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_DATATYPE_INVALID); -// } +TEST_F(UtestFormatTransferNdFractNz, invalid_src_data_type3) { + uint16_t data[1 * 1 * 1 * 16 * 16] = {0}; + TransArgs args{reinterpret_cast(data), + FORMAT_FRACTAL_NZ, + FORMAT_NHWC, + {1, 1, 1, 16, 16}, + { + 1, + 1, + 4, + 4, + }, + DT_STRING}; + TransResult result; + FormatTransferFractalNzND transfer; + EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_DATATYPE_INVALID); +} TEST_F(UtestFormatTransferNdFractNz, invalid_dst_format2) { uint16_t data[1 * 1 * 1 * 1 * 16 * 16] = {0}; diff --git a/tests/ut/ge/common/format_transfer_nhwc_fractalz_unittest.cc 
b/tests/ut/ge/common/format_transfer_nhwc_fractalz_unittest.cc index b2cfe2db..7431440b 100644 --- a/tests/ut/ge/common/format_transfer_nhwc_fractalz_unittest.cc +++ b/tests/ut/ge/common/format_transfer_nhwc_fractalz_unittest.cc @@ -5354,14 +5354,14 @@ TEST_F(UtestFormatTransferNhwcFz, build_transfer_uint8) { EXPECT_NE(transfer, nullptr); } -// TEST_F(UtestFormatTransferNhwcFz, invalid_data_type) { -// uint16_t data[1 * 4 * 4 * 1] = {0}; -// TransArgs args{ -// reinterpret_cast(data), FORMAT_NHWC, FORMAT_FRACTAL_NZ, {1, 4, 4}, {1, 1, 1, 16, 16}, DT_VARIANT}; -// FormatTransferFractalZ transfer; -// EXPECT_EQ(transfer.TransShape(args.src_format, args.src_shape, args.src_data_type, args.dst_format, args.dst_shape), -// ACL_ERROR_GE_DATATYPE_INVALID); -// } +TEST_F(UtestFormatTransferNhwcFz, invalid_data_type) { + uint16_t data[1 * 4 * 4 * 1] = {0}; + TransArgs args{ + reinterpret_cast(data), FORMAT_NHWC, FORMAT_FRACTAL_NZ, {1, 4, 4}, {1, 1, 1, 16, 16}, DT_STRING}; + FormatTransferFractalZ transfer; + EXPECT_EQ(transfer.TransShape(args.src_format, args.src_shape, args.src_data_type, args.dst_format, args.dst_shape), + ACL_ERROR_GE_DATATYPE_INVALID); +} TEST_F(UtestFormatTransferNhwcFz, invalid_data_format) { uint16_t data[1 * 4 * 4 * 1] = {0}; diff --git a/tests/ut/ge/common/format_transfer_unittest.cc b/tests/ut/ge/common/format_transfer_unittest.cc index 1a56d2f9..73b7703d 100644 --- a/tests/ut/ge/common/format_transfer_unittest.cc +++ b/tests/ut/ge/common/format_transfer_unittest.cc @@ -52,34 +52,34 @@ TEST_F(UtestFormatTransfer, build_unsupported_transfer) { EXPECT_EQ(transfer2, nullptr); } -// TEST_F(UtestFormatTransfer, get_size_by_data_type) { -// EXPECT_EQ(GetSizeByDataType(DT_FLOAT), 4); -// EXPECT_EQ(GetSizeByDataType(DT_FLOAT16), 2); -// EXPECT_EQ(GetSizeByDataType(DT_INT8), 1); -// EXPECT_EQ(GetSizeByDataType(DT_INT16), 2); -// EXPECT_EQ(GetSizeByDataType(DT_UINT16), 2); -// EXPECT_EQ(GetSizeByDataType(DT_UINT8), 1); -// EXPECT_EQ(GetSizeByDataType(DT_INT32), 4); -// EXPECT_EQ(GetSizeByDataType(DT_INT64), 8); -// EXPECT_EQ(GetSizeByDataType(DT_UINT32), 4); -// EXPECT_EQ(GetSizeByDataType(DT_UINT64), 8); -// EXPECT_EQ(GetSizeByDataType(DT_BOOL), 1); -// EXPECT_EQ(GetSizeByDataType(DT_DOUBLE), 8); -// EXPECT_EQ(GetSizeByDataType(DT_STRING), -1); -// EXPECT_EQ(GetSizeByDataType(DT_DUAL_SUB_INT8), 1); -// EXPECT_EQ(GetSizeByDataType(DT_DUAL_SUB_UINT8), 1); -// EXPECT_EQ(GetSizeByDataType(DT_COMPLEX64), 8); -// EXPECT_EQ(GetSizeByDataType(DT_COMPLEX128), 16); -// EXPECT_EQ(GetSizeByDataType(DT_QINT8), 1); -// EXPECT_EQ(GetSizeByDataType(DT_QINT16), 2); -// EXPECT_EQ(GetSizeByDataType(DT_QINT32), 4); -// EXPECT_EQ(GetSizeByDataType(DT_QUINT8), 1); -// EXPECT_EQ(GetSizeByDataType(DT_QUINT16), 2); -// EXPECT_EQ(GetSizeByDataType(DT_RESOURCE), -1); -// EXPECT_EQ(GetSizeByDataType(DT_STRING_REF), -1); -// EXPECT_EQ(GetSizeByDataType(DT_DUAL), 5); -// EXPECT_EQ(GetSizeByDataType(DT_UNDEFINED), -1); -// EXPECT_EQ(DT_UNDEFINED, 27); -// } +TEST_F(UtestFormatTransfer, get_size_by_data_type) { + EXPECT_EQ(GetSizeByDataType(DT_FLOAT), 4); + EXPECT_EQ(GetSizeByDataType(DT_FLOAT16), 2); + EXPECT_EQ(GetSizeByDataType(DT_INT8), 1); + EXPECT_EQ(GetSizeByDataType(DT_INT16), 2); + EXPECT_EQ(GetSizeByDataType(DT_UINT16), 2); + EXPECT_EQ(GetSizeByDataType(DT_UINT8), 1); + EXPECT_EQ(GetSizeByDataType(DT_INT32), 4); + EXPECT_EQ(GetSizeByDataType(DT_INT64), 8); + EXPECT_EQ(GetSizeByDataType(DT_UINT32), 4); + EXPECT_EQ(GetSizeByDataType(DT_UINT64), 8); + EXPECT_EQ(GetSizeByDataType(DT_BOOL), 1); + 
EXPECT_EQ(GetSizeByDataType(DT_DOUBLE), 8); + EXPECT_EQ(GetSizeByDataType(DT_STRING), -1); + EXPECT_EQ(GetSizeByDataType(DT_DUAL_SUB_INT8), 1); + EXPECT_EQ(GetSizeByDataType(DT_DUAL_SUB_UINT8), 1); + EXPECT_EQ(GetSizeByDataType(DT_COMPLEX64), 8); + EXPECT_EQ(GetSizeByDataType(DT_COMPLEX128), 16); + EXPECT_EQ(GetSizeByDataType(DT_QINT8), 1); + EXPECT_EQ(GetSizeByDataType(DT_QINT16), 2); + EXPECT_EQ(GetSizeByDataType(DT_QINT32), 4); + EXPECT_EQ(GetSizeByDataType(DT_QUINT8), 1); + EXPECT_EQ(GetSizeByDataType(DT_QUINT16), 2); + EXPECT_EQ(GetSizeByDataType(DT_RESOURCE), 8); + EXPECT_EQ(GetSizeByDataType(DT_STRING_REF), -1); + EXPECT_EQ(GetSizeByDataType(DT_DUAL), 5); + EXPECT_EQ(GetSizeByDataType(DT_UNDEFINED), -1); + EXPECT_EQ(DT_UNDEFINED, 28); +} } // namespace formats } // namespace ge From 1d0359d1c6aefeac288df66b5cdd5053ab75a0c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E7=A3=8A?= Date: Thu, 25 Mar 2021 20:02:03 +0800 Subject: [PATCH 07/14] fixed pclint warning --- inc/framework/common/string_util.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/inc/framework/common/string_util.h b/inc/framework/common/string_util.h index f0368363..e506cb8e 100644 --- a/inc/framework/common/string_util.h +++ b/inc/framework/common/string_util.h @@ -52,7 +52,7 @@ class GE_FUNC_VISIBILITY StringUtils { return s; } // lint -esym(551,*) - static std::string &Rtrim(std::string &s) { /*lint !e618*/ + static std::string &Rtrim(std::string &s) { /*lint !e618*/ #if __cplusplus >= 201103L (void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { return !std::isspace(c); })); #else @@ -76,8 +76,8 @@ class GE_FUNC_VISIBILITY StringUtils { /// @param [in] delim separator /// @return string array after segmentation /// - static std::vector Split(const std::string &str, char delim) { - std::vector elems; + static std::vector Split(const std::string &str, char delim) { /*lint !e1077*/ + std::vector elems; /*lint !e1077*/ if (str.empty()) { elems.emplace_back(""); From 7ec6e4fe61ad0d1d52125cfdee0927f5c413abca Mon Sep 17 00:00:00 2001 From: yangwei Date: Tue, 30 Mar 2021 14:19:54 +0800 Subject: [PATCH 08/14] r13_l2 --- ge/graph/load/model_manager/davinci_model.cc | 39 +++--- ge/graph/load/model_manager/davinci_model.h | 3 - .../task_info/kernel_task_info.cc | 115 +++++++++++------- .../task_info/kernel_task_info.h | 5 + .../compiledsubgraph/known_node_executor.cc | 5 - 5 files changed, 92 insertions(+), 75 deletions(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index ccf17fe8..0aac173e 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -2875,23 +2875,16 @@ Status DavinciModel::UpdateKnownNodeArgs(const vector &inputs, const vec GELOGI("DavinciModel::UpdateKnownNodeArgs in"); GE_CHK_STATUS_RET(CreateKnownZeroCopyMap(inputs, outputs), "DavinciModel::UpdateKnownNodeArgs create map for input/output zero copy."); - if (!base_addr_not_changed_) { - total_io_addrs_.clear(); - orig_total_io_addrs_.clear(); - for (size_t task_index = 0; task_index < task_list_.size(); ++task_index) { - auto &task = task_list_[task_index]; - if (task != nullptr) { - Status ret = task->UpdateArgs(); - if (ret != SUCCESS) { - GELOGE(FAILED, "task %zu created by davinci model is nullptr.", task_index); - return FAILED; - } + total_io_addrs_.clear(); + for (size_t task_index = 0; task_index < task_list_.size(); ++task_index) { + auto &task = task_list_[task_index]; + if (task != nullptr) 
{ + Status ret = task->UpdateArgs(); + if (ret != SUCCESS) { + GELOGE(FAILED, "task %zu created by davinci model is nullptr.", task_index); + return FAILED; } } - // cache latest iterator io addr - orig_total_io_addrs_ = total_io_addrs_; - } else { - total_io_addrs_ = orig_total_io_addrs_; } GE_CHK_STATUS_RET(UpdateKnownZeroCopyAddr(total_io_addrs_, false), "DavinciModel::UpdateKnownZeroCopyAddr failed."); @@ -2949,16 +2942,14 @@ Status DavinciModel::MallocKnownArgs() { return ret; } } + rtError_t rt_ret; // malloc args memory - if (total_args_size_ == 0) { - GELOGW("DavinciModel::MallocKnownArgs total_args_size_ equals to zero."); - return SUCCESS; - } - - rtError_t rt_ret = rtMalloc(&args_, total_args_size_, RT_MEMORY_HBM); - if (rt_ret != RT_ERROR_NONE) { - GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret); - return RT_ERROR_TO_GE_STATUS(rt_ret); + if (total_args_size_ != 0) { + rt_ret = rtMalloc(&args_, total_args_size_, RT_MEMORY_HBM); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } } // malloc dynamic and static hybrid memory if (total_hybrid_args_size_ != 0) { diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 58478b0f..93f968ee 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -534,7 +534,6 @@ class DavinciModel { Status UpdateKnownNodeArgs(const vector &inputs, const vector &outputs); Status CreateKnownZeroCopyMap(const vector &inputs, const vector &outputs); Status UpdateKnownZeroCopyAddr(vector &total_io_addrs, bool update_args = true); - void SetKnownNodeAddrNotChanged(bool base_addr_not_changed) { base_addr_not_changed_ = base_addr_not_changed; } Status GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info) const; Status GetAllAippInputOutputDims(uint32_t index, vector &input_dims, @@ -1007,8 +1006,6 @@ class DavinciModel { map known_input_data_info_; map known_output_data_info_; vector total_io_addrs_; - vector orig_total_io_addrs_; - bool base_addr_not_changed_ = false; vector> batch_info_; vector> combined_batch_info_; diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index c8d9f97a..e7dca2a1 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -124,7 +124,8 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci return FAILED; } - ret = InitTVMTask(args_offset_tmp[0], kernel_def); + io_addr_offset_ = args_offset_tmp[0]; + ret = InitTVMTask(io_addr_offset_, kernel_def); } else if (kernel_type_ == ccKernelType::CUSTOMIZED) { ret = InitAICPUCustomTask(context.op_index(), kernel_def); } else if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) { @@ -380,7 +381,8 @@ Status KernelTaskInfo::Distribute() { GELOGD("KernelTaskInfo Distribute Start."); if (davinci_model_->IsKnownNode()) { if (kernel_type_ == ccKernelType::TE) { - args_ = davinci_model_->GetCurrentArgsAddr(args_offset_); + args_ = l2_buffer_on_ ? 
davinci_model_->GetCurrentHybridArgsAddr(hybrid_args_offset_) + : davinci_model_->GetCurrentArgsAddr(args_offset_); } else if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) { args_ = davinci_model_->GetCurrentHybridArgsAddr(hybrid_args_offset_); } @@ -449,29 +451,41 @@ void KernelTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) { } } +Status KernelTaskInfo::CopyNoncontinuousArgs(uint16_t offset) { + GE_CHECK_NOTNULL(davinci_model_); + // copy new io addrs + vector io_addrs = io_addrs_; + davinci_model_->UpdateKnownZeroCopyAddr(io_addrs); + auto addr_size = kAddrLen * io_addrs.size(); + + // copy io addr + errno_t sec_ret = memcpy_s(args_addr.get() + offset, addr_size, io_addrs.data(), addr_size); + if (sec_ret != EOK) { + GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret); + return FAILED; + } + + // copy args to device + rtError_t rt_ret = rtMemcpy(args_, args_size_, args_addr.get(), args_size_, RT_MEMCPY_HOST_TO_DEVICE); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + GELOGD("Copy noncontinuous args success, kernel type %d.", kernel_type_); + return SUCCESS; +} + Status KernelTaskInfo::UpdateArgs() { GELOGI("KernelTaskInfo::UpdateArgs in."); + GE_CHECK_NOTNULL(davinci_model_); if (kernel_type_ == ccKernelType::TE) { + if (l2_buffer_on_) { + return CopyNoncontinuousArgs(io_addr_offset_); + } davinci_model_->SetTotalIOAddrs(io_addrs_); } else if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) { - vector io_addrs = io_addrs_; - davinci_model_->UpdateKnownZeroCopyAddr(io_addrs); - uintptr_t io_addr = reinterpret_cast(args_addr.get()) + sizeof(aicpu::AicpuParamHead); - auto addrs_size = sizeof(uint64_t) * io_addrs.size(); - errno_t sec_ret = memcpy_s(reinterpret_cast(io_addr), addrs_size, io_addrs.data(), addrs_size); - if (sec_ret != EOK) { - GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret); - return FAILED; - } - // copy args to device - rtError_t rt_ret = rtMemcpy(args_, args_size_, args_addr.get(), args_size_, RT_MEMCPY_HOST_TO_DEVICE); - if (rt_ret != RT_ERROR_NONE) { - GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X", rt_ret); - return RT_ERROR_TO_GE_STATUS(rt_ret); - } + return CopyNoncontinuousArgs(sizeof(aicpu::AicpuParamHead)); } - - GELOGI("KernelTaskInfo::UpdateArgs success."); return SUCCESS; } @@ -516,8 +530,8 @@ Status KernelTaskInfo::UpdateL2Data(const domi::KernelDef &kernel_def) { return SUCCESS; } - char *sm_contrl = const_cast(sm_desc.data()); - rtL2Ctrl_t *l2_ctrl_info = reinterpret_cast(sm_contrl); + char *sm_control = const_cast(sm_desc.data()); + rtL2Ctrl_t *l2_ctrl_info = reinterpret_cast(sm_control); uint64_t gen_base_addr = davinci_model_->GetRtBaseAddr(); // There is no weight for te op now. Update L2_mirror_addr by data memory base. 
@@ -545,19 +559,31 @@ Status KernelTaskInfo::UpdateL2Data(const domi::KernelDef &kernel_def) { return SUCCESS; } +void KernelTaskInfo::SetContinuousArgs(uint32_t args_size, DavinciModel *davinci_model) { + args_offset_ = davinci_model->GetTotalArgsSize(); + davinci_model->SetTotalArgsSize(args_size); +} + +void KernelTaskInfo::SetNoncontinuousArgs(uint32_t args_size, DavinciModel *davinci_model) { + hybrid_args_offset_ = davinci_model->GetHybridArgsSize(); + davinci_model->SetHybridArgsSize(args_size); +} + Status KernelTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) { + GE_CHECK_NOTNULL(davinci_model); const domi::KernelDef &kernel_def = task_def.kernel(); const domi::KernelContext &context = kernel_def.context(); kernel_type_ = static_cast(context.kernel_type()); + uint32_t args_size = kernel_def.args_size(); if (kernel_type_ == ccKernelType::TE) { - uint32_t args_size = kernel_def.args_size(); - args_offset_ = davinci_model->GetTotalArgsSize(); - davinci_model->SetTotalArgsSize(args_size); - GELOGI("kernel task name , args_size %u, args_offset %u", args_size, args_offset_); + if (kernel_def.sm_desc().empty()) { + SetContinuousArgs(args_size, davinci_model); + return SUCCESS; + } + l2_buffer_on_ = true; + SetNoncontinuousArgs(args_size, davinci_model); } else if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) { - hybrid_args_offset_ = davinci_model->GetHybridArgsSize(); - davinci_model->SetHybridArgsSize(kernel_def.args_size()); - GELOGI("aicpu kernel task name , args_size %u, args_offset %u", kernel_def.args_size(), hybrid_args_offset_); + SetNoncontinuousArgs(args_size, davinci_model); } return SUCCESS; } @@ -568,8 +594,23 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne // get tvm op desc OpDescPtr op_desc = davinci_model_->GetOpByIndex(ctx_.opIndex); GE_CHECK_NOTNULL(op_desc); + + args_addr = std::unique_ptr(new (std::nothrow) uint8_t[args_size_]); + errno_t sec_ret = memcpy_s(args_addr.get(), args_size_, kernel_def.args().data(), args_size_); + if (sec_ret != EOK) { + GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret); + return FAILED; + } + + Status ge_ret = UpdateL2Data(kernel_def); + // update origin l2 data + if (ge_ret != SUCCESS) { + return ge_ret; + } + if (davinci_model_->IsKnownNode()) { - args_ = davinci_model_->GetCurrentArgsAddr(args_offset_); + args_ = l2_buffer_on_ ? 
davinci_model_->GetCurrentHybridArgsAddr(hybrid_args_offset_) + : davinci_model_->GetCurrentArgsAddr(args_offset_); InitDumpTask(offset); return SUCCESS; } @@ -609,12 +650,6 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } - vector args_info(args_size_); - errno_t sec_ret = memcpy_s(args_info.data(), args_size_, kernel_def.args().data(), args_size_); - if (sec_ret != EOK) { - GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret); - return FAILED; - } if ((args_size_ <= offset) || (args_size_ - offset < kAddrLen * tensor_device_addrs.size())) { GELOGE(FAILED, "offset >= kernelInfo.argsSize or copy content beyond applied memory."); @@ -628,7 +663,7 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } - sec_ret = memcpy_s(args_info.data() + offset, args_size_ - offset, tensor_device_addrs.data(), + sec_ret = memcpy_s(args_addr.get() + offset, args_size_ - offset, tensor_device_addrs.data(), kAddrLen * tensor_device_addrs.size()); if (sec_ret != EOK) { GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret); @@ -640,19 +675,13 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne GE_CHK_BOOL_TRUE_EXEC_INFO(davinci_model_->GetOpDugReg(), dump_args_ = static_cast(args_) + offset, "Op debug is open in TVM task info"); - Status ge_ret = UpdateL2Data(kernel_def); - // update origin l2 data - if (ge_ret != SUCCESS) { - return ge_ret; - } - vector virtual_io_addrs; // use virtual address for zero copy key. virtual_io_addrs.insert(virtual_io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end()); virtual_io_addrs.insert(virtual_io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end()); if (op_desc->GetType() == ATOMICADDRCLEAN) { virtual_io_addrs.insert(virtual_io_addrs.end(), workspace_data_addrs.begin(), workspace_data_addrs.end()); } - davinci_model_->SetZeroCopyAddr(op_desc, virtual_io_addrs, args_info.data(), args_, args_size_, offset); + davinci_model_->SetZeroCopyAddr(op_desc, virtual_io_addrs, args_addr.get(), args_, args_size_, offset); GELOGD("Do InitTVMTask end"); return SUCCESS; diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.h b/ge/graph/load/model_manager/task_info/kernel_task_info.h index 7cabf259..4156c511 100644 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.h +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.h @@ -129,6 +129,9 @@ class KernelTaskInfo : public TaskInfo { bool IsL1FusionOp(const OpDescPtr &op_desc); void SetIoAddrs(const OpDescPtr &op_desc); void InitDumpTask(uint32_t offset); + void SetContinuousArgs(uint32_t args_size, DavinciModel *davinci_model); + void SetNoncontinuousArgs(uint32_t args_size, DavinciModel *davinci_model); + Status CopyNoncontinuousArgs(uint16_t offset); // For super kernel Status SaveSKTDumpInfo(); @@ -163,6 +166,8 @@ class KernelTaskInfo : public TaskInfo { uint32_t hybrid_args_offset_ = 0; int64_t fixed_addr_offset_ = 0; std::unique_ptr args_addr = nullptr; + uint16_t io_addr_offset_ = 0; + bool l2_buffer_on_ = false; bool call_save_dump_ = false; // aicpu ext_info device mem diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index 45882343..1c46db20 100644 --- 
a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -105,11 +105,6 @@ Status KnownNodeTask::Init(TaskContext &context) { "known node task allocate workspace failed."); RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeTask_AllocateWorkspace] End, size %zu", davinci_model_->TotalMemSize()); - bool addr_not_changed = false; - if (davinci_model_->GetRuntimeParam().mem_base == buffer) { - addr_not_changed = true; - } - davinci_model_->SetKnownNodeAddrNotChanged(addr_not_changed); // update mem base davinci_model_->UpdateMemBase(static_cast(buffer)); GELOGI("KnownNodeTask::Init mem base is %p, size %lu.", From 7516130c7e7412114e35815d5db712444ad7e3fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E7=A3=8A?= Date: Tue, 30 Mar 2021 20:39:05 +0800 Subject: [PATCH 09/14] delete code --- ge/analyzer/analyzer.cc | 8 +-- .../format_transfer_fracz_nchw.cc | 2 +- .../format_transfer_fracz_nhwc.cc | 50 +++++++++---------- .../format_transfer_nc1hwc0_nchw.cc | 50 +++++++++---------- ge/generator/ge_generator.cc | 4 +- ge/graph/build/logical_stream_allocator.cc | 43 ++++++++-------- ge/graph/build/memory/block_mem_assigner.cc | 4 +- ge/graph/build/memory/graph_mem_assigner.cc | 16 +++--- ge/graph/preprocess/insert_op/ge_aipp_op.cc | 3 +- .../insert_op/util_insert_aipp_op.cc | 12 ++--- ge/plugin/engine/CMakeLists.txt | 1 - .../format_transfer_5d_nchw_unittest.cc | 18 +++---- .../format_transfer_fractal_nz_unittest.cc | 34 ++++++------- .../format_transfer_fracz_nhwc_unittest.cc | 20 ++++---- .../format_transfer_nhwc_fractalz_unittest.cc | 16 +++--- 15 files changed, 135 insertions(+), 146 deletions(-) diff --git a/ge/analyzer/analyzer.cc b/ge/analyzer/analyzer.cc index b2f30db7..e35a3723 100755 --- a/ge/analyzer/analyzer.cc +++ b/ge/analyzer/analyzer.cc @@ -155,12 +155,12 @@ std::shared_ptr Analyzer::GetJsonObject(uint64_t session_id, uint64_t std::lock_guard lg(mutex_); auto iter = graph_infos_.find(session_id); if (iter == graph_infos_.end()) { - GELOGE(PARAM_INVALID, "[Check][SessionId]session_id:%lu does not exist! graph_id:%lu", session_id, graph_id); + GELOGE(PARAM_INVALID, "[Check][Session_id]session_id:%lu does not exist! graph_id:%lu.", session_id, graph_id); return nullptr; } else { auto iter1 = (iter->second).find(graph_id); if (iter1 == (iter->second).end()) { - GELOGE(PARAM_INVALID, "[Check][GraphId]graph_id:%lu does not exist! session_id:%lu.", graph_id, session_id); + GELOGE(PARAM_INVALID, "[Check][Graph_id]graph_id:%lu does not exist! 
session_id:%lu.", graph_id, session_id); return nullptr; } GELOGI("GetJsonObject Success!session_id:%lu graph_id:%lu", session_id, graph_id); @@ -200,7 +200,7 @@ ge::Status Analyzer::CreateAnalyzerFile() { } ge::Status Analyzer::SaveAnalyzerDataToFile(uint64_t session_id, uint64_t graph_id) { - GELOGD("start to save analyze file"); + GELOGD("start to save analyze file."); auto graph_info = GetJsonObject(session_id, graph_id); GE_CHECK_NOTNULL(graph_info); @@ -232,7 +232,7 @@ ge::Status Analyzer::SaveAnalyzerDataToFile(uint64_t session_id, uint64_t graph_ } ge::Status Analyzer::DoAnalyze(DataInfo &data_info) { - GELOGD("start to do analyzer process"); + GELOGD("start to do analyzer process!"); auto pnode = data_info.node_ptr; GE_CHECK_NOTNULL(pnode); diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc b/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc index 5233a72e..394e7126 100755 --- a/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc @@ -37,7 +37,7 @@ Status CheckArgsForFracZToNchw(const TransArgs &args) { std::string error = "Dose not support trans format from " + FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); - GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); + GE_ERRORLOG_AND_ERRORMSG(UNSUPPORTED, error.c_str()); return ACL_ERROR_GE_FORMAT_INVALID; } if (!CheckDataTypeSupported(args.src_data_type)) { diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc b/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc index 1aed4a74..96938cea 100755 --- a/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc @@ -37,34 +37,34 @@ Status CheckArgsForFracZToNhwc(const TransArgs &args) { std::string error = "Dose not support trans format from " + FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); - GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); - return ACL_ERROR_GE_FORMAT_INVALID; + GE_ERRORLOG_AND_ERRORMSG(UNSUPPORTED, error.c_str()); + return UNSUPPORTED; } if (!CheckDataTypeSupported(args.src_data_type)) { - GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, "Failed to trans shape from FORMAT_FRACTAL_Z to NHWC, invalid data type %s", + GELOGE(UNSUPPORTED, "Failed to trans shape from FORMAT_FRACTAL_Z to NHWC, invalid data type %s", TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); - return ACL_ERROR_GE_DATATYPE_INVALID; + return UNSUPPORTED; } if (!CheckShapeValid(src_shape, kFracZDimsNum)) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check src shape %s", ShapeToString(src_shape).c_str()); - return ACL_ERROR_GE_SHAPE_INVALID; + GELOGE(PARAM_INVALID, "Failed to check src shape %s", ShapeToString(src_shape).c_str()); + return PARAM_INVALID; } if (!CheckShapeValid(dst_shape, kNhwcDimsNum)) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check dst shape %s", ShapeToString(dst_shape).c_str()); - return ACL_ERROR_GE_SHAPE_INVALID; + GELOGE(PARAM_INVALID, "Failed to check dst shape %s", ShapeToString(dst_shape).c_str()); + return PARAM_INVALID; } int64_t c0 = GetCubeSizeByDataType(args.src_data_type); if (c0 < 0) { - return ACL_ERROR_GE_DATATYPE_INVALID; + return PARAM_INVALID; } int64_t c1 = Ceil(dst_shape.at(kNhwcC), c0); int64_t n0 = 
Ceil(dst_shape.at(kNhwcN), static_cast(kNiSize)); if (src_shape.at(kFracZHWC1) != dst_shape.at(kNhwcH) * dst_shape.at(kNhwcW) * c1 || src_shape.at(kFracZC0) != c0 || src_shape.at(kFracZNi) != kNiSize || src_shape.at(kFracZN0) != n0) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, + GELOGE(PARAM_INVALID, "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", ShapeToString(src_shape).c_str(), ShapeToString(dst_shape).c_str()); - return ACL_ERROR_GE_SHAPE_INVALID; + return PARAM_INVALID; } return SUCCESS; @@ -73,11 +73,11 @@ Status CheckArgsForFracZToNhwc(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size, int64_t total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + GELOGE(OUT_OF_MEMORY, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); - return ACL_ERROR_GE_MEMORY_ALLOCATION; + return OUT_OF_MEMORY; } auto n0 = args.src_shape.at(kFracZN0); @@ -113,10 +113,10 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + GELOGE(INTERNAL_ERROR, "Failed to copy data from FracZ offset %ld to HHWC[%ld, %ld, %ld, %ld] offset %ld, err-code %d", src_offset, n_idx, h_idx, w_idx, c_idx, dst_offset, ret); - return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; + return INTERNAL_ERROR; } } } @@ -129,9 +129,8 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size } // namespace Status FormatTransferFracZNhwc::TransFormat(const TransArgs &args, TransResult &result) { - Status ret = CheckArgsForFracZToNhwc(args); - if (ret != SUCCESS) { - return ret; + if (CheckArgsForFracZToNhwc(args) != SUCCESS) { + return PARAM_INVALID; } int size = GetSizeByDataType(args.src_data_type); auto total_size = GetItemNumByShape(args.dst_shape) * size; @@ -142,19 +141,18 @@ Status FormatTransferFracZNhwc::TransFormat(const TransArgs &args, TransResult & return SUCCESS; } - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Get %ld total size from dst shape %s, src shape %s", total_size, - ShapeToString(args.dst_shape).c_str(), ShapeToString(args.src_shape).c_str()); - return ACL_ERROR_GE_PARAM_INVALID; + GELOGE(INTERNAL_ERROR, "Get %ld total size from dst shape %s, src shape %s", total_size, + ShapeToString(args.dst_shape).c_str(), ShapeToString(args.src_shape).c_str()); + return PARAM_INVALID; } GELOGD("Begin to trans format from FracZ to NHWC, src shape %s, data type %s, dst shape %s, memory size %ld", ShapeToString(args.src_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str(), ShapeToString(args.dst_shape).c_str(), total_size); - ret = GetDstDataAfterTrans(args, result, size, total_size); - if (ret != SUCCESS) { - GELOGE(ret, "Failed to get data after trans, src shape %s, data type %s, dst shape %s, memory size %ld", + if (GetDstDataAfterTrans(args, result, size, total_size) != SUCCESS) { + GELOGE(INTERNAL_ERROR, "Failed to get data after trans, src shape %s, data type %s, dst shape %s, memory size %ld", ShapeToString(args.src_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str(), 
ShapeToString(args.dst_shape).c_str(), total_size); - return ret; + return INTERNAL_ERROR; } return SUCCESS; } @@ -162,7 +160,7 @@ Status FormatTransferFracZNhwc::TransFormat(const TransArgs &args, TransResult & Status FormatTransferFracZNhwc::TransShape(Format src_format, const std::vector &src_shape, DataType data_type, Format dst_format, std::vector &dst_shape) { GELOGD("The shape derivation from FracZ to NHWC is not unique. Trans shape in this direction is not supported"); - return ACL_ERROR_GE_FORMAT_INVALID; + return UNSUPPORTED; } REGISTER_FORMAT_TRANSFER(FormatTransferFracZNhwc, FORMAT_FRACTAL_Z, FORMAT_NHWC) diff --git a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc index 4c1e896f..4695de6b 100755 --- a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc +++ b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc @@ -37,33 +37,33 @@ Status CheckArgsForNc1hwc0ToNchw(const TransArgs &args) { std::string error = "Dose not support trans format from " + FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); - GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); - return ACL_ERROR_GE_FORMAT_INVALID; + GE_ERRORLOG_AND_ERRORMSG(UNSUPPORTED, error.c_str()); + return UNSUPPORTED; } if (!CheckDataTypeSupported(args.src_data_type)) { - GELOGE(ACL_ERROR_GE_DATATYPE_INVALID, "Failed to trans shape from NC1HWC0 to NCHW, invalid data type %s", + GELOGE(UNSUPPORTED, "Failed to trans shape from NC1HWC0 to NCHW, invalid data type %s", TypeUtils::DataTypeToSerialString(args.src_data_type).c_str()); - return ACL_ERROR_GE_DATATYPE_INVALID; + return UNSUPPORTED; } if (!CheckShapeValid(args.src_shape, kNc1hwc0DimsNum)) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check src shape %s", ShapeToString(args.src_shape).c_str()); - return ACL_ERROR_GE_SHAPE_INVALID; + GELOGE(PARAM_INVALID, "Failed to check src shape %s", ShapeToString(args.src_shape).c_str()); + return PARAM_INVALID; } if (!CheckShapeValid(args.dst_shape, kNchwDimsNum)) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check dst shape %s", ShapeToString(args.dst_shape).c_str()); - return ACL_ERROR_GE_SHAPE_INVALID; + GELOGE(PARAM_INVALID, "Failed to check dst shape %s", ShapeToString(args.dst_shape).c_str()); + return PARAM_INVALID; } int64_t c0 = GetCubeSizeByDataType(args.src_data_type); if (c0 <= 0) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to get cube size, the data type is invalid"); - return ACL_ERROR_GE_SHAPE_INVALID; + GELOGE(PARAM_INVALID, "Failed to get cube size, the data type is invalid"); + return PARAM_INVALID; } if (src_shape.at(kNc1hwc0H) != dst_shape.at(kNchwH) || src_shape.at(kNc1hwc0W) != dst_shape.at(kNchwW) || src_shape.at(kNc1hwc0N) != dst_shape.at(kNchwN) || src_shape.at(kNc1hwc0C0) != c0 || src_shape.at(kNc1hwc0C1) != (Ceil(dst_shape.at(kNchwC), c0))) { - GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", + GELOGE(PARAM_INVALID, "Failed to check relationship between src and dst shape, src shape %s, dst shape %s", ShapeToString(src_shape).c_str(), ShapeToString(dst_shape).c_str()); - return ACL_ERROR_GE_SHAPE_INVALID; + return PARAM_INVALID; } return SUCCESS; @@ -72,11 +72,11 @@ Status CheckArgsForNc1hwc0ToNchw(const TransArgs &args) { Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const int size, const int64_t 
total_size) { std::shared_ptr dst(new (std::nothrow) uint8_t[total_size], std::default_delete()); if (dst == nullptr) { - GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, + GELOGE(OUT_OF_MEMORY, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld, shape %s", TypeUtils::FormatToSerialString(args.src_format).c_str(), TypeUtils::FormatToSerialString(args.dst_format).c_str(), total_size, ShapeToString(args.dst_shape).c_str()); - return ACL_ERROR_GE_MEMORY_ALLOCATION; + return OUT_OF_MEMORY; } auto h = args.src_shape.at(kNc1hwc0H); @@ -110,11 +110,11 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, + GELOGE(INTERNAL_ERROR, "Failed to copy data from NC1HWC0[%ld, %ld, %ld, %ld, %ld] offset %ld to NCHW[%ld, %ld, %ld, %ld]" " offset %ld, err-code %d", n_idx, c1_idx, h_idx, w_idx, c0_idx, src_offset, n_idx, c_idx, h_idx, w_idx, dst_offset, ret); - return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; + return INTERNAL_ERROR; } } } @@ -127,9 +127,8 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in } // namespace Status FormatTransferNc1hwc0Nchw::TransFormat(const TransArgs &args, TransResult &result) { - Status ret = CheckArgsForNc1hwc0ToNchw(args); - if (ret != SUCCESS) { - return ret; + if (CheckArgsForNc1hwc0ToNchw(args) != SUCCESS) { + return PARAM_INVALID; } int size = GetSizeByDataType(args.src_data_type); auto total_size = GetItemNumByShape(args.dst_shape) * size; @@ -140,19 +139,18 @@ Status FormatTransferNc1hwc0Nchw::TransFormat(const TransArgs &args, TransResult return SUCCESS; } - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Get %ld total size from dst shape %s, src shape %s", total_size, + GELOGE(INTERNAL_ERROR, "Get %ld total size from dst shape %s, src shape %s", total_size, ShapeToString(args.dst_shape).c_str(), ShapeToString(args.src_shape).c_str()); - return ACL_ERROR_GE_PARAM_INVALID; + return PARAM_INVALID; } GELOGD("Begin to trans format from NC1HWC0 to NCHW, src shape %s, data type %s, dst shape %s, memory size %ld", ShapeToString(args.src_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str(), ShapeToString(args.dst_shape).c_str(), total_size); - ret = GetDstDataAfterTrans(args, result, size, total_size); - if (ret != SUCCESS) { - GELOGE(ret, "Failed to get data after trans, src shape %s, data type %s, dst shape %s, memory size %ld", + if (GetDstDataAfterTrans(args, result, size, total_size) != SUCCESS) { + GELOGE(INTERNAL_ERROR, "Failed to get data after trans, src shape %s, data type %s, dst shape %s, memory size %ld", ShapeToString(args.src_shape).c_str(), TypeUtils::DataTypeToSerialString(args.src_data_type).c_str(), ShapeToString(args.dst_shape).c_str(), total_size); - return ret; + return INTERNAL_ERROR; } return SUCCESS; } @@ -160,7 +158,7 @@ Status FormatTransferNc1hwc0Nchw::TransFormat(const TransArgs &args, TransResult Status FormatTransferNc1hwc0Nchw::TransShape(Format src_format, const std::vector &src_shape, DataType data_type, Format dst_format, std::vector &dst_shape) { GELOGD("The shape derivation from NC1HWC0 to NCHW is not unique. 
Trans shape in this direction is not supported"); - return ACL_ERROR_GE_FORMAT_INVALID; + return UNSUPPORTED; } REGISTER_FORMAT_TRANSFER(FormatTransferNc1hwc0Nchw, FORMAT_NC1HWC0, FORMAT_NCHW) diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index b5f184c5..2a4d076b 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -592,8 +592,8 @@ Status GeGenerator::SetModelNameForDump(const GeRootModelPtr &ge_root_model) { ErrorManager::GetInstance().ATCReportErrMessage("E10000", {"parameter"}, {"output"}); GELOGE(FAILED, "[Check][GetModelNameStep]Get model_name failed. Param --output is invalid, root graph name: %s", ge_root_model->GetRootGraph()->GetName().c_str()); - REPORT_CALL_ERROR("E19999", "Get model_name failed. Param --output is invalid," - "root graph name: %s", ge_root_model->GetRootGraph()->GetName().c_str()); + REPORT_CALL_ERROR("E19999", "Get model_name failed. Param --output is invalid, root graph name: %s", + ge_root_model->GetRootGraph()->GetName().c_str()); return PARAM_INVALID; } map name_to_ge_model = ge_root_model->GetSubgraphInstanceNameToModel(); diff --git a/ge/graph/build/logical_stream_allocator.cc b/ge/graph/build/logical_stream_allocator.cc index 1f054841..3bc29b70 100644 --- a/ge/graph/build/logical_stream_allocator.cc +++ b/ge/graph/build/logical_stream_allocator.cc @@ -70,7 +70,7 @@ Status AssignByLabelPass::Run(ComputeGraphPtr graph, const vector & auto iter = label_streams.find(stream_label); if (iter == label_streams.end()) { subgraph->stream_id = next_stream; - GELOGI("[Assign][NewStreamId] %ld for label %s.", next_stream, stream_label.c_str()); + GELOGI("Assign new stream %ld for label %s.", next_stream, stream_label.c_str()); label_streams.emplace(stream_label, next_stream); next_stream++; @@ -102,7 +102,7 @@ Status IndependentStreamPass::Run(ComputeGraphPtr graph, const vectorstream_id = next_stream; - GELOGI("[Assign][NewStreamId:independent] %ld for engine %s (label: %s).", next_stream, engine.c_str(), + GELOGI("Assign new independent stream %ld for engine %s (label: %s).", next_stream, engine.c_str(), stream_label.c_str()); label_streams.emplace(stream_label, next_stream); @@ -137,8 +137,8 @@ Status AssignByDependencyPass::Run(ComputeGraphPtr graph, const vectorstream_id = stream_id; - GELOGI("[Assign][NewStreamId] %ld for Reusable subgraph %s cause has not been assigned before.", - stream_id, reusable_subgraph->name.c_str()); + GELOGI("Reusable subgraph %s has not been assigned a stream, now assign new stream %ld.", + reusable_subgraph->name.c_str(), stream_id); } if (reusable_subgraph->reused_subgraph != nullptr) { @@ -147,8 +147,7 @@ Status AssignByDependencyPass::Run(ComputeGraphPtr graph, const vectorreused_subgraph = reusable_subgraph; reused_subgraphs_.emplace_back(subgraph, reusable_subgraph); - GELOGI("[Reuse][Stream]Subgraph %s of engine %s reuses stream of subgraph %s of engine %s.", - subgraph->name.c_str(), + GELOGI("Subgraph %s of engine %s reuses stream of subgraph %s of engine %s.", subgraph->name.c_str(), subgraph->engine_conf.id.c_str(), reusable_subgraph->name.c_str(), reusable_subgraph->engine_conf.id.c_str()); } @@ -260,7 +259,7 @@ int64_t AssignByDependencyPass::AssignNewStream(SubgraphPtr subgraph) { engine_stream_num_[engine_name] = stream_id + 1; } - GELOGI("[Assign][NewStreamId:temp]id:%ld for Subgraph %s (engine: %s).", stream_id, subgraph->name.c_str(), + GELOGI("Subgraph %s assigns new temp stream %ld (engine: %s).", subgraph->name.c_str(), stream_id, engine_name.c_str()); return 
stream_id; @@ -293,7 +292,7 @@ void AssignByDependencyPass::UpdateAssignedSubgraphs(Context &context) { GELOGI("Subgraph %s of engine %s reuses default stream %ld.", subgraph->name.c_str(), subgraph->engine_conf.id.c_str(), context.default_stream); } else { - GELOGI("[Update][StreamId]id:%ld for subgraph %s.", subgraph->stream_id, subgraph->name.c_str()); + GELOGI("Stream of subgraph %s has been updated to %ld.", subgraph->name.c_str(), subgraph->stream_id); } } } @@ -304,7 +303,7 @@ void AssignByDependencyPass::UpdateReusedSubgraphs() { auto &cur_subgraph = item.first; auto &reused_graph = item.second; cur_subgraph->stream_id = reused_graph->stream_id; - GELOGI("[Update][StreamId]id:%ld for subgraph %s.", cur_subgraph->stream_id, cur_subgraph->name.c_str()); + GELOGI("Stream of subgraph %s has been updated to %ld.", cur_subgraph->name.c_str(), cur_subgraph->stream_id); } } @@ -341,7 +340,7 @@ Status NodeStreamUpdatePass::Run(ComputeGraphPtr graph, const vectorstream_id, subgraph->name.c_str(), + GELOGI("Subgraph %s is assigned stream %ld (engine: %s).", subgraph->name.c_str(), subgraph->stream_id, engine_name.c_str()); } } @@ -364,12 +363,12 @@ Status NodeStreamUpdatePass::Run(ComputeGraphPtr graph, const vectorGetName().c_str(), node->GetType().c_str(), subgraph->name.c_str(), context.default_stream, engine_name.c_str()); } else if (IsEngineSkip(*subgraph) && node->GetInNodes().empty()) { - GELOGD("[Skip][StreamIdAssign]Node %s of type %s in subgraph %s doesn't need (engine: %s).", + GELOGD("Node %s of type %s in subgraph %s doesn't need to assign a stream (engine: %s).", node->GetName().c_str(), node->GetType().c_str(), subgraph->name.c_str(), engine_name.c_str()); } else { node->GetOpDesc()->SetStreamId(stream_id); - GELOGD("[Assign][StreamId]id:%ld for Node %s of type %s in subgraph %s (engine: %s).", stream_id, - node->GetName().c_str(), node->GetType().c_str(), subgraph->name.c_str(), engine_name.c_str()); + GELOGD("Node %s of type %s in subgraph %s is assigned stream %ld (engine: %s).", node->GetName().c_str(), + node->GetType().c_str(), subgraph->name.c_str(), stream_id, engine_name.c_str()); } } } @@ -398,8 +397,8 @@ int64_t UpdateForSkippedEnginePass::GetSingleInoutStream(const NodePtr &node) co if (stream_ids.size() == 1) { int64_t stream_id = *(stream_ids.begin()); - GELOGI("[Get][SingleStreamId]The stream of all input and output nodes of node %s (type: %s) is %ld.", - node->GetName().c_str(), node->GetType().c_str(), stream_id); + GELOGI("The stream of all input and output nodes of node %s (type: %s) is %ld.", node->GetName().c_str(), + node->GetType().c_str(), stream_id); return stream_id; } @@ -438,8 +437,8 @@ Status UpdateForSkippedEnginePass::Run(ComputeGraphPtr graph, const vectorSetStreamId(inout_stream); - GELOGI("[Reassign][StreamId]%ld for Node %s of type %s from stream %ld.", - inout_stream, node->GetName().c_str(), node->GetType().c_str(), stream_id); + GELOGI("Node %s of type %s reassign to stream %ld from stream %ld.", node->GetName().c_str(), + node->GetType().c_str(), inout_stream, stream_id); } } } @@ -466,7 +465,7 @@ Status AllReduceParallelPass::Run(ComputeGraphPtr graph, const vectorGetName().c_str()); + GELOGD("Subgraphs of graph %s", graph->GetName().c_str()); for (const auto &subgraph : subgraphs) { if (subgraph != nullptr) { GELOGD("subgraph: %s", subgraph->name.c_str()); @@ -675,9 +674,9 @@ Status LogicalStreamAllocator::RunPasses(const ComputeGraphPtr &graph, const vec Status status = pass->Run(graph, subgraphs, context_); if (status == SUCCESS) { - 
GELOGD("[Show][Status]Stream pass %s return SUCCESS.", pass->GetName().c_str()); + GELOGD("Stream pass %s return SUCCESS.", pass->GetName().c_str()); } else if (status == NOT_CHANGED) { - GELOGD("[Show][Status]Stream pass %s return NOT_CHANGED.", pass->GetName().c_str()); + GELOGD("Stream pass %s return NOT_CHANGED.", pass->GetName().c_str()); } else { GELOGE(status, "Stream pass %s failed.", pass->GetName().c_str()); return status; diff --git a/ge/graph/build/memory/block_mem_assigner.cc b/ge/graph/build/memory/block_mem_assigner.cc index ae0c6e0d..f9921044 100755 --- a/ge/graph/build/memory/block_mem_assigner.cc +++ b/ge/graph/build/memory/block_mem_assigner.cc @@ -508,7 +508,7 @@ BlockMemAssigner::BlockMemAssigner(ComputeGraphPtr compute_graph, const map ranges; if (GetMemoryRanges(ranges) != SUCCESS) { - GELOGE(FAILED, "[Get][MemoryRanges] Fail!"); + GELOGE(FAILED, "GetMemoryRanges Fail!"); return FAILED; } GE_IF_BOOL_EXEC(ranges.empty(), return SUCCESS); diff --git a/ge/graph/build/memory/graph_mem_assigner.cc b/ge/graph/build/memory/graph_mem_assigner.cc index 9d5b9d08..e97d343d 100755 --- a/ge/graph/build/memory/graph_mem_assigner.cc +++ b/ge/graph/build/memory/graph_mem_assigner.cc @@ -337,7 +337,7 @@ uint32_t GetContinuousMemoryType(const OpDescPtr &op_desc) { } if (continuous_type != 0) { - GELOGI("[Get][MemType:Continuous]Current node %s, value is %d", op_desc->GetName().c_str(), continuous_type); + GELOGI("Current node %s continuous type %d", op_desc->GetName().c_str(), continuous_type); } return continuous_type; } @@ -482,7 +482,7 @@ Status GraphMemoryAssigner::ReAssignContinuousMemory(bool is_loop_graph) { "[Assign][Memory:Continuous:Input]fail for node:%s.", node->GetName().c_str()) } for (auto pair : memory_offset_) { - GELOGD("[Reassign][Memory:Continuous]At last, memory type = %ld, mem offset = %zu.", pair.first, + GELOGD("After reassign continuous memory, memory type = %ld, mem offset = %zu.", pair.first, pair.second.mem_offset_); } return ge::SUCCESS; @@ -490,7 +490,7 @@ Status GraphMemoryAssigner::ReAssignContinuousMemory(bool is_loop_graph) { Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node, int64_t &continuous_mem_start, int64_t &continuous_mem_size, int64_t memory_type, uint32_t continuous_type, bool reverse_refresh) { - GELOGI("[Assign][Memory:Input:Continuous]start for Current node %s", node->GetName().c_str()); + GELOGI("Current node %s needs continuous input", node->GetName().c_str()); auto iter = memory_offset_.find(memory_type); if (iter == memory_offset_.end()) { REPORT_INNER_ERROR("E19999", "find memory offset fail for mem_type:%ld, " @@ -566,9 +566,9 @@ Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node, auto peer_output_offset = output_list.at(peer_out_data_anchor->GetIdx()); output_list.at(peer_out_data_anchor->GetIdx()) = output_list_this.at(out2ins.begin()->first); peer_op_desc->SetOutputOffset(output_list); - GELOGI("[Update][Offset]Node %s out %d ref in %d input node %s, use output offset %ld update %ld", - node->GetName().c_str(), out2ins.begin()->first, out2ins.begin()->second, - peer_op_desc->GetName().c_str(), output_list_this.at(out2ins.begin()->first), peer_output_offset); + GELOGI("Node %s out %d ref in %d input node %s, use output offset %ld update %ld", node->GetName().c_str(), + out2ins.begin()->first, out2ins.begin()->second, peer_op_desc->GetName().c_str(), + output_list_this.at(out2ins.begin()->first), peer_output_offset); } else { GELOGD("Node %s out %d ref in %d input node %s 
with total ref numbers %zu.", node->GetName().c_str(), out2ins.begin()->first, out2ins.begin()->second, peer_op_desc->GetName().c_str(), out2ins.size()); @@ -1671,7 +1671,7 @@ bool GraphMemoryAssigner::AssignContinuousInputMemoryWithAtomicProcessDirectly( auto continuous_type = iter->second; bool continuous_input = ((continuous_type & kTypeInput) != 0) || ((continuous_type & kTypeInputNoPadding) != 0); if (continuous_input) { - GELOGI("[Store][Node] of %s cause it's precursor node %s need assign continuous input memory", + GELOGI("Node %s 's precursor node %s need assign continuous input memory, store node firstly", input_continuous_node->GetName().c_str(), in_node->GetName().c_str()); return false; } @@ -1681,7 +1681,7 @@ bool GraphMemoryAssigner::AssignContinuousInputMemoryWithAtomicProcessDirectly( node_2_continuous_type.emplace(out_node, continuous_type); bool continuous_input = ((continuous_type & kTypeInput) != 0) || ((continuous_type & kTypeInputNoPadding) != 0); if (continuous_input) { - GELOGI("[Store][Node] of %s cause it's succeed node %s need assign continuous input memory", + GELOGI("Node %s 's succeed node %s need assign continuous input memory, store node firstly", input_continuous_node->GetName().c_str(), out_node->GetName().c_str()); return false; } diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.cc b/ge/graph/preprocess/insert_op/ge_aipp_op.cc index 25af98b8..7c8d9073 100755 --- a/ge/graph/preprocess/insert_op/ge_aipp_op.cc +++ b/ge/graph/preprocess/insert_op/ge_aipp_op.cc @@ -428,8 +428,7 @@ Status AippOp::ConvertRelatedInputNameToRank() { if (!convert_flag) { string error_msg = "Top name " + related_input_name + "convert rank failed, Please" " ensure top name in aipp config is the top name of data node."; - GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); - REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); + GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); return PARAM_INVALID; } diff --git a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc index 41a32173..fbe78121 100755 --- a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc +++ b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc @@ -124,15 +124,13 @@ Status InsertNewOpUtil::CheckInputNamePositionNotRepeat() { if (another_item->related_input_name().empty()) { string error_msg = "Can not both set related_input_name and related_input_rank!" " Please ensure param is the same with the first aipp config(related_input_name)."; - GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); - REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); + GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); return PARAM_INVALID; } if (item->related_input_name() == another_item->related_input_name()) { string error_msg = "Can not insert aipp to the same postion! Please ensure related_input_name" " param is different in different aipp config."; - GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); - REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); + GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); return PARAM_INVALID; } } @@ -152,15 +150,13 @@ Status InsertNewOpUtil::CheckInputRankPositionNoRepeat() { if (!another_item->related_input_name().empty()) { string error_msg = "Can not both set related_input_rank and related_input_name!" 
" Please ensure param is the same with the first aipp config(related_input_rank)."; - GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); - REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); + GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); return PARAM_INVALID; } if (item->related_input_rank() == another_item->related_input_rank()) { string error_msg = "Can not insert aipp to the same postion! Please ensure related_input_rank" " param is different in different aipp config."; - GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str()); - REPORT_INPUT_ERROR("E19021", std::vector({"reason"}), std::vector({error_msg})); + GE_ERRORLOG_AND_ERRORMSG(PARAM_INVALID, error_msg.c_str()); return PARAM_INVALID; } } diff --git a/ge/plugin/engine/CMakeLists.txt b/ge/plugin/engine/CMakeLists.txt index 3aace4ac..e5736b51 100644 --- a/ge/plugin/engine/CMakeLists.txt +++ b/ge/plugin/engine/CMakeLists.txt @@ -41,7 +41,6 @@ target_link_options(engine PRIVATE target_link_libraries(engine PRIVATE $ -Wl,--no-as-needed - c_sec slog -Wl,--as-needed -lrt diff --git a/tests/ut/ge/common/format_transfer_5d_nchw_unittest.cc b/tests/ut/ge/common/format_transfer_5d_nchw_unittest.cc index 0eded4d7..64664a5c 100644 --- a/tests/ut/ge/common/format_transfer_5d_nchw_unittest.cc +++ b/tests/ut/ge/common/format_transfer_5d_nchw_unittest.cc @@ -569,7 +569,7 @@ TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_shape1) { TransResult result; FormatTransferNc1hwc0Nchw transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_shape2) { @@ -579,7 +579,7 @@ TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_shape2) { TransResult result; FormatTransferNc1hwc0Nchw transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_dst_shape1) { @@ -588,7 +588,7 @@ TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_dst_shape1) { TransResult result; FormatTransferNc1hwc0Nchw transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_dst_shape2) { @@ -598,7 +598,7 @@ TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_dst_shape2) { TransResult result; FormatTransferNc1hwc0Nchw transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_dst_shape_relation) { @@ -608,7 +608,7 @@ TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_dst_shape_relation) { TransResult result; FormatTransferNc1hwc0Nchw transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_format) { @@ -618,10 +618,10 @@ TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_format) { TransResult result; FormatTransferNc1hwc0Nchw transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_FORMAT_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); Status status = transfer.TransShape(args.src_format, args.src_shape, args.src_data_type, args.dst_format, args.dst_shape); 
- EXPECT_EQ(status, ACL_ERROR_GE_FORMAT_INVALID); + EXPECT_EQ(status, UNSUPPORTED); } TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_dst_format) { @@ -631,7 +631,7 @@ TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_dst_format) { TransResult result; FormatTransferNc1hwc0Nchw transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_FORMAT_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_data_type) { @@ -642,7 +642,7 @@ TEST_F(UTEST_FormatTransferNc1hwc0ToNchw, invalid_src_data_type) { TransResult result; FormatTransferNc1hwc0Nchw transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_DATATYPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } } // namespace formats } // namespace ge diff --git a/tests/ut/ge/common/format_transfer_fractal_nz_unittest.cc b/tests/ut/ge/common/format_transfer_fractal_nz_unittest.cc index 02f8251a..5bbc5776 100644 --- a/tests/ut/ge/common/format_transfer_fractal_nz_unittest.cc +++ b/tests/ut/ge/common/format_transfer_fractal_nz_unittest.cc @@ -9136,23 +9136,23 @@ TEST_F(UtestFormatTransferNdFractNz, invalid_src_data_type2) { EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_DATATYPE_INVALID); } -TEST_F(UtestFormatTransferNdFractNz, invalid_src_data_type3) { - uint16_t data[1 * 1 * 1 * 16 * 16] = {0}; - TransArgs args{reinterpret_cast(data), - FORMAT_FRACTAL_NZ, - FORMAT_NHWC, - {1, 1, 1, 16, 16}, - { - 1, - 1, - 4, - 4, - }, - DT_STRING}; - TransResult result; - FormatTransferFractalNzND transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_DATATYPE_INVALID); -} +// TEST_F(UtestFormatTransferNdFractNz, invalid_src_data_type3) { +// uint16_t data[1 * 1 * 1 * 16 * 16] = {0}; +// TransArgs args{reinterpret_cast(data), +// FORMAT_FRACTAL_NZ, +// FORMAT_NHWC, +// {1, 1, 1, 16, 16}, +// { +// 1, +// 1, +// 4, +// 4, +// }, +// DT_VARIANT}; +// TransResult result; +// FormatTransferFractalNzND transfer; +// EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_DATATYPE_INVALID); +// } TEST_F(UtestFormatTransferNdFractNz, invalid_dst_format2) { uint16_t data[1 * 1 * 1 * 1 * 16 * 16] = {0}; diff --git a/tests/ut/ge/common/format_transfer_fracz_nhwc_unittest.cc b/tests/ut/ge/common/format_transfer_fracz_nhwc_unittest.cc index a4d6f9ae..e406eb43 100644 --- a/tests/ut/ge/common/format_transfer_fracz_nhwc_unittest.cc +++ b/tests/ut/ge/common/format_transfer_fracz_nhwc_unittest.cc @@ -39,7 +39,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_data_type) { TransResult result; FormatTransferFracZNhwc transfer; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_DATATYPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_format_reserved) { @@ -50,7 +50,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_format_reserved) reinterpret_cast(data), FORMAT_RESERVED, FORMAT_NHWC, {16, 1, 16, 16}, {1, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_FORMAT_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_format_reserved) { @@ -61,7 +61,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_format_reserved) reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_RESERVED, {16, 1, 16, 16}, {1, 4, 4, 1}, DT_FLOAT}; TransResult result; - 
EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_FORMAT_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_shape) { @@ -71,7 +71,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_shape) { TransArgs args{reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16}, {1, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_shape2) { @@ -82,7 +82,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_shape2) { reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, -1, 16, 16}, {1, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_shape) { @@ -93,7 +93,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_shape) { reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16, 16}, {1, 4, 4}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_shape2) { @@ -104,7 +104,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_dst_shape2) { reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16, 16}, {1, 4, 4, -1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_dst_shape_relation1) { @@ -115,7 +115,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_dst_shape_relatio reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16, 16}, {17, 4, 4, 1}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_dst_shape_relation2) { @@ -126,7 +126,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_invalid_src_dst_shape_relatio reinterpret_cast(data), FORMAT_FRACTAL_Z, FORMAT_NHWC, {16, 1, 16, 16}, {1, 4, 4, 17}, DT_FLOAT}; TransResult result; - EXPECT_EQ(transfer.TransFormat(args, result), ACL_ERROR_GE_SHAPE_INVALID); + EXPECT_EQ(transfer.TransFormat(args, result), PARAM_INVALID); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_fp16_success_lt_cube) { @@ -301,7 +301,7 @@ TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_fp16_success_eq_cube) { } Status status = transfer.TransShape(args.src_format, args.src_shape, args.src_data_type, args.dst_format, args.dst_shape); - EXPECT_EQ(status, ACL_ERROR_GE_FORMAT_INVALID); + EXPECT_EQ(status, UNSUPPORTED); } TEST_F(UtestFormatTransferFraczNhwc, fracz_to_nhwc_fp16_success_gt_cube) { diff --git a/tests/ut/ge/common/format_transfer_nhwc_fractalz_unittest.cc b/tests/ut/ge/common/format_transfer_nhwc_fractalz_unittest.cc index 7431440b..b2cfe2db 100644 --- a/tests/ut/ge/common/format_transfer_nhwc_fractalz_unittest.cc +++ b/tests/ut/ge/common/format_transfer_nhwc_fractalz_unittest.cc @@ -5354,14 +5354,14 @@ 
TEST_F(UtestFormatTransferNhwcFz, build_transfer_uint8) { EXPECT_NE(transfer, nullptr); } -TEST_F(UtestFormatTransferNhwcFz, invalid_data_type) { - uint16_t data[1 * 4 * 4 * 1] = {0}; - TransArgs args{ - reinterpret_cast(data), FORMAT_NHWC, FORMAT_FRACTAL_NZ, {1, 4, 4}, {1, 1, 1, 16, 16}, DT_STRING}; - FormatTransferFractalZ transfer; - EXPECT_EQ(transfer.TransShape(args.src_format, args.src_shape, args.src_data_type, args.dst_format, args.dst_shape), - ACL_ERROR_GE_DATATYPE_INVALID); -} +// TEST_F(UtestFormatTransferNhwcFz, invalid_data_type) { +// uint16_t data[1 * 4 * 4 * 1] = {0}; +// TransArgs args{ +// reinterpret_cast(data), FORMAT_NHWC, FORMAT_FRACTAL_NZ, {1, 4, 4}, {1, 1, 1, 16, 16}, DT_VARIANT}; +// FormatTransferFractalZ transfer; +// EXPECT_EQ(transfer.TransShape(args.src_format, args.src_shape, args.src_data_type, args.dst_format, args.dst_shape), +// ACL_ERROR_GE_DATATYPE_INVALID); +// } TEST_F(UtestFormatTransferNhwcFz, invalid_data_format) { uint16_t data[1 * 4 * 4 * 1] = {0}; From 12cef9e9b9e95fb35741152b48306578520e30ab Mon Sep 17 00:00:00 2001 From: lichun Date: Wed, 31 Mar 2021 13:08:25 +0800 Subject: [PATCH 10/14] support unknown while subgraph --- ge/hybrid/model/hybrid_model.h | 1 + ge/hybrid/model/hybrid_model_builder.cc | 46 ++++++++++++++-------- ge/hybrid/model/hybrid_model_builder.h | 5 +-- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 50 ++++++++++++++++++++++++ 4 files changed, 83 insertions(+), 19 deletions(-) diff --git a/ge/hybrid/model/hybrid_model.h b/ge/hybrid/model/hybrid_model.h index fae53679..62095d42 100644 --- a/ge/hybrid/model/hybrid_model.h +++ b/ge/hybrid/model/hybrid_model.h @@ -135,6 +135,7 @@ class HybridModel { std::string model_name_; GeRootModelPtr ge_root_model_; std::map input_nodes_; + ComputeGraphPtr root_graph_; std::map device_variable_nodes_; //lint !e148 std::map host_variable_nodes_; //lint !e148 std::map> variable_tensors_; diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index 25dabd78..d6d724ac 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -136,12 +136,12 @@ Status HybridModelBuilder::Build() { GE_CHK_STATUS_RET(RecoverGraphUnknownFlag(), "[%s] Failed to RecoverGraphUnknownFlag", GetGraphName()); GE_CHK_STATUS_RET(IndexSpecialNodes(), "[%s] Failed to index nodes", GetGraphName()); GE_CHK_STATUS_RET(IndexTaskDefs(), "[%s] Failed to index task defs", GetGraphName()); + GE_CHK_STATUS_RET(InitWeights(), "[%s] Failed to init weights", GetGraphName()); GE_CHK_STATUS_RET(LoadGraph(), "[%s] Failed to load graph", GetGraphName()); GE_CHK_STATUS_RET(AssignUninitializedConstantOps(), "[%s] Failed to assign uninitialized constants", GetGraphName()); GE_CHK_STATUS_RET(TransAllVarData(), "[%s] Failed to trans all var data", GetGraphName()); GE_CHK_STATUS_RET(CopyVarData(), "[%s] Failed to copy var data", GetGraphName()); GE_CHK_STATUS_RET(InitModelMem(), "[%s] Failed to init memory", GetGraphName()); - GE_CHK_STATUS_RET(InitWeights(), "[%s] Failed to init weights", GetGraphName()); GE_CHK_STATUS_RET(InitConstantOps(), "[%s] Failed to init constant op", GetGraphName()); GE_CHK_STATUS_RET(InitVariableTensors(), "[%s] Failed to init variables", GetGraphName()); GE_CHK_STATUS_RET(LoadTasks(), "[%s] Failed to load tasks", GetGraphName()); @@ -599,9 +599,10 @@ Status HybridModelBuilder::MergeNetOutputNode(ComputeGraph &graph) { return SUCCESS; } -Status HybridModelBuilder::UnfoldSubgraphs(ComputeGraph &root_graph, ComputeGraphPtr &merged_graph) { 
+Status HybridModelBuilder::UnfoldSubgraphs(ComputeGraphPtr &root_graph, ComputeGraphPtr &merged_graph) { merged_graph = MakeShared("MergedGraph"); - for (const auto &node : root_graph.GetDirectNode()) { + merged_graph->SetGraphUnknownFlag(root_graph->GetGraphUnknownFlag()); + for (const auto &node : root_graph->GetDirectNode()) { GE_CHECK_NOTNULL(node); auto op_desc = node->GetOpDesc(); GE_CHECK_NOTNULL(op_desc); @@ -631,7 +632,7 @@ Status HybridModelBuilder::UnfoldSubgraphs(ComputeGraph &root_graph, ComputeGrap } } } - GE_CHK_GRAPH_STATUS_RET(UnfoldSubgraph(root_graph, *merged_graph, *subgraph), + GE_CHK_GRAPH_STATUS_RET(UnfoldSubgraph(root_graph, merged_graph, *subgraph), "[%s] Failed to merge subgraph.", subgraph->GetName().c_str()); } @@ -647,18 +648,19 @@ Status HybridModelBuilder::UnfoldSubgraphs(ComputeGraph &root_graph, ComputeGrap return a_level < b_level; }); - for (auto &remained_subgraph : root_graph.GetAllSubgraphs()) { + for (auto &remained_subgraph : root_graph->GetAllSubgraphs()) { GELOGD("Adding subgraph [%s] to merged-graph.", remained_subgraph->GetName().c_str()); GE_CHK_GRAPH_STATUS_RET(merged_graph->AddSubgraph(remained_subgraph), "Failed to add subgraph [%s]", remained_subgraph->GetName().c_str()); + remained_subgraph->SetParentGraph(merged_graph); } return SUCCESS; } -Status HybridModelBuilder::UnfoldSubgraph(ComputeGraph &root_graph, - ComputeGraph &parent_graph, +Status HybridModelBuilder::UnfoldSubgraph(ComputeGraphPtr &root_graph, + ComputeGraphPtr &parent_graph, ComputeGraph &sub_graph) { auto parent_node = sub_graph.GetParentNode(); GE_CHECK_NOTNULL(parent_node); @@ -687,15 +689,23 @@ Status HybridModelBuilder::UnfoldSubgraph(ComputeGraph &root_graph, } } - parent_graph.AddNode(sub_node); + if (!sub_node->GetOpDesc()->GetSubgraphInstanceNames().empty()) { + for (size_t i = 0; i < sub_node->GetOpDesc()->GetSubgraphInstanceNames().size(); ++i) { + auto sub_sub_graph = NodeUtils::GetSubgraph(*sub_node, i); + GE_CHECK_NOTNULL(sub_sub_graph); + sub_sub_graph->SetParentGraph(parent_graph); + } + } + parent_graph->AddNode(sub_node); GELOGD("[%s::%s] added to parent graph: [%s].", sub_graph.GetName().c_str(), sub_node->GetName().c_str(), - parent_graph.GetName().c_str()); + parent_graph->GetName().c_str()); + sub_node->SetOwnerComputeGraph(parent_graph); } GELOGD("[%s] Done merging subgraph. 
remove it from root graph.", sub_graph.GetName().c_str()); - root_graph.RemoveSubgraph(sub_graph.GetName()); + root_graph->RemoveSubgraph(sub_graph.GetName()); return SUCCESS; } @@ -747,14 +757,14 @@ Status HybridModelBuilder::LoadGraph() { GELOGI("Before merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu", root_graph->GetDirectNodesSize(), root_graph->GetAllNodesSize()); - GE_CHK_GRAPH_STATUS_RET(UnfoldSubgraphs(*root_graph, merged_graph), "Failed to unfold subgraphs."); + GE_CHK_GRAPH_STATUS_RET(UnfoldSubgraphs(root_graph, merged_graph), "Failed to unfold subgraphs."); root_graph = std::move(merged_graph); GELOGI("After merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu", root_graph->GetDirectNodesSize(), root_graph->GetAllNodesSize()); } - root_graph_ = root_graph; + hybrid_model_.root_graph_ = root_graph; // Reset node id by topological order across all subgraphs int64_t index = 0; for (const auto &node : root_graph->GetAllNodes()) { @@ -1030,9 +1040,13 @@ Status HybridModelBuilder::InitWeights() { GELOGI("Init weight mem successfully, weight base %p, weight size = %zu", weight_base, sub_weight_buffer->GetSize()); - auto root_graph = GraphUtils::GetComputeGraph(subgraph_model.second->GetGraph()); - hybrid_model_.weight_buffer_map_.emplace(root_graph->GetName(),std::move(sub_weight_buffer)); - for (auto &node : root_graph->GetDirectNode()) { + auto subgraph = GraphUtils::GetComputeGraph(subgraph_model.second->GetGraph()); + if (subgraph != ge_root_model_->GetRootGraph()) { + subgraph = ge_root_model_->GetRootGraph()->GetSubgraph(subgraph_model.first); + } + GE_CHECK_NOTNULL(subgraph); + hybrid_model_.weight_buffer_map_.emplace(subgraph->GetName(), std::move(sub_weight_buffer)); + for (auto &node : subgraph->GetDirectNode()) { if (node->GetType() != CONSTANT) { continue; } @@ -2044,7 +2058,7 @@ Status HybridModelBuilder::CollectParallelGroups(NodeItem *node_item) { GELOGD("[%s] Start to get parallel group from subgraph: %s", node_item->NodeName().c_str(), subgraph_name.c_str()); - auto subgraph = root_graph_->GetSubgraph(subgraph_name); + auto subgraph = hybrid_model_.root_graph_->GetSubgraph(subgraph_name); GE_CHECK_NOTNULL(subgraph); for (const auto &sub_node : subgraph->GetAllNodes()) { std::string parallel_group; diff --git a/ge/hybrid/model/hybrid_model_builder.h b/ge/hybrid/model/hybrid_model_builder.h index a59a282a..430637dc 100644 --- a/ge/hybrid/model/hybrid_model_builder.h +++ b/ge/hybrid/model/hybrid_model_builder.h @@ -47,8 +47,8 @@ class HybridModelBuilder { static Status HandleDtString(const GeTensor &tensor, void *var_addr); static Status MergeInputNodes(ComputeGraph &compute_graph); static Status MergeNetOutputNode(ComputeGraph &compute_graph); - static Status UnfoldSubgraphs(ComputeGraph &root_graph, ComputeGraphPtr &merged_graph); - static Status UnfoldSubgraph(ComputeGraph &root_graph, ComputeGraph &parent_graph, ComputeGraph &sub_graph); + static Status UnfoldSubgraphs(ComputeGraphPtr &root_graph, ComputeGraphPtr &merged_graph); + static Status UnfoldSubgraph(ComputeGraphPtr &root_graph, ComputeGraphPtr &parent_graph, ComputeGraph &sub_graph); static Status BuildInputMapping(GraphItem &graph_item, std::vector &data_nodes, bool is_root_graph); @@ -100,7 +100,6 @@ class HybridModelBuilder { NodeItem *MutableNodeItem(const NodePtr &node); GeRootModelPtr ge_root_model_; - ComputeGraphPtr root_graph_; std::map subgraph_models_; std::map constant_op_nodes_; std::map> parallel_group_to_nodes_; diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc 
b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index 3b5d19e6..60c0e883 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -256,3 +256,53 @@ TEST_F(UtestGeHybrid, init_weight_success) { HybridModelExecutor executor(model_ptr, device_id, stream); executor.Init(); } + +TEST_F(UtestGeHybrid, unfold_subgraphs_success) { + ComputeGraphPtr merged_graph = nullptr; + + ComputeGraphPtr sub_sub_graph1 = std::make_shared("while_cond"); + OpDescPtr sub_sub_graph_while_cond_data_op_desc = CreateOpDesc("cond_data", DATA); + NodePtr sub_sub_graph_while_cond_data_node = sub_sub_graph1->AddNode(sub_sub_graph_while_cond_data_op_desc); + + ComputeGraphPtr sub_sub_graph2 = std::make_shared("while_body"); + /*OpDescPtr sub_sub_graph_while_body_const_op_desc = CreateOpDesc("body_const", CONSTANT); + NodePtr sub_sub_graph_while_body_const_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_const_op_desc);*/ + OpDescPtr sub_sub_graph_while_body_data_op_desc = CreateOpDesc("body_data", DATA); + NodePtr sub_sub_graph_while_body_data_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_data_op_desc); + sub_sub_graph2->SetGraphUnknownFlag(true); + /*OpDescPtr sub_sub_graph_while_body_add_op_desc = CreateOpDesc("body_add", ADD); + NodePtr sub_sub_graph_while_body_add_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_add_node); + sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_data_node); + sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_const_node);*/ + + ComputeGraphPtr sub_graph = std::make_shared("sub_graph"); + OpDescPtr sub_graph_while_op_desc = CreateOpDesc("while", WHILE); + NodePtr sub_graph_while_node = sub_graph->AddNode(sub_graph_while_op_desc); + sub_graph->SetGraphUnknownFlag(true); + sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_cond"); + sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_body"); + sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(0, "while_cond"); + sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(1, "while_body"); + + ComputeGraphPtr root_graph = std::make_shared("root_graph"); + auto partitioned_call_op_desc = MakeShared("partitioned_call", PARTITIONEDCALL); + auto partitioned_call_node = root_graph->AddNode(partitioned_call_op_desc); + partitioned_call_node->GetOpDesc()->AddSubgraphName("sub_graph"); + partitioned_call_node->GetOpDesc()->SetSubgraphInstanceName(0, "sub_graph"); + + root_graph->AddSubGraph(sub_sub_graph1); + root_graph->AddSubGraph(sub_sub_graph2); + sub_sub_graph1->SetParentGraph(root_graph); + sub_sub_graph2->SetParentGraph(root_graph); + sub_sub_graph1->SetParentNode(sub_graph_while_node); + sub_sub_graph2->SetParentNode(sub_graph_while_node); + + root_graph->AddSubGraph(sub_graph); + sub_graph->SetParentNode(partitioned_call_node); + sub_graph->SetParentGraph(root_graph); + + GeRootModelPtr root_model = MakeShared(root_graph); + HybridModel hybrid_model(root_model); + HybridModelBuilder hybrid_model_builder(hybrid_model); + EXPECT_EQ(hybrid_model_builder.UnfoldSubgraphs(root_graph, merged_graph), SUCCESS); +} From c93682162954d1f25a232dfc2acd5eb56f48bc70 Mon Sep 17 00:00:00 2001 From: zhaoxinxin Date: Wed, 31 Mar 2021 16:39:44 +0800 Subject: [PATCH 11/14] modified: metadef --- metadef | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metadef b/metadef index 0c4602a4..0e96f411 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 0c4602a4615a9368b06633a5087e2114518f29ca +Subproject commit 
0e96f4117db0bf9646ab4173c5e6487ed46615d0 From 701b0d6c1ba13ee50f9b36eb6db1617d1a3c8dc9 Mon Sep 17 00:00:00 2001 From: lichun Date: Thu, 1 Apr 2021 10:04:01 +0800 Subject: [PATCH 12/14] support unknown while subgraph --- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 118 ++++++++++++++--------- 1 file changed, 71 insertions(+), 47 deletions(-) diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index 60c0e883..f5a802a2 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -258,51 +258,75 @@ TEST_F(UtestGeHybrid, init_weight_success) { } TEST_F(UtestGeHybrid, unfold_subgraphs_success) { - ComputeGraphPtr merged_graph = nullptr; - - ComputeGraphPtr sub_sub_graph1 = std::make_shared("while_cond"); - OpDescPtr sub_sub_graph_while_cond_data_op_desc = CreateOpDesc("cond_data", DATA); - NodePtr sub_sub_graph_while_cond_data_node = sub_sub_graph1->AddNode(sub_sub_graph_while_cond_data_op_desc); - - ComputeGraphPtr sub_sub_graph2 = std::make_shared("while_body"); - /*OpDescPtr sub_sub_graph_while_body_const_op_desc = CreateOpDesc("body_const", CONSTANT); - NodePtr sub_sub_graph_while_body_const_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_const_op_desc);*/ - OpDescPtr sub_sub_graph_while_body_data_op_desc = CreateOpDesc("body_data", DATA); - NodePtr sub_sub_graph_while_body_data_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_data_op_desc); - sub_sub_graph2->SetGraphUnknownFlag(true); - /*OpDescPtr sub_sub_graph_while_body_add_op_desc = CreateOpDesc("body_add", ADD); - NodePtr sub_sub_graph_while_body_add_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_add_node); - sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_data_node); - sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_const_node);*/ - - ComputeGraphPtr sub_graph = std::make_shared("sub_graph"); - OpDescPtr sub_graph_while_op_desc = CreateOpDesc("while", WHILE); - NodePtr sub_graph_while_node = sub_graph->AddNode(sub_graph_while_op_desc); - sub_graph->SetGraphUnknownFlag(true); - sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_cond"); - sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_body"); - sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(0, "while_cond"); - sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(1, "while_body"); - - ComputeGraphPtr root_graph = std::make_shared("root_graph"); - auto partitioned_call_op_desc = MakeShared("partitioned_call", PARTITIONEDCALL); - auto partitioned_call_node = root_graph->AddNode(partitioned_call_op_desc); - partitioned_call_node->GetOpDesc()->AddSubgraphName("sub_graph"); - partitioned_call_node->GetOpDesc()->SetSubgraphInstanceName(0, "sub_graph"); - - root_graph->AddSubGraph(sub_sub_graph1); - root_graph->AddSubGraph(sub_sub_graph2); - sub_sub_graph1->SetParentGraph(root_graph); - sub_sub_graph2->SetParentGraph(root_graph); - sub_sub_graph1->SetParentNode(sub_graph_while_node); - sub_sub_graph2->SetParentNode(sub_graph_while_node); - - root_graph->AddSubGraph(sub_graph); - sub_graph->SetParentNode(partitioned_call_node); - sub_graph->SetParentGraph(root_graph); - - GeRootModelPtr root_model = MakeShared(root_graph); - HybridModel hybrid_model(root_model); - HybridModelBuilder hybrid_model_builder(hybrid_model); - EXPECT_EQ(hybrid_model_builder.UnfoldSubgraphs(root_graph, merged_graph), SUCCESS); +ComputeGraphPtr merged_graph = nullptr; + +ComputeGraphPtr sub_sub_graph1 = 
std::make_shared<ComputeGraph>("while_cond");
+OpDescPtr sub_sub_graph_while_cond_data_op_desc = CreateOpDesc("cond_data", DATA);
+NodePtr sub_sub_graph_while_cond_data_node = sub_sub_graph1->AddNode(sub_sub_graph_while_cond_data_op_desc);
+
+ComputeGraphPtr sub_sub_graph2 = std::make_shared<ComputeGraph>("while_body");
+/*OpDescPtr sub_sub_graph_while_body_const_op_desc = CreateOpDesc("body_const", CONSTANT);
+NodePtr sub_sub_graph_while_body_const_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_const_op_desc);*/
+OpDescPtr sub_sub_graph_while_body_data_op_desc = CreateOpDesc("body_data", DATA);
+NodePtr sub_sub_graph_while_body_data_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_data_op_desc);
+sub_sub_graph2->SetGraphUnknownFlag(true);
+/*OpDescPtr sub_sub_graph_while_body_add_op_desc = CreateOpDesc("body_add", ADD);
+NodePtr sub_sub_graph_while_body_add_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_add_op_desc);
+sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_data_node);
+sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_const_node);*/
+
+ComputeGraphPtr sub_graph = std::make_shared<ComputeGraph>("sub_graph");
+OpDescPtr sub_graph_while_op_desc = CreateOpDesc("while", WHILE);
+NodePtr sub_graph_while_node = sub_graph->AddNode(sub_graph_while_op_desc);
+sub_graph->SetGraphUnknownFlag(true);
+sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_cond");
+sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_body");
+sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(0, "while_cond");
+sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(1, "while_body");
+
+ComputeGraphPtr root_graph = std::make_shared<ComputeGraph>("root_graph");
+auto partitioned_call_op_desc = MakeShared<OpDesc>("partitioned_call", PARTITIONEDCALL);
+auto partitioned_call_node = root_graph->AddNode(partitioned_call_op_desc);
+partitioned_call_node->GetOpDesc()->AddSubgraphName("sub_graph");
+partitioned_call_node->GetOpDesc()->SetSubgraphInstanceName(0, "sub_graph");
+
+root_graph->AddSubGraph(sub_sub_graph1);
+root_graph->AddSubGraph(sub_sub_graph2);
+sub_sub_graph1->SetParentGraph(root_graph);
+sub_sub_graph2->SetParentGraph(root_graph);
+sub_sub_graph1->SetParentNode(sub_graph_while_node);
+sub_sub_graph2->SetParentNode(sub_graph_while_node);
+
+root_graph->AddSubGraph(sub_graph);
+sub_graph->SetParentNode(partitioned_call_node);
+sub_graph->SetParentGraph(root_graph);
+
+GeRootModelPtr root_model = MakeShared<GeRootModel>(root_graph);
+HybridModel hybrid_model(root_model);
+HybridModelBuilder hybrid_model_builder(hybrid_model);
+
+// subgraph num before unfold: 3 (sub_graph, while_cond, while_body)
+EXPECT_EQ(root_graph->GetAllSubgraphs().size(), 3);
+// num of nodes in root_graph before unfold: 1, name: partitioned_call
+EXPECT_EQ(root_graph->GetDirectNodesSize(), 1);
+EXPECT_EQ(root_graph->GetDirectNode().at(0)->GetName(), "partitioned_call");
+// two sub_sub_graphs: while cond & while body, their parent graph is "root_graph" before unfold
+EXPECT_EQ(sub_sub_graph1->GetParentGraph()->GetName(), "root_graph");
+EXPECT_EQ(sub_sub_graph2->GetParentGraph()->GetName(), "root_graph");
+// node "while" has owner compute graph "sub_graph" before unfold
+EXPECT_EQ(sub_graph_while_node->GetOwnerComputeGraph()->GetName(), "sub_graph");
+
+// unfold success
+EXPECT_EQ(hybrid_model_builder.UnfoldSubgraphs(root_graph, merged_graph), SUCCESS);
+
+// subgraph num after unfold: 2 (while_cond & while_body remain)
+EXPECT_EQ(merged_graph->GetAllSubgraphs().size(), 2);
+// num of nodes in MergedGraph after unfold: 1, name: while
+EXPECT_EQ(merged_graph->GetDirectNodesSize(), 1);
+EXPECT_EQ(merged_graph->GetDirectNode().at(0)->GetName(), "while");
+// two sub_sub_graphs: while cond & while body, their parent graph is "MergedGraph" after unfold
+EXPECT_EQ(sub_sub_graph1->GetParentGraph()->GetName(), "MergedGraph");
+EXPECT_EQ(sub_sub_graph2->GetParentGraph()->GetName(), "MergedGraph");
+// node "while" has owner compute graph "MergedGraph" after unfold
+EXPECT_EQ(sub_graph_while_node->GetOwnerComputeGraph()->GetName(), "MergedGraph");
 }

From 890373c79c18b8f3a1d1325a3b6056cec39e7cd3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E7=A3=8A?=
Date: Thu, 1 Apr 2021 11:35:39 +0800
Subject: [PATCH 13/14] fixed reviewbot warning

---
 .../formats/format_transfers/format_transfer_fractal_zz.cc   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc b/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc
index 3cccc664..c36bffb5 100755
--- a/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc
+++ b/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc
@@ -197,7 +197,8 @@ Status TransFormatFromNdToFracZz(const TransArgs &args, TransResult &result, con
         auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset,
                             static_cast<size_t>(size));
         if (ret != EOK) {
-          GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret);
+          GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED,
+                 "Failed to operate the dst memory at offset %ld, error-code %d", dst_offset, ret);
           return ACL_ERROR_GE_MEMORY_OPERATE_FAILED;
         }
       }

From 68595a656a60ba7d63c5cb7bc81d923feea6f9a9 Mon Sep 17 00:00:00 2001
From: wxl
Date: Thu, 1 Apr 2021 16:48:32 +0800
Subject: [PATCH 14/14] fix ts 4g memory bug

---
 ge/graph/load/model_manager/davinci_model.cc     | 18 +++++++++++++++---
 ge/graph/load/model_manager/davinci_model.h      |  1 +
 .../ut/ge/graph/load/davinci_model_unittest.cc   |  6 ++++++
 third_party/fwkacllib/inc/runtime/dev.h          |  5 +++++
 4 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc
index 0aac173e..fc861a24 100755
--- a/ge/graph/load/model_manager/davinci_model.cc
+++ b/ge/graph/load/model_manager/davinci_model.cc
@@ -2924,6 +2924,14 @@ Status DavinciModel::InitTaskInfo(domi::ModelTaskDef &model_task_def) {
   return SUCCESS;
 }
 
+Status DavinciModel::CheckCapability(rtFeatureType_t featureType, int32_t featureInfo, bool &is_support) const {
+  int64_t value = RT_CAPABILITY_SUPPORT;
+  auto rt_ret = rtGetRtCapability(featureType, featureInfo, &value);
+  GE_CHK_BOOL_RET_STATUS(rt_ret == RT_ERROR_NONE, FAILED, "call rtGetRtCapability failed!");
+  is_support = (value == RT_CAPABILITY_SUPPORT) ? true : false;
+  return SUCCESS;
+}
+
 Status DavinciModel::MallocKnownArgs() {
   GELOGI("DavinciModel::MallocKnownArgs in");
   const auto &model_task_def = ge_model_->GetModelTaskDefPtr();
@@ -2944,8 +2952,12 @@ Status DavinciModel::MallocKnownArgs() {
   }
   rtError_t rt_ret;
   // malloc args memory
+  bool is_support = false;
+  GE_CHK_STATUS_RET_NOLOG(CheckCapability(FEATURE_TYPE_MEMORY, MEMORY_INFO_TS_4G_LIMITED, is_support));
+  auto mem_type = is_support ? 
RT_MEMORY_TS_4G : RT_MEMORY_HBM; + if (total_args_size_ != 0) { - rt_ret = rtMalloc(&args_, total_args_size_, RT_MEMORY_HBM); + rt_ret = rtMalloc(&args_, total_args_size_, mem_type); if (rt_ret != RT_ERROR_NONE) { GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); @@ -2953,7 +2965,7 @@ Status DavinciModel::MallocKnownArgs() { } // malloc dynamic and static hybrid memory if (total_hybrid_args_size_ != 0) { - rt_ret = rtMalloc(&hybrid_addrs_, total_hybrid_args_size_, RT_MEMORY_HBM); + rt_ret = rtMalloc(&hybrid_addrs_, total_hybrid_args_size_, mem_type); if (rt_ret != RT_ERROR_NONE) { GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); @@ -2962,7 +2974,7 @@ Status DavinciModel::MallocKnownArgs() { // malloc fixed addr memory, eg: rts op if (total_fixed_addr_size_ != 0) { GELOGI("Begin to allocate fixed addr."); - rt_ret = rtMalloc(&fixed_addrs_, total_fixed_addr_size_, RT_MEMORY_HBM); + rt_ret = rtMalloc(&fixed_addrs_, total_fixed_addr_size_, mem_type); if (rt_ret != RT_ERROR_NONE) { GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 93f968ee..e4b73d7e 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -530,6 +530,7 @@ class DavinciModel { } void SetKnownNode(bool known_node) { known_node_ = known_node; } bool IsKnownNode() { return known_node_; } + Status CheckCapability(rtFeatureType_t featureType, int32_t featureInfo, bool &is_support) const; Status MallocKnownArgs(); Status UpdateKnownNodeArgs(const vector &inputs, const vector &outputs); Status CreateKnownZeroCopyMap(const vector &inputs, const vector &outputs); diff --git a/tests/ut/ge/graph/load/davinci_model_unittest.cc b/tests/ut/ge/graph/load/davinci_model_unittest.cc index 18cc622b..55f418d6 100644 --- a/tests/ut/ge/graph/load/davinci_model_unittest.cc +++ b/tests/ut/ge/graph/load/davinci_model_unittest.cc @@ -141,6 +141,12 @@ TEST_F(UtestDavinciModel, init_success) { ProfilingManager::Instance().is_load_profiling_ = false; } +TEST_F(UtestDavinciModel, CheckCapability) { + DavinciModel model(0, nullptr); + bool is_support = false; + (void)model.CheckCapability(FEATURE_TYPE_MEMORY, MEMORY_INFO_TS_4G_LIMITED, is_support); +} + TEST_F(UtestDavinciModel, init_data_op) { DavinciModel model(0, nullptr); model.ge_model_ = make_shared(); diff --git a/third_party/fwkacllib/inc/runtime/dev.h b/third_party/fwkacllib/inc/runtime/dev.h index 49f6a3f6..b028a5f4 100644 --- a/third_party/fwkacllib/inc/runtime/dev.h +++ b/third_party/fwkacllib/inc/runtime/dev.h @@ -59,6 +59,7 @@ typedef enum tagRtAicpuDeployType { typedef enum tagRtFeatureType { FEATURE_TYPE_MEMCPY = 0, + FEATURE_TYPE_MEMORY = 1, FEATURE_TYPE_RSV } rtFeatureType_t; @@ -67,6 +68,10 @@ typedef enum tagMemcpyInfo { MEMCPY_INFO_RSV } rtMemcpyInfo_t; +typedef enum tagMemoryInfo { + MEMORY_INFO_TS_4G_LIMITED = 0, + MEMORY_INFO_RSV +} rtMemoryInfo_t; /** * @ingroup dvrt_dev * @brief get total device number.