diff --git a/ge/graph/build/memory/graph_mem_assigner.h b/ge/graph/build/memory/graph_mem_assigner.h
index f4d1366d..756781fe 100755
--- a/ge/graph/build/memory/graph_mem_assigner.h
+++ b/ge/graph/build/memory/graph_mem_assigner.h
@@ -131,7 +131,7 @@ class GraphMemoryAssigner {
                                               std::map &node_2_continuous_type);
 
   ge::Status AssignContinuousInputMemoryWithAtomicProcess(const NodePtr &input_continuous_node,
-                                                          uint32_t continuous_type, bool reverse_refresh=false);
+                                                          uint32_t continuous_type, bool reverse_refresh = false);
 
   ge::Status FilterAtomicNodesForMemoryAssign(map>> &normal_atomic_nodes_map,
                                               map> &connecting_output_atomic_nodes);
diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc
index 950ae5ca..b7bb97ce 100755
--- a/ge/graph/load/model_manager/davinci_model.cc
+++ b/ge/graph/load/model_manager/davinci_model.cc
@@ -124,7 +124,7 @@ inline bool IsDataOp(const std::string &node_type) {
   return (node_type == DATA_TYPE) || (node_type == AIPP_DATA_TYPE) || (node_type == ANN_DATA_TYPE);
 }
-inline bool IsTbeTask(const OpDescPtr &op_desc) {
+bool IsTbeTask(const OpDescPtr &op_desc) {
   uint32_t run_mode = static_cast<uint32_t>(domi::ImplyType::INVALID);
   if (!AttrUtils::GetInt(op_desc, ATTR_NAME_IMPLY_TYPE, run_mode)) {
     return false;
   }
@@ -1214,7 +1214,7 @@ void DavinciModel::GetAllGearsInfo(const NodePtr &node) {
     }
     if (!gear_info.empty()) {
       all_gears_info_.emplace_back(gear_info);
-      GELOGD("Init all gears info from %s, gaer info is %s.", node->GetName().c_str(),
+      GELOGD("Init all gears info from %s, gear info is %s", node->GetName().c_str(),
              formats::JoinToString(gear_info).c_str());
     }
   }
@@ -1283,7 +1283,7 @@ Status DavinciModel::GetGearAndRealOutSizeInfo(const ComputeGraphPtr &graph, con
 
 Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_t input_index,
                                              const NodePtr &case_node) {
-  GELOGD("Start get output size of %s, which is %zu input to netoutput.", case_node->GetName().c_str(), input_index);
+  GELOGD("Start get output size of %s, which is %zu input to netoutput", case_node->GetName().c_str(), input_index);
   const auto &func_desc = case_node->GetOpDesc();
   GE_CHECK_NOTNULL(func_desc);
   std::map, int64_t> gear_and_real_out_size_info;
@@ -1328,7 +1328,7 @@ Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_
 }
 
 Status DavinciModel::GetGearAndRealOutShapeInfo(const ComputeGraphPtr &graph, const NodePtr &node) {
-  GELOGD("Start to get dynamic output dims of %s.", node->GetName().c_str());
+  GELOGD("Start to get dynamic output dims of %s", node->GetName().c_str());
   merge_nodes_gear_and_real_out_shape_info_.clear();
   size_t idx = 0;
   for (const auto &in_anchor : node->GetAllInDataAnchors()) {
@@ -1342,7 +1342,7 @@ Status DavinciModel::GetGearAndRealOutShapeInfo(const ComputeGraphPtr &graph, co
     if ((peer_node->GetType() == CASE) && (op_desc->HasAttr(ATTR_INSERT_BY_MBATCH))) {
       std::vector dynamic_output_shape_info;
       if (!AttrUtils::GetListStr(node->GetOpDesc(), ATTR_NAME_DYNAMIC_OUTPUT_DIMS, dynamic_output_shape_info)) {
-        GELOGD("Can not get dynamic output dims attr from %s.", node->GetName().c_str());
+        GELOGD("Can not get dynamic output dims attr from %s", node->GetName().c_str());
         return SUCCESS;
       }
       GELOGI("Dynamic output shape info is %s", formats::JoinToString(dynamic_output_shape_info).c_str());
@@ -1362,7 +1362,7 @@ Status DavinciModel::GetGearAndRealOutShapeInfo(const ComputeGraphPtr &graph, co
         output_shape.emplace_back(it[i]);
       }
       gear_and_real_out_shape_info[all_gears_info_[gear_index]] = output_shape;
-      GELOGD("Get real gear index is: %zu, gear info is %s, output shape is %s.",
+      GELOGD("Get real gear index is: %zu, gear info is %s, output shape is %s",
              gear_index, formats::JoinToString(all_gears_info_[gear_index]).c_str(),
              formats::JoinToString(output_shape).c_str());
     }
@@ -1385,7 +1385,7 @@ void DavinciModel::ParseDynamicOutShape(const std::vector &str_info
       }
       shape.emplace_back(std::strtol(dim.c_str(), nullptr, kDecimal));
     }
-    GELOGI("Shape from attr is %s.", formats::JoinToString(shape).c_str());
+    GELOGI("Shape from attr is %s", formats::JoinToString(shape).c_str());
     vec_info.emplace_back(shape);
   }
 }
@@ -1428,7 +1428,7 @@ Status DavinciModel::InitLabelSet(const OpDescPtr &op_desc) {
     return INTERNAL_ERROR;
   }
-  GELOGI("InitLabelSet: label[%u]=%p stream[%u]=%p.", label_index, rt_label, stream_id, stream);
+  GELOGI("InitLabelSet: label[%u]=%p stream[%u]=%p", label_index, rt_label, stream_id, stream);
   label_id_indication_.insert(label_index);
   label_list_[label_index] = rt_label;
   return SUCCESS;
 }
@@ -1831,7 +1831,7 @@ void DavinciModel::GetUserDesignateShapeOrder(std::vector &user_inp
 ///
 Status DavinciModel::InitAippInfo(uint32_t index, const OpDescPtr &op_desc) {
   if (!op_desc->HasAttr(ATTR_NAME_AIPP)) {
-    GELOGW("There is not AIPP related with index %u.", index);
+    GELOGW("There is not AIPP related with index %u", index);
     return SUCCESS;
   }
 
@@ -1861,7 +1861,7 @@ Status DavinciModel::InitAippInfo(uint32_t index, const OpDescPtr &op_desc) {
 Status DavinciModel::GetAippInfo(uint32_t index, AippConfigInfo &aipp_info) const {
   const auto it = aipp_info_list_.find(index);
   if (it == aipp_info_list_.end()) {
-    GELOGW("there is not AIPP related with index %u.", index);
+    GELOGW("there is not AIPP related with index %u", index);
     return ACL_ERROR_GE_AIPP_NOT_EXIST;
   }
 
@@ -1871,7 +1871,7 @@ Status DavinciModel::GetAippInfo(uint32_t index, AippConfigInfo &aipp_info) cons
 
 Status DavinciModel::InitAippType(uint32_t index, const OpDescPtr &op_desc, const map &data_list) {
   if (!op_desc->HasAttr(ATTR_DATA_RELATED_AIPP_MODE)) {
-    GELOGW("There is no aipp releated info with index %u.", index);
+    GELOGW("There is no aipp related info with index %u", index);
     return SUCCESS;
   }
 
@@ -1916,7 +1916,7 @@ Status DavinciModel::GetAippType(uint32_t index, InputAippType &aipp_type, size_
   GE_CHK_BOOL_RET_STATUS(index < input_addrs_list_.size(), PARAM_INVALID, "Index %u is invalid", index);
   const auto it = aipp_type_list_.find(index);
   if (it == aipp_type_list_.end()) {
-    GELOGW("There is no aipp releated info with index %u.", index);
+    GELOGW("There is no aipp related info with index %u", index);
    aipp_type = DATA_WITHOUT_AIPP;
    aipp_index = 0xFFFFFFFF;
    return SUCCESS;
diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc
index cfee9e6d..e46bef88 100755
--- a/ge/graph/load/model_manager/model_manager.cc
+++ b/ge/graph/load/model_manager/model_manager.cc
@@ -271,7 +271,8 @@ ge::Status ModelManager::SetDynamicSize(uint32_t model_id, const std::vector
   return SUCCESS;
 }
 
-ge::Status ModelManager::DoLoadHybridModelOnline(uint32_t model_id, const string &model_name, const shared_ptr &ge_root_model,
+ge::Status ModelManager::DoLoadHybridModelOnline(uint32_t model_id, const string &model_name,
+                                                 const shared_ptr &ge_root_model,
                                                  const shared_ptr &listener) {
   auto hybrid_model = hybrid::HybridDavinciModel::Create(ge_root_model);
   GE_CHECK_NOTNULL(hybrid_model);
diff --git a/ge/graph/load/model_manager/model_manager.h b/ge/graph/load/model_manager/model_manager.h
index 00d8958f..f2d55db7 100755
--- a/ge/graph/load/model_manager/model_manager.h
+++ b/ge/graph/load/model_manager/model_manager.h
@@ -73,7 +73,8 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {
   ge::Status LoadModelOnline(uint32_t &model_id, const std::shared_ptr &ge_root_model,
                              std::shared_ptr listener);
 
-  ge::Status DoLoadHybridModelOnline(uint32_t model_id, const string &model_name, const shared_ptr &ge_root_model,
+  ge::Status DoLoadHybridModelOnline(uint32_t model_id, const string &model_name,
+                                     const shared_ptr &ge_root_model,
                                      const std::shared_ptr &listener);
 
   ///
diff --git a/ge/graph/load/model_manager/model_utils.cc b/ge/graph/load/model_manager/model_utils.cc
index 410e9364..8648d892 100755
--- a/ge/graph/load/model_manager/model_utils.cc
+++ b/ge/graph/load/model_manager/model_utils.cc
@@ -387,7 +387,7 @@ Status ModelUtils::GetVarAddr(const RuntimeParam &model_param, const ConstOpDesc
         GELOGE(PARAM_INVALID, "rdma var addr is invalid, addr=%p", reinterpret_cast(offset));
         return PARAM_INVALID;
       }
-      var_addr = reinterpret_cast(offset);
+      var_addr = reinterpret_cast(static_cast(offset));
      break;
    case RT_MEMORY_HBM:
      VALIDATE_MEM_RANGE(op_desc, model_param.var_size, offset - model_param.logic_var_base);
diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc
index 8dc26ec7..7d163130 100644
--- a/ge/hybrid/executor/hybrid_model_async_executor.cc
+++ b/ge/hybrid/executor/hybrid_model_async_executor.cc
@@ -458,8 +458,8 @@ Status HybridModelAsyncExecutor::Execute(const std::vector &inputs,
                i, outputs[i].length, output_real_size);
         return FAILED;
       }
-      GE_CHK_RT_RET(rtMemcpy(outputs[i].data, outputs[i].length, args.outputs[i].GetData(), output_real_size, 
-                             RT_MEMCPY_DEVICE_TO_DEVICE));
+      GE_CHK_RT_RET(rtMemcpy(outputs[i].data, outputs[i].length, args.outputs[i].GetData(), output_real_size,
+                             RT_MEMCPY_DEVICE_TO_DEVICE));
     }
     outputs[i].length = output_real_size;
   }
diff --git a/ge/hybrid/executor/hybrid_model_pipeline_executor.h b/ge/hybrid/executor/hybrid_model_pipeline_executor.h
index 3cb1fd23..cb08d872 100644
--- a/ge/hybrid/executor/hybrid_model_pipeline_executor.h
+++ b/ge/hybrid/executor/hybrid_model_pipeline_executor.h
@@ -60,7 +60,7 @@ class StageExecutor {
   BlockingQueue task_queue_;
   std::unique_ptr root_graph_executor_;
   GraphExecutionContext context_;
-  StageExecutor *next_executor_;
+  StageExecutor *next_executor_ = nullptr;
 
   rtStream_t stream_ = nullptr;
 };
diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.h b/ge/hybrid/node_executor/aicore/aicore_op_task.h
index af09c2af..97df2335 100755
--- a/ge/hybrid/node_executor/aicore/aicore_op_task.h
+++ b/ge/hybrid/node_executor/aicore/aicore_op_task.h
@@ -30,7 +30,7 @@ namespace ge {
 namespace hybrid {
 class TbeHandleHolder {
  public:
-  TbeHandleHolder(void *bin_handle);
+  explicit TbeHandleHolder(void *bin_handle);
   ~TbeHandleHolder();
 
   void SetBinHandle(void *bin_handle) { bin_handle_ = bin_handle; }
diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
index 55b41120..1e2fbfe8 100755
--- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
+++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
@@ -360,6 +360,7 @@ Status AicpuTfNodeTask::Init(const HybridModel &model) {
     need_sync_ = true;
   }
   auto task_defs = model.GetTaskDefs(node_item_->node);
+  GE_CHECK_NOTNULL(task_defs);
   if (unknown_type_ == DEPEND_COMPUTE) {
     GE_CHK_STATUS_RET_NOLOG(SetMemCopyTask((*task_defs)[1]));
   }
@@ -669,7 +670,7 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
   auto kernel_type = static_cast<ccKernelType>(context.kernel_type());
   if (kernel_type == ccKernelType::CUST_AI_CPU) {
     bool loaded = false;
-    GE_CHK_STATUS_RET(ModelManager::GetInstance()->LoadCustAicpuSo(op_desc, so_name, loaded), 
+    GE_CHK_STATUS_RET(ModelManager::GetInstance()->LoadCustAicpuSo(op_desc, so_name, loaded),
                      "load cust aicpu so failed.");
    if (!loaded) {
      GE_CHK_STATUS_RET(ModelManager::GetInstance()->LaunchCustAicpuSo(), "Launch cust aicpu so failed.");
diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc
index 729386df..80c16968 100755
--- a/ge/single_op/task/op_task.cc
+++ b/ge/single_op/task/op_task.cc
@@ -70,7 +70,8 @@ Status OpTask::OpenDump(rtStream_t stream) {
       uint64_t output_addr = arg_base[input_size + j];
       output_adds.emplace_back(output_addr);
     }
-    dump_op_.SetDumpInfo(DumpManager::GetInstance().GetDumpProperties(kInferSessionId), op_desc_, input_addrs, output_adds, stream);
+    dump_op_.SetDumpInfo(DumpManager::GetInstance().GetDumpProperties(kInferSessionId),
+                         op_desc_, input_addrs, output_adds, stream);
     auto status = dump_op_.LaunchDumpOp();
     if (status != SUCCESS) {
       GELOGE(status, "Launch dump op failed in single op");
@@ -504,7 +505,7 @@ Status AiCpuBaseTask::UpdateOutputShape(vector &output_desc) {
                       "AiCpuCCTask Update [%zu]th output shape failed.", i);
     if (DumpManager::GetInstance().GetDumpProperties(kInferSessionId).IsSingleOpNeedDump()) {
       GE_CHK_STATUS_RET(op_desc_->UpdateOutputDesc(i, output_desc[i]),
-                       "AiCpuCCTask Update [%zu]th output desc failed.", i);
+                        "AiCpuCCTask Update [%zu]th output desc failed.", i);
     }
   }
   GELOGD("Update DEPEND_SHAPE_RANGE AiCpuBaseTask outputshape finished.");
@@ -711,7 +712,7 @@ Status AiCpuTask::UpdateShapeByHbmBuffer(vector &output_desc) {
                       "AiCpuTask update [%zu]th output shape failed.", i);
     if (DumpManager::GetInstance().GetDumpProperties(kInferSessionId).IsSingleOpNeedDump()) {
       GE_CHK_STATUS_RET(op_desc_->UpdateOutputDesc(i, output_desc[i]),
-                       "AiCpuTask update [%zu]th output desc failed.", i);
+                        "AiCpuTask update [%zu]th output desc failed.", i);
     }
   }
   return SUCCESS;