!1209 Fixed static-check (sc) warnings

From: @li-lei0106
Reviewed-by: @wqtshg,@xchu42
Signed-off-by: @wqtshg
pull/1209/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit a56f5013b8

@ -131,7 +131,7 @@ class GraphMemoryAssigner {
std::map<NodePtr, uint32_t> &node_2_continuous_type); std::map<NodePtr, uint32_t> &node_2_continuous_type);
ge::Status AssignContinuousInputMemoryWithAtomicProcess(const NodePtr &input_continuous_node, ge::Status AssignContinuousInputMemoryWithAtomicProcess(const NodePtr &input_continuous_node,
uint32_t continuous_type, bool reverse_refresh=false); uint32_t continuous_type, bool reverse_refresh = false);
ge::Status FilterAtomicNodesForMemoryAssign(map<string, map<NodePtr, vector<NodePtr>>> &normal_atomic_nodes_map, ge::Status FilterAtomicNodesForMemoryAssign(map<string, map<NodePtr, vector<NodePtr>>> &normal_atomic_nodes_map,
map<string, vector<NodePtr>> &connecting_output_atomic_nodes); map<string, vector<NodePtr>> &connecting_output_atomic_nodes);

@ -124,7 +124,7 @@ inline bool IsDataOp(const std::string &node_type) {
return (node_type == DATA_TYPE) || (node_type == AIPP_DATA_TYPE) || (node_type == ANN_DATA_TYPE); return (node_type == DATA_TYPE) || (node_type == AIPP_DATA_TYPE) || (node_type == ANN_DATA_TYPE);
} }
inline bool IsTbeTask(const OpDescPtr &op_desc) { bool IsTbeTask(const OpDescPtr &op_desc) {
uint32_t run_mode = static_cast<uint32_t>(domi::ImplyType::INVALID); uint32_t run_mode = static_cast<uint32_t>(domi::ImplyType::INVALID);
if (!AttrUtils::GetInt(op_desc, ATTR_NAME_IMPLY_TYPE, run_mode)) { if (!AttrUtils::GetInt(op_desc, ATTR_NAME_IMPLY_TYPE, run_mode)) {
return false; return false;
@ -1214,7 +1214,7 @@ void DavinciModel::GetAllGearsInfo(const NodePtr &node) {
} }
if (!gear_info.empty()) { if (!gear_info.empty()) {
all_gears_info_.emplace_back(gear_info); all_gears_info_.emplace_back(gear_info);
GELOGD("Init all gears info from %s, gaer info is %s.", node->GetName().c_str(), GELOGD("Init all gears info from %s, gaer info is %s", node->GetName().c_str(),
formats::JoinToString(gear_info).c_str()); formats::JoinToString(gear_info).c_str());
} }
} }
@ -1283,7 +1283,7 @@ Status DavinciModel::GetGearAndRealOutSizeInfo(const ComputeGraphPtr &graph, con
Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_t input_index, Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_t input_index,
const NodePtr &case_node) { const NodePtr &case_node) {
GELOGD("Start get output size of %s, which is %zu input to netoutput.", case_node->GetName().c_str(), input_index); GELOGD("Start get output size of %s, which is %zu input to netoutput", case_node->GetName().c_str(), input_index);
const auto &func_desc = case_node->GetOpDesc(); const auto &func_desc = case_node->GetOpDesc();
GE_CHECK_NOTNULL(func_desc); GE_CHECK_NOTNULL(func_desc);
std::map<vector<int32_t>, int64_t> gear_and_real_out_size_info; std::map<vector<int32_t>, int64_t> gear_and_real_out_size_info;
@ -1328,7 +1328,7 @@ Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_
} }
Status DavinciModel::GetGearAndRealOutShapeInfo(const ComputeGraphPtr &graph, const NodePtr &node) { Status DavinciModel::GetGearAndRealOutShapeInfo(const ComputeGraphPtr &graph, const NodePtr &node) {
GELOGD("Start to get dynamic output dims of %s.", node->GetName().c_str()); GELOGD("Start to get dynamic output dims of %s", node->GetName().c_str());
merge_nodes_gear_and_real_out_shape_info_.clear(); merge_nodes_gear_and_real_out_shape_info_.clear();
size_t idx = 0; size_t idx = 0;
for (const auto &in_anchor : node->GetAllInDataAnchors()) { for (const auto &in_anchor : node->GetAllInDataAnchors()) {
@ -1342,7 +1342,7 @@ Status DavinciModel::GetGearAndRealOutShapeInfo(const ComputeGraphPtr &graph, co
if ((peer_node->GetType() == CASE) && (op_desc->HasAttr(ATTR_INSERT_BY_MBATCH))) { if ((peer_node->GetType() == CASE) && (op_desc->HasAttr(ATTR_INSERT_BY_MBATCH))) {
std::vector<std::string> dynamic_output_shape_info; std::vector<std::string> dynamic_output_shape_info;
if (!AttrUtils::GetListStr(node->GetOpDesc(), ATTR_NAME_DYNAMIC_OUTPUT_DIMS, dynamic_output_shape_info)) { if (!AttrUtils::GetListStr(node->GetOpDesc(), ATTR_NAME_DYNAMIC_OUTPUT_DIMS, dynamic_output_shape_info)) {
GELOGD("Can not get dynamic output dims attr from %s.", node->GetName().c_str()); GELOGD("Can not get dynamic output dims attr from %s", node->GetName().c_str());
return SUCCESS; return SUCCESS;
} }
GELOGI("Dynamic output shape info is %s", formats::JoinToString(dynamic_output_shape_info).c_str()); GELOGI("Dynamic output shape info is %s", formats::JoinToString(dynamic_output_shape_info).c_str());
@ -1362,7 +1362,7 @@ Status DavinciModel::GetGearAndRealOutShapeInfo(const ComputeGraphPtr &graph, co
output_shape.emplace_back(it[i]); output_shape.emplace_back(it[i]);
} }
gear_and_real_out_shape_info[all_gears_info_[gear_index]] = output_shape; gear_and_real_out_shape_info[all_gears_info_[gear_index]] = output_shape;
GELOGD("Get real gear index is: %zu, gear info is %s, output shape is %s.", GELOGD("Get real gear index is: %zu, gear info is %s, output shape is %s",
gear_index, formats::JoinToString(all_gears_info_[gear_index]).c_str(), gear_index, formats::JoinToString(all_gears_info_[gear_index]).c_str(),
formats::JoinToString(output_shape).c_str()); formats::JoinToString(output_shape).c_str());
} }
@ -1385,7 +1385,7 @@ void DavinciModel::ParseDynamicOutShape(const std::vector<std::string> &str_info
} }
shape.emplace_back(std::strtol(dim.c_str(), nullptr, kDecimal)); shape.emplace_back(std::strtol(dim.c_str(), nullptr, kDecimal));
} }
GELOGI("Shape from attr is %s.", formats::JoinToString(shape).c_str()); GELOGI("Shape from attr is %s", formats::JoinToString(shape).c_str());
vec_info.emplace_back(shape); vec_info.emplace_back(shape);
} }
} }
@ -1428,7 +1428,7 @@ Status DavinciModel::InitLabelSet(const OpDescPtr &op_desc) {
return INTERNAL_ERROR; return INTERNAL_ERROR;
} }
GELOGI("InitLabelSet: label[%u]=%p stream[%u]=%p.", label_index, rt_label, stream_id, stream); GELOGI("InitLabelSet: label[%u]=%p stream[%u]=%p", label_index, rt_label, stream_id, stream);
label_id_indication_.insert(label_index); label_id_indication_.insert(label_index);
label_list_[label_index] = rt_label; label_list_[label_index] = rt_label;
return SUCCESS; return SUCCESS;
@ -1831,7 +1831,7 @@ void DavinciModel::GetUserDesignateShapeOrder(std::vector<std::string> &user_inp
/// ///
Status DavinciModel::InitAippInfo(uint32_t index, const OpDescPtr &op_desc) { Status DavinciModel::InitAippInfo(uint32_t index, const OpDescPtr &op_desc) {
if (!op_desc->HasAttr(ATTR_NAME_AIPP)) { if (!op_desc->HasAttr(ATTR_NAME_AIPP)) {
GELOGW("There is not AIPP related with index %u.", index); GELOGW("There is not AIPP related with index %u", index);
return SUCCESS; return SUCCESS;
} }
@ -1861,7 +1861,7 @@ Status DavinciModel::InitAippInfo(uint32_t index, const OpDescPtr &op_desc) {
Status DavinciModel::GetAippInfo(uint32_t index, AippConfigInfo &aipp_info) const { Status DavinciModel::GetAippInfo(uint32_t index, AippConfigInfo &aipp_info) const {
const auto it = aipp_info_list_.find(index); const auto it = aipp_info_list_.find(index);
if (it == aipp_info_list_.end()) { if (it == aipp_info_list_.end()) {
GELOGW("there is not AIPP related with index %u.", index); GELOGW("there is not AIPP related with index %u", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST; return ACL_ERROR_GE_AIPP_NOT_EXIST;
} }
@ -1871,7 +1871,7 @@ Status DavinciModel::GetAippInfo(uint32_t index, AippConfigInfo &aipp_info) cons
Status DavinciModel::InitAippType(uint32_t index, const OpDescPtr &op_desc, const map<uint32_t, OpDescPtr> &data_list) { Status DavinciModel::InitAippType(uint32_t index, const OpDescPtr &op_desc, const map<uint32_t, OpDescPtr> &data_list) {
if (!op_desc->HasAttr(ATTR_DATA_RELATED_AIPP_MODE)) { if (!op_desc->HasAttr(ATTR_DATA_RELATED_AIPP_MODE)) {
GELOGW("There is no aipp releated info with index %u.", index); GELOGW("There is no aipp releated info with index %u", index);
return SUCCESS; return SUCCESS;
} }
@ -1916,7 +1916,7 @@ Status DavinciModel::GetAippType(uint32_t index, InputAippType &aipp_type, size_
GE_CHK_BOOL_RET_STATUS(index < input_addrs_list_.size(), PARAM_INVALID, "Index %u is invalid", index); GE_CHK_BOOL_RET_STATUS(index < input_addrs_list_.size(), PARAM_INVALID, "Index %u is invalid", index);
const auto it = aipp_type_list_.find(index); const auto it = aipp_type_list_.find(index);
if (it == aipp_type_list_.end()) { if (it == aipp_type_list_.end()) {
GELOGW("There is no aipp releated info with index %u.", index); GELOGW("There is no aipp releated info with index %u", index);
aipp_type = DATA_WITHOUT_AIPP; aipp_type = DATA_WITHOUT_AIPP;
aipp_index = 0xFFFFFFFF; aipp_index = 0xFFFFFFFF;
return SUCCESS; return SUCCESS;

@ -271,7 +271,8 @@ ge::Status ModelManager::SetDynamicSize(uint32_t model_id, const std::vector<uin
return SUCCESS; return SUCCESS;
} }
ge::Status ModelManager::DoLoadHybridModelOnline(uint32_t model_id, const string &model_name, const shared_ptr<ge::GeRootModel> &ge_root_model, ge::Status ModelManager::DoLoadHybridModelOnline(uint32_t model_id, const string &model_name,
const shared_ptr<ge::GeRootModel> &ge_root_model,
const shared_ptr<ModelListener> &listener) { const shared_ptr<ModelListener> &listener) {
auto hybrid_model = hybrid::HybridDavinciModel::Create(ge_root_model); auto hybrid_model = hybrid::HybridDavinciModel::Create(ge_root_model);
GE_CHECK_NOTNULL(hybrid_model); GE_CHECK_NOTNULL(hybrid_model);

@ -73,7 +73,8 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {
ge::Status LoadModelOnline(uint32_t &model_id, const std::shared_ptr<ge::GeRootModel> &ge_root_model, ge::Status LoadModelOnline(uint32_t &model_id, const std::shared_ptr<ge::GeRootModel> &ge_root_model,
std::shared_ptr<ModelListener> listener); std::shared_ptr<ModelListener> listener);
ge::Status DoLoadHybridModelOnline(uint32_t model_id, const string &model_name, const shared_ptr<ge::GeRootModel> &ge_root_model, ge::Status DoLoadHybridModelOnline(uint32_t model_id, const string &model_name,
const shared_ptr<ge::GeRootModel> &ge_root_model,
const std::shared_ptr<ModelListener> &listener); const std::shared_ptr<ModelListener> &listener);
/// ///

@ -387,7 +387,7 @@ Status ModelUtils::GetVarAddr(const RuntimeParam &model_param, const ConstOpDesc
GELOGE(PARAM_INVALID, "rdma var addr is invalid, addr=%p", reinterpret_cast<uint8_t *>(offset)); GELOGE(PARAM_INVALID, "rdma var addr is invalid, addr=%p", reinterpret_cast<uint8_t *>(offset));
return PARAM_INVALID; return PARAM_INVALID;
} }
var_addr = reinterpret_cast<uint8_t *>(offset); var_addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(offset));
break; break;
case RT_MEMORY_HBM: case RT_MEMORY_HBM:
VALIDATE_MEM_RANGE(op_desc, model_param.var_size, offset - model_param.logic_var_base); VALIDATE_MEM_RANGE(op_desc, model_param.var_size, offset - model_param.logic_var_base);

@ -60,7 +60,7 @@ class StageExecutor {
BlockingQueue<StageTask> task_queue_; BlockingQueue<StageTask> task_queue_;
std::unique_ptr<SubgraphExecutor> root_graph_executor_; std::unique_ptr<SubgraphExecutor> root_graph_executor_;
GraphExecutionContext context_; GraphExecutionContext context_;
StageExecutor *next_executor_; StageExecutor *next_executor_ = nullptr;
rtStream_t stream_ = nullptr; rtStream_t stream_ = nullptr;
}; };

@ -30,7 +30,7 @@ namespace ge {
namespace hybrid { namespace hybrid {
class TbeHandleHolder { class TbeHandleHolder {
public: public:
TbeHandleHolder(void *bin_handle); explicit TbeHandleHolder(void *bin_handle);
~TbeHandleHolder(); ~TbeHandleHolder();
void SetBinHandle(void *bin_handle) { bin_handle_ = bin_handle; } void SetBinHandle(void *bin_handle) { bin_handle_ = bin_handle; }

@ -360,6 +360,7 @@ Status AicpuTfNodeTask::Init(const HybridModel &model) {
need_sync_ = true; need_sync_ = true;
} }
auto task_defs = model.GetTaskDefs(node_item_->node); auto task_defs = model.GetTaskDefs(node_item_->node);
GE_CHECK_NOTNULL(task_defs);
if (unknown_type_ == DEPEND_COMPUTE) { if (unknown_type_ == DEPEND_COMPUTE) {
GE_CHK_STATUS_RET_NOLOG(SetMemCopyTask((*task_defs)[1])); GE_CHK_STATUS_RET_NOLOG(SetMemCopyTask((*task_defs)[1]));
} }

@ -70,7 +70,8 @@ Status OpTask::OpenDump(rtStream_t stream) {
uint64_t output_addr = arg_base[input_size + j]; uint64_t output_addr = arg_base[input_size + j];
output_adds.emplace_back(output_addr); output_adds.emplace_back(output_addr);
} }
dump_op_.SetDumpInfo(DumpManager::GetInstance().GetDumpProperties(kInferSessionId), op_desc_, input_addrs, output_adds, stream); dump_op_.SetDumpInfo(DumpManager::GetInstance().GetDumpProperties(kInferSessionId),
op_desc_, input_addrs, output_adds, stream);
auto status = dump_op_.LaunchDumpOp(); auto status = dump_op_.LaunchDumpOp();
if (status != SUCCESS) { if (status != SUCCESS) {
GELOGE(status, "Launch dump op failed in single op"); GELOGE(status, "Launch dump op failed in single op");

Loading…
Cancel
Save