Fix printf-like format specifiers

pull/998/head
zhangxiaokun 4 years ago
parent a229654028
commit f18bb48087
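Editor's note: every hunk below corrects a printf-style length modifier (or a missing conversion) in a GELOG* call so the specifier matches the argument's actual type; on LP64 targets a mismatch such as %d for a long is undefined behavior and can garble the log or crash. A minimal sketch, assuming a GCC/Clang toolchain, of how such varargs wrappers can be checked at compile time (LogInfo is an illustrative stand-in, not the repo's logger):

#include <cstdarg>
#include <cstdio>

// format(printf, 1, 2): argument 1 is the format string, varargs start at 2.
__attribute__((format(printf, 1, 2)))
void LogInfo(const char *fmt, ...) {
  va_list args;
  va_start(args, fmt);
  (void)vfprintf(stderr, fmt, args);
  va_end(args);
}

int main() {
  long write_count = -1;
  LogInfo("count = %ld\n", write_count);    // matches: %ld vs long
  // LogInfo("count = %d\n", write_count);  // -Wformat would warn: %d vs long
  return 0;
}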

@@ -62,7 +62,7 @@ Status FileSaver::WriteData(const void *data, uint32_t size, int32_t fd) {
 while (size > size_1g) {
 write_count = mmWrite(fd, reinterpret_cast<void *>(seek), size_1g);
 if (write_count == EN_INVALID_PARAM || write_count == EN_ERROR) {
-GELOGE(FAILED, "Write data failed. mmpa_errorno = %d, %s", write_count, strerror(errno));
+GELOGE(FAILED, "Write data failed. mmpa_errorno = %ld, %s", write_count, strerror(errno));
 return FAILED;
 }
 size -= size_1g;
@@ -75,7 +75,7 @@ Status FileSaver::WriteData(const void *data, uint32_t size, int32_t fd) {
 // -1: Failed to write to file; - 2: Illegal parameter
 if (write_count == EN_INVALID_PARAM || write_count == EN_ERROR) {
-GELOGE(FAILED, "Write data failed. mmpa_errorno = %d, %s", write_count, strerror(errno));
+GELOGE(FAILED, "Write data failed. mmpa_errorno = %ld, %s", write_count, strerror(errno));
 return FAILED;
 }
@@ -133,7 +133,7 @@ Status FileSaver::SaveWithFileHeader(const std::string &file_path, const ModelFi
 WriteData(static_cast<const void *>(&model_partition_table), table_size, fd) != SUCCESS, ret = FAILED; break);
 // Write partition data
 for (const auto &partitionData : partition_datas) {
-GELOGI("GC:size[%zu]", partitionData.size);
+GELOGI("GC:size[%u]", partitionData.size);
 GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
 WriteData(static_cast<const void *>(partitionData.data), partitionData.size, fd) != SUCCESS, ret = FAILED;
 break);
@@ -305,7 +305,7 @@ Status FileSaver::SaveWithFileHeader(const std::string &file_path, const ModelFi
 // Write partition data
 auto &cur_partition_datas = all_partition_datas[index];
 for (const auto &partition_data : cur_partition_datas) {
-GELOGI("GC:size[%zu]", partition_data.size);
+GELOGI("GC:size[%u]", partition_data.size);
 GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
 WriteData(static_cast<const void *>(partition_data.data), partition_data.size, fd) != SUCCESS, ret = FAILED;
 break);
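The four FileSaver::WriteData fixes above switch %d to %ld for mmWrite's return value, which implies write_count is a long-sized signed count (mmpa's analogue of POSIX ssize_t; not verified here). When the exact typedef is out of reach, an explicit cast pins the printed width; a sketch with POSIX write() standing in for mmWrite:

#include <cstdio>
#include <unistd.h>

void ReportShortWrite(int fd, const void *buf, size_t len) {
  ssize_t write_count = write(fd, buf, len);  // stand-in for mmWrite
  if (write_count < 0) {
    // Cast once, print with %lld: correct on LP64 and LLP64 alike.
    std::fprintf(stderr, "Write data failed, count = %lld\n",
                 static_cast<long long>(write_count));
  }
}

int main() {
  const char msg[] = "hello";
  ReportShortWrite(1, msg, sizeof(msg) - 1);
  return 0;
}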

@@ -1000,8 +1000,8 @@ Status ModelCacheHelper::RecoverVarAddrAndTensorDesc(const Json &json) const {
 auto offset = (tensor_addr_mgr.offset);
 // Check logic address and offset
 if (logic_address - offset != VarManager::Instance(session_id_)->GetVarMemLogicBase()) {
-GELOGW("Check logic_address[%u] and offset [%u] of %s failed, var mem logic base is %u, abandon", logic_address,
-offset, iter.first.c_str(), VarManager::Instance(session_id_)->GetVarMemLogicBase());
+GELOGW("Check logic_address[%lu] and offset [%lu] of %s failed, var mem logic base is %lu, abandon",
+logic_address, offset, iter.first.c_str(), VarManager::Instance(session_id_)->GetVarMemLogicBase());
 return PARAM_INVALID;
 }
 // Offset is needed by SaveVarVddr instead of logic address

@@ -537,7 +537,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadRootMod
 //model verison 1.0 file header does not have model_num member
 is_unknown_shape_model_ = file_header_->version >= ge::MODEL_VERSION &&
 file_header_->model_num > kStatiOmFileModelNum;
-GELOGD("cur om model is ge root model or no %d, model version %zu", is_unknown_shape_model_, file_header_->version);
+GELOGD("cur om model is ge root model or no %d, model version %u", is_unknown_shape_model_, file_header_->version);
 OmFileLoadHelper om_load_helper;
 if (is_unknown_shape_model_) {
@@ -746,7 +746,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadTask(Om
 GELOGE(INTERNAL_ERROR, "ReadProtoFromArray failed.");
 return INTERNAL_ERROR;
 }
-GELOGD("TASK_INFO op_size:%zu, stream_num:%u", task->op().size(), task->stream_num());
+GELOGD("TASK_INFO op_size:%d, stream_num:%u", task->op().size(), task->stream_num());
 }
 cur_model->SetModelTaskDef(task);
 return SUCCESS;

@@ -203,7 +203,7 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, uint32_t m
 auto partition_table = reinterpret_cast<ModelPartitionTable *>(model_data + cur_offset);
 size_t partition_table_size = SIZE_OF_MODEL_PARTITION_TABLE(*partition_table);
 cur_offset += partition_table_size;
-GELOGD("Cur model index %zu: ModelPartitionTable num :%u, "
+GELOGD("Cur model index %u: ModelPartitionTable num :%u, "
 "ModelFileHeader length :%zu, ModelPartitionTable length :%zu",
 index, partition_table->num, sizeof(ModelFileHeader), partition_table_size);
 if (model_data_size <= cur_offset) {
@@ -219,7 +219,7 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, uint32_t m
 partition.type = partition_table->partition[i].type;
 if (index >= model_contexts_.size()) {
 if (index != model_contexts_.size()) {
-GELOGE(FAILED, "cur index is %zu make model_contexts_ overflow", index);
+GELOGE(FAILED, "cur index is %u make model_contexts_ overflow", index);
 return FAILED;
 }
@@ -231,16 +231,16 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, uint32_t m
 }
 if (partition.size > model_data_size || cur_offset > model_data_size - partition.size) {
-GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "The partition size %zu is greater than the model data size %u.",
+GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "The partition size %u is greater than the model data size %u.",
 partition.size + cur_offset, model_data_size);
 return GE_EXEC_MODEL_DATA_SIZE_INVALID;
 }
 cur_offset += partition.size;
-GELOGD("Partition, type:%d, size:%u, model_index:%zu", static_cast<int>(partition.type), partition.size, index);
+GELOGD("Partition, type:%d, size:%u, model_index:%u", static_cast<int>(partition.type), partition.size, index);
 }
 }
 if (cur_offset != model_data_size) {
-GELOGE(FAILED, "do not get the complete model, read end offset:%zu, all size:%zu", cur_offset, model_data_size);
+GELOGE(FAILED, "do not get the complete model, read end offset:%u, all size:%u", cur_offset, model_data_size);
 return FAILED;
 }
 return SUCCESS;
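The OmFileLoadHelper hunks above trade %zu for %u where the variables (index, cur_offset) are uint32_t rather than size_t. For fixed-width integers, the <cinttypes> PRI macros avoid re-deriving the right modifier per platform; a small sketch (values are illustrative, not the repo's):

#include <cinttypes>
#include <cstdio>

int main() {
  uint32_t index = 0;
  uint32_t cur_offset = 256;
  size_t table_size = sizeof(uint64_t) * 4;
  std::printf("model index %" PRIu32 ", offset %" PRIu32 ", table %zu bytes\n",
              index, cur_offset, table_size);
  return 0;
}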

@@ -51,7 +51,7 @@ bool KernelStore::Build() {
 kernel_head.name_len = static_cast<uint32_t>(kernel->GetName().length());
 kernel_head.bin_len = static_cast<uint32_t>(kernel->GetBinDataSize());
-GELOGD("get kernel bin name %s, addr %p, size %u",
+GELOGD("get kernel bin name %s, addr %p, size %zu",
 kernel->GetName().c_str(), kernel->GetBinData(), kernel->GetBinDataSize());
 mem_ret = memcpy_s(next_buffer, remain_len, &kernel_head, sizeof(kernel_head));
 GE_CHK_BOOL_EXEC_NOLOG(mem_ret == EOK, return false);

@@ -878,11 +878,11 @@ inline Status CheckInt32DivOverflow(int32_t a, int32_t b) {
 return INTERNAL_ERROR; \
 }
-#define FMK_INT64_UINT32_MULCHECK(a, b) \
-if (ge::CheckInt64Uint32MulOverflow((a), (b)) != SUCCESS) { \
-GELOGW("Int64 %ld and UINT32 %u multiplication can result in overflow!", static_cast<uint32_t>(a), \
-static_cast<uint32_t>(b)); \
-return INTERNAL_ERROR; \
+#define FMK_INT64_UINT32_MULCHECK(a, b) \
+if (ge::CheckInt64Uint32MulOverflow((a), (b)) != SUCCESS) { \
+GELOGW("Int64 %ld and Uint32 %u multiplication can result in overflow!", static_cast<int64_t>(a), \
+static_cast<uint32_t>(b)); \
+return INTERNAL_ERROR; \
 }
 #define FMK_FP16_ZEROCHECK(a) \
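The macro hunk fixes a double inconsistency: the old code printed `a` with %ld but first cast it to uint32_t, so neither the value nor the modifier was right; the new code casts to the int64_t the specifier names. A sketch of the overflow predicate such a macro guards (illustrative names, two's-complement int64_t assumed; not the repo's implementation):

#include <cstdint>
#include <cstdio>
#include <limits>

bool Int64Uint32MulOverflows(int64_t a, uint32_t b) {
  if (a == 0 || b == 0) {
    return false;
  }
  const int64_t limit = (a > 0) ? std::numeric_limits<int64_t>::max()
                                : std::numeric_limits<int64_t>::min();
  // a * b stays in range iff a does not pass limit / b; integer division
  // truncates toward zero, which makes the bound exact on both signs.
  return (a > 0) ? (a > limit / static_cast<int64_t>(b))
                 : (a < limit / static_cast<int64_t>(b));
}

int main() {
  std::printf("%d %d\n",
              Int64Uint32MulOverflows(std::numeric_limits<int64_t>::max(), 2),
              Int64Uint32MulOverflows(42, 10));  // prints: 1 0
  return 0;
}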

@@ -454,7 +454,7 @@ Status GeExecutor::GetCurDynamicDims(uint32_t model_id, const vector<uint64_t> &
 if (all_data_dims[i] < 0) {
 cur_dynamic_dims.push_back(dynamic_dims[i]);
 } else if (static_cast<uint64_t>(all_data_dims[i]) != dynamic_dims[i]) {
-GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, "Static dims should be same, index: %zu value: %d should be %d",
+GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, "Static dims should be same, index: %zu value: %lu should be %ld",
 i, dynamic_dims[i], all_data_dims[i]);
 return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID;
 }

@@ -33,7 +33,7 @@ namespace {
 uint64_t size = data_num * sizeof(TYPE); \
 ge_tensor = MakeShared<GeTensor>(out_desc, size); \
 GE_CHECK_NOTNULL(ge_tensor); \
-GELOGD("node:%s allocate output %zu success, size=%lld", op_desc->GetName().c_str(), i, size); \
+GELOGD("node:%s allocate output %zu success, size=%ld", op_desc->GetName().c_str(), i, size); \
 ge_tensor->MutableTensorDesc().SetDataType(out_desc.GetDataType()); \
 ge_tensor->MutableTensorDesc().SetShape(out_desc.GetShape()); \
 } else { \
@@ -72,7 +72,7 @@ Status GetDataNumber(const GeTensorDesc &out_desc, uint64_t &data_num) {
 num_size = max_range_size;
 }
 if (num_size < 0) {
-GELOGE(INTERNAL_ERROR, "Get negative size, num_size=%lld.", num_size);
+GELOGE(INTERNAL_ERROR, "Get negative size, num_size=%ld.", num_size);
 return INTERNAL_ERROR;
 }
 data_num = static_cast<uint64_t>(num_size);

@@ -741,7 +741,7 @@ Status GraphBuilder::AddOutputMemTypeForNode(const NodePtr &node) {
 if (!AttrUtils::GetInt(op_desc, ATTR_INPUT_MEMORY_TYPE, mem_type)) {
 return SUCCESS;
 }
-GELOGD("[%s] has attr input_memory_type %ld", op_desc->GetName().c_str(), mem_type);
+GELOGD("[%s] has attr input_memory_type %u", op_desc->GetName().c_str(), mem_type);
 for (const auto &in_data_anchor : node->GetAllInDataAnchors()) {
 const auto &peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
 GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
@@ -751,7 +751,7 @@ Status GraphBuilder::AddOutputMemTypeForNode(const NodePtr &node) {
 while (true) {
 const auto &src_desc = src_node->GetOpDesc();
 GE_IF_BOOL_EXEC(src_desc == nullptr, continue);
-GELOGD("[%s:%u] set attr output_memory_type %ld", src_desc->GetName().c_str(), src_out_anchor->GetIdx(),
+GELOGD("[%s:%u] set attr output_memory_type %d", src_desc->GetName().c_str(), src_out_anchor->GetIdx(),
 mem_type);
 if (!AttrUtils::SetInt(src_desc->MutableOutputDesc(src_out_anchor->GetIdx()), ATTR_OUTPUT_MEMORY_TYPE,
 mem_type)) {

@@ -1535,8 +1535,8 @@ ge::Status GraphMemoryAssigner::UpdateOpInputOffset(const NodePtr &node, vector<
 GE_CHK_STATUS(TensorUtils::GetDataOffset(tensor_desc, input_offset));
 }
-GELOGD("%s node[%s] input[%d] is set from node[%s] out index[%lu] offset[%ld]",
-has_mem_type_attr == true ? "Fusion" : "",
+GELOGD("%s node[%s] input[%ld] is set from node[%s] out index[%lu] offset[%ld]",
+has_mem_type_attr ? "Fusion" : "",
 tmp_op_desc->GetName().c_str(),
 valid_input_index,
 peer_out_anchor->GetOwnerNode()->GetOpDesc()->GetName().c_str(),

@@ -466,11 +466,10 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
 task_def_ptr->set_ops_kernel_store_ptr(reinterpret_cast<uintptr_t>(ops_kernel_info_store_ptr));
 }
-GELOGI(
-"Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), id:%ld, stream_id:%ld]"
-" task finished, generate %u task(s).",
-op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id,
-task_list_size_after - task_list_size_before);
+GELOGI("Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), id:%ld, stream_id:%ld]"
+" task finished, generate %zu task(s).",
+op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id,
+task_list_size_after - task_list_size_before);
 // record nodes which have call generate task successfully
 fusion_nodes_seen.insert(fusion_node.get());

@@ -1876,7 +1876,7 @@ Status DavinciModel::InitAippType(uint32_t index, const OpDescPtr &op_desc, cons
 (void)AttrUtils::GetStr(op_desc, ATTR_DATA_AIPP_DATA_NAME_MAP, releated_name);
 for (const auto item : data_list) {
 if (item.second->GetName() == releated_name) {
-GELOGI("Find aipp_data [%s] index %zu from index %u", releated_name.c_str(), item.first, index);
+GELOGI("Find aipp_data [%s] index %u from index %u", releated_name.c_str(), item.first, index);
 aipp_index = item.first;
 }
 }

@@ -1704,7 +1704,7 @@ Status ModelManager::LaunchKernelCheckAicpuOp(std::vector<std::string> &aicpu_op
 for (uint32_t i = 0; i < res_op_nums; i++) {
 ReturnCode ret_code = res_ret_code_list.at(i);
 SysOpInfo aicpu_info = res_aicpu_op_info_list.at(i);
-GELOGI("Not support aicpu op type: %lu, kernel_type:%d, opLen:%d, ret_code:%d", aicpu_info.opType,
+GELOGI("Not support aicpu op type: %lu, kernel_type:%d, opLen:%lu, ret_code:%d", aicpu_info.opType,
 aicpu_info.kernelsType, aicpu_info.opLen, ret_code);
 std::vector<char> op_name;
 op_name.clear();

@@ -2777,7 +2777,7 @@ Status GraphManager::ParseInputsDimsForGetNexNosinkAndData(const vector<NodePtr>
 }
 GetLocalOmgContext().user_real_input_dims.emplace_back(input_tensor.at(index).dims);
-GELOGI("Shape dims of %d data is %s.", index, formats::JoinToString(input_tensor.at(index).dims).c_str());
+GELOGI("Shape dims of %zu data is %s.", index, formats::JoinToString(input_tensor.at(index).dims).c_str());
 }
 return SUCCESS;
 }

@@ -299,12 +299,12 @@ Status HbmMemResource::AssignVarMem(const std::string &var_name, uint64_t size,
 Status RdmaMemResource::AssignVarMem(const std::string &var_name, uint64_t size, uint64_t session_id, size_t &address) {
 uint8_t *buffer = MemManager::Instance().RdmaPoolInstance(RT_MEMORY_HBM).Malloc(size);
 if (buffer == nullptr) {
-GELOGE(MEMALLOC_FAILED, "Failed to malloc rdma memory for node %s, size = %llu", var_name.c_str(), size);
+GELOGE(MEMALLOC_FAILED, "Failed to malloc rdma memory for node %s, size = %lu", var_name.c_str(), size);
 return MEMALLOC_FAILED;
 }
 address = static_cast<size_t>(reinterpret_cast<uintptr_t>(buffer));
 var_mem_size_ += size;
-GELOGI("[IMAS]AssignVarMem Set session_%llu name[%s] output[%d] addr to [%p] size[%llu].",
+GELOGI("[IMAS]AssignVarMem Set session_%lu name[%s] output[%d] addr to [%p] size[%lu].",
 session_id, var_name.c_str(), 0, buffer, size);
 return SUCCESS;
 }

@@ -203,7 +203,7 @@ bool CondRemovePass::CheckIfCondConstInput(const OutDataAnchorPtr &cond_out_anch
 // Get weights from peer node
 auto weights = OpDescUtils::GetWeights(out_node);
 if (weights.size() <= static_cast<size_t>(cond_out_anchor->GetIdx())) {
-GELOGI("Get weights of node %s out index %d, weight size %u is not fit for data index %d.",
+GELOGI("Get weights of node %s out index %d, weight size %zu is not fit for data index %d.",
 out_node->GetName().c_str(), cond_out_anchor->GetIdx(), weights.size(), cond_out_anchor->GetIdx());
 return false;
 }
@@ -241,7 +241,7 @@ Status CondRemovePass::ReplaceIfCaseNodeWithPartitioncall(const NodePtr &node, c
 for (const auto &peerout_anchor : input_anchor->GetPeerAnchors()) {
 if (GraphUtils::AddEdge(peerout_anchor, partitioncall_node->GetInAnchor(
 input_anchor->GetIdx() - kConditionIndexNum)) != ge::GRAPH_SUCCESS) {
-GELOGE(FAILED, "Add edge failed, from node:%s idx:%d to node:%s idx:%d, input num:%d, output num:%d",
+GELOGE(FAILED, "Add edge failed, from node:%s idx:%d to node:%s idx:%d, input num:%zu, output num:%zu",
 peerout_anchor->GetOwnerNode()->GetName().c_str(), peerout_anchor->GetIdx(),
 partitioncall_node->GetName().c_str(), input_anchor->GetIdx(), input_desc_size,
 output_desc_size);
@@ -254,14 +254,14 @@ Status CondRemovePass::ReplaceIfCaseNodeWithPartitioncall(const NodePtr &node, c
 for (const auto &output_anchor : node->GetAllOutAnchors()) {
 for (const auto &peerin_anchor : output_anchor->GetPeerAnchors()) {
 if (GraphUtils::RemoveEdge(node->GetOutAnchor(output_anchor->GetIdx()), peerin_anchor) != ge::GRAPH_SUCCESS) {
-GELOGE(FAILED, "Remove edge failed, from node:%s idx:%d to node:%s idx:%d, input num:%d, output num:%d",
+GELOGE(FAILED, "Remove edge failed, from node:%s idx:%d to node:%s idx:%d, input num:%zu, output num:%zu",
 node->GetName().c_str(), output_anchor->GetIdx(), peerin_anchor->GetOwnerNode()->GetName().c_str(),
 peerin_anchor->GetIdx(), input_desc_size, output_desc_size);
 return FAILED;
 }
 if (GraphUtils::AddEdge(partitioncall_node->GetOutAnchor(output_anchor->GetIdx()), peerin_anchor) !=
 ge::GRAPH_SUCCESS) {
-GELOGE(FAILED, "Add edge failed, from node:%s idx:%d to node:%s idx:%d, input num:%d, output num:%d",
+GELOGE(FAILED, "Add edge failed, from node:%s idx:%d to node:%s idx:%d, input num:%zu, output num:%zu",
 partitioncall_node->GetName().c_str(), output_anchor->GetIdx(),
 peerin_anchor->GetOwnerNode()->GetName().c_str(), peerin_anchor->GetIdx(), input_desc_size,
 output_desc_size);

@@ -469,7 +469,7 @@ Status ForPass::BuildWhileLink(const WhileInfo &while_info) {
 continue;
 }
 GE_CHK_GRAPH_STATUS_RET(GraphUtils::AddEdge(peer_out_anchor, in_data_anchor),
-"Add data-edge %s:%d->%s:%d failed.",
+"Add data-edge %s:%d->%s:%zu failed.",
 peer_out_anchor->GetOwnerNode()->GetName().c_str(), peer_out_anchor->GetIdx(),
 while_node->GetName().c_str(), i);
 }
@@ -480,7 +480,7 @@ Status ForPass::BuildWhileLink(const WhileInfo &while_info) {
 GE_CHECK_NOTNULL(out_data_anchor);
 for (auto &peer_in_anchor : while_info.data_outputs[i]) {
 GE_CHK_GRAPH_STATUS_RET(GraphUtils::AddEdge(out_data_anchor, peer_in_anchor),
-"Add data-edge %s:%d->%s:%d failed.",
+"Add data-edge %s:%zu->%s:%d failed.",
 while_node->GetName().c_str(), i + kWhileOutputIndex,
 peer_in_anchor->GetOwnerNode()->GetName().c_str(), peer_in_anchor->GetIdx());
 }

@@ -928,7 +928,7 @@ Status MultiBatchClonePass::CreateOriGraph(const ComputeGraphPtr &graph) {
 auto out_data_anchor = node->GetOutDataAnchor(out_index);
 GE_IF_BOOL_EXEC(out_data_anchor == nullptr, continue);
 NodePtr data_node = CreateDataNode(graph, out_data_anchor, data_index);
-GE_IF_BOOL_EXEC(data_node == nullptr, GELOGE(INTERNAL_ERROR, "Create %zu data node failed.",
+GE_IF_BOOL_EXEC(data_node == nullptr, GELOGE(INTERNAL_ERROR, "Create %d data node failed.",
 out_data_anchor->GetIdx()); return INTERNAL_ERROR);
 for (auto &in_anchor : out_data_anchor->GetPeerInDataAnchors()) {
 GE_IF_BOOL_EXEC(in_anchor == nullptr, continue);

@@ -85,7 +85,7 @@ Status RemoveSameConstPass::Run(ComputeGraphPtr graph) {
 ret = GraphUtils::ReplaceNodeAnchors(iter->second, node, {}, output_map);
 if (ret != GRAPH_SUCCESS) {
-GELOGE(INTERNAL_ERROR, "Failed to replace node %s by node %s", node->GetName().c_str(),
+GELOGE(INTERNAL_ERROR, "Failed to replace node %s by node %s, ret=%u", node->GetName().c_str(),
 iter->second->GetName().c_str(), ret);
 return INTERNAL_ERROR;
 }

@@ -311,7 +311,7 @@ Status SubgraphPass::InsertInputMemcpy(const ComputeGraphPtr &graph, const std::
 Status SubgraphPass::InsertOutputMemcpy(const ComputeGraphPtr &graph, const NodePtr &output_node,
 const std::set<uint32_t> &bypass_index) {
 if (output_node->GetAllInDataAnchorsSize() == bypass_index.size()) {
-GELOGD("No need to insert output memcpy node in while_body %s, output_size=%zu, bypass_num=%zu.",
+GELOGD("No need to insert output memcpy node in while_body %s, output_size=%u, bypass_num=%zu.",
 graph->GetName().c_str(), output_node->GetAllInDataAnchorsSize(), bypass_index.size());
 return SUCCESS;
 }

@@ -644,7 +644,7 @@ Status InsertNewOpUtil::RecordAIPPInfoToData(const ComputeGraphPtr &graph) {
 std::vector<NodePtr> aipps;
 GE_RETURN_IF_ERROR(GetAllAipps(data_node, *aipps_or_switchs_or_case.begin(), aipps));
-GELOGI("RecordAIPPInfoToData: Data: name[%s], type[%s], batch size[%u]", data_node->GetName().c_str(),
+GELOGI("RecordAIPPInfoToData: Data: name[%s], type[%s], batch size[%zu]", data_node->GetName().c_str(),
 data_node->GetType().c_str(), aipps.size());
 for (auto aipp_it : aipps) {

@@ -371,7 +371,7 @@ Status MultiBatchGraphCopyer::GetEnterNodesGroupByFrame(map<string, vector<NodeP
 GE_CHECK_NOTNULL(op_desc);
 string frame_name;
 if (!AttrUtils::GetStr(op_desc, ENTER_ATTR_FRAME_NAME, frame_name)) {
-GELOGE(FAILED, "Get attr frame_name of enter[%] failed.", node->GetName().c_str());
+GELOGE(FAILED, "Get attr frame_name of enter[%s] failed.", node->GetName().c_str());
 return FAILED;
 }
 frame_enter[frame_name].emplace_back(node);
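The enter[%] fix above is worth a note: a '%' not followed by a valid conversion specifier makes the whole printf-style call undefined behavior, and the node-name argument that was already being passed never got printed. With a literal format string, compilers diagnose this; a sketch:

#include <cstdio>

int main() {
  const char *node_name = "Enter_1";  // illustrative name, not from the repo
  // std::printf("Get attr frame_name of enter[%] failed.\n", node_name);
  //   ^ -Wformat: unknown conversion type character; UB at run time
  std::printf("Get attr frame_name of enter[%s] failed.\n", node_name);
  return 0;
}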
@@ -850,19 +850,19 @@ NodePtr MultiBatchGraphCopyer::FindSwitchnNodeForDataEdge(const OutDataAnchorPtr
 if (is_getnext_sink_data) {
 auto output_idx = data_out_anchor->GetIdx();
 size_t referenced_index = 0;
-GELOGI("The output idx %zu has %zu referenced nums.", output_idx, data_out_anchor->GetPeerInDataAnchors().size());
+GELOGI("The output idx %d has %zu referenced nums.", output_idx, data_out_anchor->GetPeerInDataAnchors().size());
 for (const auto &peer_in_anchor : data_out_anchor->GetPeerInDataAnchors()) {
 if (peer_in_anchor->GetOwnerNode()->GetOpDesc() == nullptr) {
 GELOGE(INTERNAL_ERROR, "Op desc should not be nullptr.");
 return nullptr;
 }
 if (getnext_nodes_to_switchn_.at(output_idx).empty()) {
-GELOGI("Output idx %zu of %s is static output.", output_idx, data_node->GetName().c_str());
+GELOGI("Output idx %d of %s is static output.", output_idx, data_node->GetName().c_str());
 return nullptr;
 }
 if (output_idx >= static_cast<int>(getnext_nodes_to_switchn_.size()) ||
 referenced_index >= getnext_nodes_to_switchn_.at(output_idx).size()) {
-GELOGE(INTERNAL_ERROR, "Output idx is %zu, referenced index is %zu", output_idx, referenced_index);
+GELOGE(INTERNAL_ERROR, "Output idx is %d, referenced index is %zu", output_idx, referenced_index);
 return nullptr;
 }
 if (peer_in_anchor->GetOwnerNode()->GetOpDesc()->GetName() == origin_node->GetName()) {
@@ -1203,7 +1203,7 @@ Status MultiBatchGraphCopyer::InsertSwitchNAndUpdateMaxShape(const NodePtr &node
 for (size_t i = 0; i < getnext_sink_dynamic_out_mapping_.size(); ++i) {
 if(UpdateMaxShapeToData(node, i) != SUCCESS) {
-GELOGE(PARAM_INVALID, "Failed to update max shape of %zu out anchor", node->GetName().c_str(), i);
+GELOGE(PARAM_INVALID, "Failed to update %s max shape of %zu out anchor", node->GetName().c_str(), i);
 return PARAM_INVALID;
 }
 }

@@ -435,7 +435,7 @@ Status CheckDynamicParams(const vector<vector<int64_t>> &shapes) {
 "E10035", {"shapesize", "minshapesize"}, {std::to_string(shapes.size()), std::to_string(kMinShapesCount - 1)});
 GELOGE(PARAM_INVALID,
 "Input parameter[--dynamic_batch_size, --dynamic_image_size or --dynamic_dims]'s "
-"value size [%zu] must be greater than [%zu].",
+"value size [%zu] must be greater than [%d].",
 shapes.size(), kMinShapesCount - 1);
 return PARAM_INVALID;
 }
@@ -444,7 +444,7 @@ Status CheckDynamicParams(const vector<vector<int64_t>> &shapes) {
 "E10036", {"shapesize", "maxshapesize"}, {std::to_string(shapes.size()), std::to_string(kMaxShapesCount + 1)});
 GELOGE(PARAM_INVALID,
 "Input parameter[--dynamic_batch_size, --dynamic_image_size or --dynamic_dims]'s "
-"value size [%zu] must be less than [%zu].",
+"value size [%zu] must be less than [%d].",
 shapes.size(), kMaxShapesCount + 1);
 return PARAM_INVALID;
 }

@@ -126,10 +126,10 @@ void DynamicStitchKernel::ComputeMergedShape(const vector<ConstGeTensorPtr> &inp
 vector<int64_t> merged_dim_vec = {merged_first_dim + 1};
 if (step > 0) {
 merged_dim_vec.emplace_back(step);
-GELOGD("merged_shape is [ %ld, %ld].", merged_first_dim, step);
+GELOGD("merged_shape is [ %d, %ld].", merged_first_dim, step);
 }
 merged_shape = GeShape(merged_dim_vec);
-GELOGD("merged_shape is [ %ld ].", merged_first_dim);
+GELOGD("merged_shape is [ %d ].", merged_first_dim);
 }
 Status DynamicStitchKernel::GenData(const vector<ConstGeTensorPtr> &input, GeTensorPtr &output_ptr) {
@@ -196,14 +196,14 @@ Status DynamicStitchKernel::StitchDataFollowIndices(int64_t data_unit, const vec
 // if index repeated, need new data replace old data , so give more allowance
 if (indices_set.find(input_indices[j]) != indices_set.end()) {
 if (ge::CheckInt64AddOverflow(input_indices[j], data_unit) != SUCCESS) {
-GELOGW("Check int64 mul overflow failed. Indices is %ld, data_unit is %ld.", input_indices[j], data_unit);
+GELOGW("Check int64 mul overflow failed. Indices is %d, data_unit is %ld.", input_indices[j], data_unit);
 return NOT_CHANGED;
 }
 allowance += data_unit;
 }
 indices_set.insert(input_indices[j]);
 if (!CheckInt64MulOverflow(input_indices[j], data_unit)) {
-GELOGW("Check int64 mul overflow failed. Indices is %ld, data_unit is %ld.", input_indices[j], data_unit);
+GELOGW("Check int64 mul overflow failed. Indices is %d, data_unit is %ld.", input_indices[j], data_unit);
 return NOT_CHANGED;
 }
 dst_offset = input_indices[j] * data_unit;

@@ -124,7 +124,7 @@ Status PackKernel::ValidateInputs(const ge::OpDescPtr &op_desc_ptr, const std::v
 int64_t num = 1;
 for (auto dim : dst_shape.GetDims()) {
 if (dim < 0) {
-GELOGW("Invalid dim ld% in the shape %s", dim, formats::ShapeToString(shape).c_str());
+GELOGW("Invalid dim %ld in the shape %s", dim, formats::ShapeToString(shape).c_str());
 return NOT_CHANGED;
 }
 num *= dim;
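Taken together, every bug in this commit is detectable at compile time once the logging macros forward their format string to a checked printf-family function. A closing sketch with format warnings promoted to errors (the exact toolchain flags are an assumption; adjust per build system):

// Build: g++ -Wall -Wformat=2 -Werror=format demo.cc
#include <cstddef>
#include <cstdio>

int main() {
  size_t weights = 3;
  std::printf("weight size %zu\n", weights);    // OK: %zu vs size_t
  // std::printf("weight size %u\n", weights);  // would fail the build
  return 0;
}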

Some files were not shown because too many files have changed in this diff.