for error code

pull/373/head
weiyang 4 years ago
parent 30e6954181
commit 28f911f5ca

@ -268,18 +268,18 @@ ModelHelper::SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::strin
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadModel(const ge::ModelData &model_data) {
if (model_data.model_data == nullptr || model_data.model_len == 0) {
GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "Model_data is nullptr, or model_data_size is 0");
return GE_EXEC_MODEL_DATA_SIZE_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "Model_data is nullptr, or model_data_size is 0");
return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
}
if (is_assign_model_) {
GELOGE(GE_EXEC_LOAD_MODEL_REPEATED, "Model helper has already loaded!");
return GE_EXEC_LOAD_MODEL_REPEATED;
GELOGE(ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED, "Model helper has already loaded!");
return ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED;
}
if (ReleaseLocalModelData() != SUCCESS) {
GELOGE(INTERNAL_ERROR, "ReleaseLocalModelData failed.");
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA, "ReleaseLocalModelData failed.");
return ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA;
}
Status status = ge::DavinciModelParser::ParseModelContent(model_data, model_addr_tmp_, model_len_tmp_);
@ -300,8 +300,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadModel(c
auto partition_table = reinterpret_cast<ModelPartitionTable *>(model_addr_tmp_);
if (partition_table->num == kOriginalOmPartitionNum) {
model_addr_tmp_ = nullptr;
GELOGE(GE_EXEC_MODEL_PARTITION_NUM_INVALID, "om model is error,please use executable om model");
return GE_EXEC_MODEL_PARTITION_NUM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID, "om model is error,please use executable om model");
return ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID;
}
// Encrypt model need to del temp model/no encrypt model don't need to del model
model_addr_tmp_ = nullptr;
@ -321,23 +321,23 @@ Status ModelHelper::GenerateGeModel(OmFileLoadHelper &om_load_helper) {
GE_CHECK_NOTNULL(model_);
Status ret = LoadModelData(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_MODEL_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED;
}
ret = LoadWeights(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED;
}
ret = LoadTask(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_TASK_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED;
}
ret = LoadTBEKernelStore(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_KERNEL_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED;
}
ret = LoadCustAICPUKernelStore(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_KERNEL_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED;
}
return SUCCESS;
}
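
Note on the pattern: this commit replaces the executor's internal GE_* status values with the externally published ACL_ERROR_GE_* values (declared in the new ge_error_codes.h near the end of this diff), so callers such as ACL receive stable, documented error numbers. A minimal sketch of the before/after shape, using a hypothetical helper name and the GE framework macros that appear in these hunks:

// Illustrative only; assumes the GE framework headers (Status, GELOGE, ge::ModelData).
Status ValidateModelData(const ge::ModelData &model_data) {
  if (model_data.model_data == nullptr || model_data.model_len == 0) {
    // before: GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, ...); return GE_EXEC_MODEL_DATA_SIZE_INVALID;
    GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "Model_data is nullptr, or model_data_size is 0");
    return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
  }
  return SUCCESS;
}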

@ -110,8 +110,8 @@ Status OmFileLoadHelper::CheckModelValid(const ge::ModelData &model) const {
Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, const uint32_t model_data_size) {
if (model_data == nullptr) {
GELOGE(PARAM_INVALID, "Param model_data must not be null!");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID, "Param model_data must not be null!");
return ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID;
}
// Init partition table
auto partition_table = reinterpret_cast<ModelPartitionTable *>(model_data);
@ -119,16 +119,16 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, const uint
// Original model partition include graph-info
if ((partition_table->num != PARTITION_SIZE) && (partition_table->num != (PARTITION_SIZE - 1)) &&
(partition_table->num != (PARTITION_SIZE - kOptionalNum)) && (partition_table->num != 1)) {
GELOGE(GE_EXEC_MODEL_PARTITION_NUM_INVALID, "Invalid partition_table->num:%u", partition_table->num);
return GE_EXEC_MODEL_PARTITION_NUM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID, "Invalid partition_table->num:%u", partition_table->num);
return ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID;
}
size_t mem_offset = SIZE_OF_MODEL_PARTITION_TABLE(*partition_table);
GELOGI("ModelPartitionTable num :%u, ModelFileHeader length :%zu, ModelPartitionTable length :%zu",
partition_table->num, sizeof(ModelFileHeader), mem_offset);
if (model_data_size <= mem_offset) {
GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "invalid model data, partition_table->num:%u, model data size %u",
GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "invalid model data, partition_table->num:%u, model data size %u",
partition_table->num, model_data_size);
return GE_EXEC_MODEL_DATA_SIZE_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
}
for (uint32_t i = 0; i < partition_table->num; i++) {
ModelPartition partition;
@ -138,9 +138,9 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, const uint
context_.partition_datas_.push_back(partition);
if (partition.size > model_data_size || mem_offset > model_data_size - partition.size) {
GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "The partition size %zu is greater than the model data size %u.",
GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "The partition size %zu is greater than the model data size %u.",
partition.size + mem_offset, model_data_size);
return GE_EXEC_MODEL_DATA_SIZE_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
}
mem_offset += partition.size;
GELOGI("Partition, type:%d, size:%u", static_cast<int>(partition.type), partition.size);

@ -36,18 +36,18 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro
std::string real_path = RealPath(model_path);
if (real_path.empty()) {
GELOGE(GE_EXEC_MODEL_PATH_INVALID, "Model file path '%s' is invalid", model_path);
return GE_EXEC_MODEL_PATH_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}
if (GetFileLength(model_path) == -1) {
GELOGE(GE_EXEC_READ_MODEL_FILE_FAILED, "File size not valid, file: %s.", model_path);
return GE_EXEC_READ_MODEL_FILE_FAILED;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "File size not valid, file: %s.", model_path);
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}
std::ifstream fs(real_path.c_str(), std::ifstream::binary);
if (!fs.is_open()) {
GELOGE(GE_EXEC_READ_MODEL_FILE_FAILED, "Open file: %s failed, error: %s", model_path, strerror(errno));
return GE_EXEC_READ_MODEL_FILE_FAILED;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "Open file: %s failed, error: %s", model_path, strerror(errno));
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}
// get length of file:
@ -60,8 +60,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro
char *data = new (std::nothrow) char[len];
if (data == nullptr) {
GELOGE(MEMALLOC_FAILED, "Load model From file failed, bad memory allocation occur. (need:%u)", len);
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Load model From file failed, bad memory allocation occur. (need:%u)", len);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
// read data as a block:
@ -84,7 +84,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseMo
GE_CHECK_NOTNULL(model.model_data);
// Model length too small
GE_CHK_BOOL_RET_STATUS(model.model_len >= sizeof(ModelFileHeader), GE_EXEC_MODEL_DATA_SIZE_INVALID,
GE_CHK_BOOL_RET_STATUS(model.model_len >= sizeof(ModelFileHeader), ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID,
"Invalid model. Model data size %u must be greater than or equal to %zu.", model.model_len,
sizeof(ModelFileHeader));
// Get file header
@ -92,7 +92,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseMo
// Determine whether the file length and magic number match
GE_CHK_BOOL_RET_STATUS(
file_header->length == model.model_len - sizeof(ModelFileHeader) && file_header->magic == MODEL_FILE_MAGIC_NUM,
GE_EXEC_MODEL_DATA_SIZE_INVALID,
ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID,
"Invalid model. file_header->length[%u] + sizeof(ModelFileHeader)[%zu] != model->model_len[%u] || "
"MODEL_FILE_MAGIC_NUM[%u] != file_header->magic[%u]",
file_header->length, sizeof(ModelFileHeader), model.model_len, MODEL_FILE_MAGIC_NUM, file_header->magic);
@ -102,15 +102,15 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseMo
// Get data address
uint8_t *data = reinterpret_cast<uint8_t *>(model.model_data) + sizeof(ModelFileHeader);
if (file_header->is_encrypt == ModelEncryptType::UNENCRYPTED) { // Unencrypted model
GE_CHK_BOOL_RET_STATUS(model.key.empty(), GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION,
GE_CHK_BOOL_RET_STATUS(model.key.empty(), ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION,
"Invalid param. model is unencrypted, but key is not empty.");
model_data = data;
model_len = file_header->length;
GELOGI("Model_len is %u, model_file_head_len is %zu.", model_len, sizeof(ModelFileHeader));
} else {
GELOGE(GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION, "Invalid model. ModelEncryptType not supported.");
res = GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION, "Invalid model. ModelEncryptType not supported.");
res = ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION;
}
return res;

File diff suppressed because it is too large

@ -91,7 +91,7 @@ Status GraphExecutor::SetDynamicSize(uint32_t model_id, const std::vector<uint64
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->SetDynamicSize(model_id, batch_num, dynamic_type);
if (ret != SUCCESS) {
GELOGE(FAILED, "SetDynamicSize failed");
GELOGE(ret, "SetDynamicSize failed");
return ret;
}
return SUCCESS;

@ -212,9 +212,9 @@ Status GraphLoader::CommandHandle(const Command &command) {
return ret;
}
} catch (std::bad_alloc &) {
GELOGE(MEMALLOC_FAILED, "Command handle failed, bad memory allocation occur !");
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Command handle failed, bad memory allocation occur !");
return MEMALLOC_FAILED;
return ACL_ERROR_GE_MEMORY_ALLOCATION;
} catch (...) {
GELOGE(FAILED, "Command handle failed, some exceptions occur !");
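
The exception handlers above keep the same convention: std::bad_alloc now maps to the external ACL_ERROR_GE_MEMORY_ALLOCATION, while the generic catch-all in this hunk still returns FAILED. Condensed into a hypothetical wrapper (DoCommandHandle is a stand-in for the guarded handler body):

// Illustrative sketch of the exception-to-status mapping used in CommandHandle.
Status SafeCommandHandle(const Command &command) {
  try {
    return DoCommandHandle(command);  // hypothetical stand-in for the real handler body
  } catch (std::bad_alloc &) {
    GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Command handle failed, bad memory allocation occurred!");
    return ACL_ERROR_GE_MEMORY_ALLOCATION;
  } catch (...) {
    GELOGE(FAILED, "Command handle failed, some exception occurred!");
    return FAILED;
  }
}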

@ -1502,8 +1502,8 @@ Status DavinciModel::InitVariable(const OpDescPtr &op_desc) {
Status DavinciModel::SetQueIds(const std::vector<uint32_t> &input_queue_ids,
const std::vector<uint32_t> &output_queue_ids) {
if (input_queue_ids.empty() && output_queue_ids.empty()) {
GELOGE(GE_EXEC_MODEL_QUEUE_ID_INVALID, "Param is empty");
return GE_EXEC_MODEL_QUEUE_ID_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Param is empty");
return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
}
input_queue_ids_ = input_queue_ids;
@ -1524,15 +1524,15 @@ Status DavinciModel::LoadWithQueue() {
}
if (input_queue_ids_.size() != new_input_data_info_.size()) {
GELOGE(GE_EXEC_MODEL_QUEUE_ID_INVALID, "Input queue ids not match model: input_queue=%zu input_data=%zu",
GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Input queue ids not match model: input_queue=%zu input_data=%zu",
input_queue_ids_.size(), new_input_data_info_.size());
return GE_EXEC_MODEL_QUEUE_ID_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
}
if (output_queue_ids_.size() != new_output_data_info_.size()) {
GELOGE(GE_EXEC_MODEL_QUEUE_ID_INVALID, "Output queue ids not match model: output_queue=%zu output_data=%zu",
GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Output queue ids not match model: output_queue=%zu output_data=%zu",
output_queue_ids_.size(), new_output_data_info_.size());
return GE_EXEC_MODEL_QUEUE_ID_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
}
GE_CHK_STATUS_RET(AddHeadStream(), "Add head stream failed.");
@ -1875,7 +1875,7 @@ Status DavinciModel::GetAIPPInfo(uint32_t index, AippConfigInfo &aipp_info) {
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP)) {
GELOGW("GetAIPPInfo: there is not AIPP related with index %u.", index);
return GE_AIPP_NOT_EXIST;
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}
std::unique_ptr<domi::AippOpParams> aipp_params(new (std::nothrow) domi::AippOpParams());
@ -1914,8 +1914,9 @@ Status DavinciModel::GetAippType(uint32_t index, InputAippType &type, size_t &ai
} else if (data_mode == "dynamic_aipp_conf") {
type = DYNAMIC_AIPP_NODE;
} else {
GELOGE(INTERNAL_ERROR, "The info of aipp releated info %s is invalid with index %u.", data_mode.c_str(), index);
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID,
"The info of aipp releated info %s is invalid with index %u.", data_mode.c_str(), index);
return ACL_ERROR_GE_AIPP_MODE_INVALID;
}
if (type == DATA_WITH_DYNAMIC_AIPP) {
@ -1929,8 +1930,8 @@ Status DavinciModel::GetAippType(uint32_t index, InputAippType &type, size_t &ai
}
}
if (aipp_index == 0xFFFFFFFF) {
GELOGE(INTERNAL_ERROR, "Can not find aipp data node from index %u", index);
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "Can not find aipp data node from index %u", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}
}
return SUCCESS;
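
The AIPP paths now report dedicated external codes: ACL_ERROR_GE_AIPP_NOT_EXIST when the data op carries no AIPP attribute, and ACL_ERROR_GE_AIPP_MODE_INVALID when the related-mode string is unrecognized. A compact extract of the mode handling (only the branch shown in the hunk above; the earlier branches are omitted):

// Illustrative extract of the aipp related-mode mapping.
Status ParseAippRelatedMode(const std::string &data_mode, InputAippType &type) {
  if (data_mode == "dynamic_aipp_conf") {
    type = DYNAMIC_AIPP_NODE;
    return SUCCESS;
  }
  GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID, "The aipp related mode '%s' is invalid.", data_mode.c_str());
  return ACL_ERROR_GE_AIPP_MODE_INVALID;
}
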
@ -4107,8 +4108,8 @@ Status DavinciModel::GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_inpu
GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP_INPUTS) || !data_op->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGE(GE_AIPP_NOT_EXIST, "GetOrigInputInfo: there is not AIPP related with index %u.", index);
return GE_AIPP_NOT_EXIST;
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "GetOrigInputInfo: there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}
vector<std::string> inputs;
@ -4151,8 +4152,8 @@ Status DavinciModel::GetAllAippInputOutputDims(uint32_t index, std::vector<Input
GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP_INPUTS) || !data_op->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGE(GE_AIPP_NOT_EXIST, "GetAllAippInputOutputDims: there is not AIPP related with index %u.", index);
return GE_AIPP_NOT_EXIST;
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "GetAllAippInputOutputDims: there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}
vector<std::string> inputs;

@ -927,7 +927,7 @@ Status ModelManager::GetInputOutputDescInfo(const uint32_t model_id, vector<Inpu
Status ModelManager::GetDynamicBatchInfo(const uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info,
int32_t &dynamic_type) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, GE_EXEC_MODEL_ID_INVALID,
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetDynamicBatchInfo failed, Invalid model id %u!", model_id);
return davinci_model->GetDynamicBatchInfo(batch_info, dynamic_type);
@ -942,8 +942,8 @@ Status ModelManager::GetDynamicBatchInfo(const uint32_t model_id, std::vector<st
///
Status ModelManager::GetCombinedDynamicDims(const uint32_t model_id, vector<vector<int64_t>> &batch_info) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID, "GetCombinedDynamicDims Failed, Invalid Model ID %u!",
model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetCombinedDynamicDims Failed, Invalid Model ID %u!", model_id);
davinci_model->GetCombinedDynamicDims(batch_info);
return SUCCESS;
@ -959,7 +959,7 @@ Status ModelManager::GetCombinedDynamicDims(const uint32_t model_id, vector<vect
Status ModelManager::GetUserDesignateShapeOrder(const uint32_t model_id,
std::vector<std::string> &user_input_shape_order) {
auto davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID,
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetUserDesignateShapeOrder Failed, Invalid Model ID %u!", model_id)
davinci_model->GetUserDesignateShapeOrder(user_input_shape_order);
return SUCCESS;
@ -1000,7 +1000,8 @@ Status ModelManager::GetInputOutputDescInfoForZeroCopy(const uint32_t model_id,
///
Status ModelManager::GetAIPPInfo(const uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID, "GetAIPPInfo failed, invalid model_id is %u.",
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetAIPPInfo failed, invalid model_id is %u.",
model_id);
return davinci_model->GetAIPPInfo(index, aipp_info);
@ -1008,7 +1009,8 @@ Status ModelManager::GetAIPPInfo(const uint32_t model_id, uint32_t index, AippCo
Status ModelManager::GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID, "GetAIPPInfo failed, invalid model_id is %u.",
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetAIPPInfo failed, invalid model_id is %u.",
model_id);
return davinci_model->GetAippType(index, type, aipp_index);
@ -1035,7 +1037,8 @@ Status ModelManager::GenSessionId(uint64_t &session_id) {
Status ModelManager::LoadModelOffline(uint32_t &model_id, const ModelData &model, shared_ptr<ModelListener> listener,
void *dev_ptr, size_t mem_size, void *weight_ptr, size_t weight_size) {
GE_CHK_BOOL_RET_STATUS(model.key.empty() || access(model.key.c_str(), F_OK) == 0, GE_EXEC_MODEL_KEY_PATH_INVALID,
GE_CHK_BOOL_RET_STATUS(model.key.empty() || access(model.key.c_str(), F_OK) == 0,
ACL_ERROR_GE_EXEC_MODEL_KEY_PATH_INVALID,
"input key file path %s is invalid, %s", model.key.c_str(), strerror(errno));
GenModelId(&model_id);
@ -1054,8 +1057,8 @@ Status ModelManager::LoadModelOffline(uint32_t &model_id, const ModelData &model
try {
davinci_model = std::make_shared<DavinciModel>(model.priority, listener);
} catch (std::bad_alloc &) {
GELOGE(MEMALLOC_FAILED, "Make shared failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Make shared failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
} catch (...) {
GELOGE(INTERNAL_ERROR, "Make shared failed since other exception raise");
return INTERNAL_ERROR;
@ -1094,7 +1097,6 @@ Status ModelManager::LoadModelOffline(uint32_t &model_id, const ModelData &model
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, break, "DavinciInit failed.");
InsertModel(model_id, davinci_model);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(davinci_model == nullptr, ret = PARAM_INVALID; break, "Insert model failed");
GELOGI("Parse model %u success.", model_id);
@ -1122,7 +1124,7 @@ Status ModelManager::LoadModelWithQ(uint32_t &model_id, const ModelData &model_d
const std::vector<uint32_t> &input_queue_ids,
const std::vector<uint32_t> &output_queue_ids) {
GE_CHK_BOOL_RET_STATUS(model_data.key.empty() || access(model_data.key.c_str(), F_OK) == 0,
GE_EXEC_MODEL_KEY_PATH_INVALID, "input key file path %s is not valid, %s",
ACL_ERROR_GE_EXEC_MODEL_KEY_PATH_INVALID, "input key file path %s is not valid, %s",
model_data.key.c_str(), strerror(errno));
ModelHelper model_helper;
@ -1134,8 +1136,8 @@ Status ModelManager::LoadModelWithQ(uint32_t &model_id, const ModelData &model_d
shared_ptr<DavinciModel> davinci_model = MakeShared<DavinciModel>(model_data.priority, nullptr);
if (davinci_model == nullptr) {
GELOGE(MEMALLOC_FAILED, "create model failed.");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create model failed.");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
ret = davinci_model->Assign(model_helper.GetGeModel());
@ -1390,13 +1392,13 @@ Status ModelManager::GetModelMemAndWeightSize(const ModelData &model, size_t &me
auto partition_table = reinterpret_cast<ModelPartitionTable *>(model_data);
if (partition_table->num == 1) {
GELOGE(GE_EXEC_MODEL_PARTITION_NUM_INVALID, "om model is error,please use executable om model");
return GE_EXEC_MODEL_PARTITION_NUM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID, "om model is error,please use executable om model");
return ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID;
}
ModelPartition task_partition;
if (om_file_helper.GetModelPartition(ModelPartitionType::TASK_INFO, task_partition) != SUCCESS) {
GELOGE(GE_EXEC_LOAD_TASK_PARTITION_FAILED, "get task model partition failed.");
return GE_EXEC_LOAD_TASK_PARTITION_FAILED;
GELOGE(ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED, "get task model partition failed.");
return ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED;
}
std::shared_ptr<domi::ModelTaskDef> model_task_def = MakeShared<domi::ModelTaskDef>();
@ -1405,14 +1407,14 @@ Status ModelManager::GetModelMemAndWeightSize(const ModelData &model, size_t &me
}
if (task_partition.size != 0) {
if (!ReadProtoFromArray(task_partition.data, static_cast<int>(task_partition.size), model_task_def.get())) {
GELOGE(GE_EXEC_LOAD_TASK_PARTITION_FAILED, "ReadProtoFromArray failed.");
return GE_EXEC_LOAD_TASK_PARTITION_FAILED;
GELOGE(ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED, "ReadProtoFromArray failed.");
return ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED;
}
}
ModelPartition partition_weight;
ret = om_file_helper.GetModelPartition(ModelPartitionType::WEIGHTS_DATA, partition_weight);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED,
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED,
"Get weight partition failed. ret = %u", ret);
mem_size = model_task_def->memory_size();
@ -1431,7 +1433,8 @@ void ModelManager::GenModelId(uint32_t *id) {
Status ModelManager::GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID, "GetOrigInputInfo failed, invalid model_id is %u.",
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetOrigInputInfo failed, invalid model_id is %u.",
model_id);
return davinci_model->GetOrigInputInfo(index, orig_input_info);
@ -1441,7 +1444,7 @@ Status ModelManager::GetAllAippInputOutputDims(uint32_t model_id, uint32_t index
std::vector<InputOutputDims> &input_dims,
std::vector<InputOutputDims> &output_dims) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID,
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetAllAippInputOutputDims failed, invalid model_id is %u.", model_id);
return davinci_model->GetAllAippInputOutputDims(index, input_dims, output_dims);
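
Throughout model_manager.cc the "model id not found" case is promoted from the generic PARAM_INVALID to ACL_ERROR_GE_EXEC_MODEL_ID_INVALID via the same GE_CHK_BOOL_RET_STATUS macro. The repeated shape, expressed as a hypothetical ModelManager helper (requires <functional>):

// Illustrative only: resolve a model id once, then run the per-model query.
Status ModelManager::WithModel(uint32_t model_id, const std::function<Status(DavinciModel &)> &fn) {
  std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
  GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
                         "Invalid model id %u!", model_id);
  return fn(*davinci_model);
}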

@ -51,9 +51,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY SingleOp::~SingleOp() {
Status SingleOp::ValidateArgs(const std::vector<DataBuffer> &inputs, const std::vector<DataBuffer> &outputs) {
auto num_inputs = inputs.size();
if (num_inputs != input_sizes_.size()) {
GELOGE(PARAM_INVALID, "Input num mismatch. model expect %zu, but given %zu", input_addr_list_.size(),
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input num mismatch. model expect %zu, but given %zu", input_addr_list_.size(),
inputs.size());
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
for (size_t i = 0; i < num_inputs; ++i) {
@ -62,16 +62,16 @@ Status SingleOp::ValidateArgs(const std::vector<DataBuffer> &inputs, const std::
GELOGI("Input [%zu], aligned_size:%zu, inputs.length:%lu, input_sizes_:%lu",
i, aligned_size, inputs[i].length, input_sizes_[i]);
if (aligned_size < input_sizes_[i]) {
GELOGE(PARAM_INVALID, "Input size mismatch. index = %zu, model expect %zu,"
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input size mismatch. index = %zu, model expect %zu,"
" but given %zu(after align)", i, input_sizes_[i], aligned_size);
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
}
auto num_outputs = outputs.size();
if (num_outputs != output_sizes_.size()) {
GELOGE(PARAM_INVALID, "output num mismatch. model expect %zu, but given %zu", output_sizes_.size(), outputs.size());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "output num mismatch. model expect %zu, but given %zu", output_sizes_.size(), outputs.size());
return ACL_ERROR_GE_PARAM_INVALID;
}
for (size_t i = 0; i < num_outputs; ++i) {
@ -80,9 +80,9 @@ Status SingleOp::ValidateArgs(const std::vector<DataBuffer> &inputs, const std::
GELOGI("Output [%zu], aligned_size:%zu, outputs.length:%lu, output_sizes_:%lu",
i, aligned_size, outputs[i].length, output_sizes_[i]);
if (aligned_size < output_sizes_[i]) {
GELOGE(PARAM_INVALID, "Output size mismatch. index = %zu, model expect %zu,"
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Output size mismatch. index = %zu, model expect %zu,"
"but given %zu(after align)", i, output_sizes_[i], aligned_size);
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
}
@ -134,8 +134,8 @@ Status SingleOp::UpdateArgs(const std::vector<DataBuffer> &inputs, const std::ve
RT_MEMCPY_HOST_TO_DEVICE_EX,
stream_);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemcpyAsync addresses failed, ret = %d", rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "rtMemcpyAsync addresses failed, ret = %d", rt_ret);
return rt_ret;
}
} else if (task->GetOpTaskType() == OP_TASK_AICPUCC) {
GELOGD("Update aicpu_CC task args");
@ -198,29 +198,29 @@ Status DynamicSingleOp::ValidateParams(const vector<GeTensorDesc> &input_desc,
std::vector<GeTensorDesc> &output_desc,
std::vector<DataBuffer> &outputs) const {
if (inputs.size() != input_desc.size()) {
GELOGE(PARAM_INVALID,
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"Input number mismatches input desc number. Input num = %zu, input desc num = %zu",
inputs.size(),
input_desc.size());
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
if (outputs.size() != output_desc.size()) {
GELOGE(PARAM_INVALID,
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"Output number mismatches output desc number. Output num = %zu, output desc num = %zu",
outputs.size(),
output_desc.size());
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
if (input_desc.size() != num_inputs_) {
GELOGE(PARAM_INVALID, "Input number mismatches. expect %zu, but given %zu", num_inputs_, input_desc.size());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input number mismatches. expect %zu, but given %zu", num_inputs_, input_desc.size());
return ACL_ERROR_GE_PARAM_INVALID;
}
if (output_desc.size() != num_outputs_) {
GELOGE(PARAM_INVALID, "Output number mismatches. expect %zu, but given %zu", num_outputs_, output_desc.size());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Output number mismatches. expect %zu, but given %zu", num_outputs_, output_desc.size());
return ACL_ERROR_GE_PARAM_INVALID;
}
return SUCCESS;
@ -247,8 +247,8 @@ Status DynamicSingleOp::AllocateWorkspaces(const std::vector<int64_t> &workspace
GE_CHECK_NOTNULL(stream_resource);
auto ws_base = stream_resource->MallocMemory(kPurpose, static_cast<size_t>(total_size));
if (ws_base == nullptr) {
GELOGE(MEMALLOC_FAILED, "Failed to allocate memory of size: %ld", total_size);
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to allocate memory of size: %ld", total_size);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
GELOGD("Done allocating workspace memory successfully.");
@ -293,10 +293,10 @@ Status DynamicSingleOp::ExecuteAsync(const vector<GeTensorDesc> &input_desc,
} else if (op_task_->GetOpTaskType() == OP_TASK_AICPU || op_task_->GetOpTaskType() == OP_TASK_AICPUCC) {
return op_task_->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_);
} else {
GELOGE(UNSUPPORTED,
GELOGE(ACL_ERROR_GE_OP_TASK_TYPE_INVALID,
"Only TBE_Task, AI_CPU_Task and AI_CPUCC_Task are supported, but got %u",
op_task_->GetOpTaskType());
return UNSUPPORTED;
return ACL_ERROR_GE_OP_TASK_TYPE_INVALID;
}
}

@ -33,16 +33,16 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status SingleOpManager::GetOpFr
SingleOp **single_op) {
GELOGI("GetOpFromModel in. model name = %s", model_name.c_str());
if (single_op == nullptr) {
GELOGE(PARAM_INVALID, "single op is null");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "single op is null");
return ACL_ERROR_GE_INTERNAL_ERROR;
}
uintptr_t resource_id = 0;
GE_CHK_STATUS_RET(GetResourceId(stream, resource_id));
StreamResource *res = GetResource(resource_id, stream);
if (res == nullptr) {
GELOGE(MEMALLOC_FAILED, "GetResource failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "GetResource failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
SingleOp *op = res->GetOperator(model_data.model_data);
@ -109,8 +109,8 @@ Status SingleOpManager::GetDynamicOpFromModel(const string &model_name,
GE_CHK_STATUS_RET(GetResourceId(stream, resource_id));
StreamResource *res = GetResource(resource_id, stream);
if (res == nullptr) {
GELOGE(MEMALLOC_FAILED, "GetResource failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "GetResource failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
DynamicSingleOp *op = res->GetDynamicOperator(model_data.model_data);
@ -140,8 +140,8 @@ Status SingleOpManager::GetResourceId(rtStream_t stream, uintptr_t &resource_id)
rtContext_t rt_cur_ctx = nullptr;
auto rt_err = rtCtxGetCurrent(&rt_cur_ctx);
if (rt_err != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "get current context failed, runtime result is %d", static_cast<int>(rt_err));
return RT_FAILED;
GELOGE(rt_err, "get current context failed, runtime result is %d", static_cast<int>(rt_err));
return rt_err;
}
// use current context as resource key instead
GELOGI("use context as resource key instead when default stream");

@ -94,7 +94,7 @@ Status SingleOpModel::InitModelMem(StreamResource &res) {
GELOGI("total memory: %lu, zero_copy_mem: %lu", model_params_.memory_size, model_params_.zero_copy_mem_size);
model_params_.mem_base = res.MallocMemory(purpose, model_params_.memory_size - model_params_.zero_copy_mem_size);
if (model_params_.mem_base == nullptr) {
return RT_FAILED;
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
}
@ -103,7 +103,7 @@ Status SingleOpModel::InitModelMem(StreamResource &res) {
model_params_.weight_base = res.MallocWeight(purpose, model_params_.weight_size);
if (model_params_.weight_base == nullptr) {
// no need to free memory, for that was handled by StreamResources
return RT_FAILED;
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
auto weight_buffer = model_helper_.GetGeModel()->GetWeight();
@ -121,8 +121,9 @@ Status SingleOpModel::InitModelMem(StreamResource &res) {
Status SingleOpModel::ParseInputNode(const OpDescPtr &op_desc) {
vector<int64_t> offsets = op_desc->GetOutputOffset();
if (offsets.size() != kDataOutputNum) {
GELOGE(PARAM_INVALID, "Data op should have only one output, but got %zu", op_desc->GetOutputOffset().size());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"Data op should have only one output, but got %zu", op_desc->GetOutputOffset().size());
return ACL_ERROR_GE_PARAM_INVALID;
}
auto output_desc = op_desc->GetOutputDescPtr(0);
@ -158,8 +159,8 @@ Status SingleOpModel::LoadAllNodes() {
Graph graph = ge_model->GetGraph();
auto compute_graph = GraphUtils::GetComputeGraph(graph);
if (compute_graph == nullptr) {
GELOGE(PARAM_INVALID, "[%s] compute_graph is null", model_name_.c_str());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[%s] compute_graph is null", model_name_.c_str());
return ACL_ERROR_GE_INTERNAL_ERROR;
}
auto nodes = compute_graph->GetDirectNode();
@ -257,8 +258,8 @@ Status SingleOpModel::BuildTaskList(SingleOp &single_op) {
}
single_op.tasks_.emplace_back(task);
} else {
GELOGE(UNSUPPORTED, "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
return UNSUPPORTED;
GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID, "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID;
}
} else if (task_type == RT_MODEL_TASK_KERNEL_EX) {
GELOGD("Building AICPU_TF task");
@ -282,7 +283,7 @@ Status SingleOpModel::BuildTaskList(SingleOp &single_op) {
void SingleOpModel::ParseArgTable(TbeOpTask *task, SingleOp &op) {
if (task == nullptr) {
GELOGE(PARAM_INVALID, "tbe op task is nullptr");
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "tbe op task is nullptr");
return;
}
// args: addr1, addr2, addr3 ...
@ -305,14 +306,14 @@ Status SingleOpModel::BuildKernelTask(const domi::KernelDef &kernel_def, TbeOpTa
const auto &context = kernel_def.context();
auto iter = op_list_.find(context.op_index());
if (iter == op_list_.end()) {
GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
return ACL_ERROR_GE_INTERNAL_ERROR;
}
auto *tbe_task = new (std::nothrow) TbeOpTask();
if (tbe_task == nullptr) {
GELOGE(MEMALLOC_FAILED, "create tbe op task failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create tbe op task failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
auto builder = TbeTaskBuilder(model_name_, iter->second, kernel_def);
@ -331,14 +332,14 @@ Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiC
bool dynamic_flag, bool& depend_compute_flag, uint64_t session_id) {
auto iter = op_list_.find(kernel_def.op_index());
if (iter == op_list_.end()) {
GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", kernel_def.op_index());
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "op desc not found. op index = %u", kernel_def.op_index());
return ACL_ERROR_GE_INTERNAL_ERROR;
}
std::unique_ptr<AiCpuTask> aicpu_task(new (std::nothrow) AiCpuTask());
if (aicpu_task == nullptr) {
GELOGE(MEMALLOC_FAILED, "create aicpu_TF op task failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create aicpu_TF op task failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
auto builder = AiCpuTaskBuilder(iter->second->GetOpDesc(), kernel_def);
auto ret = builder.BuildTask(*aicpu_task, model_params_, dynamic_flag, session_id);
@ -356,13 +357,13 @@ Status SingleOpModel::BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTa
const auto &context = kernel_def.context();
auto iter = op_list_.find(context.op_index());
if (iter == op_list_.end()) {
GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
return ACL_ERROR_GE_INTERNAL_ERROR;
}
std::unique_ptr<AiCpuCCTask> aicpucc_task(new (std::nothrow) AiCpuCCTask());
if (aicpucc_task == nullptr) {
GELOGE(MEMALLOC_FAILED, "create aicpu_CC op task failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create aicpu_CC op task failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
auto builder = AiCpuCCTaskBuilder(iter->second->GetOpDesc(), kernel_def);
@ -398,8 +399,9 @@ Status SingleOpModel::BuildModelTaskKernel(const TaskDef &task_def, DynamicSingl
GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task));
single_op.op_task_.reset(task);
} else {
GELOGE(UNSUPPORTED, "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
return UNSUPPORTED;
GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID,
"Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID;
}
return SUCCESS;
}
@ -422,8 +424,8 @@ Status SingleOpModel::BuildTaskListForDynamicOp(DynamicSingleOp &single_op) {
GE_CHK_STATUS_RET_NOLOG(BuildModelTaskKernel(task_def, single_op));
} else if (task_type == RT_MODEL_TASK_KERNEL_EX) {
if (single_op.op_task_ != nullptr) {
GELOGE(UNSUPPORTED, "Do not support dynamic op with multiple tasks.");
return UNSUPPORTED;
GELOGE(ACL_ERROR_GE_OP_TASK_TYPE_INVALID, "Do not support dynamic op with multiple tasks.");
return ACL_ERROR_GE_OP_TASK_TYPE_INVALID;
}
GELOGD("Building AICPU_TF task");
AiCpuTask *aicpu_task = nullptr;
@ -434,8 +436,8 @@ Status SingleOpModel::BuildTaskListForDynamicOp(DynamicSingleOp &single_op) {
depend_compute_flag, dynamic_singleop_sessionid));
if (depend_compute_flag) {
if (i >= tasks.size() - 1) {
GELOGE(FAILED, "The copy task of the fourth operator was not found.");
return FAILED;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "The copy task of the fourth operator was not found.");
return ACL_ERROR_GE_PARAM_INVALID;
}
++i;
const TaskDef &copy_task_def = tasks[i];

@ -160,8 +160,8 @@ Status StreamResource::BuildOperator(const string &model_name, const ModelData &
auto new_op = std::unique_ptr<SingleOp>(new(std::nothrow) SingleOp(&stream_mu_, stream_));
if (new_op == nullptr) {
GELOGE(MEMALLOC_FAILED, "new SingleOp failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "new SingleOp failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
GELOGI("To build operator: %s", model_name.c_str());

@ -25,20 +25,20 @@ AiCpuCCTaskBuilder::AiCpuCCTaskBuilder(const OpDescPtr &op_desc, const domi::Ker
Status AiCpuCCTaskBuilder::SetKernelArgs(AiCpuCCTask &task) {
size_t aicpu_arg_size = kernel_def_.args_size();
if (aicpu_arg_size <= 0) {
GELOGE(RT_FAILED, "aicpu_arg_size is invalid, value = %zu", aicpu_arg_size);
return RT_FAILED;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "aicpu_arg_size is invalid, value = %zu", aicpu_arg_size);
return ACL_ERROR_GE_PARAM_INVALID;
}
std::unique_ptr<uint8_t[]> aicpu_args;
aicpu_args.reset(new(std::nothrow) uint8_t[aicpu_arg_size]());
if (aicpu_args == nullptr) {
GELOGE(RT_FAILED, "malloc failed, size = %zu", aicpu_arg_size);
return RT_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "malloc failed, size = %zu", aicpu_arg_size);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
auto err = memcpy_s(aicpu_args.get(), aicpu_arg_size, kernel_def_.args().data(), aicpu_arg_size);
if (err != EOK) {
GELOGE(RT_FAILED, "memcpy_s args failed, size = %zu, err = %d", aicpu_arg_size, err);
return RT_FAILED;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "memcpy_s args failed, size = %zu, err = %d", aicpu_arg_size, err);
return ACL_ERROR_GE_INTERNAL_ERROR;
}
task.SetIoAddr(aicpu_args.get() + sizeof(aicpu::AicpuParamHead));
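
In SetKernelArgs above, what used to be a single RT_FAILED is split into three more precise codes: a non-positive args size becomes ACL_ERROR_GE_PARAM_INVALID, a failed host allocation becomes ACL_ERROR_GE_MEMORY_ALLOCATION, and a memcpy_s failure becomes ACL_ERROR_GE_INTERNAL_ERROR. The same three-way classification as a standalone sketch (the helper is hypothetical):

// Illustrative classification of the failure cases above.
Status CopyKernelArgs(std::unique_ptr<uint8_t[]> &dst, const std::string &src) {
  if (src.empty()) {
    return ACL_ERROR_GE_PARAM_INVALID;      // unusable caller input
  }
  dst.reset(new (std::nothrow) uint8_t[src.size()]());
  if (dst == nullptr) {
    return ACL_ERROR_GE_MEMORY_ALLOCATION;  // host allocation failed
  }
  if (memcpy_s(dst.get(), src.size(), src.data(), src.size()) != EOK) {
    return ACL_ERROR_GE_INTERNAL_ERROR;     // unexpected library failure
  }
  return SUCCESS;
}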

@ -30,8 +30,8 @@ namespace ge {
size_t arg_size = kernel_def_.args_size();
auto rt_ret = rtMalloc(io_addr, arg_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMalloc failed, size = %zu, ret = %d", arg_size, rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "rtMalloc failed, size = %zu, ret = %d", arg_size, rt_ret);
return rt_ret;
}
const void *src_addr = reinterpret_cast<const void *>(addresses.data());
@ -39,8 +39,8 @@ namespace ge {
rt_ret = rtMemcpy(*io_addr, arg_size, src_addr, src_len, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
(void)rtFree(*io_addr);
GELOGE(RT_FAILED, "rtMemcpy addresses failed, ret = %d", rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "rtMemcpy addresses failed, ret = %d", rt_ret);
return rt_ret;
}
return SUCCESS;
@ -50,8 +50,8 @@ namespace ge {
auto sec_ret = memcpy_s(&fwk_op_kernel, sizeof(STR_FWK_OP_KERNEL),
kernel_def_.args().data(), kernel_def_.args().size());
if (sec_ret != EOK) {
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "memcpy failed, ret: %d", sec_ret);
return ACL_ERROR_GE_INTERNAL_ERROR;
}
auto io_addr_val = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(io_addr));
@ -65,16 +65,16 @@ namespace ge {
void *fwk_op_args = nullptr;
auto rt_ret = rtMalloc(&fwk_op_args, sizeof(STR_FWK_OP_KERNEL), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "malloc arg memory failed, ret = %d", rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "malloc arg memory failed, ret = %d", rt_ret);
return rt_ret;
}
rt_ret = rtMemcpy(fwk_op_args, sizeof(STR_FWK_OP_KERNEL), &fwk_op_kernel,
sizeof(STR_FWK_OP_KERNEL), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
(void)rtFree(fwk_op_args);
GELOGE(RT_FAILED, "copy args failed, ret = %d", rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "copy args failed, ret = %d", rt_ret);
return rt_ret;
}
*args = fwk_op_args;
return SUCCESS;
@ -83,9 +83,9 @@ namespace ge {
Status AiCpuTaskBuilder::InitWorkspaceAndIO(void **io_addr, void **kernel_workspace,
const SingleOpModelParam &param, bool dynamic_flag) {
if (kernel_def_.args_size() > sizeof(STR_FWK_OP_KERNEL)) {
GELOGE(PARAM_INVALID, "sizeof STR_FWK_OP_KERNEL is: %lu, but args_size is: %d",
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "sizeof STR_FWK_OP_KERNEL is: %lu, but args_size is: %d",
sizeof(STR_FWK_OP_KERNEL), kernel_def_.args_size());
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
auto addresses = BuildTaskUtils::GetAddresses(op_desc_, param);
auto ws_addr_vec = addresses.at(BuildTaskUtils::kAddressIndexWorkspace);
@ -94,8 +94,8 @@ namespace ge {
GE_CHK_RT_RET(rtMalloc(kernel_workspace, kernel_def_.task_info_size(), RT_MEMORY_HBM));
} else {
if (ws_addr_vec.empty()) {
GELOGE(PARAM_INVALID, "workspace Data Address is empty.");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "workspace Data Address is empty.");
return ACL_ERROR_GE_PARAM_INVALID;
}
*kernel_workspace = ws_addr_vec[0];
}
@ -143,8 +143,8 @@ namespace ge {
GELOGI("Begin to CreateAicpuSession, session id: %lu", session_id);
GE_CHECK_NOTNULL(ModelManager::GetInstance());
GE_IF_BOOL_EXEC(ModelManager::GetInstance()->CreateAicpuSession(session_id) != SUCCESS,
GELOGE(FAILED, "CreateAicpuSession error. session id: %lu", session_id);
return FAILED;)
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "CreateAicpuSession error. session id: %lu", session_id);
return ACL_ERROR_GE_INTERNAL_ERROR;)
ret = SetKernelArgs(&task.args_, fwk_op_kernel);
if (ret != SUCCESS) {
return ret;

@ -693,8 +693,8 @@ Status AiCpuCCTask::LaunchKernel(rtStream_t stream) {
block_dim_, args_.get(), static_cast<uint32_t>(arg_size_),
sm_desc, stream, dump_flag_);
if (ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "Invoke rtCpuKernelLaunch failed. ret = %d", ret);
return RT_FAILED;
GELOGE(ret, "Invoke rtCpuKernelLaunch failed. ret = %d", ret);
return ret;
}
GELOGD("Invoke rtCpuKernelLaunch succeeded");

@ -91,9 +91,9 @@ Status TbeTaskBuilder::DoRegisterBinary(const OpKernelBin &kernel_bin, void **bi
binary.magic = param.core_type == 0 ? RT_DEV_BINARY_MAGIC_ELF : RT_DEV_BINARY_MAGIC_ELF_AIVEC;
auto ret = rtDevBinaryRegister(&binary, bin_handle);
if (ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtDevBinaryRegister failed, bin key = %s, core_type = %ld, rt ret = %d", stub_name_.c_str(),
GELOGE(ret, "rtDevBinaryRegister failed, bin key = %s, core_type = %ld, rt ret = %d", stub_name_.c_str(),
param.core_type, static_cast<int>(ret));
return RT_FAILED;
return ret;
}
return SUCCESS;
@ -106,9 +106,9 @@ Status TbeTaskBuilder::DoRegisterMeta(void *bin_handle) {
if (!meta_data.empty()) {
auto rt_ret = rtMetadataRegister(bin_handle, meta_data.c_str());
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMetadataRegister failed. bin key = %s, meta_data = %s, rt ret = %d", stub_name_.c_str(),
GELOGE(rt_ret, "rtMetadataRegister failed. bin key = %s, meta_data = %s, rt ret = %d", stub_name_.c_str(),
meta_data.c_str(), static_cast<int>(rt_ret));
return RT_FAILED;
return rt_ret;
}
}
@ -118,9 +118,9 @@ Status TbeTaskBuilder::DoRegisterMeta(void *bin_handle) {
Status TbeTaskBuilder::DoRegisterFunction(void *bin_handle, const char *stub_name, const char *kernel_name) {
auto rt_ret = rtFunctionRegister(bin_handle, stub_name, stub_name, kernel_name, FUNC_MODE_NORMAL);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtFunctionRegister failed. bin key = %s, kernel name = %s, rt ret = %d", stub_name, kernel_name,
GELOGE(rt_ret, "rtFunctionRegister failed. bin key = %s, kernel name = %s, rt ret = %d", stub_name, kernel_name,
static_cast<int>(rt_ret));
return RT_FAILED;
return rt_ret;
}
return SUCCESS;
@ -173,14 +173,14 @@ Status TbeTaskBuilder::RegisterKernel(TbeOpTask &task, const SingleOpModelParam
auto tbe_kernel = GetTbeKernel(op_desc_);
if (tbe_kernel == nullptr) {
GELOGE(PARAM_INVALID, "OP EXT ATTR NAME TBE_KERNEL not found. op = %s", op_desc_->GetName().c_str());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "OP EXT ATTR NAME TBE_KERNEL not found. op = %s", op_desc_->GetName().c_str());
return ACL_ERROR_GE_INTERNAL_ERROR;
}
auto holder = std::unique_ptr<KernelHolder>(new (std::nothrow) KernelHolder(stub_func, tbe_kernel));
if (holder == nullptr) {
GELOGE(MEMALLOC_FAILED, "create KernelHodler failed.");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create KernelHodler failed.");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
void *bin_handle = nullptr;
@ -189,8 +189,8 @@ Status TbeTaskBuilder::RegisterKernel(TbeOpTask &task, const SingleOpModelParam
holder->SetBinHandle(bin_handle);
if (!registry.AddKernel(stub_name_, std::move(holder))) {
// should not happen. only one thread can reach here
GELOGE(INTERNAL_ERROR, "Add kernel failed. stub name = %s", stub_name_.c_str());
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "Add kernel failed. stub name = %s", stub_name_.c_str());
return ACL_ERROR_GE_INTERNAL_ERROR;
}
}
}
@ -218,15 +218,15 @@ Status TbeTaskBuilder::GetSmDesc(void **sm_desc, const SingleOpModelParam &param
auto rtRet = rtMemAllocManaged(sm_desc, sm_desc_str.size(), RT_MEMORY_SPM);
if (rtRet != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemAllocManaged failed, ret: %d", static_cast<int>(rtRet));
return RT_FAILED;
GELOGE(rtRet, "rtMemAllocManaged failed, ret: %d", static_cast<int>(rtRet));
return rtRet;
}
rtRet = rtMemcpy(*sm_desc, sm_desc_str.size(), sm_desc_str.data(), sm_desc_str.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rtRet != RT_ERROR_NONE) {
(void)rtMemFreeManaged(*sm_desc);
GELOGE(RT_FAILED, "rtMemcpy, ret: %d", static_cast<int>(rtRet));
return RT_FAILED;
GELOGE(rtRet, "rtMemcpy, ret: %d", static_cast<int>(rtRet));
return rtRet;
}
}
@ -240,8 +240,8 @@ Status TbeTaskBuilder::SetKernelArgs(TbeOpTask &task, const SingleOpModelParam &
auto rtRet = rtMemcpy(args.get(), arg_size, kernel_def_.args().data(), arg_size, RT_MEMCPY_HOST_TO_HOST);
if (rtRet != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemcpy args failed, size = %zu, ret = %d", arg_size, static_cast<int>(rtRet));
return RT_FAILED;
GELOGE(rtRet, "rtMemcpy args failed, size = %zu, ret = %d", arg_size, static_cast<int>(rtRet));
return rtRet;
}
const domi::KernelContext &context = kernel_def_.context();
@ -259,8 +259,8 @@ Status TbeTaskBuilder::SetKernelArgs(TbeOpTask &task, const SingleOpModelParam &
uint64_t src_len = sizeof(void *) * tensor_device_addr_vec.size();
rtRet = rtMemcpy(args.get() + offset, arg_size - offset, src_addr, src_len, RT_MEMCPY_HOST_TO_HOST);
if (rtRet != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemcpy addresses failed, ret = %d", static_cast<int>(rtRet));
return RT_FAILED;
GELOGE(rtRet, "rtMemcpy addresses failed, ret = %d", static_cast<int>(rtRet));
return rtRet;
}
}
@ -285,8 +285,8 @@ Status TbeTaskBuilder::BuildTask(TbeOpTask &task, const SingleOpModelParam &para
void *stub_func = nullptr;
auto rtRet = rtGetFunctionByName(stub_name_.c_str(), &stub_func);
if (rtRet != SUCCESS) {
GELOGE(RT_FAILED, "rtGetFunctionByName failed.");
return RT_FAILED;
GELOGE(rtRet, "rtGetFunctionByName failed.");
return rtRet;
}
task.SetStubFunc(stub_name_, stub_func);
@ -299,8 +299,8 @@ Status TbeTaskBuilder::InitTilingInfo(TbeOpTask &task) {
(void)AttrUtils::GetInt(op_desc_, kAttrOpParamSize, max_size);
GELOGD("Got op param size by key: %s, ret = %ld", kAttrOpParamSize, max_size);
if (max_size <= 0) {
GELOGE(PARAM_INVALID, "[%s] Invalid op_param_size: %ld.", op_desc_->GetName().c_str(), max_size);
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[%s] Invalid op_param_size: %ld.", op_desc_->GetName().c_str(), max_size);
return ACL_ERROR_GE_PARAM_INVALID;
}
void *tiling_buffer = nullptr;

@ -19,6 +19,7 @@
#include <map>
#include <string>
#include "ge_error_codes.h"
namespace ge {
class StatusFactory {
@ -66,11 +67,47 @@ class ErrorNoRegisterar {
((0xFF & (static_cast<uint8_t>(modid))) << 12) | (0x0FFF & (static_cast<uint16_t>(value))); \
const ErrorNoRegisterar g_##name##_errorno(name, desc);
#define GE_ERRORNO_EXTERNAL(name, desc) const ErrorNoRegisterar g_##name##_errorno(name, desc);
using Status = uint32_t;
// General error code
GE_ERRORNO(0, 0, 0, 0, 0, SUCCESS, 0, "success");
GE_ERRORNO(0b11, 0b11, 0b111, 0xFF, 0b11111, FAILED, 0xFFF, "failed"); /*lint !e401*/
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_PARAM_INVALID, "Parameter invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_NOT_INIT, "GE executor not initialized yet.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "Model file path invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, "Model id invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_KEY_PATH_INVALID, "Model key path invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION, "Model does not support encryption.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "Data size of model invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID, "Model addr invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Queue id of model invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED, "The model loaded repeatedly.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID, "Model partition num invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, "Dynamic input size invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "Dynamic batch size invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_BATCH_EMPTY, "AIPP batch parameter empty.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_NOT_EXIST, "AIPP parameter not exist.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_MODE_INVALID, "AIPP mode invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_OP_TASK_TYPE_INVALID, "Task type invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID, "Kernel type invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_MEMORY_ALLOCATION, "Memory allocation error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_INTERNAL_ERROR, "Internal error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_LOAD_MODEL, "Load model error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED, "Failed to load model partition.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED, "Failed to load weight partition.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED, "Failed to load task partition.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED, "Failed to load op kernel partition.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA, "Failed to release the model data.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_COMMAND_HANDLE, "Command handle error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_GET_TENSOR_INFO, "Get tensor info error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_UNLOAD_MODEL, "Unload model error.");
} // namespace ge
#endif // INC_EXTERNAL_GE_GE_API_ERROR_CODES_H_
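
The header above now offers two registration paths: GE_ERRORNO composes an internal status value from a set of bit fields (modid, value, and so on), whereas GE_ERRORNO_EXTERNAL only attaches a description to a numeric constant that is already fixed in ge_error_codes.h (added below). A hypothetical follow-up, shown purely for illustration, would therefore touch both headers:

// NOT part of this commit; the value and name are invented for illustration.
static const uint32_t ACL_ERROR_GE_EXAMPLE_INVALID = 145019;                       // ge_error_codes.h
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXAMPLE_INVALID, "Example parameter invalid.");   // ge_api_error_codes.h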

@ -0,0 +1,58 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_EXTERNAL_GE_GE_ERROR_CODES_H_
#define INC_EXTERNAL_GE_GE_ERROR_CODES_H_
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
static const uint32_t ACL_ERROR_GE_PARAM_INVALID = 145000;
static const uint32_t ACL_ERROR_GE_EXEC_NOT_INIT = 145001;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID = 145002;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ID_INVALID = 145003;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_KEY_PATH_INVALID = 145004;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION = 145005;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID = 145006;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID = 145007;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID = 145008;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED = 145009;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID = 145010;
static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID = 145011;
static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID = 145012;
static const uint32_t ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID = 145013;
static const uint32_t ACL_ERROR_GE_AIPP_BATCH_EMPTY = 145014;
static const uint32_t ACL_ERROR_GE_AIPP_NOT_EXIST = 145015;
static const uint32_t ACL_ERROR_GE_AIPP_MODE_INVALID = 145016;
static const uint32_t ACL_ERROR_GE_OP_TASK_TYPE_INVALID = 145017;
static const uint32_t ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID = 145018;
static const uint32_t ACL_ERROR_GE_MEMORY_ALLOCATION = 245000;
static const uint32_t ACL_ERROR_GE_INTERNAL_ERROR = 545000;
static const uint32_t ACL_ERROR_GE_LOAD_MODEL = 545001;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED = 545002;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED = 545003;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED = 545004;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED = 545005;
static const uint32_t ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA = 545006;
static const uint32_t ACL_ERROR_GE_COMMAND_HANDLE = 545007;
static const uint32_t ACL_ERROR_GE_GET_TENSOR_INFO = 545008;
static const uint32_t ACL_ERROR_GE_UNLOAD_MODEL = 545009;
#ifdef __cplusplus
} // extern "C"
#endif
#endif // INC_EXTERNAL_GE_GE_ERROR_CODES_H_
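
The new C-compatible header pins the numeric values; they appear to be grouped by class, with 1450xx used for invalid parameters and misuse, 245000 for memory allocation, and 5450xx for internal failures. A small caller-side sketch in C++ (the function name and mapping are illustrative):

#include <cstdint>
#include "ge_error_codes.h"

// Illustrative only: classify a status value returned by a GE executor call.
const char *ClassifyGeStatus(uint32_t status) {
  switch (status) {
    case ACL_ERROR_GE_EXEC_MODEL_ID_INVALID:
      return "caller error: unknown model id";
    case ACL_ERROR_GE_MEMORY_ALLOCATION:
      return "resource error: memory allocation failed";
    case ACL_ERROR_GE_INTERNAL_ERROR:
      return "internal GE error";
    default:
      return "other GE status";
  }
}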

@ -1 +1 @@
Subproject commit be949d5ff32baec332aa8765d2b211334ae84dbf
Subproject commit ba04e25e878af2ac5f9a697806daee0768ae3bad

@ -1 +1 @@
Subproject commit d865fa6e67c00c536e6df2f86d4912c1f1feff4c
Subproject commit 308e3587ec54fdd32ed7113d64a1335208701f59