@@ -51,8 +51,13 @@ void HybridModelAsyncExecutor::SetModelName(const string &model_name) {
 }
 
 Status HybridModelAsyncExecutor::EnqueueData(const shared_ptr<InputDataWrapper> &data) {
-  GE_CHK_STATUS_EXEC(data_inputer_->Push(data), return domi::DATA_QUEUE_ISFULL,
-                     "[Push][Data] Data queue is full, please call again later, model_id %u ", model_id_);
+  if (data_inputer_->Push(data) != SUCCESS) {
+    REPORT_CALL_ERROR("E19999", "Data queue is full, please call again later when %s, model_id %u.",
+                      __FUNCTION__, model_id_);
+    GELOGE(domi::DATA_QUEUE_ISFULL,
+           "[Push][Data] Data queue is full, please call again later, model_id %u ", model_id_);
+    return domi::DATA_QUEUE_ISFULL;
+  }
   GELOGD("EnqueueData successfully. model_id = %u, data_index = %u", data->GetInput().model_id, data->GetInput().index);
   return SUCCESS;
 }
@@ -60,9 +65,12 @@ Status HybridModelAsyncExecutor::EnqueueData(const shared_ptr<InputDataWrapper>
 Status HybridModelAsyncExecutor::Start(const std::shared_ptr<ModelListener> &listener) {
   GELOGD("HybridModelExecutor::Start IN, has listener = %d", listener != nullptr);
   std::lock_guard<std::mutex> lk(mu_);
-  GE_CHK_BOOL_RET_STATUS(!run_flag_, INTERNAL_ERROR,
-                         "[Check][RunState] Model already started when HybridModelAsyncExecutor %s.", __FUNCTION__);
-
+  if (run_flag_) {
+    REPORT_INNER_ERROR("E19999", "Model already started when HybridModelAsyncExecutor %s, model_id:%u.",
+                       __FUNCTION__, model_id_);
+    GELOGE(INTERNAL_ERROR, "[Check][RunState] Model already started, model_id:%u.", model_id_);
+    return INTERNAL_ERROR;
+  }
   run_flag_ = true;
   listener_ = listener;
   future_ = std::async(std::launch::async, [&]() -> Status {
@@ -73,7 +81,7 @@ Status HybridModelAsyncExecutor::Start(const std::shared_ptr<ModelListener> &lis
   });
 
   GE_CHK_BOOL_RET_STATUS(future_.valid(), INTERNAL_ERROR,
-                         "[Check][RunState] Failed to start when HybridModelAsyncExecutor %s.", __FUNCTION__);
+                         "[Check][RunState] Failed to start, model_id:%u.", model_id_);
   GELOGD("HybridModelExecutor::Start successfully");
   return SUCCESS;
 }
@@ -108,8 +116,8 @@ Status HybridModelAsyncExecutor::Init() {
   executor_ = std::unique_ptr<HybridModelExecutor>(new(std::nothrow) HybridModelExecutor(model_, device_id_, stream_));
   GE_CHECK_NOTNULL(executor_);
   GE_CHK_STATUS_RET(executor_->Init(),
-                    "[Init][HybridModelExecutor] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
-  GE_CHK_STATUS_RET(DumpOpDebug(), "[Dump][OpDebug] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
+                    "[Init][HybridModelExecutor] failed, model_id:%u.", model_id_);
+  GE_CHK_STATUS_RET(DumpOpDebug(), "[Dump][OpDebug] failed, model_id:%u.", model_id_);
 
   GELOGI("HybridModel stage nums:%zu", model_->GetRootGraphItem()->NumGroups());
   if (model_->GetRootGraphItem()->NumGroups() >= kMinimumPiplineStages) {
@@ -117,19 +125,19 @@ Status HybridModelAsyncExecutor::Init() {
         std::unique_ptr<HybridModelPipelineExecutor>(new(std::nothrow) HybridModelPipelineExecutor(model_, device_id_));
     GE_CHECK_NOTNULL(pipe_executor_);
     GE_CHK_STATUS_RET(pipe_executor_->Init(),
-                      "[Init][HybridModelPipelineExecutor] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
+                      "[Init][HybridModelPipelineExecutor] failed, model_id:%u.", model_id_);
   }
 
-  GE_CHK_STATUS_RET(InitInputDesc(), "[Init][InputDesc] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
+  GE_CHK_STATUS_RET(InitInputDesc(), "[Init][InputDesc] failed, model_id:%u.", model_id_);
 
   return SUCCESS;
 }
 
 Status HybridModelAsyncExecutor::PreRun(InputData &current_data, HybridModelExecutor::ExecuteArgs &args) {
-  GE_CHK_STATUS_RET(SyncVarData(), "[Invoke][SyncVarData] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
+  GE_CHK_STATUS_RET(SyncVarData(), "[Invoke][SyncVarData] failed, model_id:%u.", model_id_);
   RECORD_MODEL_EXECUTION_EVENT(executor_->GetContext(), "[SyncVarData] End");
   GE_CHK_STATUS_RET(PrepareInputs(current_data, args),
-                    "[Invoke][PrepareInputs] failed to copy input data to model when HybridModelAsyncExecutor %s.", __FUNCTION__);
+                    "[Invoke][PrepareInputs] failed to copy input data to model, model_id:%u.", model_id_);
   RECORD_MODEL_EXECUTION_EVENT(executor_->GetContext(), "[CopyInputData] End");
   return SUCCESS;
 }
@@ -160,7 +168,7 @@ Status HybridModelAsyncExecutor::RunInternal() {
     GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
       ret != SUCCESS, (void) HandleResult(ret, current_data.index, args, data_wrapper->GetOutput());
       CsaInteract::GetInstance().StoreInternalErrorCode(ret, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
-      continue, "[Invoke][PreRun] failed when HybridModelAsyncExecutor %s.", __FUNCTION__); // [No need to check value]
+      continue, "[Invoke][PreRun] failed, model_id:%u.", model_id_); // [No need to check value]
 
     if (pipe_executor_ != nullptr) {
       GELOGI("HybridModel will execute in pipeline mode");
@@ -204,9 +212,7 @@ Status HybridModelAsyncExecutor::HandleResult(Status exec_ret,
   }
 
   if (exec_ret != SUCCESS) {
-    GELOGE(exec_ret,
-           "[Check][Param:Status] failed to execute graph when HybridModelAsyncExecutor %s. model_id = %u",
-           __FUNCTION__, model_id_);
+    GELOGE(exec_ret, "[Check][Param:Status] failed to execute graph. model_id = %u", model_id_);
     REPORT_INNER_ERROR("E19999",
                        "failed to execute graph when HybridModelAsyncExecutor %s. model_id = %u", __FUNCTION__, model_id_);
     return OnComputeDone(data_id, INTERNAL_ERROR, output_tensor_info_list);
@@ -245,11 +251,11 @@ Status HybridModelAsyncExecutor::SyncVarData() {
 Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, HybridModelExecutor::ExecuteArgs &args) {
   if (current_data.blobs.size() < input_tensor_desc_.size()) {
     GELOGE(PARAM_INVALID,
-           "[Check][Size]Blob size mismatches, expect at least %zu, but got %zu when HybridModelAsyncExecutor %s.",
-           input_tensor_desc_.size(), current_data.blobs.size(), __FUNCTION__);
+           "[Check][Size]Blob size mismatches, expect at least %zu, but got %zu, model_id = %u.",
+           input_tensor_desc_.size(), current_data.blobs.size(), model_id_);
     REPORT_INNER_ERROR("E19999",
-                       "Blob size mismatches, expect at least %zu, but got %zu when HybridModelAsyncExecutor %s.",
-                       input_tensor_desc_.size(), current_data.blobs.size(), __FUNCTION__);
+                       "Blob size mismatches, expect at least %zu, but got %zu when HybridModelAsyncExecutor %s, model_id = %u.",
+                       input_tensor_desc_.size(), current_data.blobs.size(), __FUNCTION__, model_id_);
     return PARAM_INVALID;
   }
 
@@ -262,11 +268,11 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
     if (is_input_dynamic_[input_index]) {
       if (input_index >= current_data.shapes.size()) {
         GELOGE(PARAM_INVALID,
-               "[Check][Range]Shape index out of range, index = %zu, shape size = %zu when HybridModelAsyncExecutor %s.",
-               input_index, current_data.shapes.size(), __FUNCTION__);
+               "[Check][Range]Shape index out of range, index = %zu, shape size = %zu, model_id = %u.",
+               input_index, current_data.shapes.size(), model_id_);
         REPORT_INNER_ERROR("E19999",
-                           "Shape index out of range, index = %zu, shape size = %zu when HybridModelAsyncExecutor %s.",
-                           input_index, current_data.shapes.size(), __FUNCTION__);
+                           "Shape index out of range, index = %zu, shape size = %zu when HybridModelAsyncExecutor %s, model_id = %u.",
+                           input_index, current_data.shapes.size(), __FUNCTION__, model_id_);
         return PARAM_INVALID;
       }
       auto &tensor_desc = input_tensor_desc_[input_index];
@@ -274,7 +280,7 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
       std::vector<std::pair<int64_t, int64_t>> range;
       auto range_ret = tensor_desc->GetShapeRange(range);
       GE_CHK_BOOL_RET_STATUS(range_ret == GRAPH_SUCCESS, INTERNAL_ERROR,
-                             "[Invoke][GetShapeRange] failed, ret=%u.", range_ret);
+                             "[Invoke][GetShapeRange] failed, ret=%u, model_id = %u.", range_ret, model_id_);
       for (size_t k = 0; k < range.size(); ++k) {
         if (k >= shape.GetDimNum()) {
           break;
@@ -282,11 +288,11 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
         // range[k].second can be -1
         if (shape.GetDim(k) < range[k].first || (range[k].second >= 0 && shape.GetDim(k) > range[k].second)) {
           GELOGE(PARAM_INVALID,
-                 "[Check][Range]Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld]",
-                 input_index, k, shape.GetDim(k), range[k].first, range[k].second);
+                 "[Check][Range]Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld], model_id = %u.",
+                 input_index, k, shape.GetDim(k), range[k].first, range[k].second, model_id_);
           REPORT_INNER_ERROR("E19999",
-                             "Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld]",
-                             input_index, k, shape.GetDim(k), range[k].first, range[k].second);
+                             "Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld], model_id = %u.",
+                             input_index, k, shape.GetDim(k), range[k].first, range[k].second, model_id_);
           return PARAM_INVALID;
         }
       }
@@ -294,8 +300,8 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
       args.input_desc[input_index] = tensor_desc;
       GELOGD("Update shape of input[%zu] to [%s]", input_index, tensor_desc->MutableShape().ToString().c_str());
       GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorMemorySizeInBytes(*tensor_desc, tensor_size),
-                              "[Invoke][GetTensorMemorySizeInBytes]Failed to calc tensor size, index = %zu, shape = [%s]",
-                              input_index, tensor_desc->GetShape().ToString().c_str());
+                              "[Invoke][GetTensorMemorySizeInBytes]Failed to calc tensor size, index = %zu, shape = [%s], model_id = %u.",
+                              input_index, tensor_desc->GetShape().ToString().c_str(), model_id_);
       GELOGD("Input tensor[%zu] size = %zu", input_index, tensor_size);
     }
 
@@ -311,12 +317,16 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
     GELOGD("To copy input data for input[%zu]", input_index);
     const DataBuffer &data_buf = blobs[input_index];
     auto mem_size = static_cast<uint64_t>(tensor_size);
-    GE_CHK_BOOL_RET_STATUS(mem_size >= data_buf.length,
-                           PARAM_INVALID,
-                           "[Check][Size]input data size(%lu) does not match model required size(%lu), ret failed.",
-                           data_buf.length,
-                           mem_size);
-
+    if (mem_size < data_buf.length) {
+      REPORT_INNER_ERROR("E19999",
+                         "input data size(%lu) does not match model required size(%lu) when %s, ret failed, model_id = %u.",
+                         data_buf.length, mem_size, __FUNCTION__, model_id_);
+      GELOGE(PARAM_INVALID,
+             "[Check][Size]input data size(%lu) does not match model required size(%lu), ret failed, model_id = %u.",
+             data_buf.length, mem_size, model_id_);
+      return PARAM_INVALID;
+    }
+
     if (data_buf.length > 0) {
       GELOGI("[IMAS]CopyPlainData memcpy graph_%u type[F] output[%zu] memaddr[%p] mem_size[%zu] datasize[%lu]",
              model_->root_runtime_param_.graph_id,
@@ -371,7 +381,7 @@ Status HybridModelAsyncExecutor::OnComputeDone(uint32_t data_index, uint32_t res
   GELOGD("OnComputeDone. model id = %u, data index = %u, execution ret = %u", model_id_, data_index, result_code);
   if (listener_ != nullptr) {
     GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_index, result_code, outputs),
-                  "[Invoke][OnComputeDone] failed.");
+                  "[Invoke][OnComputeDone] failed, model_id = %u.", model_id_);
   }
 
   return result_code;
@@ -385,12 +395,11 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a
   std::vector<TensorValue> &output_tensors = args.outputs;
   if (output_tensor_desc_list.size() != output_tensors.size()) {
     GELOGE(INTERNAL_ERROR,
-           "[Check][Size]Output sizes mismatch. From op_desc = %zu, and from output tensors = %zu "
-           "when HybridModelAsyncExecutor %s.",
-           output_tensor_desc_list.size(), output_tensors.size(), __FUNCTION__);
-    REPORT_INNER_ERROR("E19999", "Output sizes mismatch. From op_desc = %zu, and from output tensors = %zu "
-                       "when HybridModelAsyncExecutor %s.",
-                       output_tensor_desc_list.size(), output_tensors.size(), __FUNCTION__);
+           "[Check][Size]Output sizes mismatch. From op_desc = %zu, and from output tensors = %zu, model_id = %u.",
+           output_tensor_desc_list.size(), output_tensors.size(), model_id_);
+    REPORT_INNER_ERROR("E19999", "Output sizes mismatch. From op_desc = %zu, and from output tensors = %zu, "
+                       "when HybridModelAsyncExecutor %s, model_id = %u.",
+                       output_tensor_desc_list.size(), output_tensors.size(), __FUNCTION__, model_id_);
     return INTERNAL_ERROR;
   }
 
@@ -422,10 +431,12 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a
     GE_CHECK_LE(output_size, UINT32_MAX);
     if (output_tensor.GetSize() < static_cast<size_t>(output_size)) {
       GELOGE(INTERNAL_ERROR,
-             "[Check][Size]output[%zu] tensor size(%zu) is not enough for output shape [%s]",
-             i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str());
-      REPORT_INNER_ERROR("E19999", "output[%zu] tensor size(%zu) is not enough for output shape [%s]",
-                         i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str());
+             "[Check][Size]output[%zu] tensor size(%zu) is not enough for output shape [%s], model_id = %u.",
+             i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str(), model_id_);
+      REPORT_INNER_ERROR("E19999",
+                         "output[%zu] tensor size(%zu) is not enough for output shape [%s], model_id = %u,"
+                         " when HybridModelAsyncExecutor %s.",
+                         i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str(), model_id_, __FUNCTION__);
       return INTERNAL_ERROR;
     }
 
@@ -481,7 +492,7 @@ Status HybridModelAsyncExecutor::Execute(const std::vector<DataBuffer> &inputs,
     args.input_desc.emplace_back(tensor_desc_ptr);
   }
 
-  GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
+  GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed, model_id = %u.", model_id_);
   for (const auto &output_tensor_desc : args.output_desc) {
     output_desc.emplace_back(*output_tensor_desc);
   }
@@ -502,14 +513,15 @@ Status HybridModelAsyncExecutor::Execute(const vector<GeTensor> &inputs, vector<
   }
 
   HybridModelExecutor::ExecuteArgs args;
-  GE_CHK_STATUS_RET(PrepareInputs(input_data, args), "[Invoke][PrepareInputs]Failed to copy input data to model");
+  GE_CHK_STATUS_RET(PrepareInputs(input_data, args),
+                    "[Invoke][PrepareInputs]Failed to copy input data to model, model_id = %u.", model_id_);
   GELOGD("Done copying input data successfully.");
-  GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed.");
+  GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed, model_id = %u.", model_id_);
 
   std::vector<ge::OutputTensorInfo> output_tensor_info_list;
   OutputData output_data;
   GE_CHK_STATUS_RET(CopyOutputs(args, &output_data, output_tensor_info_list),
-                    "[Invoke][CopyOutputs]Failed to copy outputs.");
+                    "[Invoke][CopyOutputs]Failed to copy outputs, model_id = %u.", model_id_);
   GELOGD("Done copying output data successfully. output count = %zu", output_tensor_info_list.size());
 
   int out_index = 0;
@@ -560,7 +572,8 @@ Status HybridModelAsyncExecutor::DumpOpDebug() {
       loop_cond = const_cast<void *>(varible_loop_cond->GetData());
     }
     data_dumper_.SetLoopAddr(global_step, loop_per_iter, loop_cond);
-    GE_CHK_STATUS_RET(data_dumper_.LoadDumpInfo(), "[Invoke][LoadDumpInfo] failed in hybrid engine");
+    GE_CHK_STATUS_RET(data_dumper_.LoadDumpInfo(),
+                      "[Invoke][LoadDumpInfo] failed in hybrid engine, model_id = %u.", model_id_);
     GELOGD("Dump op debug SUCCESS in hybrid engine");
   }
   return SUCCESS;
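
Note on the pattern: every hunk in this patch applies the same error-reporting convention. A failing check no longer relies on a bare GE_CHK_* message keyed on __FUNCTION__; it now carries model_id_ and, where a structured event is wanted, emits a REPORT_INNER_ERROR/REPORT_CALL_ERROR "E19999" record before logging and returning. A minimal sketch of the expanded-check form used above; CheckedStep() and DoWork() are hypothetical placeholders, while REPORT_INNER_ERROR, GELOGE, Status, SUCCESS, INTERNAL_ERROR, and model_id_ are the identifiers the patch itself uses:

// Sketch only: DoWork() stands in for the guarded call (Push, Init, Execute, ...).
Status HybridModelAsyncExecutor::CheckedStep() {
  if (DoWork() != SUCCESS) {
    // Structured error event, same "E19999" key used throughout the patch.
    REPORT_INNER_ERROR("E19999", "DoWork failed when %s, model_id:%u.", __FUNCTION__, model_id_);
    // The error log now carries the model id so failures are traceable per model.
    GELOGE(INTERNAL_ERROR, "[Check][DoWork] failed, model_id:%u.", model_id_);
    return INTERNAL_ERROR;  // explicit status instead of a macro-generated return
  }
  return SUCCESS;
}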