diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index e4b8d8d2..cc19fe9f 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -329,7 +329,7 @@ set(COMMON_FORMAT_SRC_FILES "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc" "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc" "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc" - "${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc" + "${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc" ) set(GRAPH_OPTIMIZE_COMMON_SRC_FILES @@ -565,6 +565,11 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES "graph/load/new_model_manager_event_manager_unittest.cc" #"graph/load/output_net_output_unittest.cc" "graph/load/tbe_handle_store_unittest.cc" + "graph/load/hccl_task_info_unittest.cc" + "graph/load/kernel_ex_task_info_unittest.cc" + "graph/load/kernel_task_info_unittest.cc" + "graph/load/memcpy_addr_async_task_info_unittest.cc" + "graph/load/memcpy_async_task_info_unittest.cc" #"graph/graph_load_unittest.cc" "graph/ge_executor_unittest.cc" ) @@ -914,7 +919,7 @@ target_compile_definitions(ut_libge_distinct_load_utest PRIVATE google=ascend_private ) -target_link_libraries(ut_libge_distinct_load_utest +target_link_libraries(ut_libge_distinct_load_utest ${COMMON_SHARED_LIBRARIES} $ ge_execute_common ge_ut_common_format ge_load_common diff --git a/tests/ut/ge/graph/load/hccl_task_info_unittest.cc b/tests/ut/ge/graph/load/hccl_task_info_unittest.cc new file mode 100644 index 00000000..1caf49c1 --- /dev/null +++ b/tests/ut/ge/graph/load/hccl_task_info_unittest.cc @@ -0,0 +1,145 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <gtest/gtest.h>
+
+#define private public
+#define protected public
+
+#include "graph/load/new_model_manager/davinci_model.h"
+#include "graph/load/new_model_manager/task_info/kernel_task_info.h"
+#include "graph/load/new_model_manager/task_info/hccl_task_info.h"
+
+using domi::EventExDef;
+using domi::KernelContext;
+using domi::KernelDef;
+using domi::LogTimeStampDef;
+using domi::ModelTaskDef;
+using domi::StreamActiveDef;
+using domi::TaskDef;
+
+namespace ge {
+class UtestHcclTaskInfo : public testing::Test {
+ protected:
+  void SetUp() {}
+
+  void TearDown() {}
+};
+
+
+// test success GetTaskID
+TEST_F(UtestHcclTaskInfo, success_get_task_id) {
+  domi::ModelTaskDef model_task_def;
+  domi::TaskDef *task = model_task_def.add_task();
+  task->set_type(RT_MODEL_TASK_KERNEL);
+  TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task->type()));
+
+  EXPECT_EQ(task_info->GetTaskID(), 0);
+
+  KernelTaskInfo kernel_task_info;
+  EXPECT_EQ(kernel_task_info.GetTaskID(), 0);
+
+  HcclTaskInfo hccl_task_info;
+  EXPECT_EQ(hccl_task_info.GetTaskID(), 0);
+}
+
+// test CreateStream
+TEST_F(UtestHcclTaskInfo, success_create_stream) {
+  DavinciModel model(0, nullptr);
+  KernelTaskInfo kernel_task_info;
+  EXPECT_EQ(kernel_task_info.CreateStream(3, &model, 0), SUCCESS);
+}
+
+// test hccl_Distribute
+TEST_F(UtestHcclTaskInfo, success_distribute7) {
+  DavinciModel model(0, nullptr);
+
+  domi::ModelTaskDef model_task_def;
+  domi::TaskDef *task7 = model_task_def.add_task();
+  task7->set_type(RT_MODEL_TASK_HCCL);
+  TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
+  Status ret = task_info7->Init(task7[0], &model);
+  EXPECT_EQ(FAILED, ret);
+
+  std::vector<TaskInfoPtr> task_list;
+  task_list.push_back(task_info7);
+  model.task_list_ = task_list;
+
+  EXPECT_EQ(task_info7->Release(), SUCCESS);
+}
+
+// test hccl_Distribute
+TEST_F(UtestHcclTaskInfo, success_distribute7_with_hccl_type) {
+  DavinciModel model(0, nullptr);
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  model.stream_list_ = { stream };
+
+  domi::TaskDef task_def;
+  HcclTaskInfo hccl_task_info;
+  EXPECT_EQ(hccl_task_info.Init(task_def, nullptr), PARAM_INVALID);
+
+  domi::KernelHcclDef *kernel_hccl_def = task_def.mutable_kernel_hccl();
+  kernel_hccl_def->set_op_index(0);
+  kernel_hccl_def->set_hccl_type("HcomBroadcast");
+  model.op_list_[0] = std::make_shared<OpDesc>("FrameworkOp", "FrameworkOp");
+  EXPECT_EQ(hccl_task_info.Init(task_def, &model), SUCCESS);
+
+  task_def.clear_kernel_hccl();
+}
+
+// test hccl_GetPrivateDefByTaskDef
+TEST_F(UtestHcclTaskInfo, success_hccl_get_private_def_by_task_def) {
+  DavinciModel model(0, nullptr);
+
+  domi::ModelTaskDef model_task_def;
+  TaskDef *task7 = model_task_def.add_task();
+  task7->set_type(RT_MODEL_TASK_HCCL);
+  // for SetStream
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  model.stream_list_.push_back(stream);
+  // for GetPrivateDefByTaskDef
+  task7->set_ops_kernel_store_ptr(10);
+  std::string value = "hccl_task";
+  task7->set_private_def(value);
+
+  TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
+  // for Distribute
+  EXPECT_EQ(task_info7->Init(task7[0], &model), PARAM_INVALID);
+
+  EXPECT_EQ(task_info7->Release(), SUCCESS);
+}
+
+// test hccl_task_TransToGETaskInfo
+TEST_F(UtestHcclTaskInfo, success_hccl_trans_to_ge_task_info) {
+  DavinciModel model(0, nullptr);
+
+  domi::ModelTaskDef model_task_def;
+  domi::TaskDef *task7 = model_task_def.add_task();
+  // for type
+  task7->set_type(RT_MODEL_TASK_HCCL);
+  TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
+
+  GETaskInfo ge_task;
+  HcclTaskInfo hccl_task_info;
+  hccl_task_info.TransToGETaskInfo(ge_task);
+
+  EXPECT_EQ(task_info7->Release(), SUCCESS);
+}
+
+} // namespace ge
diff --git a/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc b/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc
new file mode 100644
index 00000000..ee047369
--- /dev/null
+++ b/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc
@@ -0,0 +1,142 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define private public
+#define protected public
+
+#include "graph/load/new_model_manager/davinci_model.h"
+
+#include "graph/load/new_model_manager/task_info/kernel_ex_task_info.h"
+#include "cce/aicpu_engine_struct.h"
+
+namespace ge {
+extern OpDescPtr CreateOpDesc(string name, string type);
+
+class UtestKernelExTaskInfo : public testing::Test {
+ protected:
+  void SetUp() {}
+
+  void TearDown() {}
+};
+
+// test kernel_ex_task_Init
+TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_init) {
+  domi::TaskDef task_def;
+  KernelExTaskInfo kernel_ex_task_info;
+  EXPECT_EQ(kernel_ex_task_info.Init(task_def, nullptr), PARAM_INVALID);
+
+  DavinciModel model(0, nullptr);
+  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);
+
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  model.stream_list_.push_back(stream);
+  domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
+  kernel_ex_def->set_op_index(1);
+  model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");
+  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), INTERNAL_ERROR);
+
+  kernel_ex_def->clear_op_index();
+  kernel_ex_def->set_op_index(0);
+  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);
+
+  kernel_ex_def->set_task_info("KernelEx");
+  kernel_ex_def->set_task_info_size(1);
+  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);
+
+  constexpr uint32_t arg_size = sizeof(STR_FWK_OP_KERNEL);
+  string value1(arg_size, 'a');
+  kernel_ex_def->set_args_size(arg_size);
+  kernel_ex_def->set_args(value1);
+  OpDescPtr v_op_desc = CreateOpDesc("ge_global_step", "Variable");
+  model.variable_op_list_.push_back(v_op_desc);
+  model.op_list_[0]->SetWorkspace({100331008});  // offset
+  model.op_list_[0]->SetWorkspaceBytes({150});   // length
+  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);
+
+  task_def.clear_kernel_ex();
+}
+
+// test kernel_ex_task_Release
+TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_release) {
+  KernelExTaskInfo kernel_ex_task_info;
+  EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);
+
+  kernel_ex_task_info.kernel_buf_ = nullptr;
+  rtMalloc(&kernel_ex_task_info.input_output_addr_, 64, RT_MEMORY_HBM);
+  EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);
+
+  kernel_ex_task_info.input_output_addr_ = nullptr;
+  rtMalloc(&kernel_ex_task_info.kernel_buf_, 64, RT_MEMORY_HBM);
+
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS); + + rtMalloc(&kernel_ex_task_info.kernel_buf_, 64, RT_MEMORY_HBM); + rtMalloc(&kernel_ex_task_info.input_output_addr_, 64, RT_MEMORY_HBM); + EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS); +} + +// test kernel_ex_task_Release +TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_info_copy) { + DavinciModel model(0, nullptr); + model.runtime_param_.mem_base = (uint8_t *)0x12345; + model.runtime_param_.mem_size = 100332000; + + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_.push_back(stream); + + domi::TaskDef task_def; + KernelExTaskInfo kernel_ex_task_info; + + domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex(); + kernel_ex_def->set_task_info_size(150); + kernel_ex_def->set_op_index(0); + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace empty. + + model.op_list_[0]->SetWorkspace({100331008}); // offset + model.op_list_[0]->SetWorkspaceBytes({0}); // length + EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace addr is null. + + model.op_list_[0]->SetWorkspace({100331008}); // offset + model.op_list_[0]->SetWorkspaceBytes({10}); // length + EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace addr is small. + + model.op_list_[0]->SetWorkspace({100331008}); // offset + model.op_list_[0]->SetWorkspaceBytes({150}); // length + EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), SUCCESS); + + task_def.clear_kernel_ex(); + model.runtime_param_.mem_base = nullptr; +} + +TEST_F(UtestKernelExTaskInfo, kernel_ex_task_info_calculate_args) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex(); + kernel_ex_def->set_op_index(0); + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + AttrUtils::SetStr(model.op_list_[0], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree"); + + KernelExTaskInfo kernel_ex_task_info; + EXPECT_EQ(kernel_task_info.CalculateArgs(task_def, &model), SUCCESS); +} + +} // namespace ge diff --git a/tests/ut/ge/graph/load/kernel_task_info_unittest.cc b/tests/ut/ge/graph/load/kernel_task_info_unittest.cc new file mode 100644 index 00000000..db770a5f --- /dev/null +++ b/tests/ut/ge/graph/load/kernel_task_info_unittest.cc @@ -0,0 +1,1203 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#define private public +#define protected public + +#include "graph/load/new_model_manager/davinci_model.h" +#include "graph/load/new_model_manager/task_info/kernel_task_info.h" +#include "graph/load/new_model_manager/task_info/hccl_task_info.h" + +namespace ge { +extern OpDescPtr CreateOpDesc(string name, string type); + +class UtestKernelTaskInfo : public testing::Test { + protected: + void SetUp() {} + + void TearDown() {} +}; + +// test KernelTaskInfo Init. 
+TEST_F(UtestKernelTaskInfo, success_kernel_taskInfo_not_te) {
+  DavinciModel model(0, nullptr);
+  domi::ModelTaskDef model_task_def;
+  domi::TaskDef *task = model_task_def.add_task();
+  task->set_type(RT_MODEL_TASK_KERNEL);
+  TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task->type()));
+
+  task->stream_id_ = 0;
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  model.stream_list_ = { stream };
+
+  domi::KernelDef *kernel_def = task->mutable_kernel();
+  domi::KernelContext *ctx = kernel_def->mutable_context();
+  model.op_list_[0] = CreateOpDesc("relu", RELU);
+  ctx->set_op_index(0);
+
+  EXPECT_EQ(task_info->Init(*task, &model), FAILED);
+
+  kernel_def->set_block_dim(10);
+  kernel_def->set_args("args111111", 10);
+  kernel_def->set_args_size(10);
+
+  ctx->set_kernel_type(0);
+  EXPECT_EQ(task_info->Init(*task, &model), INTERNAL_ERROR);
+
+  task_info->Release();
+}
+
+TEST_F(UtestKernelTaskInfo, success_init_kernel_task_info_fail) {
+  DavinciModel model(0, nullptr);
+  KernelTaskInfo kernel_task_info;
+  domi::TaskDef task_def;
+  domi::KernelDef *kernel_def = task_def.mutable_kernel();
+  domi::KernelContext *ctx = kernel_def->mutable_context();
+
+  model.op_list_[0] = CreateOpDesc("relu", RELU);
+  ctx->set_op_index(0);
+
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  model.stream_list_ = { stream };
+
+  // Failed by rtGetFunctionByName.
+  EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED);
+}
+
+// test InitTVMTask failed
+TEST_F(UtestKernelTaskInfo, init_tvm_task_fail) {
+  DavinciModel model(0, nullptr);
+  domi::TaskDef task_def;
+  domi::KernelDef *kernel_def = task_def.mutable_kernel();
+  domi::KernelContext *ctx = kernel_def->mutable_context();
+
+  KernelTaskInfo kernel_task_info;
+  kernel_task_info.davinci_model_ = &model;
+
+  EXPECT_EQ(kernel_task_info.InitTVMTask(0, *kernel_def), PARAM_INVALID);
+  task_def.clear_kernel();
+}
+
+// test InitTVMTask with kernel_type is TE
+TEST_F(UtestKernelTaskInfo, init_tvm_task_info_with_te_kernel_type) {
+  DavinciModel model(0, nullptr);
+  domi::TaskDef task_def;
+
+  // DavinciModel is nullptr
+  KernelTaskInfo kernel_task_info;
+  EXPECT_EQ(kernel_task_info.Init(task_def, nullptr), PARAM_INVALID);
+
+  task_def.set_stream_id(0);
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  model.stream_list_ = { stream };
+  model.op_list_[0] = CreateOpDesc("", "");
+
+  domi::KernelDef *kernel_def = task_def.mutable_kernel();
+  rtSmDesc_t l2CtrlInfo;
+  l2CtrlInfo.data[0].L2_mirror_addr = 1024;
+
+  kernel_def->set_args("args111111", 10);
+  kernel_def->set_args_size(10);
+  kernel_def->set_sm_desc(&l2CtrlInfo, sizeof(rtSmDesc_t));
+  kernel_def->set_flowtable("fl", 2);
+  kernel_def->set_block_dim(10);
+
+  domi::KernelContext *ctx = kernel_def->mutable_context();
+  ctx->set_kernel_type(2);
+  ctx->set_op_index(4);
+  ctx->set_args_offset("\0\0");  // args_offset = 0
+  EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED);
+
+  ctx->clear_args_offset();
+  ctx->set_args_offset("args111111", 10);
+  EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED);
+
+  ctx->clear_op_index();
+  ctx->set_op_index(0);
+  EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED);
+
+  EXPECT_EQ(kernel_task_info.Release(), SUCCESS);
+
+  kernel_def->clear_context();
+  task_def.clear_kernel();
+}
+
+// test InitAICPUCustomTask with kernel_type is CUSTOMIZED
+TEST_F(UtestKernelTaskInfo, init_kernel_task_info_with_customized_kernel_type) {
+  DavinciModel model(0, nullptr);
+
+  rtStream_t stream = nullptr;
+ 
rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + + rtSmDesc_t l2CtrlInfo; + l2CtrlInfo.data[0].L2_mirror_addr = 1024; + + kernel_def->set_args("args111111", 10); + kernel_def->set_args_size(10); + kernel_def->set_sm_desc(&l2CtrlInfo, sizeof(rtSmDesc_t)); + kernel_def->set_flowtable("fl", 2); + kernel_def->set_block_dim(10); + + domi::KernelContext *ctx = kernel_def->mutable_context(); + ctx->set_kernel_type(3); + ctx->set_op_index(4); + ctx->set_args_offset("\0\0"); // args_offset = 0 + EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED); + + ctx->clear_args_offset(); + ctx->set_args_offset("args111111", 10); + EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED); + + ctx->clear_args_offset(); + ctx->set_op_index(0); + + const char task[] = "opattr"; + AttrUtils::SetBytes(model.op_list_[0], ATTR_NAME_OPATTR, Buffer::CopyFrom((uint8_t *)task, sizeof(task))); + EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED); + + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test InitAICPUCustomTask failed +TEST_F(UtestKernelTaskInfo, init_aicpu_custom_task_failed) { + DavinciModel model(0, nullptr); + + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_args_offset("\0\0"); + kernel_task_info.davinci_model_ = &model; + + EXPECT_EQ(kernel_task_info.InitAICPUCustomTask(0, *kernel_def), PARAM_INVALID); + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + ctx->clear_args_offset(); + ctx->set_args_offset("args111111", 10); + // KernelTaskInfo::StoreInputOutputTensor -> SUCCESS + EXPECT_EQ(kernel_task_info.InitAICPUCustomTask(0, *kernel_def), FAILED); + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test InitAICPUCustomTask failed +TEST_F(UtestKernelTaskInfo, init_aicpu_custom_task_failed2) { + DavinciModel model(0, nullptr); + + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + kernel_task_info.davinci_model_ = &model; + + context->set_args_offset("\0\0"); + // KernelTaskInfo::StoreInputOutputTensor -> SUCCESS + // AttrUtils::GetBytes -> true + EXPECT_EQ(kernel_task_info.InitAICPUCustomTask(0, *kernel_def), FAILED); + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test InitAICPUCustomTask failed +TEST_F(UtestKernelTaskInfo, init_aicpu_custom_task_failed3) { + DavinciModel model(0, nullptr); + + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + kernel_task_info.davinci_model_ = 
&model; + + context->set_args_offset("\0\0"); + // KernelTaskInfo::StoreInputOutputTensor -> SUCCESS + EXPECT_EQ(kernel_task_info.InitAICPUCustomTask(0, *kernel_def), FAILED); + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test InitAICPUCustomTask failed +TEST_F(UtestKernelTaskInfo, init_aicpu_custom_task_failed4) { + DavinciModel model(0, nullptr); + + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + const char task[] = "opattr"; + AttrUtils::SetBytes(model.op_list_[0], ATTR_NAME_OPATTR, Buffer::CopyFrom((uint8_t *)task, sizeof(task))); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + kernel_task_info.davinci_model_ = &model; + + context->set_args_offset("args111111", 10); + // KernelTaskInfo::StoreInputOutputTensor -> SUCCESS + // rtMalloc RT_ERROR_NONE + EXPECT_EQ(kernel_task_info.InitAICPUCustomTask(0, *kernel_def), FAILED); + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test InitAICPUCustomTask failed +TEST_F(UtestKernelTaskInfo, init_aicpu_custom_task_failed5) { + DavinciModel model(0, nullptr); + + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + const char task[] = "opattr"; + AttrUtils::SetBytes(model.op_list_[0], ATTR_NAME_OPATTR, Buffer::CopyFrom((uint8_t *)task, sizeof(task))); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + kernel_task_info.davinci_model_ = &model; + + context->set_args_offset("args111111", 10); + // KernelTaskInfo::StoreInputOutputTensor -> SUCCESS + // rtMalloc RT_ERROR_NONE + // rtMemcpy RT_ERROR_INVALID_VALIUE + EXPECT_EQ(kernel_task_info.InitAICPUCustomTask(0, *kernel_def), FAILED); + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test InitAICPUCustomTask failed +TEST_F(UtestKernelTaskInfo, init_aicpu_custom_task_failed6) { + DavinciModel model(0, nullptr); + + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + const char task[] = "opattr"; + AttrUtils::SetBytes(model.op_list_[0], ATTR_NAME_OPATTR, Buffer::CopyFrom((uint8_t *)task, sizeof(task))); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + kernel_task_info.davinci_model_ = &model; + + context->set_args_offset("args111111", 10); + // KernelTaskInfo::StoreInputOutputTensor -> SUCCESS + // rtMalloc RT_ERROR_NONE + // rtMemcpy RT_ERROR_NONE + EXPECT_EQ(kernel_task_info.InitAICPUCustomTask(0, *kernel_def), FAILED); + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, init_kernel_taskInfo_with_aicpu_kernel_type) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef 
task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + + task_def.set_type(RT_MODEL_TASK_KERNEL); + string args; + args.append(100, '1'); + kernel_def->set_so_name("libDvpp.so"); + kernel_def->set_kernel_name("DvppResize"); + kernel_def->set_args(args.data(), 100); + kernel_def->set_args_size(100); + + domi::KernelContext *ctx = kernel_def->mutable_context(); + ctx->set_kernel_type(6); + ctx->set_op_index(0); + + // ModelUtils::GetInputDataAddrs -> ok + // ModelUtils::GetOutputDataAddrs -> ok + // rtMalloc -> RT_ERROR_NONE + // rtMemcpy -> RT_ERROR_NONE + EXPECT_EQ(kernel_task_info->Init(task_def, &model), FAILED); + + EXPECT_EQ(kernel_task_info->Distribute(), SUCCESS); + EXPECT_EQ(kernel_task_info->Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, init_kernel_taskInfo_with_aicpu_kernel_type_fail) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + + task_def.set_type(RT_MODEL_TASK_KERNEL); + string args; + args.append(100, '1'); + kernel_def->set_so_name("libDvpp.so"); + kernel_def->set_kernel_name("DvppResize"); + kernel_def->set_args(args.data(), 100); + kernel_def->set_args_size(100); + + domi::KernelContext *ctx = kernel_def->mutable_context(); + ctx->set_kernel_type(6); + ctx->set_op_index(0); + + // ModelUtils::GetInputDataAddrs -> ok + // ModelUtils::GetOutputDataAddrs -> ok + // rtMalloc -> RT_ERROR_NONE + // rtMemcpy -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info->Init(task_def, &model), FAILED); + + EXPECT_EQ(kernel_task_info->Distribute(), SUCCESS); + EXPECT_EQ(kernel_task_info->Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, init_kernel_taskInfo_with_aicpu_kernel_type_fail2) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + + task_def.set_type(RT_MODEL_TASK_KERNEL); + string args; + args.append(100, '1'); + kernel_def->set_so_name("libDvpp.so"); + kernel_def->set_kernel_name("DvppResize"); + kernel_def->set_args(args.data(), 100); + kernel_def->set_args_size(100); + + domi::KernelContext *ctx = kernel_def->mutable_context(); + ctx->set_kernel_type(6); + ctx->set_op_index(0); + + // ModelUtils::GetInputDataAddrs -> ok + // ModelUtils::GetOutputDataAddrs -> ok + // rtMalloc -> RT_ERROR_INVALID_VALUE + // rtMemcpy -> RT_ERROR_NONE + EXPECT_EQ(kernel_task_info->Init(task_def, &model), FAILED); + + EXPECT_EQ(kernel_task_info->Distribute(), SUCCESS); + EXPECT_EQ(kernel_task_info->Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test StoreInputOutputTensor failed +TEST_F(UtestKernelTaskInfo, store_input_output_tensor_fail) { + std::vector input_data_addrs; + std::vector output_data_addrs; + std::vector<::tagCcAICPUTensor> input_descs; + std::vector<::tagCcAICPUTensor> output_descs; + + KernelTaskInfo kernel_task_info; + // rtMalloc -> RT_ERROR_INVALID_VALUE + 
EXPECT_EQ(kernel_task_info->StoreInputOutputTensor(input_data_addrs, output_data_addrs, input_descs, output_descs), SUCCESS); +} + + +TEST_F(UtestKernelTaskInfo, store_input_output_tensor_fail2) { + std::vector input_data_addrs; + std::vector output_data_addrs; + std::vector<::tagCcAICPUTensor> input_descs; + std::vector<::tagCcAICPUTensor> output_descs; + + KernelTaskInfo kernel_task_info; + // rtMalloc -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info->StoreInputOutputTensor(input_data_addrs, output_data_addrs, input_descs, output_descs), SUCCESS); +} + +// test InitCceTask success +TEST_F(UtestKernelTaskInfo, kernel_task_info_init_cce_task) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + kernel_task_info.davinci_model_ = &model; + + kernel_def->set_flowtable("InitCceTask"); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_is_flowtable(true); + + rtSmDesc_t l2CtrlInfo; + l2CtrlInfo.data[0].L2_mirror_addr = 1024; + kernel_def->set_sm_desc(&l2CtrlInfo, sizeof(rtSmDesc_t)); + + model.runtime_param_.logic_mem_base = 0; + model.runtime_param_.mem_size = 0; + model.runtime_param_.logic_weight_base = 0; + model.runtime_param_.weight_size = 0; + model.runtime_param_.logic_var_base = 0; + model.runtime_param_.var_size = 0; + + // KernelTaskInfo::UpdateCceArgs -> SUCCESS + // KernelTaskInfo::UpdateCceArgs -> SUCCESS + // rtMalloc -> RT_ERROR_NONE + // rtMemcpy -> RT_ERROR_NONE + // rtMemAllocManaged -> RT_ERROR_NONE + EXPECT_EQ(kernel_task_info->InitCceTask(kernel_def), INTERNAL_ERROR); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed1) { + DavinciModel model(0, nullptr); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + EXPECT_EQ(kernel_task_info->InitCceTask(*kernel_def), INTERNAL_ERROR); + + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed2) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + // KernelTaskInfo::SetContext -> SUCCESS + + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_is_flowtable(true); + + EXPECT_EQ(kernel_task_info->InitCceTask(*kernel_def), INTERNAL_ERROR); + + kernel_def->clear_kernel(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed3) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + // KernelTaskInfo::SetContext -> SUCCESS + + kernel_def->set_flowtable("InitCceTask"); + domi::KernelContext *context = kernel_def->mutable_context(); + 
context->set_is_flowtable(true); + + // KernelTaskInfo::UpdateCceArgs -> CCE_FAILED + EXPECT_EQ(kernel_task_info->InitCceTask(*kernel_def), INTERNAL_ERROR); + + kernel_def->clear_kernel(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed4) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + // KernelTaskInfo::SetContext -> SUCCESS + + kernel_def->set_flowtable("InitCceTask"); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_is_flowtable(true); + + // KernelTaskInfo::UpdateCceArgs -> SUCCESS + // KernelTaskInfo::SetFlowtable -> RT_FAILED + EXPECT_EQ(kernel_task_info->InitCceTask(*kernel_def), INTERNAL_ERROR); + + kernel_def->clear_kernel(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed5) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + // KernelTaskInfo::SetContext -> SUCCESS + + kernel_def->set_flowtable("InitCceTask"); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_is_flowtable(true); + + // KernelTaskInfo::UpdateCceArgs -> SUCCESS + // KernelTaskInfo::SetFlowtable -> SUCCESS + // rtMalloc -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info->InitCceTask(*kernel_def), INTERNAL_ERROR); + + kernel_def->clear_kernel(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed6) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + // KernelTaskInfo::SetContext -> SUCCESS + + kernel_def->set_flowtable("InitCceTask"); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_is_flowtable(true); + + // KernelTaskInfo::UpdateCceArgs -> SUCCESS + // KernelTaskInfo::SetFlowtable -> SUCCESS + // rtMalloc -> RT_ERROR_NONE + // rtMemcpy -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info->InitCceTask(*kernel_def), INTERNAL_ERROR); + + kernel_def->clear_kernel(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed7) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("", ""); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def->mutable_kernel(); + // KernelTaskInfo::SetContext -> SUCCESS + + kernel_def->set_flowtable("InitCceTask"); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_is_flowtable(true); + + // KernelTaskInfo::UpdateCceArgs -> SUCCESS + // KernelTaskInfo::SetFlowtable -> SUCCESS + rtSmDesc_t l2CtrlInfo; + 
l2CtrlInfo.data[0].L2_mirror_addr = 1024; + kernel_def->set_sm_desc(&l2CtrlInfo, sizeof(rtSmDesc_t)); + + // rtMalloc -> RT_ERROR_NONE + // rtMemcpy -> RT_ERROR_NONE + // rtMemAllocManaged -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info->InitCceTask(*kernel_def), INTERNAL_ERROR); + + kernel_def->clear_kernel(); + task_def.clear_kernel(); +} + +// test SetContext success +TEST_F(UtestKernelTaskInfo, success_kernel_taskInfo_init_set_context) { + DavinciModel model(0, nullptr); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_op_id(1); + context->set_kernel_func_id(1); + context->set_is_flowtable(true); + context->set_args_count(1); + context->set_args_offset("args111111", 10); + + EXPECT_EQ(kernel_task_info.SetContext(*kernel_def), SUCCESS); + + EXPECT_EQ(kernel_task_info->Release(), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test SetContext failed +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_set_context_failed1) { + DavinciModel model(0, nullptr); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_op_id(1); + context->set_kernel_func_id(1); + context->set_is_flowtable(true); + context->set_args_count(0); + + EXPECT_EQ(kernel_task_info.SetContext(*kernel_def), INTERNAL_ERROR); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_set_context_failed2) { + DavinciModel model(0, nullptr); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + context->set_op_id(1); + context->set_kernel_func_id(1); + context->set_is_flowtable(true); + context->set_args_count(5); + context->set_args_offset("\0\0"); // args_offset = 0 + + EXPECT_EQ(kernel_task_info.SetContext(*kernel_def), PARAM_INVALID); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +// test UpdateCceArgs success +TEST_F(UtestKernelTaskInfo, kernel_task_info_update_cce_args) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + + string flowtable("InitCceTask"); + string sm_desc("args"); + + uint8_t test = 2; + uint8_t *p = &test; + model.mem_base_ = &test; + model.runtime_param_.logic_mem_base = 0; + + model.weights_mem_base_ = &test; + model.runtime_param_.logic_weight_base = 0; + + uint8_t test1 = 16; + model.var_mem_base_ = &test1; + model.runtime_param_.logic_var_base = 0; + + context->set_is_flowtable(true); + // KernelTaskInfo::CceUpdateKernelArgs ->SUCCESS + EXPECT_EQ(kernel_task_info.UpdateCceArgs(sm_desc, flowtable, *kernel_def), FAILED); + + + context->clear_is_flowtable(); + context->set_is_flowtable(false); + // KernelTaskInfo::CceUpdateKernelArgs ->SUCCESS + EXPECT_EQ(kernel_task_info.UpdateCceArgs(sm_desc, flowtable, *kernel_def), FAILED); + + kernel_def->clear_context(); + task_def.clear_kernel(); + + 
model.mem_base_ = nullptr; + model.weights_mem_base_ = nullptr; + model.var_mem_base_ = nullptr; +} + +TEST_F(UtestKernelTaskInfo, kernel_task_info_update_cce_args_failed1) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + + string flowtable("InitCceTask"); + string sm_desc("args"); + + uint8_t test = 2; + uint8_t *p = &test; + model.mem_base_ = &test; + model.runtime_param_.logic_mem_base = 0; + + uint8_t test1 = 10; + model.weights_mem_base_ = &test1; + model.runtime_param_.logic_weight_base = 0; + + model.var_mem_base_ = &test1; + model.runtime_param_.logic_var_base = 0; + + context->set_is_flowtable(true); + // KernelTaskInfo::CceUpdateKernelArgs -> FAILED + EXPECT_EQ(kernel_task_info.UpdateCceArgs(sm_desc, flowtable, *kernel_def), FAILED); + + kernel_def->clear_context(); + task_def.clear_kernel(); + + model.mem_base_ = nullptr; + model.weights_mem_base_ = nullptr; + model.var_mem_base_ = nullptr; +} + +// test SetFlowtable +TEST_F(UtestKernelTaskInfo, kernel_task_info_set_flowtable) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + + string flowtable("InitCceTask"); + context->set_is_flowtable(false); + EXPECT_EQ(kernel_task_info.SetFlowtable(flowtable, *kernel_def), SUCCESS); + + + context->clear_is_flowtable(); + context->set_is_flowtable(true); + // rtMalloc ->RT_ERROR_NONE + // rtMemcpy ->RT_ERROR_NONE + kernel_def->set_args("args111111", 10); + context->set_args_offset("\0\0"); + EXPECT_EQ(kernel_task_info.SetFlowtable(flowtable, *kernel_def), SUCCESS); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_task_info_set_flowtable_failed1) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + + string flowtable("SetFlowtable"); + context->set_is_flowtable(true); + + // rtMalloc -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info.SetFlowtable(flowtable, *kernel_def), FAILED); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_task_info_set_flowtable_failed2) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = 
kernel_def->mutable_context(); + + string flowtable("SetFlowtable"); + context->set_is_flowtable(true); + // rtMalloc ->RT_ERROR_NONE + // rtMemcpy ->RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info.SetFlowtable(flowtable, *kernel_def), FAILED); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, kernel_task_info_set_flowtable_failed3) { + DavinciModel model(0, nullptr); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_ = { stream }; + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *context = kernel_def->mutable_context(); + + string flowtable("SetFlowtable"); + context->set_is_flowtable(true); + // rtMalloc ->RT_ERROR_NONE + // rtMemcpy ->RT_ERROR_NONE + kernel_def->set_args("args", 4); + context->set_args_offset("args111111", 10); + EXPECT_EQ(kernel_task_info.SetFlowtable(flowtable, *kernel_def), FAILED); + + kernel_def->clear_context(); + task_def.clear_kernel(); +} + +TEST_F(UtestKernelTaskInfo, distribute_failed) { + KernelTaskInfo kernel_task_info; + DavinciModel model(0, nullptr); + + domi::TaskDef task_def; + + // Failed for SetStream + EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED); + + // rtKernelLaunchWithFlag -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS); +} + +TEST_F(UtestKernelTaskInfo, distribute_success) { + KernelTaskInfo kernel_task_info; + DavinciModel model(0, nullptr); + model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + domi::TaskDef task_def; + // rtModelGetTaskId -> RT_ERROR_INVALID_VALUE + rtModel_t rt_model_handle = (rtModel_t *)0x12345678; + model.rt_model_handle_ = rt_model_handle; + + // Failed for SetStream + EXPECT_EQ(kernel_task_info.Init(task_def, &model), FAILED); + + // rtKernelLaunchWithFlag -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS); + model.rt_model_handle_ = nullptr; +} + +// test success DistributeDumpTask +TEST_F(UtestKernelTaskInfo, success_distribute_dump_task) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + + kernel_def->set_stub_func("kerneltaskinfo"); + kernel_def->set_block_dim(10); + kernel_def->set_args("args111111", 10); + kernel_def->set_args_size(10); + rtSmDesc_t l2CtrlInfo; + l2CtrlInfo.data[0].L2_mirror_addr = 1024; + kernel_def->set_sm_desc((void *)&l2CtrlInfo, sizeof(rtSmDesc_t)); + + // for SetStream + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + std::vector stream_list = { stream }; + EXPECT_EQ(kernel_task_info.SetStream(0, stream_list), SUCCESS); + + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); + + rtStreamDestroy(stream); + task_def.clear_kernel(); +} + +// test success GetTaskID +TEST_F(UtestKernelTaskInfo, success_get_task_id) { + domi::ModelTaskDef model_task_def; + domi::TaskDef *task = model_task_def->add_task(); + task->set_type(RT_MODEL_TASK_KERNEL); + TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast(task->type())); + + EXPECT_EQ(task_info->GetTaskID(), 0); + + KernelTaskInfo kernel_task_info; + EXPECT_EQ(kernel_task_info.GetTaskID(), 0); + + HcclTaskInfo hccl_task_info; + EXPECT_EQ(hccl_task_info.GetTaskID(), 0); +} + +// test StoreInputOutputTensor 
success +TEST_F(UtestKernelTaskInfo, success_store_input_output_tensor) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + std::vector input_data_addrs; + std::vector output_data_addrs; + std::vector<::tagCcAICPUTensor> input_descs; + std::vector<::tagCcAICPUTensor> output_descs; + + int test = 1; + int *addr = &test; + void *input = addr; + void *output = addr; + input_data_addrs.push_back(input); + output_data_addrs.push_back(output); + + tagCcAICPUTensor input_desc; + tagCcAICPUTensor output_desc; + input_descs.push_back(input_desc); + output_descs.push_back(output_desc); + + EXPECT_EQ(kernel_task_info.StoreInputOutputTensor(input_data_addrs, output_data_addrs, input_descs, output_descs), SUCCESS); + + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); +} + +// test KernelTaskInfo release fail +TEST_F(UtestKernelTaskInfo, fail_release) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + + std::vector input_data_addrs; + std::vector output_data_addrs; + std::vector<::tagCcAICPUTensor> input_descs; + std::vector<::tagCcAICPUTensor> output_descs; + + int test = 1; + int *addr = &test; + void *input = addr; + void *output = addr; + input_data_addrs.push_back(input); + output_data_addrs.push_back(output); + + tagCcAICPUTensor input_desc; + tagCcAICPUTensor output_desc; + input_descs.push_back(input_desc); + output_descs.push_back(output_desc); + + EXPECT_EQ(kernel_task_info.StoreInputOutputTensor(input_data_addrs, output_data_addrs, input_descs, output_descs), SUCCESS); + + // rtMemFreeManaged -> RT_ERROR_INVALID_VALUE + EXPECT_EQ(kernel_task_info.Release(), SUCCESS); +} + +// test KernelTaskInfo release fail +TEST_F(UtestKernelTaskInfo, update_l2data_success) { + DavinciModel model(0, nullptr); + KernelTaskInfo kernel_task_info; + kernel_task_info.davinci_model_ = &model; + domi::KernelDef kernel_def; + + EXPECT_EQ(kernel_task_info.UpdateL2Data(kernel_def), SUCCESS); +} + +// test fusion_end_task Init +TEST_F(UtestKernelTaskInfo, kernel_task_info_init_success) { + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + + DavinciModel model(0, nullptr); + auto model_def = MakeShared(); + + model.model_id_ = 1; + model.name_ = "test"; + model.version_ = 0x01; + + model.stream_list_ = { stream }; + model.ge_model_ = MakeShared(); + model.ge_model_->SetModelTaskDef(model_def); + + auto op_desc = GreateOpDesc("data", DATA); + op_desc->SetInputOffset({1}); + op_desc->SetOutputOffset({100}); + + GeTensorDesc descin(GeShape({1, 1, 1, 1}), FORMAT_NCHW, DT_FLOAT); + TensorUtils::SetSize(descin, 4); + op_desc->AddInputDesc(descin); + GeTensorDesc descout(GeShape({1, 1, 1, 1}), FORMAT_NCHW, DT_FLOAT16); + TensorUtils::SetSize(descout, 32); + op_desc->AddOutputDesc(descout); + op_desc->SetId(0); + + model.data_op_list_.push_back(op_desc); + model.op_list_[0] = op_desc; + + domi::TaskDef task_def; + task_def.set_stream_id(0); + domi::KernelDef *kernel_def = task_def.mutable_kernel(); + domi::KernelContext *ctx = kernel_def->mutable_context(); + ctx->set_op_index(0); + vector original_op_names = { "conv", "add" }; + AttrUtils::GetListStr(op_desc, ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, original_op_names); + + KernelTaskInfo kernel_task_info; + EXPECT_EQ(kernel_task_info.Init(task_def, &model), SUCCESS); +} + +TEST_F(UtestKernelTaskInfo, kernel_task_info_calculate_args_te) { + DavinciModel model(0, nullptr); + domi::TaskDef 
task_def;
+
+  domi::KernelDef *kernel_def = task_def.mutable_kernel();
+  domi::KernelContext *ctx = kernel_def->mutable_context();
+  ctx->set_kernel_type(2);
+
+  KernelTaskInfo kernel_task_info;
+  EXPECT_EQ(kernel_task_info.CalculateArgs(task_def, &model), SUCCESS);
+}
+
+TEST_F(UtestKernelTaskInfo, kernel_task_info_calculate_args_aicpu) {
+  DavinciModel model(0, nullptr);
+  domi::TaskDef task_def;
+
+  domi::KernelDef *kernel_def = task_def.mutable_kernel();
+  domi::KernelContext *ctx = kernel_def->mutable_context();
+  ctx->set_kernel_type(6);
+
+  KernelTaskInfo kernel_task_info;
+  EXPECT_EQ(kernel_task_info.CalculateArgs(task_def, &model), SUCCESS);
+}
+
+TEST_F(UtestKernelTaskInfo, kernel_task_info_update_args_te) {
+  DavinciModel model(0, nullptr);
+
+  KernelTaskInfo kernel_task_info;
+  kernel_task_info.kernel_type_ = ccKernelType::TE;
+  kernel_task_info.davinci_model_ = &model;
+  EXPECT_EQ(kernel_task_info.UpdateArgs(), SUCCESS);
+}
+
+TEST_F(UtestKernelTaskInfo, kernel_task_info_update_args_aicpu) {
+  DavinciModel model(0, nullptr);
+
+  KernelTaskInfo kernel_task_info;
+  kernel_task_info.kernel_type_ = ccKernelType::TE;
+  kernel_task_info.davinci_model_ = &model;
+  kernel_task_info.args_size_ = 120;
+  kernel_task_info.args_addr = std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[kernel_task_info.args_size_]);
+  kernel_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
+  rtMalloc(&kernel_task_info.args_, kernel_task_info.args_size_, RT_MEMORY_HBM);
+
+  EXPECT_EQ(kernel_task_info.UpdateArgs(), SUCCESS);
+}
+
+
+TEST_F(UtestKernelTaskInfo, kernel_task_info_super_kernel_info) {
+  DavinciModel model(0, nullptr);
+
+  KernelTaskInfo kernel_task_info;
+  kernel_task_info.davinci_model_ = &model;
+
+  EXPECT_EQ(kernel_task_info.SaveSuperKernelInfo(), SUCCESS);
+
+  kernel_task_info.UpdateSKTTaskId();
+
+  EXPECT_EQ(kernel_task_info.SKTFinalize(), SUCCESS);
+}
+
+} // namespace ge
diff --git a/tests/ut/ge/graph/load/memcpy_addr_async_task_info_unittest.cc b/tests/ut/ge/graph/load/memcpy_addr_async_task_info_unittest.cc
new file mode 100644
index 00000000..b8c18348
--- /dev/null
+++ b/tests/ut/ge/graph/load/memcpy_addr_async_task_info_unittest.cc
@@ -0,0 +1,138 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include + +#define private public +#define protected public + +#include "graph/load/new_model_manager/davinci_model.h" +#include "graph/load/new_model_manager/task_info/memcpy_addr_async_task_info.h" + +namespace ge { +class UtestMemcpyAddrAsyncTaskInfo : public testing::Test { + protected: + void SetUp() {} + + void TearDown() {} +}; + +extern OpDescPtr CreateOpDesc(string name, string type); + +TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_addr_async_task_init) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + task_def.set_stream_id(0); + + domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async(); + memcpy_async->set_dst(10); + memcpy_async->set_dst_max(512); + memcpy_async->set_src(10); + memcpy_async->set_count(1); + memcpy_async->set_kind(RT_MEMCPY_ADDR_DEVICE_TO_DEVICE); + memcpy_async->set_op_index(6); + + model.runtime_param_.logic_mem_base = 0x8003000; + model.runtime_param_.logic_weight_base = 0x8008000; + model.runtime_param_.logic_var_base = 0x800e000; + model.runtime_param_.mem_size = 0x5000; + model.runtime_param_.weight_size = 0x6000; + model.runtime_param_.var_size = 0x1000; + + // DavinciModel is null + MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info; + EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, nullptr), INTERNAL_ERROR); + + // SetStream failed. + EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), FAILED); + + // GetOpByIndex src failed + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_.push_back(stream); + EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), INTERNAL_ERROR); + + // GetRuntimeAddress src failed. + model.op_list_[6] = CreateOpDesc("memcpyaddrasync", MEMCPYADDRASYNC); + EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID); + + // GetRuntimeAddress dst failed. 
+ memcpy_async->set_src(0x08003000); + EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID); + + memcpy_async->set_dst(0x08008000); + EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), SUCCESS); + + task_def.clear_memcpy_async(); +} + +TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_async_task_init_failed) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + + task_def.set_stream_id(0); + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_.push_back(stream); + + domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async(); + memcpy_async->set_dst(10); + memcpy_async->set_dst_max(512); + memcpy_async->set_src(10); + memcpy_async->set_count(1); + memcpy_async->set_kind(RT_MEMCPY_ADDR_DEVICE_TO_DEVICE); + memcpy_async->set_op_index(6); + + model.runtime_param_.logic_mem_base = 0x8003000; + model.runtime_param_.logic_weight_base = 0x8008000; + model.runtime_param_.logic_var_base = 0x800e000; + model.runtime_param_.mem_size = 0x5000; + model.runtime_param_.weight_size = 0x6000; + model.runtime_param_.var_size = 0x1000; + + + GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT); + model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYADDRASYNC); + model.op_list_[6]->AddInputDesc(tensor); + model.op_list_[6]->AddOutputDesc(tensor); + model.op_list_[6]->SetInputOffset({1024}); + model.op_list_[6]->SetOutputOffset({5120}); + + // DavinciModel is null + MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info; + EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), SUCCESS); + + task_def.clear_memcpy_async(); +} + +TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_async_calculate_args) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + + domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async(); + memcpy_async->set_dst(0x08003000); + memcpy_async->set_dst_max(512); + memcpy_async->set_src(0x08008000); + memcpy_async->set_count(1); + memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE); + memcpy_async->set_op_index(0); + + // DavinciModel is null + MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info; + EXPECT_EQ(memcpy_addr_async_task_info.CalculateArgs(task_def, &model), SUCCESS); +} + +} // namespace ge diff --git a/tests/ut/ge/graph/load/memcpy_async_task_info_unittest.cc b/tests/ut/ge/graph/load/memcpy_async_task_info_unittest.cc new file mode 100644 index 00000000..bb4c5cf4 --- /dev/null +++ b/tests/ut/ge/graph/load/memcpy_async_task_info_unittest.cc @@ -0,0 +1,269 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#define private public +#define protected public + +#include "graph/load/new_model_manager/davinci_model.h" +#include "graph/load/new_model_manager/task_info/memcpy_async_task_info.h" + + +namespace ge { +class UtestMemcpyAsyncTaskInfo : public testing::Test { + protected: + void SetUp() {} + + void TearDown() {} +}; + +OpDescPtr CreateOpDesc(string name = "", string type = "") { + auto op_desc = std::make_shared(name, type); + op_desc->SetStreamId(0); + op_desc->SetId(0); + + AttrUtils::SetFloat(op_desc, ATTR_NAME_ALPHA, 0); + AttrUtils::SetFloat(op_desc, ATTR_NAME_BETA, 0); + + op_desc->SetWorkspace({}); + op_desc->SetWorkspaceBytes({}); + op_desc->SetInputOffset({}); + op_desc->SetOutputOffset({}); + + AttrUtils::SetListStr(op_desc, ATTR_NAME_WEIGHT_NAME, {}); + AttrUtils::SetInt(op_desc, POOLING_ATTR_MODE, 0); + AttrUtils::SetInt(op_desc, POOLING_ATTR_PAD_MODE, 0); + AttrUtils::SetInt(op_desc, POOLING_ATTR_DATA_MODE, 0); + AttrUtils::SetInt(op_desc, POOLING_ATTR_CEIL_MODE, 0); + AttrUtils::SetInt(op_desc, POOLING_ATTR_NAN_OPT, 0); + AttrUtils::SetListInt(op_desc, POOLING_ATTR_WINDOW, {}); + AttrUtils::SetListInt(op_desc, POOLING_ATTR_PAD, {}); + AttrUtils::SetListInt(op_desc, POOLING_ATTR_STRIDE, {}); + AttrUtils::SetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, {1, 1}); + AttrUtils::SetInt(op_desc, ATTR_NAME_STREAM_SWITCH_COND, 0); + return op_desc; +} + +TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_init) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + task_def.set_stream_id(0); + + domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async(); + memcpy_async->set_dst(10); + memcpy_async->set_dst_max(512); + memcpy_async->set_src(10); + memcpy_async->set_count(1); + memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE); + memcpy_async->set_op_index(6); + + model.runtime_param_.logic_mem_base = 0x8003000; + model.runtime_param_.logic_weight_base = 0x8008000; + model.runtime_param_.logic_var_base = 0x800e000; + model.runtime_param_.mem_size = 0x5000; + model.runtime_param_.weight_size = 0x6000; + model.runtime_param_.var_size = 0x1000; + + // GetOpByIndex src failed + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + model.stream_list_.push_back(stream); + EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR); + + model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC); + memcpy_async->set_src(0x08008000); + EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID); + + // set OpDesc attr + std::vector memory_type = { RT_MEMORY_TS_4G }; + AttrUtils::SetListInt(model.op_list_[6], ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type); + GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT); + model.op_list_[6]->AddInputDesc(tensor); + model.op_list_[6]->AddOutputDesc(tensor); + memcpy_async->set_dst_max(0); + EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), FAILED); + + memcpy_async->set_dst_max(0); + model.op_list_[6]->SetInputOffset({1024}); + model.op_list_[6]->SetOutputOffset({5120}); + EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID); + + + task_def.clear_memcpy_async(); +} + +TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_init_failed) { + DavinciModel model(0, nullptr); + domi::TaskDef task_def; + task_def.set_stream_id(0); + + domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async(); + memcpy_async->set_dst(10); + memcpy_async->set_dst_max(512); + memcpy_async->set_src(10); + memcpy_async->set_count(1); + 
+  memcpy_async->set_op_index(6);
+
+  model.runtime_param_.logic_mem_base = 0x8003000;
+  model.runtime_param_.logic_weight_base = 0x8008000;
+  model.runtime_param_.logic_var_base = 0x800e000;
+  model.runtime_param_.mem_size = 0x5000;
+  model.runtime_param_.weight_size = 0x6000;
+  model.runtime_param_.var_size = 0x1000;
+
+  // DavinciModel is null
+  MemcpyAsyncTaskInfo memcpy_async_task_info;
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, nullptr), PARAM_INVALID);
+
+  // SetStream failed: the model has no stream registered yet
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), FAILED);
+
+  // GetOpByIndex failed
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  model.stream_list_.push_back(stream);
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR);
+
+  model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);
+  memcpy_async->set_src(0x08008000);
+
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);
+  memcpy_async->set_dst(0x08003000);
+
+  // set OpDesc attr
+  std::vector<int64_t> memory_type = { RT_MEMORY_TS_4G };
+  AttrUtils::SetListInt(model.op_list_[6], ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type);
+  memcpy_async->set_dst_max(0);
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), FAILED);
+  memcpy_async->set_dst_max(512);
+
+  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
+  model.op_list_[6]->AddInputDesc(tensor);
+  model.op_list_[6]->AddOutputDesc(tensor);
+  model.op_list_[6]->SetInputOffset({1024});
+  model.op_list_[6]->SetOutputOffset({5120});
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);
+
+  memcpy_async->set_dst(0x08009000);
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);
+
+  task_def.clear_memcpy_async();
+}
+
+TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_init_known_node) {
+  DavinciModel model(0, nullptr);
+  model.SetKnownNode(true);
+  domi::TaskDef task_def;
+  task_def.set_stream_id(0);
+
+  domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
+  memcpy_async->set_dst(10);
+  memcpy_async->set_dst_max(512);
+  memcpy_async->set_src(10);
+  memcpy_async->set_count(1);
+  memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
+  memcpy_async->set_op_index(6);
+
+  model.runtime_param_.logic_mem_base = 0x8003000;
+  model.runtime_param_.logic_weight_base = 0x8008000;
+  model.runtime_param_.logic_var_base = 0x800e000;
+  model.runtime_param_.mem_size = 0x5000;
+  model.runtime_param_.weight_size = 0x6000;
+  model.runtime_param_.var_size = 0x1000;
+
+  // GetOpByIndex src failed
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  model.stream_list_.push_back(stream);
+  MemcpyAsyncTaskInfo memcpy_async_task_info;
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR);
+
+  model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
+  memcpy_async->set_src(0x08008000);
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);
+
+  // set OpDesc attr
+  AttrUtils::SetStr(model.op_list_[6], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");
+  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
+  model.op_list_[6]->AddInputDesc(tensor);
+  model.op_list_[6]->AddOutputDesc(tensor);
+  memcpy_async->set_dst_max(0);
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);
+
+  memcpy_async->set_dst_max(0);
+  model.op_list_[6]->SetInputOffset({1024});
+  model.op_list_[6]->SetOutputOffset({5120});
+  EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);
+
+  task_def.clear_memcpy_async();
+}
+
+TEST_F(UtestMemcpyAsyncTaskInfo, success_distribute) {
+  DavinciModel model(0, nullptr);
+  model.ge_model_ = MakeShared<GeModel>();
+
+  auto model_task_def = MakeShared<domi::ModelTaskDef>();
+  domi::TaskDef *task_def = model_task_def->add_task();
+  task_def->set_type(RT_MODEL_TASK_MEMCPY_ASYNC);
+  domi::KernelDef *kernel_def = task_def->mutable_kernel();
+  domi::KernelContext *ctx = kernel_def->mutable_context();
+  ctx->set_op_index(0);
+  model.op_list_[0] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
+  TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task_def->type()));
+
+  model.task_list_ = { task_info };
+  model.ge_model_->SetModelTaskDef(model_task_def);
+
+  EXPECT_EQ(model.DistributeTask(), SUCCESS);
+  EXPECT_EQ(task_info->Distribute(), SUCCESS);
+  task_info->Release();
+}
+
+TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_calculate_args) {
+  DavinciModel model(0, nullptr);
+  domi::TaskDef task_def;
+
+  domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
+  memcpy_async->set_dst(0x08003000);
+  memcpy_async->set_dst_max(512);
+  memcpy_async->set_src(0x08008000);
+  memcpy_async->set_count(1);
+  memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
+  memcpy_async->set_op_index(0);
+
+  model.op_list_[0] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
+  AttrUtils::SetStr(model.op_list_[0], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");
+
+  MemcpyAsyncTaskInfo memcpy_async_task_info;
+  EXPECT_EQ(memcpy_async_task_info.CalculateArgs(task_def, &model), SUCCESS);
+}
+
+TEST_F(UtestMemcpyAsyncTaskInfo, memcpy_async_update_args) {
+  DavinciModel model(0, nullptr);
+
+  MemcpyAsyncTaskInfo memcpy_async_task_info;
+  memcpy_async_task_info.davinci_model_ = &model;
+
+  EXPECT_EQ(memcpy_async_task_info.UpdateArgs(), SUCCESS);
+}
+
+}  // namespace ge
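Note (reviewer sketch, not part of the patch): the memcpy Init() tests above flip between SUCCESS and PARAM_INVALID largely by whether src/dst fall inside the logic address ranges configured on model.runtime_param_ (feature-map, weight, and variable bases plus their sizes). The following standalone C++ sketch mirrors that assumed range check; LogicRange and IsInLogicRange are hypothetical names used only for illustration, and the real validation inside the task-info Init path may differ in detail.

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the logic-address ranges the tests configure on
// model.runtime_param_ (feature map, weight, variable).
struct LogicRange {
  uint64_t base;
  uint64_t size;
};

// Assumed check: a logic address is usable only if it falls inside one of the
// configured ranges. This mirrors why dst = 0x08003000 (feature-map range) and
// src = 0x08008000 / 0x08009000 (weight range) drive Init() to SUCCESS in the
// tests above, while dst = src = 10 drives it to PARAM_INVALID.
static bool IsInLogicRange(uint64_t addr, const LogicRange ranges[], size_t n) {
  for (size_t i = 0; i < n; ++i) {
    if (addr >= ranges[i].base && addr < ranges[i].base + ranges[i].size) {
      return true;
    }
  }
  return false;
}

int main() {
  const LogicRange ranges[] = {
      {0x8003000, 0x5000},  // logic_mem_base / mem_size
      {0x8008000, 0x6000},  // logic_weight_base / weight_size
      {0x800e000, 0x1000},  // logic_var_base / var_size
  };
  std::cout << IsInLogicRange(0x08003000, ranges, 3) << "\n";  // 1 -> SUCCESS path
  std::cout << IsInLogicRange(0x08009000, ranges, 3) << "\n";  // 1 -> SUCCESS path
  std::cout << IsInLogicRange(10, ranges, 3) << "\n";          // 0 -> PARAM_INVALID path
  return 0;
}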