!750 Add SetIoAddrs for UpdateArgs.

From: @zhangxiaokun9
Reviewed-by: @wangxiaotian22, @xchu42
Signed-off-by: @ji_chen
pull/750/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit 0996fda674

@ -108,6 +108,7 @@ std::mutex DavinciModel::tvm_bin_mutex_;
DavinciModel::DavinciModel(int32_t priority, const std::shared_ptr<ModelListener> &listener)
: weights_mem_base_(nullptr),
var_mem_base_(nullptr),
fixed_mem_base_(0),
mem_base_(nullptr),
is_inner_mem_base_(false),
is_inner_weight_base_(false),
@ -139,6 +140,7 @@ DavinciModel::DavinciModel(int32_t priority, const std::shared_ptr<ModelListener
is_l1_fusion_enable_(false),
is_first_execute_(true) {
op_list_.clear();
skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
}
DavinciModel::~DavinciModel() {
@ -261,6 +263,7 @@ Status DavinciModel::Assign(const GeModelPtr &ge_model) {
/// @return: void
///
void DavinciModel::Shrink() {
skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
ge_model_.reset(); // delete object.
}
@ -668,6 +671,7 @@ Status DavinciModel::Init(void *dev_ptr, size_t mem_size, void *weight_ptr, size
data_inputer_ = new (std::nothrow) DataInputer();
GE_CHK_BOOL_RET_STATUS(data_inputer_ != nullptr, MEMALLOC_FAILED, "data_inputer_ is nullptr.");
}
fixed_mem_base_ = reinterpret_cast<uintptr_t>(mem_base_);
GE_TIMESTAMP_END(InitModelMem, "GraphLoader::InitModelMem");
for (const ge::NodePtr &node : compute_graph->GetDirectNode()) {
@ -2826,7 +2830,32 @@ Status DavinciModel::CreateKnownZeroCopyMap(const vector<void *> &inputs, const
return SUCCESS;
}
void DavinciModel::SetTotalIOAddrs(const vector<void *> &io_addrs) {
if (fixed_mem_base_ == reinterpret_cast<uintptr_t>(mem_base_)) {
total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
return;
}
for (size_t i = 0; i < io_addrs.size(); ++i) {
uintptr_t addr = reinterpret_cast<uintptr_t>(io_addrs[i]);
if ((fixed_mem_base_ <= addr) && (addr < fixed_mem_base_ + runtime_param_.mem_size)) {
total_io_addrs_.emplace_back(mem_base_ + (addr - fixed_mem_base_));
} else {
total_io_addrs_.emplace_back(io_addrs[i]);
}
}
}
Status DavinciModel::UpdateKnownZeroCopyAddr(vector<void *> &total_io_addrs) {
if (fixed_mem_base_ != reinterpret_cast<uintptr_t>(mem_base_)) {
for (size_t i = 0; i < total_io_addrs.size(); ++i) {
uintptr_t addr = reinterpret_cast<uintptr_t>(total_io_addrs[i]);
if ((fixed_mem_base_ <= addr) && (addr < fixed_mem_base_ + runtime_param_.mem_size)) {
total_io_addrs[i] = mem_base_ + (addr - fixed_mem_base_);
}
}
}
for (size_t i = 0; i < total_io_addrs.size(); ++i) {
auto it_in = knonw_input_data_info_.find(total_io_addrs[i]);
if (it_in != knonw_input_data_info_.end()) {

@ -76,6 +76,25 @@ struct timeInfo {
int64_t dumpEndTime;
};
// For super kernel
// State accumulated while fusing kernel tasks into a "super kernel".
// NOTE(review): previously a class-wide static inside KernelTaskInfo; now held
// per DavinciModel (skt_info_) and reset via aggregate assignment, so do not
// add default member initializers (would break C++11 aggregate init).
struct SuperKernelTaskInfo {
// Launch parameters of the most recently recorded kernel task.
uint32_t last_block_dim;
uint32_t last_args_size;
uint32_t last_task_id;
uint32_t last_stream_id;
void *last_stream;
void *last_sm_desc;
// Per-task data accumulated for all kernels pending fused distribution;
// the lists are parallel (index i describes the i-th pending kernel).
std::vector<void *> kernel_list;
std::vector<void *> arg_list;
std::vector<uint32_t> dump_flag_list;
std::vector<OpDescPtr> op_desc_list;
std::vector<uintptr_t> dump_args_list;
// Dump and grouping state of the last recorded task.
uint32_t last_dump_flag;
int64_t last_group_key;
uintptr_t last_dump_args;
OpDescPtr last_op;
};
struct TaskMemInfo {
int64_t input_size{0};
int64_t output_size{0};
@ -261,6 +280,9 @@ class DavinciModel {
// get updated task info list
std::vector<TaskInfoPtr> GetTaskList() { return task_list_; }
// Modified from KernelTaskInfo.
SuperKernelTaskInfo &GetSuperKernelTaskInfo() { return skt_info_; }
///
/// @ingroup ge
/// @brief get model input and output format
@ -481,9 +503,7 @@ class DavinciModel {
void *cur_args = static_cast<char *>(args_) + offset;
return cur_args;
}
void SetTotalIOAddrs(vector<void *> &io_addrs) {
total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
}
void SetTotalIOAddrs(const vector<void *> &io_addrs);
void SetHybridArgsSize(uint32_t args_size) { total_hybrid_args_size_ += args_size; }
uint32_t GetHybridArgsSize() {
return total_hybrid_args_size_;
@ -533,6 +553,7 @@ class DavinciModel {
uint8_t *weights_mem_base_;
uint8_t *var_mem_base_;
// memory address of model
uintptr_t fixed_mem_base_; // Initial of mem_base_, keep forever.
uint8_t *mem_base_;
uint8_t *p2p_mem_base_;
bool is_inner_mem_base_;
@ -996,6 +1017,9 @@ class DavinciModel {
std::multimap<uint32_t, uint32_t> op_id_map_;
std::vector<ProfileInfo> profile_list_;
// For super kernel.
SuperKernelTaskInfo skt_info_;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_DAVINCI_MODEL_H_

@ -59,40 +59,40 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
GELOGI("HcclTaskInfo Init, op_index is: %u", op_index);
// Get HCCL op
op_desc_ = davinci_model->GetOpByIndex(op_index);
GE_CHECK_NOTNULL(op_desc_);
const auto op_desc = davinci_model_->GetOpByIndex(op_index);
GE_CHECK_NOTNULL(op_desc);
// Create the kernel hccl infos
CreateKernelHcclInfo(op_desc_);
CreateKernelHcclInfo(op_desc);
// Initialize the hccl_type of all kernel hccl info
HcomOmeUtil::GetHcclType(task_def, kernel_hccl_infos_);
// Only in Horovod scenario should get the inputName and GeShape
ret = HcomOmeUtil::GetHorovodInputs(op_desc_, kernel_hccl_infos_);
ret = HcomOmeUtil::GetHorovodInputs(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "davinci_model: GetHorovodInputs fail! domi error: %u", ret);
return ret;
}
Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc_, kernel_hccl_infos_);
Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: GetHcomDataType fail! domi error: %u", dmrt);
return dmrt;
}
dmrt = HcomOmeUtil::GetHcclCount(op_desc_, kernel_hccl_infos_);
dmrt = HcomOmeUtil::GetHcclCount(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: GetHcomCount fail! domi error: %u", dmrt);
return dmrt;
}
// Only HCOMBROADCAST and HVDCALLBACKBROADCAST need to get the rootId
dmrt = HcomOmeUtil::GetAllRootId(op_desc_, kernel_hccl_infos_);
dmrt = HcomOmeUtil::GetAllRootId(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: Get rootId fail! domi error: %u", dmrt);
return dmrt;
}
// GE's new process: hccl declares the number of streams required, creates a stream by GE, and sends it to hccl
ret = SetFollowStream(op_desc_, davinci_model);
ret = SetFollowStream(op_desc, davinci_model);
if (ret != SUCCESS) {
GELOGE(ret, "SetStream Fail.");
return ret;
@ -100,21 +100,22 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
if (davinci_model_->IsKnownNode()) {
args_ = davinci_model_->GetCurrentArgsAddr(args_offset_);
GELOGI("Known node %s args addr %p, offset %u.", op_desc_->GetName().c_str(), args_, args_offset_);
GELOGI("Known node %s args addr %p, offset %u.", op_desc->GetName().c_str(), args_, args_offset_);
}
ret = SetAddrs(op_desc_, kernel_hccl_infos_);
ret = SetAddrs(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "Setaddrs Fail.");
return ret;
}
// GE's new process: hccl declares the need for Workspace size, and GE allocates Workspace
ret = SetWorkspace(op_desc_, kernel_hccl_infos_);
ret = SetWorkspace(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "SetWorkspace Fail.");
return ret;
}
SetIoAddrs(op_desc);
GELOGI("HcclTaskInfo Init Success");
return SUCCESS;
}
@ -229,20 +230,19 @@ Status HcclTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *
return SUCCESS;
}
Status HcclTaskInfo::UpdateArgs() {
GELOGI("HcclTaskInfo::UpdateArgs in.");
void HcclTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
input_data_addrs_ = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
output_data_addrs_ = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc_);
vector<void *> io_addrs;
io_addrs.insert(io_addrs.end(), input_data_addrs_.begin(), input_data_addrs_.end());
io_addrs.insert(io_addrs.end(), output_data_addrs_.begin(), output_data_addrs_.end());
io_addrs.insert(io_addrs.end(), workspace_data_addrs_.begin(), workspace_data_addrs_.end());
davinci_model_->SetTotalIOAddrs(io_addrs);
const auto input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
const auto output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
}
/// @brief Re-register this task's I/O addresses with the model before execute.
///        io_addrs_ was collected once by SetIoAddrs() during Init; the model
///        rebases them against its current memory base.
/// @return SUCCESS, or an error status if the model pointer is unset.
Status HcclTaskInfo::UpdateArgs() {
  GELOGI("HcclTaskInfo::UpdateArgs in.");
  // Guard against a task whose Init never bound a model (consistent with
  // MemcpyAsyncTaskInfo::UpdateArgs).
  GE_CHECK_NOTNULL(davinci_model_);
  davinci_model_->SetTotalIOAddrs(io_addrs_);
  GELOGI("HcclTaskInfo::UpdateArgs success.");
  return SUCCESS;
}
@ -261,9 +261,11 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
HcclReduceOp op_type = HCCL_REDUCE_SUM;
GE_CHECK_NOTNULL(davinci_model_);
GELOGI("Calc opType[%s] input address before. Node name[%s]", op_desc->GetType().c_str(), op_desc->GetName().c_str());
vector<void *> input_data_addrs;
vector<void *> output_data_addrs;
if (!davinci_model_->IsKnownNode()) {
input_data_addrs_ = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
output_data_addrs_ = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
input_data_addrs = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
output_data_addrs = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
}
void *input_data_addr = nullptr;
void *output_data_addr = nullptr;
@ -275,8 +277,8 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
output_data_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() + i);
GELOGI("Hccl task info known input addr %p, output addr %p.", input_data_addr, output_data_addr);
} else {
input_data_addr = input_data_addrs_.empty() ? nullptr : input_data_addrs_[i];
output_data_addr = output_data_addrs_.empty() ? nullptr : output_data_addrs_[i];
input_data_addr = input_data_addrs.empty() ? nullptr : input_data_addrs[i];
output_data_addr = output_data_addrs.empty() ? nullptr : output_data_addrs[i];
}
kernel_hccl_infos[i].inputDataAddr = input_data_addr;
if (hccl_type == HCOMALLGATHER || hccl_type == HCOMRECEIVE || hccl_type == HVDCALLBACKALLGATHER) {
@ -366,8 +368,8 @@ Status HcclTaskInfo::SetWorkspace(const std::shared_ptr<OpDesc> &op_desc,
workspace_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() +
op_desc->GetOutputsSize());
} else {
workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
workspace_addr = workspace_data_addrs_.empty() ? nullptr : workspace_data_addrs_[0];
const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
workspace_addr = workspace_data_addrs.empty() ? nullptr : workspace_data_addrs[0];
}
}
}

@ -35,7 +35,6 @@ class HcclTaskInfo : public TaskInfo {
ops_kernel_store_(nullptr),
private_def_(nullptr),
private_def_len_(0),
op_desc_(nullptr),
args_(nullptr),
args_offset_(0) {}
@ -52,7 +51,7 @@ class HcclTaskInfo : public TaskInfo {
Status UpdateArgs() override;
private:
ge::Status SetAddrs(const std::string &hccl_type, const std::shared_ptr<OpDesc> &op);
void SetIoAddrs(const OpDescPtr &op_desc);
Status SetAddrs(const std::shared_ptr<OpDesc> &op_desc, std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos);
@ -76,10 +75,7 @@ class HcclTaskInfo : public TaskInfo {
uint32_t private_def_len_;
static std::mutex hccl_follow_stream_mutex_;
vector<GETaskKernelHcclInfo> kernel_hccl_infos_;
vector<void *> input_data_addrs_;
vector<void *> output_data_addrs_;
vector<void *> workspace_data_addrs_;
OpDescPtr op_desc_;
vector<void *> io_addrs_;
void *args_;
uint32_t args_offset_;
};

@ -30,11 +30,7 @@
namespace ge {
Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("KernelExTaskInfo Init Start.");
if (davinci_model == nullptr) {
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}
GE_CHECK_NOTNULL(davinci_model);
davinci_model_ = davinci_model;
Status ret = SetStream(task_def.stream_id(), davinci_model_->GetStreamList());
if (ret != SUCCESS) {
@ -51,7 +47,6 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
GELOGE(INTERNAL_ERROR, "Init aicpu task info error, index is out of range!");
return INTERNAL_ERROR;
}
op_desc_ = op_desc;
// 2. Reconstruct kernelExDef.args to STR_FWK_OP_KERNEL
STR_FWK_OP_KERNEL fwk_op_kernel = {0};
@ -79,8 +74,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
return RT_ERROR_TO_GE_STATUS(rt_ret);)
}
GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc_->GetName().c_str(),
op_desc_->GetType().c_str(), ext_info.size(), ext_info_addr_);
GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), ext_info.size(), ext_info_addr_);
// 2.1 get loop cond variable for tensor array write
uint64_t step_id_addr = 0;
@ -132,6 +127,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
SetIoAddrs(op_desc);
GELOGI("KernelExTaskInfo knonw node Init Success.");
return SUCCESS;
}
@ -195,7 +191,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
return RT_ERROR_TO_GE_STATUS(rt_ret);)
davinci_model_->SetZeroCopyAddr(op_desc, io_addrs, io_addrs.data(), input_output_addr_, addrs_size, 0);
SetIoAddrs(op_desc);
GELOGI("KernelExTaskInfo Init Success. session id: %lu", session_id);
return SUCCESS;
}
@ -236,36 +232,38 @@ Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciMod
return SUCCESS;
}
Status KernelExTaskInfo::UpdateArgs() {
GELOGI("KernelExTaskInfo::UpdateArgs in.");
void KernelExTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
vector<void *> io_addrs;
if (!op_desc_->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs.insert(io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
if (!op_desc->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
} else {
string peer_input_name;
if (AttrUtils::GetStr(op_desc_, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
if (AttrUtils::GetStr(op_desc, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
uint32_t output_index = davinci_model_->GetFixedAddrOutputIndex(peer_input_name);
if (output_index > output_data_addrs.size()) {
GELOGE(FAILED, "The output data addr size[%zu] and output index[%u] are inconsistent.",
output_data_addrs.size(), output_index);
return FAILED;
return;
}
io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
for (size_t i = 0; i < output_data_addrs.size(); ++i) {
if (i == output_index) {
void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
io_addrs.emplace_back(fixed_addr);
io_addrs_.emplace_back(fixed_addr);
continue;
}
io_addrs.emplace_back(output_data_addrs[i]);
io_addrs_.emplace_back(output_data_addrs[i]);
}
}
}
}
davinci_model_->SetTotalIOAddrs(io_addrs);
/// @brief Re-register this task's I/O addresses with the model before execute.
///        io_addrs_ was collected once by SetIoAddrs() during Init; the model
///        rebases them against its current memory base.
/// @return SUCCESS, or an error status if the model pointer is unset.
Status KernelExTaskInfo::UpdateArgs() {
  GELOGI("KernelExTaskInfo::UpdateArgs in.");
  // Guard against a task whose Init never bound a model (consistent with
  // MemcpyAsyncTaskInfo::UpdateArgs).
  GE_CHECK_NOTNULL(davinci_model_);
  davinci_model_->SetTotalIOAddrs(io_addrs_);
  GELOGI("KernelExTaskInfo::UpdateArgs success.");
  return SUCCESS;
}

@ -59,6 +59,7 @@ class KernelExTaskInfo : public TaskInfo {
};
private:
Status CopyTaskInfo(const domi::KernelExDef &kernel_def, const RuntimeParam &rts_param, const OpDescPtr &op_desc);
void SetIoAddrs(const OpDescPtr &op_desc);
uint32_t task_id_;
uint32_t stream_id_;
@ -69,7 +70,7 @@ class KernelExTaskInfo : public TaskInfo {
void *input_output_addr_;
void *ext_info_addr_;
void *dump_args_;
OpDescPtr op_desc_ = nullptr;
vector<void *> io_addrs_;
uint32_t args_offset_ = 0;
int64_t fixed_addr_offset_ = 0;
};

@ -38,7 +38,6 @@ class KernelTaskInfo : public TaskInfo {
flowtable_(nullptr),
block_dim_(0),
args_size_(0),
flowtable_size_(0),
task_id_(0),
stream_id_(0),
so_name_(""),
@ -128,6 +127,7 @@ class KernelTaskInfo : public TaskInfo {
Status SuperKernelDistribute();
bool IsL1FusionOp(const OpDescPtr &op_desc);
void SetIoAddrs(const OpDescPtr &op_desc);
// For super kernel
Status SaveSKTDumpInfo();
@ -148,7 +148,6 @@ class KernelTaskInfo : public TaskInfo {
void *flowtable_;
uint32_t block_dim_;
uint32_t args_size_;
uint32_t flowtable_size_;
uint32_t task_id_;
uint32_t stream_id_;
std::string so_name_;
@ -156,7 +155,8 @@ class KernelTaskInfo : public TaskInfo {
ccKernelType kernel_type_;
uint32_t dump_flag_;
void *dump_args_;
OpDescPtr op_desc_;
OpDescPtr op_desc_; // Clear after distribute.
vector<void *> io_addrs_;
DavinciModel *davinci_model_;
uint32_t args_offset_ = 0;
uint32_t hybrid_args_offset_ = 0;
@ -186,25 +186,6 @@ class KernelTaskInfo : public TaskInfo {
void *output_addrs = nullptr;
void *attr_handle = nullptr;
} custom_info_;
// For super kernel
static struct SuperKernelTaskInfo {
uint32_t last_block_dim;
uint32_t last_args_size;
uint32_t last_task_id;
uint32_t last_stream_id;
void *last_stream;
void *last_sm_desc;
std::vector<void *> kernel_list;
std::vector<void *> arg_list;
std::vector<uint32_t> dump_flag_list;
std::vector<OpDescPtr> op_desc_list;
std::vector<uintptr_t> dump_args_list;
uint32_t last_dump_flag;
int64_t last_group_key;
uintptr_t last_dump_args;
OpDescPtr last_op;
} skt_info_;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_KERNEL_TASK_INFO_H_

@ -30,14 +30,13 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
return ret;
}
memcpy_async_ = task_def.memcpy_async();
count_ = memcpy_async_.count();
kind_ = memcpy_async_.kind();
dst_max_ = memcpy_async_.dst_max();
OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async_.op_index());
op_desc_ = op_desc;
const domi::MemcpyAsyncDef &memcpy_async = task_def.memcpy_async();
count_ = memcpy_async.count();
kind_ = memcpy_async.kind();
dst_max_ = memcpy_async.dst_max();
OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async.op_index());
if (op_desc == nullptr) {
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async_.op_index());
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async.op_index());
return INTERNAL_ERROR;
}
@ -46,13 +45,14 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
dst_ = reinterpret_cast<uint8_t *>(reinterpret_cast<uintptr_t>(src_) + sizeof(void *));
// for zero copy
kind_ = RT_MEMCPY_ADDR_DEVICE_TO_DEVICE;
GE_CHK_STATUS_RET(SetIoAddrs(op_desc, memcpy_async), "Set addrs failed");
GELOGI("MemcpyAsyncTaskInfo op name %s, src_ %p, dst_ %p, args_offset %u.",
op_desc->GetName().c_str(), src_, dst_, args_offset_);
return SUCCESS;
}
const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
ret = ModelUtils::GetRtAddress(rts_param, memcpy_async_.src(), src_);
ret = ModelUtils::GetRtAddress(rts_param, memcpy_async.src(), src_);
if (ret != SUCCESS) {
return ret;
}
@ -61,23 +61,23 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
vector<int64_t> memory_type_list;
(void)AttrUtils::GetListInt(op_desc, ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type_list);
if (!memory_type_list.empty() && memory_type_list[0] == RT_MEMORY_TS_4G) { // TS Feature, Just one.
uint64_t mem_offset = memcpy_async_.dst() - rts_param.logic_mem_base;
dst_ = static_cast<uint8_t *>(rts_param.ts_mem_mall->Acquire(mem_offset, memcpy_async_.dst_max()));
uint64_t mem_offset = memcpy_async.dst() - rts_param.logic_mem_base;
dst_ = static_cast<uint8_t *>(rts_param.ts_mem_mall->Acquire(mem_offset, memcpy_async.dst_max()));
if (dst_ == nullptr) {
return FAILED;
}
} else {
ret = ModelUtils::GetRtAddress(rts_param, memcpy_async_.dst(), dst_);
ret = ModelUtils::GetRtAddress(rts_param, memcpy_async.dst(), dst_);
if (ret != SUCCESS) {
return ret;
}
}
GELOGI("MemcpyAsyncTaskInfo Init Success, logic[0x%lx, 0x%lx], src:%p, dst:%p, max:%lu, count:%lu",
memcpy_async_.src(), memcpy_async_.dst(), src_, dst_, dst_max_, count_);
davinci_model_->DisableZeroCopy(src_);
davinci_model_->DisableZeroCopy(dst_);
GE_CHK_STATUS_RET(SetIoAddrs(op_desc, memcpy_async), "Set addrs failed");
GELOGI("MemcpyAsyncTaskInfo Init Success, logic[0x%lx, 0x%lx], src:%p, dst:%p, max:%lu, count:%lu",
memcpy_async.src(), memcpy_async.dst(), src_, dst_, dst_max_, count_);
return SUCCESS;
}
@ -115,29 +115,33 @@ Status MemcpyAsyncTaskInfo::CalculateArgs(const domi::TaskDef &task_def, Davinci
return SUCCESS;
}
Status MemcpyAsyncTaskInfo::UpdateArgs() {
GELOGI("MemcpyAsyncTaskInfo::UpdateArgs in.");
GE_CHECK_NOTNULL(davinci_model_);
Status ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async_.src(), src_);
Status MemcpyAsyncTaskInfo::SetIoAddrs(const OpDescPtr &op_desc, const domi::MemcpyAsyncDef &memcpy_async) {
uint8_t *src = nullptr;
Status ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async.src(), src);
if (ret != SUCCESS) {
return ret;
}
io_addrs_.emplace_back(reinterpret_cast<void *>(src));
ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async_.dst(), dst_);
if (op_desc->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
io_addrs_.emplace_back(fixed_addr);
} else {
uint8_t *dst = nullptr;
ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async.dst(), dst);
if (ret != SUCCESS) {
return ret;
}
io_addrs_.emplace_back(reinterpret_cast<void *>(dst));
}
vector<void *> io_addrs;
io_addrs.emplace_back(reinterpret_cast<void *>(src_));
if (op_desc_->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
io_addrs.emplace_back(fixed_addr);
} else {
io_addrs.emplace_back(reinterpret_cast<void *>(dst_));
return SUCCESS;
}
davinci_model_->SetTotalIOAddrs(io_addrs);
// Re-register this task's src/dst addresses with the model before execute.
// io_addrs_ was populated once by SetIoAddrs() during Init; the model rebases
// each address against its current memory base.
Status MemcpyAsyncTaskInfo::UpdateArgs() {
GELOGI("MemcpyAsyncTaskInfo::UpdateArgs in.");
GE_CHECK_NOTNULL(davinci_model_);
davinci_model_->SetTotalIOAddrs(io_addrs_);
GELOGI("MemcpyAsyncTaskInfo::UpdateArgs success.");
return SUCCESS;
}

@ -39,16 +39,17 @@ class MemcpyAsyncTaskInfo : public TaskInfo {
Status CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) override;
private:
Status SetIoAddrs(const OpDescPtr &op_desc, const domi::MemcpyAsyncDef &memcpy_async);
uint8_t *dst_;
uint64_t dst_max_;
uint8_t *src_;
uint64_t count_;
uint32_t kind_;
OpDescPtr op_desc_;
vector<void *> io_addrs_;
int64_t fixed_addr_offset_;
DavinciModel *davinci_model_ = nullptr;
uint32_t args_offset_ = 0;
domi::MemcpyAsyncDef memcpy_async_;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_MEMCPY_ASYNC_TASK_INFO_H_

@ -388,6 +388,7 @@ set(DISTINCT_GRAPH_LOAD_SRC_FILES
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/kernel_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/label_set_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/memcpy_addr_async_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/profiler_trace_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/stream_active_task_info.cc"
@ -565,6 +566,11 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES
"graph/load/new_model_manager_event_manager_unittest.cc"
#"graph/load/output_net_output_unittest.cc"
"graph/load/tbe_handle_store_unittest.cc"
"graph/load/hccl_task_info_unittest.cc"
"graph/load/kernel_ex_task_info_unittest.cc"
"graph/load/kernel_task_info_unittest.cc"
"graph/load/memcpy_addr_async_task_info_unittest.cc"
"graph/load/memcpy_async_task_info_unittest.cc"
#"graph/graph_load_unittest.cc"
"graph/ge_executor_unittest.cc"
)

@ -0,0 +1,134 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#define private public
#define protected public
#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/hccl_task_info.h"
namespace ge {
class UtestHcclTaskInfo : public testing::Test {
protected:
void SetUp() {}
void TearDown() {}
};
// test success GetTaskID
// GetTaskID returns the default id (0) both for a directly constructed
// HcclTaskInfo and for a task created through the factory.
TEST_F(UtestHcclTaskInfo, success_get_task_id) {
  HcclTaskInfo direct_task_info;
  EXPECT_EQ(direct_task_info.GetTaskID(), 0);

  domi::ModelTaskDef model_task_def;
  domi::TaskDef *kernel_task = model_task_def.add_task();
  kernel_task->set_type(RT_MODEL_TASK_KERNEL);
  const auto task_type = static_cast<rtModelTaskType_t>(kernel_task->type());
  TaskInfoPtr factory_task_info = TaskInfoFactory::Instance().Create(task_type);
  EXPECT_EQ(factory_task_info->GetTaskID(), 0);
}
// test init EventRecordTaskInfo
// CreateStream succeeds when asked for follow streams on a fresh model.
TEST_F(UtestHcclTaskInfo, success_create_stream) {
  HcclTaskInfo task_info;
  DavinciModel davinci_model(0, nullptr);
  EXPECT_EQ(task_info.CreateStream(3, &davinci_model, 0), SUCCESS);
}
// test hccl_Distribute
// A factory-created HCCL task fails Init on an empty TaskDef, but can still
// be released cleanly once attached to the model's task list.
TEST_F(UtestHcclTaskInfo, success_distribute7) {
  DavinciModel davinci_model(0, nullptr);
  domi::ModelTaskDef model_task_def;
  domi::TaskDef *hccl_task = model_task_def.add_task();
  hccl_task->set_type(RT_MODEL_TASK_HCCL);

  TaskInfoPtr hccl_task_info =
      TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(hccl_task->type()));
  EXPECT_EQ(FAILED, hccl_task_info->Init(*hccl_task, &davinci_model));

  davinci_model.task_list_ = std::vector<TaskInfoPtr>{hccl_task_info};
  EXPECT_EQ(hccl_task_info->Release(), SUCCESS);
}
// test hccl_Distribute
// Init fails with PARAM_INVALID when no model is supplied, then succeeds once
// the TaskDef carries a kernel_hccl section that resolves to an op in the model.
TEST_F(UtestHcclTaskInfo, success_distribute7_with_hccl_type) {
DavinciModel model(0, nullptr);
// Model needs at least one stream for Init's SetStream step.
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_ = { stream };
domi::TaskDef task_def;
HcclTaskInfo hccl_task_info;
// Null model pointer is rejected up front.
EXPECT_EQ(hccl_task_info.Init(task_def, nullptr), PARAM_INVALID);
// Populate a kernel_hccl def whose op_index (0) resolves in op_list_.
domi::KernelHcclDef *kernel_hccl_def = task_def.mutable_kernel_hccl();
kernel_hccl_def->set_op_index(0);
kernel_hccl_def->set_hccl_type("HcomBroadcast");
model.op_list_[0] = std::make_shared<OpDesc>("FrameworkOp", "FrameworkOp");
EXPECT_EQ(hccl_task_info.Init(task_def, &model), SUCCESS);
task_def.clear_kernel_hccl();
}
// test hccl_GetPrivateDefByTaskDef
// Init parses ops_kernel_store_ptr / private_def from the TaskDef but still
// fails (PARAM_INVALID) because the task carries no kernel_hccl section;
// Release must succeed regardless.
TEST_F(UtestHcclTaskInfo, success_hccl_get_private_def_by_task_def) {
DavinciModel model(0, nullptr);
domi::ModelTaskDef model_task_def;
domi::TaskDef *task7 = model_task_def.add_task();
task7->set_type(RT_MODEL_TASK_HCCL);
// for SetStream
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
// for GetPrivateDefByTaskDef
task7->set_ops_kernel_store_ptr(10);
std::string value = "hccl_task";
task7->set_private_def(value);
TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
// for Distribute
EXPECT_EQ(task_info7->Init(task7[0], &model), PARAM_INVALID);
EXPECT_EQ(task_info7->Release(), SUCCESS);
}
// test hccl_task_TransToGETaskInfo
// TransToGETaskInfo can be called on a default-constructed HcclTaskInfo
// without crashing, and a factory-created task releases cleanly.
TEST_F(UtestHcclTaskInfo, success_hccl_trans_to_ge_task_info) {
DavinciModel model(0, nullptr);
domi::ModelTaskDef model_task_def;
domi::TaskDef *task7 = model_task_def.add_task();
// for type
task7->set_type(RT_MODEL_TASK_HCCL);
TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
// Fill a GETaskInfo from an uninitialized task; only checks it does not throw.
GETaskInfo ge_task;
HcclTaskInfo hccl_task_info;
hccl_task_info.TransToGETaskInfo(ge_task);
EXPECT_EQ(task_info7->Release(), SUCCESS);
}
} // namespace ge

@ -0,0 +1,144 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#define private public
#define protected public
#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/kernel_ex_task_info.h"
#include "cce/aicpu_engine_struct.h"
namespace ge {
extern OpDescPtr CreateOpDesc(string name, string type);
class UtestKernelExTaskInfo : public testing::Test {
protected:
void SetUp() {}
void TearDown() {}
};
// test kernel_ex_task_Release
// Walks Init through its failure modes in order: null model, missing stream,
// op_index out of range, missing/short task_info, and finally a failure even
// after args and workspace are populated (fake workspace offsets).
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_init) {
domi::TaskDef task_def;
KernelExTaskInfo kernel_ex_task_info;
// Null model is rejected up front.
EXPECT_EQ(kernel_ex_task_info.Init(task_def, nullptr), PARAM_INVALID);
DavinciModel model(0, nullptr);
// No stream registered yet -> SetStream fails.
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
// op_index 1 is out of range (only op_list_[0] exists).
kernel_ex_def->set_op_index(1);
model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), INTERNAL_ERROR);
kernel_ex_def->clear_op_index();
kernel_ex_def->set_op_index(0);
// Valid op index, but no task_info payload yet.
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);
kernel_ex_def->set_task_info("KernelEx");
kernel_ex_def->set_task_info_size(1);
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);
// Provide args of exactly the framework-kernel struct size.
constexpr uint32_t arg_size = sizeof(STR_FWK_OP_KERNEL);
string value1(arg_size, 'a');
kernel_ex_def->set_args_size(arg_size);
kernel_ex_def->set_args(value1);
OpDescPtr v_op_desc = CreateOpDesc("ge_global_step", "Variable");
model.variable_op_list_.push_back(v_op_desc);
model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({150}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);
task_def.clear_kernel_ex();
}
// test kernel_ex_task_Release
// Release succeeds for every combination of allocated/null kernel_buf_ and
// input_output_addr_; it must free whichever buffers are set without error.
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_release) {
KernelExTaskInfo kernel_ex_task_info;
// Both buffers null.
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);
// Only input_output_addr_ allocated.
kernel_ex_task_info.kernel_buf_ = nullptr;
rtMalloc(&kernel_ex_task_info.input_output_addr_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);
// Only kernel_buf_ allocated.
kernel_ex_task_info.input_output_addr_ = nullptr;
rtMalloc(&kernel_ex_task_info.kernel_buf_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);
// Both allocated.
rtMalloc(&kernel_ex_task_info.kernel_buf_, 64, RT_MEMORY_HBM);
rtMalloc(&kernel_ex_task_info.input_output_addr_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);
}
// test kernel_ex_task_info copy: Init() validation of the op workspace
// that receives the serialized task_info (size 150 here).
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_info_copy) {
  DavinciModel model(0, nullptr);
  model.runtime_param_.mem_base = (uint8_t *)0x12345;  // fake device base, restored to null at the end
  model.runtime_param_.mem_size = 100332000;
  rtStream_t stream = nullptr;
  rtStreamCreate(&stream, 0);
  model.stream_list_.push_back(stream);
  domi::TaskDef task_def;
  KernelExTaskInfo kernel_ex_task_info;
  domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
  kernel_ex_def->set_task_info_size(150);
  kernel_ex_def->set_op_index(0);
  model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");
  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);  // workspace empty.
  model.op_list_[0]->SetWorkspace({100331008});  // offset
  model.op_list_[0]->SetWorkspaceBytes({0});     // length
  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);  // workspace addr is null.
  model.op_list_[0]->SetWorkspace({100331008});  // offset
  model.op_list_[0]->SetWorkspaceBytes({10});    // length
  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);  // workspace addr is small.
  // Workspace of 150 bytes matches task_info_size, so Init succeeds.
  model.op_list_[0]->SetWorkspace({100331008});  // offset
  model.op_list_[0]->SetWorkspaceBytes({150});   // length
  EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), SUCCESS);
  task_def.clear_kernel_ex();
  model.runtime_param_.mem_base = nullptr;  // avoid freeing the fake base in ~DavinciModel
}
// CalculateArgs() is expected to return FAILED when the referenced op
// carries the ATTR_DYNAMIC_SHAPE_FIXED_ADDR attribute.
TEST_F(UtestKernelExTaskInfo, kernel_ex_task_info_calculate_args) {
  // Model with a single FrameworkOp that has the fixed-addr attribute set.
  DavinciModel model(0, nullptr);
  model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");
  AttrUtils::SetStr(model.op_list_[0], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");

  // Task definition pointing at that op.
  domi::TaskDef task_def;
  domi::KernelExDef *kernel_def = task_def.mutable_kernel_ex();
  kernel_def->set_op_index(0);

  KernelExTaskInfo task_info;
  EXPECT_EQ(task_info.CalculateArgs(task_def, &model), FAILED);
}
} // namespace ge

File diff suppressed because it is too large Load Diff

@ -0,0 +1,138 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#define private public
#define protected public
#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/memcpy_addr_async_task_info.h"
namespace ge {
class UtestMemcpyAddrAsyncTaskInfo : public testing::Test {
protected:
void SetUp() {}
void TearDown() {}
};
extern OpDescPtr CreateOpDesc(string name, string type);
// Walks Init() through each failure branch in order, then a success:
// null model, missing stream, unknown op index, unresolvable src/dst
// logic addresses, and finally valid addresses inside the logic ranges.
TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_addr_async_task_init) {
  DavinciModel model(0, nullptr);
  domi::TaskDef task_def;
  task_def.set_stream_id(0);
  domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
  memcpy_async->set_dst(10);      // outside every logic range below
  memcpy_async->set_dst_max(512);
  memcpy_async->set_src(10);      // outside every logic range below
  memcpy_async->set_count(1);
  memcpy_async->set_kind(RT_MEMCPY_ADDR_DEVICE_TO_DEVICE);
  memcpy_async->set_op_index(6);
  // Logic address layout: mem [0x8003000, +0x5000), weight [0x8008000, +0x6000),
  // var [0x800e000, +0x1000).
  model.runtime_param_.logic_mem_base = 0x8003000;
  model.runtime_param_.logic_weight_base = 0x8008000;
  model.runtime_param_.logic_var_base = 0x800e000;
  model.runtime_param_.mem_size = 0x5000;
  model.runtime_param_.weight_size = 0x6000;
  model.runtime_param_.var_size = 0x1000;
  // DavinciModel is null
  MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
  EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, nullptr), PARAM_INVALID);
  // SetStream failed: stream_list_ is still empty.
  EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), FAILED);
  // GetOpByIndex src failed: no op registered at index 6 yet.
  rtStream_t stream = nullptr;
  rtStreamCreate(&stream, 0);
  model.stream_list_.push_back(stream);
  EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), INTERNAL_ERROR);
  // GetRuntimeAddress src failed: src (10) resolves to no logic range.
  model.op_list_[6] = CreateOpDesc("memcpyaddrasync", MEMCPYADDRASYNC);
  EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID);
  // GetRuntimeAddress dst failed: dst (10) still unresolvable.
  memcpy_async->set_src(0x08003000);
  EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID);
  // Both addresses valid -> Init succeeds.
  memcpy_async->set_dst(0x08008000);
  EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), SUCCESS);
  task_def.clear_memcpy_async();
}
// Init() failure path with a fully populated op (tensors and offsets)
// but src/dst logic addresses that resolve to no mapped range.
TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_async_task_init_failed) {
  DavinciModel model(0, nullptr);
  domi::TaskDef task_def;
  task_def.set_stream_id(0);
  rtStream_t stream = nullptr;
  rtStreamCreate(&stream, 0);
  model.stream_list_.push_back(stream);
  domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
  memcpy_async->set_dst(10);      // not inside any logic range below
  memcpy_async->set_dst_max(512);
  memcpy_async->set_src(10);      // not inside any logic range below
  memcpy_async->set_count(1);
  memcpy_async->set_kind(RT_MEMCPY_ADDR_DEVICE_TO_DEVICE);
  memcpy_async->set_op_index(6);
  model.runtime_param_.logic_mem_base = 0x8003000;
  model.runtime_param_.logic_weight_base = 0x8008000;
  model.runtime_param_.logic_var_base = 0x800e000;
  model.runtime_param_.mem_size = 0x5000;
  model.runtime_param_.weight_size = 0x6000;
  model.runtime_param_.var_size = 0x1000;
  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYADDRASYNC);
  model.op_list_[6]->AddInputDesc(tensor);
  model.op_list_[6]->AddOutputDesc(tensor);
  model.op_list_[6]->SetInputOffset({1024});
  model.op_list_[6]->SetOutputOffset({5120});
  // Expect PARAM_INVALID: src/dst (10) cannot be resolved to a runtime
  // address, mirroring the GetRuntimeAddress failures in the test above.
  MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
  EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID);
  task_def.clear_memcpy_async();
}
// CalculateArgs() succeeds for a plain device-to-device memcpy task.
TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_async_calculate_args) {
  DavinciModel model(0, nullptr);
  domi::TaskDef task_def;
  domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
  memcpy_async->set_dst(0x08003000);
  memcpy_async->set_dst_max(512);
  memcpy_async->set_src(0x08008000);
  memcpy_async->set_count(1);
  memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
  memcpy_async->set_op_index(0);
  // CalculateArgs with a valid model is expected to return SUCCESS.
  MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
  EXPECT_EQ(memcpy_addr_async_task_info.CalculateArgs(task_def, &model), SUCCESS);
}
} // namespace ge

File diff suppressed because it is too large Load Diff
Loading…
Cancel
Save