TaskInfo: release OpDesc after Distribute

pull/703/head
zhangxiaokun 4 years ago
parent eed5c0e0bc
commit 1158bf03c8
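
In short: the task-info classes stop keeping an OpDescPtr (and per-kind address vectors) alive for the whole model lifetime. Whatever a task needs at run time is resolved while the task is initialized and cached as plain addresses (io_addrs_), so the OpDesc can be released once the task has been distributed. A minimal sketch of the pattern, reusing the GE names that appear in this diff (DavinciModel, ModelUtils, RuntimeParam, GE_CHECK_NOTNULL); the class itself is a simplified stand-in, not the actual task-info code:

class SomeTaskInfo {
 public:
  Status Init(DavinciModel *davinci_model, uint32_t op_index) {
    // Look the OpDesc up locally instead of storing it in a member.
    const auto op_desc = davinci_model->GetOpByIndex(op_index);
    GE_CHECK_NOTNULL(op_desc);
    const RuntimeParam &rts_param = davinci_model->GetRuntimeParam();
    // Cache the raw addresses now; UpdateArgs never touches the OpDesc again.
    const auto inputs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
    const auto outputs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
    io_addrs_.insert(io_addrs_.end(), inputs.begin(), inputs.end());
    io_addrs_.insert(io_addrs_.end(), outputs.begin(), outputs.end());
    davinci_model_ = davinci_model;
    return SUCCESS;
  }

  Status UpdateArgs() {
    // Only the cached addresses are needed; the OpDesc may already be gone.
    davinci_model_->SetTotalIOAddrs(io_addrs_);
    return SUCCESS;
  }

 private:
  DavinciModel *davinci_model_ = nullptr;
  std::vector<void *> io_addrs_;  // replaces op_desc_ plus the input/output/workspace address vectors
};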

@@ -77,7 +77,7 @@ struct timeInfo {
};
// For super kernel
- static struct SuperKernelTaskInfo {
+ struct SuperKernelTaskInfo {
uint32_t last_block_dim;
uint32_t last_args_size;
uint32_t last_task_id;
@@ -117,7 +117,7 @@ enum ExecuteMode {
// comments
class DavinciModel {
- public:
+ public:
///
/// @ingroup ge
/// @brief DavinciModel constructor
@@ -283,7 +283,7 @@ public:
std::vector<TaskInfoPtr> GetTaskList() { return task_list_; }
// Modified from KernelTaskInfo.
- SuperKernelTaskInfo &GetSupperKernelTaskInfo() { return skt_info_; }
+ SuperKernelTaskInfo &GetSuperKernelTaskInfo() { return skt_info_; }
///
/// @ingroup ge
@@ -445,7 +445,6 @@ public:
const RuntimeParam &GetRuntimeParam() { return runtime_param_; }
int32_t GetDataInputTid() const { return dataInputTid; }
void SetDataInputTid(int32_t data_input_tid) { dataInputTid = data_input_tid; }
void DisableZeroCopy(const void *addr);
@@ -484,7 +483,6 @@ public:
}
void SetEndGraphId(uint32_t task_id, uint32_t stream_id);
DavinciModel &operator=(const DavinciModel &model) = delete;
DavinciModel(const DavinciModel &model) = delete;
@@ -492,46 +490,34 @@ public:
const map<int64_t, std::vector<rtStream_t>> &GetHcclFolowStream() {
return main_follow_stream_mapping_;
}
void SaveHcclFollowStream(int64_t main_stream_id, rtStream_t stream);
void InitRuntimeParams();
Status InitVariableMem();
void UpdateMemBase(uint8_t *mem_base) {
runtime_param_.mem_base = mem_base;
mem_base_ = mem_base;
}
void SetTotalArgsSize(uint32_t args_size) { total_args_size_ += args_size; }
uint32_t GetTotalArgsSize() { return total_args_size_; }
void *GetCurrentArgsAddr(uint32_t offset) {
void *cur_args = static_cast<char *>(args_) + offset;
return cur_args;
}
void SetTotalIOAddrs(vector<void *> &io_addrs) {
total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
}
void SetHybridArgsSize(uint32_t args_size) { total_hybrid_args_size_ += args_size; }
uint32_t GetHybridArgsSize() {
return total_hybrid_args_size_;
}
void *GetCurrentHybridArgsAddr(uint32_t offset) {
void *cur_args = static_cast<char *>(hybrid_addrs_) + offset;
return cur_args;
}
void SetTotalFixedAddrsSize(string tensor_name, int64_t fix_addr_size);
int64_t GetFixedAddrsSize(string tensor_name);
void *GetCurrentFixedAddr(int64_t offset) const {
void *cur_addr = static_cast<char *>(fixed_addrs_) + offset;
return cur_addr;
@@ -543,42 +529,30 @@ public:
}
return UINT32_MAX;
}
void SetKnownNode(bool known_node) { known_node_ = known_node; }
bool IsKnownNode() { return known_node_; }
Status MallocKnownArgs();
Status UpdateKnownNodeArgs(const vector<void *> &inputs, const vector<void *> &outputs);
Status CreateKnownZeroCopyMap(const vector<void *> &inputs, const vector<void *> &outputs);
Status UpdateKnownZeroCopyAddr(vector<void *> &total_io_addrs);
void SetKnownNodeAddrNotChanged(bool base_addr_not_changed) { base_addr_not_changed_ = base_addr_not_changed; }
Status GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info);
Status GetAllAippInputOutputDims(uint32_t index, std::vector<InputOutputDims> &input_dims,
std::vector<InputOutputDims> &output_dims);
void SetModelDescVersion(bool is_new_model_desc) { is_new_model_desc_ = is_new_model_desc; }
// om file name
void SetOmName(string om_name) { om_name_ = om_name; }
void SetDumpProperties(const DumpProperties &dump_properties) { data_dumper_.SetDumpProperties(dump_properties); }
const DumpProperties &GetDumpProperties() const { return data_dumper_.GetDumpProperties(); }
bool GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const {
return data_dumper_.GetOpDescInfo(stream_id, task_id, op_desc_info);
}
Status InitInputOutputForDynamic(const ComputeGraphPtr &compute_graph);
- private:
+ private:
// memory address of weights
uint8_t *weights_mem_base_;
uint8_t *var_mem_base_;
@@ -753,7 +727,6 @@ private:
Status InitTbeHandle(const OpDescPtr &op_desc);
void StoreTbeHandle(const std::string &handle_key);
void CleanTbeHandle();
///
@@ -792,7 +765,6 @@ private:
/// @return: 0 for success / others for fail
///
Status BindOutputQueue();
Status CpuModelPrepareOutput(uintptr_t addr, uint32_t size);
///
@@ -830,9 +802,7 @@ private:
Status CpuWaitEndGraph();
Status BindEnqueue();
Status CpuModelEnqueue(uint32_t queue_id, uintptr_t out_mbuf);
///
/// @ingroup ge
/// @brief definiteness queue schedule, repeat run model.
@@ -841,7 +811,6 @@ private:
Status CpuModelRepeat();
Status InitEntryTask();
Status AddHeadStream();
///
@@ -869,7 +838,6 @@ private:
void SetDataDumperArgs(const ComputeGraphPtr &compute_graph);
Status InitModelProfile();
Status SinkModelProfile();
Status SinkTimeProfile(const InputData &current_data);
@@ -878,21 +846,14 @@ private:
std::vector<ge::OutputTensorInfo> &outputs);
void ParseAIPPInfo(std::string in_out_info, InputOutputDims &dims_info);
void SetLabelForDynamic(const NodePtr &node);
void ParseDynamicOutShape(const std::vector<std::string> &str_info, std::vector<vector<int64_t>> &vec_info);
bool IsGetNextSinkDynamic(const OpDescPtr &op_desc);
void GetAllGearsInfo(const NodePtr &node);
Status GetGetDynamicDimsNodeInfo(const NodePtr &node);
Status GetGearAndRealOutSizeInfo(size_t input_count, const NodePtr &node);
Status GetRealOutputSizeOfMerge(size_t input_index, const NodePtr &merge_node);
Status GetGearAndRealOutShapeInfo(size_t input_count, const OpDescPtr &op_desc);
bool is_weight_mem_has_inited_;

@@ -59,40 +59,40 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
GELOGI("HcclTaskInfo Init, op_index is: %u", op_index);
// Get HCCL op
- op_desc_ = davinci_model->GetOpByIndex(op_index);
- GE_CHECK_NOTNULL(op_desc_);
+ const auto op_desc = davinci_model->GetOpByIndex(op_index);
+ GE_CHECK_NOTNULL(op_desc);
// Create the kernel hccl infos
- CreateKernelHcclInfo(op_desc_);
+ CreateKernelHcclInfo(op_desc);
// Initialize the hccl_type of all kernel hccl info
HcomOmeUtil::GetHcclType(task_def, kernel_hccl_infos_);
// Only in Horovod scenario should get the inputName and GeShape
- ret = HcomOmeUtil::GetHorovodInputs(op_desc_, kernel_hccl_infos_);
+ ret = HcomOmeUtil::GetHorovodInputs(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "davinci_model: GetHorovodInputs fail! domi error: %u", ret);
return ret;
}
- Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc_, kernel_hccl_infos_);
+ Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: GetHcomDataType fail! domi error: %u", dmrt);
return dmrt;
}
- dmrt = HcomOmeUtil::GetHcclCount(op_desc_, kernel_hccl_infos_);
+ dmrt = HcomOmeUtil::GetHcclCount(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: GetHcomCount fail! domi error: %u", dmrt);
return dmrt;
}
// Only HCOMBROADCAST and HVDCALLBACKBROADCAST need to get the rootId
- dmrt = HcomOmeUtil::GetAllRootId(op_desc_, kernel_hccl_infos_);
+ dmrt = HcomOmeUtil::GetAllRootId(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: Get rootId fail! domi error: %u", dmrt);
return dmrt;
}
// GE's new process: hccl declares the number of streams required, creates a stream by GE, and sends it to hccl
- ret = SetFollowStream(op_desc_, davinci_model);
+ ret = SetFollowStream(op_desc, davinci_model);
if (ret != SUCCESS) {
GELOGE(ret, "SetStream Fail.");
return ret;
@@ -100,21 +100,28 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
if (davinci_model_->IsKnownNode()) {
args_ = davinci_model_->GetCurrentArgsAddr(args_offset_);
GELOGI("Known node %s args addr %p, offset %u.", op_desc_->GetName().c_str(), args_, args_offset_);
GELOGI("Known node %s args addr %p, offset %u.", op_desc->GetName().c_str(), args_, args_offset_);
}
- ret = SetAddrs(op_desc_, kernel_hccl_infos_);
+ ret = SetAddrs(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "Setaddrs Fail.");
return ret;
}
// GE's new process: hccl declares the need for Workspace size, and GE allocates Workspace
- ret = SetWorkspace(op_desc_, kernel_hccl_infos_);
+ ret = SetWorkspace(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "SetWorkspace Fail.");
return ret;
}
+ const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
+ const auto input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
+ const auto output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
+ const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
+ io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
+ io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
+ io_addrs_.insert(io_addrs_.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
GELOGI("HcclTaskInfo Init Success");
return SUCCESS;
}
@@ -231,18 +238,7 @@ Status HcclTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *
Status HcclTaskInfo::UpdateArgs() {
GELOGI("HcclTaskInfo::UpdateArgs in.");
- const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
- input_data_addrs_ = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
- output_data_addrs_ = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
- workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc_);
- vector<void *> io_addrs;
- io_addrs.insert(io_addrs.end(), input_data_addrs_.begin(), input_data_addrs_.end());
- io_addrs.insert(io_addrs.end(), output_data_addrs_.begin(), output_data_addrs_.end());
- io_addrs.insert(io_addrs.end(), workspace_data_addrs_.begin(), workspace_data_addrs_.end());
- davinci_model_->SetTotalIOAddrs(io_addrs);
+ davinci_model_->SetTotalIOAddrs(io_addrs_);
GELOGI("HcclTaskInfo::UpdateArgs success.");
return SUCCESS;
}
@@ -261,9 +257,11 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
HcclReduceOp op_type = HCCL_REDUCE_SUM;
GE_CHECK_NOTNULL(davinci_model_);
GELOGI("Calc opType[%s] input address before. Node name[%s]", op_desc->GetType().c_str(), op_desc->GetName().c_str());
+ vector<void *> input_data_addrs;
+ vector<void *> output_data_addrs;
if (!davinci_model_->IsKnownNode()) {
- input_data_addrs_ = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
- output_data_addrs_ = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+ input_data_addrs = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+ output_data_addrs = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
}
void *input_data_addr = nullptr;
void *output_data_addr = nullptr;
@@ -275,8 +273,8 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
output_data_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() + i);
GELOGI("Hccl task info known input addr %p, output addr %p.", input_data_addr, output_data_addr);
} else {
- input_data_addr = input_data_addrs_.empty() ? nullptr : input_data_addrs_[i];
- output_data_addr = output_data_addrs_.empty() ? nullptr : output_data_addrs_[i];
+ input_data_addr = input_data_addrs.empty() ? nullptr : input_data_addrs[i];
+ output_data_addr = output_data_addrs.empty() ? nullptr : output_data_addrs[i];
}
kernel_hccl_infos[i].inputDataAddr = input_data_addr;
if (hccl_type == HCOMALLGATHER || hccl_type == HCOMRECEIVE || hccl_type == HVDCALLBACKALLGATHER) {
@@ -366,8 +364,8 @@ Status HcclTaskInfo::SetWorkspace(const std::shared_ptr<OpDesc> &op_desc,
workspace_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() +
op_desc->GetOutputsSize());
} else {
- workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
- workspace_addr = workspace_data_addrs_.empty() ? nullptr : workspace_data_addrs_[0];
+ const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+ workspace_addr = workspace_data_addrs.empty() ? nullptr : workspace_data_addrs[0];
}
}
}

@@ -76,9 +76,7 @@ class HcclTaskInfo : public TaskInfo {
uint32_t private_def_len_;
static std::mutex hccl_follow_stream_mutex_;
vector<GETaskKernelHcclInfo> kernel_hccl_infos_;
- vector<void *> input_data_addrs_;
- vector<void *> output_data_addrs_;
- vector<void *> workspace_data_addrs_;
+ vector<void *> io_addrs_;
OpDescPtr op_desc_;
void *args_;
uint32_t args_offset_;

@@ -128,7 +128,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
return RT_ERROR_TO_GE_STATUS(rt_ret);)
GELOGI("KernelExTaskInfo knonw node Init Success.");
- return SUCCESS;
+ return SetIoAddr(op_desc);
}
// 3. Set workspaceaddr, inputOutputDataAddr
@@ -192,7 +192,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
davinci_model_->SetZeroCopyAddr(op_desc, io_addrs, io_addrs.data(), input_output_addr_, addrs_size, 0);
GELOGI("KernelExTaskInfo Init Success. session id: %lu", session_id);
- return SUCCESS;
+ return SetIoAddr(op_desc);
}
Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
@@ -258,8 +258,10 @@ Status KernelExTaskInfo::SetIoAddr(const OpDescPtr &op_desc) {
}
}
}
return SUCCESS;
}
Status KernelExTaskInfo::UpdateArgs() {
GELOGI("KernelExTaskInfo::UpdateArgs in.");
davinci_model_->SetTotalIOAddrs(io_addrs_);

@@ -146,7 +146,7 @@ Status KernelTaskInfo::SaveSKTDumpInfo() {
return SUCCESS;
}
// all op in super kernel share one taskid and streamid
- const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+ const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
for (size_t i = 0; i < skt_info.op_desc_list.size(); i++) {
davinci_model_->SaveDumpTask(skt_info.last_task_id, skt_info.last_stream_id, skt_info.op_desc_list[i],
skt_info.dump_args_list[i]);
@@ -163,7 +163,7 @@ void KernelTaskInfo::UpdateSKTTaskId() {
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return;
}
- SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+ SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
skt_info.last_task_id = task_id;
skt_info.last_stream_id = stream_id;
skt_id_ = skt_info.last_task_id;
@@ -191,7 +191,7 @@ Status KernelTaskInfo::SKTFinalize() {
UpdateSKTTaskId();
GE_CHK_STATUS_RET(SaveSKTDumpInfo(), "skt save dump info failed");
GELOGI("SuperKernel Distribute [skt_id:%u]", skt_id_);
- SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+ SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
skt_info.kernel_list.clear();
skt_info.arg_list.clear();
skt_info.dump_flag_list.clear();
@@ -208,7 +208,7 @@ Status KernelTaskInfo::SKTFinalize() {
}
uint32_t KernelTaskInfo::GetDumpFlag() {
- const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+ const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
for (auto flag : skt_info.dump_flag_list) {
if (flag == RT_KERNEL_DUMPFLAG) {
return RT_KERNEL_DUMPFLAG;
@@ -218,7 +218,7 @@ uint32_t KernelTaskInfo::GetDumpFlag() {
}
Status KernelTaskInfo::SuperKernelLaunch() {
- SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+ SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
if (skt_info.kernel_list.empty()) {
GELOGI("SuperKernelLaunch: Skt_kernel_list has no task, just return");
return SUCCESS;
@@ -272,7 +272,7 @@ Status KernelTaskInfo::SuperKernelLaunch() {
}
Status KernelTaskInfo::SaveSuperKernelInfo() {
- SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+ SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
skt_info.kernel_list.push_back(stub_func_);
skt_info.arg_list.push_back(args_);
skt_info.last_stream = stream_;
@@ -328,7 +328,7 @@ bool KernelTaskInfo::IsMarkedFirstNode() {
// then may be saved to skt task list; else
// call skt launch those saved tasks before
bool KernelTaskInfo::FirstCallSKTLaunchCheck() {
- const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+ const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
return ((block_dim_ != skt_info.last_block_dim) || (stream_ != skt_info.last_stream) ||
(has_group_key_ && (group_key_ != skt_info.last_group_key)));
}
@@ -397,7 +397,7 @@ Status KernelTaskInfo::Distribute() {
call_save_dump_ = true;
} else {
/* default: not skt launch */
- const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+ const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
GELOGD(
"KernelTaskInfo Distribute Start, sktenable:%d taskid:%u sktid:%u last_sktid:%u stubfunc_name:%s "
"stubfunc:%p blockdim:%u stream:%p",
@@ -803,7 +803,6 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
GELOGE(FAILED, "flowtable is null.");
return FAILED;
}
- flowtable_size_ = flowtable.size();
}
// get smDesc stored in model
@@ -899,8 +898,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
GELOGE(init_ret, "Init aicpu task ext info failed, ext_info size=%zu", ext_info.size());
return init_ret;
}
GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc_->GetName().c_str(),
op_desc_->GetType().c_str(), ext_info.size(), aicpu_ext_info_addr_);
GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), ext_info.size(), aicpu_ext_info_addr_);
aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(aicpu_ext_info_addr_);
aicpu_param_head->extInfoLength = static_cast<uintptr_t>(ext_info.size());

@@ -38,7 +38,6 @@ class KernelTaskInfo : public TaskInfo {
flowtable_(nullptr),
block_dim_(0),
args_size_(0),
- flowtable_size_(0),
task_id_(0),
stream_id_(0),
so_name_(""),
@@ -46,7 +45,6 @@ class KernelTaskInfo : public TaskInfo {
kernel_type_(ccKernelType::CCE_AI_CORE),
dump_flag_(RT_KERNEL_DEFAULT),
dump_args_(nullptr),
- op_desc_(nullptr),
davinci_model_(nullptr),
skt_id_(0),
stub_func_name_(""),
@@ -149,7 +147,6 @@ class KernelTaskInfo : public TaskInfo {
void *flowtable_;
uint32_t block_dim_;
uint32_t args_size_;
- uint32_t flowtable_size_;
uint32_t task_id_;
uint32_t stream_id_;
std::string so_name_;

@@ -35,7 +35,6 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
kind_ = memcpy_async.kind();
dst_max_ = memcpy_async.dst_max();
OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async.op_index());
- op_desc_ = op_desc;
if (op_desc == nullptr) {
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async.op_index());
return INTERNAL_ERROR;
