profiling file to json

pull/1160/head
zhengyuanhua 4 years ago
parent 90e9c8c1e5
commit 696c7f4b8f

File diff suppressed because it is too large.

@@ -54,6 +54,8 @@ namespace {
} // namespace
namespace ge {
class OpDesc;
using OpDescPtr = std::shared_ptr<OpDesc>;
struct DeviceSubsInfo {
uint64_t module;
uint32_t subscribe_count;
@@ -82,12 +84,10 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager {
bool ProfilingModelExecuteOn() const;
// is_execute_profiling_ only used by ge option and env
bool ProfilingOn() const { return is_load_profiling_ && is_execute_profiling_; }
void ReportProfilingData(uint32_t model_id, const std::vector<TaskDescInfo> &task_desc_info,
const std::vector<ComputeGraphDescInfo> &compute_graph_desc_info);
void ReportProfilingData(uint32_t model_id, const std::vector<TaskDescInfo> &task_desc_info);
void ProfilingTaskDescInfo(uint32_t model_id, const std::vector<TaskDescInfo> &task_desc_info,
const int32_t &device_id);
void ProfilingGraphDescInfo(uint32_t model_id, const std::vector<ComputeGraphDescInfo> &compute_graph_desc_info,
const int32_t &device_id);
void ProfilingOpInputOutInfo(const TaskDescInfo &task, Json &task_json);
Status PluginInit() const;
void PluginUnInit() const;
Status CallMsprofReport(ReporterData &reporter_data) const;
@@ -95,6 +95,8 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager {
void SetMsprofCtrlCallback(MsprofCtrlCallback func) { prof_cb_.msprofCtrlCallback = func; }
void SetMsprofReporterCallback(MsprofReporterCallback func) { prof_cb_.msprofReporterCallback = func; }
void GetFpBpPoint(std::string &fp_point, std::string &bp_point);
void GetOpInputOutputInfo(const OpDescPtr &op, TaskDescInfo &task_desc_info) const;
void ReportData(const int32_t &device_id, const std::string &data, const std::string &tag_name);
private:
Status InitFromOptions(const Options &options, MsprofGeOptions &prof_conf);
Status ParseOptions(const std::string &options);
@@ -103,7 +105,6 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager {
Status ProfParseDeviceId(const std::map<std::string, std::string> &config_para,
vector<int32_t> &device_list);
uint64_t GetProfilingModule();
void GraphDescReport(const int32_t &device_id, const string &data);
void UpdateDeviceIdModuleMap(string prof_type, uint64_t module, const vector<int32_t> &device_list);
void UpdateSubscribeDeviceModuleMap(std::string prof_type, uint32_t device_id, uint64_t module);

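Note (not part of the diff): a minimal sketch of how a caller drives the slimmed-down reporting interface declared above. The helper name is hypothetical; model_id, op_desc and the filled-in TaskDescInfo are assumed to come from the executor.

// Illustrative sketch only (hypothetical helper): per-op tensor details now travel
// inside TaskDescInfo, so ReportProfilingData takes a single vector.
void ReportSingleTask(uint32_t model_id, const ge::OpDescPtr &op_desc, ge::TaskDescInfo info) {
  auto &prof_mgr = ge::ProfilingManager::Instance();
  prof_mgr.GetOpInputOutputInfo(op_desc, info);            // fills the op's input/output descriptions
  std::vector<ge::TaskDescInfo> task_desc_info{info};
  prof_mgr.ReportProfilingData(model_id, task_desc_info);  // the ComputeGraphDescInfo vector is no longer passed
}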
@@ -852,7 +852,7 @@ Status TaskGenerator::FindProfilingTaskIndex(const ComputeGraphPtr &graph, Profi
// subgraph of dynamic graph no need to find index, has been found in parent graph
if (IsSubGraphOfDynamicGraph(graph)) {
GELOGI("Graph[%s] is subgraph of dynamic graph, no nned to find index.", graph->GetName().c_str());
GELOGI("Graph[%s] is subgraph of dynamic graph, no need to find index.", graph->GetName().c_str());
return SUCCESS;
}

File diff suppressed because it is too large.

@@ -840,9 +840,6 @@ class DavinciModel {
Status TransAllVarData(ComputeGraphPtr &graph, uint32_t graph_id);
// get desc info of graph for profiling
Status GetComputeGraphInfo(vector<ComputeGraphDescInfo> &graph_desc_info);
void SetDataDumperArgs(const ComputeGraphPtr &graph, const map<string, OpDescPtr> &variable_by_name);
Status InitL1DataDumperArgs();

@@ -70,8 +70,6 @@ class NodeDoneCallback {
Status PrepareConstInputs(const NodeItem &node_item);
Status DumpDynamicNode();
Status ProfilingReport();
Status GetGraphDescInfo(const NodePtr node, const HybridModel *model,
std::vector<ComputeGraphDescInfo> &compute_graph_info);
Status GetTaskDescInfo(const NodePtr node, const HybridModel *model,
std::vector<TaskDescInfo> &task_desc_info);
GraphExecutionContext *graph_context_;
@@ -159,51 +157,14 @@ Status NodeDoneCallback::GetTaskDescInfo(const NodePtr node, const HybridModel *
}
GELOGD("GetTaskDescInfo of node [%s] start.", node->GetName().c_str());
auto &prof_mgr = ProfilingManager::Instance();
task_desc_info = context_->GetProfilingTaskDescInfo();
context_->ClearProfilingTaskDescInfo();
return SUCCESS;
}
Status NodeDoneCallback::GetGraphDescInfo(const NodePtr node, const HybridModel *model,
std::vector<ComputeGraphDescInfo> &compute_graph_info) {
GE_CHECK_NOTNULL(node);
GE_CHECK_NOTNULL(model);
GELOGD("GetComputeGraphInfo of node [%s] start.", node->GetName().c_str());
compute_graph_info = context_->GetProfilingGraphDescInfo();
context_->ClearProfilingGraphDescInfo();
for (auto &tmp_task_desc : task_desc_info) {
// save op input and output info
auto op_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);
for (auto &tmp_compute_graph_info : compute_graph_info) {
// default
if (op_desc->GetAllInputsSize() == 0) {
tmp_compute_graph_info.input_format = { FORMAT_NULL };
tmp_compute_graph_info.input_shape = { {0} };
tmp_compute_graph_info.input_data_type = { DT_UNDEFINED };
}
for (size_t i = 0; i < op_desc->GetAllInputsSize(); ++i) {
GeTensorDescPtr input_desc = op_desc->MutableInputDesc(i);
if (input_desc == nullptr) {
continue;
}
tmp_compute_graph_info.input_format.emplace_back(input_desc->GetFormat());
tmp_compute_graph_info.input_shape.emplace_back(input_desc->GetShape().GetDims());
tmp_compute_graph_info.input_data_type.emplace_back(input_desc->GetDataType());
}
if (op_desc->GetOutputsSize() == 0) {
tmp_compute_graph_info.output_format = { FORMAT_NULL };
tmp_compute_graph_info.output_shape = { {0} };
tmp_compute_graph_info.output_data_type = { DT_UNDEFINED };
}
for (size_t j = 0; j < op_desc->GetOutputsSize(); ++j) {
GeTensorDesc output_desc = op_desc->GetOutputDesc(j);
tmp_compute_graph_info.output_format.emplace_back(output_desc.GetFormat());
tmp_compute_graph_info.output_shape.emplace_back(output_desc.GetShape().GetDims());
tmp_compute_graph_info.output_data_type.emplace_back(output_desc.GetDataType());
}
prof_mgr.GetOpInputOutputInfo(op_desc, tmp_task_desc);
}
return SUCCESS;
@@ -233,15 +194,8 @@ Status NodeDoneCallback::ProfilingReport() {
return profiling_ret;
}
std::vector<ComputeGraphDescInfo> compute_graph_info;
profiling_ret = GetGraphDescInfo(node, model, compute_graph_info);
if (profiling_ret != RT_ERROR_NONE) {
GELOGE(profiling_ret, "Get graph info of node[%s] failed.", node->GetName().c_str());
return profiling_ret;
}
auto &profiling_manager = ProfilingManager::Instance();
profiling_manager.ReportProfilingData(model->GetModelId(), task_desc_info, compute_graph_info);
profiling_manager.ReportProfilingData(model->GetModelId(), task_desc_info);
return SUCCESS;
}

@@ -189,12 +189,11 @@ Status AiCoreNodeTask::ExecuteAsync(TaskContext &context, std::function<void()>
uint32_t stream_id = 0;
rtError_t rt_ret = rtGetTaskIdAndStreamID(&task_id, &stream_id); // must be called after Launch kernel
if (rt_ret != RT_ERROR_NONE) {
GELOGE(rt_ret, "Get task_id and stream_id failed.");
return FAILED;
GELOGE(RT_FAILED, "Get task_id and stream_id failed, ret: 0x%X.", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GELOGD("Aicore node[%s] task_id: %u, stream_id: %u.", context.GetNodeName(), task_id, stream_id);
(void)context.SaveProfilingTaskDescInfo(task_id, stream_id, kTaskTypeAicore, (*it)->GetBlockDim());
(void)context.SaveProfilingGraphDescInfo(task_id, stream_id);
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCoreNodeLaunchKernel] End");
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCoreNodeLaunchKernel] End");
}

@@ -201,12 +201,11 @@ Status AicpuNodeTaskBase::ExecuteAsync(TaskContext &context, std::function<void(
uint32_t stream_id = 0;
rtError_t rt_ret = rtGetTaskIdAndStreamID(&task_id, &stream_id); // must be called after Launch kernel
if (rt_ret != RT_ERROR_NONE) {
GELOGE(rt_ret, "Get task_id and stream_id failed.");
return FAILED;
GELOGE(RT_FAILED, "Get task_id and stream_id failed, ret: 0x%X.", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GELOGD("Aicpu node[%s] task_id: %u, stream_id: %u.", context.GetNodeName(), task_id, stream_id);
(void)context.SaveProfilingTaskDescInfo(task_id, stream_id, kTaskTypeAicpu, 0);
(void)context.SaveProfilingGraphDescInfo(task_id, stream_id);
auto callback = [=, &context]() {
GELOGD("Node[%s] callback start.", node_name_.c_str());
RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[TaskCallback] Start");

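Note (not part of the diff): both executor hunks above replace the flat FAILED return with RT_ERROR_TO_GE_STATUS so the original runtime error code is preserved. A hedged sketch of that shared pattern, assuming GE's logging/status macros and the runtime header are in scope; the helper name is hypothetical.

// Illustrative sketch of the error-propagation pattern used in both executors above.
Status RecordLaunchedTaskIds(uint32_t &task_id, uint32_t &stream_id) {  // hypothetical helper
  rtError_t rt_ret = rtGetTaskIdAndStreamID(&task_id, &stream_id);      // must follow the kernel launch
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Get task_id and stream_id failed, ret: 0x%X.", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);  // keep the rtError_t instead of collapsing it to FAILED
  }
  return SUCCESS;
}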
@@ -515,7 +515,7 @@ Status TaskContext::Synchronize() {
}
Status TaskContext::SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream_id,
uint32_t task_type, uint32_t block_dim) {
const std::string &task_type, uint32_t block_dim) {
if (ProfilingManager::Instance().ProfilingModelExecuteOn()) {
const NodeItem &node_item = GetNodeItem();
auto op_desc = node_item.GetOpDesc();
@@ -525,11 +525,11 @@ Status TaskContext::SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream
const HybridModel *model = graph_context->model;
GE_CHECK_NOTNULL(model);
std::string op_name = op_desc->GetName();
std::string dynamic_model_name = model->GetModelName();
TaskDescInfo tmp_task_desc_info;
tmp_task_desc_info.model_name = dynamic_model_name;
tmp_task_desc_info.op_name = op_name;
tmp_task_desc_info.op_name = op_desc->GetName();
tmp_task_desc_info.op_type = op_desc->GetType();
tmp_task_desc_info.block_dim = block_dim;
tmp_task_desc_info.task_type = task_type;
tmp_task_desc_info.task_id = task_id;
@@ -546,31 +546,5 @@ NodeState *TaskContext::GetNodeState() const {
return node_state_;
}
Status TaskContext::SaveProfilingGraphDescInfo(uint32_t task_id, uint32_t stream_id) {
if (ProfilingManager::Instance().ProfilingModelExecuteOn()) {
const NodeItem &node_item = GetNodeItem();
auto op_desc = node_item.GetOpDesc();
GE_CHECK_NOTNULL(op_desc);
const GraphExecutionContext *graph_context = GetExecutionContext();
GE_CHECK_NOTNULL(graph_context);
const HybridModel *model = graph_context->model;
GE_CHECK_NOTNULL(model);
std::string dynamic_model_name = model->GetModelName();
auto op_mode = static_cast<uint32_t>(domi::ImplyType::INVALID);
if (AttrUtils::GetInt(op_desc, ATTR_NAME_IMPLY_TYPE, op_mode) &&
op_mode == static_cast<uint32_t>(domi::ImplyType::TVM)) {
ComputeGraphDescInfo tmp_compute_graph_info;
tmp_compute_graph_info.model_name = dynamic_model_name;
tmp_compute_graph_info.op_name = op_desc->GetName();
tmp_compute_graph_info.op_type = op_desc->GetType();
tmp_compute_graph_info.task_id = task_id;
tmp_compute_graph_info.stream_id = stream_id;
compute_graph_info.emplace_back(tmp_compute_graph_info);
}
}
return SUCCESS;
}
} // namespace hybrid
} // namespace ge

@@ -113,13 +113,10 @@ class TaskContext {
void *handle_ = nullptr;
const std::vector<TaskDescInfo>& GetProfilingTaskDescInfo() const { return task_desc_info; }
Status SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream_id, uint32_t task_type, uint32_t block_dim);
Status SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream_id,
const std::string &task_type, uint32_t block_dim);
void ClearProfilingTaskDescInfo() { task_desc_info.clear(); }
const std::vector<ComputeGraphDescInfo>& GetProfilingGraphDescInfo() const { return compute_graph_info; }
Status SaveProfilingGraphDescInfo(uint32_t task_id, uint32_t stream_id);
void ClearProfilingGraphDescInfo() { compute_graph_info.clear(); }
private:
TaskContext(GraphExecutionContext *execution_context,
NodeState *node_state,
@@ -141,7 +138,6 @@ class TaskContext {
uint32_t task_id_ = 0;
uint32_t stream_id_ = 0;
std::vector<TaskDescInfo> task_desc_info;
std::vector<ComputeGraphDescInfo> compute_graph_info;
};
} // namespace hybrid
} // namespace ge

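Note (not part of the diff): with the header change above, callers pass the string constants added in ge_types.h later in this diff instead of a numeric code. A minimal sketch, assuming an initialized TaskContext named context and a block_dim taken from the launched task.

// Illustrative only: task_type is now the string constant kTaskTypeAicore ("AI_CORE")
// rather than the old numeric code 0.
uint32_t task_id = 0;
uint32_t stream_id = 0;
if (rtGetTaskIdAndStreamID(&task_id, &stream_id) == RT_ERROR_NONE) {
  (void)context.SaveProfilingTaskDescInfo(task_id, stream_id, kTaskTypeAicore, block_dim);
}
// The removed SaveProfilingGraphDescInfo/ClearProfilingGraphDescInfo pair has no direct replacement;
// op-level tensor details are collected via ProfilingManager::GetOpInputOutputInfo instead.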
@@ -45,40 +45,24 @@ Status ProfilingTaskInfo(OpTask *op_task, const string &shape_type) {
return SUCCESS;
}
string model_name;
string op_name;
TaskDescInfo tmp_task_desc_info;
uint32_t model_id;
uint32_t block_dim;
if (op_task->GetProfilingArgs(model_name, op_name, model_id, block_dim) != SUCCESS) {
if (op_task->GetProfilingArgs(tmp_task_desc_info, model_id) != SUCCESS) {
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Get profiling data of task failed");
return ACL_ERROR_GE_PARAM_INVALID;
}
GELOGD("ProfilingReport of op[%s] model[%s] start.", op_name.c_str(), model_name.c_str());
std::vector<TaskDescInfo> task_desc_info;
uint32_t task_id = 0;
uint32_t stream_id = 0;
auto rt_ret = rtGetTaskIdAndStreamID(&task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(rt_ret, "Get task_id and stream_id failed.");
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GELOGD("ProfilingReport of op[%s] model[%s] start.",
tmp_task_desc_info.op_name.c_str(), tmp_task_desc_info.model_name.c_str());
TaskDescInfo tmp_task_desc_info;
tmp_task_desc_info.model_name = model_name;
tmp_task_desc_info.op_name = op_name;
tmp_task_desc_info.block_dim = block_dim;
tmp_task_desc_info.task_id = task_id;
tmp_task_desc_info.stream_id = stream_id;
tmp_task_desc_info.shape_type = shape_type;
tmp_task_desc_info.cur_iter_num = 0;
tmp_task_desc_info.task_type = op_task->GetTaskType();
GELOGD("GetTaskDescInfo of op [%s] end, task_id[%u], stream_id[%u]", op_name.c_str(), task_id, stream_id);
task_desc_info.emplace_back(tmp_task_desc_info);
std::vector<ComputeGraphDescInfo> compute_graph_info;
std::vector<TaskDescInfo> task_desc_info;
task_desc_info.emplace_back(tmp_task_desc_info);
auto &profiling_manager = ProfilingManager::Instance();
profiling_manager.ReportProfilingData(model_id, task_desc_info, compute_graph_info);
profiling_manager.ReportProfilingData(model_id, task_desc_info);
return SUCCESS;
}
} // namespace

@@ -23,6 +23,7 @@
#include "aicpu/common/aicpu_task_struct.h"
#include "common/dump/dump_manager.h"
#include "common/dump/dump_op.h"
#include "common/profiling/profiling_manager.h"
#include "common/formats/formats.h"
#include "common/math/math_util.h"
#include "framework/common/debug/log.h"
@@ -108,15 +109,29 @@ void OpTask::SetModelArgs(std::string model_name, uint32_t model_id) {
model_id_ = model_id;
}
Status OpTask::GetProfilingArgs(std::string &model_name, std::string &op_name, uint32_t &model_id,
uint32_t &block_dim) {
model_name = model_name_;
model_id = model_id_;
block_dim = block_dim_;
Status OpTask::GetProfilingArgs(TaskDescInfo &task_desc_info, uint32_t &model_id) {
uint32_t task_id = 0;
uint32_t stream_id = 0;
auto rt_ret = rtGetTaskIdAndStreamID(&task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "Get task_id and stream_id failed ret: 0x%X.", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
GE_CHECK_NOTNULL(op_desc_);
op_name = op_desc_->GetName();
string op_name = op_desc_->GetName();
GELOGD("Get profiling args of op [%s] end, task_id[%u], stream_id[%u]", op_name.c_str(), task_id, stream_id);
model_id = model_id_;
task_desc_info.model_name = model_name_;
task_desc_info.block_dim = block_dim_;
task_desc_info.task_id = task_id;
task_desc_info.stream_id = stream_id;
task_desc_info.op_name = op_name;
task_desc_info.op_type = op_desc_->GetType();
auto &prof_mgr = ProfilingManager::Instance();
prof_mgr.GetOpInputOutputInfo(op_desc_, task_desc_info);
return SUCCESS;
}
Status OpTask::UpdateRunInfo(const vector<GeTensorDesc> &input_desc, const vector<GeTensorDesc> &output_desc) {
return UNSUPPORTED;
}
@@ -153,7 +168,7 @@ Status OpTask::LaunchKernel(const vector<GeTensorDesc> &input_desc,
return UNSUPPORTED;
}
uint32_t OpTask::GetTaskType() const { return kTaskTypeInvalid; }
const std::string &OpTask::GetTaskType() const { return kTaskTypeInvalid; }
TbeOpTask::~TbeOpTask() {
if (sm_desc_ != nullptr) {
@@ -171,7 +186,7 @@ size_t TbeOpTask::GetArgSize() const { return arg_size_; }
const std::string &TbeOpTask::GetStubName() const { return stub_name_; }
uint32_t TbeOpTask::GetTaskType() const { return kTaskTypeAicore; }
const std::string &TbeOpTask::GetTaskType() const { return kTaskTypeAicore; }
void TbeOpTask::SetHandle(void *handle) {
this->handle_ = handle;
@@ -834,7 +849,7 @@ Status AiCpuBaseTask::UpdateArgTable(const SingleOpModelParam &param) {
return DoUpdateArgTable(param, false);
}
uint32_t AiCpuBaseTask::GetTaskType() const { return kTaskTypeAicpu; }
const std::string &AiCpuBaseTask::GetTaskType() const { return kTaskTypeAicpu; }
void AiCpuTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) {
arg_base = reinterpret_cast<uintptr_t *>(io_addr_host_.data());

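Note (not part of the diff): GetTaskType now returns a const std::string& bound to the namespace-scope constants, so the returned reference stays valid for the life of the program. A hedged sketch of the same convention for a hypothetical subclass; the class name is illustrative and not part of this commit.

// Illustrative sketch of a task class following the new GetTaskType convention.
class MyOpTask : public ge::OpTask {
 public:
  const std::string &GetTaskType() const override { return ge::kTaskTypeInvalid; }
  void GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) override {  // pure virtual in OpTask
    arg_base = nullptr;
    arg_count = 0;
  }
};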
@@ -43,7 +43,7 @@ class OpTask {
const vector<GeTensorDesc> &output_desc);
virtual Status UpdateArgTable(const SingleOpModelParam &param);
void SetModelArgs(std::string model_name, uint32_t model_id);
Status GetProfilingArgs(std::string &model_name, std::string &op_name, uint32_t &model_id, uint32_t &block_dim);
Status GetProfilingArgs(TaskDescInfo &task_desc_info, uint32_t &model_id);
const OpDescPtr &GetOpdesc() const {return op_desc_;}
Status OpenDump(rtStream_t stream);
virtual void GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) = 0;
@@ -52,7 +52,7 @@ class OpTask {
std::vector<GeTensorDesc> &output_desc,
std::vector<DataBuffer> &output_buffers,
rtStream_t stream);
virtual uint32_t GetTaskType() const;
virtual const std::string &GetTaskType() const;
protected:
Status DoUpdateArgTable(const SingleOpModelParam &param, bool keep_workspace);
@@ -88,7 +88,7 @@ class TbeOpTask : public OpTask {
size_t GetArgSize() const;
const std::string &GetStubName() const;
void EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, size_t max_tiling_size);
uint32_t GetTaskType() const override;
const std::string &GetTaskType() const override;
void SetHandle(void *handle);
private:
@@ -123,7 +123,7 @@ class AiCpuBaseTask : public OpTask {
~AiCpuBaseTask() override;
UnknowShapeOpType GetUnknownType() const { return unknown_type_; }
Status UpdateArgTable(const SingleOpModelParam &param) override;
uint32_t GetTaskType() const override;
const std::string &GetTaskType() const override;
protected:
Status UpdateIoAddr(const std::vector<DataBuffer> &inputs, const std::vector<DataBuffer> &outputs);

@@ -57,9 +57,9 @@ const char *const GE_ENGINE_ATTR_MEM_TYPE_HBM = "HBM";
const char *const GE_OPTION_EXEC_PLACEMENT = "ge.exec.placement";
// profiling data
const uint32_t kTaskTypeAicore = 0;
const uint32_t kTaskTypeAicpu = 1;
const uint32_t kTaskTypeInvalid = 0xFFFF;
const std::string kTaskTypeAicore = "AI_CORE";
const std::string kTaskTypeAicpu = "AI_CPU";
const std::string kTaskTypeInvalid = "TASK_TYPE_INVALID";
// Data cache, including data address and length
struct DataBuffer {
@@ -251,27 +251,19 @@ struct Options {
struct TaskDescInfo {
std::string model_name;
std::string op_name;
std::string op_type;
uint32_t block_dim;
uint32_t task_id;
uint32_t stream_id;
std::string shape_type;
int64_t cur_iter_num;
uint32_t task_type;
};
// Profiling info of graph
struct ComputeGraphDescInfo {
std::string model_name;
std::string op_name;
std::string op_type;
std::string task_type;
std::vector<Format> input_format;
std::vector<std::vector<int64_t>> input_shape;
std::vector<DataType> input_data_type;
std::vector<Format> output_format;
std::vector<std::vector<int64_t>> output_shape;
std::vector<DataType> output_data_type;
uint32_t task_id;
uint32_t stream_id;
};
struct OpDescInfo {

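Note (not part of the diff): after the two hunks above, the task-type codes are strings and per-op tensor descriptions are intended to ride on TaskDescInfo rather than a separate ComputeGraphDescInfo. A minimal sketch of filling the consolidated record; the concrete values, the "static" shape type, and the local task_id/stream_id/op_desc are assumptions for illustration.

// Illustrative sketch of the consolidated profiling record.
ge::TaskDescInfo info;
info.model_name = "resnet_50";          // example value
info.op_name = "relu";                  // example value
info.op_type = "Relu";                  // example value
info.block_dim = 1;
info.task_id = task_id;                 // assumed: obtained from rtGetTaskIdAndStreamID
info.stream_id = stream_id;
info.shape_type = "static";             // example value
info.cur_iter_num = 0;
info.task_type = ge::kTaskTypeAicore;   // string constant, replaces the old numeric code
// Tensor formats/shapes/data types are filled centrally by the profiling manager:
ge::ProfilingManager::Instance().GetOpInputOutputInfo(op_desc, info);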
@@ -761,7 +761,7 @@ set(GENERATOR_TEST_FILES
)
set(SINGLE_OP_TEST_FILES
#"single_op/single_op_model_unittest.cc"
"single_op/single_op_model_unittest.cc"
"single_op/single_op_manager_unittest.cc"
"single_op/stream_resource_unittest.cc"
"single_op/single_op_task_unittest.cc"

@@ -890,4 +890,11 @@ TEST_F(UtestDavinciModel, Sink_model_profile) {
model.SinkModelProfile();
}
TEST_F(UtestDavinciModel, Sink_time_profile) {
ProfilingManager::Instance().prof_cb_.msprofReporterCallback = MsprofReport;
DavinciModel model(0, nullptr);
InputData current_data;
model.SinkTimeProfile(current_data);
}
} // namespace ge

@@ -40,6 +40,10 @@ class UtestSingleOpModel : public testing::Test {
void TearDown() {}
};
//rt api stub
rtError_t rtGetTaskIdAndStreamID(uint32_t *taskId, uint32_t *streamId) {
return RT_ERROR_NONE;
}
/*
TEST_F(UtestSingleOpModel, test_init_model) {
string model_data_str = "123456789";
@@ -101,9 +105,9 @@ TEST_F(UtestSingleOpModel, test_set_inputs_and_outputs) {
std::mutex stream_mu_;
rtStream_t stream_ = nullptr;
SingleOp single_op(&stream_mu_, stream_);
ASSERT_EQ(model.SetInputsAndOutputs(single_op), SUCCESS);
// SingleOp single_op(&stream_mu_, stream_);
//
// ASSERT_EQ(model.SetInputsAndOutputs(single_op), SUCCESS);
}
/*
TEST_F(UtestSingleOpModel, test_build_kernel_task) {
@@ -148,7 +152,7 @@ TEST_F(UtestSingleOpModel, test_init) {
ASSERT_EQ(op_model.Init(), FAILED);
}
*/
/*
TEST_F(UtestSingleOpModel, test_parse_arg_table) {
string model_data_str = "123456789";
SingleOpModel op_model("model", model_data_str.c_str(), model_data_str.size());
@@ -173,3 +177,23 @@ TEST_F(UtestSingleOpModel, test_parse_arg_table) {
ASSERT_EQ(op.arg_table_[1].size(), 1);
ASSERT_EQ(op.arg_table_[1].front(), &arg_base[0]);
}
*/
TEST_F(UtestSingleOpModel, test_op_task_get_profiler_args) {
string name = "relu";
string type = "relu";
auto op_desc = std::make_shared<ge::OpDesc>(name, type);
op_desc->SetStreamId(0);
op_desc->SetId(0);
TbeOpTask task;
task.op_desc_ = op_desc;
task.model_name_ = "resnet_50";
task.model_id_ = 1;
TaskDescInfo task_desc_info;
uint32_t model_id;
task.GetProfilingArgs(task_desc_info, model_id);
ASSERT_EQ(task_desc_info.model_name, "resnet_50");
ASSERT_EQ(model_id, 1);
}
