Merge branch 'development' of https://gitee.com/zhao_zhixuan/graphengine into development

pull/147/head
commit 5f152c6dbf (author unknown, 5 years ago)

.gitmodules

@@ -1,6 +1,8 @@
[submodule "parser"]
path = parser
url = https://gitee.com/ascend/parser.git
branch = development
[submodule "metadef"]
path = metadef
url = https://gitee.com/ascend/metadef.git
branch = development

@@ -62,12 +62,13 @@ if (ENABLE_OPEN_SRC)
endif()
set(ASCEND_DRIVER_DIR ${ASCEND_DIR}/driver/lib64)
set(ASCEND_DRIVER_COMMON_DIR ${ASCEND_DIR}/driver/lib64/common)
set(ASCEND_DRIVER_SHARE_DIR ${ASCEND_DIR}/driver/lib64/share)
set(ASCEND_RUNTIME_DIR ${ASCEND_DIR}/fwkacllib/lib64)
set(ASCEND_ATC_DIR ${ASCEND_DIR}/atc/lib64)
set(ASCEND_ACL_DIR ${ASCEND_DIR}/acllib/lib64)
find_module(slog libslog.so ${ASCEND_ATC_DIR})
find_module(mmpa libmmpa.so ${ASCEND_ATC_DIR})
if(PLATFORM STREQUAL "train")
find_module(slog libslog.so ${ASCEND_ATC_DIR})
find_module(mmpa libmmpa.so ${ASCEND_ATC_DIR})
find_module(msprof libmsprof.so ${ASCEND_DRIVER_COMMON_DIR})
find_module(hccl libhccl.so ${ASCEND_RUNTIME_DIR})
find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR})
@@ -80,8 +81,6 @@ if (ENABLE_OPEN_SRC)
message(FATAL_ERROR "This platform is not supported in train mode, build terminated")
endif()
elseif(PLATFORM STREQUAL "inference")
find_module(slog libslog.so ${ASCEND_ATC_DIR})
find_module(mmpa libmmpa.so ${ASCEND_ATC_DIR})
find_module(adump_server libadump_server.a ${ASCEND_ACL_DIR})
find_module(runtime libruntime.so ${ASCEND_ACL_DIR})
find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR})
@@ -94,21 +93,21 @@ if (ENABLE_OPEN_SRC)
find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR})
find_module(msprof libmsprof.so ${ASCEND_DRIVER_DIR})
else()
find_module(msprof libmsprof.so ${ASCEND_ATC_DIR})
find_module(msprof libmsprof.so ${ASCEND_DRIVER_SHARE_DIR})
endif()
elseif(PLATFORM STREQUAL "all")
find_module(slog libslog.so ${ASCEND_DRIVER_DIR})
find_module(mmpa libmmpa.so ${ASCEND_DRIVER_DIR})
find_module(msprof libmsprof.so ${ASCEND_DRIVER_DIR})
#mdc dcdriver
find_module(msprof libmsprof.so ${ASCEND_DRIVER_COMMON_DIR})
find_module(hccl libhccl.so ${ASCEND_RUNTIME_DIR})
find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR})
find_module(runtime libruntime.so ${ASCEND_RUNTIME_DIR})
find_module(adump_server libadump_server.a ${ASCEND_ACL_DIR})
find_module(runtime libruntime.so ${ASCEND_ACL_DIR})
find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR})
find_module(resource libresource.so ${ASCEND_RUNTIME_DIR})
find_module(error_manager liberror_manager.so ${ASCEND_RUNTIME_DIR})
find_module(resource libresource.so ${ASCEND_ATC_DIR})
find_module(error_manager liberror_manager.so ${ASCEND_ATC_DIR})
find_module(error_manager_static liberror_manager.a ${ASCEND_ACL_DIR})
find_module(msprofiler libmsprofiler.a ${ASCEND_RUNTIME_DIR})
find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR})
find_module(msprofiler libmsprofiler.a ${ASCEND_ACL_DIR})
#mdc dcdriver/lib64/driver
find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR}/driver)
find_module(ascendcl_static libascendcl.a ${ASCEND_ACL_DIR})
else()
message(FATAL_ERROR "PLATFORM param is invalid, should be train, inference or all, build terminated")

@@ -4,11 +4,11 @@ endif()
include(ExternalProject)
set(JSON_SRC_DIR ${GE_CODE_DIR}/../third_party/json/include)
set(JSON_SRC_DIR ${CMAKE_BINARY_DIR}/opensrc/json/include)
ExternalProject_Add(json_build
URL https://github.com/nlohmann/json/releases/download/v3.6.1/include.zip
#URL /home/txd/workspace/cloud_code/pkg/include.zip
#SOURCE_DIR ${JSON_SRC_DIR}
SOURCE_DIR ${JSON_SRC_DIR}
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""

@@ -0,0 +1,27 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: task.proto
#ifndef STUB_TASK_PROTO_H
#define STUB_TASK_PROTO_H
namespace domi {
class TaskDef;
}
#endif // STUB_TASK_PROTO_H
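Since only pointers and references to domi::TaskDef cross this boundary, the forward declaration above is all a client translation unit needs; a minimal sketch of that pattern (the include path and class are hypothetical):

    #include "task_proto_stub.h"  // hypothetical path to the stub header above

    // Compiles against the forward declaration alone: no member of
    // domi::TaskDef is touched, only a pointer is stored and handed on.
    class TaskHolder {
     public:
      explicit TaskHolder(domi::TaskDef *task) : task_(task) {}
      domi::TaskDef *get() const { return task_; }

     private:
      domi::TaskDef *task_;
    };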

@@ -528,9 +528,19 @@ Status GeGenerator::GenerateModel(const Graph &graph, const string &file_name_pr
return SUCCESS;
}
Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff,
bool is_offline) {
namespace {
bool IsNeedConnectInputOpForSingleOp(GeTensorDesc &tensor_desc) {
bool is_need = true;
// When format and dtype are both reserved/undefined, the input is optional in the single-op scene.
if (tensor_desc.GetFormat() == FORMAT_RESERVED && tensor_desc.GetDataType() == DT_UNDEFINED) {
is_need = false;
}
return is_need;
}
}
Status GeGenerator::CheckForSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs,
const vector<GeTensor> &outputs) {
GE_CHECK_NOTNULL_EXEC(op_desc, return PARAM_INVALID);
if (!inputs.empty() && (inputs.size() != op_desc->GetAllInputsSize())) {
GELOGE(PARAM_INVALID, "Tensor size: %zu, Inputs size: %zu", inputs.size(), op_desc->GetAllInputsSize());
@@ -540,7 +550,17 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
GELOGE(PARAM_INVALID, "Tensor size: %zu, Outputs size: %zu", outputs.size(), op_desc->GetOutputsSize());
return PARAM_INVALID;
}
return SUCCESS;
}
Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff,
bool is_offline) {
if (CheckForSingleOp(op_desc, inputs, outputs) != SUCCESS) {
GELOGE(PARAM_INVALID, "input param is invalid when build single op!");
return PARAM_INVALID;
}
OmgContext &omg_context = (impl_ == nullptr) ? domi::GetContext() : impl_->omg_context_;
omg_context.is_dynamic_input = ContainsDynamicInpus(*op_desc);
@@ -575,12 +595,18 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
if (inputs.empty()) {
for (const auto &input_desc : op_desc->GetAllInputsDescPtr()) {
GE_CHECK_NOTNULL_EXEC(input_desc, return INTERNAL_ERROR);
if (!IsNeedConnectInputOpForSingleOp(*input_desc)) {
continue;
}
GE_CHK_STATUS_RET_NOLOG(AddInputs(compute_graph, op_node, *input_desc, arg_index, false));
arg_index++;
}
} else {
for (const auto &in_desc : inputs) {
GeTensorDesc input_desc = in_desc.GetTensorDesc();
if (!IsNeedConnectInputOpForSingleOp(input_desc)) {
continue;
}
GE_CHK_STATUS_RET_NOLOG(AddInputs(compute_graph, op_node, input_desc, arg_index, true));
arg_index++;
}
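The skip rule above reads as a standalone predicate: an input is connected to a Data op unless both its format and dtype carry the reserved/undefined sentinels, which together mark an optional single-op input. A self-contained sketch with stand-in enums (not the real GE headers):

    #include <vector>

    enum Format { FORMAT_NCHW, FORMAT_RESERVED };  // stand-ins for GE formats
    enum DataType { DT_FLOAT, DT_UNDEFINED };      // stand-ins for GE dtypes
    struct TensorDescLike { Format format; DataType dtype; };

    // Mirrors IsNeedConnectInputOpForSingleOp: only the both-unset case is optional.
    bool NeedsInputNode(const TensorDescLike &desc) {
      return !(desc.format == FORMAT_RESERVED && desc.dtype == DT_UNDEFINED);
    }

    // Mirrors the two BuildSingleOp loops: optional inputs are skipped, so
    // arg_index stays compact over the inputs that are actually connected.
    int ConnectInputs(const std::vector<TensorDescLike> &descs) {
      int arg_index = 0;
      for (const auto &desc : descs) {
        if (!NeedsInputNode(desc)) continue;
        ++arg_index;  // the real code calls AddInputs(...) here
      }
      return arg_index;
    }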

@@ -545,7 +545,8 @@ bool CanReuseBySize(const map<string, uint64_t> &reusable_block_counts, const Me
}
bool BlockMemAssigner::IsOutNodeSetContinuousInput(const NodePtr &n, uint32_t out_index, std::string &peer_name,
uint32_t &peer_input_index, bool &no_need_assign_memory) {
uint32_t &peer_input_index,
bool &no_need_assign_memory, bool &reset_zero_copy_flag) {
if (n == nullptr || n->GetAllOutDataAnchors().size() <= 0) {
return false;
}
@@ -571,6 +572,13 @@ bool BlockMemAssigner::IsOutNodeSetContinuousInput(const NodePtr &n, uint32_t ou
return false;);
// If GetBool fails, is_input_continuous stays false.
bool is_input_continuous_no_padding = false;
(void)ge::AttrUtils::GetBool(peer_in_node_desc, ATTR_NAME_NOPADDING_CONTINUOUS_INPUT,
is_input_continuous_no_padding);
if (is_input_continuous_no_padding) {
reset_zero_copy_flag = true;
return false;
}
(void)ge::AttrUtils::GetBool(peer_in_node_desc, ATTR_NAME_CONTINUOUS_INPUT, is_input_continuous);
GE_IF_BOOL_EXEC(is_input_continuous && CheckIsZeroMemNodeType(peer_node->GetType()),
@@ -1249,10 +1257,11 @@ Status BlockMemAssigner::AssignOutputMemoryWithReuse(const NodePtr &node, vector
std::string peer_name;
uint32_t peer_input_index = 0;
bool out_node_set_continuous_input = false;
bool reset_zero_copy_flag = false;
bool no_need_assign_memory = ((size == 0) || CheckIsZeroMemNodeType(node->GetType()));
if (!no_need_assign_memory) {
out_node_set_continuous_input =
IsOutNodeSetContinuousInput(node, i, peer_name, peer_input_index, no_need_assign_memory);
IsOutNodeSetContinuousInput(node, i, peer_name, peer_input_index, no_need_assign_memory, reset_zero_copy_flag);
GE_IF_BOOL_EXEC(!no_need_assign_memory,
no_need_assign_memory = IsAtomicOutputMemory(node, i, is_atomic, out_node_set_continuous_input););
}
@@ -1269,6 +1278,9 @@ Status BlockMemAssigner::AssignOutputMemoryWithReuse(const NodePtr &node, vector
MemoryBlock *mem_block = ApplyOutMemory(node, i, ranges, is_op_reuse_mem_, out_node_set_continuous_input);
if (mem_block != nullptr) {
GE_IF_BOOL_EXEC(reset_zero_copy_flag,
mem_block->is_zero_copy_ = false;
GELOGI("Node[%s] output[%u] need assign memory before reassign.", op_desc->GetName().c_str(), i););
node_out_blocks_[node->GetName()].emplace_back(mem_block);
if (out_node_set_continuous_input) {
node_continuous_input_blocks_[peer_name][peer_input_index] = mem_block;
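The new reset_zero_copy_flag out-parameter carries one fact from the peer-input inspection back to output assignment: a consumer marked ATTR_NAME_NOPADDING_CONTINUOUS_INPUT is not treated as a continuous-input case (the function returns false), but the produced block must still be dropped from the zero-copy set. A condensed sketch of that flow, with stand-in types:

    struct MemoryBlockLike { bool is_zero_copy_ = true; };

    // Stand-in for probing the no-padding-continuous attribute on the peer input.
    bool PeerIsNoPaddingContinuous(bool attr_value) { return attr_value; }

    void AssignOutput(MemoryBlockLike &block, bool peer_attr) {
      bool reset_zero_copy_flag = false;
      if (PeerIsNoPaddingContinuous(peer_attr)) {
        reset_zero_copy_flag = true;  // continuous-input handling is bypassed...
      }
      // ...the block is assigned normally, then the flag is applied afterwards:
      if (reset_zero_copy_flag) {
        block.is_zero_copy_ = false;  // memory must be assigned before reassign
      }
    }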

@@ -390,7 +390,7 @@ class BlockMemAssigner : public MemAssigner {
bool IsZeroCopyBlock(const NodePtr &node, bool continuous);
bool IsOutNodeSetContinuousInput(const NodePtr &n, uint32_t out_index, std::string &peer_name,
uint32_t &peer_input_index, bool &no_need_assign_memory);
uint32_t &peer_input_index, bool &no_need_assign_memory, bool &reset_zero_copy_flag);
///
/// @ingroup GE

@@ -171,7 +171,7 @@ class Impl {
graphStatus InitDomiOmgContext(const string &input_shape, const string &input_format, const string &net_format,
bool is_dynamic_input);
void SetRtSocVersion();
void UpdateThreadContext();
public:
ge::GeGenerator generator_;
std::map<std::string, std::string> options_;
@@ -225,8 +225,6 @@ graphStatus Impl::Init(const std::map<std::string, std::string> &options) {
return ret;
}
GetThreadLocalContext().SetGlobalOption(GetMutableGlobalOptions());
GetThreadLocalContext().SetGraphOption(options_);
std::string build_mode = (options_.find(BUILD_MODE) == options_.end() || options_[BUILD_MODE] == BUILD_MODE_NORMAL)
? "" : options_[BUILD_MODE];
options_[BUILD_MODE] = build_mode;
@@ -286,7 +284,7 @@ graphStatus Impl::Init(const std::map<std::string, std::string> &options) {
ge::PrintOptionMap(options_, "ge option");
SetRtSocVersion();
UpdateThreadContext();
// 3. init generator with options_
ret = generator_.Initialize(options_, omg_context_);
if (ret != GRAPH_SUCCESS) {
@@ -310,6 +308,11 @@ void Impl::SetRtSocVersion() {
}
}
void Impl::UpdateThreadContext() {
GetThreadLocalContext().SetGlobalOption(GetMutableGlobalOptions());
GetThreadLocalContext().SetGraphOption(options_);
}
graphStatus Impl::CreateInputsForIRBuild(const ge::Graph &graph, vector<ge::GeTensor> &inputs) {
auto compute_graph = ge::GraphUtils::GetComputeGraph(graph);
GE_CHECK_NOTNULL(compute_graph);
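Folding both thread-local updates into UpdateThreadContext() and invoking it after SetRtSocVersion() means any option mutated during Init is part of the snapshot the build thread sees. A stand-in sketch of why the ordering matters (the types and option key are illustrative, not the GE API):

    #include <map>
    #include <string>

    struct ThreadCtxLike { std::map<std::string, std::string> graph_options; };
    ThreadCtxLike &ThreadCtx() { static thread_local ThreadCtxLike ctx; return ctx; }

    void UpdateThreadCtx(const std::map<std::string, std::string> &options) {
      ThreadCtx().graph_options = options;  // publish a snapshot of the options
    }

    void InitLike(std::map<std::string, std::string> &options) {
      options["ge.socVersion"] = "AscendXXX";  // late mutation, as SetRtSocVersion does
      UpdateThreadCtx(options);                // publishing last keeps it visible
    }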
@@ -333,13 +336,15 @@ graphStatus Impl::CreateInputsForIRBuild(const ge::Graph &graph, vector<ge::GeTe
data_shape = tensor.GetShape();
GELOGI("Data op get shape from InputDesc in ge ir graph.");
}
// If the user specified an input format, apply it to all data ops; otherwise use the format from tensor_desc.
auto data_format = omg_context_.format != domi::DOMI_TENSOR_ND ?
ge::TypeUtils::DomiFormatToFormat(omg_context_.format) : tensor.GetFormat();
ge::DataType data_type = tensor.GetDataType();
string data_type_str = ge::TypeUtils::DataTypeToSerialString(data_type);
GELOGI("Data op get data type:%s from InputDesc in ge ir graph.", data_type_str.c_str());
ge::GeTensor inputTensor;
ge::GeTensorDesc desc(data_shape, ge::Format(omg_context_.format), data_type);
ge::GeTensorDesc desc(data_shape, ge::Format(data_format), data_type);
inputTensor.SetTensorDesc(desc);
inputs.push_back(inputTensor);
}
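The fix replaces the unconditional use of omg_context_.format with a per-tensor fallback: a user-specified global input format still applies to every data op, while DOMI_TENSOR_ND (the "not set" sentinel) now defers to the format recorded in the tensor description. The rule in isolation, with stand-in enums:

    enum DomiFormat { DOMI_TENSOR_ND, DOMI_TENSOR_NCHW };  // stand-ins
    enum GeFormat { FORMAT_ND, FORMAT_NCHW };

    GeFormat DomiToGe(DomiFormat f) {
      return f == DOMI_TENSOR_NCHW ? FORMAT_NCHW : FORMAT_ND;
    }

    // A user format overrides every data op; ND means "use the tensor's own format".
    GeFormat PickDataFormat(DomiFormat user_format, GeFormat tensor_format) {
      return user_format != DOMI_TENSOR_ND ? DomiToGe(user_format) : tensor_format;
    }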

@@ -226,16 +226,11 @@ bool SingleOpParser::Validate(const SingleOpDesc &op_desc) {
}
int index = 0;
for (auto &tensor_desc : op_desc.input_desc) {
if (tensor_desc.type == DT_UNDEFINED) {
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "index"}, {"input", std::to_string(index)});
GELOGE(false, "Input's dataType is invalid when the index is %d", index);
return false;
}
if (tensor_desc.format == FORMAT_RESERVED) {
ErrorManager::GetInstance().ATCReportErrMessage("E10028", {"input", "index"}, {"input", std::to_string(index)});
GELOGE(PARAM_INVALID, "Input's format is invalid when the index is %d", index);
for (auto &tensor_desc : op_desc.output_desc) {
if ((tensor_desc.type == DT_UNDEFINED && tensor_desc.format != FORMAT_RESERVED) ||
(tensor_desc.type != DT_UNDEFINED && tensor_desc.format == FORMAT_RESERVED)) {
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "index"}, {"output", std::to_string(index)});
GELOGE(PARAM_INVALID, "Input's dataType or format is invalid when the index is %d", index);
return false;
}
++index;
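The relaxed check accepts a descriptor whose dtype and format are both unset (an optional input, matching the single-op builder change above) but still rejects one where exactly one of the two is missing. As a standalone predicate, with stand-in enums:

    enum DataType { DT_FLOAT, DT_UNDEFINED };
    enum Format { FORMAT_NCHW, FORMAT_RESERVED };

    // Valid iff dtype and format are set together or unset together.
    bool DescPairIsValid(DataType type, Format format) {
      const bool type_unset = (type == DT_UNDEFINED);
      const bool format_unset = (format == FORMAT_RESERVED);
      return type_unset == format_unset;
    }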

@@ -53,7 +53,7 @@ class GeGenerator {
Status GenerateOfflineModel(const Graph &graph, const std::string &file_name_prefix,
const std::vector<GeTensor> &inputs = std::vector<GeTensor>());
Status GenerateOnlineModel(const Graph &graph, const vector<GeTensor> &inputs, ge::ModelBufferData& model);
Status GenerateOnlineModel(const Graph &graph, const vector<GeTensor> &inputs, ge::ModelBufferData &model);
Status GenerateInfershapeGraph(const Graph &graph);
@@ -77,16 +77,16 @@ class GeGenerator {
/// @param [in] engine_type: specific engine.
/// @param [out] model_buff: model buff of single op.
/// @return SUCCESS or FAILED
Status BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor> &inputs,
const vector<GeTensor> &outputs, OpEngineType engine_type,
ModelBufferData &model_buff);
Status BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
OpEngineType engine_type, ModelBufferData &model_buff);
private:
Status GenerateModel(const Graph &graph, const string &file_name_prefix,
const vector<GeTensor> &inputs, ge::ModelBufferData& model, bool is_offline = true);
Status GenerateModel(const Graph &graph, const string &file_name_prefix, const vector<GeTensor> &inputs,
ge::ModelBufferData &model, bool is_offline = true);
Status BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
const string &model_file_name, OpEngineType engine_type,
ModelBufferData &model_buff, bool is_offline = true);
const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff,
bool is_offline = true);
Status CheckForSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs);
class Impl;

@@ -1 +1 @@
Subproject commit e79f45025b95a7295bdffa38a1b720bfd6609ae9
Subproject commit 1cc55bcae09902b3d158993dd57bfbd1d3337066

@@ -1 +1 @@
Subproject commit be653a17038395f3901d43b600a51f6ab33af5e5
Subproject commit db4e6070bb2cec01cead264a44ceae07e7f3048e

@@ -528,7 +528,6 @@ uint32_t Fusion(ComputeGraphPtr model_graph, ComputeGraphPtr fusion_graph, kScop
int stream_num = 1;
int flag = 0;
// make_graph_nd(graph);
NodePtr node_a = fusion_graph->AddNode(op_def_a);
NodePtr node_b = fusion_graph->AddNode(op_def_b);

@@ -746,7 +746,6 @@ int TestBuildGraphTest(Func fun, Graph &graph, vector<ge::Tensor> &inputs, vecto
shapeTensor.SetTensorDesc(shape_desc);
vector<float> dataValuec;
for (int i = 0; i < sizeshape; i++) {
// dataValuec.push_back((float)(i%255));
dataValuec.push_back(1);
}
@@ -764,7 +763,6 @@ int TestBuildGraphTest(Func fun, Graph &graph, vector<ge::Tensor> &inputs, vecto
}
shapeTensor1.SetData((uint8_t *)dataValuec1.data(), 4 * sizeshape1);
// inputs.push_back(shapeTensor1);
return 0;
}

@@ -69,12 +69,10 @@ TEST_F(UtestGeModelUnittest, save_model_to_file_success) {
ge::Graph ge_graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph);
string file_name = "model_data.pb";
setenv("DUMP_MODEL", "1", true);
// EXPECT_EQ(ge_graph.SaveToFile(file_name), GRAPH_FAILED);
setenv("DUMP_MODEL", "0", true);
}
TEST_F(UtestGeModelUnittest, load_model_from_file_success) {
ge::Graph ge_graph;
string file_name = "model_data.pb";
// EXPECT_EQ(ge_graph.LoadFromFile(file_name), GRAPH_SUCCESS);
}

@@ -182,8 +182,6 @@ TEST_F(UtestModelManagerDavinciModel, contruct_modeldef_createfail) {
ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_WINDOW, vector<int>({1, 1}));
ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_STRIDE, vector<int>({1, 1}));
// EXPECT_EQ(ge::SUCCESS, model.Init());
model.GetEventList();
}
@@ -200,7 +198,6 @@ TEST_F(UtestModelManagerDavinciModel, copy_input_data_to_model_fail) {
input_data.blobs.push_back(data_buffer);
model.op_list_.clear();
// EXPECT_EQ(ge::PARAM_INVALID, model.CopyInputDataToModel(input_data.blobs, 0));
delete[](char *) data_buffer.data;
}
@@ -210,7 +207,6 @@ TEST_F(UtestModelManagerDavinciModel, streamnum_success) {
DavinciModel *model = new DavinciModel(0, g_label_call_back);
OmeTestOpUtils::InitModel(*model);
// EXPECT_EQ(ge::SUCCESS, model->Init());
EXPECT_EQ(0, model->StreamNum());
EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart());
@@ -226,8 +222,6 @@ TEST_F(UtestModelManagerDavinciModel, eventnum_success) {
OmeTestOpUtils::InitModel(*model);
// EXPECT_EQ(ge::SUCCESS, model->Init());
EXPECT_EQ(0, model->EventNum());
EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart());
@@ -241,8 +235,6 @@ TEST_F(UtestModelManagerDavinciModel, handlelist_success) {
OmeTestOpUtils::InitModel(*model);
// EXPECT_EQ(ge::SUCCESS, model->Init());
EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart());
EXPECT_EQ(ge::SUCCESS, model->ModelRunStop());
@@ -256,8 +248,6 @@ TEST_F(UtestModelManagerDavinciModel, eventlist_success) {
OmeTestOpUtils::InitModel(*model);
// EXPECT_EQ(ge::SUCCESS, model->Init());
EXPECT_EQ(true, model->GetEventList().empty());
EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart());
@@ -282,7 +272,6 @@ TEST_F(UtestModelManagerDavinciModel, failed_reset_device) {
TEST_F(UtestModelManagerDavinciModel, init_not_support_priority) {
int32_t priority = 8;
DavinciModel model(priority, g_label_call_back);
// EXPECT_EQ(ge::PARAM_INVALID, model.Init());
}
// test GetInputOutputDescInfo
@@ -346,7 +335,6 @@ TEST_F(UtestModelManagerDavinciModel, CopyTensorFromSrcVarNode_success) {
NodePtr dst_node = graph->AddNode(op_desc_ptr);
DavinciModel model(0, g_label_call_back);
Status ret = model.CopyTensorFromSrcVarNode(src_node, dst_node);
// EXPECT_EQ(SUCCESS, ret);
}
TEST_F(UtestModelManagerDavinciModel, CopyVarData_graph_is_nullptr) {
@@ -370,7 +358,6 @@ TEST_F(UtestModelManagerDavinciModel, copy_var_data_success) {
DavinciModel model(0, g_label_call_back);
Status ret = model.CopyVarData(graph);
// EXPECT_EQ(SUCCESS, ret);
}
TEST_F(UtestModelManagerDavinciModel, get_input_output_desc_info_without_data_op_list) {
@@ -540,7 +527,6 @@ TEST_F(UtestModelManagerDavinciModel, get_flow_ctrl_op_list_success) {
std::map<uint32_t, uint32_t> flowctrl_op_index_internal_map;
flowctrl_op_index_internal_map.insert(pair<uint32_t, uint32_t>(1, 1));
model.flowctrl_op_index_internal_map_ = flowctrl_op_index_internal_map;
// EXPECT_EQ(flowctrl_op_index_internal_map_, model.GetFlowctrlOpList());
}
// test SetFlowctrlOpList
@@ -1204,10 +1190,8 @@ TEST_F(UtestModelManagerDavinciModel, profiling_model_success) {
input_data.index = 0;
input_data.model_id = 1;
input_data.blobs.push_back(data_buffer);
// model.SinkModelProfile(&model);
rtFreeHost(data.model_data);
// delete stream;
delete[](char *) data_buffer.data;
delete model_def;
}

@@ -153,20 +153,6 @@ TEST_F(UtestModelManagerModelManager, case_load_model_encypt_not_match) {
delete[](uint8_t *) data.model_data;
}
#if 0
TEST_F(UtestModelManagerModelManager, case_load_model_signature_failed)
{
ModelManager mm;
ge::ModelData data;
GenUnencryptModelData(data);
uint32_t model_id = 1;
MOCKER(&WBDecryptor::CheckSignature).stubs().will(returnValue(false));
EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
delete[](uint8_t*)data.model_data;
}
#endif
TEST_F(UtestModelManagerModelManager, case_load_model_encypt_type_unsupported) {
ModelManager mm;
ge::ModelData data;
@@ -178,87 +164,6 @@ TEST_F(UtestModelManagerModelManager, case_load_model_encypt_type_unsupported) {
delete[](uint8_t *) data.model_data;
}
#if 0
TEST_F(UtestModelManagerModelManager, case_load_model_header_len_failed)
{
ModelManager mm;
ge::ModelData data;
GenEncryptModelData(data);
ModelFileHeader *header = (ModelFileHeader*)data.model_data;
data.model_len -= header->length;
header->length = 0;
uint32_t model_id = 1;
EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
delete[](uint8_t*)data.model_data;
}
#endif
#if 0
TEST_F(UtestModelManagerModelManager, case_load_success)
{
const char* model_file = "bin/llt/framework/domi/ut/omg/data/leakyrelu.dav";
const char* json_file = "test.json";
const char* key = "bin/llt/framework/domi/ut/omg/data/leakyrelu.dav.PASSCODE";
ge::ModelData model;
Status ret = ModelParserBase::LoadFromFile(model_file, key, 0, &model);
EXPECT_EQ(ge::SUCCESS, ret);
ModelManager mm;
uint32_t model_id = 1;
ret = mm.LoadModelOffline(model_id, model, UTEST_CALL_BACK_FUN);
EXPECT_EQ(ge::SUCCESS, ret);
if (model.model_data)
delete[](uint8_t*)model.model_data;
}
#endif
#if 0
TEST_F(UtestModelManagerModelManager, case_load_encrypt_model_signature_failed)
{
ModelManager mm;
ge::ModelData data;
GenEncryptModelData(data);
uint32_t model_id = 1;
data.key;
EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
delete[](uint8_t*)data.model_data;
}
TEST_F(UtestModelManagerModelManager, case_load_encrypt_model_invalid_key_len)
{
ModelManager mm;
ge::ModelData data;
GenEncryptModelData(data);
data.key = "0123456789abcdef0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0";
uint32_t model_id = 1;
EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
delete[](uint8_t*)data.model_data;
}
TEST_F(UtestModelManagerModelManager, case_load_encrypt_model_invalid_key_char)
{
ModelManager mm;
ge::ModelData data;
GenEncryptModelData(data);
data.key = "0123456789abcdef0123456789ABCDEF0123456789ABCDEF0123456789ABCDEG";
uint32_t model_id = 1;
EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
delete[](uint8_t*)data.model_data;
}
TEST_F(UtestModelManagerModelManager, case_load_encrypt_model_load_failed)
{
ModelManager mm;
ge::ModelData data;
GenEncryptModelData(data);
uint32_t model_id = 1;
EXPECT_EQ(ge::INTERNAL_ERROR, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
delete[](uint8_t*)data.model_data;
}
#endif
shared_ptr<ge::ModelListener> LabelCallBack(new DModelListener());
// test HandleCommand

@@ -76,7 +76,6 @@ class OmeTestOpUtils {
return nullptr;
}
// return std::make_shared<ge::Node>(op_desc, nullptr);
auto g = std::make_shared<ge::ComputeGraph>("g");
return g->AddNode(std::move(op_desc));
}
@@ -403,8 +402,6 @@ class OmeTestOpDescBuilder {
if (SUCCESS != res) {
GELOGE(ge::FAILED, "Finish: GraphUtils::AddEdge failed");
}
// ge::NodePtr src_node = node->GetOwnerComputeGraph()->AddNodeFront(src_op_desc);
// node->AddLinkFrom(src_node);
}
{
@@ -434,8 +431,6 @@ class OmeTestOpDescBuilder {
vector<ge::GeTensorPtr> weights_;
int64_t eventId_ = -1;
int64_t scopeid_ = -1;
// std::shared_ptr<ge::ComputeGraph> graph_;
};
#endif // OME_REBUILD_OME_OP_TEST_UTILS_H

@@ -122,7 +122,6 @@ TEST_F(UtestGraphPassesDimensionAdjustPass, node_get_original_type_failed) {
std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
ge::Status ret = pass->Run(op_node);
// EXPECT_EQ(ge::SUCCESS, ret);
}
TEST_F(UtestGraphPassesDimensionAdjustPass, node_not_register_op) {

@@ -93,7 +93,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test2) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test3) {
@@ -123,7 +122,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test3) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test4) {
@@ -154,7 +152,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test4) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test5) {
@@ -186,7 +183,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test5) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test6) {
@@ -219,7 +215,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test6) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test7) {
@@ -253,7 +248,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test7) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test8) {
@@ -288,7 +282,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test8) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test9) {
@@ -322,7 +315,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test9) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test10) {
@@ -357,7 +349,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test10) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test11) {
@@ -392,7 +383,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test11) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test12) {
@@ -427,7 +417,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test12) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test13) {
@@ -462,7 +451,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test13) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test14) {
@@ -497,7 +485,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test14) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test15) {
@@ -532,7 +519,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test15) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test16) {
@@ -567,7 +553,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test16) {
shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
// EXPECT_EQ(PARAM_INVALID, status);
}
TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test17) {

@@ -167,7 +167,6 @@ TEST_F(UtestGraphPassesGuaranteeConstPass, get_origenal_type_fail) {
string type2 = "FrameworkOp";
node->GetOpDesc()->SetType(type2);
ge::Status ret = guarantee_const_op_remove_pass_->Run(node);
// EXPECT_EQ(ge::SUCCESS, ret);
}
TEST_F(UtestGraphPassesGuaranteeConstPass, int32_success_6) {

@@ -135,7 +135,6 @@ TEST_F(UtestIdentityPass, succ) {
string type2 = "FrameworkOp";
node->GetOpDesc()->SetType(type2);
status = pass.Run(node);
// EXPECT_EQ(ge::SUCCESS, status);
NodePtr node_err = AddNode(graph, "Identity", IDENTITY, 1, 2);
status = pass.Run(node_err);

@@ -845,7 +845,6 @@ TEST_F(UtestGraphPassesNetOutputPass, out_node_remove_check_fail) {
ge::NodePtr mul2 = compute_graph->FindNode("Mul2");
std::vector<std::pair<ge::NodePtr, int32_t>> output_nodes = {{mul1, 0}, {mul2, 0}};
compute_graph->SetGraphOutNodesInfo(output_nodes);
// compute_graph->RemoveNode(mul1);
mul1->GetInDataAnchor(0)->UnlinkAll();
mul1->GetInDataAnchor(1)->UnlinkAll();
GraphUtils::RemoveNodeWithoutRelink(compute_graph, mul1);

@@ -75,5 +75,4 @@ TEST_F(UtestPlaceholderWithDefaultPass, succ) {
string type2 = "FrameworkOp";
node->GetOpDesc()->SetType(type2);
pass.Run(node);
// EXPECT_EQ(ge::SUCCESS, status);
}

@@ -75,5 +75,4 @@ TEST_F(UtestPreventGradientPass, succ) {
string type2 = "FrameworkOp";
node->GetOpDesc()->SetType(type2);
status = pass.Run(node);
// EXPECT_EQ(ge::SUCCESS, status);
}

Some files were not shown because too many files have changed in this diff.
