Compare commits


7 Commits
master ... r0.6

Author            SHA1        Message                                                                       Date
mindspore-ci-bot  885af56694  !212 fix securec download links due to mistakes made by openeuler community  4 years ago
yanghaoran        9f8bdb838e  fix securec download links due to mistakes made by openeuler community       4 years ago
mindspore-ci-bot  2cb83c8f4d  !52 Revert "Op debug feature"                                                 5 years ago
yanghaoran        20f86e636b  Revert "Op debug feature"                                                     5 years ago
mindspore-ci-bot  efd823cc18  !51 runpackage sync C75B050 for r0.6                                          5 years ago
wuweikang         ca11480c34  runpackage sync C75B050                                                       5 years ago
lujiale           db2ea7a6ff  update RELEASE.md.                                                            5 years ago

@@ -1,3 +1,24 @@
+# Release 0.6.0-beta
+## Major Features and Improvements
+- GE supports function control operators such as If/Case/While/For.
+- In single-operator call scenarios, GE supports recording the correspondence between operators and tasks for performance commissioning.
+- GE supports a new operator overflow positioning solution.
+## Bugfixes
+- Fix the problem that the aclmdlGetCurOutputDims interface failed to query output dims in dynamic batch scenarios.
+- Fix the problem that the operator compilation options (advanced and advanced) cannot be selected.
+- Fix the problem that the zero-copy function cannot be performed in the scenario of conditional operators converging after Data operators.
+- Fix the problem that an empty graph cannot be handled.
+## Thanks to our Contributors
+Thanks goes to these wonderful people:
+wangcong, weiyang, yanghaorang, xutianchun, shibeiji, zhouchao, tanghuikang, zhoulili, liujunzhu, zhengyuanhua, taoxiangdong
+Contributions of any kind are welcome!
 # Release 0.5.0-beta
 ## Major Features and Improvements

@@ -1,7 +1,7 @@
 graphengine_add_pkg(securec
     VER 1.1.10
-    URL https://gitee.com/openeuler/bounds_checking_function/repository/archive/v1.1.10.tar.gz
-    MD5 0782dd2351fde6920d31a599b23d8c91
+    URL https://gitee.com/openeuler/libboundscheck/repository/archive/v1.1.10.tar.gz
+    MD5 193f0ca5246c1dd84920db34d2d8249f
     LIBS c_sec
     PATCHES ${GE_SOURCE_DIR}/third_party/patch/securec/securec.patch001
     CMAKE_OPTION " "

@@ -63,5 +63,12 @@ struct HcomOpertion {
   int32_t root;
 };
+struct HcomRemoteAccessAddrInfo {
+  uint32_t remotetRankID;
+  uint64_t remoteAddr;  // host embedding table address
+  uint64_t localAddr;   // device HBM address
+  uint64_t length;      // memory Length in Bytes
+};
 }  // namespace ge
 #endif  // INC_COMMON_OPSKERNEL_GE_TASK_INFO_H_
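The new HcomRemoteAccessAddrInfo struct only carries addressing information for a remote access. Below is a minimal sketch of how a caller might fill it for one host-to-HBM read; the helper function and the include path (derived from the include guard) are assumptions, not part of this diff.

```cpp
#include <cstdint>
#include <vector>

#include "common/opskernel/ge_task_info.h"  // assumed path, derived from the include guard above

// Hypothetical helper: describe one remote access, host embedding table -> device HBM.
std::vector<ge::HcomRemoteAccessAddrInfo> BuildRemoteReadTable(uint32_t rank_id, uint64_t host_table_addr,
                                                               uint64_t device_hbm_addr, uint64_t bytes) {
  ge::HcomRemoteAccessAddrInfo info{};
  info.remotetRankID = rank_id;       // field name spelled exactly as declared in the header
  info.remoteAddr = host_table_addr;  // host embedding table address
  info.localAddr = device_hbm_addr;   // device HBM destination
  info.length = bytes;                // transfer length in bytes
  return {info};
}
```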

@@ -31,27 +31,37 @@ class ErrorManager {
   ///
   /// @brief init
-  /// @param [in] path current so path
+  /// @param [in] path: current so path
   /// @return int 0(success) -1(fail)
   ///
   int Init(std::string path);
   ///
   /// @brief Report error message
-  /// @param [in] errCode error code
-  /// @param [in] mapArgs parameter map
+  /// @param [in] error_code: error code
+  /// @param [in] args_map: parameter map
   /// @return int 0(success) -1(fail)
   ///
   int ReportErrMessage(std::string error_code, const std::map<std::string, std::string> &args_map);
+  ///
   /// @brief output error message
-  /// @param [in] handle print handle
+  /// @param [in] handle: print handle
   /// @return int 0(success) -1(fail)
   ///
   int OutputErrMessage(int handle);
+  ///
+  /// @brief output message
+  /// @param [in] handle: print handle
+  /// @return int 0(success) -1(fail)
+  ///
+  int OutputMessage(int handle);
+  ///
   /// @brief Report error message
-  /// @param [in] vector parameter key, vector parameter value
+  /// @param [in] key: vector parameter key
+  /// @param [in] value: vector parameter value
   ///
   void ATCReportErrMessage(std::string error_code, const std::vector<std::string> &key = {},
                            const std::vector<std::string> &value = {});
@@ -60,7 +70,7 @@ class ErrorManager {
   struct ErrorInfo {
     std::string error_id;
     std::string error_message;
-    std::vector<std::string> arglist;
+    std::vector<std::string> arg_list;
   };
   ErrorManager() {}
@@ -77,7 +87,8 @@ class ErrorManager {
   bool is_init_ = false;
   std::map<std::string, ErrorInfo> error_map_;
-  std::vector<std::string> error_message_evc_;
+  std::vector<std::string> error_messages_;
+  std::vector<std::string> warning_messages_;
 };
 #endif  // ERROR_MANAGER_H_
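This diff renames the stored argument list, adds OutputMessage, and splits the internal buffer into error_messages_ and warning_messages_. A minimal usage sketch of the public methods shown above follows; how the ErrorManager instance is obtained is outside this diff, so it is passed in by reference, and the include path and error code string are placeholders.

```cpp
#include <map>
#include <string>

#include "common/util/error_manager/error_manager.h"  // assumed header path

int ReportAndFlush(ErrorManager &error_manager, int log_handle) {
  if (error_manager.Init("./") != 0) {  // "path" is the current .so path, per the doc comment
    return -1;
  }
  const std::map<std::string, std::string> args = {{"parameter", "input_shape"}};  // placeholder args_map
  (void)error_manager.ReportErrMessage("E10001", args);  // "E10001" is a placeholder error code
  (void)error_manager.OutputErrMessage(log_handle);      // flush accumulated error messages
  return error_manager.OutputMessage(log_handle);        // flush the new message/warning buffer
}
```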

@@ -82,6 +82,8 @@ class PlatformInfoManager {
   void ParseVectorCoreMemoryRates(map<string, string> &vectorCoreMemoryRatesMap, PlatformInfo &platformInfoTemp);
+  void ParseCPUCache(map<string, string> &CPUCacheMap, PlatformInfo &platformInfoTemp);
   void ParseVectorCoreintrinsicDtypeMap(map<string, string> &vectorCoreintrinsicDtypeMap,
                                         PlatformInfo &platformInfoTemp);

@@ -73,6 +73,8 @@ typedef struct tagAiCoreSpec {
 typedef struct tagAiCoreMemoryRates {
   double ddrRate;
+  double ddrReadRate;
+  double ddrWriteRate;
   double l2Rate;
   double l2ReadRate;
   double l2WriteRate;
@@ -86,6 +88,7 @@ typedef struct tagAiCoreMemoryRates {
 } AiCoreMemoryRates;
 typedef struct tagVectorCoreSpec {
+  double vecFreq;
   uint64_t vecCalcSize;
   uint64_t smaskBuffer;
   uint64_t ubSize;
@@ -94,10 +97,15 @@ typedef struct tagVectorCoreSpec {
   uint64_t ubbankNum;
   uint64_t ubburstInOneBlock;
   uint64_t ubbankGroupNum;
+  uint64_t vectorRegSize;
+  uint64_t predicateRegSize;
+  uint64_t addressRegSize;
 } VectorCoreSpec;
 typedef struct tagVectorCoreMemoryRates {
   double ddrRate;
+  double ddrReadRate;
+  double ddrWriteRate;
   double l2Rate;
   double l2ReadRate;
   double l2WriteRate;
@@ -105,6 +113,11 @@ typedef struct tagVectorCoreMemoryRates {
   double ubToDdrRate;
 } VectorCoreMemoryRates;
+typedef struct tagCPUCache {
+  uint32_t AICPUSyncBySW;
+  uint32_t TSCPUSyncBySW;
+} CPUCache;
 typedef struct tagPlatformInfo {
   StrInfo strInfo;
   SoCInfo socInfo;
@@ -113,6 +126,7 @@ typedef struct tagPlatformInfo {
   map<string, vector<string>> aiCoreIntrinsicDtypeMap;
   VectorCoreSpec vectorCoreSpec;
   VectorCoreMemoryRates vectorCoreMemoryRates;
+  CPUCache cpucache;
   map<string, vector<string>> vectorCoreIntrinsicDtypeMap;
 } PlatformInfo;

@@ -46,7 +46,6 @@ const char *const OPTION_EXEC_DUMP_STEP = "ge.exec.dumpStep";
 const char *const OPTION_EXEC_DUMP_MODE = "ge.exec.dumpMode";
 const char *const OPTION_EXEC_ENABLE_DUMP_DEBUG = "ge.exec.enableDumpDebug";
 const char *const OPTION_EXEC_DUMP_DEBUG_MODE = "ge.exec.dumpDebugMode";
-const char *const OPTION_EXEC_OP_DEBUG_LEVEL = "ge.exec.opDebugLevel";
 const char *const OPTION_EXEC_ENABLE_INCRE_BUILD = "ge.exec.enableIncreBuild";
 const char *const OPTION_EXEC_INCRE_BUILD_CACHE_PATH = "ge.exec.increBuildCachePath";
 const char *const OPTION_EXEC_ENABLE_SCOPE_FUSION_PASSES = "ge.exec.enableScopeFusionPasses";
@@ -174,6 +173,9 @@ const char *const kDynamicBatchSize = "ge.dynamicBatchSize";
 // configure whether to use dynamic image size
 const char *const kDynamicImageSize = "ge.dynamicImageSize";
+// Configure whether to use dynamic dims
+const char *const kDynamicDims = "ge.dynamicDims";
 // Configure auto tune mode, this option only take effect while AUTO_TUNE_FLAG is Y,
 // example: GA|RL, support configure multiple, split by |
 const std::string AUTO_TUNE_MODE = "ge.autoTuneMode";
@@ -269,6 +271,7 @@ static const char *const INPUT_SHAPE = "input_shape";
 static const char *const OP_NAME_MAP = "op_name_map";
 static const char *const DYNAMIC_BATCH_SIZE = kDynamicBatchSize;
 static const char *const DYNAMIC_IMAGE_SIZE = kDynamicImageSize;
+static const char *const DYNAMIC_DIMS = kDynamicDims;
 static const char *const INSERT_OP_FILE = ge::INSERT_OP_FILE.c_str();
 static const char *const PRECISION_MODE = ge::PRECISION_MODE.c_str();
 static const char *const EXEC_DISABLE_REUSED_MEMORY = ge::OPTION_EXEC_DISABLE_REUSED_MEMORY;
@@ -291,10 +294,11 @@ static const char *const OPTYPELIST_FOR_IMPLMODE = ge::OPTYPELIST_FOR_IMPLMODE.c
 // for interface: aclgrphBuildModel
 const std::set<std::string> ir_builder_suppported_options = {
-    INPUT_FORMAT, INPUT_SHAPE, OP_NAME_MAP, DYNAMIC_BATCH_SIZE,
-    DYNAMIC_IMAGE_SIZE, INSERT_OP_FILE, PRECISION_MODE, EXEC_DISABLE_REUSED_MEMORY,
-    AUTO_TUNE_MODE, OUTPUT_TYPE, OUT_NODES, INPUT_FP16_NODES,
-    LOG_LEVEL};
+    INPUT_FORMAT, INPUT_SHAPE, OP_NAME_MAP,
+    DYNAMIC_BATCH_SIZE, DYNAMIC_IMAGE_SIZE, DYNAMIC_DIMS,
+    INSERT_OP_FILE, PRECISION_MODE, EXEC_DISABLE_REUSED_MEMORY,
+    AUTO_TUNE_MODE, OUTPUT_TYPE, OUT_NODES,
+    INPUT_FP16_NODES, LOG_LEVEL};
 // for interface: aclgrphBuildInitialize
 const std::set<std::string> global_options = {CORE_TYPE,
                                               SOC_VERSION,
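With kDynamicDims/DYNAMIC_DIMS now accepted among the IR build options, a caller can pass the new key next to the existing shape options. A sketch of such an options map is below; the value strings are illustrative only, since the expected formats are not defined in this header.

```cpp
#include <map>
#include <string>

// Sketch: options map passed to the IR build interface, with the new dynamic-dims key.
std::map<std::string, std::string> BuildDynamicDimsOptions() {
  std::map<std::string, std::string> options;
  options["input_shape"] = "data:1,3,-1,-1";  // INPUT_SHAPE, illustrative value
  options["ge.dynamicDims"] = "32,32;64,64";  // kDynamicDims / DYNAMIC_DIMS, illustrative value
  return options;
}
```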

@@ -343,6 +343,7 @@ class OpReg {
     auto x_type = op.GetInputDesc(in_name).GetDataType();    \
     TensorDesc op_output_desc = op.GetOutputDesc(out_name);  \
     op_output_desc.SetShape(ge::Shape(x_shape));             \
+    op_output_desc.SetOriginShape(ge::Shape(x_shape));       \
     op_output_desc.SetDataType(x_type);                      \
     return op.UpdateOutputDesc(out_name, op_output_desc);    \
   }

@@ -232,7 +232,7 @@
     rtError_t _rt_ret = (expr);                            \
     if (_rt_ret != RT_ERROR_NONE) {                        \
       DOMI_LOGE("Call rt api failed, ret: 0x%X", _rt_ret); \
-      return ge::RT_FAILED;                                \
+      return RT_ERROR_TO_GE_STATUS(_rt_ret);               \
     }                                                      \
   } while (0);

@@ -280,8 +280,25 @@ GE_ERRORNO_RUNTIME(GE_RTI_CALL_HCCL_REDUCE_SCATTER_FAILED, 47, "call hccl hcom r
 // Executor module error code definition
 GE_ERRORNO_EXECUTOR(GE_EXEC_NOT_INIT, 1, "GE Executor is not yet initialized.");
-GE_ERRORNO_EXECUTOR(GE_AIPP_NOT_EXIST, 2, "GE AIPP is not exist.");
-GE_ERRORNO_EXECUTOR(GE_DYNAMIC_AIPP_NOT_SUPPORT_QUERY, 3, "GE Dynamic AIPP is not support to query temporarily.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_PATH_INVALID, 2, "Model file path is invalid.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_KEY_PATH_INVALID, 3, "Key file path of model is invalid.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_ID_INVALID, 4, "Model id is invalid.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_DATA_SIZE_INVALID, 5, "Data size of model is invalid.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_WEIGHT_SIZE_INVALID, 6, "Weight size of model is invalid.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_PARTITION_NUM_INVALID, 7, "Partition number of model is invalid.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_QUEUE_ID_INVALID, 8, "Queue id of model is invalid.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION, 9, "Model does not support encryption.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_READ_MODEL_FILE_FAILED, 10, "Failed to read model file.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_MODEL_REPEATED, 11, "The model is loaded repeatedly.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_MODEL_PARTITION_FAILED, 12, "Failed to load model partition.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED, 13, "Failed to load weight partition.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_TASK_PARTITION_FAILED, 14, "Failed to load task partition.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_LOAD_KERNEL_PARTITION_FAILED, 15, "Failed to load kernel partition.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_ALLOC_FEATURE_MAP_MEM_FAILED, 16, "Failed to allocate feature map memory.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_ALLOC_WEIGHT_MEM_FAILED, 17, "Failed to allocate weight memory.");
+GE_ERRORNO_EXECUTOR(GE_EXEC_ALLOC_VAR_MEM_FAILED, 18, "Failed to allocate variable memory.");
+GE_ERRORNO_EXECUTOR(GE_AIPP_NOT_EXIST, 19, "GE AIPP is not exist.");
+GE_ERRORNO_EXECUTOR(GE_DYNAMIC_AIPP_NOT_SUPPORT_QUERY, 20, "GE Dynamic AIPP is not support to query temporarily.");
 // Generator module error code definition
 GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_INIT_FAILED, 1, "Graph manager initialize failed.");
@@ -289,6 +306,8 @@ GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_ADD_GRAPH_FAILED, 2, "Graph mana
 GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, 3, "Graph manager build graph failed.");
 GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_FINALIZE_FAILED, 4, "Graph manager finalize failed.");
 GE_ERRORNO_GENERATOR(GE_GENERATOR_GRAPH_MANAGER_SAVE_MODEL_FAILED, 5, "Graph manager save model failed.");
+#define RT_ERROR_TO_GE_STATUS(RT_ERROR) static_cast<Status>(RT_ERROR)
 }  // namespace ge
 #endif  // INC_FRAMEWORK_COMMON_GE_INNER_ERROR_CODES_H_
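Together with the new RT_ERROR_TO_GE_STATUS macro, the expanded GE_EXEC_* codes let a caller tell apart load failures that previously surfaced as a single generic error. A hedged sketch follows; the header path is assumed from the include guard above, and the messages returned are illustrative.

```cpp
#include "framework/common/ge_inner_error_codes.h"  // assumed path, derived from the include guard above

// Sketch: map a few of the new executor error codes to human-readable text.
const char *DescribeLoadFailure(ge::Status ret) {
  if (ret == ge::GE_EXEC_MODEL_PATH_INVALID) {
    return "model file path is invalid";
  }
  if (ret == ge::GE_EXEC_LOAD_MODEL_REPEATED) {
    return "model was already loaded";
  }
  if (ret == ge::GE_EXEC_ALLOC_FEATURE_MAP_MEM_FAILED) {
    return "feature map memory allocation failed";
  }
  return "other executor failure";
}
```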

@@ -339,6 +339,7 @@ REGISTER_OPTYPE_DECLARE(GETNEXT, "GetNext");
 REGISTER_OPTYPE_DECLARE(INITDATA, "InitData");
 REGISTER_OPTYPE_DECLARE(TRANSSHAPE, "TransShape")
 REGISTER_OPTYPE_DECLARE(REFIDENTITY, "RefIdentity");
+REGISTER_OPTYPE_DECLARE(BITCAST, "Bitcast");
 // ANN dedicated operator
 REGISTER_OPTYPE_DECLARE(ANN_MEAN, "AnnMean");
@@ -432,6 +433,8 @@ REGISTER_OPTYPE_DECLARE(HCOMALLREDUCE, "HcomAllReduce");
 REGISTER_OPTYPE_DECLARE(HCOMREDUCESCATTER, "HcomReduceScatter");
 REGISTER_OPTYPE_DECLARE(HCOMSEND, "HcomSend");
 REGISTER_OPTYPE_DECLARE(HCOMRECEIVE, "HcomReceive");
+REGISTER_OPTYPE_DECLARE(HCOMREMOTEREAD, "HcomRemoteRead");
+REGISTER_OPTYPE_DECLARE(HCOMREMOTEWRITE, "HcomRemoteWrite");
 REGISTER_OPTYPE_DECLARE(VARASSIGN, "VarAssign");
 REGISTER_OPTYPE_DECLARE(VARISINITIALIZEDOP, "VarIsInitializedOp");
@@ -558,6 +561,16 @@ enum ModelCheckType {
   UNCHECK  // no verification
 };
+///
+/// @brief dynamic input type
+///
+enum DynamicInputType {
+  FIXED = 0,  // default mode
+  DYNAMIC_BATCH = 1,
+  DYNAMIC_IMAGE = 2,
+  DYNAMIC_DIMS = 3
+};
 ///
 /// @brief magic number of the model file
 ///
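The new DynamicInputType enum mirrors the dynamic_type value reported by the executor interfaces changed later in this diff. A small sketch mapping those values to log strings, assuming the numeric values declared above:

```cpp
#include <cstdint>

// Sketch: translate the dynamic_type out-parameter of GetDynamicBatchInfo into a name.
const char *DynamicInputTypeName(int32_t dynamic_type) {
  switch (dynamic_type) {
    case 0:  return "FIXED";          // default mode, no dynamic input
    case 1:  return "DYNAMIC_BATCH";  // dynamic batch size
    case 2:  return "DYNAMIC_IMAGE";  // dynamic image size
    case 3:  return "DYNAMIC_DIMS";   // dynamic dims
    default: return "UNKNOWN";
  }
}
```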

@@ -26,12 +26,14 @@
 #include "common/ge_types.h"
 #include "common/types.h"
 #include "graph/tensor.h"
+#include "graph/ge_tensor.h"
 #include "runtime/base.h"
 namespace ge {
 class ModelListenerAdapter;
 class SingleOp;
+class DynamicSingleOp;
 struct RunModelData {
   uint32_t index;  // Data index
@@ -43,6 +45,7 @@ struct RunModelData {
   uint64_t dynamic_batch_size = 0;    // Dynamic batch size scene, set dynamic size, not supported by default:0
   uint64_t dynamic_image_height = 0;  // Dynamic image size scene, set image height, not supported by default:0
   uint64_t dynamic_image_width = 0;   // Dynamic image size scene, set image width, not supported by default:0
+  std::vector<uint64_t> dynamic_dims; // Dynamic dims scene, set dynamic dims, not supported by default:empty
 };
 class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor {
@@ -87,16 +90,52 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor {
   ///
   ge::Status SetDynamicImageSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t image_height,
                                  uint64_t image_width);
+  ///
+  /// @ingroup ge
+  /// @brief Set dynamic dims info
+  /// @param [in] model_id: model id allocate from manager
+  /// @param [in] dynamic_input_addr: dynamic input addr created by user
+  /// @param [in] length: length of dynamic input addr
+  /// @param [in] dynamic_dim_num: number of dynamic dimension
+  /// @param [in] dynamic_dims: array of dynamic dimensions
+  /// @return execute result
+  ///
+  ge::Status SetDynamicDims(uint32_t model_id, void *dynamic_input_addr, uint64_t length,
+                            const std::vector<uint64_t> &dynamic_dims);
+  ///
+  /// @ingroup ge
+  /// @brief Get current dynamic dims info by combined dims
+  /// @param [in] model_id: model id allocate from manager
+  /// @param [in] combined_dims: array of combined dimensions
+  /// @param [out] cur_dynamic_dims: current dynamic dims
+  /// @return execute result
+  ///
+  ge::Status GetCurDynamicDims(uint32_t model_id, const std::vector<uint64_t> &combined_dims,
+                               std::vector<uint64_t> &cur_dynamic_dims);
   ///
   /// @ingroup ge
   /// @brief Get dynamic batch_info
   /// @param [in] model_id
   /// @param [out] batch_info
+  /// @param [out] dynamic_type
+  /// @return execute result
+  ///
+  ge::Status GetDynamicBatchInfo(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info,
+                                 int32_t &dynamic_type);
+  ///
+  /// @ingroup ge
+  /// @brief Get combined dynamic dims info
+  /// @param [in] model_id
+  /// @param [out] batch_info
   /// @return execute result
   ///
-  ge::Status GetDynamicBatchInfo(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info);
-  ge::Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info);
+  ge::Status GetCombinedDynamicDims(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info);
+  ge::Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type);
   ///
   /// @ingroup ge
@@ -209,6 +248,13 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor {
   static ge::Status ExecuteAsync(SingleOp *executor, const std::vector<DataBuffer> &inputs,
                                  std::vector<DataBuffer> &outputs);
+  static ge::Status LoadDynamicSingleOp(const std::string &model_name, const ge::ModelData &modelData, void *stream,
+                                        DynamicSingleOp **single_op);
+  static ge::Status ExecuteAsync(DynamicSingleOp *executor, const std::vector<GeTensorDesc> &input_desc,
+                                 const std::vector<DataBuffer> &inputs, std::vector<GeTensorDesc> &output_desc,
+                                 std::vector<DataBuffer> &outputs);
   static ge::Status ReleaseSingleOpResource(void *stream);
   ge::Status GetBatchInfoSize(uint32_t model_id, size_t &shape_count);
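A hedged sketch of how the new dynamic-dims entry points might be driven. Only the signatures come from this diff; the header path, the already-loaded model, the dynamic input buffer, and the dim values are assumptions.

```cpp
#include <cstdint>
#include <vector>

#include "ge/ge_executor.h"  // assumed header path

ge::Status ConfigureDynamicDims(ge::GeExecutor &executor, uint32_t model_id,
                                void *dynamic_input_addr, uint64_t addr_length) {
  std::vector<std::vector<int64_t>> batch_info;
  int32_t dynamic_type = 0;
  ge::Status ret = executor.GetDynamicBatchInfo(model_id, batch_info, dynamic_type);  // new overload with dynamic_type
  if (ret != ge::SUCCESS) {
    return ret;
  }
  const std::vector<uint64_t> dynamic_dims = {32, 32};  // placeholder gear values
  ret = executor.SetDynamicDims(model_id, dynamic_input_addr, addr_length, dynamic_dims);
  if (ret != ge::SUCCESS) {
    return ret;
  }
  std::vector<uint64_t> cur_dims;
  return executor.GetCurDynamicDims(model_id, dynamic_dims, cur_dims);  // dynamic_dims used as the combined dims here
}
```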

@@ -35,9 +35,6 @@ class ModelRunner {
   bool LoadDavinciModel(uint32_t device_id, uint64_t session_id, uint32_t model_id,
                         std::shared_ptr<DavinciModel> davinci_model, std::shared_ptr<ModelListener> listener);
-  bool DistributeTask(uint32_t model_id);
   bool LoadModelComplete(uint32_t model_id);
   const std::vector<uint32_t> &GetTaskIdList(uint32_t model_id) const;
@@ -46,8 +43,6 @@ class ModelRunner {
   const std::map<std::string, std::shared_ptr<RuntimeInfo>> &GetRuntimeInfoMap(uint32_t model_id) const;
-  void *GetModelHandle(uint32_t model_id) const;
   bool UnloadModel(uint32_t model_id);
   bool RunModel(uint32_t model_id, const InputData &input_data, OutputData *output_data);

@@ -0,0 +1,56 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_FRAMEWORK_MEMORY_MEMORY_API_H_
#define INC_FRAMEWORK_MEMORY_MEMORY_API_H_
#include <string>
#include <vector>
#include "ge/ge_api_error_codes.h"
#include "runtime/mem.h"
namespace ge {
enum MemStorageType {
HBM = 0,
RDMA_HBM,
};
struct HostVarInfo {
uint64_t base_addr;
uint64_t var_size;
};
///
/// \param size [in] rdma pool memory size to be allocated.
/// \param mem_type [in] memory type for rdma pool.
/// \return Status result of function
Status InitRdmaPool(size_t size, rtMemType_t mem_type = RT_MEMORY_HBM);
///
/// \param var_info [in] host variable addr infos.
/// \param mem_type [in] memory type for rdma pool.
/// \return Status result of function
Status RdmaRemoteRegister(const std::vector<HostVarInfo> &var_info, rtMemType_t mem_type = RT_MEMORY_HBM);
///
/// \param var_name [in] var_name name of host variable.
/// \param base_addr [out] base_addr vase addr of host variable.
/// \param var_size [out] var_size memory_size of host variable.
/// \return Status result of function
Status GetVarBaseAddrAndSize(const std::string &var_name, uint64_t &base_addr, uint64_t &var_size);
} // namespace ge
#endif // INC_FRAMEWORK_MEMORY_MEMORY_API_H_
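memory_api.h is new in this release; below is a minimal sketch of the call order it implies when exposing a host variable over the RDMA pool. The pool size, the variable name, and the include path (derived from the include guard) are placeholders.

```cpp
#include <string>
#include <vector>

#include "framework/memory/memory_api.h"  // assumed path, derived from the include guard above

ge::Status RegisterHostTable(const std::string &var_name) {
  ge::Status ret = ge::InitRdmaPool(1UL << 30);  // 1 GiB pool, default mem_type RT_MEMORY_HBM
  if (ret != ge::SUCCESS) {
    return ret;
  }
  ge::HostVarInfo info{};
  ret = ge::GetVarBaseAddrAndSize(var_name, info.base_addr, info.var_size);
  if (ret != ge::SUCCESS) {
    return ret;
  }
  return ge::RdmaRemoteRegister({info});  // registers the host range for remote read/write
}
```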

@@ -96,10 +96,6 @@ Status CheckCustomAiCpuOpLib();
 Status DumpInfershapeJson(const ge::Graph &graph, const char *json_file);
-Status SetOutputNodeInfo(ge::Graph &graph, const std::string &output_type, const std::string &output_format);
-Status GetOutputLeaf(ge::NodePtr node, std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info);
 void GetOutputNodesNameAndIndex(std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info,
                                 std::vector<std::string> &output_nodes_name);

@@ -120,6 +120,7 @@ struct OmgContext {
   bool is_dynamic_input = false;
   std::string dynamic_batch_size;
   std::string dynamic_image_size;
+  std::string dynamic_dims;
 };
 }  // namespace ge

@@ -0,0 +1,110 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_FRAMEWORK_OMG_PARSER_MODEL_PARSER_H_
#define INC_FRAMEWORK_OMG_PARSER_MODEL_PARSER_H_
#include <google/protobuf/message.h>
#include "framework/common/types.h"
#include "framework/omg/omg_inner_types.h"
#include "graph/attr_value.h"
#include "graph/compute_graph.h"
#include "graph/ge_tensor.h"
#include "graph/graph.h"
#include "graph/op_desc.h"
#include "graph/operator.h"
#include "graph/range_vistor.h"
#include "graph/utils/attr_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
using Status = domi::Status;
namespace domi {
using GetGraphCallback = std::function<std::unique_ptr<google::protobuf::Message>(
const google::protobuf::Message *root_proto, const std::string &graph)>;
class ModelParser {
public:
ModelParser() {}
virtual ~ModelParser() {}
/**
* @ingroup domi_omg
* @brief Analyze network model data
* @param [in] file Network model file path
* @param [in|out] graph Save the network information after analysis
* @return SUCCESS
* @return Others failed
*/
virtual Status Parse(const char *file, ge::Graph &graph) = 0;
/**
* @ingroup domi_omg
* @brief Parse relevant data from memory and save it to graph
* @param [in] input Model file memory data
* @param [in|out] graph A graph for saving the model information after analysis
* @return SUCCESS
* @return FAILED
* @author
*/
virtual Status ParseFromMemory(const char *data, uint32_t size, ge::ComputeGraphPtr &graph) = 0;
/**
* @ingroup domi_omg
* @brief Analyze network model data
* @param [in] proto network model
* @param [in|out] graph Save the network information after analysis
* @return SUCCESS
* @return Others failed
*/
virtual Status ParseProto(const google::protobuf::Message *proto, ge::ComputeGraphPtr &graph) = 0;
/**
* @ingroup domi_omg
* @brief Analyze callback model data in subgraph
* @param [in] proto network model
* @param [in] callback callback of subgraph
* @param [in|out] graph Save the network information after analysis
* @return SUCCESS
* @return Others failed
*/
virtual Status ParseProtoWithSubgraph(const google::protobuf::Message *proto, GetGraphCallback callback,
ge::ComputeGraphPtr &graph) = 0;
/**
* @ingroup domi_omg
* @brief Convert model files to JSON format
* @param [in] model_file Model file path to be converted
* @param [out] json_file Converted JSON file path
* @return SUCCESS
* @return Others failed
*/
virtual Status ToJson(const char *model_file, const char *json_file) { return domi::SUCCESS; }
/*
* @ingroup domi_omg
* @brief Convert network data type
* @param [in] type Data type to be converted
* @return ge::DataType
*/
virtual ge::DataType ConvertToGeDataType(const uint32_t type) = 0;
virtual Status ParseAllGraph(const google::protobuf::Message *root_proto, ge::ComputeGraphPtr &root_graph) = 0;
};
} // namespace domi
#endif // INC_FRAMEWORK_OMG_PARSER_MODEL_PARSER_H_

@@ -0,0 +1,92 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_FRAMEWORK_OMG_PARSER_OP_PARSER_H_
#define INC_FRAMEWORK_OMG_PARSER_OP_PARSER_H_
#include <google/protobuf/text_format.h>
#include "common/types.h"
#include "omg/omg_inner_types.h"
#include "proto/om.pb.h"
#include "graph/ge_tensor.h"
#include "graph/op_desc.h"
#include "graph/utils/op_desc_utils.h"
using google::protobuf::Message;
using Status = domi::Status;
namespace ge {
/**
* @ingroup domi_omg
* @brief Used to analyze operator information
*
*/
class OpParser {
public:
/**
* @ingroup domi_omg
* @brief Deconstructor
*/
virtual ~OpParser() {}
/**
* @ingroup domi_omg
* @brief Analytic operator parameters
* @param [in] op_src Parameter data to be resolved
* @param [out] graph Parsed parameter data
* @return SUCCESS
* @return FAILED
*/
virtual Status ParseParams(const Message *op_src, ge::OpDescPtr &op_desc) = 0;
/**
* @ingroup domi_omg
* @brief Analytic operator parameters
* @param [in] op_src Parameter data to be resolved
* @param [out] Operator parameter data
* @return SUCCESS
* @return FAILED
*/
virtual Status ParseParams(const Message *op_src, ge::Operator &op_dest) = 0;
/**
* @ingroup domi_omg
* @brief Analytic operator weight information
* @param [in] op_src Weight data to be resolved
* @param [out] op_dest Weight data after analysis
* @return SUCCESS
* @return FAILED
*/
virtual Status ParseWeights(const Message *op_src, ge::NodePtr &node) = 0;
/**
* @ingroup domi_omg
* @brief Get the format information according to the parameters in the operator
* @param [in] op_src Parameter data to be resolved
* @param [out] format Output the parsed format
* @return SUCCESS
* @return FAILED
*/
virtual Status GetFormat(const Message *op_src, domi::domiTensorFormat_t &format) {
(void)op_src;
// Indicates that the op does not provide a value for format
format = domi::DOMI_TENSOR_RESERVED;
return domi::SUCCESS;
}
};
} // namespace ge
#endif // INC_FRAMEWORK_OMG_PARSER_OP_PARSER_H_

@@ -0,0 +1,31 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_FRAMEWORK_OMG_PARSER_PARSER_API_H_
#define INC_FRAMEWORK_OMG_PARSER_PARSER_API_H_
#include <iostream>
#include <map>
#include <string>
#include "ge/ge_api_error_codes.h"
namespace ge {
// Initialize parser
Status ParserInitialize(const std::map<std::string, std::string>& options);
// Finalize parser, release all resources
Status ParserFinalize();
} // namespace ge
#endif // INC_FRAMEWORK_OMG_PARSER_PARSER_API_H_
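parser_api.h is also new; the sketch below shows the initialize/finalize bracket it implies around any parsing work. The options map is left empty because supported keys are not listed in this header, and the include path is assumed from the include guard.

```cpp
#include <map>
#include <string>

#include "framework/omg/parser/parser_api.h"  // assumed path, derived from the include guard above

ge::Status RunWithParser() {
  const std::map<std::string, std::string> options;  // parser options, none required for this sketch
  ge::Status ret = ge::ParserInitialize(options);
  if (ret != ge::SUCCESS) {
    return ret;
  }
  // ... create a ModelParser / WeightsParser and parse the model here ...
  return ge::ParserFinalize();
}
```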

@@ -0,0 +1,138 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_FRAMEWORK_OMG_PARSER_PARSER_FACTORY_H_
#define INC_FRAMEWORK_OMG_PARSER_PARSER_FACTORY_H_
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include "framework/common/types.h"
#include "framework/omg/omg_inner_types.h"
using Status = domi::Status;
namespace domi {
class WeightsParser;
class ModelParser;
typedef std::shared_ptr<ModelParser> (*MODEL_PARSER_CREATOR_FUN)(void);
// Create modelparser for different frameworks
class ModelParserFactory {
public:
static ModelParserFactory *Instance();
/**
* @ingroup domi_omg
* @brief Create a modelparser based on the type entered
* @param [in] type Framework type
* @return Created modelparser
*/
std::shared_ptr<ModelParser> CreateModelParser(const domi::FrameworkType type);
/**
* @ingroup domi_omg
* @brief Register create function
* @param [in] type Framework type
* @param [in] fun ModelParser's create function
*/
void RegisterCreator(const domi::FrameworkType type, MODEL_PARSER_CREATOR_FUN fun);
protected:
ModelParserFactory() {}
~ModelParserFactory();
private:
std::map<domi::FrameworkType, MODEL_PARSER_CREATOR_FUN> creator_map_;
}; // end class ModelParserFactory
class ModelParserRegisterar {
public:
ModelParserRegisterar(const domi::FrameworkType type, MODEL_PARSER_CREATOR_FUN fun) {
ModelParserFactory::Instance()->RegisterCreator(type, fun);
}
~ModelParserRegisterar() {}
};
// Registration macros for model parsers
#define REGISTER_MODEL_PARSER_CREATOR(type, clazz) \
std::shared_ptr<ModelParser> Creator_##type##_Model_Parser() { \
std::shared_ptr<clazz> ptr = nullptr; \
try { \
ptr = make_shared<clazz>(); \
} catch (...) { \
ptr = nullptr; \
} \
return std::shared_ptr<ModelParser>(ptr); \
} \
ModelParserRegisterar g_##type##_Model_Parser_Creator(type, Creator_##type##_Model_Parser)
typedef std::shared_ptr<WeightsParser> (*WEIGHTS_PARSER_CREATOR_FUN)(void);
// Create weightsparser for different frameworks
class WeightsParserFactory {
public:
static WeightsParserFactory *Instance();
/**
* @ingroup domi_omg
* @brief Create weightsparser based on the type entered
* @param [in] type Framework type
* @return Created weightsparser
*/
std::shared_ptr<WeightsParser> CreateWeightsParser(const domi::FrameworkType type);
/**
* @ingroup domi_omg
* @brief Register create function
* @param [in] type Framework type
* @param [in] fun WeightsParser's create function
*/
void RegisterCreator(const domi::FrameworkType type, WEIGHTS_PARSER_CREATOR_FUN fun);
protected:
WeightsParserFactory() {}
~WeightsParserFactory();
private:
std::map<domi::FrameworkType, WEIGHTS_PARSER_CREATOR_FUN> creator_map_;
}; // end class WeightsParserFactory
class WeightsParserRegisterar {
public:
WeightsParserRegisterar(const domi::FrameworkType type, WEIGHTS_PARSER_CREATOR_FUN fun) {
WeightsParserFactory::Instance()->RegisterCreator(type, fun);
}
~WeightsParserRegisterar() {}
};
// Register macro of weight resolver
#define REGISTER_WEIGHTS_PARSER_CREATOR(type, clazz) \
std::shared_ptr<WeightsParser> Creator_##type##_Weights_Parser() { \
std::shared_ptr<clazz> ptr = nullptr; \
try { \
ptr = make_shared<clazz>(); \
} catch (...) { \
ptr = nullptr; \
} \
return std::shared_ptr<WeightsParser>(ptr); \
} \
WeightsParserRegisterar g_##type##_Weights_Parser_Creator(type, Creator_##type##_Weights_Parser)
}; // namespace domi
#endif // INC_FRAMEWORK_OMG_PARSER_PARSER_FACTORY_H_
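A hedged sketch of how a parser would hook into the factory above. DummyModelParser and the choice of TENSORFLOW as the framework enumerator are illustrative assumptions; every pure virtual is stubbed to return SUCCESS, and std::make_shared is pulled into scope because the registration macro expands to an unqualified make_shared call.

```cpp
#include <memory>

#include "framework/omg/parser/model_parser.h"    // assumed paths, derived from the include guards above
#include "framework/omg/parser/parser_factory.h"

using std::make_shared;  // the REGISTER_* macros call make_shared unqualified

namespace domi {
// Hypothetical parser used only to show the registration flow.
class DummyModelParser : public ModelParser {
 public:
  Status Parse(const char *file, ge::Graph &graph) override { return SUCCESS; }
  Status ParseFromMemory(const char *data, uint32_t size, ge::ComputeGraphPtr &graph) override { return SUCCESS; }
  Status ParseProto(const google::protobuf::Message *proto, ge::ComputeGraphPtr &graph) override { return SUCCESS; }
  Status ParseProtoWithSubgraph(const google::protobuf::Message *proto, GetGraphCallback callback,
                                ge::ComputeGraphPtr &graph) override { return SUCCESS; }
  ge::DataType ConvertToGeDataType(const uint32_t type) override { return ge::DT_FLOAT; }
  Status ParseAllGraph(const google::protobuf::Message *root_proto, ge::ComputeGraphPtr &root_graph) override {
    return SUCCESS;
  }
};

// Generates Creator_TENSORFLOW_Model_Parser() and a global registrar that wires it into ModelParserFactory.
REGISTER_MODEL_PARSER_CREATOR(TENSORFLOW, DummyModelParser);
}  // namespace domi
```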

@@ -0,0 +1,43 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_FRAMEWORK_OMG_PARSER_PARSER_INNER_CONTEXT_H_
#define INC_FRAMEWORK_OMG_PARSER_PARSER_INNER_CONTEXT_H_
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "external/register/register_fmk_types.h"
#include "external/register/register_types.h"
#include "framework/omg/omg_inner_types.h"
namespace ge {
struct ParserContext {
std::unordered_map<std::string, std::vector<int64_t>> input_dims;
domi::domiTensorFormat_t format = domi::DOMI_TENSOR_ND;
RunMode run_mode = ONLY_PRE_CHECK;
std::string custom_proto_path; // save caffe custom proto path, used by caffe parse
std::string caffe_proto_path; // save caffe proto path, used by caffe parse
};
ParserContext &GetParserContext();
} // namespace ge
#endif // INC_FRAMEWORK_OMG_PARSER_PARSER_INNER_CONTEXT_H_

@@ -0,0 +1,74 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_FRAMEWORK_OMG_PARSER_WEIGHTS_PARSER_H_
#define INC_FRAMEWORK_OMG_PARSER_WEIGHTS_PARSER_H_
#include "graph/graph.h"
#include "graph/attr_value.h"
#include "graph/compute_graph.h"
#include "graph/ge_tensor.h"
#include "graph/op_desc.h"
#include "graph/operator.h"
#include "graph/range_vistor.h"
#include "graph/utils/attr_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
namespace domi {
/**
* @ingroup domi_omg
* @brief Weight information resolver
*
*/
class WeightsParser {
public:
/**
* @ingroup domi_omg
* @brief Constructor
*/
WeightsParser() {}
/**
* @ingroup domi_omg
* @brief Deconstructor
*/
virtual ~WeightsParser() {}
/**
* @ingroup domi_omg
* @brief Analyze weight data
* @param [in] file Path of weight file after training
* @param [in|out] graph Graph for saving weight information after analysis
* @return SUCCESS
* @return Others failed
*/
virtual Status Parse(const char *file, ge::Graph &graph) = 0;
/**
* @ingroup domi_omg
* @brief Parse relevant data from memory and save it to graph
* @param [in] input Model file memory data
* @param [in|out] graph A graph for saving the model information after analysis
* @return SUCCESS
* @return FAILED
* @author
*/
virtual Status ParseFromMemory(const char *input, uint32_t length, ge::ComputeGraphPtr &graph) = 0;
};
} // namespace domi
#endif // INC_FRAMEWORK_OMG_PARSER_WEIGHTS_PARSER_H_

@@ -87,11 +87,14 @@ class ComputeGraph : public std::enable_shared_from_this<ComputeGraph>, public A
   // AddNode with NodePtr
   NodePtr AddNode(NodePtr node);
   NodePtr AddNode(OpDescPtr op);
-  NodePtr AddNode(OpDescPtr op, int64_t id);  // for unserialize.
+  NodePtr AddNode(OpDescPtr op, int64_t id);  // for unserialize
   NodePtr AddNodeFront(NodePtr node);
   NodePtr AddNodeFront(const OpDescPtr &op);
   NodePtr AddInputNode(NodePtr node);
   NodePtr AddOutputNode(NodePtr node);
+  // insert node with specific pre_node
+  NodePtr AddNodeAfter(OpDescPtr &op, const NodePtr &pre_node);
+  NodePtr AddNodeAfter(NodePtr node, const NodePtr &pre_node);
   graphStatus RemoveNode(const NodePtr &node);
   graphStatus RemoveInputNode(const NodePtr &node);
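A hedged sketch of the new AddNodeAfter overload. The node name, the "Cast" op type, and the header paths are illustrative; only the AddNodeAfter signature comes from this diff.

```cpp
#include <memory>

#include "graph/compute_graph.h"  // assumed header paths
#include "graph/op_desc.h"

// Inserts a hypothetical Cast node directly after an existing node.
ge::NodePtr InsertCastAfter(const ge::ComputeGraphPtr &graph, const ge::NodePtr &pre_node) {
  if (graph == nullptr || pre_node == nullptr) {
    return nullptr;
  }
  ge::OpDescPtr op_desc = std::make_shared<ge::OpDesc>("cast_after_pre_node", "Cast");
  return graph->AddNodeAfter(op_desc, pre_node);  // new overload: places the node right behind pre_node
}
```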

@@ -185,6 +185,9 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAM
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_INPUT_ORIGIN_SIZE;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NODE_CONNECT_INPUT;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_NODE_CONNECT_OUTPUT;
 // to be deleted
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_TO_BE_DELETED;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string PERMUTE_RESHAPE_FUSION;
@@ -934,12 +937,14 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAM
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PRED_VALUE;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BATCH_NUM;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BATCH_LABEL;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMBINED_BATCH;
 // Control flow
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STREAM_SWITCH_COND;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_ACTIVE_STREAM_LIST;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCHN_PRED_VALUE;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SUBGRAPH_FIRST_ACTIVE;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMBINED_DYNAMIC_DIMS;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCH_BRANCH_NODE_LABEL;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG;
@@ -983,6 +988,7 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NEE
 // For mutil-batch
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_INSERT_BY_MBATCH;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_MBATCH_ORIGIN_INPUT_DIMS;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DYNAMIC_TYPE;
 // For inserted op
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_INSERTED_BY_GE;
@@ -1022,6 +1028,10 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAM
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_INPUT_L1_ADDR;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_OP_INPUT_L1_VALID_SIZE;
+// for unregistered op
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_UNREGST_OPPATH;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_UNREGST_ATTRLIST;
 // op overflow dump
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_OP_DEBUG_FLAG;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_OP_DEBUG_MODE;
@@ -1075,8 +1085,25 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAM
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DYNAMIC_SHAPE_FIXED_ADDR;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_DYNAMIC_SHAPE_FIXED_ADDR_INDEX;
+// atc user def dtype&format
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ATC_USER_DEFINE_DATATYPE;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ATC_USER_DEFINE_FORMAT;
 // for fusion op plugin
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_FUSIONOP_ORIGINAL_TYPE;
+// graph partition for aicpu
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PLD_FRONT_NODE_ENGINE_NAME;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_END_REAR_NODE_ENGINE_NAME;
+// input and output memory type
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_VARIABLE_PLACEMENT;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_INPUT_MEMORY_TYPE;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_OUTPUT_MEMORY_TYPE;
+// input_output_offset
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ZERO_COPY_BASIC_OFFSET;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_ZERO_COPY_RELATIVE_OFFSET;
 }  // namespace ge
 #endif  // INC_GRAPH_DEBUG_GE_ATTR_DEFINE_H_

@@ -67,6 +67,9 @@ class ModelSerializeImp {
   bool HandleNodeNameRef();
   bool UnserializeOpDesc(OpDescPtr &opDesc, proto::OpDef &opDefProto);
+  void AttrDefToOpDesc(OpDescPtr &op_desc, std::vector<string> &key_in, std::vector<string> &key_out,
+                       std::vector<uint32_t> &value_in, std::vector<uint32_t> &value_out, std::vector<string> &opt);
+  void OpDescToAttrDef(const ConstOpDescPtr &op_desc, proto::OpDef *op_def_proto);
   bool UnserializeNode(ComputeGraphPtr &graph, proto::OpDef &opDefProto);

Some files were not shown because too many files have changed in this diff.