Update GraphEngine to synchronize with latest Ascend driver software suite

pull/6/head
yanghaoran 5 years ago
parent 6165d1d65d
commit 5b1ccee3ee

@ -75,17 +75,16 @@ elseif(DEFINED ENV{D_LINK_PATH})
find_library(resource libresource.so ${GE_LIB_PATH})
else()
# Ascend mode
set(HIAI_INSTALLED_DIR /usr/local/HiAI)
set(HIAI_DRIVER_DIR ${HIAI_INSTALLED_DIR}/driver/lib64)
set(HIAI_RUNTIME_DIR ${HIAI_INSTALLED_DIR}/runtime/lib64)
set(HIAI_INSTALLED_DIR /usr/local/Ascend)
set(HIAI_DRIVER_DIR ${HIAI_INSTALLED_DIR}/driver/lib64/common)
set(HIAI_RUNTIME_DIR ${HIAI_INSTALLED_DIR}/fwkacllib/lib64)
find_library(c_sec libc_sec.so ${HIAI_DRIVER_DIR})
find_library(slog libslog.so ${HIAI_DRIVER_DIR})
find_library(mmpa libmmpa.so ${HIAI_DRIVER_DIR})
find_library(msprof libmsprof.so ${HIAI_DRIVER_DIR})
find_library(cce libcce.so ${HIAI_RUNTIME_DIR})
find_library(hccl libhccl.so ${HIAI_RUNTIME_DIR})
find_library(runtime libruntime.so ${HIAI_RUNTIME_DIR})
find_library(msprof libmsprof.so ${HIAI_RUNTIME_DIR})
find_library(register libregister.so ${HIAI_RUNTIME_DIR})
find_library(resource libresource.so ${HIAI_RUNTIME_DIR})
endif()

@ -18,16 +18,15 @@
#define INC_COMMON_OPSKERNEL_OPS_KERNEL_INFO_TYPES_H_
#include <stdint.h>
#include <string>
#include <vector>
#include "graph/buffer.h"
#include "runtime/rt_model.h"
using std::string;
namespace ge {
/*lint -e148*/
struct RunContext {
rtModel_t model;
rtStream_t stream;
@ -37,10 +36,12 @@ struct RunContext {
uint64_t weightMemSize;
uint8_t *weightMemBase;
ge::Buffer weightsBuffer;
std::vector<rtStream_t> graphStreamList; // all streams of graph which are sort by ge stream id(0,1,...)
std::vector<rtEvent_t> graphEventList; // all events of graph which are sort by ge event id(0,1,...)
std::vector<rtStream_t> graphStreamList; // all streams of graph, ordered by ge stream id (0,1,...)
std::vector<rtEvent_t> graphEventList; // all events of graph, ordered by ge event id (0,1,...)
};
/*lint +e148*/
struct Task {
uint32_t id;
uint16_t type;
@ -49,10 +50,11 @@ struct Task {
};
struct OpInfo {
string engine; // engine name
string opKernelLib; // opsKernelStore name
string engine; // which engine
/*lint -e148*/
string opKernelLib; // which opsKernelStore
int computeCost; // compute cost
bool flagPartial; // whether to support related shape
bool flagPartial; // whether support is related to shape
bool flagAsync; // Whether to support asynchronous
bool isAtomic; // whether to support atomic addr clean
string opFileName; // op file name

@ -49,6 +49,16 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Session {
///
Status AddGraph(uint32_t graphId, const Graph &graph);
///
/// @ingroup client
/// @brief add a graph with a specific graphId and graphOptions
/// @param [in] graphId graph id
/// @param [in] graph the graph
/// @param [in] options graph options
/// @return Status result of function
///
Status AddGraph(uint32_t graphId, const Graph &graph, const std::map<std::string, std::string> &options);
///
/// @ingroup ge_graph
/// @brief remove a graph of the session with specific session id
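The overload added above lets a caller attach per-graph options when registering a graph with a Session. A minimal, hedged usage sketch; the include path and the option key are assumptions, not taken from this diff:

#include <map>
#include <string>
#include "ge/ge_api.h"  // assumed public header exposing ge::Session and ge::Graph

// Register a graph together with graph-level options (sketch only).
ge::Status AddGraphWithOptions(ge::Session &session, const ge::Graph &graph) {
  std::map<std::string, std::string> graph_options;
  graph_options["ge.exec.placeholderOption"] = "1";  // placeholder key for illustration
  // New overload: graph id, graph, per-graph options
  return session.AddGraph(1, graph, graph_options);
}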

@ -50,7 +50,7 @@ const char *const VARIABLE_MEMORY_MAX_SIZE = "ge.variableMemoryMaxSize";
// its value should be int32_t type, default value is "1"
const std::string STREAM_NUM = "ge.streamNum";
// Configure add head stream to model,
// Configure add head stream to model.
// its value should be "0" or "1", default value is "0"
const std::string HEAD_STREAM = "ge.headStream";
@ -138,7 +138,7 @@ const std::string GE_FE_FLAG = "ge.feFlag";
// this option is to obtain stream max parallel num
const std::string STREAM_MAX_PARALLEL_NUM = "ge.streamMaxParallelNum";
// configure outputDatatype to setting net output type
// configure outputDatatype to set the net output type
const std::string OUTPUT_DATATYPE = "ge.outputDatatype";
// configure whether to enable hcom parallel by session constructor options param,
@ -149,7 +149,7 @@ const std::string HCOM_PARALLEL = "ge.hcomParallel";
// example: GA|RL, support configure multiple, split by |
const std::string AUTO_TUNE_MODE = "ge.autoTuneMode";
// Configure core type "VectorEngine", default value is "AICoreEngine"
// Configure core type "VectorEngine", default value is "AIcoreEngine"
const std::string CORE_TYPE = "ge.engineType";
// Configure soc version , example: "Ascend310"
@ -165,6 +165,10 @@ const char *const OPTION_GE_MAX_DUMP_FILE_NUM = "ge.maxDumpFileNum";
const char *const OPTION_GE_MAX_DUMP_FILE_SIZE = "ge.maxDumpFileSize";
const char *const OPTION_GE_MAX_DUMP_OP_NUM = "ge.maxDumpOpNum";
// Configure for print op pass
// Its value should be "0" or "1", default value is "1"
const char *const ENABLE_PRINT_OP_PASS = "ge.enablePrintOpPass";
// Graph run mode
enum GraphRunMode { PREDICTION = 0, TRAIN };
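The constants in this header are string keys for the option maps handed to GE (the hcomParallel comment above mentions the Session constructor's options parameter). A hedged sketch of assembling such a map from the keys and documented value ranges above; where exactly the map is passed and the outputDatatype value are assumptions:

#include <map>
#include <string>

// Build an options map using keys declared in this header; values follow the
// documented defaults ("0"/"1" flags, int32 values encoded as strings).
std::map<std::string, std::string> BuildGeOptions() {
  std::map<std::string, std::string> options;
  options["ge.streamNum"] = "1";          // STREAM_NUM
  options["ge.headStream"] = "0";         // HEAD_STREAM, "0" or "1"
  options["ge.enablePrintOpPass"] = "1";  // ENABLE_PRINT_OP_PASS, "0" or "1"
  options["ge.outputDatatype"] = "FP32";  // OUTPUT_DATATYPE (value illustrative)
  return options;
}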

@ -28,29 +28,29 @@ namespace ge {
class InferenceContext;
using InferenceContextPtr = std::shared_ptr<InferenceContext>;
class ShapeAndTypeImpl;
class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY ShapeAndType {
public:
ShapeAndType() = default;
ShapeAndType();
~ShapeAndType() = default;
ShapeAndType(const Shape &shape, DataType data_type);
ShapeAndType(const Shape &shape, DataType dataType);
void SetShape(const Shape &shape);
void SetType(DataType data_type);
void SetType(DataType dataType);
const Shape &GetShape() const;
Shape GetShape() const;
DataType GetDataType() const;
private:
Shape shape_;
DataType data_type_ = DT_UNDEFINED;
std::shared_ptr<ShapeAndTypeImpl> shape_and_type_impl_;
};
class InferenceContextImpl;
class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InferenceContext {
public:
InferenceContext() = default;
~InferenceContext() = default;
InferenceContext(const InferenceContext &context) = delete;
InferenceContext(const InferenceContext &&context) = delete;
@ -58,22 +58,19 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY InferenceContext {
InferenceContext &operator=(const InferenceContext &&context) = delete;
void SetInputHandleShapesAndTypes(std::vector<std::vector<ShapeAndType>> &&shapes_and_types);
const std::vector<std::vector<ShapeAndType>> &GetInputHandleShapesAndTypes() const;
const std::vector<std::vector<ShapeAndType>> &GetOutputHandleShapesAndTypes() const;
void SetOutputHandleShapesAndTypes(const std::vector<std::vector<ShapeAndType>> &shapes_and_types);
void SetOutputHandleShapesAndTypes(std::vector<std::vector<ShapeAndType>> &&shapes_and_types);
void SetMarks(const std::vector<std::string> &marks);
const std::vector<std::string> &GetMarks() const;
static std::unique_ptr<InferenceContext> Create();
private:
// For deliver to op in pair, help to support dynamic shape
std::vector<std::string> marks_;
std::vector<std::vector<ShapeAndType>> input_handle_shapes_and_types_;
std::vector<std::vector<ShapeAndType>> output_handle_shapes_and_types_;
InferenceContext(std::unique_ptr<InferenceContextImpl> &impl);
std::shared_ptr<InferenceContextImpl> inference_context_impl_;
};
} // namespace ge
#endif // INC_EXTERNAL_GRAPH_INFERENCE_CONTEXT_H_
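The rewrite above moves InferenceContext and ShapeAndType behind impl pointers and replaces direct construction with the static Create() factory. A small hedged sketch of the resulting usage; the shape, type, and mark values are illustrative:

#include <memory>
#include <utility>
#include <vector>
#include "external/graph/inference_context.h"  // path as referenced elsewhere in this diff

void InferenceContextSketch() {
  std::unique_ptr<ge::InferenceContext> ctx = ge::InferenceContext::Create();
  if (ctx == nullptr) {
    return;  // creation can fail; callers should check
  }
  // One input handle carrying one shape/type pair (values illustrative)
  std::vector<std::vector<ge::ShapeAndType>> inputs(1);
  inputs[0].emplace_back(ge::Shape({1, 16}), ge::DT_FLOAT);
  ctx->SetInputHandleShapesAndTypes(std::move(inputs));
  ctx->SetMarks({"resource_mark_0"});  // marks pair contexts across ops for dynamic shape
}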

@ -24,9 +24,8 @@
#include <vector>
#include "external/graph/ge_error_codes.h"
#include "external/graph//inference_context.h"
#include "external/graph//tensor.h"
#include "external/graph//usr_types.h"
#include "external/graph/inference_context.h"
#include "external/graph/tensor.h"
#ifndef USER_GE_LOGI
#define USER_GE_LOGI(...)
@ -182,9 +181,6 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
// Bytes type
graphStatus GetAttr(const string &name, OpBytes &attr_value) const;
Operator &SetAttr(const string &name, const UsrQuantizeFactorParams &attr_value);
graphStatus GetAttr(const string &name, UsrQuantizeFactorParams &attr_value) const;
Operator &SetAttr(const string &name, const std::vector<std::vector<int64_t>> &attr_value);
graphStatus GetAttr(const string &name, std::vector<std::vector<int64_t>> &attr_value) const;
@ -235,11 +231,9 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
graphStatus VerifyAll();
// Only has one output index = 0
Operator &SetInput(const string &dst_name, uint32_t dst_index,
const Operator &src_oprt);
Operator &SetInput(const string &dst_name, uint32_t dst_index, const Operator &src_oprt);
Operator &SetInput(const string &dst_name, uint32_t dst_index, const Operator &src_oprt,
const string &name);
Operator &SetInput(const string &dst_name, uint32_t dst_index, const Operator &src_oprt, const string &name);
private:
Operator &SetInput(const string &dst_name, const OutHandler &out_handler);

@ -26,9 +26,10 @@
#include "external/graph/types.h"
namespace ge {
class ShapeImpl;
class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Shape {
public:
Shape() = default;
Shape();
~Shape() = default;
explicit Shape(const std::vector<int64_t> &dims);
@ -40,7 +41,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Shape {
int64_t GetShapeSize() const;
private:
std::vector<int64_t> dims_;
std::shared_ptr<ShapeImpl> impl_;
};
class TensorDescImpl;
@ -66,10 +67,10 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TensorDesc {
void SetFormat(Format format);
Shape GetOriginShape() const;
void SetOriginShape(const Shape &origin_shape);
void SetOriginShape(const Shape &originShape);
Format GetOriginFormat() const;
void SetOriginFormat(Format origin_format);
void SetOriginFormat(Format originFormat);
DataType GetDataType() const;
void SetDataType(DataType dt);
@ -82,7 +83,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY TensorDesc {
int64_t GetSize() const;
int64_t GetRealDimCnt() const;
void SetRealDimCnt(const int64_t real_dim_cnt);
void SetRealDimCnt(const int64_t realDimCnt);
private:
std::shared_ptr<TensorDescImpl> impl;
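Shape and TensorDesc keep their accessor surface but now delegate to impl objects. A hedged sketch of filling in a descriptor with the setters shown above; the header path and the three-argument TensorDesc constructor are assumptions, not visible in this hunk:

#include <vector>
#include "external/graph/tensor.h"  // assumed location of Shape/TensorDesc

void DescribeTensorSketch() {
  ge::Shape shape(std::vector<int64_t>{1, 3, 224, 224});
  ge::TensorDesc desc(shape, ge::FORMAT_NCHW, ge::DT_FLOAT);  // ctor signature assumed
  desc.SetOriginShape(shape);             // SetOriginShape(const Shape &)
  desc.SetOriginFormat(ge::FORMAT_NCHW);  // SetOriginFormat(Format)
  desc.SetRealDimCnt(4);                  // SetRealDimCnt(int64_t)
}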

@ -67,33 +67,33 @@ enum DataType {
inline int GetSizeByDataType(DataType data_type) {
static int data_type_size[DT_UNDEFINED] = {
4, // DT_FLOAT = 0, float type
2, // DT_FLOAT16 = 1, fp16 type
1, // DT_INT8 = 2, int8 type
4, // DT_INT32 = 3,
1, // DT_UINT8 = 4, uint8 type
-1,
2, // DT_INT16 = 6, int16 type
2, // DT_UINT16 = 7, uint16 type
4, // DT_UINT32 = 8, unsigned int32
8, // DT_INT64 = 9, int64 type
8, // DT_UINT64 = 10, unsigned int64
8, // DT_DOUBLE = 11, double type
1, // DT_BOOL = 12, bool type
-1, // DT_STRING = 13, string type
1, // DT_DUAL_SUB_INT8 = 14, dual output int8 type
1, // DT_DUAL_SUB_UINT8 = 15, dual output uint8 type
8, // DT_COMPLEX64 = 16, complex64 type
16, // DT_COMPLEX128 = 17, complex128 type
1, // DT_QINT8 = 18, qint8 type
2, // DT_QINT16 = 19, qint16 type
4, // DT_QINT32 = 20, qint32 type
1, // DT_QUINT8 = 21, quint8 type
2, // DT_QUINT16 = 22, quint16 type
-1, // DT_RESOURCE = 23, resource type
-1, // DT_STRING_REF = 24, string ref type
5, // DT_DUAL = 25, dual output type (float + int8)
// DT_UNDEFINED Used to indicate a DataType field has not been set.
4, // DT_FLOAT = 0, float type
2, // DT_FLOAT16 = 1, fp16 type
1, // DT_INT8 = 2, int8 type
4, // DT_INT32 = 3,
1, // DT_UINT8 = 4, uint8 type
-1,
2, // DT_INT16 = 6, int16 type
2, // DT_UINT16 = 7, uint16 type
4, // DT_UINT32 = 8, unsigned int32
8, // DT_INT64 = 9, int64 type
8, // DT_UINT64 = 10, unsigned int64
8, // DT_DOUBLE = 11, double type
1, // DT_BOOL = 12, bool type
-1, // DT_STRING = 13, string type
1, // DT_DUAL_SUB_INT8 = 14, dual output int8 type
1, // DT_DUAL_SUB_UINT8 = 15, dual output uint8 type
8, // DT_COMPLEX64 = 16, complex64 type
16, // DT_COMPLEX128 = 17, complex128 type
1, // DT_QINT8 = 18, qint8 type
2, // DT_QINT16 = 19, qint16 type
4, // DT_QINT32 = 20, qint32 type
1, // DT_QUINT8 = 21, quint8 type
2, // DT_QUINT16 = 22, quint16 type
-1, // DT_RESOURCE = 23, resource type
-1, // DT_STRING_REF = 24, string ref type
5, // DT_DUAL = 25, dual output type (float + int8)
// DT_UNDEFINED Used to indicate a DataType field has not been set.
};
if (data_type >= DT_UNDEFINED) {
return -1;
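GetSizeByDataType is a straight table lookup: -1 entries mark types with no fixed element size, and any value at or beyond DT_UNDEFINED falls through to -1 as well. A few concrete results, read directly off the table above:

#include <cassert>
#include "external/graph/types.h"  // the header this hunk belongs to

void DataTypeSizeExamples() {
  assert(ge::GetSizeByDataType(ge::DT_FLOAT) == 4);       // float
  assert(ge::GetSizeByDataType(ge::DT_FLOAT16) == 2);     // fp16
  assert(ge::GetSizeByDataType(ge::DT_STRING) == -1);     // variable-length, no fixed size
  assert(ge::GetSizeByDataType(ge::DT_UNDEFINED) == -1);  // out of the table's range
}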
@ -152,10 +152,11 @@ enum DeviceType {
CPU = 1,
};
class TensorTypeImpl;
struct TensorType {
explicit TensorType(DataType dt) { dt_vec_.push_back(dt); }
explicit TensorType(DataType dt);
TensorType(const std::initializer_list<DataType> &types) { dt_vec_ = types; }
TensorType(const std::initializer_list<DataType> &types);
static TensorType ALL() {
return TensorType{DT_BOOL, DT_COMPLEX128, DT_COMPLEX64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16,
@ -204,7 +205,7 @@ struct TensorType {
static TensorType FLOAT() { return TensorType{DT_FLOAT, DT_FLOAT16}; }
std::vector<DataType> dt_vec_;
std::shared_ptr<TensorTypeImpl> tensor_type_impl_;
};
} // namespace ge

@ -17,7 +17,6 @@
#ifndef INC_EXTERNAL_REGISTER_REGISTER_H_
#define INC_EXTERNAL_REGISTER_REGISTER_H_
#include <google/protobuf/message.h>
#include <functional>
#include <initializer_list>
#include <map>
@ -33,12 +32,12 @@
#include "register/register_fmk_types.h"
#include "register/register_types.h"
using std::unique_ptr;
using std::map;
using std::make_shared;
using std::to_string;
using std::string;
using std::map;
using std::pair;
using std::string;
using std::to_string;
using std::unique_ptr;
using std::vector;
namespace ge {
@ -46,55 +45,17 @@ class Operator;
class TensorDesc;
class Tensor;
class TBEPluginManager;
}
} // namespace ge
namespace domi {
struct OpOutput {
ge::Operator op;
// The output name of op
std::string outputName;
};
struct InferShapeContext {
ge::Operator op;
// Input name, input
std::map<std::string, OpOutput> inputs;
};
struct InferShapeOutput {
std::vector<ge::TensorDesc> outputDescs;
std::vector<uint32_t> realDimCnt;
};
enum OmgMoveTypeToAttr {
OMG_MOVE_TYPE_DTYPE = 0,
OMG_MOVE_TYPE_VALUE,
OMG_MOVE_TYPE_SHAPE,
OMG_MOVE_TYPE_FORMAT,
OMG_MOVE_TYPE_AXIS,
OMG_MOVE_TYPE_SCALAR_VALUE,
OMG_REMOVE_TYPE_WITH_COND = 1000,
};
struct MoveInputToAttrStu {
int inputIdx;
std::string attrName;
OmgMoveTypeToAttr moveType;
bool attrValue;
};
Status AutoMappingFn(const google::protobuf::Message *op_src, ge::Operator &op);
Status AutoMappingFnDynamic(const google::protobuf::Message *op_src, ge::Operator &op,
std::map<std::string, std::pair<std::string, std::string>> dynamic_name_attr_value,
int in_pos = -1, int out_pos = -1);
using google::protobuf::Message;
class OpRegistrationDataImpl;
using ParseParamFunc = std::function<domi::Status(const google::protobuf::Message *, ge::Operator &)>;
using InferShapeFunc = std::function<domi::Status(const ge::Operator &, std::vector<ge::TensorDesc> &)>;
using InferShapeFuncV2 = std::function<domi::Status(const InferShapeContext &, InferShapeOutput &)>;
using GetWorkspaceSizeFunc = std::function<domi::Status(const ge::Operator &, std::vector<int64_t> &)>;
using UpdateOpDescFunc = std::function<domi::Status(ge::Operator &)>;
using BuildTeBinFunc = std::function<domi::Status(const ge::Operator &, TEBinInfo &)>;
class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpRegistrationData {
public:
@ -110,64 +71,18 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpRegistrationData {
OpRegistrationData &ParseParamsFn(const ParseParamFunc &parseParamFn);
OpRegistrationData &InferShapeAndTypeFn(const InferShapeFunc &inferShapeFn);
OpRegistrationData &InferShapeAndTypeFn(const InferShapeFuncV2 &inferShapeFn);
OpRegistrationData &UpdateOpDescFn(const UpdateOpDescFunc &updateOpDescFn);
OpRegistrationData &GetWorkspaceSizeFn(const GetWorkspaceSizeFunc &getWorkspaceSizeFn);
OpRegistrationData &TEBinBuildFn(const BuildTeBinFunc &buildTeBinFn);
OpRegistrationData &ImplyType(const domi::ImplyType &imply_type);
OpRegistrationData &Formats(const std::initializer_list<domi::tagDomiTensorFormat> &input_formats,
const std::initializer_list<domi::tagDomiTensorFormat> &output_formats);
OpRegistrationData &WeightFormats(const std::initializer_list<domi::tagDomiTensorFormat> &weight_formats);
OpRegistrationData &InputFormat(const std::initializer_list<std::initializer_list<ge::Format>> &inputFormats);
OpRegistrationData &OutputFormat(const std::initializer_list<std::initializer_list<ge::Format>> &outputFormats);
OpRegistrationData &InputDataType(const std::initializer_list<std::initializer_list<ge::DataType>> &inputDataTypes);
OpRegistrationData &OutputDataType(const std::initializer_list<std::initializer_list<ge::DataType>> &outputDataTypes);
OpRegistrationData &InputLimitedTensorDescInfo(
const std::initializer_list<std::initializer_list<ge::TensorDescInfo>> &limitedTensorDescs);
OpRegistrationData &OutputLimitedTensorDescInfo(
const std::initializer_list<std::initializer_list<ge::TensorDescInfo>> &limitedTensorDescs);
OpRegistrationData &MoveInputToAttr(int inputIdx, const std::string &attrName, OmgMoveTypeToAttr moveType);
OpRegistrationData &DelInputWithCond(int inputIdx, const std::string &attrName, bool attrValue);
domi::ImplyType GetImplyType() const;
std::string GetOmOptype() const;
std::set<std::string> GetOriginOpTypeSet() const;
domi::FrameworkType GetFrameworkType() const;
ParseParamFunc GetParseParamFn() const;
private:
domi::FrameworkType fmk_type_; // Framework type
std::set<std::string> ori_optype_set_; // OP type in the original model, there may be multiple
std::string om_optype_; // OP type in OM model
domi::ImplyType imply_type_; // Execution type
std::vector<domi::tagDomiTensorFormat> input_formats_; // Data formats supported by operator input
std::vector<domi::tagDomiTensorFormat> output_formats_; // Data formats supported by operator output
std::vector<domi::tagDomiTensorFormat> weight_formats_; // Data format supported by operator weight
ParseParamFunc parseParamFn_; // ParseParam function
InferShapeFunc inferShapeFn_; // InferShape function
InferShapeFuncV2 inferShapeFnV2_; // InferShape function
GetWorkspaceSizeFunc getWorkspaceSizeFn_; // GetWorkspaceSizeFunc function
UpdateOpDescFunc updateOpDescFn_;
BuildTeBinFunc buildTeBinFn_;
// Input formats list supported by tbe operators
std::vector<std::vector<ge::Format>> supportedInputFormats_;
// Output formats list supported by tbe operators
std::vector<std::vector<ge::Format>> supportedOutputFormats_;
// Input datatypes list supported by tbe operators
std::vector<std::vector<ge::DataType>> supportedInputDataTypes_;
// Output datatypes list supported by tbe operators
std::vector<std::vector<ge::DataType>> supportedOutputDataTypes_;
// Input tensordesinfo list supported by tbe operator
std::vector<std::vector<ge::TensorDescInfo>> inputLimitedTensorDescs_;
// Output tensordesinfo list supported by tbe operator
std::vector<std::vector<ge::TensorDescInfo>> outputLimitedTensorDescs_;
std::vector<MoveInputToAttrStu> moveInputToAttrVec_;
std::shared_ptr<OpRegistrationDataImpl> impl_;
friend class OpRegistry;
friend class OpRegistrationTbe;
friend class ge::TBEPluginManager;
@ -181,19 +96,12 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpReceiver {
#define REGISTER_CUSTOM_OP(name) REGISTER_CUSTOM_OP_UNIQ_HELPER(__COUNTER__, name)
#define REGISTER_CUSTOM_OP_UNIQ_HELPER(ctr, name) REGISTER_CUSTOM_OP_UNIQ(ctr, name)
#define REGISTER_CUSTOM_OP_UNIQ(ctr, name) \
static OpReceiver register_op##ctr \
__attribute__((unused)) = \
OpRegistrationData(name)
#define REGISTER_CUSTOM_OP_UNIQ(ctr, name) \
static OpReceiver register_op##ctr __attribute__((unused)) = OpRegistrationData(name)
} // namespace domi
namespace ge {
using OpOutput = domi::OpOutput;
using InferShapeContext = domi::InferShapeContext;
using InferShapeOutput = domi::InferShapeOutput;
using OmgMoveTypeToAttr = domi::OmgMoveTypeToAttr;
using MoveInputToAttrStu = domi::MoveInputToAttrStu;
using OpRegistrationData = domi::OpRegistrationData;
using OpReceiver = domi::OpReceiver;
}
} // namespace ge
#endif // INC_EXTERNAL_REGISTER_REGISTER_H_
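With the OpOutput/InferShapeContext plumbing and the V2 infer-shape path removed, a plugin registers an op through OpRegistrationData's chainable setters and the REGISTER_CUSTOM_OP macro. A hedged sketch: the FrameworkType/OriginOpType setters and the TENSORFLOW and ImplyType::TVM enum values are assumed from the surrounding API rather than shown in this hunk, and the op names are made up:

#include "register/register.h"  // assumed include path for this header

// Hypothetical plugin registration mapping an original "Add" op to "MyAddOp".
REGISTER_CUSTOM_OP("MyAddOp")
    .FrameworkType(domi::TENSORFLOW)     // original framework (setter/value assumed)
    .OriginOpType("Add")                 // op type in the original model (setter assumed)
    .ParseParamsFn(domi::AutoMappingFn)  // generic attribute mapping declared above
    .ImplyType(domi::ImplyType::TVM);    // execution type (value assumed)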

@ -31,12 +31,6 @@ enum FrameworkType {
FMK_TYPE_A_NN,
FMK_TYPE_RESERVED,
};
struct TEBinInfo {
std::string bin_file_path;
std::string json_file_path;
std::string ddk_version;
};
} // namespace domi
#endif // INC_EXTERNAL_REGISTER_REGISTER_FMK_TYPES_H_

@ -44,6 +44,8 @@ inline bool IsLogEnable(int module_name, int log_level) noexcept {
return false;
}
/*lint --emacro((773),GE_TIMESTAMP_START)*/
/*lint -esym(773,GE_TIMESTAMP_START)*/
#define GE_TIMESTAMP_START(stage) uint64_t startUsec_##stage = ge::GetCurrentTimestap()
#define GE_TIMESTAMP_END(stage, stage_name) \

File diff suppressed because it is too large.

@ -1,4 +1,4 @@
/**
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");

@ -1,4 +1,4 @@
/**
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
@ -20,4 +20,4 @@
#include "graph/types.h"
#include "register/register_types.h"
#endif // INC_FRAMEWORK_COMMON_FMK_TYPES_H_
#endif // INC_FRAMEWORK_COMMON_FMK_TYPES_H_

@ -14,79 +14,78 @@
* limitations under the License.
*/
/*lint -e* */
#ifndef INC_FRAMEWORK_COMMON_GE_INNER_ERROR_CODES_H_
#define INC_FRAMEWORK_COMMON_GE_INNER_ERROR_CODES_H_
#include <map>
#include <string>
#include "ge/ge_api_error_codes.h"
namespace ge {
// System ID
enum SystemIdType { kSysidGE = 8 };
enum SystemIdType { SYSID_GE = 8 };
// Runtime location
enum LogRuntime {
KRtHost = 0b01,
kRtDevice = 0b10,
RT_HOST = 0b01,
RT_DEVICE = 0b10,
};
// Sub model
enum SubModuleId {
kCommonModule = 0,
kClientModule = 1,
kInitModule = 2,
kSessionModule = 3,
kGraphModule = 4,
kEngineMOdule = 5,
kOpsModule = 6,
kPluginModule = 7,
kRuntimeModule = 8,
kExecutorModule = 9,
kGeneratorModule = 10,
COMMON_MODULE = 0,
CLIENT_MODULE = 1,
INIT_MODULE = 2,
SESSION_MODULE = 3,
GRAPH_MODULE = 4,
ENGINE_MODULE = 5,
OPS_MODULE = 6,
PLUGIN_MODULE = 7,
RUNTIME_MODULE = 8,
EXECUTOR_MODULE = 9,
GENERATOR_MODULE = 10,
};
// Error code type
enum ErrorCodeType {
kErrorCode = 0b01,
kExceptionCode = 0b10,
ERROR_CODE = 0b01,
EXCEPTION_CODE = 0b10,
};
// Error level
enum ErrorLevel {
kCommonLevel = 0b000,
kSuggestionLevel = 0b001,
kMinorLevel = 0b010,
kMajorLevel = 0b011,
kCriticalLevel = 0b100,
COMMON_LEVEL = 0b000,
SUGGESTION_LEVEL = 0b001,
MINOR_LEVEL = 0b010,
MAJOR_LEVEL = 0b011,
CRITICAL_LEVEL = 0b100,
};
// The error code is defined by the following macros
// Each module defines error codes using the following macros
#define GE_ERRORNO_COMMON(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kCommonModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, COMMON_MODULE, name, value, desc)
#define GE_ERRORNO_CLIENT(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kClientModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, CLIENT_MODULE, name, value, desc)
#define GE_ERRORNO_INIT(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kInitModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, INIT_MODULE, name, value, desc)
#define GE_ERRORNO_SESSION(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kSessionModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, SESSION_MODULE, name, value, desc)
#define GE_ERRORNO_GRAPH(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kGraphModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, GRAPH_MODULE, name, value, desc)
#define GE_ERRORNO_ENGINE(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kEngineMOdule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, ENGINE_MODULE, name, value, desc)
#define GE_ERRORNO_OPS(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kOpsModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, OPS_MODULE, name, value, desc)
#define GE_ERRORNO_PLUGIN(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kPluginModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, PLUGIN_MODULE, name, value, desc)
#define GE_ERRORNO_RUNTIME(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kRuntimeModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, RUNTIME_MODULE, name, value, desc)
#define GE_ERRORNO_EXECUTOR(name, value, desc) \
GE_ERRORNO(kRtDevice, kErrorCode, kCommonLevel, kSysidGE, kExecutorModule, name, value, desc)
GE_ERRORNO(RT_DEVICE, ERROR_CODE, COMMON_LEVEL, SYSID_GE, EXECUTOR_MODULE, name, value, desc)
#define GE_ERRORNO_GENERATOR(name, value, desc) \
GE_ERRORNO(KRtHost, kErrorCode, kCommonLevel, kSysidGE, kGeneratorModule, name, value, desc)
GE_ERRORNO(RT_HOST, ERROR_CODE, COMMON_LEVEL, SYSID_GE, GENERATOR_MODULE, name, value, desc)
// Get the description of the error code
// Get error code description
#define GE_GET_ERRORNO_STR(value) ge::StatusFactory::Instance()->GetErrDesc(value)
// Common module error code definition
@ -206,10 +205,9 @@ GE_ERRORNO_GRAPH(GE_GRAPH_NODE_SEARCHER_GET_GRAPH_REBUILD_FAILED, 60,
GE_ERRORNO_GRAPH(GE_GRAPH_NODE_SEARCHER_SET_GRAPH_FINISH_REBUILD_GRAPH_FAILED, 61,
"Failed set graph finish rebuild in node searcher."); // 1343242301
GE_ERRORNO_GRAPH(GE_GRAPH_VARIABLE_OP_PASS_FAILED, 62, "Failed to run variable pass."); // 1343242302
// Optimize error code
GE_ERRORNO_GRAPH(TO_BE_DELETED, 63, "The node of the graph to be deleted."); // 1343242303
GE_ERRORNO_GRAPH(NOT_CHANGED, 64, "The node of the graph no changed."); // 1343242304
GE_ERRORNO_GRAPH(TO_BE_DELETED, 200, "The node of the graph to be deleted.");
GE_ERRORNO_GRAPH(NOT_CHANGED, 201, "The node of the graph not changed.");
// Engine_manager module error code definition
GE_ERRORNO_ENGINE(GE_ENG_INIT_FAILED, 0, "Failed to initialize engine."); // 1343246336
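Each GE_ERRORNO_* wrapper above fixes the runtime location, error type, level, system id, and module, so an individual definition only supplies a name, a module-local value, and a description, as the graph and engine entries show. A hedged lookup example; the include path is inferred from the header guard and is an assumption:

#include <string>
#include "framework/common/ge_inner_error_codes.h"  // path inferred from the include guard

// Fetch the description registered for a graph-module code defined above.
std::string DescribeVariablePassError() {
  return GE_GET_ERRORNO_STR(ge::GE_GRAPH_VARIABLE_OP_PASS_FAILED);
}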

@ -137,7 +137,7 @@ class ModelListener {
struct Options {
int64_t session_id;
int32_t device_id;
int64_t job_id;
std::string job_id;
bool isUseHcom;
bool deployMode;
bool isAICPUMode;
@ -149,5 +149,4 @@ struct Options {
int32_t physical_device_id;
};
} // namespace ge
#endif // INC_FRAMEWORK_COMMON_GE_TYPES_H_

@ -23,11 +23,6 @@
namespace ge {
class GflagsUtils {
public:
///
/// @brief Determines whether the parameter is true
/// @param name name parameter name
/// @return true or false
///
static bool IsSetCommandTrue(const char *name) {
std::string out;
return gflags::GetCommandLineOption(name, &out) && out == "true";

@ -19,6 +19,7 @@
#include <memory>
#include <string>
#include <memory>
#include "common/fmk_types.h"
#include "common/helper/om_file_helper.h"

@ -35,8 +35,8 @@ struct ModelPartition {
};
struct OmFileContext {
vector<ModelPartition> partition_datas_;
vector<char> partition_table_;
std::vector<ModelPartition> partition_datas_;
std::vector<char> partition_table_;
uint32_t model_data_len_;
};
@ -78,7 +78,7 @@ class OmFileSaveHelper {
Status AddPartition(ModelPartition &partition);
const vector<ModelPartition> &GetModelPartitions() const;
const std::vector<ModelPartition> &GetModelPartitions() const;
Status SaveModel(const SaveParam &save_param, const char *target_file);
@ -88,4 +88,5 @@ class OmFileSaveHelper {
OmFileContext context_;
};
} // namespace ge
/*lint +e148*/
#endif // INC_FRAMEWORK_COMMON_HELPER_OM_FILE_HELPER_H_

@ -28,11 +28,14 @@
#include "common/util.h"
#include "graph/compute_graph.h"
using std::vector;
namespace ge {
// Size of RC memory alignment, 2M
const size_t ALIGN_SIZE = 2097152;
const uint32_t RC_VALUE_DEFAULT = 1;
const uint32_t RC_VALUE_MAC = 32;
constexpr size_t ALIGN_SIZE = 2097152;
constexpr uint32_t RC_VALUE_DEFAULT = 1;
constexpr uint32_t RC_VALUE_MAX = 32;
// RC data type classification
enum RCType {
@ -100,7 +103,7 @@ class L2CacheOptimize {
void HandOPoutput(ge::NodePtr node, vector<int64_t> &outputList, vector<RCMemoryBlock> &blocks);
// maximum common divisor
uint32_t Measure(uint32_t x, uint32_t y) const {
uint32_t Measure(uint32_t x, uint32_t y) {
if (x == 0 || y == 0) return RC_VALUE_DEFAULT;
uint32_t z = y;
while (x % y != 0) {

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.
