synchronize latest Ascend software suite 27 Oct 2020

pull/175/head
yanghaoran 4 years ago
parent cd365aa247
commit 24b8bc1cba

@ -0,0 +1,51 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_COMMON_OPSKERNELUTILS_OPS_KERNEL_INFO_UTILS_H_
#define INC_COMMON_OPSKERNELUTILS_OPS_KERNEL_INFO_UTILS_H_
#include "external/ge/ge_api_error_codes.h"
#include "cce/aicpu_engine_struct.h"
#include "common/opskernel/ops_kernel_info_types.h"
#include "graph/node.h"
#include "proto/task.pb.h"
namespace ge {
// Abstract interface implemented by each ops-kernel backend to size op memory
// and generate runtime tasks for ops assigned to it.
class OpsKernelBuilder {
public:
OpsKernelBuilder() = default;
virtual ~OpsKernelBuilder() = default;
// initialize OpsKernelBuilder with engine options (key/value strings)
virtual Status Initialize(const std::map<std::string, std::string> &options) = 0;
// finalize OpsKernelBuilder; releases whatever Initialize acquired
virtual Status Finalize() = 0;
// memory allocation requirement: compute and record the running parameters of `node`
virtual Status CalcOpRunningParam(Node &node) = 0;
// generate task(s) for op; results are appended to `tasks`
virtual Status GenerateTask(const Node &node, RunContext &context, std::vector<domi::TaskDef> &tasks) = 0;
// only call aicpu interface to generate task struct; default FAILED means "not supported by this builder"
virtual Status GenSingleOpRunTask(const NodePtr &node, STR_FWK_OP_KERNEL &task, string &task_info) { return FAILED; }
// only call aicpu interface to generate task struct; default FAILED means "not supported by this builder"
virtual Status GenMemCopyTask(uint64_t count, STR_FWK_OP_KERNEL &task, string &task_info) { return FAILED; }
};
} // namespace ge
#endif // INC_COMMON_OPSKERNELUTILS_OPS_KERNEL_INFO_UTILS_H_

@ -43,10 +43,10 @@ class OpsKernelInfoStore {
virtual ~OpsKernelInfoStore() {}
// initialize opsKernelInfoStore
virtual Status Initialize(const map<string, string> &options) = 0; /*lint -e148*/
virtual Status Initialize(const map<string, string> &options) = 0;
// close opsKernelInfoStore
virtual Status Finalize() = 0; /*lint -e148*/
virtual Status Finalize() = 0;
virtual Status CreateSession(const std::map<std::string, std::string> &session_options) { return SUCCESS; }
@ -65,24 +65,11 @@ class OpsKernelInfoStore {
// opsFlag opsFlag[0] indicates constant folding is supported or not
virtual void opsFlagCheck(const ge::Node &node, std::string &opsFlag){};
// memory allocation requirement
virtual Status CalcOpRunningParam(Node &node) = 0; /*lint -e148*/
// generate task for op
virtual Status GenerateTask(const Node &node, RunContext &context,
std::vector<domi::TaskDef> &tasks) = 0; /*lint -e148*/
// only call fe engine interface to compile single op
virtual Status CompileOp(vector<ge::NodePtr> &node_vec) { return SUCCESS; }
virtual Status CompileOpRun(vector<ge::NodePtr> &node_vec) { return SUCCESS; }
// load task for op
virtual Status LoadTask(GETaskInfo &task) { return SUCCESS; }
// only call aicpu interface to generate task struct
virtual Status GenSingleOpRunTask(const NodePtr &node, STR_FWK_OP_KERNEL &task, string &task_info) { return SUCCESS; }
// only call aicpu interface to generate task struct
virtual Status GenMemCopyTask(uint64_t count, STR_FWK_OP_KERNEL &task, string &task_info) { return SUCCESS; }
};
} // namespace ge
#endif // INC_COMMON_OPSKERNEL_OPS_KERNEL_INFO_STORE_H_

@ -26,13 +26,14 @@
using std::string;
namespace ge {
/*lint -e148*/
struct RunContext {
rtModel_t model;
rtStream_t stream;
uint64_t sessionId;
uint64_t dataMemSize;
uint8_t *dataMemBase;
std::map<int64_t, uint64_t> mem_type_data_mem_size;
std::map<int64_t, uint8_t *> mem_type_data_mem_base;
uint64_t weightMemSize;
uint8_t *weightMemBase;
ge::Buffer weightsBuffer;
@ -41,8 +42,6 @@ struct RunContext {
std::vector<rtLabel_t> graphLabelList; // all labels of graph, order by ge label id(0,1,...)
};
/*lint +e148*/
struct Task {
uint32_t id;
uint16_t type;
@ -52,7 +51,6 @@ struct Task {
struct OpInfo {
string engine; // which engin
/*lint -e148*/
string opKernelLib; // which opsKernelStore
int computeCost; // compute cost
bool flagPartial; // whether to support is related to shape

@ -27,7 +27,6 @@
using std::map;
using std::string;
/*lint -e148*/
namespace ge {
class GraphOptimizer {
public:
@ -67,5 +66,4 @@ class GraphOptimizer {
virtual Status OptimizeFusedGraphAfterGraphSlice(ComputeGraph &graph) { return SUCCESS; }
};
} // namespace ge
/*lint +e148*/
#endif // INC_COMMON_OPTIMIZER_GRAPH_OPTIMIZER_H_

@ -0,0 +1,48 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef AICORE_UTIL_MANAGER_H_
#define AICORE_UTIL_MANAGER_H_
#include <map>  // std::map is used in Initialize's signature (include was missing)
#include <string>
#include "register/graph_optimizer/graph_optimize_register_error_codes.h"
namespace fe {
// Process-wide singleton that initializes and finalizes the AI Core
// utility configuration.
class AICoreUtilManager {
public:
static AICoreUtilManager &Instance();
/*
 * to initialize the aicore configuration
 * param[in] options      the options of init
 * param[in,out] soc_version  soc version from ge
 * return Status(SUCCESS/FAILED)
 */
Status Initialize(const std::map<std::string, std::string> &options, std::string &soc_version);
/*
 * to release the source of fusion manager
 * return Status(SUCCESS/FAILED)
 */
Status Finalize();
private:
AICoreUtilManager();
~AICoreUtilManager();
bool is_init_;  // tracks whether Initialize has completed (inferred from name — confirm in .cpp)
};
} // namespace fe
#endif  // AICORE_UTIL_MANAGER_H_

@ -36,6 +36,14 @@ static const std::string L1_OPTIMIZED = "l1_optimized";
static const std::string L2_OPTIMIZED = "l2_optimized";
static const std::string OP_SLICE_INFO = "_op_slice_info";
static const std::string ATTR_NAME_UNKNOWN_SHAPE = "_unknown_shape";
static const std::string ATTR_NAME_IS_UNKNOWN_GRAPH = "_fe_is_unknown_graph";
static const std::string ATTR_NAME_IS_UNKNOWN_SHAPE_OP = "_fe_is_unknown_shape_op";
static const std::string ATTR_NAME_TVM_CACHE_READ_MODE = "tvm_cache_read_mode";
static const std::string ATTR_NAME_TBE_KERNEL_SIZE = "_tbeKernelSize";
} // namespace fe
#endif

@ -0,0 +1,54 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_CONSTANTS_H_
#define INC_COMMON_UTILS_AI_CORE_COMMON_CONSTANTS_H_
#include <string>
#include <vector>  // std::vector is used for the SOC_VERSION_*_LIST constants (include was missing)
namespace fe {
// NOTE: file-level `static` consts in a header give each including TU its own copy;
// kept as-is for compatibility with existing usage.
static const std::string CORE_TYPE = "_coretype";
/* engine name of AI core and vector core */
static const std::string AI_CORE_NAME = "AIcoreEngine";
static const std::string VECTOR_CORE_NAME = "VectorEngine";
static const int64_t IS_UNKNOWN_SHAPE_VALUE = 1;
// Sentinels for dynamic shapes: -1 = unknown dim, -2 = unknown rank.
static const int64_t SHAPE_UNKNOWN_DIM = -1;
static const int64_t SHAPE_UNKNOWN_DIM_NUM = -2;
static const std::string SOC_VERSION_ASCEND310 = "Ascend310";
static const std::string SOC_VERSION_ASCEND610 = "Ascend610";
static const std::string SOC_VERSION_ASCEND615 = "Ascend615";
static const std::string SOC_VERSION_ASCEND710 = "Ascend710";
static const std::string SOC_VERSION_ASCEND710P = "Ascend710Pro";
static const std::string SOC_VERSION_ASCEND910A = "Ascend910A";
static const std::string SOC_VERSION_ASCEND910B = "Ascend910B";
static const std::string SOC_VERSION_ASCEND910PROA = "Ascend910ProA";
static const std::string SOC_VERSION_ASCEND910PROB = "Ascend910ProB";
static const std::string SOC_VERSION_ASCEND910PREMIUMA = "Ascend910PremiumA";
static const std::string SOC_VERSION_HI3796CV300ES = "Hi3796CV300ES";
static const std::string SOC_VERSION_HI3796CV300CS = "Hi3796CV300CS";
// Cloud (training) SoC family.
static const std::vector<std::string> SOC_VERSION_CLOUD_LIST = {SOC_VERSION_ASCEND910A, SOC_VERSION_ASCEND910B,
                                                                SOC_VERSION_ASCEND910PROA, SOC_VERSION_ASCEND910PROB,
                                                                SOC_VERSION_ASCEND910PREMIUMA};
// DC (inference) SoC family.
static const std::vector<std::string> SOC_VERSION_DC_LIST = {SOC_VERSION_ASCEND610, SOC_VERSION_ASCEND615,
                                                             SOC_VERSION_ASCEND710, SOC_VERSION_ASCEND710P};
} // namespace fe
#endif

@ -42,47 +42,61 @@ struct FusionDataFlow {
std::pair<std::string, ge::AnchorPtr> node_dataindex_pair;
};
typedef struct tagL2FusionData {
typedef struct tag_l2_fusion_data {
uint32_t l2Index;
uint64_t l2Addr;
uint64_t l2PageNum;
} L2FusionData_t;
typedef std::map<uint64_t, L2FusionData_t> L2FusionDataMap_t;
typedef struct tagFeSmDesc {
typedef struct tag_fe_sm_desc {
rtL2Ctrl_t l2ctrl;
std::string nodeName[8];
uint8_t outputIndex[8];
} feSmDesc_t;
std::string node_name[8];
uint8_t output_index[8];
} fe_sm_desc_t;
typedef struct TagTaskL2FusionInfo {
std::string nodeName;
feSmDesc_t l2Info;
std::string node_name;
fe_sm_desc_t l2_info;
L2FusionDataMap_t input;
L2FusionDataMap_t output;
uint32_t isUsed;
uint32_t is_used;
} TaskL2FusionInfo_t;
using L2FusionInfoPtr = std::shared_ptr<TaskL2FusionInfo_t>;
typedef struct ToOpStruct {
int64_t opL1Space = 0;
std::vector<int64_t> opL1FusionType;
int64_t opL1WorkspaceFlag = 0; // for workspace flag
int64_t opL1WorkspaceSize = 0;
std::vector<std::vector<int64_t>> validInputShape;
std::vector<std::vector<int64_t>> validOutputShape;
std::vector<std::vector<int64_t>> sliceInputOffset; // conv & pooling & ReadSelect
std::vector<std::vector<int64_t>> sliceOutputOffset; // WriteSelect
std::vector<uint32_t> totalShape;
uint32_t splitIndex = 0;
int64_t op_l1_space = 0;
std::vector<int64_t> op_l1_fusion_type;
int64_t op_l1_workspace_flag = 0; // for workspace flag
int64_t op_l1_workspace_size = 0;
std::vector<std::vector<int64_t>> valid_input_shape;
std::vector<std::vector<int64_t>> valid_output_shape;
std::vector<std::vector<int64_t>> slice_input_offset; // conv & pooling & ReadSelect
std::vector<std::vector<int64_t>> slice_output_offset; // WriteSelect
std::vector<uint32_t> total_shape;
uint32_t split_index = 0;
ToOpStruct() {
// set invalid value for essential variable
opL1Space = -1;
opL1WorkspaceSize = -1;
op_l1_space = -1;
op_l1_workspace_size = -1;
}
} ToOpStruct_t;
// Slice patterns an op can declare for fusion/slicing decisions.
// PATTERN_RESERVED is a sentinel; keep it last when adding new patterns.
enum SlicePattern {
ELEMENT_WISE = 0,
ELEMENT_WISE_BROADCAST,
BROADCAST,
SLIDING_WINDOW,
SLIDING_WINDOW_DECONV,
CUBE_MATMUL,
SLICE_PATTERN_REDUCE,
SLICE_PATTERN_RESIZE,
SLICE_PATTERN_SCATTER,
SLICE_PATTERN_SEGMENT,
PATTERN_RESERVED
};
enum OpImplType {
EN_IMPL_CUSTOM_CONSTANT_CCE = 0, // custom constant op
EN_IMPL_CUSTOM_TIK, // custom tik op
@ -99,6 +113,10 @@ enum OpImplType {
EN_RESERVED // reserved value
};
// Don't change the order; only add new modes at the end
enum L2Mode { EN_L2_CLOSE = 0, EN_L2_BUFFER_OPTIMIZE, EN_L2_CACHE_NORMAL, EN_L2_CACHE_RC };
enum BufferFusionMode { EN_OPTIMIZE_DISABLE = 0, EN_L2_BUFFER, EN_L2_FUSION };
static const std::map<ge::DataType, uint32_t> DATATYPE_SIZE_MAP{{ge::DT_FLOAT, sizeof(float)},
{ge::DT_FLOAT16, sizeof(int16_t)},
{ge::DT_INT8, sizeof(int8_t)},
@ -114,5 +132,13 @@ static const std::map<ge::DataType, uint32_t> DATATYPE_SIZE_MAP{{ge::DT_FLOAT, s
{ge::DT_DUAL, sizeof(float) + sizeof(int8_t)},
{ge::DT_DUAL_SUB_UINT8, sizeof(int8_t)},
{ge::DT_DUAL_SUB_INT8, sizeof(int8_t)}};
// Reduction kinds recognized by the fusion engine.
enum OpReduceType {
REDUCE_MEAN = 0,
REDUCE_ADD,
REDUCE_MAX,
REDUCE_MIN,
};
} // namespace fe
#endif

@ -28,33 +28,34 @@
namespace fe {
using kScopeNodeMap_t = std::map<int64_t, std::vector<ge::NodePtr>>;
using kScopeNodePair_t = std::pair<int64_t, std::vector<ge::NodePtr>>;
using k_scope_node_map_t = std::map<int64_t, std::vector<ge::NodePtr>>;
using k_scope_node_pair_t = std::pair<int64_t, std::vector<ge::NodePtr>>;
class GraphCommImpl;
using GraphCommImplPtr = std::unique_ptr<GraphCommImpl>;
class GraphComm {
public:
GraphComm(const string &engineName);
GraphComm(const string &engine_name);
virtual ~GraphComm();
GraphComm(const GraphComm &in) = delete;
GraphComm &operator=(const GraphComm &in) = delete;
Status GetscopeNodeMap(ge::ComputeGraph &graph, kScopeNodeMap_t &fusionMap);
Status GetscopeNodeMap(ge::ComputeGraph &graph, k_scope_node_map_t &fusion_map);
Status CopyFusionOpNodes(vector<FusionDataFlow> &fusInputEdgeList, vector<FusionDataFlow> &fusOutputEdgeList,
vector<ge::NodePtr> &fusNodelist, ge::OpDescPtr fusionOpDesc,
ge::ComputeGraphPtr fusionGraph);
Status CopyFusionOpNodes(vector<FusionDataFlow> &fus_input_edge_list, vector<FusionDataFlow> &fus_output_edge_list,
vector<ge::NodePtr> &fus_nodelist, ge::OpDescPtr fusion_op_desc,
ge::ComputeGraphPtr fusion_graph);
Status CopyFusionOpEdges(ge::OpDescPtr fusionOpDesc, ge::ComputeGraph &origGraph, ge::ComputeGraphPtr fusionGraph);
Status CopyFusionOpEdges(ge::OpDescPtr fusion_op_desc, ge::ComputeGraph &orig_graph,
ge::ComputeGraphPtr fusion_graph);
Status GetNodeDataFlowMap(const ge::NodePtr &fusNode,
std::map<ge::NodePtr, std::map<ge::AnchorPtr, ge::AnchorPtr>> &fusionOpAnchorsMap,
ge::kFusionDataFlowVec_t &fusDataflowList, const int &mapType);
Status GetNodeDataFlowMap(const ge::NodePtr &fus_node,
std::map<ge::NodePtr, std::map<ge::AnchorPtr, ge::AnchorPtr>> &fusion_op_anchors_map,
ge::kFusionDataFlowVec_t &fus_dataflow_list, const int &map_type);
Status GetFusionNodeEdgeList(std::vector<ge::NodePtr> &fusNodelist, std::vector<FusionDataFlow> &fusInputEdgeList,
std::vector<FusionDataFlow> &fusOutputEdgeList);
Status GetFusionNodeEdgeList(std::vector<ge::NodePtr> &fus_nodelist, std::vector<FusionDataFlow> &fus_input_edge_list,
std::vector<FusionDataFlow> &fus_output_edge_list);
void ClearFusionSrc();
void ClearFusionDst();
@ -72,25 +73,26 @@ class GraphComm {
bool GetFusionSrc(const uint32_t &src_op_id, const ge::AnchorPtr &src_anchor, int32_t &fusion_src_index,
int32_t &fusion_dst_index);
Status GetFusionNodeCtrlEdgeList(vector<ge::NodePtr> &fusNodelist, vector<FusionDataFlow> &fusInputCtrlEdgeList,
vector<FusionDataFlow> &fusOutputCtrlEdgeList);
Status GetFusionNodeCtrlEdgeList(vector<ge::NodePtr> &fus_nodelist, vector<FusionDataFlow> &fus_input_ctrl_edge_list,
vector<FusionDataFlow> &fus_output_ctrl_edge_list);
Status MergeFusionNodeEdgeList(ge::NodePtr &fusNode, vector<ge::NodePtr> &fusNodelist,
vector<FusionDataFlow> &fusInputEdgeList, vector<FusionDataFlow> &fusOutputEdgeList);
Status MergeFusionNodeEdgeList(ge::NodePtr &fus_node, vector<ge::NodePtr> &fus_nodelist,
vector<FusionDataFlow> &fus_input_edge_list,
vector<FusionDataFlow> &fus_output_edge_list);
Status MergeFusionNodeCtrlEdgeList(ge::NodePtr &fusNode, vector<ge::NodePtr> &fusNodelist,
vector<FusionDataFlow> &fusInputEdgeList,
vector<FusionDataFlow> &fusOutputEdgeList);
Status MergeFusionNodeCtrlEdgeList(ge::NodePtr &fus_node, vector<ge::NodePtr> &fus_nodelist,
vector<FusionDataFlow> &fus_input_edge_list,
vector<FusionDataFlow> &fus_output_edge_list);
string GetEngineName();
private:
Status MergeFusionNodeInputEdgeList(ge::NodePtr fusNode, std::vector<ge::NodePtr> &fusNodelist,
std::vector<FusionDataFlow> &fusInputEdgeList);
Status MergeFusionNodeOutputEdgeList(ge::NodePtr fusNode, std::vector<ge::NodePtr> &fusNodelist,
std::vector<FusionDataFlow> &fusOutputEdgeList);
Status MergeFusionNodeInputEdgeList(ge::NodePtr fus_node, std::vector<ge::NodePtr> &fus_nodelist,
std::vector<FusionDataFlow> &fus_input_edge_list);
Status MergeFusionNodeOutputEdgeList(ge::NodePtr fus_node, std::vector<ge::NodePtr> &fus_nodelist,
std::vector<FusionDataFlow> &fus_output_edge_list);
string engineName_;
string engine_name_;
std::vector<FusionOpSrc> exist_fusion_src_list_;
std::vector<FusionOpDst> exist_fusion_dst_list_;
@ -101,7 +103,7 @@ class GraphComm {
// std::vector<std::multimap<std::string, ge::AnchorPtr>>
ge::kFusionDataFlowVec_t fusion_output_dataflow_list_;
GraphCommImplPtr graphCommImplPtr_;
GraphCommImplPtr graph_comm_impl_ptr_;
};
} // namespace fe
#endif

@ -0,0 +1,54 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PROJECT_JSON_UTIL_H
#define PROJECT_JSON_UTIL_H
#include <memory>  // std::shared_ptr (was relying on transitive includes)
#include <string>  // std::string (was relying on transitive includes)
#include "graph/compute_graph.h"
#include "common/aicore_util_types.h"
#include "fusion_engine/graph_tuner/graph_tuner_errorcode.h"
// Attribute keys under which L1/L2 fusion info is stored on op descs.
// (const at namespace scope has internal linkage, so these are safe in a header.)
const std::string L1_FUSION_EXTEND_CONTENT = "_l1_fusion_extend_content";
const std::string L2_FUSION_EXTEND_CONTENT = "l2_fusion_extend_content";
const std::string TASK_L2_FUSION_INFO_EXTEND_CONTENT = "task_l2_fusion_info_extend_content";
const std::string L1_FUSION_TO_OP_STRUCT = "_l1fusion_ToOpStruct";
const std::string L2_FUSION_TO_OP_STRUCT = "_l2fusion_ToOpStruct";
const std::string TASK_L2_FUSION_INFO = "_task_L2FusionInfo";
namespace tune {
using ToOpStructPtr = std::shared_ptr<fe::ToOpStruct_t>;
using L2FusionInfoPtr = std::shared_ptr<fe::TaskL2FusionInfo_t>;
// Parameter names follow the snake_case convention used across this codebase.
// Read L1 fusion info stored as JSON on the given op desc.
Status GetL1InfoFromJson(ge::OpDescPtr op_desc_ptr);
// Read L2 fusion info stored as JSON on the given op desc.
Status GetL2InfoFromJson(ge::OpDescPtr op_desc_ptr);
// Read task-level L2 fusion info stored as JSON on the given op desc.
Status GetTaskL2FusionInfoFromJson(ge::OpDescPtr op_desc_ptr);
// Restore fusion info for the whole graph from JSON attributes.
Status ReadGraphInfoFromJson(ge::ComputeGraph &graph);
// Serialize fusion info for the whole graph into JSON attributes.
Status WriteGraphInfoToJson(ge::ComputeGraph &graph);
void GetL2ToOpStructFromJson(ge::OpDescPtr &op_desc_ptr, ToOpStructPtr &l2_info_ptr);
void GetL1ToOpStructFromJson(ge::OpDescPtr &op_desc_ptr, ToOpStructPtr &l1_info_ptr);
L2FusionInfoPtr GetL2FusionInfoFromJson(ge::OpDescPtr &op_desc_ptr);
void SetL2FusionInfoToNode(ge::OpDescPtr &op_desc_ptr, L2FusionInfoPtr &l2_fusion_info_ptr);
} // namespace tune
#endif // PROJECT_JSON_UTIL_H

@ -0,0 +1,44 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef L2_STREAM_INFO_H_
#define L2_STREAM_INFO_H_
#include <map>
#include <string>
#include <mutex>
#include "register/graph_optimizer/graph_optimize_register_error_codes.h"
#include "runtime/base.h"
#include "cce/l2fusion_struct.hpp"
namespace fe {
// Singleton mapping runtime streams to their L2 allocation info; access to the
// map is guarded by stream_l2_mutex_.
class StreamL2Info {
public:
StreamL2Info(const StreamL2Info &) = delete;
StreamL2Info &operator=(const StreamL2Info &) = delete;
// Returns the process-wide instance.
static StreamL2Info &Instance();
// Look up L2 info for node_name on stream_id; on success l2_data is an out
// parameter (presumably pointing into stream_l2_map_ — confirm in the .cpp).
Status GetStreamL2Info(rtStream_t stream_id, string node_name, fusion::TaskL2Info_t *&l2_data);
// Record the L2 allocation result for stream_id.
Status SetStreamL2Info(const rtStream_t &stream_id, fusion::TaskL2InfoFEMap_t &l2_alloc_res);
private:
StreamL2Info();
~StreamL2Info();
mutable std::mutex stream_l2_mutex_;  // guards stream_l2_map_
std::map<rtStream_t, fusion::TaskL2InfoFEMap_t> stream_l2_map_;  // per-stream L2 allocation info
};
} // namespace fe
#endif // L2_STREAM_INFO_H_

@ -32,12 +32,12 @@ class ScopeAllocator {
int64_t GetCurrentScopeId();
int64_t AllocateScopeId(void);
bool HasScopeAttr(ge::ConstOpDescPtr opdef);
bool GetScopeAttr(ge::ConstOpDescPtr opdef, int64_t& scopeId);
bool SetScopeAttr(ge::OpDescPtr opdef, int64_t scopeId);
bool ResetScopeId(int64_t scopeId);
bool GetScopeAttr(ge::ConstOpDescPtr opdef, int64_t& scope_id);
bool SetScopeAttr(ge::OpDescPtr opdef, int64_t scope_id);
bool ResetScopeId(int64_t scope_id);
private:
int64_t scopeId;
int64_t scope_id;
};
} // namespace fe
#endif

@ -29,16 +29,16 @@ class TensorSizeCalculator {
public:
/**
* Calculate the tensor size of input and output of each opdesc
* @param opDesc opdesc object
* @param opImplType op impl type
* @param op_desc opdesc object
* @param op_impl_type op impl type
* @return status SUCCESS or FAILED
*/
static Status CalculateOpTensorSize(ge::OpDesc &opDesc);
static Status CalculateOpTensorSize(ge::OpDesc &op_desc);
private:
static Status CalcInputOpTensorSize(ge::OpDesc &opDesc, int32_t &outputRealCalcFlag);
static Status CalcInputOpTensorSize(ge::OpDesc &op_desc, int32_t &output_real_calc_flag);
static Status CalcOutputOpTensorSize(ge::OpDesc &opDesc, int32_t &outputRealCalcFlag);
static Status CalcOutputOpTensorSize(ge::OpDesc &op_desc, int32_t &output_real_calc_flag);
};
} // namespace fe

@ -20,6 +20,7 @@
#include <map>
#include <string>
#include <vector>
#include <mutex>
class ErrorManager {
public:
@ -86,6 +87,7 @@ class ErrorManager {
int ReadJsonFile(const std::string &file_path, void *handle);
bool is_init_ = false;
std::mutex mutex_;
std::map<std::string, ErrorInfo> error_map_;
std::vector<std::string> error_messages_;
std::vector<std::string> warning_messages_;

@ -36,66 +36,66 @@ class PlatformInfoManager {
uint32_t InitializePlatformInfo();
uint32_t Finalize();
uint32_t GetPlatformInfo(const string SoCVersion, PlatformInfo &platformInfo, OptionalInfo &optiCompilationInfo);
uint32_t GetPlatformInfo(const string SoCVersion, PlatformInfo &platform_info, OptionalInfo &opti_compilation_info);
uint32_t GetPlatformInfoWithOutSocVersion(PlatformInfo &platformInfo, OptionalInfo &optiCompilationInfo);
uint32_t GetPlatformInfoWithOutSocVersion(PlatformInfo &platform_info, OptionalInfo &opti_compilation_info);
void SetOptionalCompilationInfo(OptionalInfo &optiCompilationInfo);
void SetOptionalCompilationInfo(OptionalInfo &opti_compilation_info);
private:
PlatformInfoManager();
~PlatformInfoManager();
uint32_t LoadIniFile(string iniFileRealPath);
uint32_t LoadIniFile(string ini_file_real_path);
void Trim(string &str);
uint32_t LoadConfigFile(string realPath);
uint32_t LoadConfigFile(string real_path);
string RealPath(const std::string &path);
string GetSoFilePath();
void ParseVersion(map<string, string> &versionMap, string &socVersion, PlatformInfo &platformInfoTemp);
void ParseVersion(map<string, string> &version_map, string &soc_version, PlatformInfo &platform_info_temp);
void ParseSocInfo(map<string, string> &socInfoMap, PlatformInfo &platformInfoTemp);
void ParseSocInfo(map<string, string> &soc_info_map, PlatformInfo &platform_info_temp);
void ParseCubeOfAICoreSpec(map<string, string> &aiCoreSpecMap, PlatformInfo &platformInfoTemp);
void ParseCubeOfAICoreSpec(map<string, string> &ai_core_spec_map, PlatformInfo &platform_info_temp);
void ParseBufferOfAICoreSpec(map<string, string> &aiCoreSpecMap, PlatformInfo &platformInfoTemp);
void ParseBufferOfAICoreSpec(map<string, string> &ai_core_spec_map, PlatformInfo &platform_info_temp);
void ParseUBOfAICoreSpec(map<string, string> &aiCoreSpecMap, PlatformInfo &platformInfoTemp);
void ParseUBOfAICoreSpec(map<string, string> &ai_core_spec_map, PlatformInfo &platform_info_temp);
void ParseUnzipOfAICoreSpec(map<string, string> &aiCoreSpecMap, PlatformInfo &platformInfoTemp);
void ParseUnzipOfAICoreSpec(map<string, string> &ai_core_spec_map, PlatformInfo &platform_info_temp);
void ParseAICoreSpec(map<string, string> &aiCoreSpecMap, PlatformInfo &platformInfoTemp);
void ParseAICoreSpec(map<string, string> &ai_core_spec_map, PlatformInfo &platform_info_temp);
void ParseBufferOfAICoreMemoryRates(map<string, string> &aiCoreMemoryRatesMap, PlatformInfo &platformInfoTemp);
void ParseBufferOfAICoreMemoryRates(map<string, string> &ai_core_memory_rates_map, PlatformInfo &platform_info_temp);
void ParseAICoreMemoryRates(map<string, string> &aiCoreMemoryRatesMap, PlatformInfo &platformInfoTemp);
void ParseAICoreMemoryRates(map<string, string> &ai_core_memory_rates_map, PlatformInfo &platform_info_temp);
void ParseUBOfAICoreMemoryRates(map<string, string> &aiCoreMemoryRatesMap, PlatformInfo &platformInfoTemp);
void ParseUBOfAICoreMemoryRates(map<string, string> &ai_core_memory_rates_map, PlatformInfo &platform_info_temp);
void ParseAICoreintrinsicDtypeMap(map<string, string> &aiCoreintrinsicDtypeMap, PlatformInfo &platformInfoTemp);
void ParseAICoreintrinsicDtypeMap(map<string, string> &ai_coreintrinsic_dtype_map, PlatformInfo &platform_info_temp);
void ParseVectorCoreSpec(map<string, string> &vectorCoreSpecMap, PlatformInfo &platformInfoTemp);
void ParseVectorCoreSpec(map<string, string> &vector_core_spec_map, PlatformInfo &platform_info_temp);
void ParseVectorCoreMemoryRates(map<string, string> &vectorCoreMemoryRatesMap, PlatformInfo &platformInfoTemp);
void ParseVectorCoreMemoryRates(map<string, string> &vector_core_memory_rates_map, PlatformInfo &platform_info_temp);
void ParseCPUCache(map<string, string> &CPUCacheMap, PlatformInfo &platformInfoTemp);
void ParseCPUCache(map<string, string> &CPUCacheMap, PlatformInfo &platform_info_temp);
void ParseVectorCoreintrinsicDtypeMap(map<string, string> &vectorCoreintrinsicDtypeMap,
PlatformInfo &platformInfoTemp);
void ParseVectorCoreintrinsicDtypeMap(map<string, string> &vector_coreintrinsic_dtype_map,
PlatformInfo &platform_info_temp);
uint32_t ParsePlatformInfoFromStrToStruct(map<string, map<string, string>> &contentInfoMap, string &socVersion,
PlatformInfo &platformInfoTemp);
uint32_t ParsePlatformInfoFromStrToStruct(map<string, map<string, string>> &content_info_map, string &soc_version,
PlatformInfo &platform_info_temp);
uint32_t AssemblePlatformInfoVector(map<string, map<string, string>> &contentInfoMap);
uint32_t AssemblePlatformInfoVector(map<string, map<string, string>> &content_info_map);
private:
bool initFlag_;
map<string, PlatformInfo> platformInfoMap_;
OptionalInfo optiCompilationInfo_;
bool init_flag_;
map<string, PlatformInfo> platform_info_map_;
OptionalInfo opti_compilation_info_;
};
} // namespace fe
#endif

@ -30,111 +30,113 @@ enum MemoryType { DDR = 0, HBM };
enum L2Type { Cache = 0, Buff };
typedef struct tagStrInfo {
string aicVersion;
string ccecAICVersion;
string ccecAIVVersion;
string isSupportAIcpuCompiler;
typedef struct tag_str_info {
string aic_version;
string ccec_aic_version;
string ccec_aiv_version;
string is_support_ai_cpu_compiler;
} StrInfo;
typedef struct tagSoCInfo {
uint32_t aiCoreCnt;
uint32_t vectorCoreCnt;
uint32_t aiCpuCnt;
MemoryType memoryType;
uint64_t memorySize;
L2Type l2Type;
uint64_t l2Size;
typedef struct tag_so_c_info {
uint32_t ai_core_cnt;
uint32_t vector_core_cnt;
uint32_t ai_cpu_cnt;
MemoryType memory_type;
uint64_t memory_size;
L2Type l2_type;
uint64_t l2_size;
uint32_t l2PageNum;
} SoCInfo;
typedef struct tagAiCoreSpec {
double cubeFreq;
uint64_t cubeMSize;
uint64_t cubeNSize;
uint64_t cubeKSize;
uint64_t vecCalcSize;
uint64_t l0ASize;
uint64_t l0BSize;
uint64_t l0CSize;
uint64_t l1Size;
uint64_t smaskBuffer;
uint64_t ubSize;
uint64_t ubblockSize;
uint64_t ubbankSize;
uint64_t ubbankNum;
uint64_t ubburstInOneBlock;
uint64_t ubbankGroupNum;
uint32_t unzipEngines;
uint32_t unzipMaxRatios;
uint32_t unzipChannels;
uint8_t unzipIsTight;
typedef struct tag_ai_core_spec {
double cube_freq;
uint64_t cube_m_size;
uint64_t cube_n_size;
uint64_t cube_k_size;
uint64_t vec_calc_size;
uint64_t l0_a_size;
uint64_t l0_b_size;
uint64_t l0_c_size;
uint64_t l1_size;
uint64_t smask_buffer;
uint64_t ub_size;
uint64_t ubblock_size;
uint64_t ubbank_size;
uint64_t ubbank_num;
uint64_t ubburst_in_one_block;
uint64_t ubbank_group_num;
uint32_t unzip_engines;
uint32_t unzip_max_ratios;
uint32_t unzip_channels;
uint8_t unzip_is_tight;
uint8_t cube_vector_split;
} AiCoreSpec;
typedef struct tagAiCoreMemoryRates {
double ddrRate;
double ddrReadRate;
double ddrWriteRate;
double l2Rate;
double l2ReadRate;
double l2WriteRate;
double l1ToL0ARate;
double l1ToL0BRate;
double l1ToUBRate;
double l0CToUBRate;
double ubToL2Rate;
double ubToDdrRate;
double ubToL1Rate;
typedef struct tag_ai_core_memory_rates {
double ddr_rate;
double ddr_read_rate;
double ddr_write_rate;
double l2_rate;
double l2_read_rate;
double l2_write_rate;
double l1_to_l0_a_rate;
double l1_to_l0_b_rate;
double l1_to_ub_rate;
double l0_c_to_ub_rate;
double ub_to_l2_rate;
double ub_to_ddr_rate;
double ub_to_l1_rate;
} AiCoreMemoryRates;
typedef struct tagVectorCoreSpec {
double vecFreq;
uint64_t vecCalcSize;
uint64_t smaskBuffer;
uint64_t ubSize;
uint64_t ubblockSize;
uint64_t ubbankSize;
uint64_t ubbankNum;
uint64_t ubburstInOneBlock;
uint64_t ubbankGroupNum;
uint64_t vectorRegSize;
uint64_t predicateRegSize;
uint64_t addressRegSize;
typedef struct tag_vector_core_spec {
double vec_freq;
uint64_t vec_calc_size;
uint64_t smask_buffer;
uint64_t ub_size;
uint64_t ubblock_size;
uint64_t ubbank_size;
uint64_t ubbank_num;
uint64_t ubburst_in_one_block;
uint64_t ubbank_group_num;
uint64_t vector_reg_size;
uint64_t predicate_reg_size;
uint64_t address_reg_size;
uint64_t alignment_reg_size;
} VectorCoreSpec;
typedef struct tagVectorCoreMemoryRates {
double ddrRate;
double ddrReadRate;
double ddrWriteRate;
double l2Rate;
double l2ReadRate;
double l2WriteRate;
double ubToL2Rate;
double ubToDdrRate;
typedef struct tag_vector_core_memory_rates {
double ddr_rate;
double ddr_read_rate;
double ddr_write_rate;
double l2_rate;
double l2_read_rate;
double l2_write_rate;
double ub_to_l2_rate;
double ub_to_ddr_rate;
} VectorCoreMemoryRates;
typedef struct tagCPUCache {
typedef struct tag_cpu_cache {
uint32_t AICPUSyncBySW;
uint32_t TSCPUSyncBySW;
} CPUCache;
typedef struct tagPlatformInfo {
StrInfo strInfo;
SoCInfo socInfo;
AiCoreSpec aiCoreSpec;
AiCoreMemoryRates aiCoreMemoryRates;
map<string, vector<string>> aiCoreIntrinsicDtypeMap;
VectorCoreSpec vectorCoreSpec;
VectorCoreMemoryRates vectorCoreMemoryRates;
typedef struct tag_platform_info {
StrInfo str_info;
SoCInfo soc_info;
AiCoreSpec ai_core_spec;
AiCoreMemoryRates ai_core_memory_rates;
map<string, vector<string>> ai_core_intrinsic_dtype_map;
VectorCoreSpec vector_core_spec;
VectorCoreMemoryRates vector_core_memory_rates;
CPUCache cpucache;
map<string, vector<string>> vectorCoreIntrinsicDtypeMap;
map<string, vector<string>> vector_core_intrinsic_dtype_map;
} PlatformInfo;
typedef struct tagOptionalInfo {
string socVersion;
string coreType;
uint32_t aiCoreNum;
string l1FusionFlag;
typedef struct tag_optional_info {
string soc_version;
string core_type;
uint32_t ai_core_num;
string l1_fusion_flag;
} OptionalInfo;
} // namespace fe
#endif

@ -70,7 +70,7 @@ using Status = uint32_t;
// General error code
GE_ERRORNO(0, 0, 0, 0, 0, SUCCESS, 0, "success");
GE_ERRORNO(0b11, 0b11, 0b111, 0xFF, 0b11111, FAILED, 0xFFF, "failed"); /*lint !e401*/
GE_ERRORNO(0b11, 0b11, 0b111, 0xFF, 0b11111, FAILED, 0xFFF, "failed");
} // namespace ge
#endif // INC_EXTERNAL_GE_GE_API_ERROR_CODES_H_

@ -89,5 +89,26 @@ graphStatus aclgrphSaveModel(const string &output_file, const ModelBufferData &m
*/
graphStatus aclgrphGetIRVersion(int *major_version, int *minor_version, int *patch_version);
/**
* @ingroup AscendCL
* @brief infer shape and data type
*
* @param graph[IN] the graph ready to build
* @retval GRAPH_SUCCESS The function is successfully executed.
* @retval OtherValues Failure
*/
graphStatus aclgrphInferShapeAndType(ge::Graph &graph);
/**
* @ingroup AscendCL
* @brief dump graph
*
* @param graph[IN] the graph ready to build
* @param file[IN] file path
* @param file[IN] file path string len
* @retval GRAPH_SUCCESS The function is successfully executed.
* @retval OtherValues Failure
*/
graphStatus aclgrphDumpGraph(const ge::Graph &graph, const char *file, const size_t len);
}; // namespace ge
#endif

@ -0,0 +1,38 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_EXTERNAL_GRAPH_ASCEND_STRING_H_
#define INC_EXTERNAL_GRAPH_ASCEND_STRING_H_
#include <string>
#include <memory>
namespace ge {
// Lightweight string holder for the external graph API.  Wraps an internally
// managed std::string behind a shared_ptr, so copies of an AscendString share
// one buffer; no mutating members are exposed after construction.
class AscendString {
 public:
  AscendString() = default;
  ~AscendString() = default;
  // Constructs from a C string.  Behavior for a null `name` is defined in the
  // out-of-line implementation — presumably an empty holder; TODO confirm.
  explicit AscendString(const char* name);
  // Returns a pointer to the wrapped characters; valid only while some
  // AscendString sharing the underlying buffer is alive.
  const char* GetString() const;

 private:
  std::shared_ptr<std::string> name_;  // shared storage; null when default-constructed
};
} // namespace ge
#endif // INC_EXTERNAL_GRAPH_ASCEND_STRING_H_

@ -34,7 +34,6 @@ using std::vector;
namespace ge {
class AttrValueImpl;
/*lint -e148*/
class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY AttrValue {
public:
using INT = int64_t;
@ -70,6 +69,5 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY AttrValue {
VALUE_SET_GET_DEC(AttrValue::FLOAT)
#undef VALUE_SET_GET_DEC
};
/*lint +e148*/
} // namespace ge
#endif // INC_EXTERNAL_GRAPH_ATTR_VALUE_H_

@ -33,6 +33,7 @@ using graphStatus = uint32_t;
const graphStatus GRAPH_FAILED = 0xFFFFFFFF;
const graphStatus GRAPH_SUCCESS = 0;
const graphStatus GRAPH_PARAM_INVALID = 50331649;
const graphStatus GRAPH_NODE_WITHOUT_CONST_INPUT = 50331648;
} // namespace ge
#endif // INC_EXTERNAL_GRAPH_GE_ERROR_CODES_H_

@ -0,0 +1,129 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_EXTERNAL_GRAPH_NODE_H_
#define INC_EXTERNAL_GRAPH_NODE_H_
#include <vector>
#include <cstdint>
#include "./ge_error_codes.h"
#include "./types.h"
#include "./tensor.h"
#include "./ascend_string.h"
namespace ge {
class AttrValue;
class GNode;
class OpDesc;
class Graph;
class ComputeGraph;
using GNodePtr = std::shared_ptr<GNode>;
using GraphPtr = std::shared_ptr<Graph>;
using OpBytes = std::vector<uint8_t>;
using OpDescPtr = std::shared_ptr<OpDesc>;
using ComputeGraphPtr = std::shared_ptr<ComputeGraph>;
class NodeImpl;
// External-API view of one graph node.  All state lives in the pimpl
// (`NodeImpl`); this class only forwards queries/updates through it, so
// copies are shallow handles onto the same underlying node.
// Methods returning graphStatus report success/failure through the return
// value and write results into the out-parameters.
class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GNode {
 public:
  GNode();
  ~GNode() = default;
  // Node identity -------------------------------------------------------
  graphStatus GetType(ge::AscendString &type) const;  // op type, e.g. kernel name
  graphStatus GetName(ge::AscendString &name) const;  // unique node name
  // Topology queries ----------------------------------------------------
  // Peer producing data input `index`, paired with the peer's output port.
  // (Spelling "Indexs" is the published API name and must be kept.)
  std::pair<GNodePtr, int32_t> GetInDataNodesAndPortIndexs(const int32_t index) const;
  std::vector<GNodePtr> GetInControlNodes() const;
  // All consumers of data output `index`, each paired with its input port.
  std::vector<std::pair<GNodePtr, int32_t>> GetOutDataNodesAndPortIndexs(const int32_t index) const;
  std::vector<GNodePtr> GetOutControlNodes() const;
  // Constant data feeding input `index`, if the producer is a const node.
  graphStatus GetInputConstData(const int32_t index, Tensor &data) const;
  // Name <-> port-index translation (non-const in the published API).
  graphStatus GetInputIndexByName(const ge::AscendString &name, int32_t &index);
  graphStatus GetOutputIndexByName(const ge::AscendString &name, int32_t &index);
  size_t GetInputsSize() const;
  size_t GetOutputsSize() const;
  // Tensor descriptors --------------------------------------------------
  graphStatus GetInputDesc(const int32_t index, TensorDesc &tensor_desc) const;
  graphStatus UpdateInputDesc(const int32_t index, const TensorDesc &tensor_desc);
  graphStatus GetOutputDesc(const int32_t index, TensorDesc &tensor_desc) const;
  graphStatus UpdateOutputDesc(const int32_t index, const TensorDesc &tensor_desc);
  // Attribute access ----------------------------------------------------
  // One Get/Set overload per supported attribute value type.
  graphStatus GetAttr(const ge::AscendString &name, int64_t &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, int32_t &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, uint32_t &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, float &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, ge::AscendString &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, bool &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, Tensor &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<int64_t> &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<int32_t> &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<uint32_t> &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<float> &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<ge::AscendString> &attr_values) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<bool> &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<Tensor> &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, OpBytes &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<std::vector<int64_t>> &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, std::vector<ge::DataType> &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, ge::DataType &attr_value) const;
  graphStatus GetAttr(const ge::AscendString &name, AttrValue &attr_value) const;
  // NOTE(review): SetAttr overloads take non-const refs and are themselves
  // const — both look intentional in the published API; the pimpl holds the
  // mutable state, so `const` here only constrains this handle.
  graphStatus SetAttr(const ge::AscendString &name, int64_t &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, int32_t &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, uint32_t &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, float &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, ge::AscendString &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, bool &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, Tensor &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<int64_t> &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<int32_t> &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<uint32_t> &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<float> &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<ge::AscendString> &attr_values) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<bool> &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<Tensor> &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, OpBytes &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<std::vector<int64_t>> &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, std::vector<ge::DataType> &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, ge::DataType &attr_value) const;
  graphStatus SetAttr(const ge::AscendString &name, AttrValue &attr_value) const;
  bool HasAttr(const ge::AscendString &name);
  // Subgraph access -----------------------------------------------------
  // NOTE(review): out-params are passed by value (GraphPtr / vector copy),
  // so results presumably reach the caller via the shared_ptr payloads —
  // confirm against the implementation.
  graphStatus GetSubgraph(uint32_t index, GraphPtr graph) const;
  graphStatus GetALLSubgraphs(std::vector<GraphPtr> graph_list) const;

 private:
  std::shared_ptr<NodeImpl> impl_;  // pimpl: all node state and logic
  friend class NodeAdapter;         // internal bridge to the core Node type
};
} // namespace ge
#endif // INC_EXTERNAL_GRAPH_NODE_H_

@ -23,11 +23,14 @@
#include <vector>
#include "./operator.h"
#include "./gnode.h"
namespace ge {
class Graph;
class GraphImpl;
using GraphImplPtr = std::shared_ptr<GraphImpl>;
using GraphPtr = std::shared_ptr<Graph>;
class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Graph {
friend class GraphUtils;
@ -53,15 +56,15 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Graph {
graphStatus AddOp(const ge::Operator &op);
graphStatus FindOpByName(const string &name, ge::Operator &op) const;
graphStatus FindOpByName(const std::string &name, ge::Operator &op) const;
graphStatus FindOpByType(const string &type, std::vector<ge::Operator> &ops) const;
graphStatus FindOpByType(const std::string &type, std::vector<ge::Operator> &ops) const;
graphStatus GetAllOpName(std::vector<string> &op_name) const;
graphStatus GetAllOpName(std::vector<std::string> &op_name) const;
graphStatus SaveToFile(const string &file_name) const;
graphStatus SaveToFile(const std::string &file_name) const;
graphStatus LoadFromFile(const string &file_name);
graphStatus LoadFromFile(const std::string &file_name);
const std::string &GetName() const;
@ -73,6 +76,22 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Graph {
///
void SetNeedIteration(bool need_iteration);
std::vector<GNode> GetAllNodes() const;
std::vector<GNode> GetDirectNode() const;
graphStatus RemoveNode(GNode &node);
graphStatus RemoveEdge(GNode &src_node, const int32_t src_port_index, GNode &dst_node, const int32_t dst_port_index);
GNode AddNodeByOp(const Operator &op);
graphStatus AddDataEdge(GNode &src_node, const int32_t src_port_index, GNode &dst_node, const int32_t dst_port_index);
graphStatus AddControlEdge(GNode &src_node, GNode &dst_node);
static GraphPtr ConstructFromInputs(const std::vector<Operator> &inputs, const ge::AscendString &name);
private:
GraphImplPtr impl_{nullptr};
};

@ -63,7 +63,6 @@ using std::function;
using std::shared_ptr;
using std::string;
/*lint -e148*/
class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
public:
friend class OperatorImpl;
@ -91,7 +90,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
explicit Operator(const string &type);
Operator(const string &name, const string &type); // lint !e148
Operator(const string &name, const string &type);
virtual ~Operator() = default;
@ -104,7 +103,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
// Only has one output index = 0
Operator &SetInput(const string &dst_name, const Operator &src_oprt);
Operator &SetInput(const string &dst_name, const Operator &src_oprt, const string &name); // lint !e148
Operator &SetInput(const string &dst_name, const Operator &src_oprt, const string &name);
Operator &SetInput(const string &dst_name, const Operator &src_oprt, uint32_t index);
@ -128,22 +127,22 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
TensorDesc GetOutputDesc(uint32_t index) const;
graphStatus UpdateOutputDesc(const string &name, const TensorDesc &tensor_desc); // lint !e148
graphStatus UpdateOutputDesc(const string &name, const TensorDesc &tensor_desc);
TensorDesc GetDynamicInputDesc(const string &name, uint32_t index) const;
graphStatus UpdateDynamicInputDesc(const string &name, uint32_t index, const TensorDesc &tensor_desc); // lint !e148
graphStatus UpdateDynamicInputDesc(const string &name, uint32_t index, const TensorDesc &tensor_desc);
TensorDesc GetDynamicOutputDesc(const string &name, uint32_t index) const;
graphStatus UpdateDynamicOutputDesc(const string &name, uint32_t index, const TensorDesc &tensor_desc); // lint !e148
graphStatus UpdateDynamicOutputDesc(const string &name, uint32_t index, const TensorDesc &tensor_desc);
graphStatus InferShapeAndType(); // lint !e148
graphStatus InferShapeAndType();
void SetInferenceContext(const InferenceContextPtr &inference_context);
InferenceContextPtr GetInferenceContext() const;
graphStatus VerifyAllAttr(bool disable_common_verifier = false); // lint !e148
graphStatus VerifyAllAttr(bool disable_common_verifier = false);
size_t GetInputsSize() const;
@ -256,20 +255,19 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
void RequiredAttrRegister(const string &name);
graphStatus VerifyAll(); // lint !e148
graphStatus VerifyAll();
// Only has one output index = 0
Operator &SetInput(const string &dst_name, uint32_t dst_index, const Operator &src_oprt);
Operator &SetInput(const string &dst_name, uint32_t dst_index, const Operator &src_oprt,
const string &name); // lint !e148
Operator &SetInput(const string &dst_name, uint32_t dst_index, const Operator &src_oprt, const string &name);
void SubgraphRegister(const string &ir_name, bool dynamic);
void SubgraphCountRegister(const string &ir_name, uint32_t count);
void SetSubgraphBuilder(const string &ir_name, uint32_t index, const SubgraphBuilder &builder);
private:
Operator &SetInput(const string &dst_name, const OutHandler &out_handler); // lint !e148
Operator &SetInput(const string &dst_name, const OutHandler &out_handler);
OutHandler GetOutput(const string &name) const;
@ -283,7 +281,6 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
std::shared_ptr<const Node> GetNode() const;
};
/*lint +e148*/
} // namespace ge
#endif // INC_EXTERNAL_GRAPH_OPERATOR_H_

@ -126,6 +126,5 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Tensor {
friend class TensorAdapter;
};
} // namespace ge
/*lint +e148*/
#endif // INC_EXTERNAL_GRAPH_TENSOR_H_

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save