Compare commits

...

9 Commits
master ... r0.7

Author SHA1 Message Date
mindspore-ci-bot 80f9c96ed3 !82 fix securec download links due to mistakes made by openeuler community
4 years ago
yanghaoran b0f450f237 fix securec download links due to mistakes made by openeuler community
4 years ago
mindspore-ci-bot 61f6add0d7 !63 fix aicpu device ptr release failed
4 years ago
caifubi fc4ee9e531 fix aicpu device ptr release failed
4 years ago
lujiale 794ecbdbd4 update RELEASE.md.
4 years ago
mindspore-ci-bot cb39cb2ba7 !59 Support Aicpu Dynamic Shape
5 years ago
mindspore-ci-bot 323e79a77e !60 code sync for C75B150-0829
5 years ago
wuweikang 47066aea57 code sync for runpackage C75B150
5 years ago
gukecai eed1d913b2 Support Aicpu Dynamic Shape
5 years ago

@ -1,3 +1,22 @@
# Release 0.7.0-beta
## Major Features and Improvements
* Conditional operator memory supports separate allocation of 4G memory space;
* In the zero-copy scenario, atomic_clean supports cleaning the memory of each part of the output when the network is multi-output;
* Support profiling of multiple levels of data in inference scenarios;
* In the online compilation scenarios, GE compilation time optimization.
## Bugfixes
* Fix the issue that calculation result is wrong when the unknown subgraph contains conditional operations;
* Fix the issue that the hccl executor fails to load the task when the input of hccl operator is unknown shape;
* Fix the issue that allgather output is wrong when it exists in the unknown subgraph and its input is unknown shape;
## Thanks to our Contributors
Thanks goes to these wonderful people: wuweikang, wangcong, weiyang, yanghaoran, xutianchun, shibeiji, zhouchao, tanghuikang, zhoulili, liujunzhu, zhengyuanhua, taoxiangdong. Contributions of any kind are welcome!
Contributions of any kind are welcome!
# Release 0.6.0-beta
## Major Features and Improvements

@ -1,7 +1,7 @@
graphengine_add_pkg(securec
VER 1.1.10
URL https://gitee.com/openeuler/bounds_checking_function/repository/archive/v1.1.10.tar.gz
MD5 0782dd2351fde6920d31a599b23d8c91
URL https://gitee.com/openeuler/libboundscheck/repository/archive/v1.1.10.tar.gz
MD5 193f0ca5246c1dd84920db34d2d8249f
LIBS c_sec
PATCHES ${GE_SOURCE_DIR}/third_party/patch/securec/securec.patch001
CMAKE_OPTION " "

@ -42,6 +42,9 @@ class GraphOptimizer {
// optimize original graph for FE quant optimize
virtual Status OptimizeGraphPrepare(ComputeGraph &graph) { return SUCCESS; }
// optimize graph before build for RTS
virtual Status OptimizeGraphBeforeBuild(ComputeGraph &graph) { return SUCCESS; }
// optimize original graph, using in graph preparation stage
virtual Status OptimizeOriginalGraph(ComputeGraph &graph) = 0;

@ -0,0 +1,39 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_ATTR_DEFINE_H_
#define INC_COMMON_UTILS_AI_CORE_COMMON_ATTR_DEFINE_H_
#include <string>
// Attribute-key string constants used by the FE (fusion engine) utilities.
// Each constant names the key under which a value is stored on a node/op
// desc; the notes below are inferred from the key names -- confirm against
// the code that reads/writes them.
namespace fe {
// Fusion-scope id assigned to a node.
static const std::string SCOPE_ID_ATTR = "fusion_scope";
// Implementation type chosen by FE for a node (see OpImplType).
static const std::string FE_IMPLY_TYPE = "_fe_imply_type";
// Type of the parent op -- presumably for nodes inside subgraphs; verify.
static const std::string PARENT_OP_TYPE = "parentOpType";
// Serialized per-task L2-fusion extension blob.
static const std::string ATTR_NAME_TASK_L2_FUSION_INFO_EXTEND_PTR = "task_l2_fusion_info_extend_content";
// Data-dump reference marker.
static const std::string ATTR_DATA_DUMP_REF = "_datadump_ref";
// Serialized L2-fusion extension blob.
static const std::string ATTR_NAME_L2_FUSION_EXTEND_PTR = "l2_fusion_extend_content";
// Flags marking that L1 / L2 optimization has been applied.
static const std::string L1_OPTIMIZED = "l1_optimized";
static const std::string L2_OPTIMIZED = "l2_optimized";
} // namespace fe
#endif  // INC_COMMON_UTILS_AI_CORE_COMMON_ATTR_DEFINE_H_

@ -0,0 +1,118 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_TYPES_H_
#define INC_COMMON_UTILS_AI_CORE_COMMON_TYPES_H_
#include "graph/anchor.h"
#include "graph/types.h"
#include "runtime/kernel.h"
#include <map>
#include <memory>  // std::shared_ptr (L2FusionInfoPtr); previously relied on a transitive include
#include <string>
#include <vector>

namespace fe {
// Source side of an edge crossing a fusion boundary: the producing op, its
// anchor, and the input/output slot indices the edge maps to on the fused op.
struct FusionOpSrc {
  uint32_t src_op_id;
  ge::AnchorPtr src_anchor;
  int32_t fusion_src_index;
  int32_t fusion_dst_index;
};

// Destination side of an edge crossing a fusion boundary.
struct FusionOpDst {
  uint32_t dst_op_id;
  ge::AnchorPtr dst_anchor;
};

// One data edge being re-wired during fusion: the (src, dst) anchor pair plus
// a (node name, anchor) pair identifying the data index it belongs to.
struct FusionDataFlow {
  std::pair<ge::AnchorPtr, ge::AnchorPtr> edge;
  std::pair<std::string, ge::AnchorPtr> node_dataindex_pair;
};

// Placement of one buffer in L2: index, base address and page count
// (semantics per field names; defined by the runtime's L2 management).
typedef struct tagL2FusionData {
  uint32_t l2Index;
  uint64_t l2Addr;
  uint64_t l2PageNum;
} L2FusionData_t;
typedef std::map<uint64_t, L2FusionData_t> L2FusionDataMap_t;

// Runtime L2 control block plus the (up to 8) node names / output indices it covers.
typedef struct tagFeSmDesc {
  rtL2Ctrl_t l2ctrl;
  std::string nodeName[8];
  uint8_t outputIndex[8];
} feSmDesc_t;

// L2-fusion information attached to a single task.
typedef struct TagTaskL2FusionInfo {
  std::string nodeName;
  feSmDesc_t l2Info;
  L2FusionDataMap_t input;   // L2 placement of the task's inputs
  L2FusionDataMap_t output;  // L2 placement of the task's outputs
  uint32_t isUsed;
} TaskL2FusionInfo_t;
using L2FusionInfoPtr = std::shared_ptr<TaskL2FusionInfo_t>;

// L1 / slice parameters handed to an op. Field initializers default to 0,
// but the constructor deliberately overwrites the essential fields with -1
// to mean "invalid / not set".
typedef struct ToOpStruct {
  int64_t opL1Space = 0;
  std::vector<int64_t> opL1FusionType;
  int64_t opL1WorkspaceFlag = 0;  // for workspace flag
  int64_t opL1WorkspaceSize = 0;
  std::vector<std::vector<int64_t>> validInputShape;
  std::vector<std::vector<int64_t>> validOutputShape;
  std::vector<std::vector<int64_t>> sliceInputOffset;   // conv & pooling & ReadSelect
  std::vector<std::vector<int64_t>> sliceOutputOffset;  // WriteSelect
  std::vector<uint32_t> totalShape;
  uint32_t splitIndex = 0;
  ToOpStruct() {
    // set invalid value for essential variable
    opL1Space = -1;
    opL1WorkspaceSize = -1;
  }
} ToOpStruct_t;

// Which backend / kind of implementation provides an op.
enum OpImplType {
  EN_IMPL_CUSTOM_CONSTANT_CCE = 0,    // custom constant op
  EN_IMPL_CUSTOM_TIK,                 // custom tik op
  EN_IMPL_CUSTOM_TBE,                 // custom tbe op
  EN_IMPL_HW_CONSTANT_CCE,            // Huawei built-in constant op
  EN_IMPL_HW_GENERAL_CCE,             // Huawei built-in cce op
  EN_IMPL_HW_TIK,                     // Huawei built-in tik op
  EN_IMPL_HW_TBE,                     // Huawei built-in tbe op
  EN_IMPL_RL,                         // RL op
  EN_IMPL_PLUGIN_TBE,                 // Huawei built-in tbe plugin op
  EN_IMPL_VECTOR_CORE_HW_TBE,         // Huawei built-in tbe op
  EN_IMPL_VECTOR_CORE_CUSTOM_TBE,     // custom tbe op
  EN_IMPL_NON_PERSISTENT_CUSTOM_TBE,  // custom tbe op
  EN_RESERVED                         // reserved value
};

// Per-element size in bytes for each ge::DataType. DT_FLOAT16 occupies two
// bytes (hence sizeof(int16_t)); DT_DUAL is a float + int8 pair.
static const std::map<ge::DataType, uint32_t> DATATYPE_SIZE_MAP{{ge::DT_FLOAT, sizeof(float)},
                                                                {ge::DT_FLOAT16, sizeof(int16_t)},
                                                                {ge::DT_INT8, sizeof(int8_t)},
                                                                {ge::DT_INT32, sizeof(int32_t)},
                                                                {ge::DT_UINT8, sizeof(uint8_t)},
                                                                {ge::DT_UINT32, sizeof(uint32_t)},
                                                                {ge::DT_INT16, sizeof(int16_t)},
                                                                {ge::DT_UINT16, sizeof(uint16_t)},
                                                                {ge::DT_INT64, sizeof(int64_t)},
                                                                {ge::DT_UINT64, sizeof(uint64_t)},
                                                                {ge::DT_DOUBLE, sizeof(double)},
                                                                {ge::DT_BOOL, sizeof(bool)},
                                                                {ge::DT_DUAL, sizeof(float) + sizeof(int8_t)},
                                                                {ge::DT_DUAL_SUB_UINT8, sizeof(int8_t)},
                                                                {ge::DT_DUAL_SUB_INT8, sizeof(int8_t)}};
}  // namespace fe
#endif  // INC_COMMON_UTILS_AI_CORE_COMMON_TYPES_H_

@ -0,0 +1,107 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_GRAPH_COMMON_H_
#define INC_COMMON_UTILS_AI_CORE_COMMON_GRAPH_COMMON_H_
#include "graph/compute_graph.h"
#include "common/aicore_util_types.h"
#include "register/graph_optimizer/graph_optimize_register_error_codes.h"
#include <map>
#include <memory>  // std::unique_ptr (GraphCommImplPtr); previously relied on a transitive include
#include <string>
#include <utility>
#include <vector>

namespace fe {
// Fusion-scope id -> nodes assigned to that scope.
using kScopeNodeMap_t = std::map<int64_t, std::vector<ge::NodePtr>>;
using kScopeNodePair_t = std::pair<int64_t, std::vector<ge::NodePtr>>;

class GraphCommImpl;
using GraphCommImplPtr = std::unique_ptr<GraphCommImpl>;

// Helper used when a set of nodes is fused into a single fusion op: collects
// the data/control edges entering and leaving the fused set, copies nodes and
// edges into the fusion subgraph, and merges the edge lists back onto the
// fused node. Non-copyable; the implementation lives in GraphCommImpl (pimpl).
class GraphComm {
 public:
  GraphComm(const string &engineName);
  virtual ~GraphComm();
  GraphComm(const GraphComm &in) = delete;
  GraphComm &operator=(const GraphComm &in) = delete;

  // Groups the graph's nodes by their fusion-scope id.
  Status GetscopeNodeMap(ge::ComputeGraph &graph, kScopeNodeMap_t &fusionMap);
  // Copies the nodes of the fused set into fusionGraph.
  Status CopyFusionOpNodes(vector<FusionDataFlow> &fusInputEdgeList, vector<FusionDataFlow> &fusOutputEdgeList,
                           vector<ge::NodePtr> &fusNodelist, ge::OpDescPtr fusionOpDesc,
                           ge::ComputeGraphPtr fusionGraph);
  // Copies edges of the original graph that belong to the fused op into fusionGraph.
  Status CopyFusionOpEdges(ge::OpDescPtr fusionOpDesc, ge::ComputeGraph &origGraph, ge::ComputeGraphPtr fusionGraph);
  // Builds the per-node anchor-to-anchor dataflow map for the fused node.
  // mapType selects input vs output direction (exact values defined in the .cpp).
  Status GetNodeDataFlowMap(const ge::NodePtr &fusNode,
                            std::map<ge::NodePtr, std::map<ge::AnchorPtr, ge::AnchorPtr>> &fusionOpAnchorsMap,
                            ge::kFusionDataFlowVec_t &fusDataflowList, const int &mapType);
  // Collects the data edges that cross the boundary of the fused node set.
  Status GetFusionNodeEdgeList(std::vector<ge::NodePtr> &fusNodelist, std::vector<FusionDataFlow> &fusInputEdgeList,
                               std::vector<FusionDataFlow> &fusOutputEdgeList);
  // Reset the bookkeeping lists built up by the Add*/Save* calls below.
  void ClearFusionSrc();
  void ClearFusionDst();
  void AddFusionOutputSrc(const uint32_t &src_op_id, const ge::AnchorPtr &src_anchor, const int32_t &fusion_src_index,
                          std::pair<string, ge::AnchorPtr> &node_dataindex_pair);
  void AddFusionInputSrc(const uint32_t &src_op_id, const ge::AnchorPtr &src_anchor, const int32_t &fusion_dst_index,
                         std::pair<string, ge::AnchorPtr> &node_dataindex_pair);
  void SaveFusionDst(const uint32_t &dst_op_id, ge::AnchorPtr dst_anchor);
  bool IsFusionDstExist(const uint32_t &dst_op_id, const ge::AnchorPtr &dst_anchor);
  bool GetFusionSrc(const uint32_t &src_op_id, const ge::AnchorPtr &src_anchor, int32_t &fusion_src_index,
                    int32_t &fusion_dst_index);
  // Same as GetFusionNodeEdgeList / MergeFusionNodeEdgeList but for control edges.
  Status GetFusionNodeCtrlEdgeList(vector<ge::NodePtr> &fusNodelist, vector<FusionDataFlow> &fusInputCtrlEdgeList,
                                   vector<FusionDataFlow> &fusOutputCtrlEdgeList);
  Status MergeFusionNodeEdgeList(ge::NodePtr &fusNode, vector<ge::NodePtr> &fusNodelist,
                                 vector<FusionDataFlow> &fusInputEdgeList, vector<FusionDataFlow> &fusOutputEdgeList);
  Status MergeFusionNodeCtrlEdgeList(ge::NodePtr &fusNode, vector<ge::NodePtr> &fusNodelist,
                                     vector<FusionDataFlow> &fusInputEdgeList,
                                     vector<FusionDataFlow> &fusOutputEdgeList);
  string GetEngineName();

 private:
  Status MergeFusionNodeInputEdgeList(ge::NodePtr fusNode, std::vector<ge::NodePtr> &fusNodelist,
                                      std::vector<FusionDataFlow> &fusInputEdgeList);
  Status MergeFusionNodeOutputEdgeList(ge::NodePtr fusNode, std::vector<ge::NodePtr> &fusNodelist,
                                       std::vector<FusionDataFlow> &fusOutputEdgeList);

  string engineName_;
  std::vector<FusionOpSrc> exist_fusion_src_list_;
  std::vector<FusionOpDst> exist_fusion_dst_list_;
  // std::vector<std::multimap<std::string, uint32_t>>
  ge::kFusionDataFlowVec_t fusion_input_dataflow_list_;
  // std::vector<std::multimap<std::string, ge::AnchorPtr>>
  ge::kFusionDataFlowVec_t fusion_output_dataflow_list_;
  GraphCommImplPtr graphCommImplPtr_;
};
}  // namespace fe
#endif  // INC_COMMON_UTILS_AI_CORE_COMMON_GRAPH_COMMON_H_

@ -0,0 +1,42 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INC_COMMON_UTILS_AI_CORE_COMMON_SCOPE_ALLOCATOR_H_
#define INC_COMMON_UTILS_AI_CORE_COMMON_SCOPE_ALLOCATOR_H_
#include "graph/op_desc.h"
namespace fe {
// Hands out fusion-scope ids and reads/writes the scope-id attribute on op
// descs. Copy construction/assignment are disabled. The allocation policy
// (initial value, increment) lives in the .cpp and is not visible here.
class ScopeAllocator {
 public:
 ScopeAllocator();
 virtual ~ScopeAllocator();
 ScopeAllocator(const ScopeAllocator& in) = delete;
 ScopeAllocator& operator=(const ScopeAllocator& in) = delete;

 public:
 // Initializes the allocator's internal scope-id counter.
 void Init();
 // Returns the current scope id without allocating a new one.
 int64_t GetCurrentScopeId();
 // Allocates and returns a new scope id.
 int64_t AllocateScopeId(void);
 // True if 'opdef' carries a scope-id attribute.
 bool HasScopeAttr(ge::ConstOpDescPtr opdef);
 // Reads the scope-id attribute of 'opdef' into 'scopeId'; false if absent.
 bool GetScopeAttr(ge::ConstOpDescPtr opdef, int64_t& scopeId);
 // Stores 'scopeId' as the scope-id attribute on 'opdef'; returns success.
 bool SetScopeAttr(ge::OpDescPtr opdef, int64_t scopeId);

 private:
 // Current scope-id counter value.
 int64_t scopeId;
};
}  // namespace fe
#endif  // INC_COMMON_UTILS_AI_CORE_COMMON_SCOPE_ALLOCATOR_H_

@ -14,15 +14,20 @@
* limitations under the License.
*/
#ifndef GE_GRAPH_PASSES_SWITCH_SPLIT_PASS_H_
#define GE_GRAPH_PASSES_SWITCH_SPLIT_PASS_H_
#ifndef AICORE_PARAM_CALCULATOR
#define AICORE_PARAM_CALCULATOR
#include <set>
#include "graph/passes/base_pass.h"
namespace ge {
class SwitchSplitPass : public BaseNodePass {
#include "graph/node.h"
#include "graph_optimizer/graph_optimize_register_error_codes.h"
namespace fe {
class AICoreParamCalculator {
public:
Status Run(NodePtr &node) override;
AICoreParamCalculator();
~AICoreParamCalculator();
Status CalcOpRunningParam(ge::Node &node);
};
} // namespace ge
#endif // GE_GRAPH_PASSES_SWITCH_SPLIT_PASS_H_
} // namespace fe
#endif // AICORE_PARAM_CALCULATOR

@ -0,0 +1,45 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TENSORSIZE_CALCULATOR_H
#define TENSORSIZE_CALCULATOR_H
#include "graph_optimizer/graph_optimize_register_error_codes.h"
#include <map>
#include <string>
#include "graph/compute_graph.h"
#include "graph/op_desc.h"
namespace fe {
// Stateless helper (all members static) that computes and records the tensor
// sizes for the inputs and outputs of an op desc.
class TensorSizeCalculator {
 public:
  /**
   * Calculate the tensor size of input and output of each opdesc
   * @param opDesc opdesc object
   * @return status SUCCESS or FAILED
   */
  static Status CalculateOpTensorSize(ge::OpDesc &opDesc);

 private:
  // Computes the input-tensor sizes; outputRealCalcFlag is shared with the
  // output pass (exact semantics defined in the .cpp -- confirm there).
  static Status CalcInputOpTensorSize(ge::OpDesc &opDesc, int32_t &outputRealCalcFlag);
  // Computes the output-tensor sizes, honoring outputRealCalcFlag.
  static Status CalcOutputOpTensorSize(ge::OpDesc &opDesc, int32_t &outputRealCalcFlag);
};
}  // namespace fe
#endif  // TENSORSIZE_CALCULATOR_H

@ -98,6 +98,15 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Session {
///
Status RunGraphAsync(uint32_t graphId, const std::vector<ge::InputTensorInfo> &inputs, RunAsyncCallback callback);
///
/// @ingroup ge_graph
/// @brief get variables in the session with specific session id
/// @param [in] var_names: variable names
/// @param [out] var_values: variable values
/// @return Status result of function
///
Status GetVariables(const std::vector<std::string> &var_names, std::vector<Tensor> &var_values);
///
/// @ingroup ge_graph
/// @brief register callback func with specific summary or checkpoint by users

@ -23,6 +23,12 @@
#include "graph/graph.h"
#include "graph/ge_error_codes.h"
namespace {
#define IR_MAJOR_VERSION (int(1))
#define IR_MINOR_VERSION (int(0))
#define IR_PATCH_VERSION (int(0))
} // namespace
namespace ge {
struct ModelBufferData {
@ -71,5 +77,17 @@ graphStatus aclgrphBuildModel(const ge::Graph &graph, const std::map<std::string
*/
graphStatus aclgrphSaveModel(const string &output_file, const ModelBufferData &model);
/**
* @ingroup AscendCL
* @brief query IR interface version
*
* @param major_version[OUT] IR interface major version
* @param minor_version[OUT] IR interface minor version
* @param patch_version[OUT] IR interface patch version
* @retval GRAPH_SUCCESS The function is successfully executed.
* @retval OtherValues Failure
*/
graphStatus aclgrphGetIRVersion(int *major_version, int *minor_version, int *patch_version);
}; // namespace ge
#endif

@ -45,9 +45,11 @@
namespace ge {
class Operator;
class OperatorImpl;
class NodeUtils;
class NamedAttrs;
class Graph;
class AttrValue;
class Node;
using SubgraphBuilder = std::function<Graph()>;
using OperatorImplPtr = std::shared_ptr<OperatorImpl>;
@ -65,8 +67,8 @@ using std::string;
class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
public:
friend class OperatorImpl;
friend class GraphBuilderImpl;
friend class NodeUtils;
using OpInt = int64_t;
using OpFloat = float;
@ -104,6 +106,8 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
Operator &SetInput(const string &dst_name, const Operator &src_oprt, const string &name); // lint !e148
Operator &SetInput(const string &dst_name, const Operator &src_oprt, uint32_t index);
Operator &AddControlInput(const Operator &src_oprt);
graphStatus GetInputConstData(const string &dst_name, Tensor &data) const;
@ -269,11 +273,15 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator {
OutHandler GetOutput(const string &name) const;
OutHandler GetOutput(uint32_t index) const;
OperatorImplPtr GetOperatorImplPtr() const;
OperatorImplPtr operator_impl_{nullptr};
graphStatus GetInputConstDataOut(const string &dst_name, Tensor &data) const;
std::shared_ptr<const Node> GetNode() const;
};
/*lint +e148*/
} // namespace ge

@ -130,6 +130,10 @@ class OpReg {
Operator::SetInput(#x, v, srcName); \
return *this; \
} \
_THIS_TYPE &set_input_##x(Operator &v, uint32_t index) { \
Operator::SetInput(#x, v, index); \
return *this; \
} \
_THIS_TYPE &set_input_##x(Operator &v) { \
Operator::SetInput(#x, v); \
return *this; \
@ -159,6 +163,10 @@ class OpReg {
Operator::SetInput(#x, v, srcName); \
return *this; \
} \
_THIS_TYPE &set_input_##x(Operator &v, uint32_t index) { \
Operator::SetInput(#x, v, index); \
return *this; \
} \
TensorDesc get_input_desc_##x() const { return Operator::GetInputDesc(#x); } \
graphStatus update_input_desc_##x(const TensorDesc &tensorDesc) { \
return Operator::UpdateInputDesc(#x, tensorDesc); \

File diff suppressed because it is too large Load Diff

@ -22,7 +22,7 @@
#include <string>
#include <vector>
#include "common/fmk_error_codes.h"
#include "framework/common/fmk_error_codes.h"
#include "ge/ge_api_error_codes.h"
#include "external/graph/types.h"
#include "external/ge/ge_api_types.h"
@ -49,6 +49,7 @@ enum OpEngineType {
};
const char *const GE_ENGINE_ATTR_MEM_TYPE_HBM = "HBM";
const char *const GE_OPTION_EXEC_PLACEMENT = "ge.exec.placement";
// Data cache, including data address and length
struct DataBuffer {
@ -128,6 +129,7 @@ struct OriginInputInfo {
// The structure of AIPP info
struct AippConfigInfo {
int8_t aipp_mode;
int8_t input_format;
int32_t src_image_size_w;
int32_t src_image_size_h;
@ -175,6 +177,9 @@ struct AippConfigInfo {
float var_reci_chn_1;
float var_reci_chn_2;
float var_reci_chn_3;
int8_t support_rotation;
uint32_t related_input_rank;
uint32_t max_src_image_size;
};
// The structure of offline Modeldata
@ -250,5 +255,31 @@ struct ComputeGraphDescInfo {
std::vector<std::vector<int64_t>> output_shape;
std::vector<DataType> output_data_type;
};
struct OpDescInfo {
std::string op_name;
uint32_t task_id;
uint32_t stream_id;
std::vector<Format> input_format;
std::vector<std::vector<int64_t>> input_shape;
std::vector<DataType> input_data_type;
std::vector<void *> input_addrs;
std::vector<Format> output_format;
std::vector<std::vector<int64_t>> output_shape;
std::vector<DataType> output_data_type;
std::vector<void *> output_addrs;
};
struct ModelDumpConfig {
std::string model_name;
std::vector<std::string> layers;
};
struct DumpConfig {
std::string dump_path;
std::string dump_mode;
std::string dump_status;
std::string dump_op_switch;
std::vector<ModelDumpConfig> dump_list;
};
} // namespace ge
#endif // INC_FRAMEWORK_COMMON_GE_TYPES_H_

@ -606,6 +606,7 @@ static constexpr uint32_t MODEL_FILE_RESERVED_LENGTH = 79;
/// @brief INPUT node type
///
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string INPUT_TYPE;
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMMY_DATA;
///
/// @ingroup domi_omg

@ -347,6 +347,14 @@ std::string ToString(const google::protobuf::RepeatedField<T> &rpd_field) {
///
uint64_t GetCurrentTimestap();
///
/// @ingroup domi_common
/// @brief Obtains the absolute time (timestamp) of the current system.
/// @return Timestamp, in seconds (US)
///
///
uint32_t GetCurrentSecondTimestap();
///
/// @ingroup domi_common
/// @brief Check whether the product of two int64 numbers exceeds the int64 range.

@ -31,6 +31,7 @@ enum PriorityEnum {
COST_1,
COST_2,
COST_9 = 9,
COST_10 = 10,
};
struct DNNEngineAttribute {

@ -135,6 +135,15 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor {
///
ge::Status GetCombinedDynamicDims(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info);
///
/// @ingroup ge
/// @brief Get user designeate shape order
/// @param [in] model_id
/// @param [out] user_designate_shape_order
/// @return execute result
///
ge::Status GetUserDesignateShapeOrder(uint32_t model_id, std::vector<std::string> &user_designate_shape_order);
ge::Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type);
///
@ -162,6 +171,8 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor {
ge::Status CommandHandle(const ge::Command &command);
ge::Status SetDump(const DumpConfig &dump_config);
///
/// @ingroup ge
/// @brief Query model memory consuming interface
@ -261,6 +272,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor {
ge::Status GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info);
ge::Status GetAllAippInputOutputDims(uint32_t model_id, uint32_t index, std::vector<InputOutputDims> &input_dims,
std::vector<InputOutputDims> &output_dims);
ge::Status GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info);
private:
static bool isInit_;

@ -161,12 +161,13 @@ class TbeTaskInfo : public TaskInfo {
class AicpuTaskInfo : public TaskInfo {
public:
AicpuTaskInfo(const std::string &op_name, uint32_t stream_id, const string &so_name, const std::string &kernel_name,
const std::string &node_def, const std::vector<void *> &input_data_addrs,
const std::string &node_def, const std::string &ext_info, const std::vector<void *> &input_data_addrs,
const std::vector<void *> &output_data_addrs, bool dump_flag)
: TaskInfo(op_name, stream_id, TaskInfoType::AICPU, dump_flag),
so_name_(so_name),
kernel_name_(kernel_name),
node_def_(node_def),
ext_info_(ext_info),
input_data_addrs_(input_data_addrs),
output_data_addrs_(output_data_addrs) {}
~AicpuTaskInfo() override {}
@ -176,11 +177,13 @@ class AicpuTaskInfo : public TaskInfo {
const std::string &node_def() const { return node_def_; }
const std::vector<void *> &input_data_addrs() const { return input_data_addrs_; }
const std::vector<void *> &output_data_addrs() const { return output_data_addrs_; }
const std::string &ext_info() const { return ext_info_; }
private:
std::string so_name_;
std::string kernel_name_;
std::string node_def_;
std::string ext_info_;
std::vector<void *> input_data_addrs_;
std::vector<void *> output_data_addrs_;
};

@ -27,6 +27,7 @@ namespace ge {
enum MemStorageType {
HBM = 0,
RDMA_HBM,
HOST_DDR,
};
struct HostVarInfo {

@ -96,6 +96,10 @@ Status CheckCustomAiCpuOpLib();
Status DumpInfershapeJson(const ge::Graph &graph, const char *json_file);
Status SetOutputNodeInfo(ge::Graph &graph, const std::string &output_type, const std::string &output_format);
Status GetOutputLeaf(ge::NodePtr node, std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info);
void GetOutputNodesNameAndIndex(std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info,
std::vector<std::string> &output_nodes_name);

@ -883,6 +883,7 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string REF_VAR_
// Assign
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ASSIGN_VALIDATE_SHAPE;
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ASSIGN_VAR_NAME;
// ShapeN
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string SHAPEN_ATTR_N;
@ -939,6 +940,7 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAM
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BATCH_NUM;
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BATCH_LABEL;
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMBINED_BATCH;
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_USER_DESIGNEATE_SHAPE_ORDER;
// Control flow
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STREAM_SWITCH_COND;
@ -957,7 +959,6 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAM
// Function Op
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PARENT_NODE_INDEX;
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_PARENT_CONST_TYPE;
// Used for mark the active node is for loop, type:bool
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_IS_LOOP_ACTIVE;
@ -968,6 +969,8 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAM
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MEMORY_TYPE_WORKSPACE;
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_MEMORY_TYPE_RANGE;
// Atomic addr clean attrs
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_INPUT_INDEX;
GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATOMIC_ATTR_OUTPUT_INDEX;

@ -24,6 +24,7 @@ namespace ge {
class GEContext {
public:
graphStatus GetOption(const std::string &key, std::string &option);
bool GetHostExecFlag();
uint64_t SessionId();
uint32_t DeviceId();
uint64_t TraceId();

@ -153,9 +153,6 @@ class OpDesc : public std::enable_shared_from_this<OpDesc>, public AttrHolder {
graphStatus AddDynamicOutputDesc(const string &name, const unsigned int num, bool isPushBack = true);
void RemoveInputDesc(uint32_t index);
void RemoveOutputDesc(uint32_t index);
bool IsOptionalInput(const string &name) const;
bool IsOptionalInput(uint32_t index) const;

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save