Delete unused const variables

pull/875/head
zhangxiaokun 4 years ago
parent df6f27c544
commit 30ee7230b3

@ -52,7 +52,6 @@ const char *const kAttrNameWorkspaceReuseFlag = "workspace_reuse_flag";
const char *const kL2FusionDynamicConvergeOp = "l2fusion_dynamic_converge_op";
const char *const kOpNoReuseMem = "no_reuse_mem_flag";
const char *const OP_NO_REUSE_MEM = "OP_NO_REUSE_MEM";
const int kReuseMaxCount = 10;
const int kReuseMaxOpNum = 10;
const int kReuseMaxCharNum = 2000;
} // namespace

@ -32,7 +32,6 @@
#include "graph/utils/type_utils.h"
namespace {
const int kDataOutputIndex = 0;
const int kAllInputAddrIsAtomic = -1;
const int kVirtualInputNodeMemoryReuse = 0;
const int kVirtualOutputNodeMemoryReuse = 1;
@ -920,7 +919,7 @@ Status GraphMemoryAssigner::ReAssignAtomicMemory(bool is_loop_graph) {
auto mem_iter = memory_offset_.find(RT_MEMORY_HBM);
if (mem_iter == memory_offset_.end()) {
std::string error = "Memory offset does not have memory type" + FmtToStr(RT_MEMORY_HBM);
GE_ERRORLOG_AND_ERRORMSG(FAILED, error.c_str());
GE_ERRORLOG_AND_ERRORMSG(FAILED, error.c_str());
return FAILED;
}

@ -55,15 +55,8 @@ using std::vector;
namespace {
const uint32_t kWeightsStartOffset = 512;
const int32_t kWrongIndex = -2;
const float kImgRatioYUV420SP_U8 = 1.5;
const int kImgRatioRGB888_U8 = 3;
const int kImgRatioNC1HWC0DI_FP16 = 12;
const int kInvalidIndexNum = -1;
const uint32_t kInputDimensions2D = 2;
const uint32_t kInputDimensions3D = 3;
const char *const kVectorCore = "VectorCore";
const char *const kCoreType = "ge.engineType";
const std::string kEnableL1Fusion = "ge.l1Fusion";

@ -34,7 +34,6 @@ using std::string;
using std::vector;
namespace {
const uint32_t kMaxSwitchStreamNum = 1;
const int64_t kTaskNumPerNormalNode = 3;
const int64_t kTaskNumPerHcclNode = 200;
const char *const kTrueStr = "true";

@ -24,10 +24,6 @@
#include "graph/load/new_model_manager/model_utils.h"
namespace ge {
namespace {
const uint32_t kMaxTaskOfStream = 200;
}
std::mutex HcclTaskInfo::hccl_follow_stream_mutex_;
HcclTaskInfo::~HcclTaskInfo() {
@ -293,8 +289,8 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
kernel_hccl_infos[i].opType = op_type;
}
davinci_model_->DisableZeroCopy(input_data_addr);
return SUCCESS;
}
return SUCCESS;
}
void HcclTaskInfo::TransToGETaskInfo(GETaskInfo &ge_task) {
ge_task.id = id_;

@ -114,15 +114,9 @@ const char *const kCheckPointForGetVar = "CheckPointGraphForGetVar";
const char *const kCheckPointGraph = "checkpoint_graph";
const char *const kVectorEngine = "VectorEngine";
const char *const kAIcoreEngine = "AIcoreEngine";
const char *const kOffOptimize = "off_optimize";
const int32_t kDynamicDimsTypeIsGetNext = 0;
const int32_t kDynamicDimsTypeIsData = 1;
const int64_t kInvalidDynaimcDimsType = -1;
const char *const kSubstrOfGetNextNosinkName = "IteratorGetNext";
const char *const kShapeDataName = "ascend_mbatch_shape_data";
const char *const kGetNextName = "IteratorV2";
const char *const kExtAttrDataNodes = "data_nodes";
const char *const kExtAttrGetNextNoSink = "getnext_no_sink";
bool IsTailingOptimization() {
string is_tailing_optimization_option;

@ -21,7 +21,6 @@
namespace {
const std::string kStringLength = "StringLength";
const size_t kScalarDimNum = 1;
}
namespace ge {

@ -28,7 +28,6 @@
#include "init/gelib.h"
namespace {
const char *const kRemainNode = "node_remain";
const int kNoTransOp = 1;
} // namespace

@ -37,17 +37,12 @@ constexpr int kDecimal = 10;
constexpr uint8_t kMaxShapesCount = 100;
constexpr uint8_t kMinShapesCount = 2;
const int kDynmaicDims = -1;
const int kDynamicBatchDynamicDimsNum = 1;
const int kDynamicImgSizeDynamciDimsNum = 2;
const size_t kMaxNDDimNum = 4;
const size_t kMinNDDimNum = 1;
const size_t kNumOfGetnextNode = 1;
const int kDivisionConst = 2;
const char *const kSubstrOfGetNextNosinkName = "IteratorGetNext";
const char *const kShapeDataName = "ascend_mbatch_shape_data";
const char *const kGetNextName = "IteratorV2";
const char *const kExtAttrDataNodes = "data_nodes";
const char *const kExtAttrGetNextNoSink = "getnext_no_sink";
inline bool IsGetNextType(const NodePtr &node) {
std::string original_type;

@ -53,9 +53,6 @@ const int kDecimal = 10;
const int kSocVersionLen = 50;
const int kDefaultDeviceIdForTrain = 0;
const int kDefaultDeviceIdForInfer = -1;
const uint32_t kAicoreOverflow = (0x1 << 0);
const uint32_t kAtomicOverflow = (0x1 << 1);
const uint32_t kAllOverflow = (kAicoreOverflow | kAtomicOverflow);
const char *const kGlobalOptionFpCeilingModeDefault = "2";
} // namespace
static std::shared_ptr<GELib> instancePtr_ = nullptr;

@ -32,9 +32,6 @@ namespace ge {
static std::set<std::string> caffe_support_input_format = {"NCHW", "ND"};
static std::set<std::string> tf_support_input_format = {"NCHW", "NHWC", "ND", "NCDHW", "NDHWC"};
static std::set<std::string> onnx_support_input_format = {"NCHW", "ND"};
static const char *const kCaffeFormatSupport = "only support NCHW, ND in Caffe model";
static const char *const kTFFormatSupport = "only support NCHW, NHWC, ND, NCDHW, NDHWC in TF model";
static const char *const kONNXFormatSupport = "only support NCHW, ND in ONNX model";
static std::map<std::string, domiTensorFormat_t> input_format_str_to_geformat = {
{"ND", domi::DOMI_TENSOR_ND},

@ -68,6 +68,10 @@ const char *const kModeSupport = "only support 0(model to framework model), "
"1(framework model to json), 3(only pre-check), 5(pbtxt to json)";
const char *const kModelToJsonSupport = "only support 0(Caffe) 3(TensorFlow) 5(Onnx)";
static const char *const kCaffeFormatSupport = "only support NCHW, ND in Caffe model";
static const char *const kTFFormatSupport = "only support NCHW, NHWC, ND, NCDHW, NDHWC in TF model";
static const char *const kONNXFormatSupport = "only support NCHW, ND in ONNX model";
// limit available mem size 2G
const long kMinAvailableMem = 2097152; // 2 * 1024 * 1024
@ -614,9 +618,9 @@ static bool CheckInputFormat() {
}
// only support NCHW ND
ErrorManager::GetInstance().ATCReportErrMessage(
"E10001", {"parameter", "value", "reason"}, {"--input_format", FLAGS_input_format, ge::kCaffeFormatSupport});
"E10001", {"parameter", "value", "reason"}, {"--input_format", FLAGS_input_format, kCaffeFormatSupport});
GELOGE(ge::FAILED,
"Invalid value for --input_format[%s], %s.", FLAGS_input_format.c_str(), ge::kCaffeFormatSupport);
"Invalid value for --input_format[%s], %s.", FLAGS_input_format.c_str(), kCaffeFormatSupport);
return false;
} else if ((FLAGS_framework == static_cast<int32_t>(domi::TENSORFLOW))) { // tf
if (ge::tf_support_input_format.find(FLAGS_input_format) != ge::tf_support_input_format.end()) {
@ -624,9 +628,9 @@ static bool CheckInputFormat() {
}
// only support NCHW NHWC ND NCDHW NDHWC
ErrorManager::GetInstance().ATCReportErrMessage(
"E10001", {"parameter", "value", "reason"}, {"--input_format", FLAGS_input_format, ge::kTFFormatSupport});
"E10001", {"parameter", "value", "reason"}, {"--input_format", FLAGS_input_format, kTFFormatSupport});
GELOGE(ge::FAILED,
"Invalid value for --input_format[%s], %s.", FLAGS_input_format.c_str(), ge::kTFFormatSupport);
"Invalid value for --input_format[%s], %s.", FLAGS_input_format.c_str(), kTFFormatSupport);
return false;
} else if (FLAGS_framework == static_cast<int32_t>(domi::ONNX)) {
if (ge::onnx_support_input_format.find(FLAGS_input_format) != ge::onnx_support_input_format.end()) {
@ -634,9 +638,9 @@ static bool CheckInputFormat() {
}
// only support NCHW ND
ErrorManager::GetInstance().ATCReportErrMessage(
"E10001", {"parameter", "value", "reason"}, {"--input_format", FLAGS_input_format, ge::kONNXFormatSupport});
"E10001", {"parameter", "value", "reason"}, {"--input_format", FLAGS_input_format, kONNXFormatSupport});
GELOGE(ge::FAILED,
"Invalid value for --input_format[%s], %s.", FLAGS_input_format.c_str(), ge::kONNXFormatSupport);
"Invalid value for --input_format[%s], %s.", FLAGS_input_format.c_str(), kONNXFormatSupport);
return false;
}
return true;

Loading…
Cancel
Save