Clean cmake building warnings

pull/4867/head
wsc 5 years ago
parent 8440f2bcdb
commit 2fc79ae043

@@ -64,14 +64,25 @@ set(CMAKE_VERBOSE_MAKEFILE on)
add_compile_definitions(USE_ANDROID_LOG)
add_compile_definitions(NO_DLIB)
add_compile_options(-fPIC)
if("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden")
string(REPLACE "-g" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
if (NOT PLATFORM_ARM64 AND NOT PLATFORM_ARM32)
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else ()
## enable for binscope for release
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations ${CMAKE_CXX_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_SHARED_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_EXE_LINKER_FLAGS}")
string(REPLACE " -g " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif ()
endif ()
if (BUILD_DEVICE)
add_compile_definitions(BUILD_DEVICE)
endif()
endif ()
if (SUPPORT_TRAIN)
add_compile_definitions(SUPPORT_TRAIN)
endif()
@@ -86,17 +97,17 @@ if (SUPPORT_GPU)
add_definitions(-DMS_OPENCL_PROFILE=false)
add_definitions(-DCL_HPP_TARGET_OPENCL_VERSION=200)
add_compile_definitions(SUPPORT_GPU)
if(OFFLINE_COMPILE)
if (OFFLINE_COMPILE)
add_compile_definitions(PROGRAM_WITH_IL)
endif()
endif ()
include_directories(${TOP_DIR}/third_party/OpenCL-Headers)
include_directories(${TOP_DIR}/third_party/OpenCL-CLHPP/include)
endif()
endif ()
if (WIN32)
add_compile_definitions(LITE_EXPORTS)
add_compile_definitions(BUILDING_DLL)
endif()
endif ()
set(ANF_SRC
${CMAKE_CURRENT_SOURCE_DIR}/../core/ir/meta_tensor.cc
@@ -110,26 +121,26 @@ if (BUILD_CONVERTER)
MESSAGE(FATAL_ERROR "Cannot build converter in arm platform")
endif()
find_package(Python3 3.7 COMPONENTS Interpreter Development)
if(Python3_FOUND)
if (Python3_FOUND)
set(PYTHON_INCLUDE_DIRS "${Python3_INCLUDE_DIRS}")
set(PYTHON_LIBRARIES "${Python3_LIBRARIES}")
if (WIN32)
if (Python3_DIR)
message("Python3_DIR set already: " ${Python3_DIR})
else()
else ()
string(LENGTH ${PYTHON_LIBRARIES} PYTHON_LIBRARIES_LEN)
string(LENGTH "libpythonxx.a" Python3_NAME_LEN)
math(EXPR Python3_DIR_LEN ${PYTHON_LIBRARIES_LEN}-${Python3_NAME_LEN})
string(SUBSTRING ${Python3_LIBRARIES} 0 ${Python3_DIR_LEN} Python3_DIR)
message("Python3_DIR: " ${Python3_DIR})
endif()
endif ()
link_directories(${Python3_DIR})
endif()
else()
endif ()
else ()
find_python_package(py_inc py_lib)
set(PYTHON_INCLUDE_DIRS "${py_inc}")
set(PYTHON_LIBRARIES "${py_lib}")
endif()
endif ()
include_directories(${PYTHON_INCLUDE_DIRS})
include(${TOP_DIR}/cmake/external_libs/json.cmake)
include(${TOP_DIR}/cmake/external_libs/pybind11.cmake)
@@ -137,27 +148,27 @@ if (BUILD_CONVERTER)
include_directories(${TOP_DIR}/third_party/protobuf/build/include)
link_directories(${TOP_DIR}/third_party/protobuf/build/lib)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/converter)
endif()
endif ()
if (BUILD_DEVICE)
if (PLATFORM_ARM32 OR PLATFORM_ARM64)
if (NOT DEFINED ENV{ANDROID_NDK})
message(FATAL_ERROR "env ANDROID_NDK should be setted for ARM compile")
endif()
endif ()
add_compile_definitions(ENABLE_ARM)
endif()
endif ()
if (PLATFORM_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
add_compile_definitions(ENABLE_ARM32)
endif()
endif ()
if (PLATFORM_ARM64)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod+fp16")
add_compile_definitions(ENABLE_ARM64)
if (ENABLE_FP16)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8.2-a+dotprod+fp16")
endif ()
endif()
endif()
endif ()
endif ()
if (BUILD_MINDDATA)
# opencv
@@ -167,7 +178,7 @@ if (BUILD_MINDDATA)
# json
if (NOT BUILD_CONVERTER)
include(${TOP_DIR}/cmake/external_libs/json.cmake)
endif()
endif ()
# eigen
include_directories(${TOP_DIR}/third_party/eigen/)
# jpeg-turbo
@@ -183,7 +194,7 @@ if (BUILD_MINDDATA)
add_compile_definitions(ENABLE_ANDROID)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/minddata)
endif()
endif ()
if (BUILD_DEVICE)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src)
@@ -191,7 +202,7 @@ if (BUILD_DEVICE)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/benchmark)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/time_profile)
endif()
endif()
endif ()
endif ()
include(${TOP_DIR}/cmake/package_lite.cmake)
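For context: the release branch above now builds with -Wall -Werror, _FORTIFY_SOURCE=2, -fstack-protector-strong and RELRO/noexecstack link flags, so every remaining warning becomes a build failure. A minimal sketch (hypothetical function, not from the repository) of the kind of code those flags start rejecting:

#include <cstddef>
#include <vector>

// With -Wall -Werror, mixing a signed counter with an unsigned bound is no
// longer just a -Wsign-compare warning but a hard error.
int SumShape(const std::vector<int> &shape) {
  int total = 0;
  // for (int i = 0; i < shape.size(); ++i)          // int vs std::size_t -> rejected
  for (std::size_t i = 0; i < shape.size(); ++i) {   // warning-free form
    total += shape[i];
  }
  return total;
}

Most of the source changes in the rest of this commit are exactly this kind of cleanup.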

@@ -37,11 +37,6 @@ static constexpr int kNHWCDimNumber = 4;
static constexpr int TENSOR_MAX_REFCOUNT = 999;
static const char *DELIM_COLON = ":";
static const char *DELIM_COMMA = ",";
static const char *DELIM_SLASH = "/";
static const char *DELIM_DOUBLE_BACKSLASH = "\\";
// quantization relative
static const char QUANTIZED_UINT8[] = "QUANTIZED_UINT8";
static const char QUANTIZED_INT8[] = "QUANTIZED_INT8";

@@ -103,7 +103,7 @@ int WriteToBin(const std::string &file_path, void *data, size_t size) {
int CompareOutputData(float *output_data, float *correct_data, int data_size) {
float error = 0;
for (size_t i = 0; i < data_size; i++) {
for (int i = 0; i < data_size; i++) {
float abs = fabs(output_data[i] - correct_data[i]);
if (abs > 0.00001) {
error += abs;
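This hunk goes the opposite way from most of the shape-inference fixes below: the bound data_size is declared as int, so the counter is changed from size_t to int instead of the bound being cast. A small sketch of the same pattern with hypothetical names:

// The bound is a signed int, so the loop counter is int as well; a size_t
// counter here would trip -Wsign-compare.
float SumAbsDiff(const float *a, const float *b, int n) {
  float total = 0.0f;
  for (int i = 0; i < n; ++i) {
    float d = a[i] - b[i];
    total += (d < 0) ? -d : d;
  }
  return total;
}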

@@ -237,7 +237,7 @@ std::string Tensor::ToString() const {
if (data == nullptr) {
return "Data of tensor is nullptr";
} else {
for (size_t i = 0; i < 40 && i < this->ElementsNum(); i++) {
for (int i = 0; i < 40 && i < this->ElementsNum(); i++) {
oss << " " << data[i];
}
}
@@ -247,7 +247,7 @@ std::string Tensor::ToString() const {
if (data == nullptr) {
return "Data of tensor is nullptr";
} else {
for (size_t i = 0; i < 40 && i < this->ElementsNum(); i++) {
for (int i = 0; i < 40 && i < this->ElementsNum(); i++) {
oss << " " << data[i];
}
}

@@ -187,8 +187,8 @@ class Tensor : public mindspore::tensor::MetaTensor {
protected:
void *data_ = nullptr;
void *device_data_ = nullptr;
schema::NodeType tensorType;
schema::Format format_;
schema::NodeType tensorType;
size_t refCount = 0;
std::vector<tensor::QuantArg> quant_params_;
mindspore::lite::Allocator *allocator_ = nullptr;

@@ -154,11 +154,11 @@ class LiteKernel {
KernelKey desc_;
std::string name_;
OpParameter *op_parameter_ = nullptr;
const mindspore::lite::PrimitiveC *primitive_ = nullptr;
const lite::Context *context_ = nullptr;
// tensor will free in ~lite_session()
std::vector<lite::tensor::Tensor *> in_tensors_;
std::vector<lite::tensor::Tensor *> out_tensors_;
const mindspore::lite::PrimitiveC *primitive_ = nullptr;
const lite::Context *context_ = nullptr;
std::vector<LiteKernel *> in_kernels_;
std::vector<LiteKernel *> out_kernels_;
bool train_mode_ = false;
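The member shuffles in the two hunks above (tensorType moved below format_ in Tensor; primitive_ and context_ moved below the tensor vectors in LiteKernel) are presumably meant to keep declaration order in line with the constructors' mem-initializer lists: members are always initialized in declaration order, and an initializer list written in a different order triggers -Wreorder under -Wall. A minimal sketch with a hypothetical class:

// Members are initialized in declaration order (format_ first, then type_),
// whatever order the initializer list is written in; listing them as
// type_(t), format_(f) would emit -Wreorder, which -Werror makes fatal.
class ExampleTensor {
 public:
  ExampleTensor(int f, int t) : format_(f), type_(t) {}

 private:
  int format_;
  int type_;
};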

@@ -66,7 +66,7 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
}
auto quant_params = srcTensor->quantParams();
if (quant_params != nullptr) {
for (int j = 0; j < quant_params->size(); j++) {
for (size_t j = 0; j < quant_params->size(); j++) {
tensor::QuantArg quant_arg{};
quant_arg.scale = quant_params->Get(j)->scale();
quant_arg.zeroPoint = quant_params->Get(j)->zeroPoint();
@@ -93,9 +93,7 @@ void LiteSession::InitGraphInputTensors(const lite::Model *model) {
}
void LiteSession::InitGraphInputMSTensors(const lite::Model *model) {
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(this->input_vec_.empty());
MS_ASSERT(meta_graph != nullptr);
for (auto &input_tensor : this->inputs_) {
MS_ASSERT(input_tensor != nullptr);
this->input_vec_.emplace_back(new lite::tensor::LiteTensor(input_tensor));

@@ -52,10 +52,9 @@ int Fill::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::
}
std::vector<int> output_shape;
for (int i = 0; i < GetDims().size(); i++) {
for (size_t i = 0; i < GetDims().size(); i++) {
output_shape.push_back(GetDims()[i]);
}
// (void)output_shape.insert(output_shape.begin(), GetDims().begin(), GetDims().end());
output->set_shape(output_shape);
return RET_OK;
}

@@ -64,7 +64,7 @@ int FullConnection::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
}
int new_k = 1;
if (GetUseAxis()) {
for (int i = GetAxis(); i < input0->shape().size(); ++i) {
for (size_t i = GetAxis(); i < input0->shape().size(); ++i) {
new_k *= input0->shape()[i];
}
if (new_k != input1->shape()[1]) {
@@ -86,7 +86,7 @@ int FullConnection::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
out_shape[GetAxis()] = input1->shape()[0];
} else {
int total = 1;
for (int i = 0; i < input0->shape().size(); ++i) {
for (size_t i = 0; i < input0->shape().size(); ++i) {
total *= input0->shape()[i];
}
out_shape.resize(2);

@@ -43,7 +43,6 @@ void Pad::SetPaddingMode(int padding_mode) {}
void Pad::SetConstantValue(float constant_value) {}
#endif
namespace {
const size_t kPaddingsSize = 8;
const size_t kInputRank = 4;
} // namespace
int Pad::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {

@@ -145,10 +145,9 @@ int Reshape::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tenso
}
}
} else if (inputs_.size() == kSingleNum) {
for (int i = 0; i < GetShape().size(); ++i) {
for (size_t i = 0; i < GetShape().size(); ++i) {
out_shape.push_back(GetShape()[i]);
}
// std::copy(GetShape().begin(), GetShape().end(), std::back_inserter(out_shape));
} else {
MS_LOG(ERROR) << "inputs tensor size invalid.";
return RET_INFER_ERR;

@@ -75,7 +75,7 @@ int Split::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor:
int split_dim = GetSplitDim();
std::vector<int> input_shape = input->shape();
std::vector<int> size_split;
for (int i = 0; i < GetSizeSplits().size(); ++i) {
for (size_t i = 0; i < GetSizeSplits().size(); ++i) {
size_split.push_back(GetSizeSplits()[i]);
}
for (int i = 0; i < number_split; ++i) {

@@ -60,10 +60,9 @@ int Tile::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::
MS_ASSERT(tile_prim != nullptr);
std::vector<int> out_shape;
std::vector<int> multiples;
for (int i = 0; i < GetMultiples().size(); ++i) {
for (size_t i = 0; i < GetMultiples().size(); ++i) {
multiples.push_back(GetMultiples()[i]);
}
// std::copy(GetMultiples().begin(), GetMultiples().end(), std::back_inserter(multiples));
for (size_t i = 0; i < input->shape().size(); ++i) {
int tmp = input->shape()[i] * multiples[i];
out_shape.push_back(tmp);

@@ -59,10 +59,9 @@ int Transpose::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<ten
return RET_ERROR;
}
std::vector<int> perm;
for (int i = 0; i < GetPerm().size(); i++) {
for (size_t i = 0; i < GetPerm().size(); i++) {
perm.push_back(GetPerm()[i]);
}
// perm.insert(perm.begin(), GetPerm().begin(), GetPerm().end());
std::vector<int> in_shape = input->shape();
std::vector<int> out_shape;
out_shape.resize(perm.size());

@@ -246,7 +246,7 @@ OpParameter *PopulatePreluParameter(const mindspore::lite::PrimitiveC *primitive
}
prelu_param->op_parameter_.type_ = primitive->Type();
auto temp = param->GetSlope();
for (int i = 0; i < temp.size(); i++) {
for (size_t i = 0; i < temp.size(); i++) {
prelu_param->slope_[i] = temp[i];
}
return reinterpret_cast<OpParameter *>(prelu_param);
@@ -404,7 +404,6 @@ OpParameter *PopulateConvDwParameter(const mindspore::lite::PrimitiveC *primitiv
conv_param->stride_h_ = conv_primitive->GetStrideH();
conv_param->stride_w_ = conv_primitive->GetStrideW();
auto pad_mode = conv_primitive->GetPadMode();
auto convdw_lite_primitive = (lite::DepthwiseConv2D *)primitive;
MS_ASSERT(nullptr != convdw_lite_primitive);
conv_param->pad_u_ = convdw_lite_primitive->PadUp();
@@ -828,7 +827,7 @@ OpParameter *PopulateTileParameter(const mindspore::lite::PrimitiveC *primitive)
auto param = dynamic_cast<const mindspore::lite::Tile *>(primitive);
auto multiples = param->GetMultiples();
tile_param->in_dim_ = multiples.size();
for (size_t i = 0; i < tile_param->in_dim_; ++i) {
for (int i = 0; i < tile_param->in_dim_; ++i) {
tile_param->multiples_[i] = multiples[i];
}
return reinterpret_cast<OpParameter *>(tile_param);
@@ -1231,7 +1230,7 @@ OpParameter *PopulateCropParameter(const mindspore::lite::PrimitiveC *primitive)
crop_param->op_parameter_.type_ = primitive->Type();
crop_param->axis_ = param->GetAxis();
crop_param->offset_size_ = param_offset.size();
for (int i = 0; i < param_offset.size(); ++i) {
for (size_t i = 0; i < param_offset.size(); ++i) {
crop_param->offset_[i] = param_offset[i];
}
return reinterpret_cast<OpParameter *>(crop_param);

@@ -43,8 +43,8 @@ class CaffePreluBaseCPUKernel : public LiteKernel {
int Run() override { return 0; }
protected:
int thread_count_;
const Context *ctx_;
int thread_count_;
CaffePreluParameter *prelu_param_;
};
} // namespace mindspore::kernel

@@ -43,9 +43,9 @@ class ConcatBaseCPUKernel : public LiteKernel {
int Run() override { return 0; }
protected:
int thread_count_;
int axis_;
const Context *ctx_;
int thread_count_;
ConcatParameter *concat_param_ = nullptr;
};
} // namespace mindspore::kernel

@@ -121,7 +121,7 @@ int ConvolutionBaseCPUKernel::SetIfPerChannel() {
uint8_t per_channel = 0b0;
if (conv_quant_arg_->input_arg_num_ != kPerTensor) {
int in_channel = conv_param_->input_channel_;
if (conv_quant_arg_->input_arg_num_ != in_channel) {
if (static_cast<int>(conv_quant_arg_->input_arg_num_) != in_channel) {
MS_LOG(ERROR) << "input per channel quant param length is not equal to input channel.";
return RET_ERROR;
}
@@ -130,7 +130,7 @@ int ConvolutionBaseCPUKernel::SetIfPerChannel() {
if (conv_quant_arg_->filter_arg_num_ != kPerTensor) {
int filter_num = conv_param_->output_channel_;
if (conv_quant_arg_->filter_arg_num_ != filter_num) {
if (static_cast<int>(conv_quant_arg_->filter_arg_num_) != filter_num) {
MS_LOG(ERROR) << "weight per channel quant param length is not equal to filter num.";
return RET_ERROR;
}
@@ -139,7 +139,7 @@ int ConvolutionBaseCPUKernel::SetIfPerChannel() {
if (conv_quant_arg_->output_arg_num_ != kPerTensor) {
int out_channel = conv_param_->output_channel_;
if (conv_quant_arg_->output_arg_num_ != out_channel) {
if (static_cast<int>(conv_quant_arg_->output_arg_num_) != out_channel) {
MS_LOG(ERROR) << "output per channel quant param length is not equal to output channel.";
return RET_ERROR;
}
@@ -218,11 +218,6 @@ int ConvolutionBaseCPUKernel::SetInputTensorQuantParam() {
// per channel
MS_LOG(ERROR) << "Not Support Per Channel for input now.";
return RET_ERROR;
// auto input_quant_arg = input_tensor->GetQuantParams();
// for (int i = 0; i < in_arg_num; ++i) {
// conv_quant_arg_->input_quant_args_[i].zp_ = input_quant_arg[i].zeroPoint;
// conv_quant_arg_->input_quant_args_[i].scale_ = input_quant_arg[i].scale;
// }
}
return RET_OK;
}
@@ -236,7 +231,7 @@ int ConvolutionBaseCPUKernel::SetFilterTensorQuantParam() {
conv_quant_arg_->filter_quant_args_[0].scale_ = weight_quant_arg.scale;
} else {
auto weight_quant_arg = weight_tensor->GetQuantParams();
for (int i = 0; i < weight_arg_num; ++i) {
for (size_t i = 0; i < weight_arg_num; ++i) {
conv_quant_arg_->filter_quant_args_[i].zp_ = weight_quant_arg[i].zeroPoint;
conv_quant_arg_->filter_quant_args_[i].scale_ = weight_quant_arg[i].scale;
}
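In the SetIfPerChannel checks above the quant-arg counts (input_arg_num_ and friends) are unsigned while the channel counts are plain int, so the comparison itself is what warns; the fix casts the unsigned side at the point of use instead of changing the field types. A small sketch with hypothetical names:

#include <cstddef>

// The unsigned count is cast to int so the comparison has one signedness;
// without the cast, -Wsign-compare fires and -Werror stops the build.
bool PerChannelCountMatches(std::size_t arg_num, int channel_num) {
  return static_cast<int>(arg_num) == channel_num;
}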

@@ -62,11 +62,11 @@ class ConvolutionBaseCPUKernel : public LiteKernel {
void FreeQuantParam();
protected:
int thread_count_;
int tile_num_;
void *bias_data_ = nullptr;
void *nhwc4_input_ = nullptr;
const Context *ctx_;
int thread_count_;
ConvParameter *conv_param_;
ConvQuantArg *conv_quant_arg_;
LayoutConvertor convert_func_;

@@ -41,9 +41,9 @@ class FullconnectionBaseCPUKernel : public LiteKernel {
protected:
MatMulParameter *fc_param_;
int thread_count_;
int thread_stride_;
const Context *ctx_;
int thread_count_;
};
} // namespace mindspore::kernel

@@ -41,9 +41,9 @@ class MatmulBaseCPUKernel : public LiteKernel {
protected:
MatMulParameter *params_;
int thread_count_;
int thread_stride_;
const Context *ctx_;
int thread_count_;
};
} // namespace mindspore::kernel

@@ -43,8 +43,8 @@ class PoolingBaseCPUKernel : public LiteKernel {
void FreeQuantParam();
protected:
int thread_count_;
const Context *ctx_;
int thread_count_;
PoolingParameter *pooling_param_;
QuantArg **pooling_quant_arg_ = nullptr;
};

@@ -41,8 +41,8 @@ class PriorBoxCPUKernel : public LiteKernel {
int PriorBoxImpl(int task_id);
protected:
int thread_count_;
const Context *ctx_;
int thread_count_;
private:
std::vector<float> output_;

@@ -76,7 +76,7 @@ int ReduceBaseCPUKernel::CheckParameters() {
}
if (num_axes_ == 0) {
for (int i = 0; i < input_rank; i++) {
for (size_t i = 0; i < input_rank; i++) {
axes_[i] = i;
}
num_axes_ = static_cast<int>(input_rank);

@@ -45,7 +45,7 @@ int SliceBaseCPUKernel::ReSize() {
param_->begin_[DIMENSION_4D - j] = param_->begin_[i];
param_->size_[DIMENSION_4D - j] = param_->size_[i];
}
for (size_t i = 0; i < DIMENSION_4D - param_->param_length_; i++) {
for (int i = 0; i < DIMENSION_4D - param_->param_length_; i++) {
param_->begin_[i] = 0;
param_->size_[i] = 1;
}

Some files were not shown because too many files have changed in this diff.