diff --git a/build.sh b/build.sh
index a4402f03dd..094f0d1de1 100755
--- a/build.sh
+++ b/build.sh
@@ -616,7 +616,7 @@ build_lite()
           -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
           -DBUILD_DEVICE=on -DPLATFORM_ARM32=on -DENABLE_NEON=on -DSUPPORT_TRAIN=${SUPPORT_TRAIN} -DBUILD_CONVERTER=off \
           -DSUPPORT_GPU=${ENABLE_GPU} -DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \
-          -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp "${BASEPATH}/mindspore/lite"
+          -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp "${BASEPATH}/mindspore/lite"
     else
         cmake -DBUILD_DEVICE=on -DPLATFORM_ARM64=off -DBUILD_CONVERTER=${ENABLE_CONVERTER} -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \
         -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSUPPORT_GPU=${ENABLE_GPU} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \
diff --git a/mindspore/lite/src/ops/addn.cc b/mindspore/lite/src/ops/addn.cc
index 91ad51afc7..5795384365 100644
--- a/mindspore/lite/src/ops/addn.cc
+++ b/mindspore/lite/src/ops/addn.cc
@@ -48,7 +48,7 @@ int AddN::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
-  for (int i = 1; i < inputs.size(); ++i) {
+  for (size_t i = 1; i < inputs.size(); ++i) {
     if (inputs.at(i)->shape() != inputs.at(0)->shape()) {
       MS_LOG(ERROR) << "AddN inputs shape is not equal!";
       return RET_INPUT_TENSOR_ERROR;
diff --git a/mindspore/lite/src/ops/argmax.cc b/mindspore/lite/src/ops/argmax.cc
index 703e970ffd..9d7a7d225f 100644
--- a/mindspore/lite/src/ops/argmax.cc
+++ b/mindspore/lite/src/ops/argmax.cc
@@ -63,7 +63,7 @@ int ArgMax::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
   std::vector<int> output_shape(input->shape());
   auto input_shape_size = input->shape().size();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
   if (axis >= input_shape_size || axis < 0) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size;
     return RET_PARAM_INVALID;
diff --git a/mindspore/lite/src/ops/argmin.cc b/mindspore/lite/src/ops/argmin.cc
index 349418a0b4..e9d4599558 100644
--- a/mindspore/lite/src/ops/argmin.cc
+++ b/mindspore/lite/src/ops/argmin.cc
@@ -61,7 +61,7 @@ int ArgMin::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
     return RET_OK;
   }
   auto input_shape_size = input->shape().size();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
   if (axis >= input_shape_size || axis < 0) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size;
     return RET_PARAM_INVALID;
diff --git a/mindspore/lite/src/ops/arithmetic.cc b/mindspore/lite/src/ops/arithmetic.cc
index cdd775dc79..9e34d9d6a5 100644
--- a/mindspore/lite/src/ops/arithmetic.cc
+++ b/mindspore/lite/src/ops/arithmetic.cc
@@ -55,7 +55,7 @@ int Arithmetic::InferShape(std::vector<tensor::Tensor *> inputs_, std::vec
     ndim_ = input_shape1.size();
     auto fill_dim_num = input_shape1.size() - input_shape0.size();
     int j = 0;
-    for (int i = 0; i < input_shape1.size(); i++) {
+    for (size_t i = 0; i < input_shape1.size(); i++) {
       if (i < fill_dim_num) {
         in_shape0_[i] = 1;
       } else {
@@ -68,7 +68,7 @@ int Arithmetic::InferShape(std::vector<tensor::Tensor *> inputs_, std::vec
     ndim_ = input_shape0.size();
     auto fill_dim_num = input_shape0.size() - input_shape1.size();
     int j = 0;
-    for (int i = 0; i < input_shape0.size(); i++) {
+    for (size_t i = 0; i < input_shape0.size(); i++) {
       if (i < fill_dim_num) {
         in_shape1_[i] = 1;
       } else {
@@ -77,14 +77,14 @@ int Arithmetic::InferShape(std::vector<tensor::Tensor *> inputs_, std::vec
       in_shape0_[i] = input_shape0[i];
     }
   } else {
-    for (int i = 0; i < input_shape0.size(); i++) {
+    for (size_t i = 0; i < input_shape0.size(); i++) {
       in_shape1_[i] = input_shape1[i];
       in_shape0_[i] = input_shape0[i];
     }
   }
   std::vector<int> output_shape;
-  for (size_t i = 0; i < ndim_; i++) {
+  for (int i = 0; i < ndim_; i++) {
     if (in_shape0_[i] != in_shape1_[i]) {
       if (in_shape0_[i] == 1) {
         out_shape_[i] = in_shape1_[i];
diff --git a/mindspore/lite/src/ops/batch_to_space.cc b/mindspore/lite/src/ops/batch_to_space.cc
index d9f9ebf792..cd1c897b3b 100644
--- a/mindspore/lite/src/ops/batch_to_space.cc
+++ b/mindspore/lite/src/ops/batch_to_space.cc
@@ -85,7 +85,7 @@ int BatchToSpace::InferShape(std::vector<tensor::Tensor *> inputs, std::ve
     MS_LOG(ERROR) << "Crops size should be " << kCropsSize;
     return RET_PARAM_INVALID;
   }
-  size_t mul_block_shape = 1;
+  int mul_block_shape = 1;
 
   for (size_t i = 0; i < kBlockShapeSize; ++i) {
     if (block_shape[i] <= 0) {
diff --git a/mindspore/lite/src/ops/concat.cc b/mindspore/lite/src/ops/concat.cc
index 3e3d7aa2ec..2e724b82d4 100644
--- a/mindspore/lite/src/ops/concat.cc
+++ b/mindspore/lite/src/ops/concat.cc
@@ -58,7 +58,7 @@ int Concat::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
   auto input0_shape = inputs_.at(0)->shape();
-  int axis = GetAxis() < 0 ? GetAxis() + input0_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input0_shape.size() : GetAxis();
   if (axis < 0 || axis >= input0_shape.size()) {
     MS_LOG(ERROR) << "Invalid axis: " << axis;
     return RET_PARAM_INVALID;
diff --git a/mindspore/lite/src/ops/embedding_lookup.cc b/mindspore/lite/src/ops/embedding_lookup.cc
index c1aa76a4bb..fa8e8fa094 100644
--- a/mindspore/lite/src/ops/embedding_lookup.cc
+++ b/mindspore/lite/src/ops/embedding_lookup.cc
@@ -58,7 +58,7 @@ int EmbeddingLookup::InferShape(std::vector<tensor::Tensor *> inputs_, std::vect
   for (size_t i = 0; i < embedding_shape.size(); ++i) {
     output_shape.push_back(embedding_shape.at(i));
   }
-  for (int i = 1; i < inputs_.size() - 1; ++i) {
+  for (size_t i = 1; i < inputs_.size() - 1; ++i) {
     auto embedding_shape_t = inputs_.at(i)->shape();
     embedding_shape_t.erase(embedding_shape_t.begin());
     if (embedding_shape_t != embedding_shape) {
diff --git a/mindspore/lite/src/ops/expand_dims.cc b/mindspore/lite/src/ops/expand_dims.cc
index a05ea72439..36cc4d8064 100644
--- a/mindspore/lite/src/ops/expand_dims.cc
+++ b/mindspore/lite/src/ops/expand_dims.cc
@@ -51,7 +51,7 @@ int ExpandDims::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
     dim += input->shape().size() + 1;
   }
-  if (dim > input->shape().size()) {
+  if (dim > static_cast<int>(input->shape().size())) {
     MS_LOG(ERROR) << "attribute dim out of range";
     return RET_INPUT_TENSOR_ERROR;
   }
diff --git a/mindspore/lite/src/ops/flatten.cc b/mindspore/lite/src/ops/flatten.cc
index f5a9606838..b9259b8c4f 100644
--- a/mindspore/lite/src/ops/flatten.cc
+++ b/mindspore/lite/src/ops/flatten.cc
@@ -42,7 +42,7 @@ int Flatten::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
   std::vector<int> output_shape(2);
   output_shape[0] = input_shape[0];
   output_shape[1] = 1;
-  for (int i = 1; i < input_shape.size(); i++) {
+  for (size_t i = 1; i < input_shape.size(); i++) {
     output_shape[1] *= input_shape[i];
   }
   output->set_shape(output_shape);
diff --git a/mindspore/lite/src/ops/full_connection.cc b/mindspore/lite/src/ops/full_connection.cc
index 8f7bb73828..985fa76f0c 100644
--- a/mindspore/lite/src/ops/full_connection.cc
+++ b/mindspore/lite/src/ops/full_connection.cc
@@ -60,7 +60,7 @@ int FullConnection::InferShape(std::vector<tensor::Tensor *> inputs_,
     MS_LOG(ERROR) << "Input tensors num error";
     return 1;
   }
-  if (GetAxis() < 1 || GetAxis() > input0->shape().size()) {
+  if (GetAxis() < 1 || GetAxis() > static_cast<int>(input0->shape().size())) {
     MS_LOG(ERROR) << "FullConnection axis invalid";
     return 1;
   }
diff --git a/mindspore/lite/src/ops/gather.cc b/mindspore/lite/src/ops/gather.cc
index 9f7c5ab435..4e8bac1fa8 100644
--- a/mindspore/lite/src/ops/gather.cc
+++ b/mindspore/lite/src/ops/gather.cc
@@ -83,7 +83,7 @@ int Gather::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
   std::vector<int> out_shape{in_shape};
   out_shape.erase(out_shape.begin() + axis);
-  for (size_t i = 0; i < indices_rank; i++) {
+  for (int i = 0; i < indices_rank; i++) {
     out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
   }
   output->set_shape(out_shape);
diff --git a/mindspore/lite/src/ops/matmul.cc b/mindspore/lite/src/ops/matmul.cc
index 67fc4fee27..51cc51d327 100644
--- a/mindspore/lite/src/ops/matmul.cc
+++ b/mindspore/lite/src/ops/matmul.cc
@@ -56,7 +56,7 @@ int MatMul::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
-  for (int i = 0; i < a_shape.size() - 2; ++i) {
+  for (size_t i = 0; i < a_shape.size() - 2; ++i) {
     if (a_shape[i] != b_shape[i]) {
diff --git a/mindspore/lite/src/ops/mean.cc b/mindspore/lite/src/ops/mean.cc
--- a/mindspore/lite/src/ops/mean.cc
+++ b/mindspore/lite/src/ops/mean.cc
@@ ... @@ int Mean::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
-      if (axes[idx] == i) {
+      if (static_cast<size_t>(axes[idx]) == i) {
         reduce_axis = true;
         break;
diff --git a/mindspore/lite/src/ops/prior_box.cc b/mindspore/lite/src/ops/prior_box.cc
index 1b972e18c0..ec7cb3d471 100644
--- a/mindspore/lite/src/ops/prior_box.cc
+++ b/mindspore/lite/src/ops/prior_box.cc
@@ -110,7 +110,7 @@ int PriorBox::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
   std::vector<float> different_aspect_ratios{1.0f};
   auto aspect_ratios = GetAspectRatios();
   MS_ASSERT(aspect_ratios != nullptr);
-  for (auto i = 0; i < aspect_ratios.size(); i++) {
+  for (size_t i = 0; i < aspect_ratios.size(); i++) {
     float ratio = aspect_ratios[i];
     bool exist = std::any_of(different_aspect_ratios.begin(), different_aspect_ratios.end(),
                              [&](float v) { return abs(ratio - v) < 1e-6; });
diff --git a/mindspore/lite/src/ops/reduce.cc b/mindspore/lite/src/ops/reduce.cc
index 2fa00dd966..9e887c9b6a 100644
--- a/mindspore/lite/src/ops/reduce.cc
+++ b/mindspore/lite/src/ops/reduce.cc
@@ -71,7 +71,7 @@ int Reduce::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
-      if (axes[idx] == i || axes[idx] + in_shape.size() == i) {
+      if (static_cast<size_t>(axes[idx]) == i || static_cast<size_t>(axes[idx] + in_shape.size()) == i) {
         reduce_axis = true;
         break;
diff --git a/mindspore/lite/src/ops/reshape.cc b/mindspore/lite/src/ops/reshape.cc
index e9662f22b0..fd136d0bbb 100644
--- a/mindspore/lite/src/ops/reshape.cc
+++ b/mindspore/lite/src/ops/reshape.cc
@@ -80,15 +80,15 @@ void CalShape(const T *data, const std::vector<tensor::Tensor *> &inputs, std::v
   int input_count = inputs[0]->ElementsNum();
   int index = 0;
   int size = 1;
-  for (size_t i = 0; i < shape_size; i++) {
-    if (data[i] == -1) {
+  for (int i = 0; i < shape_size; i++) {
+    if (static_cast<int>(data[i]) == -1) {
       index = i;
     } else {
       size *= data[i];
     }
     out_shape->push_back(data[i]);
   }
-  if (data[index] == -1) {
+  if (static_cast<int>(data[index]) == -1) {
     (*out_shape)[index] = input_count / size;
   }
 }
diff --git a/mindspore/lite/src/ops/slice.cc b/mindspore/lite/src/ops/slice.cc
index 3190bb57dd..a221a11eac 100644
--- a/mindspore/lite/src/ops/slice.cc
+++ b/mindspore/lite/src/ops/slice.cc
@@ -67,7 +67,7 @@ int SliceOp::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
   std::vector<int> slice_begin(GetBegin().begin(), GetBegin().end());
   std::vector<int> slice_size(GetSize().begin(), GetSize().end());
   std::vector<int> output_shape(input_shape.size());
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     if (slice_size[i] < 0 && slice_size[i] != -1) {
       MS_LOG(ERROR) << "Invalid size input!size[" << i << "]=" << slice_size[i];
       return RET_PARAM_INVALID;
diff --git a/mindspore/lite/src/ops/split.cc b/mindspore/lite/src/ops/split.cc
index 4b48debd83..ab17e87b71 100644
--- a/mindspore/lite/src/ops/split.cc
+++ b/mindspore/lite/src/ops/split.cc
@@ -62,7 +62,7 @@ int Split::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
   int number_split = spilt_prim->numberSplit();
-  if (outputs_.size() != number_split) {
+  if (static_cast<int>(outputs_.size()) != number_split) {
     MS_LOG(ERROR) << "outputs number is not equal to " << number_split;
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/ops/squeeze.cc b/mindspore/lite/src/ops/squeeze.cc
index 6669429a42..2496c77fd9 100644
--- a/mindspore/lite/src/ops/squeeze.cc
+++ b/mindspore/lite/src/ops/squeeze.cc
@@ -62,15 +62,15 @@ int Squeeze::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
-    for (int i = 0; i < in_shape.size(); i++) {
-      if (axisIdx < axes_.size() && axes_[axisIdx] == i) {
+    for (size_t i = 0; i < in_shape.size(); i++) {
+      if (axisIdx < axes_.size() && axes_[axisIdx] == static_cast<int>(i)) {
         MS_ASSERT(in_shape[i] == 1);
         axisIdx++;
         continue;
diff --git a/mindspore/lite/src/ops/stack.cc b/mindspore/lite/src/ops/stack.cc
index 2134ac03ef..c87bd86dc7 100644
--- a/mindspore/lite/src/ops/stack.cc
+++ b/mindspore/lite/src/ops/stack.cc
@@ -64,7 +64,7 @@ int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
   auto input_shape = input->shape();
   std::vector<int> output_shape = input_shape;
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
   if (axis < 0 || axis > input_shape.size()) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis();
     return RET_PARAM_INVALID;
diff --git a/mindspore/lite/src/ops/strided_slice.cc b/mindspore/lite/src/ops/strided_slice.cc
index dc2dda089c..80714673a4 100644
--- a/mindspore/lite/src/ops/strided_slice.cc
+++ b/mindspore/lite/src/ops/strided_slice.cc
@@ -89,7 +89,7 @@ constexpr int kStridedSliceInputNum = 1;
 }  // namespace
 
 void StridedSlice::ApplyNewAxisMask() {
-  for (int i = 0; i < new_axis_mask_.size(); i++) {
+  for (size_t i = 0; i < new_axis_mask_.size(); i++) {
     if (new_axis_mask_.at(i)) {
       ndim_ += 1;
       in_shape_.insert(in_shape_.begin() + i, 1);
@@ -112,7 +112,7 @@ void StridedSlice::ApplyNewAxisMask() {
 std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
   auto old_out_shape = out_shape;
   out_shape.clear();
-  for (int i = 0; i < shrink_axis_mask_.size(); i++) {
+  for (size_t i = 0; i < shrink_axis_mask_.size(); i++) {
     if (shrink_axis_mask_.at(i)) {
       ends_.at(i) = begins_.at(i) + 1;
       strides_.at(i) = 1;
@@ -120,7 +120,7 @@ std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
       out_shape.emplace_back(old_out_shape.at(i));
     }
   }
-  for (int i = shrink_axis_mask_.size(); i < old_out_shape.size(); i++) {
+  for (size_t i = shrink_axis_mask_.size(); i < old_out_shape.size(); i++) {
     out_shape.emplace_back(old_out_shape.at(i));
   }
   return out_shape;
@@ -128,7 +128,7 @@ std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
 
 /*only one bit will be used if multiple bits are true.*/
 void StridedSlice::ApplyEllipsisMask() {
-  for (int i = 0; i < ellipsis_mask_.size(); i++) {
+  for (size_t i = 0; i < ellipsis_mask_.size(); i++) {
     if (ellipsis_mask_.at(i)) {
       begins_.at(i) = 0;
       ends_.at(i) = in_shape_.at(i);
@@ -204,7 +204,7 @@ int StridedSlice::InferShape(std::vector<tensor::Tensor *> inputs, std::ve
   output_shape.clear();
   output_shape.resize(in_shape_.size());
-  for (int i = 0; i < in_shape_.size(); i++) {
+  for (int i = 0; i < static_cast<int>(in_shape_.size()); i++) {
     if (i < ndim_ && new_axis_mask_.at(i)) {
       output_shape.at(i) = 1;
     } else {
diff --git a/mindspore/lite/src/ops/transpose.cc b/mindspore/lite/src/ops/transpose.cc
index f8830d26ca..67a223f4c9 100644
--- a/mindspore/lite/src/ops/transpose.cc
+++ b/mindspore/lite/src/ops/transpose.cc
@@ -63,7 +63,7 @@ int Transpose::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
   std::vector<int> in_shape = input->shape();
   std::vector<int> out_shape;
   out_shape.resize(perm.size());
-  for (int i = 0; i < perm.size(); ++i) {
+  for (size_t i = 0; i < perm.size(); ++i) {
     out_shape[i] = in_shape[perm[i]];
   }
   output->set_shape(out_shape);
diff --git a/mindspore/lite/src/ops/unsqueeze.cc b/mindspore/lite/src/ops/unsqueeze.cc
index ab384aa833..1d7f682f4c 100644
--- a/mindspore/lite/src/ops/unsqueeze.cc
+++ b/mindspore/lite/src/ops/unsqueeze.cc
@@ -67,10 +67,10 @@ int Unsqueeze::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
-  for (int i = 0; i < sz; i++) {
-    if (ax_itr < dim_rank && dims[ax_itr] == i) {
+  for (size_t i = 0; i < sz; i++) {
+    if (ax_itr < dim_rank && dims[ax_itr] == static_cast<int>(i)) {
       out_shape.emplace_back(1);
       ax_itr++;
     } else if (ax_itr < dim_rank && dims[ax_itr] + sz == i) {
diff --git a/mindspore/lite/src/ops/unstack.cc b/mindspore/lite/src/ops/unstack.cc
index 0b3f737db2..7119f18019 100644
--- a/mindspore/lite/src/ops/unstack.cc
+++ b/mindspore/lite/src/ops/unstack.cc
@@ -39,7 +39,7 @@ int Unstack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
   auto input_shape = input->shape();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
   if (axis < 0 || axis >= input_shape.size()) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis();
     return RET_PARAM_INVALID;
diff --git a/mindspore/lite/src/ops/where.cc b/mindspore/lite/src/ops/where.cc
index 03dbf1beb3..35bc3056dd 100644
--- a/mindspore/lite/src/ops/where.cc
+++ b/mindspore/lite/src/ops/where.cc
@@ -66,8 +66,8 @@ int Where::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
   auto shape_tmp1 = inputs_.at(1)->shape();
   auto shape_tmp2 = inputs_.at(2)->shape();
   int axisout = 0;
-  int temp = 0;
-  for (int j = 0; j < shape_tmp.size(); j++) {
+  size_t temp = 0;
+  for (size_t j = 0; j < shape_tmp.size(); j++) {
     if (shape_tmp[j] == shape_tmp1[j] && shape_tmp[j] != shape_tmp2[j]) {
       axisout = j;
       break;
diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc
index b4541097ff..faf717b7ef 100644
--- a/mindspore/lite/tools/anf_exporter/anf_exporter.cc
+++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc
@@ -118,7 +118,7 @@ int AnfExporter::ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &me
   // activation
   auto input_quant_params = primitive->GetInputQuantParams();
   auto node_type = primitive->GetPrimitiveT()->value.type;
-  for (int i = 0; i < input_quant_params.size(); i++) {
+  for (size_t i = 0; i < input_quant_params.size(); i++) {
     if (i >= dst_node->inputIndex.size()) {
       MS_LOG(ERROR) << "node: " << dst_node->name << " input has " << input_quant_params.size()
                     << " quant_params; but only " << dst_node->inputIndex.size() << " input";
@@ -375,7 +375,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
   if (utils::isa<abstract::AbstractTuple>(cnode->abstract())) {
     auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(cnode->abstract());
-    for (int i = 0; i < tuple->size(); i++) {
+    for (size_t i = 0; i < tuple->size(); i++) {
       auto msTensor = new schema::TensorT();
       msTensor->nodeType = schema::NodeType_Parameter;
       fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size());
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.cc
index 30fcea3566..66b8481ea9 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.cc
@@ -136,7 +136,7 @@ STATUS EltwiseFormatTransPass::Run(schema::MetaGraphT *graph) {
 
   STATUS status = RET_OK;
   auto input_tensor_size = (*iter)->inputIndex.size();
-  for (auto i = 0; i < input_tensor_size; i++) {
+  for (size_t i = 0; i < input_tensor_size; i++) {
     iter = InsertFormatTransNode(graph, iter, kBefore, i, pre_insert_trans_type_, &status);
     if (status != RET_OK) {
       MS_LOG(ERROR) << "Insert" << pre_insert_trans_type_ << "before " << (*iter)->name << " failed";
@@ -144,7 +144,7 @@ STATUS EltwiseFormatTransPass::Run(schema::MetaGraphT *graph) {
     }
   }
   auto output_tensor_size = (*iter)->outputIndex.size();
-  for (auto i = 0; i < output_tensor_size; i++) {
+  for (size_t i = 0; i < output_tensor_size; i++) {
     iter = InsertFormatTransNode(graph, iter, kAfter, i, post_insert_trans_type_, &status);
     if (status != RET_OK) {
       MS_LOG(ERROR) << "Insert" << post_insert_trans_type_ << "Node before " << (*iter)->name << " failed";
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc
index 7b22fd2d95..b126089ac7 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc
@@ -37,16 +37,15 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
   // check bottom size
   if (proto.bottom_size() != CAFFE_BATCHNORMAL_BOTTOM_SIZE) {
-    // MS_LOGE("Layer %s bottom numbers is error, it must be %d, but is %d", proto.name().c_str(),
-    // CAFFE_BATCHNORMAL_BOTTOM_SIZE, proto.bottom_size());
+    MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be " \
+                  << CAFFE_BATCHNORMAL_BOTTOM_SIZE << "but is " << proto.bottom_size();
     return RET_ERROR;
   }
 
   // check top size
   if (proto.top_size() != CAFFE_BATCHNORMAL_TOP_SIZE) {
-    // MS_LOGE("Layer %s top numbers is error, it must be %d, but is %d", \
-    //         proto.name().c_str(), CAFFE_BATCHNORMAL_TOP_SIZE,
-    //         proto.top_size());
+    MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be " \
+                  << CAFFE_BATCHNORMAL_TOP_SIZE << "but is " << proto.top_size();
     return RET_ERROR;
   }
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc
index 18c4337b8f..6c5428fc9b 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc
@@ -23,7 +23,7 @@ STATUS CaffeInspector::InspectModel(const caffe::NetParameter &proto) {
   net = proto;
 
   if (proto.layer_size() == 0) {
-    // MS_LOGE("net layer num is zero, prototxt file may be invalid.");
+    MS_LOG(ERROR) << "net layer num is zero, prototxt file may be invalid.";
     return RET_ERROR;
   }
 
@@ -32,12 +32,13 @@ STATUS CaffeInspector::InspectModel(const caffe::NetParameter &proto) {
   SetTopsAndBottoms();
 
   FindInputAndOutput();
+  return RET_OK;
 }
 
 STATUS CaffeInspector::ParseInput() {
   if (net.input_size() > 0) {
-    // MS_LOGI("This net exist input.");
-    for (int i = 0; i < net.input_size(); i++) {
+    MS_LOG(INFO) << "This net exist input.";
+    for (size_t i = 0; i < net.input_size(); i++) {
       graphInput.insert(net.input(i));
     }
   }
@@ -55,6 +56,7 @@ STATUS CaffeInspector::FindInputAndOutput() {
       graphOutput.insert(iter);
     }
   }
+  return RET_OK;
 }
 
 STATUS CaffeInspector::SetTopsAndBottoms() {
@@ -73,6 +75,7 @@ STATUS CaffeInspector::SetTopsAndBottoms() {
       layerBottoms.insert(layer.bottom(j));
     }
   }
+  return RET_OK;
 }
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc
index fbee603b14..b82d55446e 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc
@@ -95,6 +95,7 @@ STATUS ConvertShape(const caffe::BlobProto &proto, std::vector<int32_t> *shape)
       shape->push_back(proto.shape().dim(i));
     }
   }
+  return RET_OK;
 }
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc
index b88666fca9..8c563d3bd9 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc
@@ -49,7 +49,7 @@ STATUS TfliteAddNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
   op->primitive->value.value = attr.release();
 
   // set input
-  for (int i = 0; i < tflite_op->inputs.size(); i++) {
+  for (size_t i = 0; i < tflite_op->inputs.size(); i++) {
     AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(),
                tflite_tensors.size(), schema::Format_NHWC);
   }
diff --git a/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc b/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc
index 6e1e59cab2..4e8897c659 100644
--- a/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc
+++ b/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc
@@ -74,7 +74,7 @@ void BitPack::BitPacking(const std::vector<uint8_t>& originDataVec, std::vector<
   size_t remainBitData = bitDataVec.size();
   if (8 > remainBitData && remainBitData > 0) {
-    for (int i = 0; i < 8 - remainBitData; i++) {
+    for (size_t i = 0; i < 8 - remainBitData; i++) {
       bitDataVec.push(0);
     }
     PackFromOriginToUint8(bitDataVec, packedDataVec);
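
Nearly every hunk in this patch silences a `-Wsign-compare` warning, either by making the loop index `size_t` when it is compared against an STL container's `size()`, or by pinning one side of the comparison with an explicit `static_cast`. The standalone sketch below illustrates the failure mode these changes guard against; the names (`shape`, `axis`) are illustrative only and do not come from the MindSpore sources:

```cpp
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> shape{1, 3, 224, 224};

  // 1. The noisy-but-correct case: 'i' is converted to size_t before each
  //    comparison. Fine while i >= 0, but -Wsign-compare flags it.
  for (int i = 0; i < shape.size(); ++i) {
  }

  // 2. The genuinely dangerous case: a negative signed value converts to a
  //    huge unsigned one, so the bounds check silently inverts.
  int axis = -1;
  if (axis < shape.size()) {
    std::printf("in range\n");  // never printed: (size_t)-1 > 4
  }

  // The two fixes used throughout the patch:
  for (size_t i = 0; i < shape.size(); ++i) {  // index carried as size_t
  }
  if (axis >= 0 && axis < static_cast<int>(shape.size())) {  // pinned to int
    std::printf("in range\n");
  }
  return 0;
}
```

One side effect worth noting: in the hunks that rewrite `int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();` with `auto`, the conditional expression's common type is unsigned whenever `input_shape_size` is a `size_t`, so `axis` itself becomes unsigned and a subsequent `axis < 0` test can never fire; a negative result is still rejected, but only because it wraps around and trips the `axis >= input_shape_size` upper-bound check.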