!8108 modify files for static check

Merge pull request !8108 from lyvette/check_master
pull/8108/MERGE
mindspore-ci-bot authored 4 years ago, committed by Gitee
commit ea001d330f

@@ -16,7 +16,7 @@
 #include "src/common/file_utils.h"
 #include <fcntl.h>
-#include <stdlib.h>
+#include <cstdlib>
 #include <climits>
 #include <cmath>
 #include "securec/include/securec.h"
@@ -107,7 +107,7 @@ int CompareOutputData(const float *output_data, size_t output_size, const float
   return 0;
 }
-int CompareOutput(const float *output_data, size_t output_num, std::string file_path) {
+int CompareOutput(const float *output_data, size_t output_num, const std::string &file_path) {
   size_t ground_truth_size = 0;
   auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &ground_truth_size));
   size_t ground_truth_num = ground_truth_size / sizeof(float);

@@ -59,7 +59,7 @@ inline int WriteToBin(const std::string &file_path, void *data, size_t size) {
 }
 int CompareOutputData(const float *output_data, size_t output_num, const float *correct_data, size_t data_size);
-int CompareOutput(const float *output_data, size_t output_num, std::string file_path);
+int CompareOutput(const float *output_data, size_t output_num, const std::string &file_path);
 std::string GetAndroidPackageName();
 std::string GetAndroidPackagePath();
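
The two CompareOutput hunks above switch the file_path parameter from pass-by-value to a const reference. Passing std::string by value copies the whole buffer on every call; when the callee only reads the argument, static checks (for example clang-tidy's performance-unnecessary-value-param) suggest const std::string &. A small sketch of the difference, with hypothetical function names:

#include <cstddef>
#include <string>

// Copies the argument on every call.
std::size_t ByteCountByValue(std::string path) { return path.size(); }

// Binds to the caller's string with no copy; the function only reads it.
std::size_t ByteCountByRef(const std::string &path) { return path.size(); }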

@@ -14,8 +14,8 @@
  * limitations under the License.
  */
-#include "src/common/string_util.h"
 #include <algorithm>
+#include "src/common/string_util.h"
 #include "include/ms_tensor.h"
 namespace mindspore {
@@ -35,7 +35,7 @@ std::vector<StringPack> ParseStringBuffer(const void *data) {
     MS_LOG(ERROR) << "data is nullptr";
     return buffer;
   }
-  const int32_t *offset = reinterpret_cast<const int32_t *>(data);
+  const auto *offset = reinterpret_cast<const int32_t *>(data);
   int32_t num = *offset;
   for (int i = 0; i < num; i++) {
     offset += 1;
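
Spelling the pointer as const auto * avoids repeating a type that already appears on the right-hand side of the reinterpret_cast, which is what modernize-use-auto style checks recommend; the later `auto *node = new (std::nothrow) Model::Node()` change follows the same rule. A standalone sketch (hypothetical helper, assumes data points at one readable int32_t):

#include <cstdint>

int32_t ReadFirstValue(const void *data) {
  // The pointee type is written once, inside the cast.
  const auto *values = reinterpret_cast<const int32_t *>(data);
  return values[0];
}
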
@@ -59,7 +59,7 @@ int WriteStringsToTensor(Tensor *tensor, const std::vector<StringPack> &string_b
     return RET_ERROR;
   }
-  int32_t *string_info = reinterpret_cast<int32_t *>(data);
+  auto *string_info = reinterpret_cast<int32_t *>(data);
   char *string_data = reinterpret_cast<char *>(data);
   string_info[0] = num;
@@ -140,13 +140,13 @@ static uint64_t k1 = 0xb492b66fbe98f273ULL;
 static uint64_t k2 = 0x9ae16a3b2f90404fULL;
 uint64_t Fetch64Bit(const char *p) {
-  uint64_t result;
+  uint64_t result = 0;
   memcpy(&result, p, sizeof(uint64_t));
   return result;
 }
 uint32_t Fetch32Bit(const char *p) {
-  uint32_t result;
+  uint32_t result = 0;
   memcpy(&result, p, sizeof(uint32_t));
   return result;
 }
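
Fetch64Bit and Fetch32Bit overwrite result completely via memcpy, so the added initializers do not change behaviour; they silence uninitialized-local diagnostics (e.g. cppcoreguidelines-init-variables) and keep the value defined on every path. A sketch of the same memcpy-based unaligned-load pattern, assuming the caller guarantees sizeof(uint64_t) readable bytes at p:

#include <cstdint>
#include <cstring>

uint64_t LoadUnaligned64(const char *p) {
  uint64_t result = 0;                      // defined even before the copy
  std::memcpy(&result, p, sizeof(result));  // alignment- and aliasing-safe read
  return result;
}
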
@@ -226,7 +226,7 @@ std::pair<uint64_t, uint64_t> HashLen32WithSeeds(const char *s, uint64_t a, uint
 }
 uint64_t StringHash64(const char *s, size_t len) {
-  uint64_t seed_value = 81;
+  const uint64_t seed_value = 81;
   if (len <= 16) {
     return HashStringLen0to16(s, len);
   } else if (len <= 32) {

@@ -13,7 +13,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 #include "src/model_common.h"
 #include "include/version.h"
 #ifndef PRIMITIVE_WRITEABLE
@@ -23,7 +22,7 @@
 namespace mindspore::lite {
 bool ConvertNodes(const schema::MetaGraph *meta_graph, Model *model) {
   for (size_t i = 0; i < meta_graph->nodes()->size(); ++i) {
-    Model::Node *node = new (std::nothrow) Model::Node();
+    auto *node = new (std::nothrow) Model::Node();
     if (node == nullptr) {
       MS_LOG(ERROR) << "new node fail!";
       return false;

@@ -384,6 +384,7 @@ int SetArch(CpuInfo *freq_set, int core_num) {
     freq_set[i].arch = archs[i];
   }
   free(archs);
+  fclose(fp);
   return RET_TP_OK;
 }
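
The added fclose(fp) presumably closes a FILE * opened earlier in SetArch, so the success path no longer leaks the handle, which is the typical fix for a resource-leak report. One way to make such leaks impossible by construction is RAII; a hedged sketch with a hypothetical helper (not part of this PR):

#include <cstddef>
#include <cstdio>
#include <memory>

// unique_ptr with fclose as the deleter closes the file on every return path.
using FilePtr = std::unique_ptr<FILE, int (*)(FILE *)>;

bool CountLines(const char *path, std::size_t *lines) {
  FilePtr fp(fopen(path, "r"), fclose);
  if (fp == nullptr || lines == nullptr) {
    return false;  // fp (if open) is closed here automatically
  }
  *lines = 0;
  int ch = 0;
  while ((ch = fgetc(fp.get())) != EOF) {
    if (ch == '\n') {
      ++(*lines);
    }
  }
  return true;  // and here as well
}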

@@ -49,8 +49,8 @@ TrainModel *TrainModel::Import(const char *model_buf, size_t size) {
   model->buf_size_ = size;
   auto meta_graph = schema::GetMetaGraph(model->buf);
   if (meta_graph == nullptr) {
-    delete model;
     free(model->buf);
+    delete model;
     MS_LOG(ERROR) << "meta_graph is nullptr!";
     return nullptr;
   }
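
The reorder in TrainModel::Import matters because model->buf is read through the object: with delete model first, the following free(model->buf) dereferences freed memory, which analyzers report as a use-after-free. Releasing the owned buffer first and deleting the object afterwards is the correct order. A minimal illustration with a hypothetical type (not the real Model class):

#include <cstdlib>

struct Holder {
  char *buf = nullptr;  // owned, allocated with malloc
};

void DestroyHolder(Holder *holder) {
  if (holder == nullptr) {
    return;
  }
  free(holder->buf);  // release the member while the object is still alive
  holder->buf = nullptr;
  delete holder;  // only then destroy the object itself
}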

@@ -16,6 +16,7 @@
 #include "tools/converter/parser/onnx/onnx_expand_parser.h"
 #include <memory>
+#include <vector>
 namespace mindspore {
 namespace lite {
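
Adding #include <vector> lets the parser name std::vector without relying on a transitive include, in line with include-what-you-use style checks. The principle, in a trivial hypothetical sketch:

#include <cstdint>
#include <vector>  // included directly because std::vector is used below

std::vector<int64_t> MakeDefaultShape() { return {1, 3, 224, 224}; }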

@@ -110,14 +110,15 @@ class PostTrainingQuantizer : public Quantizer {
   STATUS QuantNode();
   STATUS DoQuantInput(double scale, int32_t zeropoint, struct MaxMin *max_min,
-                      std::shared_ptr<PrimitiveC> lite_primitive);
-  STATUS DoQuantOutput(double scale, int32_t zeropoint, struct MaxMin *max_min, std::shared_ptr<PrimitiveC>);
-  STATUS DoWeightQuant(AnfNodePtr weight, std::shared_ptr<PrimitiveC> primitive_c, bool perchannel);
-  STATUS DoBiasQuant(AnfNodePtr bias, std::shared_ptr<PrimitiveC> primitive_c);
+                      const std::shared_ptr<PrimitiveC> &lite_primitive) const;
+  STATUS DoQuantOutput(double scale, int32_t zeropoint, struct MaxMin *max_min,
+                       const std::shared_ptr<PrimitiveC> &) const;
+  STATUS DoWeightQuant(const AnfNodePtr &weight, std::shared_ptr<PrimitiveC> primitive_c, bool perchannel) const;
+  STATUS DoBiasQuant(const AnfNodePtr &bias, const std::shared_ptr<PrimitiveC> &primitive_c);
   STATUS Int8Inference();
-  STATUS BiasCorrection(FuncGraphPtr func_graph);
+  STATUS BiasCorrection(const FuncGraphPtr &func_graph);
 };
 struct DivergInfo {
@@ -189,7 +190,7 @@ class Calibrator {
   size_t GetInputNum() const { return config_param_.image_paths.size(); }
-  STATUS AddQuantizedOp(CNodePtr node);
+  STATUS AddQuantizedOp(const CNodePtr &node);
   STATUS RecordMaxValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
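
The PostTrainingQuantizer and Calibrator hunks above change smart-pointer and AnfNodePtr parameters from by-value to const references (a shared_ptr copy costs a reference-count update per call) and mark methods that do not modify the object as const. A compact sketch of both ideas with hypothetical types:

#include <memory>

struct Primitive {
  int op_type = 0;
};

class Quantizer {
 public:
  // const reference: no shared_ptr copy, so no refcount churn; const method:
  // the call promises not to modify the Quantizer.
  int GetOpType(const std::shared_ptr<Primitive> &primitive) const {
    return primitive == nullptr ? -1 : primitive->op_type;
  }
};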
