!14036 update mnist_x86 example codes

From: @zoloft
Reviewed-by: @wangchengyuan,@hangangqiang
Signed-off-by: @wangchengyuan
pull/14036/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit bbd9cb9f69

@ -1,5 +1,4 @@
cmake_minimum_required(VERSION 3.14)
project(benchmark)
@ -14,6 +13,8 @@ set(HEADER_PATH ${PKG_PATH}/inference)
option(MICRO_BUILD_ARM64 "build android arm64" OFF)
option(MICRO_BUILD_ARM32A "build android arm32" OFF)
add_compile_definitions(NOT_USE_STL)
if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_NEON)
add_compile_definitions(ENABLE_ARM)
@ -38,15 +39,17 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
message(STATUS "build benchmark release version")
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
add_subdirectory(src)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)
include_directories(${HEADER_PATH})
set(SRC_FILES
benchmark/benchmark.cc
@ -54,4 +57,3 @@ set(SRC_FILES
)
add_executable(benchmark ${SRC_FILES})
target_link_libraries(benchmark net -lm -pthread)

@ -1,5 +1,4 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
@ -39,6 +38,17 @@ void usage() {
"args[5]: runtime thread bind mode\n\n");
}
// Returns a monotonic timestamp in microseconds, or 0 if the clock
// cannot be read. Used only for benchmark interval measurement.
uint64_t GetTimeUs() {
  const uint64_t kUsPerSecond = 1000000;  // seconds -> microseconds
  const uint64_t kNsPerUs = 1000;         // nanoseconds -> microseconds
  struct timespec ts = {0, 0};
  if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
    return 0;
  }
  // Widen tv_sec BEFORE multiplying: on 32-bit targets tv_sec is a 32-bit
  // long, so tv_sec * 1000000 overflows after ~36 minutes of uptime.
  return (uint64_t)ts.tv_sec * kUsPerSecond + (uint64_t)ts.tv_nsec / kNsPerUs;
}
template <typename T>
void PrintData(void *data, size_t data_number) {
if (data == nullptr) {
@ -46,23 +56,20 @@ void PrintData(void *data, size_t data_number) {
}
auto casted_data = static_cast<T *>(data);
for (size_t i = 0; i < 10 && i < data_number; i++) {
std::cout << std::to_string(casted_data[i]) << ", ";
printf("%s, ", std::to_string(casted_data[i]).c_str());
}
std::cout << std::endl;
printf("\n");
}
void TensorToString(tensor::MSTensor *tensor) {
uint8_t i = 0;
std::cout << "uint8: " << i << std::endl;
std::cout << "Name: " << tensor->tensor_name();
std::cout << ", DataType: " << tensor->data_type();
std::cout << ", Size: " << tensor->Size();
std::cout << ", Shape:";
printf("name: %s, ", tensor->tensor_name().c_str());
printf("DataType: %d, ", tensor->data_type());
printf("Elements: %d, ", tensor->ElementsNum());
printf("Shape: [");
for (auto &dim : tensor->shape()) {
std::cout << " " << dim;
printf("%d ", dim);
}
std::cout << ", Data:" << std::endl;
printf("], Data: \n");
switch (tensor->data_type()) {
case kNumberTypeFloat32: {
PrintData<float>(tensor->MutableData(), tensor->ElementsNum());
@ -90,26 +97,42 @@ void TensorToString(tensor::MSTensor *tensor) {
int main(int argc, const char **argv) {
if (argc < 2) {
std::cout << "input command is invalid\n" << std::endl;
printf("input command is invalid\n");
usage();
return lite::RET_ERROR;
}
std::cout << "start run benchmark" << std::endl;
printf("=======run benchmark======\n");
const char *model_buffer = nullptr;
int model_size = 0;
// read .net file by ReadBinaryFile;
// read .bin file by ReadBinaryFile;
if (argc >= 3) {
model_buffer = static_cast<const char *>(ReadInputData(argv[2], &model_size));
}
session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr);
lite::Context *context = nullptr;
if (argc >= 5) {
// config benchmark context
context = new (std::nothrow) lite::Context();
if (context == nullptr) {
return lite::RET_ERROR;
}
context->thread_num_ = atoi(argv[4]);
context->device_list_.resize(1);
context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[5]))}}};
printf("context: ThreadNum: %d, BindMode: %d\n", context->thread_num_,
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_);
}
session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, context);
if (session == nullptr) {
std::cerr << "create lite session failed" << std::endl;
printf("create lite session failed\n");
return lite::RET_ERROR;
}
delete[] model_buffer;
// set model inputs tensor data
std::vector<tensor::MSTensor *> inputs = session->GetInputs();
Vector<tensor::MSTensor *> inputs = session->GetInputs();
size_t inputs_num = inputs.size();
void *inputs_binbuf[inputs_num];
int inputs_size[inputs_num];
@ -125,23 +148,41 @@ int main(int argc, const char **argv) {
memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
}
if (argc >= 4) {
int loop_count = atoi(argv[3]);
printf("\nloop count: %d\n", loop_count);
uint64_t start_time = GetTimeUs();
for (int i = 0; i < loop_count; ++i) {
ret = session->RunGraph();
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
}
uint64_t end_time = GetTimeUs();
float total_time = (float)(end_time - start_time) / 1000.0f;
printf("total time: %.5fms, per time: %.5fms\n", total_time, total_time / loop_count);
}
ret = session->RunGraph();
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
auto outputs = session->GetOutputs();
std::cout << "output size: " << outputs.size() << std::endl;
for (const auto &item : outputs) {
auto output = item.second;
Vector<String> outputs_name = session->GetOutputTensorNames();
printf("\noutputs: \n");
for (const auto &name : outputs_name) {
auto output = session->GetOutputByTensorName(name);
TensorToString(output);
}
std::cout << "run benchmark success" << std::endl;
printf("========run success=======\n");
delete session;
session = nullptr;
if (context != nullptr) {
delete context;
context = nullptr;
}
for (size_t i = 0; i < inputs_num; ++i) {
free(inputs_binbuf[i]);
inputs_binbuf[i] = nullptr;
}
return lite::RET_OK;
}

@ -27,7 +27,7 @@ do
done
BASEPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
MINDSPORE_ROOT_DIR=${${BASEPATH}%%/mindspore/lite/micro/example/mnist_x86}
MINDSPORE_ROOT_DIR=${BASEPATH%%/mindspore/lite/micro/example/mnist_x86}
echo "current dir is: ${BASEPATH}"

@ -1,5 +1,4 @@
cmake_minimum_required(VERSION 3.14)
project(net)
@ -16,7 +15,8 @@ set(HEADER_PATH ${PKG_PATH}/inference)
message("operator lib path: ${OP_LIB}")
message("operator header path: ${OP_HEADER_PATH}")
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
add_compile_definitions(NOT_USE_STL)
include_directories(${OP_HEADER_PATH})
include_directories(${HEADER_PATH})
@ -43,15 +43,19 @@ endif()
set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
message(STATUS "build net library with debug info")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
message(STATUS "build net library release version")
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
function(create_library)
@ -80,4 +84,3 @@ function(create_library)
endfunction(create_library)
string(CONCAT library_name "lib" net ".a")
create_library()

@ -0,0 +1,65 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
#define MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
#include "include/model.h"
#include "session.h"
#include <new>
#include <string.h>
namespace mindspore::lite {
// Minimal Model subclass that owns a heap-allocated copy of the model
// buffer (inherited `buf`) and records its size so callers can retrieve
// it via buf_size().
class MModel : public Model {
 public:
  ~MModel() override { Destroy(); }

  // Releases the owned buffer; safe to call when nothing is held.
  void Free() override {
    if (this->buf == nullptr) {
      return;
    }
    free(this->buf);
    this->buf = nullptr;
    this->buf_size_ = 0;
  }

  void Destroy() override { Free(); }

  size_t buf_size() const { return buf_size_; }
  void set_buf_size(size_t size) { buf_size_ = size; }

 private:
  size_t buf_size_{0};  // byte length of the buffer currently held in `buf`
};
// Creates an MModel holding a private copy of `model_buf`.
// Returns nullptr on a null/empty input or on allocation failure;
// ownership of the returned Model passes to the caller.
Model *Model::Import(const char *model_buf, size_t size) {
  MS_NULLPTR_IF_NULL(model_buf);
  if (size == 0) {
    return nullptr;
  }
  MModel *model = new (std::nothrow) MModel();
  MS_NULLPTR_IF_NULL(model);
  model->buf = reinterpret_cast<char *>(malloc(size));
  if (model->buf == nullptr) {
    delete model;
    return nullptr;
  }
  memcpy(model->buf, model_buf, size);
  model->set_buf_size(size);
  return model;
}
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_

@ -61,7 +61,6 @@ void FreeResource() {
}
}
void Inference() {
const int g_thread_num = 1;
{
DoQuantizeFp32ToInt8((float *)(g_Input0), (int8_t *)(g_Buffer+0), 0.007874015718698501587, 0, 784, false);
}
@ -71,15 +70,15 @@ memset((int16_t *)(g_Buffer+12976), 0, 256);
memset((int *)(g_Buffer+13232), 0, 6144);
memset((int8_t *)(g_Buffer+19376), 0, 8112);
memset((int16_t *)(g_Buffer+27488), 0, 12544);
static QuantArg conv_param__quant_arg_in[1] = {{0.007874015718698501587, 0}};
static QuantArg conv_param__quant_arg_w[12] = {{0.003238174133002758026, -6}, {0.003890725085511803627, -8}, {0.003394871251657605171, -7}, {0.001685356837697327137, -127}, {0.004322394262999296188, 1}, {0.002274985425174236298, -56}, {0.003617759561166167259, 17}, {0.004447745624929666519, 23}, {0.004683905746787786484, 26}, {0.004021023400127887726, 24}, {0.005650237202644348145, 11}, {0.001966834301128983498, -84}};
static QuantArg conv_param__quant_arg_out[1] = {{0.01778890006244182587, 0}};
static double conv_param__real_multiplier[12] = {0.001433333970799530351, 0.001722176774828924938, 0.00150269379968211614, 0.0007460003866156953226, 0.001913249346122961134, 0.001006991503636309139, 0.001601352314486244018, 0.001968734305210294733, 0.002073267527210802957, 0.00177985160945266568, 0.002501001060249878095, 0.0008705926067589928779};
static int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static int conv_param__right_shift[12] = {-9, -9, -9, -10, -9, -9, -9, -8, -8, -9, -8, -10};
static int conv_param__quant_multiplier[12] = {1575967367, 1893553389, 1652229306, 1640472199, 2103639903, 1107198867, 1760705490, 1082323130, 1139790877, 1956967540, 1374939873, 1914453388};
static int conv_param__out_act_min[1] = {0};
static int conv_param__out_act_max[1] = {127};
QuantArg conv_param__quant_arg_in[1] = {{0.007874015718698501587, 0}};
QuantArg conv_param__quant_arg_w[12] = {{0.003238174133002758026, -6}, {0.003890725085511803627, -8}, {0.003394871251657605171, -7}, {0.001685356837697327137, -127}, {0.004322394262999296188, 1}, {0.002274985425174236298, -56}, {0.003617759561166167259, 17}, {0.004447745624929666519, 23}, {0.004683905746787786484, 26}, {0.004021023400127887726, 24}, {0.005650237202644348145, 11}, {0.001966834301128983498, -84}};
QuantArg conv_param__quant_arg_out[1] = {{0.01778890006244182587, 0}};
double conv_param__real_multiplier[12] = {0.001433333970799530351, 0.001722176774828924938, 0.00150269379968211614, 0.0007460003866156953226, 0.001913249346122961134, 0.001006991503636309139, 0.001601352314486244018, 0.001968734305210294733, 0.002073267527210802957, 0.00177985160945266568, 0.002501001060249878095, 0.0008705926067589928779};
int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int conv_param__right_shift[12] = {-9, -9, -9, -10, -9, -9, -9, -8, -8, -9, -8, -10};
int conv_param__quant_multiplier[12] = {1575967367, 1893553389, 1652229306, 1640472199, 2103639903, 1107198867, 1760705490, 1082323130, 1139790877, 1956967540, 1374939873, 1914453388};
int conv_param__out_act_min[1] = {0};
int conv_param__out_act_max[1] = {127};
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
int thread_num = MSMIN(g_thread_num, 26);
ConvParameter conv_param_ = {{ "", 35, g_thread_num}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(1), 0, 0, 0};
@ -100,15 +99,15 @@ memset((int16_t *)(g_Buffer+15024), 0, 256);
memset((int *)(g_Buffer+15280), 0, 6144);
memset((int8_t *)(g_Buffer+21424), 0, 1452);
memset((int16_t *)(g_Buffer+22876), 0, 5408);
static QuantArg conv_param__quant_arg_in[1] = {{0.01778890006244182587, 0}};
static QuantArg conv_param__quant_arg_w[12] = {{0.005374609492719173431, 33}, {0.005837683100253343582, 22}, {0.004709810949862003326, -15}, {0.003726204857230186462, 27}, {0.00318551529198884964, -8}, {0.003453079145401716232, 50}, {0.004045850131660699844, -9}, {0.003903790842741727829, 30}, {0.004003710579127073288, -10}, {0.00560879148542881012, 27}, {0.005486610345542430878, -23}, {0.003554018214344978333, 4}};
static QuantArg conv_param__quant_arg_out[1] = {{0.07183934003114700317, 0}};
static double conv_param__real_multiplier[12] = {0.001330863973520378732, 0.001445530533608141606, 0.001166246148374064893, 0.0009226850783705293785, 0.0007887991893445710223, 0.0008550534992628172192, 0.001001835847923064193, 0.0009666590447744700769, 0.0009914011740411567478, 0.001388852288199173826, 0.00135859773990280961, 0.0008800481219728497088};
static int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static int conv_param__right_shift[12] = {-9, -9, -9, -10, -10, -10, -9, -10, -9, -9, -9, -10};
static int conv_param__quant_multiplier[12] = {1463300414, 1589377630, 1282301201, 2029005945, 1734587761, 1880282530, 1101530164, 2125705720, 1090057119, 1527059240, 1493794012, 1935246286};
static int conv_param__out_act_min[1] = {0};
static int conv_param__out_act_max[1] = {127};
QuantArg conv_param__quant_arg_in[1] = {{0.01778890006244182587, 0}};
QuantArg conv_param__quant_arg_w[12] = {{0.005374609492719173431, 33}, {0.005837683100253343582, 22}, {0.004709810949862003326, -15}, {0.003726204857230186462, 27}, {0.00318551529198884964, -8}, {0.003453079145401716232, 50}, {0.004045850131660699844, -9}, {0.003903790842741727829, 30}, {0.004003710579127073288, -10}, {0.00560879148542881012, 27}, {0.005486610345542430878, -23}, {0.003554018214344978333, 4}};
QuantArg conv_param__quant_arg_out[1] = {{0.07183934003114700317, 0}};
double conv_param__real_multiplier[12] = {0.001330863973520378732, 0.001445530533608141606, 0.001166246148374064893, 0.0009226850783705293785, 0.0007887991893445710223, 0.0008550534992628172192, 0.001001835847923064193, 0.0009666590447744700769, 0.0009914011740411567478, 0.001388852288199173826, 0.00135859773990280961, 0.0008800481219728497088};
int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int conv_param__right_shift[12] = {-9, -9, -9, -10, -10, -10, -9, -10, -9, -9, -9, -10};
int conv_param__quant_multiplier[12] = {1463300414, 1589377630, 1282301201, 2029005945, 1734587761, 1880282530, 1101530164, 2125705720, 1090057119, 1527059240, 1493794012, 1935246286};
int conv_param__out_act_min[1] = {0};
int conv_param__out_act_max[1] = {127};
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
int thread_num = MSMIN(g_thread_num, 11);
ConvParameter conv_param_ = {{ "", 35, g_thread_num}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(1), 0, 0, 0};
@ -131,11 +130,11 @@ Int8Reshape((int8_t *)(g_Buffer+1456), (int8_t *)(g_Buffer+0), 300, reshape_quan
int32_t tmp_weight_zp = 1;
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+0)+0, (int8_t *)(g_Buffer+10928), 1, 300);
CalcInputSums((int8_t *)(g_Buffer+0)+0, 1, 300, tmp_weight_zp, (int *)(g_Buffer+12144), RowMajor);
static float filter_scale[20] = {0.003479549195617437363, 0.004490676335990428925, 0.004529818892478942871, 0.002983231563121080399, 0.003455155529081821442, 0.003223794745281338692, 0.003272445406764745712, 0.003801185870543122292, 0.003679843153804540634, 0.003040234791114926338, 0.003704284550622105598, 0.003355232765898108482, 0.002904496388509869576, 0.003024494973942637444, 0.002794801956042647362, 0.004355110693722963333, 0.003499472280964255333, 0.004184196703135967255, 0.003057289868593215942, 0.003264668164774775505};
static int filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0};
static int left_shift[20] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static int right_shift[20] = {-10, -9, -9, -10, -10, -10, -10, -9, -9, -10, -9, -10, -10, -10, -10, -9, -10, -9, -10, -10};
static int multiplier[20] = {2108215049, 1360422072, 1372280070, 1807502393, 2093435146, 1953256619, 1982733521, 1151545365, 1114785262, 1842040025, 1122189669, 2032893316, 1759797843, 1832503464, 1693335354, 1319353429, 2120286176, 1267576078, 1852373503, 1978021333};
float filter_scale[20] = {0.003479549195617437363, 0.004490676335990428925, 0.004529818892478942871, 0.002983231563121080399, 0.003455155529081821442, 0.003223794745281338692, 0.003272445406764745712, 0.003801185870543122292, 0.003679843153804540634, 0.003040234791114926338, 0.003704284550622105598, 0.003355232765898108482, 0.002904496388509869576, 0.003024494973942637444, 0.002794801956042647362, 0.004355110693722963333, 0.003499472280964255333, 0.004184196703135967255, 0.003057289868593215942, 0.003264668164774775505};
int filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0};
int left_shift[20] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int right_shift[20] = {-10, -9, -9, -10, -10, -10, -10, -9, -9, -10, -9, -10, -10, -10, -10, -9, -10, -9, -10, -10};
int multiplier[20] = {2108215049, 1360422072, 1372280070, 1807502393, 2093435146, 1953256619, 1982733521, 1151545365, 1114785262, 1842040025, 1122189669, 2032893316, 1759797843, 1832503464, 1693335354, 1319353429, 2120286176, 1267576078, 1852373503, 1978021333};
const MatmulQuantParameter matmul_quant_parameter = {{0.07136065512895584106, 0}, {0, 0}, {0.258998185396194458, 0}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier};
int32_t *cur_left = matmul_quant_parameter.left_shift_ + 0;
int32_t *cur_right = matmul_quant_parameter.right_shift_ + 0;
@ -147,11 +146,11 @@ MatmulInt8Opt((int8_t *)(g_Buffer+10928), g_Weight15+0 + 0, (int8_t *)(g_Buffer+
int32_t tmp_weight_zp = 1;
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+304)+0, (int8_t *)(g_Buffer+10928), 1, 20);
CalcInputSums((int8_t *)(g_Buffer+304)+0, 1, 20, tmp_weight_zp, (int *)(g_Buffer+11056), RowMajor);
static float filter_scale[10] = {0.004678330849856138229, 0.005127115640789270401, 0.00471437256783246994, 0.004531511571258306503, 0.005476122256368398666, 0.004348111804574728012, 0.004803542047739028931, 0.006081215571612119675, 0.004532597027719020844, 0.004762654658406972885};
static int filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8};
static int left_shift[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static int right_shift[10] = {-8, -8, -8, -8, -8, -8, -8, -8, -8, -8};
static int multiplier[10] = {1242805482, 1362025788, 1252380041, 1203802750, 1454739904, 1155082292, 1276068015, 1615483838, 1204091115, 1265206260};
float filter_scale[10] = {0.004678330849856138229, 0.005127115640789270401, 0.00471437256783246994, 0.004531511571258306503, 0.005476122256368398666, 0.004348111804574728012, 0.004803542047739028931, 0.006081215571612119675, 0.004532597027719020844, 0.004762654658406972885};
int filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8};
int left_shift[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int right_shift[10] = {-8, -8, -8, -8, -8, -8, -8, -8, -8, -8};
int multiplier[10] = {1242805482, 1362025788, 1252380041, 1203802750, 1454739904, 1155082292, 1276068015, 1615483838, 1204091115, 1265206260};
const MatmulQuantParameter matmul_quant_parameter = {{0.258998185396194458, 0}, {0, 0}, {0.5359870791435241699, 0}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier};
int32_t *cur_left = matmul_quant_parameter.left_shift_ + 0;
int32_t *cur_right = matmul_quant_parameter.right_shift_ + 0;

@ -17,6 +17,7 @@ set(OP_SRC
net.c.o
session.cc.o
tensor.cc.o
string.cc.o
)
file(GLOB NET_SRC
${CMAKE_CURRENT_SOURCE_DIR}/*.cc

@ -16,21 +16,31 @@
*/
#include "session.h"
#include "model.h"
#include "net.h"
#include <new>
namespace mindspore {
namespace lite {
int LiteSession::CompileGraph(lite::Model *model) {
inputs_.resize(1);
inputs_[0] = new (std::nothrow) MTensor("graph_input-0", kNumberTypeFloat32, {1, 28, 28, 1, });
Vector<int32_t> in_shape_0;
in_shape_0.resize(4);
in_shape_0[0] = 1;
in_shape_0[1] = 28;
in_shape_0[2] = 28;
in_shape_0[3] = 1;
inputs_[0] = new (std::nothrow) MTensor(String("graph_input-0"), kNumberTypeFloat32, in_shape_0);
MS_ERROR_IF_NULL(inputs_[0]);
outputs_.resize(1);
outputs_[0] = new (std::nothrow) MTensor("Softmax-7", kNumberTypeFloat32, {1, 10, });
Vector<int32_t> out_shape_0;
out_shape_0.resize(2);
out_shape_0[0] = 1;
out_shape_0[1] = 10;
outputs_[0] = new (std::nothrow) MTensor(String("Softmax-7"), kNumberTypeFloat32, out_shape_0);
MS_ERROR_IF_NULL(outputs_[0]);
for (const auto &output: outputs_) {
output_tensor_map_[output->tensor_name()] = output;
}
return RET_OK;
int ret = Init(model->buf, dynamic_cast<MModel *>(model)->buf_size());
return ret;
}
@ -65,8 +75,7 @@ LiteSession::~LiteSession() {
delete input;
input = nullptr;
}
for (auto &item : output_tensor_map_) {
auto output = item.second;
for (auto &output : outputs_) {
if (output == nullptr) {
continue;
}
@ -88,69 +97,53 @@ int LiteSession::InitRuntimeBuffer() {
return RET_OK;
}
std::vector<tensor::MSTensor *> LiteSession::GetInputs() const {
std::vector<tensor::MSTensor *> inputs;
inputs.insert(inputs.begin(), inputs_.begin(), inputs_.end());
return inputs;
}
std::vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const std::string &node_name) const {
auto iter = output_node_map_.find(node_name);
if (iter == output_node_map_.end()) {
std::vector<tensor::MSTensor *> empty;
return empty;
Vector<tensor::MSTensor *> LiteSession::GetInputs() const {
Vector<tensor::MSTensor *> inputs;
for (const auto &input : inputs_) {
inputs.push_back(input);
}
return iter->second;
return inputs;
}
std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetOutputs() const {
return output_tensor_map_;
Vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const String &node_name) const {
Vector<tensor::MSTensor *> outputs;
return outputs;
}
std::vector<std::string> LiteSession::GetOutputTensorNames() const {
std::vector<std::string> output_names;
for (const auto &item : output_node_map_) {
for (const auto &output : item.second) {
output_names.emplace_back(output->tensor_name());
}
Vector<String> LiteSession::GetOutputTensorNames() const {
Vector<String> output_names;
for (const auto &output : outputs_) {
output_names.push_back(output->tensor_name());
}
return output_names;
}
mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::string &tensor_name) const {
auto item = output_tensor_map_.find(tensor_name);
if (item == output_tensor_map_.end()) {
return nullptr;
mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const {
for (const auto &output : outputs_) {
if (output->tensor_name() == tensor_name) {
return output;
}
}
return item->second;
}
int LiteSession::Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) {
return RET_OK;
return nullptr;
}
} // namespace lite
session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) {
auto *session = new (std::nothrow) lite::LiteSession();
if (session == nullptr) {
return nullptr;
}
session->InitRuntimeBuffer();
MS_NULLPTR_IF_NULL(session);
int ret = session->InitRuntimeBuffer();
MS_NULLPTR_IF_ERROR(ret);
return session;
}
session::LiteSession *session::LiteSession::CreateSession(const char *net_buf, size_t size,
session::LiteSession *session::LiteSession::CreateSession(const char *model_buf, size_t size,
const lite::Context *context) {
session::LiteSession *session = CreateSession(context);
if (session == nullptr) {
return nullptr;
}
int ret = session->CompileGraph(nullptr);
if (ret != lite::RET_OK) {
return nullptr;
}
Init(const_cast<char *>(net_buf), size);
MS_NULLPTR_IF_NULL(session);
lite::Model *model = lite::Model::Import(model_buf, size);
int ret = session->CompileGraph(model);
MS_NULLPTR_IF_ERROR(ret);
delete model;
return session;
}
} // namespace mindspore

@ -33,6 +33,20 @@ namespace lite {
} \
} while (0)
#define MS_NULLPTR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return nullptr; \
} \
} while (0)
#define MS_NULLPTR_IF_ERROR(ptr) \
do { \
if ((ptr) != mindspore::lite::RET_OK) { \
return nullptr; \
} \
} while (0)
class LiteSession : public session::LiteSession {
public:
LiteSession() = default;
@ -43,31 +57,25 @@ class LiteSession : public session::LiteSession {
int CompileGraph(lite::Model *model) override;
std::vector<tensor::MSTensor *> GetInputs() const override;
Vector<tensor::MSTensor *> GetInputs() const override;
mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override { return nullptr; }
mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { return nullptr; }
int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const override;
Vector<tensor::MSTensor *> GetOutputsByNodeName(const String &node_name) const override;
std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const override;
Vector<String> GetOutputTensorNames() const override;
std::vector<std::string> GetOutputTensorNames() const override;
mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override;
mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override;
int Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) override;
int Resize(const Vector<tensor::MSTensor *> &inputs, const Vector<Vector<int>> &dims) override { return RET_ERROR; }
int InitRuntimeBuffer();
private:
int SetInputsData(const std::vector<MTensor *> &inputs) const;
std::vector<MTensor *> inputs_;
std::vector<MTensor *> outputs_;
std::unordered_map<std::string, mindspore::tensor::MSTensor *> output_tensor_map_;
std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_node_map_;
Vector<MTensor *> inputs_;
Vector<MTensor *> outputs_;
void *runtime_buffer_;
};
@ -75,4 +83,3 @@ class LiteSession : public session::LiteSession {
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_

File diff suppressed because it is too large Load Diff

@ -1,5 +1,4 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
@ -61,14 +60,6 @@ MTensor::~MTensor() {
}
}
int MTensor::DimensionSize(const size_t index) const {
int dim_size = -1;
if (index < shape_.size()) {
dim_size = shape_[index];
}
return dim_size;
}
int MTensor::ElementsNum() const {
int elements = 1;
for (int i : shape_) {
@ -90,4 +81,3 @@ void *MTensor::MutableData() {
}
} // namespace lite
} // namespace mindspore

@ -1,5 +1,4 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
@ -20,8 +19,6 @@
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#include "include/ms_tensor.h"
#include <utility>
#include <vector>
namespace mindspore {
namespace lite {
@ -31,7 +28,7 @@ struct QuantArg {
float var_corr{1};
float mean_corr{0};
bool inited;
std::vector<float> clusters{};
Vector<float> clusters{};
int bitNum;
int roundType;
int multiplier;
@ -41,31 +38,29 @@ struct QuantArg {
class MTensor : public mindspore::tensor::MSTensor {
public:
MTensor() = default;
MTensor(std::string name, enum TypeId type, std::vector<int32_t> shape)
: tensor_name_(std::move(name)), data_type_(type), shape_(std::move(shape)) {}
MTensor(String name, TypeId type, Vector<int32_t> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
~MTensor() override;
TypeId data_type() const override { return data_type_; }
std::vector<int> shape() const override { return shape_; }
int DimensionSize(size_t index) const override;
Vector<int> shape() const override { return shape_; }
void set_shape(const Vector<int> &shape) override { shape_ = shape; }
int ElementsNum() const override;
size_t Size() const override;
String tensor_name() const override { return tensor_name_; }
void set_tensor_name(const String &name) override { tensor_name_ = name; }
void *MutableData() override;
std::string tensor_name() const override { return tensor_name_; }
void set_tensor_name(const std::string name) override { tensor_name_ = name; }
void *data() override { return data_; }
void set_data(void *data) override { data_ = data; }
private:
std::string tensor_name_;
String tensor_name_;
TypeId data_type_;
std::vector<int> shape_;
Vector<int> shape_;
void *data_ = nullptr;
std::vector<QuantArg> quant_params_;
Vector<QuantArg> quant_params_;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_

@ -17,7 +17,8 @@
#include "weight.h"
unsigned char * g_Buffer = 0 ;
int g_thread_num = 1;
unsigned char * g_Buffer = 0;
int16_t g_Weight10[1536];
int32_t g_Weight11[12];
int16_t g_Weight12[3072];
@ -33,7 +34,6 @@ int Init(void *weight_buffer, int weight_size) {
if (weight_buffer == NULL) {
return RET_ERROR;
}
struct ModelParameter {
void *addr;
size_t size;
@ -74,7 +74,7 @@ if (g_Weight15 == NULL) {
return RET_ERROR;
}
memset(g_Weight15, 0, 6080);
static int init_filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0};
int init_filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0};
InitInt8MatrixB(g_Weight6, g_Weight16, g_Weight15, 1, 300, 20, 20, 304, 0, init_filter_zp, g_Weight14, true, true);
}
{
@ -94,7 +94,7 @@ if (g_Weight18 == NULL) {
return RET_ERROR;
}
memset(g_Weight18, 0, 384);
static int init_filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8};
int init_filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8};
InitInt8MatrixB(g_Weight8, g_Weight19, g_Weight18, 1, 20, 10, 12, 32, 0, init_filter_zp, g_Weight17, true, true);
}
return RET_OK;

@ -34,6 +34,7 @@ enum STATUS {
RET_ERROR = 1,
};
extern int g_thread_num;
extern int16_t g_Weight10[];
extern int32_t g_Weight11[];
extern int16_t g_Weight12[];

Loading…
Cancel
Save