sync lite to r0.7

pull/5456/head
xuanyue 5 years ago
parent b5393e6628
commit 0ce8708dee

@ -16,20 +16,20 @@
@title mindspore_build
SET BASEPATH=%CD%
IF NOT EXIST %BASEPATH%/build (
IF NOT EXIST "%BASEPATH%/build" (
md "build"
)
cd %BASEPATH%/build
cd "%BASEPATH%/build"
set BUILD_PATH=%CD%
IF NOT EXIST %BUILD_PATH%/mindspore (
IF NOT EXIST "%BUILD_PATH%/mindspore" (
md "mindspore"
)
cd %CD%/mindspore
cd "%CD%/mindspore"
IF "%2%" == "lite" (
IF "%1%" == "lite" (
call :gene_gtest
call :run_cmake
IF errorlevel 1 (
@ -47,14 +47,17 @@ IF "%2%" == "lite" (
)
cd %BUILD_PATH%/mindspore
IF "%1%" == "" (
cmake --build . -- -j6
IF "%2%" == "" (
cmake --build . --target package -- -j6
) ELSE (
cmake --build . -- -j%1%
cmake --build . --target package -- -j%2%
)
IF errorlevel 1 (
echo "build fail."
goto run_fail
) ELSE (
cd "%BASEPATH%/output"
rd /s /q _CPack_Packages
)
) ELSE (
cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_CPU=ON -DENABLE_MINDDATA=ON -DUSE_GLOG=ON ^
@ -75,40 +78,40 @@ IF "%2%" == "lite" (
)
)
cd %BASEPATH%
cd "%BASEPATH%"
goto run_eof
:run_cmake
cd %BUILD_PATH%/mindspore
cd "%BUILD_PATH%/mindspore"
cmake -DBUILD_DEVICE=on -DBUILD_CONVERTER=on -DPLATFORM_ARM64=off -DSUPPORT_TRAIN=off ^
-DCMAKE_BUILD_TYPE=Release -DSUPPORT_GPU=off -DBUILD_MINDDATA=off -DOFFLINE_COMPILE=off ^
-G "CodeBlocks - MinGW Makefiles" %BASEPATH%/mindspore/lite
-G "CodeBlocks - MinGW Makefiles" "%BASEPATH%/mindspore/lite"
GOTO:EOF
:gene_gtest
cd %BASEPATH%/third_party
cd "%BASEPATH%/third_party"
IF EXIST googletest rd /s /q googletest
git submodule update --init --recursive googletest
cd %BUILD_PATH%/mindspore
cd "%BUILD_PATH%/mindspore"
GOTO:EOF
:gene_protobuf
SET PROTOC=%BASEPATH%/build/mindspore/_deps/protobuf-src/_build/protoc
SET PROTOC="%BASEPATH%/build/mindspore/_deps/protobuf-src/_build/protoc"
SET PROTO_SRC_DIR=%BASEPATH%/mindspore/lite/tools/converter/parser/caffe
SET PROTO_SRC_DIR="%BASEPATH%/mindspore/lite/tools/converter/parser/caffe"
cd %PROTO_SRC_DIR%
%PROTOC% *.proto --proto_path=%PROTO_SRC_DIR% --cpp_out=%PROTO_SRC_DIR%
SET PROTO_SRC_DIR=%BASEPATH%/mindspore/lite/tools/converter/parser/onnx
SET PROTO_SRC_DIR="%BASEPATH%/mindspore/lite/tools/converter/parser/onnx"
cd %PROTO_SRC_DIR%
%PROTOC% *.proto --proto_path=%PROTO_SRC_DIR% --cpp_out=%PROTO_SRC_DIR%
cd %BUILD_PATH%/mindspore
GOTO:EOF
:gene_flatbuffer
SET FLATC=%BASEPATH%/build/mindspore/_deps/flatbuffers-src/_build/flatc
SET FLAT_DIR=%BASEPATH%/mindspore/lite/schema
SET FLATC="%BASEPATH%/build/mindspore/_deps/flatbuffers-src/_build/flatc"
SET FLAT_DIR="%BASEPATH%/mindspore/lite/schema"
cd %FLAT_DIR%
IF EXIST inner rd /s /q inner
md inner
@ -116,14 +119,14 @@ GOTO:EOF
%FLATC% -c -b *.fbs
%FLATC% -c -b --reflect-types --gen-mutable --reflect-names --gen-object-api -o %FLAT_DIR%/inner *.fbs
SET FLAT_DIR=%BASEPATH%/mindspore/lite/tools/converter/parser/tflite
SET FLAT_DIR="%BASEPATH%/mindspore/lite/tools/converter/parser/tflite"
cd %FLAT_DIR%
%FLATC% -c -b --reflect-types --gen-mutable --reflect-names --gen-object-api -o %FLAT_DIR% *.fbs
cd %BUILD_PATH%/mindspore
cd "%BUILD_PATH%/mindspore"
GOTO:EOF
:run_fail
cd %BASEPATH%
cd "%BASEPATH%"
set errorlevel=1
:run_eof

@ -393,7 +393,7 @@ build_mindspore()
CMAKE_VERBOSE="--verbose"
fi
cmake --build . --target package ${CMAKE_VERBOSE} -j$THREAD_NUM
echo "success to build mindspore project!"
echo "success building mindspore project!"
}
checkndk() {
@ -618,10 +618,12 @@ build_lite()
if [[ "${COMPILE_RET}" -ne 0 ]]; then
echo "---------------- mindspore lite: build failed ----------------"
exit 1
else
mv ${BASEPATH}/output/tmp/*.tar.gz* ${BASEPATH}/output/
rm -rf ${BASEPATH}/output/tmp/
echo "---------------- mindspore lite: build success ----------------"
exit 0
fi
}

@ -1,12 +1,18 @@
include(CMakePackageConfigHelpers)
set(LIB_DIR ${MAIN_DIR}/lib)
set(INC_DIR ${MAIN_DIR}/include)
set(TURBO_DIR ${MAIN_DIR}/third_party/libjpeg-turbo)
set(OPENCV_DIR ${MAIN_DIR}/third_party/opencv)
set(PROTOBF_DIR ${MAIN_DIR}/third_party/protobuf)
set(FLATBF_DIR ${MAIN_DIR}/third_party/flatbuffers)
set(LIB_DIR ${MAIN_DIR}-${COMPONENT_NAME}/lib)
set(INC_DIR ${MAIN_DIR}-${COMPONENT_NAME}/include)
set(TURBO_DIR ${MAIN_DIR}-${COMPONENT_NAME}/third_party/libjpeg-turbo)
set(OPENCV_DIR ${MAIN_DIR}-${COMPONENT_NAME}/third_party/opencv)
set(PROTOBF_DIR ${MAIN_DIR}-${COMPONENT_NAME}/third_party/protobuf)
set(FLATBF_DIR ${MAIN_DIR}-${COMPONENT_NAME}/third_party/flatbuffers)
set(LIB_DIR_RUN_X86 ${MAIN_DIR}-${RUN_X86_COMPONENT_NAME}/lib)
set(INC_DIR_RUN_X86 ${MAIN_DIR}-${RUN_X86_COMPONENT_NAME}/include)
set(TURBO_DIR_RUN_X86 ${MAIN_DIR}-${RUN_X86_COMPONENT_NAME}/third_party/libjpeg-turbo)
set(OPENCV_DIR_RUN_X86 ${MAIN_DIR}-${RUN_X86_COMPONENT_NAME}/third_party/opencv)
set(PROTOBF_DIR_RUN_X86 ${MAIN_DIR}-${RUN_X86_COMPONENT_NAME}/third_party/protobuf)
set(FLATBF_DIR_RUN_X86 ${MAIN_DIR}-${RUN_X86_COMPONENT_NAME}/third_party/flatbuffers)
if (BUILD_MINDDATA)
install(DIRECTORY ${TOP_DIR}/mindspore/ccsrc/minddata/dataset/include/ DESTINATION ${INC_DIR} COMPONENT ${COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
install(FILES ${TOP_DIR}/mindspore/lite/build/minddata/libminddata-lite.so DESTINATION ${LIB_DIR} COMPONENT ${COMPONENT_NAME})
@ -41,19 +47,40 @@ elseif (PLATFORM_ARM32)
install(DIRECTORY ${TOP_DIR}/mindspore/lite/include/ DESTINATION ${INC_DIR} COMPONENT ${COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
install(DIRECTORY ${TOP_DIR}/mindspore/lite/schema/ DESTINATION ${INC_DIR}/schema COMPONENT ${COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "inner" EXCLUDE)
install(DIRECTORY ${TOP_DIR}/third_party/flatbuffers/include DESTINATION ${FLATBF_DIR} COMPONENT ${COMPONENT_NAME})
elseif (CMAKE_SYSTEM_NAME MATCHES "Windows")
get_filename_component(CXX_DIR ${CMAKE_CXX_COMPILER} PATH)
file(GLOB LIB_LIST ${CXX_DIR}/libstdc++-6.dll ${CXX_DIR}/libwinpthread-1.dll ${CXX_DIR}/libssp-0.dll ${CXX_DIR}/libgcc_s_seh-1.dll)
install(FILES ${TOP_DIR}/build/mindspore/tools/converter/converter_lite.exe DESTINATION ${TOP_DIR}/build/mindspore/package COMPONENT ${COMPONENT_NAME})
install(FILES ${LIB_LIST} DESTINATION ${TOP_DIR}/build/mindspore/package COMPONENT ${COMPONENT_NAME})
install(FILES ${TOP_DIR}/build/mindspore/tools/converter/libconverter_parser.a DESTINATION ${TOP_DIR}/build/mindspore/package COMPONENT ${PARSER_NAME})
else ()
install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so DESTINATION ${LIB_DIR} COMPONENT ${RUN_X86_COMPONENT_NAME})
install(DIRECTORY ${TOP_DIR}/mindspore/lite/include/ DESTINATION ${INC_DIR_RUN_X86} COMPONENT ${RUN_X86_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
install(DIRECTORY ${TOP_DIR}/mindspore/lite/schema/ DESTINATION ${INC_DIR_RUN_X86}/schema COMPONENT ${RUN_X86_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "inner" EXCLUDE)
install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${INC_DIR_RUN_X86}/ir/dtype COMPONENT ${RUN_X86_COMPONENT_NAME})
install(DIRECTORY ${TOP_DIR}/third_party/flatbuffers/include DESTINATION ${FLATBF_DIR_RUN_X86} COMPONENT ${RUN_X86_COMPONENT_NAME})
install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so DESTINATION ${LIB_DIR_RUN_X86} COMPONENT ${RUN_X86_COMPONENT_NAME})
install(FILES ${TOP_DIR}/third_party/protobuf/build/lib/libprotobuf.so.19.0.0 DESTINATION ${PROTOBF_DIR}/lib RENAME libprotobuf.so.19 COMPONENT ${COMPONENT_NAME})
endif ()
set(CPACK_GENERATOR TGZ)
if (CMAKE_SYSTEM_NAME MATCHES "Windows")
set(CPACK_GENERATOR ZIP)
else ()
set(CPACK_GENERATOR TGZ)
endif ()
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)
if (PLATFORM_ARM64 OR PLATFORM_ARM32)
set(CPACK_COMPONENTS_ALL ${COMPONENT_NAME})
elseif (WIN32)
set(CPACK_COMPONENTS_ALL ${COMPONENT_NAME})
else ()
set(CPACK_COMPONENTS_ALL ${COMPONENT_NAME} ${RUN_X86_COMPONENT_NAME})
endif ()
set(CPACK_PACKAGE_FILE_NAME ${MAIN_DIR})
set(CPACK_PACKAGE_DIRECTORY ${TOP_DIR}/output/tmp)
if (WIN32)
set(CPACK_PACKAGE_DIRECTORY ${TOP_DIR}/output)
else ()
set(CPACK_PACKAGE_DIRECTORY ${TOP_DIR}/output/tmp)
endif()
set(CPACK_PACKAGE_CHECKSUM SHA256)
include(CPack)

@ -5,15 +5,15 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_
message(FATAL_ERROR "GCC vesion ${CMAKE_CXX_COMPILER_VERSION} must not be less than 7.3.0")
endif ()
set(MS_VERSION_MAJOY 0)
set(MS_VERSION_MAJOR 0)
set(MS_VERSION_MINOR 7)
set(MS_VERSION_REVISION 0)
set(DIR_PREFIX mindspore-lite)
set(MS_VERSION ${MS_VERSION_MAJOY}.${MS_VERSION_MINOR}.${MS_VERSION_REVISION})
set(MS_VERSION ${MS_VERSION_MAJOR}.${MS_VERSION_MINOR}.${MS_VERSION_REVISION})
set(MAIN_DIR ${DIR_PREFIX}-${MS_VERSION})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMS_VERSION_MAJOY=${MS_VERSION_MAJOY} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMS_VERSION_MAJOY=${MS_VERSION_MAJOY} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMS_VERSION_MAJOR=${MS_VERSION_MAJOR} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMS_VERSION_MAJOR=${MS_VERSION_MAJOR} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}")
if (SUPPORT_GPU)
set(PROCESS_UNIT gpu)
@ -25,13 +25,16 @@ if (PLATFORM_ARM64)
set(COMPONENT_NAME runtime-arm64-${PROCESS_UNIT})
elseif (PLATFORM_ARM32)
set(COMPONENT_NAME runtime-arm32-${PROCESS_UNIT})
elseif (WIN32)
set(PARSER_NAME libconverter-parser-win-${PROCESS_UNIT})
set(COMPONENT_NAME converter-win-${PROCESS_UNIT})
else ()
set(COMPONENT_NAME convert-ubuntu)
endif()
set(RUN_X86_COMPONENT_NAME runtime-x86-${PROCESS_UNIT})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../..)
string(REPLACE "/mindspore/lite" "" TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(CORE_DIR ${TOP_DIR}/mindspore/core)
set(CCSRC_DIR ${TOP_DIR}/mindspore/ccsrc)
include_directories(${TOP_DIR})
@ -65,20 +68,20 @@ set(CMAKE_VERBOSE_MAKEFILE on)
add_compile_definitions(USE_ANDROID_LOG)
add_compile_definitions(NO_DLIB)
add_compile_options(-fPIC)
if (NOT PLATFORM_ARM64 AND NOT PLATFORM_ARM32)
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else ()
## enable for binscope for release
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations ${CMAKE_CXX_FLAGS}")
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else ()
## enable for binscope for release
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
if (NOT WIN32)
set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_SHARED_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_EXE_LINKER_FLAGS}")
string(REPLACE " -g " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif ()
endif()
string(REPLACE " -g " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif ()
if (BUILD_DEVICE)
@ -110,12 +113,11 @@ if (WIN32)
add_compile_definitions(BUILDING_DLL)
endif ()
set(ANF_SRC
${CMAKE_CURRENT_SOURCE_DIR}/../core/ir/meta_tensor.cc
set(CORE_SRC
${CORE_DIR}/ir/meta_tensor.cc
${CORE_DIR}/gvar/logging_level.cc
${CORE_DIR}/gvar/typeid_manager.cc
${CMAKE_CURRENT_SOURCE_DIR}/../core/base/base.cc
${CMAKE_CURRENT_SOURCE_DIR}/src/common/log_adapter.cc
${CORE_DIR}/base/base.cc
)
if (BUILD_CONVERTER)
if (PLATFORM_ARM64 OR PLATFORM_ARM32)
@ -163,7 +165,6 @@ if (BUILD_DEVICE)
add_compile_definitions(ENABLE_ARM32)
endif ()
if (PLATFORM_ARM64)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod+fp16")
add_compile_definitions(ENABLE_ARM64)
if (ENABLE_FP16)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8.2-a+dotprod+fp16")
@ -207,4 +208,4 @@ if (BUILD_DEVICE)
endif ()
endif ()
include(${TOP_DIR}/cmake/package_lite.cmake)
include(${TOP_DIR}/cmake/package_lite.cmake)

@ -0,0 +1,56 @@
[查看中文](./README_CN.md)
## What Is MindSpore Lite
MindSpore Lite is a high-performance, lightweight open source inference framework that can be used to meet the needs of AI applications on mobile devices. MindSpore Lite focuses on how to deploy AI technology more effectively on devices. It has been integrated into HMS (Huawei Mobile Services) to provide inference for applications such as image classification, object detection and OCR. MindSpore Lite will promote the development and enrichment of the AI software/hardware application ecosystem.
<img src="../../docs/MindSpore-Lite-architecture.png" alt="MindSpore Lite Architecture" width="600"/>
For more details please check out our [MindSpore Lite Architecture Guide](https://www.mindspore.cn/lite/docs/en/master/architecture.html).
### MindSpore Lite features
1. Cooperative work with MindSpore training
- Provides training, optimization, and deployment.
- The unified IR realizes the device-cloud AI application integration.
2. Lightweight
- Provides model compression, which can also help to improve performance.
- Provides the ultra-lightweight inference solution MindSpore Micro to meet the deployment requirements in extreme environments such as smart watches and headphones.
3. High-performance
- The built-in high-performance kernel computing library NNACL supports multiple convolution optimization algorithms such as Sliding window, im2col+gemm, Winograd, etc.
- Assembly code to improve performance of kernel operators. Supports CPU, GPU, and NPU.
4. Versatility
- Supports iOS and Android.
- Supports Lite OS.
- Supports mobile devices, smart screens, pads, and IoT devices.
- Supports third party models such as TFLite, CAFFE and ONNX.
## MindSpore Lite AI deployment procedure
1. Model selection and personalized training
Select a new model or use an existing model for incremental training using labeled data. When designing a model for mobile device, it is necessary to consider the model size, accuracy and calculation amount.
The MindSpore team provides a series of pre-training models used for image classification, object detection. You can use these pre-trained models in your application.
The pre-trained models provided by MindSpore include: [Image Classification](https://download.mindspore.cn/model_zoo/official/lite/) and [Object Detection](https://download.mindspore.cn/model_zoo/official/lite/). More models will be provided in the future.
MindSpore allows you to retrain pre-trained models to perform other tasks. For example: using a pre-trained image classification model, it can be retrained to recognize new image types. See [Retraining](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html).
2. Model converter and optimization
If you use MindSpore or a third-party model, you need to use [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/converter_tool.html) to convert the model into MindSpore Lite model. The MindSpore Lite model converter tool provides the converter of TensorFlow Lite, Caffe, ONNX to MindSpore Lite model, fusion and quantization could be introduced during convert procedure.
MindSpore also provides a tool to convert models for running on IoT devices.
3. Model deployment
This stage mainly realizes model deployment, including model management, deployment, operation and maintenance monitoring, etc.
4. Inference
Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/runtime.html) is the process of running input data through the model to get output.
MindSpore provides a series of pre-trained models that can be deployed on mobile device [example](#TODO).

@ -0,0 +1,66 @@

[View English](./README.md)
## MindSpore Lite介绍
MindSpore Lite是MindSpore推出的端云协同的、轻量化、高性能AI推理框架,用于满足越来越多的端侧AI应用需求。MindSpore Lite聚焦AI技术在端侧设备上的部署和运行,已经在华为HMS和智能终端的图像分类、目标识别、人脸识别、文字识别等应用中广泛使用,未来MindSpore Lite将与MindSpore AI社区一起,致力于丰富AI软硬件应用生态。
<img src="../../docs/MindSpore-Lite-architecture.png" alt="MindSpore Lite Architecture" width="600"/>
欲了解更多详情,请查看我们的[MindSpore Lite 总体架构](https://www.mindspore.cn/lite/docs/zh-CN/master/architecture.html)。
## MindSpore Lite技术特点
1. 端云协同提供一站式训练和推理
- 提供模型训练、模型转换优化、部署和推理端到端流程。
- 统一的IR实现端云AI应用一体化。
2. 超轻量
- 支持模型量化压缩,模型更小跑得更快。
- 提供超轻量的推理解决方案MindSpore Micro满足智能手表、耳机等极限环境下的部署要求。
3. 高性能
- 自带的高性能内核计算库NNACL支持Sliding Windows、Im2Col+GEMM、Winograd等多种卷积优化算法。
- 汇编级优化支持CPU、GPU、NPU异构调度最大化发挥硬件算力最小化推理时延和功耗。
4. 广覆盖
- 支持iOS、Android等手机操作系统。
- 支持LiteOS嵌入式操作系统。
- 支持手机、大屏、平板、IoT等各种智能设备上的AI应用。
- 支持MindSpore/TensorFlow Lite/Caffe/ONNX模型方便用户快速部署。
## MindSpore Lite AI部署流程
1. 模型选择和个性化训练
包括选择新模型或对已有模型,利用标注数据进行增量训练。面向端侧设计模型时,需要考虑模型大小、精度和计算量。
MindSpore团队提供了一系列预训练模型用于解决图像分类、目标检测等场景的学习问题。可以在您的应用程序中使用这些预训练模型对应的终端模型。
MindSpore提供的预训练模型包括[图像分类Image Classification](https://download.mindspore.cn/model_zoo/official/lite/)和[目标检测Object Detection](https://download.mindspore.cn/model_zoo/official/lite/)。后续MindSpore团队会增加更多的预置模型。
MindSpore允许您重新训练预训练模型以执行其他任务。比如使用预训练的图像分类模型可以重新训练来识别新的图像类型。参见[重训练](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html)。
2. 模型转换/优化
如果您使用MindSpore或第三方训练的模型需要使用[MindSpore Lite模型转换工具](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/converter_tool.html)转换成MindSpore Lite模型格式。MindSpore Lite模型转换工具不仅提供了将TensorFlow Lite、Caffe、ONNX等模型格式转换为MindSpore Lite模型格式还提供了算子融合、量化等功能。
MindSpore还提供了将IoT设备上运行的模型转换成.C代码的生成工具。
经过上述两个步骤,您已经得到端侧可以部署的模型。
3. 模型部署
这个阶段主要实现模型部署,包括模型管理、部署和运维监控等。
4. 模型推理
主要完成模型推理工作,即加载模型,完成模型相关的所有计算。[推理](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/runtime.html)是通过模型运行输入数据,获取预测的过程。
MindSpore提供了一系列预训练模型部署在智能终端的[样例](#TODO)。

@ -28,17 +28,17 @@ namespace mindspore::lite {
class Allocator;
/// \brief CpuBindMode defined for holding bind cpu strategy argument.
enum CpuBindMode {
typedef enum {
MID_CPU = -1, /**< bind middle cpu first */
HIGHER_CPU = 1, /**< bind higher cpu first */
NO_BIND = 0 /**< no bind */
};
} CpuBindMode;
/// \brief DeviceType defined for holding user's preferred backend.
typedef enum {
DT_CPU, /**< CPU device type */
DT_GPU, /**< GPU device type */
DT_NPU /**< NPU device type */
DT_NPU /**< NPU device type, not supported yet */
} DeviceType;
/// \brief DeviceContext defined for holding DeviceType.

@ -86,17 +86,34 @@ class MS_API LiteSession {
/// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h.
virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0;
/// \brief Get output MindSpore Lite MSTensors of model.
/// \brief Get output MindSpore Lite MSTensors of model mapped by node name.
///
/// \return The map of output node name and MindSpore Lite MSTensor.
virtual std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> GetOutputs() const = 0;
virtual std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> GetOutputMapByNode() const = 0;
/// \brief Get output MindSpore Lite MSTensors of model by node name.
///
/// \param[in] node_name Define node name.
///
/// \return The vector of MindSpore Lite MSTensor.
virtual std::vector<tensor::MSTensor *> GetOutputsByName(const std::string &node_name) const = 0;
virtual std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const = 0;
/// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name.
///
/// \return The map of output tensor name and MindSpore Lite MSTensor.
virtual std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputMapByTensor() const = 0;
/// \brief Get name of output tensors of model compiled by this session.
///
/// \return The vector of string as output tensor names in order.
virtual std::vector<std::string> GetOutputTensorNames() const = 0;
/// \brief Get output MindSpore Lite MSTensors of model by tensor name.
///
/// \param[in] tensor_name Define tensor name.
///
/// \return Pointer of MindSpore Lite MSTensor.
virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const = 0;
/// \brief Resize inputs shape.
///

@ -24,8 +24,17 @@ namespace lite {
/// \brief Global method to get a version string.
///
/// \return The version string of MindSpore Lite.
#ifndef MS_VERSION_MAJOR
#define MS_VERSION_MAJOR 0
#endif
#ifndef MS_VERSION_MINOR
#define MS_VERSION_MINOR 7
#endif
#ifndef MS_VERSION_REVISION
#define MS_VERSION_REVISION 0
#endif
std::string Version() {
return "MindSpore Lite " + std::to_string(MS_VERSION_MAJOY) + "." + std::to_string(MS_VERSION_MINOR) + "." +
return "MindSpore Lite " + std::to_string(MS_VERSION_MAJOR) + "." + std::to_string(MS_VERSION_MINOR) + "." +
std::to_string(MS_VERSION_REVISION);
}
} // namespace lite

@ -15,10 +15,10 @@ fi
# copy arm64 so
cd ${TOP_PATH}/output/
rm -rf mindspore-lite-0.6.0
tar -zxvf mindspore-lite-0.6.0-runtime-arm64-cpu.tar.gz
rm -rf mindspore-lite-0.7.0
tar -zxvf mindspore-lite-0.7.0-runtime-arm64-cpu.tar.gz
mkdir -p ${BASE_PATH}/lib/
cp ${TOP_PATH}/output/mindspore-lite-0.6.0/lib/libmindspore-lite.so ${BASE_PATH}/lib/
cp ${TOP_PATH}/output/mindspore-lite-0.7.0-runtime-arm64-cpu/lib/libmindspore-lite.so ${BASE_PATH}/lib/
cp ${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android/libc++_shared.so ${BASE_PATH}/lib/
# build jni so

@ -76,8 +76,8 @@ public class LiteSession {
return tensors;
}
public Map<String, List<MSTensor>> getOutputs() {
Map<String, List<Long>> ret = this.getOutputs(this.sessionPtr);
public Map<String, List<MSTensor>> getOutputMapByNode() {
Map<String, List<Long>> ret = this.getOutputMapByNode(this.sessionPtr);
Map<String, List<MSTensor>> tensorMap = new HashMap<>();
Set<Map.Entry<String, List<Long>>> entrySet = ret.entrySet();
for (Map.Entry<String, List<Long>> entry : entrySet) {
@ -93,8 +93,8 @@ public class LiteSession {
return tensorMap;
}
public List<MSTensor> getOutputsByName(String nodeName) {
List<Long> ret = this.getOutputsByName(this.sessionPtr, nodeName);
public List<MSTensor> getOutputsByNodeName(String nodeName) {
List<Long> ret = this.getOutputsByNodeName(this.sessionPtr, nodeName);
ArrayList<MSTensor> tensors = new ArrayList<>();
for (Long msTensorAddr : ret) {
MSTensor msTensor = new MSTensor(msTensorAddr);
@ -103,6 +103,27 @@ public class LiteSession {
return tensors;
}
/**
 * Fetches the session's output tensors mapped by tensor name.
 *
 * <p>Delegates to the native {@code getOutputMapByTensor} and wraps each
 * returned native tensor handle in an {@link MSTensor}.
 *
 * @return map from output tensor name to its MSTensor wrapper
 */
public Map<String, MSTensor> getOutputMapByTensor() {
Map<String, Long> nativeOutputs = this.getOutputMapByTensor(this.sessionPtr);
Map<String, MSTensor> tensorMap = new HashMap<>();
for (Map.Entry<String, Long> output : nativeOutputs.entrySet()) {
tensorMap.put(output.getKey(), new MSTensor(output.getValue()));
}
return tensorMap;
}
/** Returns the names of the model's output tensors, in order, via the native session. */
public List<String> getOutputTensorNames() {
return getOutputTensorNames(this.sessionPtr);
}
/**
 * Looks up a single output tensor of the model by its tensor name.
 *
 * @param tensorName name of the output tensor to fetch
 * @return MSTensor wrapping the native tensor handle
 */
public MSTensor getOutputByTensorName(String tensorName) {
// Wrap the native handle returned by the JNI lookup.
return new MSTensor(getOutputByTensorName(this.sessionPtr, tensorName));
}
public void free() {
this.free(this.sessionPtr);
this.sessionPtr = 0;
@ -120,9 +141,15 @@ public class LiteSession {
private native List<Long> getInputsByName(long sessionPtr, String nodeName);
private native Map<String, List<Long>> getOutputs(long sessionPtr);
private native Map<String, List<Long>> getOutputMapByNode(long sessionPtr);
private native List<Long> getOutputsByNodeName(long sessionPtr, String nodeName);
private native Map<String, Long> getOutputMapByTensor(long sessionPtr);
private native List<String> getOutputTensorNames(long sessionPtr);
private native List<Long> getOutputsByName(long sessionPtr, String nodeName);
private native Long getOutputByTensorName(long sessionPtr, String tensorName);
private native void free(long sessionPtr);
}

@ -16,6 +16,10 @@
package com.mindspore.lite;
import android.util.Log;
import java.nio.ByteBuffer;
public class MSTensor {
private long tensorPtr;
@ -27,7 +31,7 @@ public class MSTensor {
this.tensorPtr = tensorPtr;
}
public boolean init (int dataType, int[] shape) {
public boolean init(int dataType, int[] shape) {
this.tensorPtr = createMSTensor(dataType, shape, shape.length);
return this.tensorPtr != 0;
}
@ -48,14 +52,30 @@ public class MSTensor {
this.setDataType(this.tensorPtr, dataType);
}
public byte[] getData() {
return this.getData(this.tensorPtr);
/** Returns the tensor data as a byte array (delegates to the native layer). */
public byte[] getByteData() {
return this.getByteData(this.tensorPtr);
}
/** Returns the tensor data as a float array (delegates to the native layer). */
public float[] getFloatData() {
return this.getFloatData(this.tensorPtr);
}
/** Returns the tensor data as an int array (delegates to the native layer). */
public int[] getIntData() {
return this.getIntData(this.tensorPtr);
}
/** Returns the tensor data as a long array (delegates to the native layer). */
public long[] getLongData() {
return this.getLongData(this.tensorPtr);
}
/** Copies {@code data} into the tensor via the native layer. */
public void setData(byte[] data) {
this.setData(this.tensorPtr, data, data.length);
}
/** Sets the tensor data from a ByteBuffer via the native layer. */
public void setData(ByteBuffer data) {
this.setByteBufferData(this.tensorPtr, data);
}
/** Returns the tensor's size as reported by the native layer. */
public long size() {
return this.size(this.tensorPtr);
}
@ -69,6 +89,24 @@ public class MSTensor {
this.tensorPtr = 0;
}
/**
 * Decodes a little-endian byte array into an array of IEEE-754 floats.
 *
 * @param bytes raw tensor data; length must be a multiple of 4
 * @return decoded float array, or null when the length is not a multiple of 4
 */
private float[] decodeBytes(byte[] bytes) {
if (bytes.length % 4 != 0) {
Log.e("MS_LITE", "Length of bytes should be multi of 4 ");
return null;
}
int size = bytes.length / 4;
float[] ret = new float[size];
// BUG FIX: iterate over BYTE offsets bounded by bytes.length. The original
// bound `i < size` (the float count) with a step of 4 decoded only the
// first quarter of the buffer, leaving the rest of `ret` zero-filled.
for (int i = 0; i < bytes.length; i += 4) {
int accNum = 0;
accNum = accNum | (bytes[i] & 0xff) << 0;
accNum = accNum | (bytes[i + 1] & 0xff) << 8;
accNum = accNum | (bytes[i + 2] & 0xff) << 16;
accNum = accNum | (bytes[i + 3] & 0xff) << 24;
ret[i / 4] = Float.intBitsToFloat(accNum);
}
return ret;
}
private native long createMSTensor(int dataType, int[] shape, int shapeLen);
private native int[] getShape(long tensorPtr);
@ -79,10 +117,18 @@ public class MSTensor {
private native boolean setDataType(long tensorPtr, int dataType);
private native byte[] getData(long tensorPtr);
private native byte[] getByteData(long tensorPtr);
private native long[] getLongData(long tensorPtr);
private native int[] getIntData(long tensorPtr);
private native float[] getFloatData(long tensorPtr);
private native boolean setData(long tensorPtr, byte[] data, long dataLen);
private native boolean setByteBufferData(long tensorPtr, ByteBuffer buffer);
private native long size(long tensorPtr);
private native int elementsNum(long tensorPtr);

@ -80,6 +80,11 @@ public class Model {
return ret;
}
/**
 * Loads a model from a file path through the native loader.
 *
 * @param modelPath path of the model file to load
 * @return true when the native loader returned a non-zero model pointer
 */
public boolean loadModel(String modelPath) {
// A zero pointer from the native layer signals a failed load.
long ptr = loadModelByPath(modelPath);
this.modelPtr = ptr;
return ptr != 0;
}
public void free() {
this.free(this.modelPtr);
this.modelPtr = 0;
@ -87,5 +92,7 @@ public class Model {
private native long loadModel(MappedByteBuffer buffer);
private native long loadModelByPath(String modelPath);
private native void free(long modelPtr);
}

@ -1,11 +1,11 @@
cmake_minimum_required(VERSION 3.14)
project (Lite-java)
set(MS_VERSION_MAJOY 0)
set(MS_VERSION_MAJOR 0)
set(MS_VERSION_MINOR 7)
set(MS_VERSION_REVISION 0)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMS_VERSION_MAJOY=${MS_VERSION_MAJOY} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMS_VERSION_MAJOY=${MS_VERSION_MAJOY} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMS_VERSION_MAJOR=${MS_VERSION_MAJOR} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMS_VERSION_MAJOR=${MS_VERSION_MAJOR} -DMS_VERSION_MINOR=${MS_VERSION_MINOR} -DMS_VERSION_REVISION=${MS_VERSION_REVISION}")
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../../lite/)

@ -14,12 +14,11 @@
* limitations under the License.
*/
#include "common/jni_utils.h"
#include <cstring>
char *JstringToChar(JNIEnv *env, jstring jstr) {
char *rtn = NULL;
char *rtn = nullptr;
jclass clsstring = env->FindClass("java/lang/String");
jstring strencode = env->NewStringUTF("GB2312");
jmethodID mid = env->GetMethodID(clsstring, "getBytes", "(Ljava/lang/String;)[B");

@ -18,6 +18,7 @@
#include <jni.h>
#include "common/ms_log.h"
#include "include/context.h"
#include "include/thread_pool_config.h"
extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_context_Context_createContext(JNIEnv *env, jobject thiz,
jint device_type,
@ -44,13 +45,13 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_context_Context_creat
}
switch (cpu_bind_mode) {
case -1:
context->cpu_bind_mode_ = mindspore::lite::MID_CPU;
context->cpu_bind_mode_ = MID_CPU;
break;
case 0:
context->cpu_bind_mode_ = mindspore::lite::NO_BIND;
context->cpu_bind_mode_ = NO_BIND;
break;
case 1:
context->cpu_bind_mode_ = mindspore::lite::HIGHER_CPU;
context->cpu_bind_mode_ = HIGHER_CPU;
break;
default:
MS_LOGE("Invalid cpu_bind_mode : %d", cpu_bind_mode);

@ -14,7 +14,6 @@
* limitations under the License.
*/
#include <jni.h>
#include "common/ms_log.h"
#include "common/jni_utils.h"
@ -22,7 +21,7 @@
#include "include/errorcode.h"
extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_LiteSession_createSession(JNIEnv *env, jobject thiz,
jlong context_ptr) {
jlong context_ptr) {
auto *pointer = reinterpret_cast<void *>(context_ptr);
if (pointer == nullptr) {
MS_LOGE("Context pointer from java is nullptr");
@ -38,8 +37,8 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_LiteSession_createSes
}
extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_LiteSession_compileGraph(JNIEnv *env, jobject thiz,
jlong session_ptr,
jlong model_ptr) {
jlong session_ptr,
jlong model_ptr) {
auto *session_pointer = reinterpret_cast<void *>(session_ptr);
if (session_pointer == nullptr) {
MS_LOGE("Session pointer from java is nullptr");
@ -58,7 +57,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_LiteSession_compil
}
extern "C" JNIEXPORT void JNICALL Java_com_mindspore_lite_LiteSession_bindThread(JNIEnv *env, jobject thiz,
jlong session_ptr, jboolean if_bind) {
jlong session_ptr, jboolean if_bind) {
auto *pointer = reinterpret_cast<void *>(session_ptr);
if (pointer == nullptr) {
MS_LOGE("Session pointer from java is nullptr");
@ -69,7 +68,7 @@ extern "C" JNIEXPORT void JNICALL Java_com_mindspore_lite_LiteSession_bindThread
}
extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_LiteSession_runGraph(JNIEnv *env, jobject thiz,
jlong session_ptr) {
jlong session_ptr) {
auto *pointer = reinterpret_cast<void *>(session_ptr);
if (pointer == nullptr) {
MS_LOGE("Session pointer from java is nullptr");
@ -81,7 +80,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_LiteSession_runGra
}
extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getInputs(JNIEnv *env, jobject thiz,
jlong session_ptr) {
jlong session_ptr) {
jclass array_list = env->FindClass("java/util/ArrayList");
jmethodID array_list_construct = env->GetMethodID(array_list, "<init>", "()V");
jobject ret = env->NewObject(array_list, array_list_construct);
@ -104,8 +103,8 @@ extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getInpu
}
extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getInputsByName(JNIEnv *env, jobject thiz,
jlong session_ptr,
jstring node_name) {
jlong session_ptr,
jstring node_name) {
jclass array_list = env->FindClass("java/util/ArrayList");
jmethodID array_list_construct = env->GetMethodID(array_list, "<init>", "()V");
jobject ret = env->NewObject(array_list, array_list_construct);
@ -127,8 +126,8 @@ extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getInpu
return ret;
}
extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutputs(JNIEnv *env, jobject thiz,
jlong session_ptr) {
extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutputMapByNode(JNIEnv *env, jobject thiz,
jlong session_ptr) {
jclass hash_map_clazz = env->FindClass("java/util/HashMap");
jmethodID hash_map_construct = env->GetMethodID(hash_map_clazz, "<init>", "()V");
jobject hash_map = env->NewObject(hash_map_clazz, hash_map_construct);
@ -140,7 +139,7 @@ extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutp
return hash_map;
}
auto *lite_session_ptr = static_cast<mindspore::session::LiteSession *>(pointer);
auto outputs = lite_session_ptr->GetOutputs();
auto outputs = lite_session_ptr->GetOutputMapByNode();
jclass long_object = env->FindClass("java/lang/Long");
jmethodID long_object_construct = env->GetMethodID(long_object, "<init>", "(J)V");
jclass array_list = env->FindClass("java/util/ArrayList");
@ -159,9 +158,9 @@ extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutp
return hash_map;
}
extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutputsByName(JNIEnv *env, jobject thiz,
jlong session_ptr,
jstring node_name) {
extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutputsByNodeName(JNIEnv *env, jobject thiz,
jlong session_ptr,
jstring node_name) {
jclass array_list = env->FindClass("java/util/ArrayList");
jmethodID array_list_construct = env->GetMethodID(array_list, "<init>", "()V");
jobject ret = env->NewObject(array_list, array_list_construct);
@ -175,7 +174,7 @@ extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutp
return ret;
}
auto *lite_session_ptr = static_cast<mindspore::session::LiteSession *>(pointer);
auto inputs = lite_session_ptr->GetOutputsByName(JstringToChar(env, node_name));
auto inputs = lite_session_ptr->GetOutputsByNodeName(JstringToChar(env, node_name));
for (auto input : inputs) {
jobject tensor_addr = env->NewObject(long_object, long_object_construct, jlong(input));
env->CallBooleanMethod(ret, array_list_add, tensor_addr);
@ -183,8 +182,66 @@ extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutp
return ret;
}
extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutputMapByTensor(JNIEnv *env, jobject thiz,
jlong session_ptr) {
jclass hash_map_clazz = env->FindClass("java/util/HashMap");
jmethodID hash_map_construct = env->GetMethodID(hash_map_clazz, "<init>", "()V");
jobject hash_map = env->NewObject(hash_map_clazz, hash_map_construct);
jmethodID hash_map_put =
env->GetMethodID(hash_map_clazz, "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
auto *pointer = reinterpret_cast<void *>(session_ptr);
if (pointer == nullptr) {
MS_LOGE("Session pointer from java is nullptr");
return hash_map;
}
auto *lite_session_ptr = static_cast<mindspore::session::LiteSession *>(pointer);
auto outputs = lite_session_ptr->GetOutputMapByTensor();
jclass long_object = env->FindClass("java/lang/Long");
jmethodID long_object_construct = env->GetMethodID(long_object, "<init>", "(J)V");
for (auto output_iter : outputs) {
auto node_name = output_iter.first;
auto ms_tensor = output_iter.second;
jobject tensor_addr = env->NewObject(long_object, long_object_construct, jlong(ms_tensor));
env->CallObjectMethod(hash_map, hash_map_put, env->NewStringUTF(node_name.c_str()), tensor_addr);
}
return hash_map;
}
extern "C" JNIEXPORT jobject JNICALL Java_com_mindspore_lite_LiteSession_getOutputTensorNames(JNIEnv *env, jobject thiz,
jlong session_ptr) {
jclass array_list = env->FindClass("java/util/ArrayList");
jmethodID array_list_construct = env->GetMethodID(array_list, "<init>", "()V");
jobject ret = env->NewObject(array_list, array_list_construct);
jmethodID array_list_add = env->GetMethodID(array_list, "add", "(Ljava/lang/Object;)Z");
auto *pointer = reinterpret_cast<void *>(session_ptr);
if (pointer == nullptr) {
MS_LOGE("Session pointer from java is nullptr");
return ret;
}
auto *lite_session_ptr = static_cast<mindspore::session::LiteSession *>(pointer);
auto output_names = lite_session_ptr->GetOutputTensorNames();
for (auto output_name : output_names) {
env->CallBooleanMethod(ret, array_list_add, env->NewStringUTF(output_name.c_str()));
}
return ret;
}
extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_LiteSession_getOutputByTensorName(JNIEnv *env, jobject thiz,
jlong session_ptr,
jstring tensor_name) {
auto *pointer = reinterpret_cast<void *>(session_ptr);
if (pointer == nullptr) {
MS_LOGE("Session pointer from java is nullptr");
return jlong(nullptr);
}
auto *lite_session_ptr = static_cast<mindspore::session::LiteSession *>(pointer);
auto output = lite_session_ptr->GetOutputByTensorName(JstringToChar(env, tensor_name));
return jlong(output);
}
extern "C" JNIEXPORT void JNICALL Java_com_mindspore_lite_LiteSession_free(JNIEnv *env, jobject thiz,
jlong session_ptr) {
jlong session_ptr) {
auto *pointer = reinterpret_cast<void *>(session_ptr);
if (pointer == nullptr) {
MS_LOGE("Session pointer from java is nullptr");

@ -14,9 +14,10 @@
* limitations under the License.
*/
#include <jni.h>
#include <fstream>
#include "common/ms_log.h"
#include "common/jni_utils.h"
#include "include/model.h"
extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_Model_loadModel(JNIEnv *env, jobject thiz, jobject buffer) {
@ -38,6 +39,46 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_Model_loadModel(JNIEn
return reinterpret_cast<jlong>(model);
}
extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_Model_loadModelByPath(JNIEnv *env, jobject thiz,
jstring model_path) {
auto model_path_char = JstringToChar(env, model_path);
if (nullptr == model_path_char) {
MS_LOGE("model_path_char is nullptr");
return reinterpret_cast<jlong>(nullptr);
}
std::ifstream ifs(model_path_char);
if (!ifs.good()) {
MS_LOGE("file: %s is not exist", model_path_char);
return reinterpret_cast<jlong>(nullptr);
}
if (!ifs.is_open()) {
MS_LOGE("file: %s open failed", model_path_char);
return reinterpret_cast<jlong>(nullptr);
}
ifs.seekg(0, std::ios::end);
auto size = ifs.tellg();
std::unique_ptr<char[]> buf(new (std::nothrow) char[size]);
if (buf == nullptr) {
MS_LOGE("malloc buf failed, file: %s", model_path_char);
ifs.close();
return reinterpret_cast<jlong>(nullptr);
}
ifs.seekg(0, std::ios::beg);
ifs.read(buf.get(), size);
ifs.close();
delete[](model_path_char);
MS_LOGD("Start Loading model");
auto model = mindspore::lite::Model::Import(buf.get(), size);
if (model == nullptr) {
MS_LOGE("Import model failed");
return reinterpret_cast<jlong>(nullptr);
}
return reinterpret_cast<jlong>(model);
}
extern "C" JNIEXPORT void JNICALL Java_com_mindspore_lite_Model_free(JNIEnv *env, jobject thiz, jlong model_ptr) {
auto *pointer = reinterpret_cast<void *>(model_ptr);
if (pointer == nullptr) {

@ -14,15 +14,14 @@
* limitations under the License.
*/
#include <jni.h>
#include "common/ms_log.h"
#include "include/ms_tensor.h"
#include "ir/dtype/type_id.h"
extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_MSTensor_createMSTensor(JNIEnv *env, jobject thiz,
jint data_type, jintArray shape,
jint shape_len) {
jint data_type, jintArray shape,
jint shape_len) {
jboolean is_copy = false;
jint *local_shape_arr = env->GetIntArrayElements(shape, &is_copy);
std::vector<int> local_shape(shape_len);
@ -39,7 +38,7 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_MSTensor_createMSTens
}
extern "C" JNIEXPORT jintArray JNICALL Java_com_mindspore_lite_MSTensor_getShape(JNIEnv *env, jobject thiz,
jlong tensor_ptr) {
jlong tensor_ptr) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
@ -59,8 +58,8 @@ extern "C" JNIEXPORT jintArray JNICALL Java_com_mindspore_lite_MSTensor_getShape
}
extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setShape(JNIEnv *env, jobject thiz,
jlong tensor_ptr, jintArray shape,
jint shape_len) {
jlong tensor_ptr, jintArray shape,
jint shape_len) {
jboolean is_copy = false;
jint *local_shape_arr = env->GetIntArrayElements(shape, &is_copy);
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
@ -78,7 +77,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setShape(
}
extern "C" JNIEXPORT jint JNICALL Java_com_mindspore_lite_MSTensor_getDataType(JNIEnv *env, jobject thiz,
jlong tensor_ptr) {
jlong tensor_ptr) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
@ -89,7 +88,7 @@ extern "C" JNIEXPORT jint JNICALL Java_com_mindspore_lite_MSTensor_getDataType(J
}
extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setDataType(JNIEnv *env, jobject thiz,
jlong tensor_ptr, jint data_type) {
jlong tensor_ptr, jint data_type) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
@ -100,8 +99,8 @@ extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setDataTy
return ret == data_type;
}
extern "C" JNIEXPORT jbyteArray JNICALL Java_com_mindspore_lite_MSTensor_getData(JNIEnv *env, jobject thiz,
jlong tensor_ptr) {
extern "C" JNIEXPORT jbyteArray JNICALL Java_com_mindspore_lite_MSTensor_getByteData(JNIEnv *env, jobject thiz,
jlong tensor_ptr) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
@ -113,20 +112,134 @@ extern "C" JNIEXPORT jbyteArray JNICALL Java_com_mindspore_lite_MSTensor_getData
MS_LOGD("Tensor has no data");
return env->NewByteArray(0);
}
auto local_data_size = ms_tensor_ptr->Size();
auto ret = env->NewByteArray(local_data_size);
env->SetByteArrayRegion(ret, 0, local_data_size, local_data);
if (ms_tensor_ptr->data_type() != mindspore::kNumberTypeUInt8) {
MS_LOGE("data type is error : %d", ms_tensor_ptr->data_type());
return env->NewByteArray(0);
}
auto local_element_num = ms_tensor_ptr->ElementsNum();
auto ret = env->NewByteArray(local_element_num);
env->SetByteArrayRegion(ret, 0, local_element_num, local_data);
return ret;
}
extern "C" JNIEXPORT jlongArray JNICALL Java_com_mindspore_lite_MSTensor_getLongData(JNIEnv *env, jobject thiz,
jlong tensor_ptr) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
return env->NewLongArray(0);
}
auto *ms_tensor_ptr = static_cast<mindspore::tensor::MSTensor *>(pointer);
auto *local_data = static_cast<jlong *>(ms_tensor_ptr->MutableData());
if (local_data == nullptr) {
MS_LOGD("Tensor has no data");
return env->NewLongArray(0);
}
if (ms_tensor_ptr->data_type() != mindspore::kNumberTypeInt64) {
MS_LOGE("data type is error : %d", ms_tensor_ptr->data_type());
return env->NewLongArray(0);
}
auto local_element_num = ms_tensor_ptr->ElementsNum();
auto ret = env->NewLongArray(local_element_num);
env->SetLongArrayRegion(ret, 0, local_element_num, local_data);
return ret;
}
extern "C" JNIEXPORT jintArray JNICALL Java_com_mindspore_lite_MSTensor_getIntData(JNIEnv *env, jobject thiz,
jlong tensor_ptr) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
return env->NewIntArray(0);
}
auto *ms_tensor_ptr = static_cast<mindspore::tensor::MSTensor *>(pointer);
auto *local_data = static_cast<jint *>(ms_tensor_ptr->MutableData());
if (local_data == nullptr) {
MS_LOGD("Tensor has no data");
return env->NewIntArray(0);
}
if (ms_tensor_ptr->data_type() != mindspore::kNumberTypeInt32) {
MS_LOGE("data type is error : %d", ms_tensor_ptr->data_type());
return env->NewIntArray(0);
}
auto local_element_num = ms_tensor_ptr->ElementsNum();
auto ret = env->NewIntArray(local_element_num);
env->SetIntArrayRegion(ret, 0, local_element_num, local_data);
return ret;
}
extern "C" JNIEXPORT jfloatArray JNICALL Java_com_mindspore_lite_MSTensor_getFloatData(JNIEnv *env, jobject thiz,
jlong tensor_ptr) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
return env->NewFloatArray(0);
}
auto *ms_tensor_ptr = static_cast<mindspore::tensor::MSTensor *>(pointer);
auto *local_data = static_cast<jfloat *>(ms_tensor_ptr->MutableData());
if (local_data == nullptr) {
MS_LOGD("Tensor has no data");
return env->NewFloatArray(0);
}
if (ms_tensor_ptr->data_type() != mindspore::kNumberTypeFloat32) {
MS_LOGE("data type is error : %d", ms_tensor_ptr->data_type());
return env->NewFloatArray(0);
}
auto local_element_num = ms_tensor_ptr->ElementsNum();
auto ret = env->NewFloatArray(local_element_num);
env->SetFloatArrayRegion(ret, 0, local_element_num, local_data);
return ret;
}
extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setData(JNIEnv *env, jobject thiz,
jlong tensor_ptr, jbyteArray data,
jlong data_len) {
jlong tensor_ptr, jbyteArray data,
jlong data_len) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
return static_cast<jboolean>(false);
}
auto *ms_tensor_ptr = static_cast<mindspore::tensor::MSTensor *>(pointer);
if (data_len != ms_tensor_ptr->Size()) {
MS_LOGE("data_len(%ld) not equal to Size of ms_tensor(%zu)", data_len, ms_tensor_ptr->Size());
return static_cast<jboolean>(false);
}
jboolean is_copy = false;
auto *data_arr = env->GetByteArrayElements(data, &is_copy);
auto *local_data = ms_tensor_ptr->MutableData();
memcpy(local_data, data_arr, data_len);
return static_cast<jboolean>(true);
}
extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setByteBufferData(JNIEnv *env, jobject thiz,
jlong tensor_ptr,
jobject buffer) {
jbyte *p_data = reinterpret_cast<jbyte *>(env->GetDirectBufferAddress(buffer)); // get buffer poiter
jlong data_len = env->GetDirectBufferCapacity(buffer); // get buffer capacity
if (!p_data) {
MS_LOGE("GetDirectBufferAddress return null");
return NULL;
}
jbyteArray data = env->NewByteArray(data_len); // create byte[]
env->SetByteArrayRegion(data, 0, data_len, p_data); // copy data to byte[]
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");
return static_cast<jboolean>(false);
}
auto *ms_tensor_ptr = static_cast<mindspore::tensor::MSTensor *>(pointer);
if (data_len != ms_tensor_ptr->Size()) {
MS_LOGE("data_len(%ld) not equal to Size of ms_tensor(%zu)", data_len, ms_tensor_ptr->Size());
@ -150,7 +263,7 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_MSTensor_size(JNIEnv
}
extern "C" JNIEXPORT jint JNICALL Java_com_mindspore_lite_MSTensor_elementsNum(JNIEnv *env, jobject thiz,
jlong tensor_ptr) {
jlong tensor_ptr) {
auto *pointer = reinterpret_cast<void *>(tensor_ptr);
if (pointer == nullptr) {
MS_LOGE("Tensor pointer from java is nullptr");

@ -32,9 +32,11 @@ if (PLATFORM_ARM64)
)
set_target_properties(optimize PROPERTIES CLEAN_DIRECT_OUTPUT 1)
add_custom_command(TARGET optimize POST_BUILD
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
add_custom_command(TARGET optimize POST_BUILD
COMMAND ${ANDROID_NDK}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/aarch64-linux-android/bin/strip
${TOP_DIR}/build/nnacl/liboptimize.so)
endif ()
add_custom_command(TARGET optimize POST_BUILD
COMMAND rm -rf ${TOP_DIR}/output/lib/liboptimize.so

@ -51,6 +51,8 @@ void TileOneDimension(float *inData, float *outData, int dim, size_t ndim, int *
int *outStrides, int *multiple);
void ComputeStrides(int *shape, int *strides, int ndim);
void CalcMultiplesAndStrides(ArithmeticParameter *param);
void TileDimensions(float *data0, float *data1, float *tile_data0, float *tile_data1, ArithmeticParameter *param);
void TileDimensionsUint8(uint8_t *data0, uint8_t *data1, uint8_t *tile_data0, uint8_t *tile_data1,
ArithmeticParameter *param);

@ -29,7 +29,7 @@ mov x6, x1
mov x7, x2
mov x8, x4
LoopInputDepth16In:
LoopDepth16In:
cmp x8, #16
blt L4
sub x8, x8, #16
@ -39,8 +39,8 @@ mov x8, x4
ld1 {v16.4s, v17.4s}, [x0], #32
cmp x8, #16
blt LoopInputDepth16Out
LoopInputDepth16:
blt LoopDepth16Out
LoopDepth16:
fmla v16.4s, v0.4s, v2.4s
fmla v17.4s, v1.4s, v3.4s
@ -61,9 +61,9 @@ mov x8, x4
sub x8, x8, #16
cmp x8, #16
bge LoopInputDepth16
bge LoopDepth16
LoopInputDepth16Out:
LoopDepth16Out:
fmla v16.4s, v0.4s, v2.4s
fmla v17.4s, v1.4s, v3.4s
st1 {v16.4s, v17.4s}, [x9], #32
@ -81,7 +81,7 @@ mov x8, x4
cmp x8, #4
blt L0
LoopInputDepth4:
LoopDepth4:
ld1 {v0.4s}, [x6], #16
ld1 {v2.4s}, [x7], #16
ld1 {v16.4s}, [x0], #16
@ -89,13 +89,13 @@ mov x8, x4
st1 {v16.4s}, [x9], #16
sub x8, x8, #4
cmp x8, #4
bge LoopInputDepth4
bge LoopDepth4
L0:
cmp x8, #0
beq Loop16LineEnd
LoopInputDepth0:
LoopDepth0:
ldr s0, [x6], #4
ldr s1, [x7], #4
ldr s2, [x0], #4
@ -103,7 +103,7 @@ mov x8, x4
fadd s2, s2, s0
str s2, [x9], #4
subs x8, x8, #1
bne LoopInputDepth0
bne LoopDepth0
Loop16LineEnd:

@ -90,36 +90,36 @@ ConvDwInt8Center:
LoopKw16:
mov x22, x21
ld1 {v25.4h}, [x17], #8
ld1 {v16.4h}, [x22], x13
ld1 {v17.4h}, [x22], x13
ld1 {v16.4h}, [x22], x11
ld1 {v17.4h}, [x22], x11
smlal v0.4s, v16.4h, v25.4h
smlal v1.4s, v17.4h, v25.4h
ld1 {v18.4h}, [x22], x13
ld1 {v19.4h}, [x22], x13
ld1 {v18.4h}, [x22], x11
ld1 {v19.4h}, [x22], x11
smlal v2.4s, v18.4h, v25.4h
smlal v3.4s, v19.4h, v25.4h
ld1 {v20.4h}, [x22], x13
ld1 {v21.4h}, [x22], x13
ld1 {v20.4h}, [x22], x11
ld1 {v21.4h}, [x22], x11
smlal v4.4s, v20.4h, v25.4h
smlal v5.4s, v21.4h, v25.4h
ld1 {v22.4h}, [x22], x13
ld1 {v23.4h}, [x22], x13
ld1 {v22.4h}, [x22], x11
ld1 {v23.4h}, [x22], x11
smlal v6.4s, v22.4h, v25.4h
smlal v7.4s, v23.4h, v25.4h
ld1 {v16.4h}, [x22], x13
ld1 {v17.4h}, [x22], x13
ld1 {v16.4h}, [x22], x11
ld1 {v17.4h}, [x22], x11
smlal v8.4s, v16.4h, v25.4h
smlal v9.4s, v17.4h, v25.4h
ld1 {v18.4h}, [x22], x13
ld1 {v19.4h}, [x22], x13
ld1 {v18.4h}, [x22], x11
ld1 {v19.4h}, [x22], x11
smlal v10.4s, v18.4h, v25.4h
smlal v11.4s, v19.4h, v25.4h
ld1 {v20.4h}, [x22], x13
ld1 {v21.4h}, [x22], x13
ld1 {v20.4h}, [x22], x11
ld1 {v21.4h}, [x22], x11
smlal v12.4s, v20.4h, v25.4h
smlal v13.4s, v21.4h, v25.4h
ld1 {v22.4h}, [x22], x13
ld1 {v23.4h}, [x22], x13
ld1 {v22.4h}, [x22], x11
ld1 {v23.4h}, [x22], x11
smlal v14.4s, v22.4h, v25.4h
smlal v15.4s, v23.4h, v25.4h
subs x18, x18, #1
@ -420,20 +420,20 @@ ConvDwInt8Center:
LoopKw8:
mov x22, x21
ld1 {v25.4h}, [x17], #8
ld1 {v16.4h}, [x22], x13
ld1 {v17.4h}, [x22], x13
ld1 {v16.4h}, [x22], x11
ld1 {v17.4h}, [x22], x11
smlal v0.4s, v16.4h, v25.4h
smlal v1.4s, v17.4h, v25.4h
ld1 {v18.4h}, [x22], x13
ld1 {v19.4h}, [x22], x13
ld1 {v18.4h}, [x22], x11
ld1 {v19.4h}, [x22], x11
smlal v2.4s, v18.4h, v25.4h
smlal v3.4s, v19.4h, v25.4h
ld1 {v20.4h}, [x22], x13
ld1 {v21.4h}, [x22], x13
ld1 {v20.4h}, [x22], x11
ld1 {v21.4h}, [x22], x11
smlal v4.4s, v20.4h, v25.4h
smlal v5.4s, v21.4h, v25.4h
ld1 {v22.4h}, [x22], x13
ld1 {v23.4h}, [x22], x13
ld1 {v22.4h}, [x22], x11
ld1 {v23.4h}, [x22], x11
smlal v6.4s, v22.4h, v25.4h
smlal v7.4s, v23.4h, v25.4h
subs x18, x18, #1

@ -0,0 +1,169 @@
#ifdef __aarch64__
.text
.align 5
.global ConvDwInt8PostAlign4
#ifndef __APPLE__
.type ConvDwInt8PostAlign4, %function
#endif
// void ConvDwInt8PostAlign4(int8_t *dst, int32_t *buffer, int num_pixels, int32_t output_zp, int32_t out_multiplier,
// int32_t left_shift, int32_t right_shift, int32_t acc_min, int32_t acc_max);
// x0: dst, x1: buffer, x2: num_pixels, x3: output_zp, x4: out_multiplier,
// x5: left_shift, x6: right_shift, x7: acc_min, x8: acc_max
ConvDwInt8PostAlign4:
// registers v8 ~ v15 must be preserved by a callee across subroutine calls, according to
// https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst#simd-and-floating-point-registers
// x19 ~ x29 should be also preserved
// whereas our coding style do not permit such amount of parameters
ldr x8, [sp]
dup v26.4s, w5
dup v27.4s, w4
dup v28.4s, w6
dup v29.4s, w3
dup v30.4s, w7
dup v31.4s, w8
cmp x2, 16
blt LoopDepth8
LoopDepth16:
ld1 {v0.4s}, [x1], #16
ld1 {v1.4s}, [x1], #16
ld1 {v2.4s}, [x1], #16
ld1 {v3.4s}, [x1], #16
sqshl v0.4s, v0.4s, v26.4s
sqshl v1.4s, v1.4s, v26.4s
sqshl v2.4s, v2.4s, v26.4s
sqshl v3.4s, v3.4s, v26.4s
sqrdmulh v0.4s, v0.4s, v27.4s
sqrdmulh v1.4s, v1.4s, v27.4s
sqrdmulh v2.4s, v2.4s, v27.4s
sqrdmulh v3.4s, v3.4s, v27.4s
and v16.16b, v28.16b, v0.16b
sshr v16.4s, v16.4s, #31
sqadd v0.4s, v0.4s, v16.4s
srshl v0.4s, v0.4s, v28.4s
and v17.16b, v28.16b, v1.16b
sshr v17.4s, v17.4s, #31
sqadd v1.4s, v1.4s, v17.4s
srshl v1.4s, v1.4s, v28.4s
and v18.16b, v28.16b, v2.16b
sshr v18.4s, v18.4s, #31
sqadd v2.4s, v2.4s, v18.4s
srshl v2.4s, v2.4s, v28.4s
and v19.16b, v28.16b, v3.16b
sshr v19.4s, v19.4s, #31
sqadd v3.4s, v3.4s, v19.4s
srshl v3.4s, v3.4s, v28.4s
add v0.4s, v0.4s, v29.4s
add v1.4s, v1.4s, v29.4s
add v2.4s, v2.4s, v29.4s
add v3.4s, v3.4s, v29.4s
smax v0.4s, v0.4s, v30.4s
smax v1.4s, v1.4s, v30.4s
smax v2.4s, v2.4s, v30.4s
smax v3.4s, v3.4s, v30.4s
smin v0.4s, v0.4s, v31.4s
smin v1.4s, v1.4s, v31.4s
smin v2.4s, v2.4s, v31.4s
smin v3.4s, v3.4s, v31.4s
sqxtn v0.4h, v0.4s
sqxtn v1.4h, v1.4s
sqxtn v2.4h, v2.4s
sqxtn v3.4h, v3.4s
sqxtn v0.8b, v0.8h
sqxtn v1.8b, v1.8h
sqxtn v2.8b, v2.8h
sqxtn v3.8b, v3.8h
st1 {v0.s}[0], [x0], #4
st1 {v1.s}[0], [x0], #4
st1 {v2.s}[0], [x0], #4
st1 {v3.s}[0], [x0], #4
sub x2, x2, #16
cmp x2, #16
bge LoopDepth16
LoopDepth8:
cmp x2, #8
blt LoopDepth4
ld1 {v0.4s}, [x1], #16
ld1 {v1.4s}, [x1], #16
sqshl v0.4s, v0.4s, v26.4s
sqshl v1.4s, v1.4s, v26.4s
sqrdmulh v0.4s, v0.4s, v27.4s
sqrdmulh v1.4s, v1.4s, v27.4s
and v16.16b, v28.16b, v0.16b
sshr v16.4s, v16.4s, #31
sqadd v0.4s, v0.4s, v16.4s
srshl v0.4s, v0.4s, v28.4s
and v17.16b, v28.16b, v1.16b
sshr v17.4s, v17.4s, #31
sqadd v1.4s, v1.4s, v17.4s
srshl v1.4s, v1.4s, v28.4s
add v0.4s, v0.4s, v29.4s
add v1.4s, v1.4s, v29.4s
smax v0.4s, v0.4s, v30.4s
smax v1.4s, v1.4s, v30.4s
smin v0.4s, v0.4s, v31.4s
smin v1.4s, v1.4s, v31.4s
sqxtn v0.4h, v0.4s
sqxtn v1.4h, v1.4s
sqxtn v0.8b, v0.8h
sqxtn v1.8b, v1.8h
st1 {v0.s}[0], [x0], #4
st1 {v1.s}[0], [x0], #4
sub x2, x2, #8
cmp x2, #8
bge LoopDepth8
LoopDepth4:
cmp x2, #4
blt End
ld1 {v0.4s}, [x1], #16
sqshl v0.4s, v0.4s, v26.4s
sqrdmulh v0.4s, v0.4s, v27.4s
and v16.16b, v28.16b, v0.16b
sshr v16.4s, v16.4s, #31
sqadd v0.4s, v0.4s, v16.4s
srshl v0.4s, v0.4s, v28.4s
add v0.4s, v0.4s, v29.4s
smax v0.4s, v0.4s, v30.4s
smin v0.4s, v0.4s, v31.4s
sqxtn v0.4h, v0.4s
sqxtn v0.8b, v0.8h
st1 {v0.s}[0], [x0], #4
sub x2, x2, #4
bge LoopDepth4
End:
ret
#endif

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save