From 0149d56f4c7ace64c8e4acbc10b4561575417fac Mon Sep 17 00:00:00 2001
From: yeyunpeng
Date: Thu, 24 Dec 2020 14:34:10 +0800
Subject: [PATCH] change min emui version

---
 mindspore/lite/include/context.h               |  2 +-
 .../lite/src/runtime/agent/npu/npu_executor.cc |  9 +++++----
 .../lite/src/runtime/agent/npu/npu_manager.cc  |  4 +++-
 .../runtime/agent/npu/subgraph_npu_kernel.cc   |  2 +-
 mindspore/lite/test/models_npu.cfg             |  3 +++
 mindspore/lite/test/run_benchmark_nets.sh      | 17 +++++++++++++++++
 6 files changed, 30 insertions(+), 7 deletions(-)
 create mode 100644 mindspore/lite/test/models_npu.cfg

diff --git a/mindspore/lite/include/context.h b/mindspore/lite/include/context.h
index 127933c275..70a3c1a3c9 100644
--- a/mindspore/lite/include/context.h
+++ b/mindspore/lite/include/context.h
@@ -34,7 +34,7 @@ typedef enum {
 typedef enum {
   DT_CPU, /**< CPU device type */
   DT_GPU, /**< GPU device type */
-  DT_NPU  /**< NPU device type, not supported yet */
+  DT_NPU  /**< NPU device type */
 } DeviceType;
 
 /// \brief CpuDeviceInfo defined for CPU's configuration information.
diff --git a/mindspore/lite/src/runtime/agent/npu/npu_executor.cc b/mindspore/lite/src/runtime/agent/npu/npu_executor.cc
index da34dc233b..f30be3053c 100644
--- a/mindspore/lite/src/runtime/agent/npu/npu_executor.cc
+++ b/mindspore/lite/src/runtime/agent/npu/npu_executor.cc
@@ -88,10 +88,11 @@ int NPUExecutor::Run(const std::vector<Tensor *> &in_tensors, const std::vector<
     if (std::find(trans_tensors.begin(), trans_tensors.end(), out_tensors[i]) != trans_tensors.end()) {
       // Change data&tensor shape nc->nh
-      PackNCHWToNHWCFp32(npu_output_tensors_[i]->GetBuffer(), data, out_tensors[i]->Batch(),
-                         out_tensors[i]->Width() * out_tensors[i]->Height(), out_tensors[i]->Channel());
-      out_tensors[i]->set_shape({out_tensors[i]->shape()[0], out_tensors[i]->shape()[2], out_tensors[i]->shape()[3],
-                                 out_tensors[i]->shape()[1]});
+      PackNCHWToNHWCFp32(npu_output_tensors_[i]->GetBuffer(), data,
+                         npu_output_tensors_[i]->GetTensorDimension().GetNumber(),
+                         npu_output_tensors_[i]->GetTensorDimension().GetWidth() *
+                           npu_output_tensors_[i]->GetTensorDimension().GetHeight(),
+                         npu_output_tensors_[i]->GetTensorDimension().GetChannel());
     } else {
       memcpy(data, npu_output_tensors_[i]->GetBuffer(), npu_output_tensors_[i]->GetSize());
       out_tensors[i]->ResetRefCount();
diff --git a/mindspore/lite/src/runtime/agent/npu/npu_manager.cc b/mindspore/lite/src/runtime/agent/npu/npu_manager.cc
index 0e1d802363..6357ee8821 100644
--- a/mindspore/lite/src/runtime/agent/npu/npu_manager.cc
+++ b/mindspore/lite/src/runtime/agent/npu/npu_manager.cc
@@ -47,7 +47,7 @@ bool NPUManager::CheckEMUIVersion() {
   int pos = emui_str.find('_');
   if (pos != std::string::npos) {
     auto version = emui_str.substr(pos + 1);
-    int ret = CompareVersion(version, "11.0.0");
+    int ret = CompareVersion(version, "10.0.0");
     if (ret < 0) {
       return false;
     }
@@ -138,6 +138,7 @@ int NPUManager::AddModel(domi::ModelBufferData *model_buffer_data, const std::st
   index_++;
   return RET_OK;
 }
+
 std::shared_ptr<hiai::AiModelMngerClient> NPUManager::CreateAiModelMngerClient() {
   auto client = std::make_shared<hiai::AiModelMngerClient>();
   if (client == nullptr) {
@@ -151,6 +152,7 @@ std::shared_ptr<hiai::AiModelMngerClient> NPUManager::CreateAiModelMngerClient()
   }
   return client;
 }
+
 int NPUManager::LoadOMModel() {
   std::vector<std::shared_ptr<hiai::AiModelDescription>> models_desc;
   std::shared_ptr<hiai::AiModelMngerClient> client = nullptr;
diff --git a/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.cc b/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.cc
index 365315e7bf..2718732209 100644
--- a/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.cc
+++ b/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.cc
@@ -78,7 +78,7 @@ domi::ModelBufferData *SubGraphNpuKernel::BuildIRModel() {
 }
 
 int SubGraphNpuKernel::Run() {
-  return reinterpret_cast<lite::NPUExecutor *>(this->executor_)->Run(in_tensors_, out_tensors_, out_kernels_, nodes_);
+  return reinterpret_cast<lite::NPUExecutor *>(this->executor_)->Run(in_tensors_, out_tensors_, out_nodes_, nodes_);
 }
 
 int SubGraphNpuKernel::BuildNPUInputOp() {
diff --git a/mindspore/lite/test/models_npu.cfg b/mindspore/lite/test/models_npu.cfg
new file mode 100644
index 0000000000..bf856efcd7
--- /dev/null
+++ b/mindspore/lite/test/models_npu.cfg
@@ -0,0 +1,3 @@
+mobilenet_v1_1.0_224.tflite 3
+squeezenet.tflite 3
+inception_v3.tflite 3
diff --git a/mindspore/lite/test/run_benchmark_nets.sh b/mindspore/lite/test/run_benchmark_nets.sh
index 7d3cfe01e3..b0aff472a0 100644
--- a/mindspore/lite/test/run_benchmark_nets.sh
+++ b/mindspore/lite/test/run_benchmark_nets.sh
@@ -1314,6 +1314,22 @@ function Run_arm64() {
         fi
     done < ${models_mindspore_weightquant_config}
 
+    # Run npu converted models:
+    while read line; do
+        model_name=`echo ${line}|awk -F ' ' '{print $1}'`
+        accuracy_limit=`echo ${line}|awk -F ' ' '{print $2}'`
+        echo "npu run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_arm64_log_file}"
+        echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --device=NPU --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --accuracyThreshold='${accuracy_limit} >> "${run_arm64_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --device=NPU --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --accuracyThreshold='${accuracy_limit} >> adb_run_cmd.txt
+        adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
+        if [ $? = 0 ]; then
+            run_result='arm64_npu: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='arm64_npu: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
+        fi
+    done < ${models_npu_config}
+
     # Run converted models which has several inputs or does not need to be cared about the accuracy:
     while read line; do
         model_name=${line%%;*}
@@ -1492,6 +1508,7 @@ models_gpu_fp16_config=${basepath}/models_gpu_fp16.cfg
 models_gpu_weightquant_config=${basepath}/models_gpu_weightquant.cfg
 models_mindspore_weightquant_config=${basepath}/models_mindspore_weightquant.cfg
 models_arm32_config=${basepath}/models_arm32.cfg
+models_npu_config=${basepath}/models_npu.cfg
 models_compatibility_config=${basepath}/models_compatibility.cfg
 models_only_for_process_config=${basepath}/models_with_several_inputs_or_without_outputs.cfg
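
Reviewer note (not part of the patch): the npu_manager.cc hunk above gates NPU use on
CompareVersion(version, "10.0.0") returning >= 0, where version is the substring after
the first '_' in the EMUI build string (so a format like "EMUI_10.0.0" is assumed).
CompareVersion() itself is not shown in this diff; the sketch below is a hypothetical
segment-by-segment numeric comparison consistent with how its return value is used.

// Hypothetical sketch of CompareVersion(); not the implementation in npu_manager.cc.
// Assumes dot-separated, purely numeric segments; missing segments compare as 0.
#include <algorithm>
#include <sstream>
#include <string>
#include <vector>

static std::vector<int> SplitVersion(const std::string &version) {
  std::vector<int> segments;
  std::stringstream ss(version);
  std::string item;
  while (std::getline(ss, item, '.')) {
    segments.push_back(std::stoi(item));  // would need guarding for non-numeric input
  }
  return segments;
}

// Returns <0 if lhs < rhs, 0 if equal, >0 if lhs > rhs.
static int CompareVersion(const std::string &lhs, const std::string &rhs) {
  auto a = SplitVersion(lhs);
  auto b = SplitVersion(rhs);
  size_t n = std::max(a.size(), b.size());
  for (size_t i = 0; i < n; ++i) {
    int x = i < a.size() ? a[i] : 0;
    int y = i < b.size() ? b[i] : 0;
    if (x != y) {
      return (x < y) ? -1 : 1;
    }
  }
  return 0;
}

Under this sketch an EMUI 9.x device makes CheckEMUIVersion() return false, while 10.x
and 11.x devices pass, matching the intent of lowering the minimum from 11.0.0 to 10.0.0.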