!3784 add lite/test

Merge pull request !3784 from wangzhe/master
pull/3784/MERGE
mindspore-ci-bot committed via Gitee
commit 0972d15629

@@ -1,9 +1,11 @@
set(TEST_DIR ${TOP_DIR}/tests/ut/cpp)
set(TEST_DIR ${TOP_DIR}/mindspore/lite/test)
set(LITE_DIR ${TOP_DIR}/mindspore/lite)
include_directories(${TOP_DIR})
include_directories(${TEST_DIR})
include_directories(${LITE_DIR})
include_directories(${LITE_DIR}/tools)
include_directories(${LITE_DIR}/lite)
include(${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/dependency_gtest.cmake)
include(${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/external_libs/gtest.cmake)
### anf src
set(ANF_SRC
@@ -158,7 +160,7 @@ set(TEST_LITE_SRC
${LITE_DIR}/tools/common/flag_parser.cc
${LITE_DIR}/tools/common/storage.cc
${LITE_DIR}/tools/benchmark/benchmark.cc
${LITE_DIR}/test/benchmark_test.cc
${LITE_DIR}/test/st/benchmark_test.cc
)
### gpu runtime
if (SUPPORT_GPU)
@@ -179,6 +181,7 @@ endif()
if(BUILD_CONVERTER)
set(TEST_LITE_SRC
${TEST_LITE_SRC}
${TOP_DIR}/mindspore/core/utils/flags.cc
${LITE_DIR}/tools/converter/optimizer.cc
${LITE_DIR}/src/common/anf_importer/anf_importer.cc
${LITE_DIR}/src/common/anf_importer/import_from_meta_graphT.cc
@@ -188,7 +191,7 @@ if(BUILD_CONVERTER)
${LITE_DIR}/tools/converter/converter_flags.cc
${LITE_DIR}/tools/converter/converter.cc
${LITE_DIR}/tools/converter/parser/onnx/onnx.pb.cc
${LITE_DIR}/test/converter_test.cc
${LITE_DIR}/test/st/converter_test.cc
${LITE_DIR}/src/gllo/common/node_pass.cc
${LITE_DIR}/src/gllo/common/optimizer.cc
${LITE_DIR}/src/gllo/common/pass_manager.cc
@@ -233,59 +236,50 @@ else()
endif()
### test src
file(GLOB_RECURSE TEST_CASE_KERNEL_SRC
${TEST_DIR}/kernel/cpu/arm/fp32/*.cc
${TEST_DIR}/kernel/cpu/arm/int8/*.cc
${TEST_DIR}/ut/src/runtime/kernel/arm/fp32/*.cc
${TEST_DIR}/ut/src/runtime/kernel/arm/int8/*.cc
)
set(TEST_SRC
${TEST_LITE_SRC}
${TEST_CASE_KERNEL_SRC}
${TEST_DIR}/common/common_test.cc
${TEST_DIR}/common/test_lite_main.cc
${TEST_DIR}/kernel/cpu/arm/common/pack_tests.cc
${TEST_DIR}/device/cpu/arm/infer_test.cc
${TEST_DIR}/main.cc
${TEST_DIR}/ut/src/runtime/kernel/arm/common/pack_tests.cc
${TEST_DIR}/ut/src/infer_test.cc
# ${TEST_DIR}/device/cpu/arm/graph_test.cc
)
if (SUPPORT_TRAIN)
set(TEST_SRC
${TEST_SRC}
${TEST_DIR}/device/cpu/arm/train_test.cc
${TEST_DIR}/ut/src/train_test.cc
)
else()
set(TEST_SRC
${TEST_SRC}
${TEST_DIR}/device/cpu/arm/infer_test.cc
${TEST_DIR}/ut/src/infer_test.cc
)
endif()
if (SUPPORT_GPU)
set(TEST_SRC
${TEST_SRC}
${TEST_DIR}/device/opencl/opencl_infer_tests.cc
${TEST_DIR}/kernel/opencl/utils_cl_tests.cc
${TEST_DIR}/kernel/opencl/arithmetic_tests.cc
${TEST_DIR}/kernel/opencl/convolution_tests.cc
${TEST_DIR}/kernel/opencl/depthwise_conv2d_tests.cc
${TEST_DIR}/kernel/opencl/matmul_tests.cc
${TEST_DIR}/kernel/opencl/max_pooling_cl_tests.cc
${TEST_DIR}/kernel/opencl/avg_pooling_cl_tests.cc
${TEST_DIR}/kernel/opencl/softmax_cl_tests.cc
${TEST_DIR}/kernel/opencl/concat_tests.cc
${TEST_DIR}/kernel/opencl/conv2d_transpose_tests.cc
${TEST_DIR}/ut/src/runtime/kernel/opencl/matmul_tests.cc
${TEST_DIR}/ut/src/runtime/kernel/opencl/softmax_cl_tests.cc
)
endif()
if (ENABLE_FP16)
set(TEST_SRC
${TEST_SRC}
${TEST_DIR}/kernel/cpu/arm/fp16/convolution_fp16_tests.cc)
${TEST_DIR}/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc)
endif ()
add_executable(lite-test ${TEST_SRC})
target_link_libraries(lite-test dl ${SECUREC_LIBRARY} ${GTEST_LIBRARY} mindspore::json)
target_link_libraries(lite-test dl ${SECUREC_LIBRARY} ${GTEST_LIBRARY} mindspore::json mindspore::gtest)
if (BUILD_CONVERTER)
target_link_libraries(lite-test
anf_exporter_mid

@@ -0,0 +1,41 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/common_test.h"
#include "mindspore/core/utils/log_adapter.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif
#endif
namespace mindspore {
void Common::SetUpTestCase() {}
void Common::TearDownTestCase() {}
void Common::SetUp() {}
void Common::TearDown() {}
} // namespace mindspore
#ifdef __cplusplus
#if __cplusplus
}
#endif
#endif

@@ -0,0 +1,78 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TESTS_UT_COMMON_UT_COMMON_H_
#define TESTS_UT_COMMON_UT_COMMON_H_
#include <cmath>
#include <fstream>
#include <iostream>
#include <string>
#include <algorithm>
#include "gtest/gtest.h"
namespace mindspore {
class Common : public testing::Test {
public:
// Run once for the entire test case
static void SetUpTestCase();
static void TearDownTestCase();
// SetUp/TearDown run around every TEST_F
virtual void SetUp();
virtual void TearDown();
template <typename T>
void PrintData(std::string name, T *output_data, int size) {
std::cout << "The " << name << " is as follows:" << std::endl;
if (typeid(output_data[0]) == typeid(uint8_t) || typeid(output_data[0]) == typeid(int8_t)) {
for (int i = 0; i < std::min(size, 100); i++) {
std::cout << static_cast<int>(output_data[i]) << " ";
}
} else {
for (int i = 0; i < std::min(size, 100); i++) {
std::cout << output_data[i] << " ";
}
}
std::cout << std::endl;
}
template <typename T>
static void CompareOutputData(T *output_data, T *correct_data, int size, float err_bound) {
for (int i = 0; i < size; i++) {
T abs = fabs(output_data[i] - correct_data[i]);
ASSERT_LE(abs, err_bound);
}
}
void ReadFile(const char *file, size_t *size, char **buf) {
ASSERT_NE(nullptr, file);
ASSERT_NE(nullptr, size);
ASSERT_NE(nullptr, buf);
std::string path = std::string(file);
std::ifstream ifs(path);
ASSERT_EQ(true, ifs.good());
ASSERT_EQ(true, ifs.is_open());
ifs.seekg(0, std::ios::end);
*size = ifs.tellg();
*buf = new char[*size];
ifs.seekg(0, std::ios::beg);
ifs.read(*buf, *size);
ifs.close();
}
};
} // namespace mindspore
#endif // TESTS_UT_COMMON_UT_COMMON_H_
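
The fixture above gives every lite test a uniform gtest entry point plus the two data helpers. A minimal sketch of the intended usage (the test class, test name, and data here are hypothetical, not part of the commit):

#include "common/common_test.h"
namespace mindspore {
class MyOpTest : public mindspore::Common {};  // hypothetical fixture subclass
TEST_F(MyOpTest, CompareWithinTolerance) {
  float out[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  float expect[4] = {1.0f, 2.0f, 3.00001f, 4.0f};
  // CompareOutputData asserts |out[i] - expect[i]| <= err_bound element-wise.
  CompareOutputData(out, expect, 4, 0.001f);
}
}  // namespace mindspore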

@@ -0,0 +1,29 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "gtest/gtest.h"
#include "mindspore/core/utils/log_adapter.h"
namespace mindspore {
extern void InitSubModulesLogLevel();
}
GTEST_API_ int main(int argc, char** argv) {
mindspore::InitSubModulesLogLevel();
testing::InitGoogleTest(&argc, argv);
int ret = RUN_ALL_TESTS();
return ret;
}

@@ -15,12 +15,12 @@
*/
#include <gtest/gtest.h>
#include <string>
#include "tests/ut/cpp/common/common_test.h"
#include "common/common_test.h"
#include "benchmark/benchmark.h"
namespace mindspore {
namespace lite {
class BenchmarkTest : public UT::Common {
class BenchmarkTest : public mindspore::Common {
public:
BenchmarkTest() {}
};

@@ -16,11 +16,11 @@
#include <gtest/gtest.h>
#include <string>
#include "converter/converter.h"
#include "tests/ut/cpp/common/common_test.h"
#include "common/common_test.h"
namespace mindspore {
namespace lite {
class ConverterTest : public UT::Common {
class ConverterTest : public mindspore::Common {
public:
ConverterTest() {}
};

@@ -0,0 +1,246 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <climits>
#include <string>
#include <iostream>
#include <memory>
#include <fstream>
#include "common/common_test.h"
#include "mindspore/core/utils/log_adapter.h"
#include "mindspore/lite/include/lite_session.h"
#include "mindspore/lite/src/executor.h"
#include "mindspore/lite/schema/inner/anf_ir_generated.h"
namespace mindspore {
class TestLiteInference : public mindspore::Common {
public:
TestLiteInference() {}
};
std::string RealPath(const char *path) {
if (path == nullptr) {
return "";
}
if ((strlen(path)) >= PATH_MAX) {
return "";
}
std::unique_ptr<char[]> resolvedPath(new (std::nothrow) char[PATH_MAX]{0});  // char[] so delete[] is used
if (resolvedPath == nullptr) {
return "";
}
auto ret = realpath(path, resolvedPath.get());
if (ret == nullptr) {
return "";
}
return resolvedPath.get();
}
char *ReadModelFile(const char *file, size_t *size) {
if (file == nullptr) {
return nullptr;
}
MS_ASSERT(size != nullptr);
std::ifstream ifs(RealPath(file));
if (!ifs.good()) {
return nullptr;
}
if (!ifs.is_open()) {
return nullptr;
}
ifs.seekg(0, std::ios::end);
*size = ifs.tellg();
std::unique_ptr<char[]> buf(new (std::nothrow) char[*size]);
if (buf == nullptr) {
ifs.close();
return nullptr;
}
ifs.seekg(0, std::ios::beg);
ifs.read(buf.get(), *size);
ifs.close();
return buf.release();
}
// TEST_F(TestLiteInference, Net) {
// auto msGraph = std::make_shared<lite::GraphDefT>();
// msGraph->name = "graph";
// auto msSubgraph = std::make_unique<lite::SubGraphDefT>();
// msSubgraph->name = "subGraph";
//
// auto node = std::make_unique<lite::OpDefT>();
// node->inputIndex = {0, 1};
// node->outputIndex = {2};
// node->attr.type = lite::OpT_Add;
// node->attr.value = new lite::AddT;
// node->name = "Add";
// node->fmkType = lite::FmkType_CAFFE;
// msSubgraph->nodes.emplace_back(std::move(node));
//
// msSubgraph->inputIndex = {0};
// msSubgraph->outputIndex = {2};
//
// auto input0 = std::make_unique<lite::TensorDefT>();
// input0->refCount = lite::MSCONST_WEIGHT_REFCOUNT;
// input0->format = lite::Format_NCHW;
// input0->dataType = TypeId::kNumberTypeFloat;
// input0->dims = {1, 1, 2, 2};
// input0->offset = -1;
// msSubgraph->allTensors.emplace_back(std::move(input0));
//
// auto input1 = std::make_unique<lite::TensorDefT>();
// input1->refCount = lite::MSCONST_WEIGHT_REFCOUNT;
// input1->format = lite::Format_NCHW;
// input1->dataType = TypeId::kNumberTypeFloat;
// input1->dims = {1, 1, 2, 2};
// input1->offset = -1;
// input1->data.resize(16);
// msSubgraph->allTensors.emplace_back(std::move(input1));
//
// auto output = std::make_unique<lite::TensorDefT>();
// output->refCount = 0;
// output->format = lite::Format_NCHW;
// output->dims = {1, 1, 2, 2};
// output->offset = -1;
// msSubgraph->allTensors.emplace_back(std::move(output));
// msGraph->subgraphs.emplace_back(std::move(msSubgraph));
//
// flatbuffers::FlatBufferBuilder builder(1024);
// auto offset = lite::GraphDef::Pack(builder, msGraph.get());
// builder.Finish(offset);
// int size = builder.GetSize();
// auto *content = builder.GetBufferPointer();
// mindspore::lite::Context context;
// context.allocator = nullptr;
// context.deviceCtx.type = mindspore::lite::DeviceType::DT_CPU;
// #if 0
// auto graph = mindspore::lite::inference::LoadModel((char *)content, size);
//
// auto session = mindspore::lite::inference::Session::CreateSession(&context);
//
// std::vector<float> z1 = {1.1, 2.1, 3.1, 4.1};
// std::vector<inference::MSTensor *> inputs;
// auto t1 = inference::MSTensor::CreateTensor(TypeId::kNumberTypeFloat32, std::vector<int>({1, 1, 2, 2}));
// memcpy_s(t1->MutableData(), z1.size() * sizeof(float), z1.data(), z1.size() * sizeof(float));
//
// auto t2 = inference::MSTensor::CreateTensor(TypeId::kNumberTypeFloat32, std::vector<int>({1, 1, 2, 2}));
// memcpy_s(t2->MutableData(), z1.size() * sizeof(float), z1.data(), z1.size() * sizeof(float));
//
// inputs.push_back(t1);
// inputs.push_back(t1);
// // VectorRef *outputs = new VectorRef();
// auto outputs = session->RunGraph(inputs);
// #else
// auto file = "./efficientnet_b0.ms";
// size_t model_size;
//
// char *modelbuf = ReadModelFile(file, &model_size);
// auto graph = mindspore::lite::inference::LoadModel(modelbuf, model_size);
// auto session = mindspore::lite::inference::Session::CreateSession(&context);
// session->CompileGraph(graph);
// std::vector<inference::MSTensor *> inputs;
// auto t1 = inference::MSTensor::CreateTensor(TypeId::kNumberTypeFloat32, std::vector<int>({1, 244, 244, 3}));
//
// inputs.push_back(t1);
// auto outputs = session->RunGraph(inputs);
// #endif
// }
// TEST_F(TestLiteInference, Conv) {
// auto msGraph = std::make_shared<lite::GraphDefT>();
// msGraph->name = "graph";
// auto msSubgraph = std::make_unique<lite::SubGraphDefT>();
// msSubgraph->name = "subGraph";
//
// auto node = std::make_unique<lite::OpDefT>();
// node->inputIndex = {0, 1};
// node->outputIndex = {2};
// node->attr.type = lite::OpT_Conv2D;
// auto attr = new lite::Conv2DT;
// attr->padMode = lite::PadMode_SAME;
// attr->channelIn = 1;
// attr->channelOut = 1;
// attr->format = lite::Format_NHWC;
// attr->strideH = 1;
// attr->strideW = 1;
// attr->kernelH = 2;
// attr->kernelW = 2;
//
// node->attr.value = attr;
// node->name = "Conv2D";
// node->fmkType = lite::FmkType_CAFFE;
// msSubgraph->nodes.emplace_back(std::move(node));
//
// msSubgraph->inputIndex = {0};
// msSubgraph->outputIndex = {2};
// // MS_LOG(ERROR) << "OutData";
//
// auto input0 = std::make_unique<lite::TensorDefT>();
// input0->refCount = lite::MSCONST_WEIGHT_REFCOUNT;
// input0->format = lite::Format_NCHW;
// input0->dataType = TypeId::kNumberTypeFloat;
// input0->dims = {1, 1, 5, 5};
// // input0->data.resize(sizeof(float) * 25);
// // std::vector<float> input_data = {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5};
// // memcpy(input0->data.data(), input_data.data(), sizeof(int) * 25);
// input0->offset = -1;
// msSubgraph->allTensors.emplace_back(std::move(input0));
//
// auto weight = std::make_unique<lite::TensorDefT>();
// weight->refCount = lite::MSCONST_WEIGHT_REFCOUNT;
// weight->format = lite::Format_KHWC;
// weight->dataType = TypeId::kNumberTypeFloat;
// weight->dims = {1, 2, 2, 1};
// weight->data.resize(sizeof(float) * 4);
// std::vector<float> weight_data = {1, 2, 3, 4};
// memcpy(weight->data.data(), weight_data.data(), sizeof(int) * 4);
// weight->offset = -1;
// msSubgraph->allTensors.emplace_back(std::move(weight));
//
// auto output = std::make_unique<lite::TensorDefT>();
// output->refCount = 0;
// output->format = lite::Format_NCHW;
// output->dims = {1, 1, 5, 5};
// output->offset = -1;
// msSubgraph->allTensors.emplace_back(std::move(output));
// msGraph->subgraphs.emplace_back(std::move(msSubgraph));
//
// flatbuffers::FlatBufferBuilder builder(1024);
// auto offset = lite::GraphDef::Pack(builder, msGraph.get());
// builder.Finish(offset);
// int size = builder.GetSize();
// auto *content = builder.GetBufferPointer();
// mindspore::lite::Context context;
// context.allocator = nullptr;
// context.deviceCtx.type = mindspore::lite::DeviceType::DT_CPU;
// auto graph = mindspore::lite::inference::LoadModel((char *)content, size);
// auto session = mindspore::lite::inference::Session::CreateSession(&context);
// session->CompileGraph(graph);
// std::vector<inference::MSTensor *> inputs;
// auto t1 = inference::MSTensor::CreateTensor(TypeId::kNumberTypeFloat32, std::vector<int>({1, 3, 244, 244}));
//
// inputs.push_back(t1);
// auto outputs = session->RunGraph(inputs);
// }
} // namespace mindspore
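
ReadModelFile above hands ownership of a new[]-allocated buffer to the caller, so the caller must release it with delete[]. A minimal usage sketch (the model path is hypothetical, and the sketch assumes ReadModelFile is visible in the same translation unit):

#include <cstddef>
void LoadAndRelease() {
  size_t model_size = 0;
  char *model_buf = ReadModelFile("./model.ms", &model_size);
  if (model_buf == nullptr) {
    return;  // missing or unreadable file
  }
  // ... pass (model_buf, model_size) to LoadModel, as the disabled tests do ...
  delete[] model_buf;  // allocated with new[], so delete[] is required
}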

File diff suppressed because it is too large Load Diff

@@ -0,0 +1,128 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "mindspore/core/utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/fp32/activation.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
namespace mindspore {
class TestActivationFp32 : public mindspore::Common {
public:
TestActivationFp32() {}
};
TEST_F(TestActivationFp32, ReluFp32) {
float input[8] = {-3, -2, -1, 0, 1, 5, 6, 7};
float output[8] = {0};
Relu(input, 8, output);
float expect[8] = {0, 0, 0, 0, 1, 5, 6, 7};
for (int i = 0; i < 8; ++i) {
ASSERT_EQ(output[i], expect[i]);
}
}
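
The expected values follow straight from the ReLU definition, output = max(0, input). A reference sketch with the same shape as the opclib call (the exact Relu signature is assumed from the test above):

// Reference only: clamp negative inputs to zero, element by element.
void ReluRef(const float *input, int length, float *output) {
  for (int i = 0; i < length; ++i) {
    output[i] = input[i] > 0.0f ? input[i] : 0.0f;
  }
}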
TEST_F(TestActivationFp32, Relu6Fp32) {
float input[8] = {-3, -2, -1, 0, 1, 5, 6, 7};
float output[8] = {0};
Relu6(input, 8, output);
float expect[8] = {0, 0, 0, 0, 1, 5, 6, 6};
for (int i = 0; i < 8; ++i) {
ASSERT_EQ(output[i], expect[i]);
}
MS_LOG(INFO) << "TestActivationFp32 passed";
}
TEST_F(TestActivationFp32, LReluFp32) {
float input[8] = {-3, -2, -1, 0, 1, 5, 6, 7};
float output[8] = {0};
LRelu(input, 8, output, 0.01);
float expect[8] = {-0.03, -0.02, -0.01, 0, 1, 5, 6, 7};
for (int i = 0; i < 8; ++i) {
ASSERT_EQ(output[i], expect[i]);
}
MS_LOG(INFO) << "TestActivationFp32 passed";
}
TEST_F(TestActivationFp32, SigmoidFp32) {
float input[8] = {0, 1, 2, 3, 4, 5, 6, 7};
float output[8] = {0};
Sigmoid(input, 8, output);
// expect output {0.5, 0.731059, 0.880797, 0.952574, 0.982014, 0.993307, 0.997527, 0.999089};
printf("==================output data=================\n");
for (int i = 0; i < 8; ++i) {
std::cout << output[i] << " ";
}
std::cout << std::endl;
MS_LOG(INFO) << "TestSigmoidFp32 passed";
}
TEST_F(TestActivationFp32, TanhFp32) {
float input[7] = {-3, -2, -1, 0, 1, 2, 3};
float output[7] = {0};
Tanh(input, 7, output);
float expect[7] = {-0.995055, -0.964028, -0.761594, 0.000000, 0.761594, 0.964028, 0.995055};
for (int i = 0; i < 7; ++i) {
EXPECT_NEAR(output[i], expect[i], 0.00001);
}
MS_LOG(INFO) << "TanhFp32 passed";
}
TEST_F(TestActivationFp32, HSwishFp32) {
std::vector<lite::tensor::Tensor *> inputs_tensor;
std::vector<lite::tensor::Tensor *> outputs_tensor;
ActivationParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Activation;
op_param.type_ = schema::ActivationType_HSWISH;
op_param.alpha_ = 0.01;
std::vector<float> input = {-3.0, -2.0, -1.0, 0.0, 1.0, 5.0, 6.0, 7.0};
std::vector<int> in_shape = {8};
lite::tensor::Tensor input0_tensor;
inputs_tensor.push_back(&input0_tensor);
input0_tensor.SetData(input.data());
input0_tensor.set_shape(in_shape);
std::vector<float> output(8);
std::vector<int> output_shape = {8};
lite::tensor::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, schema::PrimitiveType_Activation};
auto creator = lite::KernelRegistry::GetInstance()->GetKernelCreator(desc);
ASSERT_NE(creator, nullptr);
lite::Context ctx;
ctx.threadNum = 7;
kernel::LiteKernel *kernel =
creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc);
ASSERT_NE(kernel, nullptr);
auto output_tensor_shape = output0_tensor.shape();
kernel->Run();
std::vector<float> expect_output = {-0, -0.33333334, -0.33333334, 0, 0.6666667, 5, 6, 7};
CompareOutputData(output.data(), expect_output.data(), 8, 0.00001);
input0_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
}
} // namespace mindspore
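
The HSwish expectations can be reproduced by hand from the usual definition hswish(x) = x * relu6(x + 3) / 6. A reference sketch (not the opclib kernel):

#include <algorithm>
// Reference only: hswish(x) = x * min(max(x + 3, 0), 6) / 6.
float HSwishRef(float x) {
  float relu6 = std::min(std::max(x + 3.0f, 0.0f), 6.0f);
  return x * relu6 / 6.0f;
}
// e.g. HSwishRef(-2.0f) = -2 * 1 / 6 ≈ -0.33333334, matching expect_output above.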

@@ -0,0 +1,74 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/include/context.h"
namespace mindspore {
class TestQuantizedAdd : public mindspore::Common {
public:
TestQuantizedAdd() {}
};
TEST_F(TestQuantizedAdd, Add) {
lite::tensor::Tensor in_tensor0(kNumberTypeInt8, {1, 1, 2, 5});
lite::tensor::Tensor in_tensor1(kNumberTypeInt8, {1, 1, 2, 5});
lite::tensor::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 5});
int8_t input_data0[] = {-102, 25, -51, 89, -102, 25, -51, 89, -102, 25}; // -0.8 0.2 -0.4 0.7
int8_t input_data1[] = {38, 51, 64, -102, 38, 51, 64, -102, 38, 51}; // 0.3 0.4 0.5 -0.8
int8_t output_data[10] = {0};
in_tensor0.SetData(input_data0);
in_tensor1.SetData(input_data1);
out_tensor.SetData(output_data);
const lite::tensor::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::tensor::QuantArg quant_in1 = {0.00784314f, 0};
const lite::tensor::QuantArg quant_out = {0.00784314f, 0};
in_tensor0.AddQuantParam(quant_in0);
in_tensor1.AddQuantParam(quant_in1);
out_tensor.AddQuantParam(quant_out);
std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
OpParameter parameter = {};
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, schema::PrimitiveType_Add};
auto creator = lite::KernelRegistry::GetInstance()->GetKernelCreator(desc);
ASSERT_NE(creator, nullptr);
auto ctx = std::make_shared<lite::Context>();
auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc);
ASSERT_NE(kernel, nullptr);
auto ret = kernel->Run();
EXPECT_EQ(0, ret);
int8_t expect0[10] = {-64, 76, 13, -13, -64, 76, 13, -13, -64, 76}; // -0.5 0.6 0.1 -0.1
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(output_data[i], expect0[i]);
}
in_tensor0.SetData(nullptr);
in_tensor1.SetData(nullptr);
out_tensor.SetData(nullptr);
}
} // namespace mindspore
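
The expected int8 values follow the affine rule real = scale * (q - zero_point); with scale 0.00784314 (≈ 1/127.5) and zero point 0, the first element dequantizes, adds, and requantizes as sketched below (reference arithmetic only, not the add_int8 kernel):

#include <cmath>
#include <cstdint>
// Reference only: dequantize both operands, add in float, requantize.
int8_t AddQuantizedRef(int8_t a, int8_t b, float scale) {
  float real = scale * a + scale * b;  // -0.8 + 0.3 = -0.5 for the first element
  return static_cast<int8_t>(std::round(real / scale));  // -63.75 rounds to -64
}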

@@ -0,0 +1,2 @@
(binary test-data file; raw contents omitted)

Some files were not shown because too many files have changed in this diff.
