!9062 upsample fp32

From: @zhaozhenlong
Reviewed-by: @zhanghaibo5,@zhang_xue_tong
Signed-off-by:
pull/9062/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit 6cf308076d

@ -0,0 +1,26 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_
#define MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_
#include "nnacl/op_base.h"
// Parameter container for the Upsample operator.
typedef struct {
  // Common op header; kept as the first member so the struct can be cast
  // to/from OpParameter* (see PopulateUpsampleParameter).
  OpParameter op_parameter_;
  int method_;  // 0 for bilinear; 1 for nearest
} UpsampleParameter;
#endif // MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_

@ -0,0 +1,44 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/upsample.h"
#include "src/ops/primitive_c.h"
#include "src/ops/populate/populate_register.h"
#include "nnacl/upsample_parameter.h"
namespace mindspore {
namespace lite {
// Builds an UpsampleParameter from the front-end Upsample primitive.
// Returns nullptr on allocation failure; the caller owns the returned memory.
OpParameter *PopulateUpsampleParameter(const mindspore::lite::PrimitiveC *primitive) {
  auto *param = reinterpret_cast<UpsampleParameter *>(malloc(sizeof(UpsampleParameter)));
  if (param == nullptr) {
    MS_LOG(ERROR) << "malloc Upsample Parameter failed.";
    return nullptr;
  }
  memset(param, 0, sizeof(UpsampleParameter));
  auto *upsample_prim =
    reinterpret_cast<mindspore::lite::Upsample *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
  param->op_parameter_.type_ = primitive->Type();
  // Mode "linear" selects bilinear (0); every other mode maps to nearest (1).
  param->method_ = (upsample_prim->GetMode() == "linear") ? 0 : 1;
  return reinterpret_cast<OpParameter *>(param);
}
// Registers the populate function for the Upsample primitive type.
// (Fixed identifier typo "Paremeter" -> "Parameter"; the object is only a
// file-local registration hook and is never referenced by name elsewhere.)
Registry UpsampleParameterRegistry(schema::PrimitiveType_Upsample, PopulateUpsampleParameter);
} // namespace lite
} // namespace mindspore

@ -575,7 +575,6 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
return NewPrimitiveC<Floor>(prim, inputs, quantType);
} else if (op_type == "Minimum") {
return NewPrimitiveC<Minimum>(prim, inputs, quantType);
#ifdef SUPPORT_TRAIN
} else if (op_type == "SoftmaxCrossEntropyWithLogits") {
return NewPrimitiveC<SoftmaxCrossEntropy>(prim, inputs, quantType);

@ -62,5 +62,41 @@ PrimitiveC *UpsampleCreator(const schema::Primitive *primitive) {
Registry UpsampleRegistry(schema::PrimitiveType_Upsample, UpsampleCreator);
#endif
// Infers the NHWC output shape from the input tensor and the 4-element scale
// tensor: out_h = floor(in_h * scale_h), out_w = floor(in_w * scale_w);
// batch and channel are unchanged.
// Returns RET_INFER_ERR on malformed inputs and RET_INFER_INVALID when the
// scale data is not yet available.
int Upsample::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
  if (inputs_.size() < 2 || outputs_.empty()) {
    MS_LOG(ERROR) << "Upsample InferShape needs 2 inputs and 1 output";
    return RET_INFER_ERR;
  }
  auto input_tensor = inputs_.at(0);
  MS_ASSERT(input_tensor);
  auto input_shape = input_tensor->shape();
  if (input_shape.size() != 4) {
    MS_LOG(ERROR) << "Upsample InferShape input tensor rank should be 4";
    return RET_INFER_ERR;
  }
  auto scale_tensor = inputs_.at(1);
  MS_ASSERT(scale_tensor);
  auto scale_shape = scale_tensor->shape();
  // The scale must be a rank-1 tensor holding exactly 4 values (n, h, w, c).
  // Was `size() != 1 && scale_shape[0] != 4`, which accepted malformed scale
  // tensors and indexed [0] on a possibly empty shape.
  if (scale_shape.size() != 1 || scale_shape[0] != 4) {
    MS_LOG(ERROR) << "Upsample scale tensor shape should be 4";
    return RET_INFER_ERR;
  }
  auto scale = reinterpret_cast<float *>(scale_tensor->data_c());
  if (scale == nullptr) {
    MS_LOG(ERROR) << "Upsample scale data nullptr";
    return RET_INFER_INVALID;
  }
  std::vector<int> out_shape = input_shape;  // n, h, w, c; n, c not changed.
  int new_height = static_cast<int>(floor(input_shape[1] * scale[1]));
  MS_ASSERT(new_height > 0);
  int new_width = static_cast<int>(floor(input_shape[2] * scale[2]));
  MS_ASSERT(new_width > 0);
  out_shape[1] = new_height;
  out_shape[2] = new_width;
  auto out_tensor = outputs_.at(0);
  MS_ASSERT(out_tensor);
  out_tensor->set_shape(out_shape);
  out_tensor->set_data_type(input_tensor->data_type());
  return RET_OK;
}
} // namespace lite
} // namespace mindspore

@ -42,6 +42,7 @@ class Upsample : public PrimitiveC {
#endif
std::string GetMode() const;
std::vector<float> GetScales() const;
int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
};
} // namespace lite
} // namespace mindspore

@ -36,11 +36,11 @@ class ResizeCPUKernel : public ResizeBaseCPUKernel {
int Init() override;
int ReSize() override;
int Run() override;
int RunImpl(int task_id);
virtual int RunImpl(int task_id);
int MallocTmpBuffer();
void FreeTmpBuffer();
private:
protected:
int *y_tops_ = nullptr;
int *y_bottoms_ = nullptr;
int *x_lefts_ = nullptr;

@ -0,0 +1,161 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/upsample_fp32.h"
#include <algorithm>
#include "nnacl/fp32/resize_fp32.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Upsample;
namespace mindspore::kernel {
// Caches the UpsampleParameter and, once shapes are known, resizes buffers.
int UpsampleCPUKernel::Init() {
  param_ = reinterpret_cast<UpsampleParameter *>(op_parameter_);
  MS_ASSERT(param_);
  // Defer buffer preparation until shape inference has completed.
  return InferShapeDone() ? ReSize() : RET_OK;
}
// Re-reads the inferred output shape and, for bilinear mode, (re)allocates the
// interpolation index/weight buffers and precomputes them.
// Returns RET_ERROR on a malformed output shape or allocation failure.
int UpsampleCPUKernel::ReSize() {
  auto out_tensor = out_tensors_.at(0);
  MS_ASSERT(out_tensor);
  auto out_shape = out_tensor->shape();
  if (out_shape.size() != 4) {
    MS_LOG(ERROR) << "Upsample out tensor dim should be 4";
    return RET_ERROR;
  }
  new_height_ = out_shape[1];  // NHWC: index 1 is height
  new_width_ = out_shape[2];   // NHWC: index 2 is width
  auto ret = RET_OK;
  if (param_->method_ == 0) {  // 0 == bilinear (see UpsampleParameter)
    FreeTmpBuffer();
    ret = MallocTmpBuffer();
    if (ret != RET_OK) {
      FreeTmpBuffer();
      return ret;
    }
    auto input = in_tensors_.at(0);
    MS_ASSERT(input);
    auto input_shape = input->shape();
    // Reuse out_tensor/out_shape fetched above instead of re-fetching the
    // same tensor through out_tensors().at(0) and copying its shape again.
    ret = PrepareResizeBilinear(input_shape.data(), out_shape.data(), align_corners_, y_bottoms_, y_tops_, x_lefts_,
                                x_rights_, y_bottom_weights_, x_left_weights_);
    if (ret != RET_OK) {
      FreeTmpBuffer();
    }
  }
  return ret;
}
int UpsampleImpl(void *cdata, int task_id) {
auto upsample_kernel = reinterpret_cast<UpsampleCPUKernel *>(cdata);
auto error_code = upsample_kernel->RunImpl(task_id);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Upsample Run error task_id[" << task_id << "] error_code[" << error_code << "]";
return RET_ERROR;
}
return RET_OK;
}
// Computes this task's slice of the upsample: for bilinear the flattened
// n*h output rows are split evenly over context_->thread_num_ tasks; for
// nearest the split is handled inside ResizeNearestNeighbor.
// NOTE(review): param_->method_ is populated as 0 (bilinear) / 1 (nearest) by
// PopulateUpsampleParameter, while this switch compares against
// schema::ResizeMethod_* enum values — confirm the two encodings coincide.
int UpsampleCPUKernel::RunImpl(int task_id) {
  MS_ASSERT(in_tensors_.size() == 2);
  auto input = in_tensors_.at(0);  // input to be upsampled(resized)
  auto input_data = reinterpret_cast<float *>(input->data_c());
  MS_ASSERT(input_data);
  auto out_tensor = out_tensors_.at(0);
  MS_ASSERT(out_tensor);
  auto output_data = reinterpret_cast<float *>(out_tensor->data_c());
  MS_ASSERT(output_data);
  auto input_shape = input->shape();
  int ret = 0;
  switch (param_->method_) {
    case static_cast<int>(schema::ResizeMethod_LINEAR): {
      int n_h_begin, n_h_end;
      // Partition the n*h output rows evenly across threads.
      int n = out_tensor->shape()[0];
      int h = new_height_;
      int unit = UP_DIV(n * h, context_->thread_num_);
      n_h_begin = unit * task_id;
      n_h_end = std::min(n_h_begin + unit, n * h);
      int c = in_tensors_.at(0)->shape()[3];
      // Two per-task scratch rows of the interpolated horizontal lines.
      float *line0 = line_buffer_ + new_width_ * c * 2 * task_id;
      float *line1 = line0 + new_width_ * c;
      ret =
        ResizeBilinear2(input_data, output_data, input_shape.data(), out_tensor->shape().data(), y_bottoms_, y_tops_,
                        x_lefts_, x_rights_, y_bottom_weights_, x_left_weights_, line0, line1, n_h_begin, n_h_end);
      break;
    }
    case static_cast<int>(schema::ResizeMethod_NEAREST): {
      // Nearest path always runs without corner alignment.
      align_corners_ = false;
      ret = ResizeNearestNeighbor(input_data, output_data, input_shape.data(), out_tensor->shape().data(),
                                  align_corners_, task_id, context_->thread_num_);
      break;
    }
    default: {
      MS_LOG(ERROR) << "Upsample unknown method " << param_->method_;
      ret = RET_ERROR;
    }
  }
  return ret;
}
int UpsampleCPUKernel::Run() {
int error_code = ParallelLaunch(this->context_->thread_pool_, UpsampleImpl, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Upsample run error, error_code[" << error_code << "]";
FreeTmpBuffer();
return RET_ERROR;
}
return RET_OK;
}
// Factory registered for the fp32 Upsample kernel. |parameter| is freed here
// on allocation failure; otherwise it is handed to the kernel (presumably
// released with the kernel — confirm LiteKernel dtor behavior).
kernel::LiteKernel *CpuUpsampleFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                                 const lite::InnerContext *ctx, const KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
  MS_ASSERT(parameter != nullptr);
  MS_ASSERT(desc.type == PrimitiveType_Upsample);
  auto *kernel = new (std::nothrow) UpsampleCPUKernel(parameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
    free(parameter);
    return nullptr;
  }
  if (kernel->Init() != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_
                  << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(parameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Upsample, CpuUpsampleFp32KernelCreator)
} // namespace mindspore::kernel

@ -0,0 +1,43 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_UPSAMPLE_FP32_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_UPSAMPLE_FP32_H_
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/upsample_parameter.h"
#include "src/runtime/kernel/arm/fp32/resize_fp32.h"
namespace mindspore::kernel {
// CPU fp32 kernel for the Upsample operator. Derives from ResizeCPUKernel to
// reuse its bilinear/nearest buffers; only parameter handling differs.
class UpsampleCPUKernel : public ResizeCPUKernel {
 public:
  UpsampleCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                    const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                    const mindspore::lite::PrimitiveC *primitive)
      : ResizeCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
  ~UpsampleCPUKernel() = default;
  int Init() override;
  int ReSize() override;
  int Run() override;
  int RunImpl(int task_id) override;

 private:
  // Non-owning view of op_parameter_, cached in Init(). Zero-initialized so
  // the pointer is never read uninitialized before Init() runs.
  UpsampleParameter *param_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_UPSAMPLE_FP32_H_

@ -0,0 +1,247 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstring>
#include <vector>
#include "common/common_test.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
#include "mindspore/lite/src/tensor.h"
#include "nnacl/upsample_parameter.h"
#include "schema/ops_generated.h"
#include "src/ops/upsample.h"
using mindspore::schema::Format_NHWC;
namespace mindspore {
// Fixture for the fp32 Upsample kernel tests. The tensor data buffers are
// stack arrays owned by each test; TearDown detaches them before the tensors
// are destroyed.
class TestUpsampleFp32 : public mindspore::CommonTest {
 public:
  TestUpsampleFp32() = default;
  // Wires tensors, runs shape inference and creates the registered kernel.
  void Prepare(const std::vector<int> &input_shape, float *input_data, float *scale_data, float *output_data,
               schema::ResizeMethod method, const int thread_num);
  void TearDown() override;

 public:
  float err_tol = 1e-5;  // absolute tolerance for CompareOutputData
  lite::Tensor in_tensor_;
  lite::Tensor scale_tensor_;  // rank-1, 4 floats: n/h/w/c scale factors
  lite::Tensor out_tensor_;
  std::vector<lite::Tensor *> inputs_{&in_tensor_, &scale_tensor_};
  std::vector<lite::Tensor *> outputs_{&out_tensor_};
  UpsampleParameter *param_ = nullptr;  // malloc'd in Prepare, handed to the kernel
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Upsample};
  lite::InnerContext ctx_ = lite::InnerContext();
  kernel::KernelCreator creator_ = nullptr;
  kernel::LiteKernel *kernel_ = nullptr;
  lite::Upsample *upsample_ = nullptr;  // primitive used only for InferShape
};
// Detach the stack-owned data buffers (the tensors must not free them), then
// release the objects created in Prepare(). param_ was handed to kernel_ in
// Prepare(); presumably released together with kernel_ — confirm LiteKernel
// destructor behavior.
void TestUpsampleFp32::TearDown() {
  in_tensor_.set_data(nullptr);
  scale_tensor_.set_data(nullptr);
  out_tensor_.set_data(nullptr);
  delete upsample_;
  delete kernel_;
}
// Wires the fixture tensors to caller-owned buffers, infers the output shape
// via the Upsample primitive, then builds the parameter and creates the
// registered fp32 Upsample kernel with the requested thread count.
void TestUpsampleFp32::Prepare(const std::vector<int> &input_shape, float *input_data, float *scale_data,
                               float *output_data, schema::ResizeMethod method, const int thread_num) {
  in_tensor_.set_data_type(kNumberTypeFloat32);
  in_tensor_.set_format(Format_NHWC);
  in_tensor_.set_shape(input_shape);
  in_tensor_.set_data(input_data);
  scale_tensor_.set_data_type(kNumberTypeFloat32);
  scale_tensor_.set_data(scale_data);
  scale_tensor_.set_shape({4});
  out_tensor_.set_data_type(kNumberTypeFloat32);
  out_tensor_.set_data(output_data);
  upsample_ = new (std::nothrow) lite::Upsample;
  ASSERT_NE(upsample_, nullptr);  // was dereferenced without a null check
  ASSERT_EQ(lite::RET_OK, upsample_->InferShape(inputs_, outputs_));
  param_ = reinterpret_cast<UpsampleParameter *>(malloc(sizeof(UpsampleParameter)));
  ASSERT_NE(param_, nullptr);
  // Zero the whole struct: op_parameter_ fields were previously left
  // uninitialized after malloc.
  memset(param_, 0, sizeof(UpsampleParameter));
  param_->method_ = static_cast<int>(method);
  desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Upsample};
  ctx_ = lite::InnerContext();
  ctx_.thread_num_ = thread_num;
  ASSERT_EQ(lite::RET_OK, ctx_.Init());
  creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator_, nullptr);
  kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(param_), &ctx_, desc, nullptr);
  ASSERT_NE(kernel_, nullptr);
}
// Bilinear upsample 1x2x2x1 -> 1x4x4x1, single thread.
TEST_F(TestUpsampleFp32, test1) {
  float input_data[] = {0.0, 1.0, 2.0, 3.0};
  float output_data[16] = {0.0f};
  std::vector<int> input_shape = {1, 2, 2, 1};
  float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f};  // n, h, w, c scale factors
  std::vector<float> expect = {0.0, 0.5, 1.0, 1.0, 1.0, 1.5, 2.0, 2.0, 2.0, 2.5, 3.0, 3.0, 2.0, 2.5, 3.0, 3.0};
  Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 1);
  auto ret = kernel_->Run();
  EXPECT_EQ(0, ret);
  auto output_size = 16;
  ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
// Same as test1 (bilinear 1x2x2x1 -> 1x4x4x1) but with two threads, checking
// the parallel row split produces identical results.
TEST_F(TestUpsampleFp32, test2) {
  float input_data[] = {0.0, 1.0, 2.0, 3.0};
  float output_data[16] = {0.0f};
  std::vector<int> input_shape = {1, 2, 2, 1};
  float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f};  // n, h, w, c scale factors
  std::vector<float> expect = {0.0, 0.5, 1.0, 1.0, 1.0, 1.5, 2.0, 2.0, 2.0, 2.5, 3.0, 3.0, 2.0, 2.5, 3.0, 3.0};
  Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 2);
  auto ret = kernel_->Run();
  EXPECT_EQ(0, ret);
  auto output_size = 16;
  ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
// Bilinear upsample 2x2x2x5 -> 2x4x4x5, single thread (multi-batch,
// multi-channel case).
TEST_F(TestUpsampleFp32, test3) {
  float input_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0,
                        14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
                        28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0};
  float output_data[160] = {0};
  std::vector<int> input_shape = {2, 2, 2, 5};
  float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f};  // n, h, w, c scale factors
  std::vector<float> expect = {
    0.0,  1.0,  2.0,  3.0,  4.0,  2.5,  3.5,  4.5,  5.5,  6.5,  5.0,  6.0,  7.0,  8.0,  9.0,  5.0,  6.0,  7.0,
    8.0,  9.0,  5.0,  6.0,  7.0,  8.0,  9.0,  7.5,  8.5,  9.5,  10.5, 11.5, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0,
    11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 12.5, 13.5, 14.5, 15.5, 16.5, 15.0, 16.0, 17.0, 18.0,
    19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 10.0, 11.0, 12.0, 13.0, 14.0, 12.5, 13.5, 14.5, 15.5, 16.5, 15.0, 16.0,
    17.0, 18.0, 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 22.5, 23.5, 24.5, 25.5, 26.5,
    25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 27.5, 28.5, 29.5,
    30.5, 31.5, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 32.5,
    33.5, 34.5, 35.5, 36.5, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0, 30.0, 31.0, 32.0, 33.0,
    34.0, 32.5, 33.5, 34.5, 35.5, 36.5, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0};
  auto output_size = 160;
  Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 1);
  auto ret = kernel_->Run();
  EXPECT_EQ(0, ret);
  ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
// Same as test3 (bilinear 2x2x2x5 -> 2x4x4x5) but with two threads.
// Removed the unused local `std::vector<float> output(output_size, 0.0)` —
// the kernel writes into output_data, which is what gets compared.
TEST_F(TestUpsampleFp32, test4) {
  float input_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0,
                        14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
                        28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0};
  float output_data[160] = {0};
  std::vector<int> input_shape = {2, 2, 2, 5};
  std::vector<float> expect = {
    0.0,  1.0,  2.0,  3.0,  4.0,  2.5,  3.5,  4.5,  5.5,  6.5,  5.0,  6.0,  7.0,  8.0,  9.0,  5.0,  6.0,  7.0,
    8.0,  9.0,  5.0,  6.0,  7.0,  8.0,  9.0,  7.5,  8.5,  9.5,  10.5, 11.5, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0,
    11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 12.5, 13.5, 14.5, 15.5, 16.5, 15.0, 16.0, 17.0, 18.0,
    19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 10.0, 11.0, 12.0, 13.0, 14.0, 12.5, 13.5, 14.5, 15.5, 16.5, 15.0, 16.0,
    17.0, 18.0, 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 22.5, 23.5, 24.5, 25.5, 26.5,
    25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 27.5, 28.5, 29.5,
    30.5, 31.5, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 32.5,
    33.5, 34.5, 35.5, 36.5, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0, 30.0, 31.0, 32.0, 33.0,
    34.0, 32.5, 33.5, 34.5, 35.5, 36.5, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0};
  float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f};  // n, h, w, c scale factors
  auto output_size = 160;
  Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 2);
  auto ret = kernel_->Run();
  EXPECT_EQ(0, ret);
  ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
// Bilinear DOWN-sample 1x5x5x5 -> 1x2x2x5 (scale 0.4), two threads.
// (Original comment said "thread num 1" but Prepare is called with 2.)
TEST_F(TestUpsampleFp32, test5) {
  float input_data[] = {
    0.0,   1.0,   2.0,   3.0,   4.0,   5.0,   6.0,   7.0,   8.0,   9.0,   10.0,  11.0,  12.0,  13.0,  14.0,  15.0,
    16.0,  17.0,  18.0,  19.0,  20.0,  21.0,  22.0,  23.0,  24.0,  25.0,  26.0,  27.0,  28.0,  29.0,  30.0,  31.0,
    32.0,  33.0,  34.0,  35.0,  36.0,  37.0,  38.0,  39.0,  40.0,  41.0,  42.0,  43.0,  44.0,  45.0,  46.0,  47.0,
    48.0,  49.0,  50.0,  51.0,  52.0,  53.0,  54.0,  55.0,  56.0,  57.0,  58.0,  59.0,  60.0,  61.0,  62.0,  63.0,
    64.0,  65.0,  66.0,  67.0,  68.0,  69.0,  70.0,  71.0,  72.0,  73.0,  74.0,  75.0,  76.0,  77.0,  78.0,  79.0,
    80.0,  81.0,  82.0,  83.0,  84.0,  85.0,  86.0,  87.0,  88.0,  89.0,  90.0,  91.0,  92.0,  93.0,  94.0,  95.0,
    96.0,  97.0,  98.0,  99.0,  100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0, 107.0, 108.0, 109.0, 110.0, 111.0,
    112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0};
  float output_data[20] = {0};
  std::vector<int> input_shape = {1, 5, 5, 5};
  std::vector<float> expect = {0.0,  1.0,  2.0,  3.0,  4.0,  12.5, 13.5, 14.5, 15.5, 16.5,
                               62.5, 63.5, 64.5, 65.5, 66.5, 75.0, 76.0, 77.0, 78.0, 79.0};
  float scale_data[] = {1.0f, 0.4f, 0.4f, 1.0f};  // n, h, w, c scale factors
  auto output_size = 20;
  Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 2);
  auto ret = kernel_->Run();
  EXPECT_EQ(0, ret);
  ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
// Nearest-neighbour upsample 2x2x2x5 -> 2x4x4x5, single thread.
// Removed the unused local `output_shape`; the shape is inferred by Prepare.
TEST_F(TestUpsampleFp32, test6) {
  float input_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0,
                        14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
                        28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0};
  float output_data[160] = {0};
  std::vector<int> input_shape = {2, 2, 2, 5};
  std::vector<float> expect = {
    0.0,  1.0,  2.0,  3.0,  4.0,  0.0,  1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  5.0,  6.0,  7.0,
    8.0,  9.0,  0.0,  1.0,  2.0,  3.0,  4.0,  0.0,  1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  5.0,
    6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
    19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
    17.0, 18.0, 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 20.0, 21.0, 22.0, 23.0, 24.0,
    25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 20.0, 21.0, 22.0, 23.0, 24.0, 20.0, 21.0, 22.0,
    23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0,
    31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0, 30.0, 31.0, 32.0, 33.0,
    34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0};
  size_t output_size = 160;
  float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f};  // n, h, w, c scale factors
  Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_NEAREST, 1);
  auto ret = kernel_->Run();
  EXPECT_EQ(0, ret);
  ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
// Same as test6 (nearest 2x2x2x5 -> 2x4x4x5) but with two threads.
// Removed the unused local `output_shape`; the shape is inferred by Prepare.
TEST_F(TestUpsampleFp32, test7) {
  float input_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0,
                        14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
                        28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0};
  float output_data[160] = {0};
  std::vector<int> input_shape = {2, 2, 2, 5};
  std::vector<float> expect = {
    0.0,  1.0,  2.0,  3.0,  4.0,  0.0,  1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  5.0,  6.0,  7.0,
    8.0,  9.0,  0.0,  1.0,  2.0,  3.0,  4.0,  0.0,  1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  5.0,
    6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
    19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
    17.0, 18.0, 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 20.0, 21.0, 22.0, 23.0, 24.0,
    25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 20.0, 21.0, 22.0, 23.0, 24.0, 20.0, 21.0, 22.0,
    23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0,
    31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0, 30.0, 31.0, 32.0, 33.0,
    34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0};
  size_t output_size = 160;
  float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f};  // n, h, w, c scale factors
  Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_NEAREST, 2);
  auto ret = kernel_->Run();
  EXPECT_EQ(0, ret);
  ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
} // namespace mindspore
Loading…
Cancel
Save