[MSLITE][Develop] Fix bugs in NPU ops: reshape, pooling

pull/11919/head
yangruoqi713 4 years ago
parent 3bf5681ebc
commit 964471ea6e

@ -29,6 +29,8 @@ typedef struct ReshapeQuantArg {
typedef struct ReshapeParameter {
// primitive parameter
OpParameter op_parameter_;
int shape_dim_;
int shape_[MAX_SHAPE_SIZE];
// other parameter
ReshapeQuantArg quant_para_;

@ -43,6 +43,18 @@ OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primiti
pooling_param->stride_w_ = pooling_primitive->GetStrideW();
pooling_param->stride_h_ = pooling_primitive->GetStrideH();
pooling_param->avg_mode_ = pooling_primitive->GetAvgMode();
auto pad_mode = pooling_primitive->GetPadMode();
switch (pad_mode) {
case schema::PadMode_SAME_UPPER:
pooling_param->pad_mode_ = Pad_Same;
break;
case schema::PadMode_VALID:
pooling_param->pad_mode_ = Pad_Valid;
break;
default:
pooling_param->pad_mode_ = Pad_No;
break;
}
auto is_global = pooling_primitive->GetGlobal();
pooling_param->global_ = is_global;

@ -19,6 +19,7 @@
#include "src/common/log_adapter.h"
#include "src/tensor.h"
#include "nnacl/reshape_parameter.h"
#include "src/ops/reshape.h"
namespace mindspore {
namespace lite {
@ -31,6 +32,13 @@ OpParameter *PopulateReshapeParameter(const mindspore::lite::PrimitiveC *primiti
}
memset(reshape_param, 0, sizeof(ReshapeParameter));
reshape_param->op_parameter_.type_ = primitive->Type();
auto reshape_lite_primitive = (lite::Reshape *)primitive;
auto shape = reshape_lite_primitive->GetShape();
reshape_param->shape_dim_ = shape.size();
int i = 0;
for (auto iter = shape.begin(); iter != shape.end(); iter++) {
reshape_param->shape_[i++] = *iter;
}
return reinterpret_cast<OpParameter *>(reshape_param);
}

@ -176,13 +176,13 @@ int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
return RET_INFER_INVALID;
}
std::vector<int> out_shape;
out_shape_.clear();
if (inputs_.size() == kDoubleNum) {
auto shape_tensor = inputs_.at(1);
if (shape_tensor->IsConst()) {
if (shape_tensor->data_c() == nullptr || (shape_tensor->shape().size() == 1 && shape_tensor->shape()[0] == 0)) {
MS_LOG(DEBUG) << "reshape to a scalar.";
output->set_shape(out_shape);
output->set_shape(out_shape_);
return RET_OK;
}
}
@ -194,23 +194,23 @@ int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
switch (shape_tensor->data_type()) {
case kNumberTypeInt8: {
auto data = reinterpret_cast<int8_t *>(shape_tensor->MutableData());
CalShape<int8_t>(data, inputs_, &out_shape, shape_size);
CalShape<int8_t>(data, inputs_, &out_shape_, shape_size);
} break;
case kNumberTypeInt32: {
auto data = reinterpret_cast<int32_t *>(shape_tensor->MutableData());
CalShape<int32_t>(data, inputs_, &out_shape, shape_size);
CalShape<int32_t>(data, inputs_, &out_shape_, shape_size);
} break;
case kNumberTypeInt64: {
auto data = reinterpret_cast<int64_t *>(shape_tensor->MutableData());
CalShape<int64_t>(data, inputs_, &out_shape, shape_size);
CalShape<int64_t>(data, inputs_, &out_shape_, shape_size);
} break;
case kNumberTypeFloat: {
auto data = reinterpret_cast<float *>(shape_tensor->MutableData());
CalShape<float>(data, inputs_, &out_shape, shape_size);
CalShape<float>(data, inputs_, &out_shape_, shape_size);
} break;
case kNumberTypeUInt32: {
auto data = reinterpret_cast<uint32_t *>(shape_tensor->MutableData());
CalShape<uint32_t>(data, inputs_, &out_shape, shape_size);
CalShape<uint32_t>(data, inputs_, &out_shape_, shape_size);
} break;
default: {
MS_LOG(ERROR) << "Reshape weight tensor has unsupported dataType: " << shape_tensor->data_type();
@ -219,18 +219,18 @@ int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
}
} else if (inputs_.size() == kSingleNum) {
for (size_t i = 0; i < GetShape().size(); ++i) {
out_shape.push_back(GetShape().at(i));
out_shape_.push_back(GetShape().at(i));
}
} else {
MS_LOG(ERROR) << "inputs tensor size invalid.";
return RET_INFER_ERR;
}
auto ret = CalNewShape(inputs_.front(), &out_shape);
auto ret = CalNewShape(inputs_.front(), &out_shape_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "CalNewShape error";
return ret;
}
output->set_shape(out_shape);
output->set_shape(out_shape_);
return RET_OK;
}
} // namespace lite

@ -42,9 +42,11 @@ class Reshape : public PrimitiveC {
int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
int GetFormat() const;
std::vector<int64_t> GetShape() const;
std::vector<int> GetOutputShape() { return out_shape_; }
private:
int CalNewShape(const lite::Tensor *in_tensor, std::vector<int> *out_shape) const;
std::vector<int> out_shape_;
};
} // namespace lite
} // namespace mindspore

@ -15,7 +15,6 @@
*/
#include "src/runtime/kernel/npu/activation_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
@ -28,7 +27,7 @@ int ActivationNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs,
if (act_param_->type_ != schema::ActivationType_RELU && act_param_->type_ != schema::ActivationType_RELU6 &&
act_param_->type_ != schema::ActivationType_SIGMOID && act_param_->type_ != schema::ActivationType_TANH &&
act_param_->type_ != schema::ActivationType_HSIGMOID && act_param_->type_ != schema::ActivationType_LEAKY_RELU) {
MS_LOG(ERROR) << "Unsupport activation type for activation op " << name_ << "when running npu";
MS_LOG(ERROR) << "Unsupported activation type for activation op " << name_ << "when running npu";
return RET_ERROR;
}
return RET_OK;
@ -64,7 +63,7 @@ int ActivationNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
act_->set_attr_mode(14);
break;
default:
MS_LOG(ERROR) << "Unsupport activation type for activation op " << name_ << "when running npu";
MS_LOG(ERROR) << "Unsupported activation type for activation op " << name_ << "when running npu";
return RET_ERROR;
}
return RET_OK;

@ -18,10 +18,8 @@
#include <vector>
#include "include/graph/op/all_ops.h"
#include "include/graph/compatible/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/fp32/activation_fp32.h"
namespace mindspore::kernel {
class ActivationNPUKernel : public NPUKernel {
public:

@ -16,7 +16,6 @@
#include "src/runtime/kernel/npu/arithmetic_npu.h"
#include <string>
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;

@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/arithmetic.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class ArithmeticNPUKernel : public NPUKernel {
public:

@ -16,7 +16,6 @@
#include "src/runtime/kernel/npu/arithmetic_self_npu.h"
#include <string>
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;

@ -17,8 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/math_defs.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
namespace mindspore::kernel {
class ArithmeticSelfNPUKernel : public NPUKernel {
public:

@ -17,7 +17,6 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_BATCHNORM_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "include/graph/compatible/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/batchnorm_parameter.h"

@ -17,9 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
namespace mindspore::kernel {
class CastNPUKernel : public NPUKernel {
public:

@ -17,10 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CONCAT_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CONCAT_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/concat_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class ConcatNPUKernel : public NPUKernel {
public:

@ -18,10 +18,9 @@
#include <vector>
#include <memory>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/conv_parameter.h"
namespace mindspore::kernel {
class ConvolutionBaseNPUKernel : public NPUKernel {
public:

@ -16,8 +16,6 @@
#include "src/runtime/kernel/npu/convolution_depthwise_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_DepthwiseConv2D;

@ -18,9 +18,7 @@
#include <vector>
#include "include/graph/op/all_ops.h"
#include "include/graph/compatible/all_ops.h"
#include "src/runtime/kernel/npu/convolution_base_npu.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/conv_parameter.h"
namespace mindspore::kernel {

@ -15,7 +15,6 @@
*/
#include "src/runtime/kernel/npu/convolution_npu.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;

@ -15,8 +15,6 @@
*/
#include "src/runtime/kernel/npu/deconvolution_npu.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_DeConv2D;

@ -15,7 +15,6 @@
*/
#include "src/runtime/kernel/npu/eltwise_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"

@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ELTWISE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ELTWISE_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "src/ops/eltwise.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class EltwiseNPUKernel : public NPUKernel {
public:

@ -17,8 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_FULLCONNECTION_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_FULLCONNECTION_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/convolution_base_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/convolution_base_npu.h"
#include "nnacl/matmul_parameter.h"
namespace mindspore::kernel {
class FullconnectionNPUKernel : public ConvolutionBaseNPUKernel {

@ -16,7 +16,6 @@
#include "src/runtime/kernel/npu/gather_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Gather;

@ -17,8 +17,8 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/gather_parameter.h"
namespace mindspore::kernel {
class GatherNPUKernel : public NPUKernel {

@ -17,9 +17,9 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_INSTANCE_NORM_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_INSTANCE_NORM_NPU_H_
#include <vector>
#include "include/graph/op/all_ops.h"
#include "nnacl/instance_norm_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class InstanceNormNPUKernel : public NPUKernel {
public:

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save