!4941 [MS][LITE][Develop]rename caffeprelu to prelu

Merge pull request !4941 from chenjianping/lite_dev2
pull/4941/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit 026bbc46ff

@ -114,7 +114,7 @@
#include "src/runtime/kernel/arm/nnacl/fp32/arg_min_max.h"
#include "src/runtime/kernel/arm/nnacl/fp32/cast.h"
#include "src/runtime/kernel/arm/nnacl/concat_parameter.h"
#include "src/runtime/kernel/arm/nnacl/caffeprelu_parameter.h"
#include "src/runtime/kernel/arm/nnacl/prelu_parameter.h"
#include "src/runtime/kernel/arm/nnacl/fp32/slice.h"
#include "src/runtime/kernel/arm/nnacl/fp32/broadcast_to.h"
#include "src/runtime/kernel/arm/nnacl/reshape_parameter.h"
@ -165,7 +165,7 @@
#include "src/runtime/kernel/arm/nnacl/fp32/lstm.h"
#include "src/runtime/kernel/arm/nnacl/fp32/embedding_lookup.h"
#include "src/runtime/kernel/arm/nnacl/fp32/elu.h"
#include "src/runtime/kernel/arm/nnacl/prelu_parameter.h"
#include "src/runtime/kernel/arm/nnacl/leaky_relu_parameter.h"
namespace mindspore::kernel {
@ -227,31 +227,37 @@ OpParameter *PopulateExpandDimsParameter(const mindspore::lite::PrimitiveC *prim
return reinterpret_cast<OpParameter *>(expand_dims_param);
}
OpParameter *PopulateCaffePReLUParameter(const mindspore::lite::PrimitiveC *primitive) {
auto param = reinterpret_cast<mindspore::lite::CaffePReLU *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
auto *caffePrelu_param = new (std::nothrow) CaffePreluParameter();
if (caffePrelu_param == nullptr) {
OpParameter *PopulatePReLUParameter(const mindspore::lite::PrimitiveC *primitive) {
auto param = dynamic_cast<const mindspore::lite::CaffePReLU *>(primitive);
auto *prelu_param = new (std::nothrow) PReluParameter();
if (prelu_param == nullptr) {
MS_LOG(ERROR) << "new caffePReluParameter failed.";
return nullptr;
}
caffePrelu_param->op_parameter_.type_ = primitive->Type();
caffePrelu_param->channelShared = param->GetChannelShared();
return reinterpret_cast<OpParameter *>(caffePrelu_param);
prelu_param->op_parameter_.type_ = primitive->Type();
prelu_param->channelShared = param->GetChannelShared();
return reinterpret_cast<OpParameter *>(prelu_param);
}
OpParameter *PopulatePreluParameter(const mindspore::lite::PrimitiveC *primitive) {
auto param = reinterpret_cast<mindspore::lite::Prelu *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
auto *prelu_param = new (std::nothrow) PreluParameter();
if (prelu_param == nullptr) {
MS_LOG(ERROR) << "new caffePReluParameter failed.";
OpParameter *PopulateLeakyReluParameter(const mindspore::lite::PrimitiveC *primitive) {
auto param = dynamic_cast<const mindspore::lite::Prelu *>(primitive);
LeakyReluParameter *leaky_relu_param = new (std::nothrow) LeakyReluParameter();
if (leaky_relu_param == nullptr) {
MS_LOG(ERROR) << "new LeakyReluParameter failed.";
return nullptr;
}
prelu_param->op_parameter_.type_ = primitive->Type();
leaky_relu_param->op_parameter_.type_ = primitive->Type();
auto temp = param->GetSlope();
leaky_relu_param->slope_ = reinterpret_cast<float *>(malloc(temp.size() * sizeof(float)));
if (leaky_relu_param->slope_ == nullptr) {
MS_LOG(ERROR) << "malloc relu slope fail!";
return nullptr;
}
for (size_t i = 0; i < temp.size(); i++) {
prelu_param->slope_[i] = temp[i];
leaky_relu_param->slope_[i] = temp[i];
}
return reinterpret_cast<OpParameter *>(prelu_param);
leaky_relu_param->slope_num_ = temp.size();
return reinterpret_cast<OpParameter *>(leaky_relu_param);
}
OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primitive) {
@ -1529,8 +1535,8 @@ PopulateParameterRegistry::PopulateParameterRegistry() {
populate_parameter_funcs_[schema::PrimitiveType_ScatterND] = PopulateScatterNDParameter;
populate_parameter_funcs_[schema::PrimitiveType_Squeeze] = PopulateSqueezeParameter;
populate_parameter_funcs_[schema::PrimitiveType_Split] = PopulateSplitParameter;
populate_parameter_funcs_[schema::PrimitiveType_CaffePReLU] = PopulateCaffePReLUParameter;
populate_parameter_funcs_[schema::PrimitiveType_Prelu] = PopulatePreluParameter;
populate_parameter_funcs_[schema::PrimitiveType_CaffePReLU] = PopulatePReLUParameter;
populate_parameter_funcs_[schema::PrimitiveType_Prelu] = PopulateLeakyReluParameter;
populate_parameter_funcs_[schema::PrimitiveType_PriorBox] = PopulatePriorBoxParameter;
populate_parameter_funcs_[schema::PrimitiveType_QuantDTypeCast] = PopulateQuantDTypeCastParameter;
populate_parameter_funcs_[schema::PrimitiveType_Lstm] = PopulateLstmParameter;

@ -1,57 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/caffeprelu_base.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_CaffePReLU;
namespace mindspore::kernel {
// No one-time setup is needed for the base caffe-prelu kernel; state is read per-run.
int CaffePreluBaseCPUKernel::Init() { return RET_OK; }
/// Factory registered for PrimitiveType_CaffePReLU on fp32: validates the
/// parsed OpParameter, constructs the base CPU kernel and runs its Init().
/// Returns nullptr (and logs) on null parameter, allocation failure, or a
/// failed Init(); otherwise returns the ready kernel (caller takes ownership).
kernel::LiteKernel *CpuCaffePreluFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                   const std::vector<lite::tensor::Tensor *> &outputs,
                                                   OpParameter *opParameter, const Context *ctx,
                                                   const kernel::KernelKey &desc,
                                                   const kernel::KernelKey &desc,  // NOLINT: keep signature
                                                   const mindspore::lite::PrimitiveC *primitive) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "Input opParameter is nullptr!";
    return nullptr;
  }
  // Fixed: the old assert referenced the non-existent enumerator
  // PrimitiveType_CaffePrelu; the schema enumerator (see the using-declaration
  // and REG_KERNEL below) is spelled PrimitiveType_CaffePReLU.
  MS_ASSERT(desc.type == schema::PrimitiveType_CaffePReLU);
  auto *kernel = new (std::nothrow) CaffePreluBaseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new PreluCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    delete kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_CaffePReLU, CpuCaffePreluFp32KernelCreator)
} // namespace mindspore::kernel

@ -1,52 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CAFFEPRELU_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CAFFEPRELU_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/nnacl/caffeprelu_parameter.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::Context;
namespace mindspore::kernel {
// Base CPU kernel for the caffe-style PReLU op. Holds the parsed
// CaffePreluParameter and threading context; Run()/ReSize() are no-ops here
// and are expected to be provided by derived kernels.
class CaffePreluBaseCPUKernel : public LiteKernel {
 public:
// ctx must be non-null: thread_count_ is read from ctx->thread_num_ in the
// initializer list (a null ctx would dereference here — presumed guaranteed
// by the caller; TODO confirm).
CaffePreluBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
// The populate step allocates the parameter as CaffePreluParameter, so the
// downcast is safe by construction.
prelu_param_ = reinterpret_cast<CaffePreluParameter *>(parameter);
}
~CaffePreluBaseCPUKernel() = default;
int Init() override;
// Base class performs no resize/compute work.
int ReSize() override { return 0; }
int Run() override { return 0; }
 protected:
const Context *ctx_;        // execution context (non-owning)
int thread_count_;          // snapshot of ctx->thread_num_
CaffePreluParameter *prelu_param_;  // non-owning view of the op parameter
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CAFFEPRELU_BASE_H_

@ -13,9 +13,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/prelu_base.h"
#include "src/runtime/kernel/arm/base/leaky_relu_base.h"
#include <vector>
#include "src/runtime/kernel/arm/int8/prelu_int8.h"
#include "src/runtime/kernel/arm/int8/leaky_relu_int8.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
@ -24,10 +24,10 @@
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Prelu;
using mindspore::schema::PrimitiveType_LeakyReLU;
namespace mindspore::kernel {
int PreluBaseCPUKernel::Init() { return RET_OK; }
int LeakyReluBaseCPUKernel::Init() { return RET_OK; }
kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs,
@ -38,8 +38,8 @@ kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector<lite::tensor::Te
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Prelu);
auto *kernel = new (std::nothrow) PreluInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(desc.type == schema::PrimitiveType_LeakyRelu);
auto *kernel = new (std::nothrow) LeakyReluInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PreluCPUKernel fail!";
return nullptr;
@ -54,5 +54,5 @@ kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector<lite::tensor::Te
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Prelu, CpuPreluInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LeakyReLU, CpuPreluInt8KernelCreator)
} // namespace mindspore::kernel

@ -14,38 +14,32 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_Prelu_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_Prelu_BASE_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LEAKY_RELU_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LEAKY_RELU_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/nnacl/prelu_parameter.h"
#include "src/runtime/kernel/arm/nnacl/leaky_relu_parameter.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::Context;
namespace mindspore::kernel {
class PreluBaseCPUKernel : public LiteKernel {
class LeakyReluBaseCPUKernel : public LiteKernel {
public:
PreluBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
LeakyReluBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) {
prelu_param_ = reinterpret_cast<PreluParameter *>(op_parameter_);
}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
~PreluBaseCPUKernel() = default;
~LeakyReluBaseCPUKernel() = default;
int Init() override;
int ReSize() override { return 0; }
int Run() override { return 0; }
protected:
const Context *ctx_;
PreluParameter *prelu_param_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_Prelu_BASE_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LEAKY_RELU_BASE_H_

@ -1,98 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/caffeprelu.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/runtime/kernel/arm/nnacl/caffeprelu.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_CaffePReLU;
namespace mindspore::kernel {
// Nothing to precompute: all buffers and counts are (re)filled in Run().
int CaffePReluCPUKernel::Init() { return RET_OK; }
// Executes one parallel shard (task_id) of the caffe-style PReLU over the
// flat input buffer; input_data/output_data/prelu_param_ were set by Run().
int CaffePReluCPUKernel::DoExcute(int task_id) {
CaffePRelu(input_data, output_data, prelu_param_, task_id);
return RET_OK;
}
// Thread-pool trampoline: unpacks the kernel instance from cdata and forwards
// one shard of the PReLU computation to it.
int CaffePReluRun(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
  auto *kernel = reinterpret_cast<CaffePReluCPUKernel *>(cdata);
  const auto ret = kernel->DoExcute(task_id);
  if (ret == RET_OK) {
    return RET_OK;
  }
  MS_LOG(ERROR) << "PReluRun error task_id[" << task_id << "] error_code[" << ret << "]";
  return RET_ERROR;
}
// Binds input/output/slope buffers from the tensors, fills the parameter
// struct, then fans DoExcute() out across the thread pool.
int CaffePReluCPUKernel::Run() {
auto prepare_ret = Prepare();
if (prepare_ret != RET_OK) {
MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
return prepare_ret;
}
// Tensor 0 is the activation, tensor 1 holds the learned negative slopes.
auto input = in_tensors_[0];
auto input1 = in_tensors_[1];
prelu_param_->input_num_ = input->ElementsNum();
input_data = reinterpret_cast<float *>(input->Data());
output_data = reinterpret_cast<float *>(out_tensors_[0]->Data());
auto channels = input->shape();
prelu_param_->negtive_slope_ = reinterpret_cast<float *>(input1->Data());
// Channel count taken from the last dim — assumes channel-last layout (NHWC);
// TODO(review): confirm, and note empty shape would make .at() throw here.
prelu_param_->channel_num_ = channels.at(channels.size() - 1);
auto ret = LiteBackendParallelLaunch(CaffePReluRun, this, prelu_param_->op_parameter_.thread_num_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "PReluDwRun error: error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}
/// Factory for the fp32 caffe-PReLU kernel. Validates the parameter, creates
/// the kernel and initializes it; returns nullptr (with a log) on any failure,
/// otherwise a ready kernel owned by the caller.
kernel::LiteKernel *CpuCaffePReluFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                   const std::vector<lite::tensor::Tensor *> &outputs,
                                                   OpParameter *opParameter, const lite::Context *ctx,
                                                   const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "input opParameter is nullptr!";
    return nullptr;
  }
  // Fixed: this creator is registered for PrimitiveType_CaffePReLU (see
  // REG_KERNEL below), but the old assert checked PrimitiveType_Prelu and
  // would therefore fire on every legitimate call in assert-enabled builds.
  MS_ASSERT(desc.type == schema::PrimitiveType_CaffePReLU);
  auto *kernel = new (std::nothrow) CaffePReluCPUKernel(opParameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new PReluCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_CaffePReLU, CpuCaffePReluFp32KernelCreator)
} // namespace mindspore::kernel

@ -0,0 +1,104 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/leaky_relu.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/runtime/kernel/arm/nnacl/fp32/leaky_relu.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_LeakyReLU;
using mindspore::schema::PrimitiveType_Prelu;
namespace mindspore::kernel {
namespace {
// Thread-pool trampoline: hands one shard of the element-wise leaky-relu
// computation to the kernel instance packed into cdata.
int LeakyReluRun(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
  auto *self = reinterpret_cast<LeakyReluCPUKernel *>(cdata);
  const int ret = self->DoExcute(task_id);
  if (ret == RET_OK) {
    return RET_OK;
  }
  MS_LOG(ERROR) << "LeakyReluRun error task_id[" << task_id << "] error_code[" << ret << "]";
  return RET_ERROR;
}
}  // namespace
// Releases the slope buffer that the populate step malloc'ed into the
// parameter struct; nulling the pointer afterwards guards against double-free.
LeakyReluCPUKernel::~LeakyReluCPUKernel() {
  float *&slope = prelu_param_->slope_;
  if (slope == nullptr) {
    return;
  }
  free(slope);
  slope = nullptr;
}
// Nothing to precompute: buffers and counts are (re)bound in Run().
int LeakyReluCPUKernel::Init() { return RET_OK; }
// Executes one parallel shard (task_id) of the leaky-relu over the flat
// input buffer; input_data/output_data were set by Run().
int LeakyReluCPUKernel::DoExcute(int task_id) {
DoLeakyRelu(input_data, output_data, prelu_param_, task_id);
return RET_OK;
}
// Binds input/output buffers from the tensors, records the element count,
// then fans DoExcute() out across the thread pool.
int LeakyReluCPUKernel::Run() {
auto prepare_ret = Prepare();
if (prepare_ret != RET_OK) {
MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
return prepare_ret;
}
auto input = in_tensors_.at(0);
prelu_param_->input_num_ = input->ElementsNum();
input_data = reinterpret_cast<float *>(input->Data());
output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
auto ret = LiteBackendParallelLaunch(LeakyReluRun, this, context_->thread_num_);
if (ret != RET_OK) {
// NOTE(review): message says "PReluDwRun" but this is the leaky-relu path —
// looks copy-pasted; left unchanged here since log text is runtime output.
MS_LOG(ERROR) << "PReluDwRun error: error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}
/// Factory for the fp32 leaky-relu kernel. Validates the parameter, creates
/// the kernel and initializes it; returns nullptr (with a log) on any failure,
/// otherwise a ready kernel owned by the caller.
kernel::LiteKernel *CpuLeakyReluFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                  const std::vector<lite::tensor::Tensor *> &outputs,
                                                  OpParameter *param, const lite::Context *ctx,
                                                  const kernel::KernelKey &desc,
                                                  const mindspore::lite::PrimitiveC *primitive) {
  if (param == nullptr) {
    MS_LOG(ERROR) << "input param is nullptr!";
    return nullptr;
  }
  // Fixed: the old assert referenced the non-existent enumerator
  // PrimitiveType_LeakyRelu (schema spelling is PrimitiveType_LeakyReLU, see
  // the using-declaration above), and this creator is registered for BOTH
  // LeakyReLU and Prelu (see REG_KERNEL below), so either type is legitimate.
  MS_ASSERT(desc.type == schema::PrimitiveType_LeakyReLU || desc.type == schema::PrimitiveType_Prelu);
  auto *kernel = new (std::nothrow) LeakyReluCPUKernel(param, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new LeakyReluCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(param->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LeakyReLU, CpuLeakyReluFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Prelu, CpuLeakyReluFp32KernelCreator)
} // namespace mindspore::kernel

@ -13,29 +13,28 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CAFFEPRELU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CAFFEPRELU_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_LEAKY_RELU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_LEAKY_RELU_H_
#include <vector>
#include "src/lite_kernel.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/nnacl/caffeprelu.h"
#include "src/runtime/kernel/arm/nnacl/fp32/leaky_relu.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::Context;
namespace mindspore::kernel {
class CaffePReluCPUKernel : public LiteKernel {
class LeakyReluCPUKernel : public LiteKernel {
public:
CaffePReluCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
prelu_param_ = reinterpret_cast<CaffePReluParameter *>(op_parameter_);
LeakyReluCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
prelu_param_ = (reinterpret_cast<LeakyReluParameter *>(op_parameter_));
primitive_ = primitive;
}
~CaffePReluCPUKernel() = default;
~LeakyReluCPUKernel();
int Init() override;
int ReSize() override { return 0; }
@ -43,13 +42,11 @@ class CaffePReluCPUKernel : public LiteKernel {
int DoExcute(int task_id);
protected:
const Context *ctx_;
int thread_count_;
CaffePReluParameter *prelu_param_;
LeakyReluParameter *prelu_param_;
private:
float *input_data;
float *output_data;
float *input_data = nullptr;
float *output_data = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CAFFEPRELU_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_LEAKY_RELU_H_

@ -16,7 +16,6 @@
#include "src/runtime/kernel/arm/fp32/prelu.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/runtime/kernel/arm/nnacl/prelu.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
@ -25,16 +24,10 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Prelu;
using mindspore::schema::PrimitiveType_CaffePReLU;
namespace mindspore::kernel {
int PReluCPUKernel::Init() { return RET_OK; }
int PReluCPUKernel::DoExcute(int task_id) {
PRelu(input_data, output_data, prelu_param_, task_id);
return RET_OK;
}
namespace {
int PReluRun(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
auto PReludata = reinterpret_cast<PReluCPUKernel *>(cdata);
auto ret = PReludata->DoExcute(task_id);
@ -44,6 +37,14 @@ int PReluRun(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
}
return RET_OK;
}
} // namespace
int PReluCPUKernel::Init() { return RET_OK; }
int PReluCPUKernel::DoExcute(int task_id) {
DoPRelu(input_data, output_data, prelu_param_, task_id);
return RET_OK;
}
int PReluCPUKernel::Run() {
auto prepare_ret = Prepare();
@ -51,12 +52,17 @@ int PReluCPUKernel::Run() {
MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
return prepare_ret;
}
auto input = in_tensors_.at(0);
auto input = in_tensors_[0];
auto input1 = in_tensors_[1];
prelu_param_->input_num_ = input->ElementsNum();
input_data = reinterpret_cast<float *>(input->Data());
output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
output_data = reinterpret_cast<float *>(out_tensors_[0]->Data());
auto channels = input->shape();
prelu_param_->slope_ = reinterpret_cast<float *>(input1->Data());
prelu_param_->channel_num_ = channels.at(channels.size() - 1);
auto ret = LiteBackendParallelLaunch(PReluRun, this, prelu_param_->thread_num_);
auto ret = LiteBackendParallelLaunch(PReluRun, this, prelu_param_->op_parameter_.thread_num_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "PReluDwRun error: error_code[" << ret << "]";
return RET_ERROR;
@ -66,28 +72,28 @@ int PReluCPUKernel::Run() {
kernel::LiteKernel *CpuPReluFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs,
OpParameter *opParameter, const lite::Context *ctx,
OpParameter *param, const lite::Context *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "input opParameter is nullptr!";
if (param == nullptr) {
MS_LOG(ERROR) << "input param is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Prelu);
auto *kernel = new (std::nothrow) PReluCPUKernel(opParameter, inputs, outputs, ctx, primitive);
auto *kernel = new (std::nothrow) PReluCPUKernel(param, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PReluCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(param->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Prelu, CpuPReluFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_CaffePReLU, CpuPReluFp32KernelCreator)
} // namespace mindspore::kernel

@ -18,22 +18,17 @@
#include <vector>
#include "src/lite_kernel.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/nnacl/prelu.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::Context;
#include "src/runtime/kernel/arm/nnacl/fp32/prelu.h"
namespace mindspore::kernel {
class PReluCPUKernel : public LiteKernel {
public:
PReluCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
prelu_param_ = (reinterpret_cast<PReluParameter *>(op_parameter_));
primitive_ = primitive;
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
prelu_param_ = reinterpret_cast<PReluParameter *>(op_parameter_);
}
~PReluCPUKernel() = default;
@ -42,14 +37,10 @@ class PReluCPUKernel : public LiteKernel {
int Run() override;
int DoExcute(int task_id);
protected:
const Context *ctx_;
int thread_count_;
PReluParameter *prelu_param_;
private:
float *input_data;
float *output_data;
PReluParameter *prelu_param_;
float *input_data = nullptr;
float *output_data = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_PRELU_H_

@ -14,9 +14,9 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/prelu_int8.h"
#include "src/runtime/kernel/arm/int8/leaky_relu_int8.h"
#include <limits>
#include "src/runtime/kernel/arm/nnacl/int8/prelu_int8.h"
#include "src/runtime/kernel/arm/nnacl/int8/leaky_relu_int8.h"
#include "src/runtime/runtime_api.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
@ -28,62 +28,78 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Prelu;
namespace mindspore::kernel {
int PreluInt8CPUKernel::Init() {
PreluBaseCPUKernel::Init();
int LeakyReluInt8CPUKernel::Init() {
LeakyReluBaseCPUKernel::Init();
LeakyReluParameter *param = reinterpret_cast<LeakyReluParameter *>(op_parameter_);
quant_prelu_parm_.slope_ = reinterpret_cast<float *>(malloc(param->slope_num_ * sizeof(float)));
if (quant_prelu_parm_.slope_ == nullptr) {
MS_LOG(ERROR) << "malloc data fail!";
return RET_ERROR;
}
for (size_t i = 0; i < param->slope_num_; ++i) {
quant_prelu_parm_.slope_[i] = param->slope_[i];
}
auto *input_tensor = in_tensors_.at(kInputIndex);
auto in_quant_args = input_tensor->GetQuantParams();
quant_prelu_parm_->quant_arg.in_args_.scale_ = in_quant_args.front().scale;
quant_prelu_parm_->quant_arg.in_args_.zp_ = in_quant_args.front().zeroPoint;
quant_prelu_parm_.quant_arg.in_args_.scale_ = in_quant_args.front().scale;
quant_prelu_parm_.quant_arg.in_args_.zp_ = in_quant_args.front().zeroPoint;
auto *out_tensor = out_tensors_.at(kOutputIndex);
auto out_quant_args = out_tensor->GetQuantParams();
quant_prelu_parm_->quant_arg.out_args_.scale_ = out_quant_args.front().scale;
quant_prelu_parm_->quant_arg.out_args_.zp_ = out_quant_args.front().zeroPoint;
quant_prelu_parm_.quant_arg.out_args_.scale_ = out_quant_args.front().scale;
quant_prelu_parm_.quant_arg.out_args_.zp_ = out_quant_args.front().zeroPoint;
quant_prelu_parm_->quant_arg.output_activation_max_ = std::numeric_limits<int8_t>::max();
quant_prelu_parm_->quant_arg.output_activation_min_ = std::numeric_limits<int8_t>::min();
quant_prelu_parm_.quant_arg.output_activation_max_ = std::numeric_limits<int8_t>::max();
quant_prelu_parm_.quant_arg.output_activation_min_ = std::numeric_limits<int8_t>::min();
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}
int PreluInt8CPUKernel::ReSize() {
LeakyReluInt8CPUKernel::~LeakyReluInt8CPUKernel() {
if (quant_prelu_parm_.slope_ != nullptr) {
free(quant_prelu_parm_.slope_);
quant_prelu_parm_.slope_ = nullptr;
}
}
int LeakyReluInt8CPUKernel::ReSize() {
auto *input_tensor = in_tensors_.at(kInputIndex);
auto *out_tensor = out_tensors_.at(kOutputIndex);
auto input_dim = input_tensor->shape().size();
MS_ASSERT(input_dim <= CROP_OFFSET_MAX_SIZE);
quant_prelu_parm_->input_dim_ = input_dim;
quant_prelu_parm_->element_num = in_tensors_[0]->Size();
quant_prelu_parm_->in_shape_ = input_tensor->shape().data();
quant_prelu_parm_->out_shape_ = out_tensor->shape().data();
quant_prelu_parm_.input_dim_ = input_dim;
quant_prelu_parm_.element_num = in_tensors_[0]->Size();
quant_prelu_parm_.in_shape_ = input_tensor->shape().data();
quant_prelu_parm_.out_shape_ = out_tensor->shape().data();
return RET_OK;
}
int PreluInt8CPUKernel::Run() {
int LeakyReluInt8CPUKernel::Run() {
auto ret = Prepare();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
return ret;
}
ret = LiteBackendParallelLaunch(PreluInt8Run, this, quant_prelu_parm_->op_parameter_.thread_num_);
ret = LiteBackendParallelLaunch(PreluInt8Run, this, op_parameter_->thread_num_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "RunPreluParam failed. errorcode: ";
}
return RET_OK;
}
int PreluInt8Run(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
auto prelu = reinterpret_cast<PreluInt8CPUKernel *>(cdata);
auto prelu = reinterpret_cast<LeakyReluInt8CPUKernel *>(cdata);
prelu->DoExecute(task_id);
return RET_OK;
}
int PreluInt8CPUKernel::DoExecute(int task_id) {
int LeakyReluInt8CPUKernel::DoExecute(int task_id) {
auto input_tensor = in_tensors_.at(kInputIndex);
auto out_tensor = out_tensors_.at(kOutputIndex);
int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data());
int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data());
prelu(input_data, output_data, quant_prelu_parm_, task_id);
DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
return RET_OK;
}

@ -20,21 +20,18 @@
#include <vector>
#include "src/lite_kernel.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/base/prelu_base.h"
#include "src/runtime/kernel/arm/base/leaky_relu_base.h"
#include "src/runtime/runtime_api.h"
using mindspore::lite::Context;
namespace mindspore::kernel {
class PreluInt8CPUKernel : public PreluBaseCPUKernel {
class LeakyReluInt8CPUKernel : public LeakyReluBaseCPUKernel {
public:
PreluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: PreluBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
quant_prelu_parm_ = reinterpret_cast<PreluParameter *>(op_parameter_);
LeakyReluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LeakyReluBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
}
~PreluInt8CPUKernel() override {}
~LeakyReluInt8CPUKernel() override;;
int Init() override;
int ReSize() override;
@ -42,7 +39,7 @@ class PreluInt8CPUKernel : public PreluBaseCPUKernel {
int DoExecute(int task_id);
private:
PreluParameter *quant_prelu_parm_;
LeakyReluQuantArg quant_prelu_parm_;
};
int PreluInt8Run(int task_id, LiteParallelGroupEnv *penv, void *cdata);
} // namespace mindspore::kernel

@ -1,35 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_PARAMETER_H_
#include "src/runtime/kernel/arm/nnacl/op_base.h"
#define CAFFEPRELU_OFFSET_MAX_SIZE 4
// Parameter block for the Caffe-style PReLU operator
// (y = x for x > 0, otherwise y = slope * x, slope optionally per-channel).
// This struct is being removed in this commit in favor of PReluParameter.
struct CaffePreluParameter {
// Common operator header embedded first (carries op type / threading info).
OpParameter op_parameter_;
// True when one slope value is shared across all channels; populated from
// the primitive's GetChannelShared() (see PopulateCaffePReLUParameter).
bool channelShared;
// Scalar slope value — presumably the shared negative slope; not written by
// the visible populate code. NOTE(review): confirm who initializes this.
double alpha_;
// Fixed-capacity offset tables (capacity CAFFEPRELU_OFFSET_MAX_SIZE == 4).
// NOTE(review): their exact use is not visible in this diff — confirm.
int64_t offset_[CAFFEPRELU_OFFSET_MAX_SIZE];
int64_t in_offset_[CAFFEPRELU_OFFSET_MAX_SIZE];
// Axis along which the per-channel slope applies — TODO confirm semantics.
int64_t axis_;
// Non-owning views of the input/output shape arrays (set by the caller).
const int *in_shape_;
const int *out_shape_;
// Number of dimensions described by in_shape_/out_shape_.
int input_dim_;
};
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_PARAMETER_H_

@ -13,12 +13,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nnacl/prelu.h"
#include "nnacl/fp32/leaky_relu.h"
void PRelu(float *input, float *output, PReluParameter *prelu_param_, int task_id) {
for (int i = task_id; i < prelu_param_->input_num_; i += prelu_param_->op_parameter_.thread_num_) {
void DoLeakyRelu(float *input, float *output, LeakyReluParameter *param, int task_id) {
for (int i = task_id; i < param->input_num_; i += param->op_parameter_.thread_num_) {
if (input[i] <= 0) {
output[i] = input[i] * prelu_param_->negtive_slope_[0];
output[i] = input[i] * param->slope_[0];
} else {
output[i] = input[i];
}

@ -17,18 +17,12 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_PRELU_H_
#include "nnacl/op_base.h"
typedef struct PReluParameter {
OpParameter op_parameter_;
float *negtive_slope_;
int input_num_;
int thread_num_;
} PReluParameter;
#include "nnacl/leaky_relu_parameter.h"
#ifdef __cplusplus
extern "C" {
#endif
void PRelu(float *input, float *output, PReluParameter *prelu_param_, int task_id);
void DoLeakyRelu(float *input, float *output, LeakyReluParameter *prelu_param_, int task_id);
#ifdef __cplusplus
}
#endif

@ -13,9 +13,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/nnacl/caffeprelu.h"
#include "nnacl/fp32/prelu.h"
void CaffePRelu(float *input, float *output, CaffePReluParameter *prelu_param_, int task_id) {
void DoPRelu(float *input, float *output, PReluParameter *prelu_param_, int task_id) {
int block = (int)(prelu_param_->input_num_ / prelu_param_->op_parameter_.thread_num_);
int start = task_id * block;
int end = start + block;
@ -26,11 +26,11 @@ void CaffePRelu(float *input, float *output, CaffePReluParameter *prelu_param_,
if (input[i] > 0) {
output[i] = input[i];
} else {
if (!prelu_param_->channeShared) {
if (!prelu_param_->channelShared) {
int temp = i % prelu_param_->channel_num_;
output[i] = input[i] * prelu_param_->negtive_slope_[temp];
output[i] = input[i] * prelu_param_->slope_[temp];
} else {
output[i] = input[i] * prelu_param_->negtive_slope_[0];
output[i] = input[i] * prelu_param_->slope_[0];
}
}
}

@ -13,26 +13,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_FP32_PRELU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_FP32_PRELU_H_
#include "src/runtime/kernel/arm/nnacl/op_base.h"
typedef struct CaffePReluParameter {
OpParameter op_parameter_;
float *negtive_slope_;
bool channeShared;
int channel_num_;
int input_num_;
int thread_num_;
} CaffePReluParameter;
#include "nnacl/op_base.h"
#include "nnacl/prelu_parameter.h"
#ifdef __cplusplus
extern "C" {
#endif
void CaffePRelu(float *input, float *output, CaffePReluParameter *prelu_param_, int task_id);
void DoPRelu(float *input, float *output, PReluParameter *prelu_param_, int task_id);
#ifdef __cplusplus
}
#endif
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_FP32_PRELU_H_

@ -14,10 +14,9 @@
* limitations under the License.
*/
#include "nnacl/prelu_parameter.h"
#include "nnacl/int8/prelu_int8.h"
#include "nnacl/int8/leaky_relu_int8.h"
void prelu(int8_t *inputs, int8_t *output_ptr, PreluParameter *quant_prelu_parm, int task_id) {
void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
float output_scale = quant_prelu_parm->quant_arg.out_args_.scale_;
int output_zp = quant_prelu_parm->quant_arg.out_args_.zp_;
const float output_inverse_scale = 1.f / output_scale;
@ -34,7 +33,7 @@ void prelu(int8_t *inputs, int8_t *output_ptr, PreluParameter *quant_prelu_parm,
float bias = -input_quant[i].zp_ * scale;
for (int j = task_id; j < quant_prelu_parm->element_num; j += quant_prelu_parm->op_parameter_.thread_num_) {
if (inputs[j] <= 0) {
int32_t output_tmp = round(inputs[j] * quant_prelu_parm->alpha_ * scale + bias) + output_zp;
int32_t output_tmp = round(inputs[j] * quant_prelu_parm->slope_[0] * scale + bias) + output_zp;
if (output_tmp > 127) {
output_ptr[j] = 127;
} else if (output_tmp < -128) {

@ -18,12 +18,12 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_PRELU_INT8_H_
#include "nnacl/op_base.h"
#include "nnacl/prelu_parameter.h"
#include "nnacl/quantization/quantize.h"
#ifdef __cplusplus
extern "C" {
#endif
void prelu(int8_t *inputs, int8_t *output_ptr, PreluParameter *quant_Prelu_parm, int task_id);
void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id);
#ifdef __cplusplus
}
#endif

@ -14,14 +14,16 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_LEAKYRELU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_LEAKYRELU_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_LEAKY_RELU_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_LEAKY_RELU_PARAMETER_H_
#include "nnacl/op_base.h"
typedef struct LeakyReluParameter {
OpParameter op_parameter_;
float alpha;
float *slope_;
size_t slope_num_;
int input_num_;
} LeakyReluParameter;
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_LEAKYRELU_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_LEAKY_RELU_PARAMETER_H_

@ -17,23 +17,13 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_PRELU_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_PRELU_PARAMETER_H_
#include "nnacl/op_base.h"
#include "nnacl/quantization/quantize.h"
#define PRELU_OFFSET_MAX_SIZE 65535
typedef struct PreluParameter {
#include "src/runtime/kernel/arm/nnacl/op_base.h"
typedef struct PReluParameter {
OpParameter op_parameter_;
PreluQuantArg quant_arg;
double alpha_;
int thread_count_;
float slope_[PRELU_OFFSET_MAX_SIZE];
int64_t in_offset_[PRELU_OFFSET_MAX_SIZE];
int64_t axis_;
const int *in_shape_;
const int *out_shape_;
int input_dim_;
int element_num;
} PreluParameter;
float *slope_;
bool channelShared;
int channel_num_;
int input_num_;
} PReluParameter;
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_PRELU_PARAMETER_H_

@ -259,6 +259,17 @@ typedef struct PowerQuantArg {
int output_activation_max_;
} PowerQuantArg;
// Quantized (int8) leaky-ReLU parameter block, consumed by DoLeakReluInt8.
typedef struct LeakyReluQuantArg {
// Common operator header embedded first; thread_num_ is read by the kernel
// to stride the per-thread element loop.
OpParameter op_parameter_;
// Input/output quantization arguments; the kernel reads
// quant_arg.out_args_.scale_ and quant_arg.out_args_.zp_ for requantization.
PreluQuantArg quant_arg;
// Negative-slope values; the int8 kernel applies slope_[0] to inputs <= 0.
float *slope_;
// Axis along which a per-channel slope would apply — not read by the int8
// kernel shown in this diff; NOTE(review): confirm it is still needed.
int64_t axis_;
// Non-owning views of the input/output shape arrays.
const int *in_shape_;
const int *out_shape_;
// Number of dimensions described by in_shape_/out_shape_.
int input_dim_;
// Total number of elements to process (loop bound in DoLeakReluInt8).
int element_num;
} LeakyReluQuantArg;
#ifdef __cplusplus
extern "C" {
#endif

@ -25,7 +25,6 @@
#include "src/runtime/kernel/opencl/kernel/caffe_prelu.h"
#include "src/runtime/opencl/opencl_runtime.h"
#include "src/runtime/kernel/opencl/cl/caffe_prelu.cl.inc"
#include "src/runtime/kernel/arm/nnacl/caffeprelu.h"
using mindspore::kernel::KERNEL_ARCH::kGPU;
using mindspore::lite::KernelRegistrar;

@ -18,7 +18,7 @@
#include "schema/inner/model_generated.h"
#include "utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/nnacl/prelu_parameter.h"
#include "mindspore/lite/src/runtime/kernel/arm/nnacl/quantization/quantize.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
#include "mindspore/lite/src/ir/tensor.h"
@ -64,10 +64,10 @@ TEST_F(TestPreluInt8, prelu_1) {
output0_tensor->set_data_type(tid_int8);
outputs_tensor[0] = output0_tensor;
PreluParameter op_param;
LeakyReluQuantArg op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Prelu;
op_param.alpha_ = 0.25;
op_param.slope_ = reinterpret_cast<float *>(malloc(sizeof(float)));
op_param.slope_[0] = 0.25;
lite::Context *ctx = new lite::Context;
ctx->thread_num_ = 2;

@ -22,7 +22,7 @@
#include "mindspore/lite/src/runtime/opencl/opencl_runtime.h"
#include "mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h"
#include "mindspore/lite/src/runtime/kernel/opencl/kernel/caffe_prelu.h"
#include "mindspore/lite/src/runtime/kernel/arm/nnacl/caffeprelu.h"
#include "mindspore/lite/src/runtime/kernel/arm/nnacl/prelu_parameter.h"
using mindspore::kernel::CaffePReluOpenCLKernel;
using mindspore::kernel::LiteKernel;
@ -127,7 +127,7 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
MS_LOG(INFO) << "CaffePRelu==================weight data================";
printf_tensor_caffeprelu(inputs[1], weight_tensor->ElementsNum());
auto param = new (std::nothrow) CaffePReluParameter();
auto param = new (std::nothrow) PReluParameter();
if (param == nullptr) {
MS_LOG(ERROR) << "new param error!";
delete input_tensor;

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save