!3824 Changing folders for several operators

Merge pull request !3824 from zhangzheng/master
pull/3824/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit 0082c0e5df

@ -179,7 +179,9 @@ lite::Primitive *ModelImpl::CopyPrimitive(const schema::Primitive *src_prim) {
case schema::PrimitiveType_StridedSlice:
return new lite::StridedSlice(const_cast<schema::Primitive *>(src_prim));
case schema::PrimitiveType_Prelu:
return new lite::Prelu(const_cast<schema::Primitive *>(src_prim));
return new lite::Prelu(const_cast<schema::Primitive *>(srcPrim));
case schema::PrimitiveType_CaffePReLU:
return new lite::CaffePReLU(const_cast<schema::Primitive *>(srcPrim));
case schema::PrimitiveType_Round:
return new lite::Round(const_cast<schema::Primitive *>(src_prim));
case schema::PrimitiveType_Reverse:

@ -124,6 +124,12 @@ class Prelu : public Activation {
const schema::Prelu *GetAttribute() const { return this->primitive->value_as_Prelu(); }
};
// Lite op wrapper for the Caffe-style PReLU primitive.
// Thin subclass of Activation that only adds typed access to the op's
// flatbuffer attribute table (schema::CaffePReLU).
class CaffePReLU : public Activation {
public:
explicit CaffePReLU(schema::Primitive *primitive) : Activation(primitive) {}
// Decodes and returns the CaffePReLU attribute table from the owned
// flatbuffer primitive (may be nullptr for a malformed model).
const schema::CaffePReLU *GetAttribute() const { return this->primitive->value_as_CaffePReLU(); }
};
class Split : public Primitive {
public:
explicit Split(schema::Primitive *primitive) : Primitive(primitive) {}

@ -23,6 +23,7 @@
#include "src/runtime/kernel/arm/nnacl/fp32/arg_min_max.h"
#include "src/runtime/kernel/arm/nnacl/fp32/cast.h"
#include "src/runtime/kernel/arm/nnacl/concat_parameter.h"
#include "src/runtime/kernel/arm/nnacl/caffeprelu_parameter.h"
#include "src/runtime/kernel/arm/nnacl/fp32/slice.h"
#include "src/runtime/kernel/arm/nnacl/fp32/broadcast_to.h"
#include "src/runtime/kernel/arm/nnacl/reshape_parameter.h"
@ -72,6 +73,7 @@
#include "src/runtime/kernel/arm/nnacl/fp32/lstm.h"
#include "src/runtime/kernel/arm/nnacl/fp32/embedding_lookup.h"
#include "src/runtime/kernel/arm/nnacl/fp32/elu.h"
#include "src/runtime/kernel/arm/nnacl/prelu_parameter.h"
namespace mindspore::kernel {
@ -130,6 +132,33 @@ OpParameter *PopulateExpandDimsParameter(const lite::Primitive *primitive) {
return reinterpret_cast<OpParameter *>(expand_dims_param);
}
// Builds a heap-allocated CaffePreluParameter from the flatbuffer primitive.
// Returns nullptr on a missing attribute table or allocation failure; the
// caller owns the returned OpParameter.
OpParameter *PopulateCaffePReLUParameter(const lite::Primitive *primitive) {
  auto param = primitive->Value()->value_as_CaffePReLU();
  if (param == nullptr) {
    // A malformed model can omit the attribute table; fail early instead of
    // dereferencing a null pointer below.
    MS_LOG(ERROR) << "value_as_CaffePReLU return nullptr";
    return nullptr;
  }
  CaffePreluParameter *caffePrelu_param = new (std::nothrow) CaffePreluParameter();
  if (caffePrelu_param == nullptr) {
    MS_LOG(ERROR) << "new CaffePreluParameter failed.";
    return nullptr;
  }
  caffePrelu_param->op_parameter_.type_ = primitive->Type();
  caffePrelu_param->channelShared = param->channelShared();
  return reinterpret_cast<OpParameter *>(caffePrelu_param);
}
// Builds a heap-allocated PreluParameter from the flatbuffer primitive,
// copying the slope vector into the fixed-size slope_ array.
// Returns nullptr on a missing attribute/slope vector, an oversized slope
// vector, or allocation failure; the caller owns the returned OpParameter.
OpParameter *PopulatePreluParameter(const lite::Primitive *primitive) {
  auto param = primitive->Value()->value_as_Prelu();
  if (param == nullptr || param->slope() == nullptr) {
    MS_LOG(ERROR) << "value_as_Prelu return nullptr";
    return nullptr;
  }
  auto slope = param->slope();
  // slope_ has a fixed capacity of PRELU_OFFSET_MAX_SIZE; reject anything
  // larger instead of writing out of bounds.
  if (slope->size() > PRELU_OFFSET_MAX_SIZE) {
    MS_LOG(ERROR) << "prelu slope size " << slope->size() << " exceeds " << PRELU_OFFSET_MAX_SIZE;
    return nullptr;
  }
  PreluParameter *prelu_param = new (std::nothrow) PreluParameter();
  if (prelu_param == nullptr) {
    // Fixed: the old message said "caffePReluParameter", a copy-paste error.
    MS_LOG(ERROR) << "new PreluParameter failed.";
    return nullptr;
  }
  prelu_param->op_parameter_.type_ = primitive->Type();
  for (size_t i = 0; i < slope->size(); i++) {
    prelu_param->slope_[i] = slope->Get(i);
  }
  return reinterpret_cast<OpParameter *>(prelu_param);
}
OpParameter *PopulatePoolingParameter(const lite::Primitive *primitive) {
auto pooling_primitive = primitive->Value()->value_as_Pooling();
// todo use malloc instead
@ -1365,6 +1394,8 @@ PopulateParameterRegistry::PopulateParameterRegistry() {
populate_parameter_funcs_[schema::PrimitiveType_ScatterND] = PopulateScatterNDParameter;
populate_parameter_funcs_[schema::PrimitiveType_Squeeze] = PopulateSqueezeParameter;
populate_parameter_funcs_[schema::PrimitiveType_Split] = PopulateSplitParameter;
populate_parameter_funcs_[schema::PrimitiveType_CaffePReLU] = PopulateCaffePReLUParameter;
populate_parameter_funcs_[schema::PrimitiveType_Prelu] = PopulatePreluParameter;
populate_parameter_funcs_[schema::PrimitiveType_PriorBox] = PopulatePriorBoxParameter;
populate_parameter_funcs_[schema::PrimitiveType_QuantDTypeCast] = PopulateQuantDTypeCastParameter;
populate_parameter_funcs_[schema::PrimitiveType_Lstm] = PopulateLstmParameter;

@ -0,0 +1,56 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/caffeprelu_base.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_factory.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_CaffePReLU;
namespace mindspore::kernel {
int CaffePreluBaseCPUKernel::Init() { return RET_OK; }
// Creator registered for fp32 CaffePReLU: validates the parameter, constructs
// the base CPU kernel and runs Init(); returns nullptr (freeing the kernel)
// on any failure.
kernel::LiteKernel *CpuCaffePreluFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                   const std::vector<lite::tensor::Tensor *> &outputs,
                                                   OpParameter *opParameter, const Context *ctx,
                                                   const kernel::KernelKey &desc, const lite::Primitive *primitive) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "Input opParameter is nullptr!";
    return nullptr;
  }
  // Fixed: the assert previously referenced the misspelled enumerator
  // PrimitiveType_CaffePrelu, which does not exist in the schema (the rest of
  // this file uses PrimitiveType_CaffePReLU).
  MS_ASSERT(desc.type == schema::PrimitiveType_CaffePReLU);
  auto *kernel = new (std::nothrow) CaffePreluBaseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new CaffePreluBaseCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    delete kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_CaffePReLU, CpuCaffePreluFp32KernelCreator)
} // namespace mindspore::kernel

@ -0,0 +1,52 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CAFFEPRELU_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CAFFEPRELU_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/nnacl/caffeprelu_parameter.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::Context;
namespace mindspore::kernel {
// Base CPU kernel for the Caffe-style PReLU op. Holds the execution context,
// the thread count, and a typed view of the OpParameter. ReSize()/Run() are
// no-ops here; concrete work lives in derived/fp32 kernels.
class CaffePreluBaseCPUKernel : public LiteKernel {
 public:
  CaffePreluBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                          const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
                          const lite::Primitive *primitive)
      // Fixed: initializer list now matches member declaration order
      // (thread_count_ before ctx_), silencing -Wreorder.
      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_), ctx_(ctx) {
    // The OpParameter passed in is expected to be a CaffePreluParameter
    // produced by PopulateCaffePReLUParameter.
    prelu_param_ = reinterpret_cast<CaffePreluParameter *>(parameter);
  }
  ~CaffePreluBaseCPUKernel() = default;
  int Init() override;
  int ReSize() override { return 0; }
  int Run() override { return 0; }

 protected:
  int thread_count_;                 // copied from ctx->thread_num_ at construction
  const Context *ctx_;               // non-owning execution context
  CaffePreluParameter *prelu_param_; // non-owning view of the OpParameter
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CAFFEPRELU_BASE_H_

@ -30,9 +30,9 @@ class PreluBaseCPUKernel : public LiteKernel {
PreluBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
const lite::Primitive *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
op_parameter_->thread_num_ = ctx->thread_num_;
prelu_param_ = reinterpret_cast<PreluParameter *>(op_parameter_);
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) {
opParameter->thread_num_ = ctx->thread_num_;
prelu_param_ = reinterpret_cast<PreluParameter *>(opParameter);
}
~PreluBaseCPUKernel() = default;
@ -44,7 +44,6 @@ class PreluBaseCPUKernel : public LiteKernel {
int Run() override { return 0; }
protected:
int thread_count_;
const Context *ctx_;
PreluParameter *prelu_param_;
};

@ -121,4 +121,5 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Sin, CpuArithmeticSelfFp32Ker
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LogicalNot, CpuArithmeticSelfFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Floor, CpuArithmeticSelfFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Ceil, CpuArithmeticSelfFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Round, CpuArithmeticSelfFp32KernelCreator)
} // namespace mindspore::kernel

@ -32,6 +32,7 @@ using mindspore::schema::PrimitiveType_Exp;
using mindspore::schema::PrimitiveType_Floor;
using mindspore::schema::PrimitiveType_Log;
using mindspore::schema::PrimitiveType_LogicalNot;
using mindspore::schema::PrimitiveType_Round;
using mindspore::schema::PrimitiveType_Rsqrt;
using mindspore::schema::PrimitiveType_Sin;
using mindspore::schema::PrimitiveType_Sqrt;
@ -80,6 +81,9 @@ class ArithmeticSelfCPUKernel : public LiteKernel {
case PrimitiveType_Ceil:
arithmeticSelf_run_ = ElementCeil;
break;
case PrimitiveType_Round:
arithmeticSelf_run_ = ElementRound;
break;
default:
break;
}

@ -0,0 +1,97 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/caffeprelu.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/runtime/kernel/arm/nnacl/caffeprelu.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_CaffePReLU;
namespace mindspore::kernel {
int CaffePReluCPUKernel::Init() { return RET_OK; }
// Executes one parallel task: applies PReLU to this task's slice of the
// flattened input (slicing is computed inside PRelu from task_id and
// op_parameter_.thread_num_). input_data/output_data are bound in Run().
int CaffePReluCPUKernel::DoExcute(int task_id) {
PRelu(input_data, output_data, prelu_param_, task_id);
return RET_OK;
}
// Trampoline for LiteBackendParallelLaunch: forwards one task id to the
// kernel instance carried in cdata.
int CaffePReluRun(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
  auto kernel = reinterpret_cast<CaffePReluCPUKernel *>(cdata);
  auto ret = kernel->DoExcute(task_id);
  if (ret == RET_OK) {
    return RET_OK;
  }
  MS_LOG(ERROR) << "PReluRun error task_id[" << task_id << "] error_code[" << ret << "]";
  return RET_ERROR;
}
int CaffePReluCPUKernel::Run() {
auto prepare_ret = Prepare();
if (prepare_ret != RET_OK) {
MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
return prepare_ret;
}
auto input = inputs_.at(0);
auto input1 = inputs_.at(1);
prelu_param_->input_num_ = input->ElementsNum();
input_data = reinterpret_cast<float *>(input->Data());
output_data = reinterpret_cast<float *>(outputs_.at(0)->Data());
auto channels = input->shape();
prelu_param_->negtive_slope_ = reinterpret_cast<float *>(input1->Data());
prelu_param_->channel_num_ = channels.at(channels.size() - 1);
auto ret = LiteBackendParallelLaunch(CaffePReluRun, this, prelu_param_->op_parameter_.thread_num_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "PReluDwRun error: error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}
// Creator for the fp32 CaffePReLU kernel: validates the parameter, constructs
// the kernel and runs Init(); returns nullptr (freeing the kernel) on failure.
// NOTE(review): a creator with the same name and the same REG_KERNEL triple
// also exists in base/caffeprelu_base.cc — presumably a duplicate symbol /
// double registration; confirm which file should own CaffePReLU.
kernel::LiteKernel *CpuCaffePReluFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                   const std::vector<lite::tensor::Tensor *> &outputs,
                                                   OpParameter *opParameter, const lite::Context *ctx,
                                                   const kernel::KernelKey &desc, const lite::Primitive *primitive) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "input opParameter is nullptr!";
    return nullptr;
  }
  // Fixed: this creator handles CaffePReLU; the old assert checked the
  // unrelated PrimitiveType_Prelu.
  MS_ASSERT(desc.type == schema::PrimitiveType_CaffePReLU);
  auto *kernel = new (std::nothrow) CaffePReluCPUKernel(opParameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new CaffePReluCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_CaffePReLU, CpuCaffePReluFp32KernelCreator)
} // namespace mindspore::kernel

@ -0,0 +1,55 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CAFFEPRELU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CAFFEPRELU_H_
#include <vector>
#include "src/lite_kernel.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/nnacl/caffeprelu.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::Context;
namespace mindspore::kernel {
// fp32 CPU kernel for Caffe-style PReLU. Run() binds the input/output/slope
// tensor buffers and fans work out across threads; DoExcute() processes one
// task's slice.
class CaffePReluCPUKernel : public LiteKernel {
 public:
  CaffePReluCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
                      const lite::Primitive *primitive)
      // Fixed: initializer list reordered to match member declaration order
      // (thread_count_ before ctx_), silencing -Wreorder.
      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_), ctx_(ctx) {
    // NOTE(review): casts the opParameter member inherited from LiteKernel;
    // presumably the same pointer as `parameter` — confirm.
    prelu_param_ = reinterpret_cast<CaffePReluParameter *>(opParameter);
    primitive_ = primitive;
  }
  ~CaffePReluCPUKernel() = default;
  int Init() override;
  int ReSize() override { return 0; }
  int Run() override;
  int DoExcute(int task_id);

 protected:
  int thread_count_;                 // copied from ctx->thread_num_ at construction
  const Context *ctx_;               // non-owning execution context
  CaffePReluParameter *prelu_param_; // non-owning view of the OpParameter

 private:
  // Fixed: initialize raw tensor-buffer pointers; they are only bound in Run().
  float *input_data = nullptr;
  float *output_data = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CAFFEPRELU_H_

@ -56,7 +56,7 @@ int PreluInt8CPUKernel::Init() {
int PreluInt8CPUKernel::ReSize() { return 0; }
int PreluInt8CPUKernel::Run() {
auto ret = LiteBackendParallelLaunch(PreluInt8Run, this, thread_count_);
auto ret = LiteBackendParallelLaunch(PreluInt8Run, this, quant_prelu_parm_->op_parameter_.thread_num_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "RunPreluParam failed. errorcode: ";
}

@ -0,0 +1,37 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/nnacl/caffeprelu.h"
// Applies PReLU over this task's contiguous slice of the flattened input:
//   out[i] = in[i]              when in[i] > 0
//   out[i] = in[i] * slope      otherwise
// When channeShared is set, a single slope (index 0) applies to every
// element; otherwise the slope is selected per channel, assuming the channel
// is the fastest-varying (last) dimension so `i % channel_num_` recovers it.
// The slice is input_num_/thread_num_ elements; the last task absorbs the
// remainder.
void PRelu(float *input, float *output, CaffePReluParameter *prelu_param_, int task_id) {
  int thread_num = prelu_param_->op_parameter_.thread_num_;
  int block = prelu_param_->input_num_ / thread_num;
  int start = task_id * block;
  int end = (task_id == thread_num - 1) ? prelu_param_->input_num_ : start + block;
  for (int i = start; i < end; i++) {
    if (input[i] > 0) {
      output[i] = input[i];
    } else if (prelu_param_->channeShared) {
      // Fixed: the two branches below were swapped — shared mode indexed the
      // slope array per channel (reading past a one-element slope tensor)
      // while per-channel mode always used slope[0].
      output[i] = input[i] * prelu_param_->negtive_slope_[0];
    } else {
      output[i] = input[i] * prelu_param_->negtive_slope_[i % prelu_param_->channel_num_];
    }
  }
}

@ -0,0 +1,32 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_H_
#include "src/runtime/kernel/arm/nnacl/op_base.h"
// Runtime parameter block for the fp32 Caffe PReLU kernel (see PRelu below).
struct CaffePReluParameter {
OpParameter op_parameter_;  // common header; its thread_num_ drives the task split in PRelu()
float *negtive_slope_;  // [sic "negtive"] non-owning; points into slope input tensor (bound in Run())
bool channeShared;  // [sic "channe"] true -> one slope shared by all channels
int channel_num_;  // set in Run() from the input's last shape dimension
int input_num_;  // total element count of the input tensor
int thread_num_;  // NOTE(review): appears unused — PRelu reads op_parameter_.thread_num_; confirm
};
// Computes task_id's slice of PReLU over input into output; see caffeprelu.cc.
void PRelu(float *input, float *output, CaffePReluParameter *prelu_param_, int task_id);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_H_

@ -0,0 +1,35 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_PARAMETER_H_
#include "src/runtime/kernel/arm/nnacl/op_base.h"
#define CAFFEPRELU_OFFSET_MAX_SIZE 4
// OpParameter payload produced by PopulateCaffePReLUParameter for CaffePReLU.
// Only op_parameter_ and channelShared are populated there; the remaining
// fields are not assigned anywhere in the visible code — presumably reserved
// for shape/offset bookkeeping; confirm before relying on them.
struct CaffePreluParameter {
OpParameter op_parameter_;  // common header (type_ set from the primitive)
bool channelShared;  // true -> one slope shared by all channels (from flatbuffer attr)
double alpha_;
int64_t offset_[CAFFEPRELU_OFFSET_MAX_SIZE];
int64_t in_offset_[CAFFEPRELU_OFFSET_MAX_SIZE];
int64_t axis_;
const int *in_shape_;  // non-owning
const int *out_shape_;  // non-owning
int input_dim_;
};
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_CAFFEPRELU_PARAMETER_H_

@ -14,20 +14,20 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_PRELU_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_PRELU_PARAMETER_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_PRELU_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_PRELU_PARAMETER_H_
#include "nnacl/op_base.h"
#include "nnacl/quantization/quantize.h"
#define PRELU_OFFSET_MAX_SIZE 4
#define PRELU_OFFSET_MAX_SIZE 65535
typedef struct PreluParameter {
OpParameter op_parameter_;
PreluQuantArg quant_arg;
double alpha_;
int thread_count_;
int64_t offset_[PRELU_OFFSET_MAX_SIZE];
float slope_[PRELU_OFFSET_MAX_SIZE];
int64_t in_offset_[PRELU_OFFSET_MAX_SIZE];
int64_t axis_;
const int *in_shape_;
@ -36,4 +36,4 @@ typedef struct PreluParameter {
int element_num;
} PreluParameter;
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_PRELU_PARAMETER_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_PRELU_PARAMETER_H_

Loading…
Cancel
Save