From 6e1ad28724f26202e48be0a817e400282d077666 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Fri, 11 Sep 2020 15:14:27 +0800 Subject: [PATCH] support neg,neg_grad,log_grad --- mindspore/lite/include/model.h | 8 +- mindspore/lite/nnacl/fp32/arithmetic_self.c | 7 ++ mindspore/lite/nnacl/fp32/arithmetic_self.h | 2 + mindspore/lite/schema/model.fbs | 3 + mindspore/lite/schema/ops.fbs | 11 ++ mindspore/lite/src/model.cc | 9 ++ mindspore/lite/src/ops/log_grad.cc | 37 ++++++ mindspore/lite/src/ops/log_grad.h | 42 +++++++ mindspore/lite/src/ops/neg.cc | 33 ++++++ mindspore/lite/src/ops/neg.h | 43 +++++++ mindspore/lite/src/ops/neg_grad.cc | 33 ++++++ mindspore/lite/src/ops/neg_grad.h | 43 +++++++ mindspore/lite/src/ops/primitive_c.cc | 19 ++++ mindspore/lite/src/populate_parameter.cc | 4 + .../kernel/arm/base/leaky_relu_base.cc | 2 +- .../kernel/arm/fp32/arithmetic_self.cc | 1 + .../runtime/kernel/arm/fp32/arithmetic_self.h | 4 + .../arm/fp32_grad/arithmetic_self_grad.cc | 107 ++++++++++++++++++ .../arm/fp32_grad/arithmetic_self_grad.h | 46 ++++++++ .../runtime/kernel/arm/fp32_grad/neg_grad.cc | 95 ++++++++++++++++ .../runtime/kernel/arm/fp32_grad/neg_grad.h | 44 +++++++ 21 files changed, 591 insertions(+), 2 deletions(-) create mode 100644 mindspore/lite/src/ops/log_grad.cc create mode 100644 mindspore/lite/src/ops/log_grad.h create mode 100644 mindspore/lite/src/ops/neg.cc create mode 100644 mindspore/lite/src/ops/neg.h create mode 100644 mindspore/lite/src/ops/neg_grad.cc create mode 100644 mindspore/lite/src/ops/neg_grad.h create mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc create mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h create mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc create mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.h diff --git a/mindspore/lite/include/model.h b/mindspore/lite/include/model.h index 7d6aab7f4e..876abdeb9f 100644 
--- a/mindspore/lite/include/model.h +++ b/mindspore/lite/include/model.h @@ -45,8 +45,14 @@ struct Model { /// \return Pointer of MindSpore Lite Model. static Model *Import(const char *model_buf, size_t size); - /// \brief Free all the temporary buffer + /// \brief Free meta graph temporary buffer void Free(); + + /// \brief Free all temporary buffer + void Destroy(); + + /// \brief Model destruct, free all memory + ~Model(); }; } // namespace mindspore::lite diff --git a/mindspore/lite/nnacl/fp32/arithmetic_self.c b/mindspore/lite/nnacl/fp32/arithmetic_self.c index 8b500bdb11..1323e1fd99 100644 --- a/mindspore/lite/nnacl/fp32/arithmetic_self.c +++ b/mindspore/lite/nnacl/fp32/arithmetic_self.c @@ -113,3 +113,10 @@ int ElementCeil(float *input, float *output, int number) { } return NNACL_OK; } + +int ElementNegative(float *input, float *output, int element_size) { + for (int i = 0; i < element_size; ++i) { + output[i] = -input[i]; + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/fp32/arithmetic_self.h b/mindspore/lite/nnacl/fp32/arithmetic_self.h index 38db8002dd..ca277f8bf0 100644 --- a/mindspore/lite/nnacl/fp32/arithmetic_self.h +++ b/mindspore/lite/nnacl/fp32/arithmetic_self.h @@ -47,6 +47,8 @@ int ElementRound(float *input, float *output, int element_size); int ElementFloor(float *input, float *output, int element_size); int ElementCeil(float *input, float *output, int number); + +int ElementNegative(float *input, float *output, int element_size); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/schema/model.fbs b/mindspore/lite/schema/model.fbs index d7b9e0deb3..8ff085b143 100644 --- a/mindspore/lite/schema/model.fbs +++ b/mindspore/lite/schema/model.fbs @@ -199,6 +199,9 @@ union PrimitiveType { Proposal, Custom, BlackBox, + NegGrad, + LogGrad, + BatchToSpaceND, } enum QuantType: int { diff --git a/mindspore/lite/schema/ops.fbs b/mindspore/lite/schema/ops.fbs index 11df65e477..7297daa5a8 100644 --- a/mindspore/lite/schema/ops.fbs +++ 
b/mindspore/lite/schema/ops.fbs @@ -481,6 +481,9 @@ table Abs { table Neg { } +table NegGrad { +} + table Exp { base : float = -1.0; scale : float = 1.0; @@ -505,6 +508,9 @@ table Ceil { table Log { } +table LogGrad { +} + table Tan { } @@ -749,6 +755,11 @@ table BatchToSpace { crops: [int]; } +table BatchToSpaceND { + blockShape: [int]; + crops: [int]; +} + table AddN { N: int; } diff --git a/mindspore/lite/src/model.cc b/mindspore/lite/src/model.cc index 4cf441ea80..9331626071 100644 --- a/mindspore/lite/src/model.cc +++ b/mindspore/lite/src/model.cc @@ -124,12 +124,21 @@ void Model::Free() { free(this->buf); this->buf = nullptr; } +} + +void Model::Destroy() { + Free(); auto nodes_size = this->nodes_.size(); for (size_t i = 0; i < nodes_size; ++i) { auto node = this->nodes_[i]; MS_ASSERT(node != nullptr); + MS_ASSERT(node->primitive_ != nullptr); + delete node->primitive_; + node->primitive_ = nullptr; delete node; } this->nodes_.clear(); } + +Model::~Model() { Destroy(); } } // namespace mindspore::lite diff --git a/mindspore/lite/src/ops/log_grad.cc b/mindspore/lite/src/ops/log_grad.cc new file mode 100644 index 0000000000..a290ff5f1c --- /dev/null +++ b/mindspore/lite/src/ops/log_grad.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/ops/log_grad.h" + +namespace mindspore { +namespace lite { +#ifndef PRIMITIVE_WRITEABLE +int LogGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { + MS_ASSERT(primitive != nullptr); + MS_ASSERT(fbb != nullptr); + auto attr = primitive->value_as_LogGrad(); + if (attr == nullptr) { + MS_LOG(ERROR) << "value_as_LogGrad return nullptr"; + return RET_ERROR; + } + auto val_offset = schema::CreateLogGrad(*fbb); + auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LogGrad, val_offset.o); + fbb->Finish(prim_offset); + return RET_OK; +} +#endif +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/log_grad.h b/mindspore/lite/src/ops/log_grad.h new file mode 100644 index 0000000000..7ba52bfb04 --- /dev/null +++ b/mindspore/lite/src/ops/log_grad.h @@ -0,0 +1,42 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_ +#define LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_ + +#include +#include +#include +#include "ir/dtype/type_id.h" +#include "src/ops/primitive_c.h" + +namespace mindspore { +namespace lite { +class LogGrad : public PrimitiveC { + public: +#ifdef PRIMITIVE_WRITEABLE + MS_DECLARE_PARENT(LogGrad, PrimitiveC); + LogGrad() = default; + explicit LogGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} +#else + LogGrad() = default; + + int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; +#endif +}; +} // namespace lite +} // namespace mindspore +#endif // LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_ diff --git a/mindspore/lite/src/ops/neg.cc b/mindspore/lite/src/ops/neg.cc new file mode 100644 index 0000000000..2645927a7d --- /dev/null +++ b/mindspore/lite/src/ops/neg.cc @@ -0,0 +1,33 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/ops/neg.h" + +namespace mindspore { +namespace lite { +#ifndef PRIMITIVE_WRITEABLE +int Neg::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { + MS_ASSERT(primitive != nullptr); + MS_ASSERT(fbb != nullptr); + auto val_offset = schema::CreateNeg(*fbb); + auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Neg, val_offset.o); + fbb->Finish(prim_offset); + return RET_OK; +} + +#endif +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/neg.h b/mindspore/lite/src/ops/neg.h new file mode 100644 index 0000000000..1733666ac6 --- /dev/null +++ b/mindspore/lite/src/ops/neg.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef LITE_MINDSPORE_LITE_C_OPS_NEG_H_ +#define LITE_MINDSPORE_LITE_C_OPS_NEG_H_ + +#include +#include +#include +#include "ir/dtype/type_id.h" +#include "src/ops/arithmetic_self.h" + +namespace mindspore { +namespace lite { +class Neg : public ArithmeticSelf { + public: +#ifdef PRIMITIVE_WRITEABLE + MS_DECLARE_PARENT(Neg, ArithmeticSelf); + Neg() = default; + explicit Neg(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} +#else + Neg() = default; + + int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; +#endif +}; +} // namespace lite +} // namespace mindspore + +#endif // LITE_MINDSPORE_LITE_C_OPS_NEG_H_ diff --git a/mindspore/lite/src/ops/neg_grad.cc b/mindspore/lite/src/ops/neg_grad.cc new file mode 100644 index 0000000000..b5fad4919b --- /dev/null +++ b/mindspore/lite/src/ops/neg_grad.cc @@ -0,0 +1,33 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/ops/neg_grad.h" + +namespace mindspore { +namespace lite { +#ifndef PRIMITIVE_WRITEABLE +int NegGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { + MS_ASSERT(primitive != nullptr); + MS_ASSERT(fbb != nullptr); + auto val_offset = schema::CreateNegGrad(*fbb); + auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_NegGrad, val_offset.o); + fbb->Finish(prim_offset); + return RET_OK; +} + +#endif +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/neg_grad.h b/mindspore/lite/src/ops/neg_grad.h new file mode 100644 index 0000000000..061d387bdf --- /dev/null +++ b/mindspore/lite/src/ops/neg_grad.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_ +#define LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_ + +#include +#include +#include +#include "ir/dtype/type_id.h" +#include "src/ops/arithmetic_self.h" + +namespace mindspore { +namespace lite { +class NegGrad : public ArithmeticSelf { + public: +#ifdef PRIMITIVE_WRITEABLE + MS_DECLARE_PARENT(NegGrad, ArithmeticSelf); + NegGrad() = default; + explicit NegGrad(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} +#else + NegGrad() = default; + + int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; +#endif +}; +} // namespace lite +} // namespace mindspore + +#endif // LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_ diff --git a/mindspore/lite/src/ops/primitive_c.cc b/mindspore/lite/src/ops/primitive_c.cc index 186b427290..912db6aef2 100644 --- a/mindspore/lite/src/ops/primitive_c.cc +++ b/mindspore/lite/src/ops/primitive_c.cc @@ -120,6 +120,7 @@ #include "src/ops/quant.h" #include "src/ops/tuple_get_item.h" #include "src/ops/l2_norm.h" +#include "src/ops/neg.h" #include "src/ops/sparse_to_dense.h" #include "src/ops/detection_post_process.h" #include "src/ops/dropout.h" @@ -128,6 +129,7 @@ #endif #ifdef SUPPORT_TRAIN +#include "src/ops/neg_grad.h" #include "src/ops/activation_grad.h" #include "src/ops/apply_momentum.h" #include "src/ops/bias_grad.h" @@ -141,6 +143,7 @@ #include "src/ops/arithmetic_grad.h" #include "src/ops/depend.h" #include "src/ops/flatten_grad.h" +#include "src/ops/log_grad.h" #endif namespace mindspore { @@ -383,6 +386,10 @@ std::shared_ptr PrimitiveC::Create(const Primitive &prim, const std: return NewPrimitiveC(prim, inputs, quantType); } else if (op_type == "ApplyMomentum") { return NewPrimitiveC(prim, inputs, quantType); + } else if (op_type == "NegGrad") { + return NewPrimitiveC(prim, inputs, quantType); + } else if (op_type == "LogGrad") { + return NewPrimitiveC(prim, inputs, quantType); } else if (op_type == "BatchNormGrad") { return 
NewPrimitiveC(prim, inputs, quantType); } else if (op_type == "Conv2DGradInput") { @@ -620,6 +627,8 @@ PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) { return new DetectionPostProcess(primitive); case schema::PrimitiveType_Dropout: return new Dropout(primitive); + case schema::PrimitiveType_Neg: + return new Neg(primitive); #ifdef SUPPORT_TRAIN case schema::PrimitiveType_ActivationGrad: @@ -654,6 +663,10 @@ PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) { return new Depend(primitive); case schema::PrimitiveType_FlattenGrad: return new FlattenGrad(primitive); + case schema::PrimitiveType_NegGrad: + return new NegGrad(primitive); + case schema::PrimitiveType_LogGrad: + return new LogGrad(primitive); #endif default: @@ -755,6 +768,8 @@ PrimitiveC *PrimitiveC::Create(const schema::Primitive *primitive) { return NewPrimitiveC(primitive); case schema::PrimitiveType_Log: return NewPrimitiveC(primitive); + case schema::PrimitiveType_Neg: + return NewPrimitiveC(primitive); case schema::PrimitiveType_Sqrt: return NewPrimitiveC(primitive); case schema::PrimitiveType_Rsqrt: @@ -895,6 +910,10 @@ PrimitiveC *PrimitiveC::Create(const schema::Primitive *primitive) { return NewPrimitiveC(primitive); case schema::PrimitiveType_DivGrad: return NewPrimitiveC(primitive); + case schema::PrimitiveType_NegGrad: + return NewPrimitiveC(primitive); + case schema::PrimitiveType_LogGrad: + return NewPrimitiveC(primitive); #endif default: MS_LOG(ERROR) << "Unsupported primitive type in Create : " << schema::EnumNamePrimitiveType(op_type); diff --git a/mindspore/lite/src/populate_parameter.cc b/mindspore/lite/src/populate_parameter.cc index 57efab75b3..5ad708b316 100644 --- a/mindspore/lite/src/populate_parameter.cc +++ b/mindspore/lite/src/populate_parameter.cc @@ -113,6 +113,7 @@ #include "src/ops/round.h" #include "src/ops/sparse_to_dense.h" #include "src/ops/l2_norm.h" +#include "src/ops/neg.h" #include "src/ops/detection_post_process.h" 
#include "nnacl/op_base.h" #include "nnacl/fp32/arg_min_max.h" @@ -1632,6 +1633,9 @@ PopulateParameterRegistry::PopulateParameterRegistry() { populate_parameter_funcs_[schema::PrimitiveType_Sin] = PopulateArithmeticSelf; populate_parameter_funcs_[schema::PrimitiveType_Exp] = PopulateExpParameter; populate_parameter_funcs_[schema::PrimitiveType_Log] = PopulateArithmeticSelf; + populate_parameter_funcs_[schema::PrimitiveType_Neg] = PopulateArithmeticSelf; + populate_parameter_funcs_[schema::PrimitiveType_NegGrad] = PopulateArithmeticSelf; + populate_parameter_funcs_[schema::PrimitiveType_LogGrad] = PopulateArithmeticSelf; populate_parameter_funcs_[schema::PrimitiveType_Square] = PopulateArithmeticSelf; populate_parameter_funcs_[schema::PrimitiveType_Sqrt] = PopulateArithmeticSelf; populate_parameter_funcs_[schema::PrimitiveType_Rsqrt] = PopulateArithmeticSelf; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc index 1658f581a7..5dcb65ce4b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc @@ -37,7 +37,7 @@ kernel::LiteKernel *CpuLeakyReluInt8KernelCreator(const std::vector(cdata); + return kernel->DoArithmeticSelfGrad(thread_id); +} +} // namespace + +int ArithmeticSelfGradCPUKernel::Init() { + auto type = Type(); + switch (type) { + case PrimitiveType_LogGrad: + self_grad_operation_ = ElementDiv; + break; + default: + MS_LOG(ERROR) << "Unsupport type: " << type; + return RET_ERROR; + } + return RET_OK; +} + +int ArithmeticSelfGradCPUKernel::DoArithmeticSelfGrad(int thread_id) { + auto dy = reinterpret_cast(in_tensors_[0]->MutableData()); + auto in_x = reinterpret_cast(in_tensors_[1]->MutableData()); + auto dx = reinterpret_cast(out_tensors_[0]->MutableData()); + int dy_size = in_tensors_.at(0)->ElementsNum(); + int size = MSMIN(thread_stride_, static_cast(dy_size - thread_id * 
thread_stride_)); + if (size <= 0) { + return RET_OK; + } + int offset = thread_id * thread_stride_; + (*self_grad_operation_)(dy + offset, in_x + offset, dx + offset, size); + return RET_OK; +} + +int ArithmeticSelfGradCPUKernel::ReSize() { return RET_OK; } + +int ArithmeticSelfGradCPUKernel::Run() { + int dy_size = in_tensors_.at(0)->ElementsNum(); + op_parameter_->thread_num_ = MSMIN(op_parameter_->thread_num_, static_cast(dy_size)); + thread_stride_ = UP_DIV(dy_size, op_parameter_->thread_num_); + auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, ArithmeticSelfGradRun, this, op_parameter_->thread_num_); + if (ret != RET_OK) { + MS_LOG(ERROR) << "parallel launch fail!ret: " << ret; + return ret; + } + + return RET_OK; +} + +kernel::LiteKernel *CpuArithmeticSelfGradFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, + OpParameter *param, const lite::Context *ctx, + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { + if (param == nullptr) { + MS_LOG(ERROR) << "input parameter is nullptr!"; + return nullptr; + } + auto *kernel = new (std::nothrow) ArithmeticSelfGradCPUKernel(param, inputs, outputs, ctx, primitive); + if (kernel == nullptr) { + MS_LOG(ERROR) << "new ArithmeticSelfGradCPUKernel fail!"; + return nullptr; + } + + auto ret = kernel->Init(); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast(param->type_)); + delete kernel; + return nullptr; + } + return kernel; +} + +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LogGrad, CpuArithmeticSelfGradFp32KernelCreator) +} // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h new file mode 100644 index 0000000000..5955e7dd24 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h @@ -0,0 +1,46 @@ 
+/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_ + +#include +#include "src/lite_kernel.h" +#include "schema/model_generated.h" +#include "ir/anf.h" + +namespace mindspore::kernel { + +class ArithmeticSelfGradCPUKernel : public LiteKernel { + typedef int (*ArithmeticSelfGradOperation)(float *, float *, float *, int); + public: + ArithmeticSelfGradCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, + const mindspore::lite::PrimitiveC *primitive) + : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + ~ArithmeticSelfGradCPUKernel() override {} + int Init() override; + int ReSize() override; + int Run() override; + int DoArithmeticSelfGrad(int thread_id); + + private: + int thread_stride_; + ArithmeticSelfGradOperation self_grad_operation_; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc new file mode 100644 index 0000000000..e0394013c0 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc @@ -0,0 +1,95 @@ +/** + * Copyright 2020 Huawei 
Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/runtime/kernel/arm/fp32_grad/neg_grad.h" +#include "schema/model_generated.h" +#include "src/kernel_registry.h" +#include "include/errorcode.h" +#include "src/runtime/runtime_api.h" +#include "nnacl/fp32/arithmetic_self.h" + +using mindspore::kernel::KERNEL_ARCH::kCPU; +using mindspore::lite::KernelRegistrar; +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_NegGrad; + +namespace mindspore::kernel { +namespace { +int NegGradRun(void *cdata, int thread_id) { + MS_ASSERT(cdata != nullptr); + auto kernel = reinterpret_cast(cdata); + return kernel->DoNegGrad(thread_id); +} +} // namespace + +int NegGradCPUKernel::Init() { return RET_OK; } + +int NegGradCPUKernel::DoNegGrad(int thread_id) { + auto dy = reinterpret_cast(in_tensors_[0]->MutableData()); + auto dx = reinterpret_cast(out_tensors_[0]->MutableData()); + int dy_size = in_tensors_.at(0)->ElementsNum(); + int size = MSMIN(thread_stride_, static_cast(dy_size - thread_id * thread_stride_)); + if (size <= 0) { + return RET_OK; + } + int offset = thread_id * thread_stride_; + ElementNegative(dy + offset, dx + offset, size); + return RET_OK; +} + +int NegGradCPUKernel::ReSize() { return RET_OK; } + +int NegGradCPUKernel::Run() { + int dy_size = in_tensors_.at(0)->ElementsNum(); + op_parameter_->thread_num_ = MSMIN(op_parameter_->thread_num_, static_cast(dy_size)); 
+ thread_stride_ = UP_DIV(dy_size, op_parameter_->thread_num_); + auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, NegGradRun, this, op_parameter_->thread_num_); + if (ret != RET_OK) { + MS_LOG(ERROR) << "parallel launch fail!ret: " << ret; + return ret; + } + + return RET_OK; +} + +kernel::LiteKernel *CpuNegGradFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, + OpParameter *param, const lite::Context *ctx, + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { + if (param == nullptr) { + MS_LOG(ERROR) << "input parameter is nullptr!"; + return nullptr; + } + auto *kernel = new (std::nothrow) NegGradCPUKernel(param, inputs, outputs, ctx, primitive); + if (kernel == nullptr) { + MS_LOG(ERROR) << "new NegGradCPUKernel fail!"; + return nullptr; + } + + auto ret = kernel->Init(); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast(param->type_)); + delete kernel; + return nullptr; + } + return kernel; +} + +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_NegGrad, CpuNegGradFp32KernelCreator) +} // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.h new file mode 100644 index 0000000000..799a84f1e5 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_NEG_GRAD_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_NEG_GRAD_H_ + +#include +#include "src/lite_kernel.h" +#include "schema/model_generated.h" +#include "ir/anf.h" + +namespace mindspore::kernel { + +class NegGradCPUKernel : public LiteKernel { + public: + explicit NegGradCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, + const mindspore::lite::PrimitiveC *primitive) + : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + ~NegGradCPUKernel() override {} + int Init() override; + int ReSize() override; + int Run() override; + int DoNegGrad(int thread_id); + + private: + int thread_stride_; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_NEG_GRAD_H_