diff --git a/mindspore/lite/nnacl/fp32/arithmetic_fp32.c b/mindspore/lite/nnacl/fp32/arithmetic_fp32.c index 37d7b9d303..23720bf1df 100644 --- a/mindspore/lite/nnacl/fp32/arithmetic_fp32.c +++ b/mindspore/lite/nnacl/fp32/arithmetic_fp32.c @@ -509,6 +509,26 @@ int ElementOptDivRelu6(const float *input0, const float *input1, float *output, return NNACL_OK; } +int ElementOptDivInt(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param) { + if (param->in_elements_num0_ == 1) { + for (int index = 0; index < element_size; index++) { + if (input1[index] == 0) { + return NNACL_ERRCODE_DIVISOR_ZERO; + } + output[index] = input0[0] / input1[index]; + } + } else { + if (input1[0] == 0) { + return NNACL_ERRCODE_DIVISOR_ZERO; + } + for (int index = 0; index < element_size; index++) { + output[index] = input0[index] / input1[0]; + } + } + return NNACL_OK; +} + int ElementMul(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON diff --git a/mindspore/lite/nnacl/fp32/arithmetic_fp32.h b/mindspore/lite/nnacl/fp32/arithmetic_fp32.h index f7030e2ee5..c30f992487 100644 --- a/mindspore/lite/nnacl/fp32/arithmetic_fp32.h +++ b/mindspore/lite/nnacl/fp32/arithmetic_fp32.h @@ -58,6 +58,8 @@ int ElementOptDivRelu(const float *input0, const float *input1, float *output, c const ArithmeticParameter *param); int ElementOptDivRelu6(const float *input0, const float *input1, float *output, const int element_size, const ArithmeticParameter *param); +int ElementOptDivInt(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param); int ElementMul(const float *input0, const float *input1, float *output, const int element_size); int ElementMulRelu(const float *input0, const float *input1, float *output, const int element_size); int ElementMulRelu6(const float *input0, const float *input1, float *output, const int element_size); diff --git a/mindspore/lite/src/ops/activation.cc 
b/mindspore/lite/src/ops/activation.cc index 24b7d51a95..e959d8cb8e 100644 --- a/mindspore/lite/src/ops/activation.cc +++ b/mindspore/lite/src/ops/activation.cc @@ -59,6 +59,8 @@ int Activation::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> attr->type = schema::ActivationType_HSWISH; } else if (prim.name() == "HSigmoid") { attr->type = schema::ActivationType_HSIGMOID; + } else if (prim.name() == "Tanh") { + attr->type = schema::ActivationType_TANH; } this->primitive_->value.value = attr.release(); if (this->primitive_->value.value == nullptr) { diff --git a/mindspore/lite/src/ops/div.cc b/mindspore/lite/src/ops/div.cc index bb5a2f9034..f345e01d30 100644 --- a/mindspore/lite/src/ops/div.cc +++ b/mindspore/lite/src/ops/div.cc @@ -29,6 +29,35 @@ void Div::SetActivationType(int activation_type) { this->primitive_->value.AsDiv()->activationType = (schema::ActivationType)activation_type; } +int Div::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { + if (this->primitive_ == nullptr) { + this->primitive_ = new (std::nothrow) schema::PrimitiveT; + if (this->primitive_ == nullptr) { + MS_LOG(ERROR) << "new primitiveT failed"; + return RET_ERROR; + } + this->primitive_->value.type = schema::PrimitiveType_Div; + } + if (this->primitive_->value.type != schema::PrimitiveType_Div) { + MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; + return RET_ERROR; + } + if (this->primitive_->value.value == nullptr) { + auto attr = new (std::nothrow) schema::DivT(); + if (attr == nullptr) { + MS_LOG(ERROR) << "new primitiveT value failed"; + return RET_ERROR; + } + this->primitive_->value.value = attr; + if (this->primitive_->value.value == nullptr) { + MS_LOG(ERROR) << "primitive value is nullptr"; + return RET_ERROR; + } + } + + return RET_OK; +} + #else int Div::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { MS_ASSERT(nullptr != primitive); diff --git a/mindspore/lite/src/ops/div.h 
b/mindspore/lite/src/ops/div.h index 0398245c0c..c23e7ab5c4 100644 --- a/mindspore/lite/src/ops/div.h +++ b/mindspore/lite/src/ops/div.h @@ -32,6 +32,7 @@ class Div : public Arithmetic { MS_DECLARE_PARENT(Div, Arithmetic); explicit Div(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} void SetActivationType(int activation_type); + int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; #else int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; diff --git a/mindspore/lite/src/ops/primitive_c.cc b/mindspore/lite/src/ops/primitive_c.cc index ddf28dde64..34584be2c6 100644 --- a/mindspore/lite/src/ops/primitive_c.cc +++ b/mindspore/lite/src/ops/primitive_c.cc @@ -581,6 +581,10 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std: return NewPrimitiveC(prim, inputs, quantType); } else if (op_type == "Minimum") { return NewPrimitiveC<Minimum>(prim, inputs, quantType); + } else if (op_type == "Div") { + return NewPrimitiveC<Div>(prim, inputs, quantType); + } else if (op_type == "Tanh") { + return NewPrimitiveC<Activation>(prim, inputs, quantType); #ifdef SUPPORT_TRAIN } else if (op_type == "SoftmaxCrossEntropyWithLogits") { return NewPrimitiveC<SoftmaxCrossEntropyWithLogits>(prim, inputs, quantType); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc index 210db97044..ca0307161a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc @@ -297,6 +297,7 @@ void ArithmeticCPUKernel::InitOptRunFunction() { default: arithmeticParameter_->broadcasting_ = false; arithmetic_opt_run_ = ElementOptDiv; + arithmetic_opt_run_int_ = ElementOptDivInt; break; } break; @@ -554,4 +555,5 @@ REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_FloorDiv, CpuArithmeticFp32Kern REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_FloorMod, CpuArithmeticFp32KernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SquaredDifference, CpuArithmeticFp32KernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Eltwise, CpuArithmeticFp32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Div, CpuArithmeticFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc index e67e0c373d..1196e8c8ae 100644 --- a/mindspore/lite/tools/anf_exporter/anf_exporter.cc +++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc @@ -445,7 +445,8 @@ int AnfExporter::ConvertInputValueNode(const std::shared_ptr<AnfNode> &input_ano auto valueAbstract = valueNode->abstract(); auto abstractScalar = utils::cast<abstract::AbstractScalarPtr>(valueAbstract); auto typePtr = abstractScalar->GetTypeTrack(); - paramTensor->dataType = typePtr->type_id(); + // data of int64 is converted to int32 here. 
+ paramTensor->dataType = kNumberTypeInt32; paramTensor->dims = {1}; paramTensor->nodeType = schema::NodeType::NodeType_ValueNode; int real_data = CastToInt(value).front(); diff --git a/model_zoo/official/lite/style_transfer/README.md b/model_zoo/official/lite/style_transfer/README.md index 4825e21436..a464e9b92c 100644 --- a/model_zoo/official/lite/style_transfer/README.md +++ b/model_zoo/official/lite/style_transfer/README.md @@ -1,4 +1,4 @@ -# MindSpore Lite 端侧骨骼检测demo(Android) +# MindSpore Lite 端侧风格迁移demo(Android) 本示例程序演示了如何在端侧利用MindSpore Lite API以及MindSpore Lite风格迁移模型完成端侧推理,根据demo内置的标准图片更换目标图片的艺术风格,并在App图像预览界面中显示出来。