diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc index e6fc9e9b5a..5a17faaa20 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc @@ -33,24 +33,24 @@ namespace mindspore::kernel { int LayerNormOpenCLKernel::CheckSpecs() { auto param = reinterpret_cast<LayerNormParameter *>(this->op_parameter_); - // if (param->elementwise_mode_ == ELEMENTWISE_PER_CHANNEL) { - // if (in_tensors_.size() != 3) { - // MS_LOG(ERROR) << " invalid in_tensors_ size" << in_tensors_.size() << std::endl; - // return RET_ERROR; - // } - // if (param->normalized_dims_ > in_tensors_.at(0)->shape().size()) { - // MS_LOG(ERROR) << " invalid normalized_shape_ size" << param->normalized_dims_ << std::endl; - // return RET_ERROR; - // } - // } else if (param->elementwise_mode_ == ELEMENTWISE_NOT) { - // if (in_tensors_.size() != 1) { - // MS_LOG(ERROR) << " invalid in_tensors_ size" << in_tensors_.size() << std::endl; - // return RET_ERROR; - // } - // } else { - // MS_LOG(ERROR) << "Unsupported elementwise_mode_" << param->elementwise_mode_; - // return RET_ERROR; - // } + if (param->elementwise_mode_ == ELEMENTWISE_PER_NUM) { + if (in_tensors_.size() != 3) { + MS_LOG(ERROR) << " invalid in_tensors_ size" << in_tensors_.size() << std::endl; + return RET_ERROR; + } + if (param->normalized_dims_ > in_tensors_.at(0)->shape().size()) { + MS_LOG(ERROR) << " invalid normalized_shape_ size" << param->normalized_dims_ << std::endl; + return RET_ERROR; + } + } else if (param->elementwise_mode_ == ELEMENTWISE_NOT) { + if (in_tensors_.size() != 1) { + MS_LOG(ERROR) << " invalid in_tensors_ size" << in_tensors_.size() << std::endl; + return RET_ERROR; + } + } else { + MS_LOG(ERROR) << "Unsupported elementwise_mode_" << param->elementwise_mode_; + return RET_ERROR; + } if (in_tensors_.at(0)->shape().size() != 4 || out_tensors_.size() != 1) { MS_LOG(ERROR) 
<< "UnSupported in_tensors_.shape.size: " << in_tensors_.at(0)->shape().size() << " out_tensors_.size(): " << out_tensors_.size(); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc index 88931491aa..72bb13e7d0 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc @@ -25,8 +25,10 @@ using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_ExpandDims; using mindspore::schema::PrimitiveType_Reshape; using mindspore::schema::PrimitiveType_Squeeze; +using mindspore::schema::PrimitiveType_Unsqueeze; namespace mindspore::kernel { @@ -97,4 +99,8 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Reshape, OpenCLKernelCreator<ReshapeOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Reshape, OpenCLKernelCreator<ReshapeOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Squeeze, OpenCLKernelCreator<ReshapeOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Squeeze, OpenCLKernelCreator<ReshapeOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Unsqueeze, OpenCLKernelCreator<ReshapeOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Unsqueeze, OpenCLKernelCreator<ReshapeOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ExpandDims, OpenCLKernelCreator<ReshapeOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ExpandDims, OpenCLKernelCreator<ReshapeOpenCLKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/layer_norm_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/layer_norm_tests.cc index c199c5c7d1..53b5c3e67c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/layer_norm_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/layer_norm_tests.cc @@ -24,7 +24,7 @@ namespace { // PrimitiveType_Stack: src/ops/populate/stack_populate.cc OpParameter 
*CreateParameter(float epsilon, int normalized_dims_, std::vector<int> normalizedShape) { auto *param = test::CreateParameter<LayerNormParameter>(schema::PrimitiveType_LayerNorm); - param->elementwise_mode_ = ELEMENTWISE_PER_CHANNEL; + param->elementwise_mode_ = ELEMENTWISE_PER_NUM; param->epsilon_ = epsilon; param->normalized_dims_ = normalized_dims_; for (int i = 0; i < normalizedShape.size() && i < normalized_dims_; ++i) {