!9969 fix npu subgraph executor bug
From: @yeyunpeng2020
Reviewed-by: @HilbertDavid, @zhanghaibo5, @zhang_xue_tong
Signed-off-by: @zhang_xue_tong
commit c3e7bbb4c2
src/runtime/kernel/npu/matmul_npu.cc
@@ -0,0 +1,51 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/npu/matmul_npu.h"
#include "src/kernel_registry.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_MatMul;

namespace mindspore::kernel {
int MatMulNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                               OpParameter *opParameter) {
  return RET_OK;
}

int MatMulNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                  const std::vector<ge::Operator *> &npu_inputs) {
  op_ = new (std::nothrow) hiai::op::MatMul(name_);
  if (op_ == nullptr) {
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
  op_->set_input_x1(*npu_inputs[0]);
  op_->set_input_x2(*npu_inputs[1]);

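  // Map the transpose flags parsed from MatMulParameter onto the corresponding
  // HiAI MatMul attributes, so any required transposes run inside the NPU op.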
  op_->set_attr_transpose_x1(a_transpose_);
  op_->set_attr_transpose_x2(b_transpose_);
  return RET_OK;
}

ge::Operator *mindspore::kernel::MatMulNPUKernel::GetNPUOp() { return this->op_; }

MatMulNPUKernel::~MatMulNPUKernel() {
  if (op_ != nullptr) {
    delete op_;
    op_ = nullptr;
  }
}

REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_MatMul, NPUKernelCreator<MatMulNPUKernel>)
}  // namespace mindspore::kernel
src/runtime/kernel/npu/matmul_npu.h
@@ -0,0 +1,49 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MATMUL_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MATMUL_NPU_H_
#include <vector>
#include "nnacl/matmul_parameter.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class MatMulNPUKernel : public NPUKernel {
 public:
  MatMulNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
    auto matmul_parameter = reinterpret_cast<MatMulParameter *>(parameter);
    a_transpose_ = matmul_parameter->a_transpose_;
    b_transpose_ = matmul_parameter->b_transpose_;
  }
  ~MatMulNPUKernel() override;

  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  hiai::op::MatMul *op_ = nullptr;
  bool a_transpose_ = false;
  bool b_transpose_ = false;
};
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MATMUL_NPU_H_
src/runtime/kernel/npu/pad_npu.cc
@@ -0,0 +1,73 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/npu/pad_npu.h"
#include <memory>
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Pad;

namespace mindspore::kernel {
int PadNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                            OpParameter *opParameter) {
  if (padding_mode_ != schema::PaddingMode_CONSTANT) {
    MS_LOG(WARNING) << "NPU only supports the CONSTANT padding mode";
    return RET_ERROR;
  }
  return RET_OK;
}

int PadNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                               const std::vector<ge::Operator *> &npu_inputs) {
  op_ = new (std::nothrow) hiai::op::PadV2(name_);
  if (op_ == nullptr) {
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
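  // hiai::op::PadV2 takes its paddings and fill value as Const operator inputs
  // rather than attributes: paddings is an int32 tensor of shape {size, 2}
  // holding a (before, after) pair per dimension, constant_values a single float.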
  int size = static_cast<int>(paddings_.size() / 2);
  ge::TensorDesc padding_tensor_desc(ge::Shape({size, 2}), ge::FORMAT_NCHW, ge::DT_INT32);
  ge::TensorPtr padding_tensor = std::make_shared<hiai::Tensor>(padding_tensor_desc);
  // The tensor holds 2 * size int32 values, so copy the full paddings_ buffer.
  padding_tensor->SetData(reinterpret_cast<uint8_t *>(paddings_.data()), 2 * size * sizeof(int));
  auto paddings = new hiai::op::Const(name_ + "_paddings");
  paddings->set_attr_value(padding_tensor);

  ge::TensorDesc constant_values_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_FLOAT);
  ge::TensorPtr constant_values_tensor = std::make_shared<hiai::Tensor>(constant_values_tensor_desc);
  std::vector<float> constant_values_data_value = {constant_value_};
  constant_values_tensor->SetData(reinterpret_cast<uint8_t *>(constant_values_data_value.data()), 1 * sizeof(float));
  auto constant = new hiai::op::Const(name_ + "_constant");
  constant->set_attr_value(constant_values_tensor);

  op_->set_input_x(*npu_inputs[0]);
  op_->set_input_constant_values(*constant);
  op_->set_input_paddings(*paddings);

  return RET_OK;
}

ge::Operator *mindspore::kernel::PadNPUKernel::GetNPUOp() { return this->op_; }

PadNPUKernel::~PadNPUKernel() {
  if (op_ != nullptr) {
    delete op_;
    op_ = nullptr;
  }
}

REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Pad, NPUKernelCreator<PadNPUKernel>)
}  // namespace mindspore::kernel
src/runtime/kernel/npu/pad_npu.h
@@ -0,0 +1,51 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_PAD_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_PAD_NPU_H_
#include <vector>
#include "nnacl/pad_parameter.h"
#include "src/ops/pad.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class PadNPUKernel : public NPUKernel {
 public:
  PadNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
               const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
               const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
    auto pad = reinterpret_cast<const mindspore::lite::Pad *>(primitive);
    constant_value_ = pad->GetConstantValue();
    paddings_ = pad->GetPaddings();
    padding_mode_ = pad->GetPaddingMode();
  }
  ~PadNPUKernel() override;

  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  hiai::op::PadV2 *op_ = nullptr;
  std::vector<int> paddings_;
  int padding_mode_;
  float constant_value_;
};
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_PAD_NPU_H_
src/runtime/kernel/npu/slice_npu.cc
@@ -0,0 +1,54 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/npu/slice_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Slice;

namespace mindspore::kernel {
int SliceNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                              OpParameter *opParameter) {
  return RET_OK;
}

int SliceNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                 const std::vector<ge::Operator *> &npu_inputs) {
  op_ = new (std::nothrow) hiai::op::Slice(name_);
  if (op_ == nullptr) {
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
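  // hiai::op::Slice takes the begin offsets and the slice sizes as operator
  // inputs (npu_inputs[1] and npu_inputs[2]) rather than as attributes.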
  op_->set_input_x(*npu_inputs[0]);
  op_->set_input_offsets(*npu_inputs[1]);
  op_->set_input_size(*npu_inputs[2]);

  return RET_OK;
}

ge::Operator *mindspore::kernel::SliceNPUKernel::GetNPUOp() { return this->op_; }

SliceNPUKernel::~SliceNPUKernel() {
  if (op_ != nullptr) {
    delete op_;
    op_ = nullptr;
  }
}

REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Slice, NPUKernelCreator<SliceNPUKernel>)
}  // namespace mindspore::kernel
src/runtime/kernel/npu/slice_npu.h
@@ -0,0 +1,42 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SLICE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SLICE_NPU_H_
#include <vector>
#include "src/ops/slice.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class SliceNPUKernel : public NPUKernel {
 public:
  SliceNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                 const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                 const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {}
  ~SliceNPUKernel() override;

  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  hiai::op::Slice *op_ = nullptr;
};
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SLICE_NPU_H_
src/runtime/kernel/npu/split_npu.cc
@@ -0,0 +1,70 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/npu/split_npu.h"
#include <memory>
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Split;

namespace mindspore::kernel {
int SplitNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                              OpParameter *opParameter) {
  return RET_OK;
}

int SplitNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                 const std::vector<ge::Operator *> &npu_inputs) {
  op_ = new (std::nothrow) hiai::op::SplitV(name_);
  if (op_ == nullptr) {
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
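  // hiai::op::SplitV expects the per-output sizes and the split axis as Const
  // operator inputs: size_splits is an int32 tensor of shape {size} and
  // split_dim is a single int32 scalar.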
  int size = static_cast<int>(size_splits_.size());
  ge::TensorDesc size_splits_tensor_desc(ge::Shape({size}), ge::FORMAT_NCHW, ge::DT_INT32);
  ge::TensorPtr size_splits_tensor = std::make_shared<hiai::Tensor>(size_splits_tensor_desc);
  size_splits_tensor->SetData(reinterpret_cast<uint8_t *>(size_splits_.data()), size * sizeof(int));
  auto size_splits = new hiai::op::Const(name_ + "_size");
  size_splits->set_attr_value(size_splits_tensor);

  ge::TensorDesc split_dim_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_INT32);
  ge::TensorPtr split_dim_tensor = std::make_shared<hiai::Tensor>(split_dim_tensor_desc);
  std::vector<int32_t> split_dim_data_value = {split_dim_};
  split_dim_tensor->SetData(reinterpret_cast<uint8_t *>(split_dim_data_value.data()), 1 * sizeof(int32_t));
  auto split_dim = new hiai::op::Const(name_ + "_dim");
  split_dim->set_attr_value(split_dim_tensor);

  op_->set_input_x(*npu_inputs[0]);
  op_->set_attr_num_split(num_split_);
  op_->set_input_split_dim(*split_dim);
  op_->set_input_size_splits(*size_splits);
  op_->create_dynamic_output_y(num_split_);
  return RET_OK;
}

ge::Operator *mindspore::kernel::SplitNPUKernel::GetNPUOp() { return this->op_; }

SplitNPUKernel::~SplitNPUKernel() {
  if (op_ != nullptr) {
    delete op_;
    op_ = nullptr;
  }
}

REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Split, NPUKernelCreator<SplitNPUKernel>)
}  // namespace mindspore::kernel
src/runtime/kernel/npu/split_npu.h
@@ -0,0 +1,50 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SPLIT_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SPLIT_NPU_H_
#include <vector>
#include "src/ops/split.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class SplitNPUKernel : public NPUKernel {
 public:
  SplitNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                 const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                 const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
    auto split = reinterpret_cast<const mindspore::lite::Split *>(primitive);
    num_split_ = split->GetNumberSplit();
    size_splits_ = split->GetSizeSplit();
    split_dim_ = split->GetSplitDim();
  }
  ~SplitNPUKernel() override;

  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  hiai::op::SplitV *op_ = nullptr;
  int num_split_;
  std::vector<int> size_splits_;
  int split_dim_;
};
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SPLIT_NPU_H_
src/runtime/kernel/npu/transpose_npu.cc
@@ -0,0 +1,62 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/npu/transpose_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Nchw2Nhwc;
using mindspore::schema::PrimitiveType_Nhwc2Nchw;
using mindspore::schema::PrimitiveType_Transpose;

namespace mindspore::kernel {
int TransposeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                  OpParameter *opParameter) {
  if (conjugate_) {
    MS_LOG(ERROR) << "Unsupported conjugate transpose.";
    return RET_ERROR;
  }
  return RET_OK;
}

int TransposeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
                                     const std::vector<lite::Tensor *> &outputs,
                                     const std::vector<ge::Operator *> &npu_inputs) {
  op_ = new (std::nothrow) hiai::op::Permute(name_);
  if (op_ == nullptr) {
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
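  // perm_ was resolved in the constructor: it is copied from TransposeParameter
  // for PrimitiveType_Transpose, or fixed to {0, 2, 3, 1} / {0, 3, 1, 2} for the
  // Nchw2Nhwc / Nhwc2Nchw format-conversion primitives.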
  op_->set_input_x(*npu_inputs[0]);
  op_->set_attr_order(perm_);

  return RET_OK;
}

ge::Operator *mindspore::kernel::TransposeNPUKernel::GetNPUOp() { return this->op_; }

TransposeNPUKernel::~TransposeNPUKernel() {
  if (op_ != nullptr) {
    delete op_;
    op_ = nullptr;
  }
}

REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Transpose, NPUKernelCreator<TransposeNPUKernel>)
// REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Nhwc2Nchw, NPUKernelCreator<TransposeNPUKernel>)
// REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Nchw2Nhwc, NPUKernelCreator<TransposeNPUKernel>)
}  // namespace mindspore::kernel
src/runtime/kernel/npu/transpose_npu.h
@@ -0,0 +1,56 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_TRANSPOSE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_TRANSPOSE_NPU_H_
#include <vector>
#include "nnacl/transpose.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class TransposeNPUKernel : public NPUKernel {
 public:
  TransposeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                     const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                     const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
    if (primitive->Type() == schema::PrimitiveType_Transpose) {
      auto transpose_parameter = reinterpret_cast<TransposeParameter *>(parameter);
      conjugate_ = transpose_parameter->conjugate_;
      for (int i = 0; i < transpose_parameter->num_axes_; i++) {
        perm_.push_back(transpose_parameter->perm_[i]);
      }
    } else if (primitive->Type() == schema::PrimitiveType_Nchw2Nhwc) {
      perm_ = {0, 2, 3, 1};
    } else if (primitive->Type() == schema::PrimitiveType_Nhwc2Nchw) {
      perm_ = {0, 3, 1, 2};
    }
  }
  ~TransposeNPUKernel() override;

  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  hiai::op::Permute *op_ = nullptr;
  std::vector<int64_t> perm_;
  bool conjugate_ = false;
};
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_TRANSPOSE_NPU_H_
src/runtime/kernel/npu/unsqueeze_npu.cc
@@ -0,0 +1,66 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/npu/unsqueeze_npu.h"
#include <memory>
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Unsqueeze;

namespace mindspore::kernel {
int UnsqueezeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                  OpParameter *opParameter) {
  if (inputs[0]->shape().size() > 3) {
    MS_LOG(WARNING) << "NPU does not support unsqueeze outputs with more than 4 dimensions.";
    return RET_ERROR;
  }
  return RET_OK;
}

int UnsqueezeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
                                     const std::vector<lite::Tensor *> &outputs,
                                     const std::vector<ge::Operator *> &npu_inputs) {
  op_ = new (std::nothrow) hiai::op::ExpandDims(name_);
  if (op_ == nullptr) {
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
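  // hiai::op::ExpandDims takes the axes to insert as a Const operator input,
  // packed into an int32 tensor of shape {size}.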
  int size = static_cast<int>(axis_.size());
  ge::TensorDesc desc(ge::Shape({size}), ge::FORMAT_NCHW, ge::DT_INT32);
  ge::TensorPtr tensor = std::make_shared<hiai::Tensor>(desc);
  tensor->SetData(reinterpret_cast<uint8_t *>(axis_.data()), size * sizeof(int));
  auto axis = new hiai::op::Const(name_ + "_axis");
  axis->set_attr_value(tensor);

  op_->set_input_x(*npu_inputs[0]);
  op_->set_input_axis(*axis);

  return RET_OK;
}

ge::Operator *mindspore::kernel::UnsqueezeNPUKernel::GetNPUOp() { return this->op_; }

UnsqueezeNPUKernel::~UnsqueezeNPUKernel() {
  if (op_ != nullptr) {
    delete op_;
    op_ = nullptr;
  }
}

REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Unsqueeze, NPUKernelCreator<UnsqueezeNPUKernel>)
}  // namespace mindspore::kernel
src/runtime/kernel/npu/unsqueeze_npu.h
@@ -0,0 +1,46 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_UNSQUEEZE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_UNSQUEEZE_NPU_H_
#include <vector>
#include "src/ops/unsqueeze.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class UnsqueezeNPUKernel : public NPUKernel {
 public:
  UnsqueezeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                     const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                     const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
    auto unsqueeze = reinterpret_cast<const mindspore::lite::Unsqueeze *>(primitive);
    axis_ = unsqueeze->GetAxis();
  }
  ~UnsqueezeNPUKernel() override;

  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  hiai::op::ExpandDims *op_ = nullptr;
  std::vector<int> axis_;
};
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_UNSQUEEZE_NPU_H_