@@ -25,56 +25,84 @@ using mindspore::schema::PrimitiveType_PadFusion;
 namespace mindspore::kernel {
 int PadNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                             OpParameter *opParameter) {
-  if (param_->pad_mode_ != schema::PaddingMode_CONSTANT) {
-    MS_LOG(WARNING) << "NPU only support CONSTANT padding mode";
+  if (inputs.size() != 2) {
+    MS_LOG(WARNING) << "NPU pad only support input size 2, got " << inputs.size();
     return RET_ERROR;
   }
-  if (inputs.size() >= 2 && inputs[1]->data_c() != nullptr) {
-    for (int i = 0; i < inputs[1]->ElementsNum(); i++) {
-      param_->paddings_[i] = static_cast<int *>(inputs[1]->data_c())[i];
-    }
-  } else {
-    MS_LOG(WARNING) << "NPU axis is attribute.";
+  if (inputs[1]->data_c() == nullptr && inputs[1]->ElementsNum() != 8) {
+    MS_LOG(WARNING) << "npu pad input[1] nullptr or paddings size " << inputs[1]->ElementsNum() << " unsupported";
     return RET_ERROR;
   }
+  for (int i = 0; i < inputs[1]->ElementsNum(); i++) {
+    param_->paddings_[i] = static_cast<int *>(inputs[1]->data_c())[i];
+  }
+  if (param_->pad_mode_ != schema::PaddingMode_CONSTANT && param_->pad_mode_ != schema::PaddingMode_SYMMETRIC &&
+      param_->pad_mode_ != schema::PaddingMode_REFLECT) {
+    MS_LOG(WARNING) << "pad npu not support mode " << param_->pad_mode_;
+    return RET_ERROR;
+  }
 
   return RET_OK;
 }
 
 int PadNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                const std::vector<ge::Operator *> &npu_inputs) {
-  op_ = new (std::nothrow) hiai::op::PadV2(name_);
-  if (op_ == nullptr) {
-    MS_LOG(ERROR) << name_ << " op is nullptr";
-    return RET_ERROR;
-  }
-  int size = static_cast<int>(param_->padding_length / 2);
-  ge::TensorDesc padding_tensor_desc(ge::Shape({size, 2}), ge::FORMAT_NCHW, ge::DT_INT32);
+  ge::TensorDesc padding_tensor_desc(ge::Shape({4, 2}), ge::FORMAT_NCHW, ge::DT_INT32);
   ge::TensorPtr padding_tensor = std::make_shared<hiai::Tensor>(padding_tensor_desc);
-  padding_tensor->SetData(reinterpret_cast<uint8_t *>(param_->paddings_), 2 * size * sizeof(int));
+  padding_tensor->SetData(reinterpret_cast<uint8_t *>(param_->paddings_), 8 * sizeof(int));
   hiai_paddings_ = new hiai::op::Const(name_ + "paddings");
   hiai_paddings_->set_attr_value(padding_tensor);
+  if (param_->pad_mode_ == schema::PaddingMode_CONSTANT) {
+    op_ = new (std::nothrow) hiai::op::PadV2(name_);
+    if (op_ == nullptr) {
+      MS_LOG(ERROR) << name_ << " op is nullptr";
+      return RET_ERROR;
+    }
 
-  ge::TensorDesc constant_values_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_FLOAT);
-  ge::TensorPtr constant_values_tensor = std::make_shared<hiai::Tensor>(constant_values_tensor_desc);
-  vector<float> constant_values_data_value = {param_->constant_value_};
-  constant_values_tensor->SetData(reinterpret_cast<uint8_t *>(constant_values_data_value.data()), 1 * sizeof(float));
-  hiai_constant_ = new hiai::op::Const(name_ + "constant");
-  hiai_constant_->set_attr_value(constant_values_tensor);
+    ge::TensorDesc constant_values_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_FLOAT);
+    ge::TensorPtr constant_values_tensor = std::make_shared<hiai::Tensor>(constant_values_tensor_desc);
+    vector<float> constant_values_data_value = {param_->constant_value_};
+    constant_values_tensor->SetData(reinterpret_cast<uint8_t *>(constant_values_data_value.data()), 1 * sizeof(float));
+    hiai_constant_ = new hiai::op::Const(name_ + "constant");
+    hiai_constant_->set_attr_value(constant_values_tensor);
 
-  op_->set_input_x(*npu_inputs[0]);
-  op_->set_input_constant_values(*hiai_constant_);
-  op_->set_input_paddings(*hiai_paddings_);
+    op_->set_input_x(*npu_inputs[0]);
+    op_->set_input_constant_values(*hiai_constant_);
+    op_->set_input_paddings(*hiai_paddings_);
+  } else {
+    mirror_op_ = new (std::nothrow) hiai::op::MirrorPad(name_);
+    if (mirror_op_ == nullptr) {
+      MS_LOG(ERROR) << name_ << " op is nullptr";
+      return RET_ERROR;
+    }
+    mirror_op_->set_input_x(*npu_inputs[0]);
+    mirror_op_->set_input_paddings(*hiai_paddings_);
+    if (param_->pad_mode_ == schema::PaddingMode_SYMMETRIC) {
+      mirror_op_->set_attr_mode("SYMMETRIC");
+    } else {
+      mirror_op_->set_attr_mode("REFLECT");
+    }
+  }
 
   return RET_OK;
 }
 
-ge::Operator *mindspore::kernel::PadNPUKernel::GetNPUOp() { return this->op_; }
+ge::Operator *mindspore::kernel::PadNPUKernel::GetNPUOp() {
+  if (param_->pad_mode_ == schema::PaddingMode_CONSTANT) {
+    return op_;
+  }
+  return mirror_op_;
+}
 
 PadNPUKernel::~PadNPUKernel() {
   if (op_ != nullptr) {
     delete op_;
     op_ = nullptr;
   }
+  if (mirror_op_ != nullptr) {
+    delete mirror_op_;
+    mirror_op_ = nullptr;
+  }
   if (hiai_paddings_ != nullptr) {
     delete hiai_paddings_;
     hiai_paddings_ = nullptr;