!6310 pad reflect and symmetric

Merge pull request !6310 from zhaozhenlong/lite/issue/pad_reflect_symmetric
pull/6310/MERGE
Committed by mindspore-ci-bot via Gitee, commit 00802a87a5

@@ -33,3 +33,40 @@ void Pad(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
     }
   }
 }
+
+int TransOut2InputDimIndex(int out_dim_index, int left_pad, int in_dim, int offset) {
+  if (out_dim_index < left_pad) {
+    // left pad
+    const int index_sum = left_pad + offset - 1;
+    return MSMAX(index_sum - out_dim_index, offset);
+  }
+  out_dim_index -= left_pad;
+  if (out_dim_index < in_dim) {
+    return out_dim_index;
+  }
+  // right pad
+  out_dim_index -= in_dim;
+  const int index_sum = in_dim - 1 - offset;
+  return MSMAX(index_sum - out_dim_index, 0);
+}
+
+int GetInputFlattenIndex(int out_flatten_index, const int *input_shape, const PadParameter *pad_param) {
+  int in_flatten_index = 0;
+  int i;
+  for (i = 0; i < DEFAULT_PAD_NDIMS; ++i) {
+    int left_pad = pad_param->paddings_[i * 2];
+    int out_dim_index = out_flatten_index / pad_param->out_strides[i];
+    out_flatten_index %= pad_param->out_strides[i];
+    int in_dim_index = TransOut2InputDimIndex(out_dim_index, left_pad, input_shape[i], pad_param->mirror_offset_);
+    in_flatten_index += in_dim_index * pad_param->in_strides[i];
+  }
+  return in_flatten_index;
+}
+
+void MirrorPad(const float *input_data, float *output_data, const int *input_shape, const PadParameter *pad_param,
+               int begin, int end) {
+  int i = 0;
+  for (i = begin; i < end; ++i) {
+    output_data[i] = input_data[GetInputFlattenIndex(i, input_shape, pad_param)];
+  }
+}
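The offset argument is what separates the two mirror modes: with mirror_offset_ = 1 the border element is skipped (reflect), with mirror_offset_ = 0 it is repeated (symmetric). A minimal standalone sketch of the mapping for one axis of length 4 with two pad cells on each side (MSMAX is expanded to a plain macro here instead of including nnacl's op_base.h):

    #include <cstdio>

    #define MSMAX(a, b) ((a) > (b) ? (a) : (b))

    // Copy of TransOut2InputDimIndex from the hunk above, for illustration only.
    int TransOut2InputDimIndex(int out_dim_index, int left_pad, int in_dim, int offset) {
      if (out_dim_index < left_pad) {
        const int index_sum = left_pad + offset - 1;
        return MSMAX(index_sum - out_dim_index, offset);
      }
      out_dim_index -= left_pad;
      if (out_dim_index < in_dim) return out_dim_index;
      out_dim_index -= in_dim;
      const int index_sum = in_dim - 1 - offset;
      return MSMAX(index_sum - out_dim_index, 0);
    }

    int main() {
      // One axis: in_dim = 4, left_pad = right_pad = 2, so 8 output cells.
      for (int offset = 0; offset <= 1; ++offset) {
        printf(offset == 1 ? "offset 1 (reflect):   " : "offset 0 (symmetric): ");
        for (int o = 0; o < 8; ++o) printf("%d ", TransOut2InputDimIndex(o, 2, 4, offset));
        printf("\n");
      }
      // offset 0 (symmetric): 1 0 0 1 2 3 3 2
      // offset 1 (reflect):   2 1 0 1 2 3 2 1
      return 0;
    }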

@@ -29,6 +29,8 @@ extern "C" {
 #endif
 void Pad(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
          const int *paddings, const int tid, const int thread_num);
+void MirrorPad(const float *input_data, float *output_data, const int *input_shape, const PadParameter *pad_param,
+               int begin, int end);
 #ifdef __cplusplus
 }
 #endif

@@ -26,8 +26,12 @@ typedef struct PadParameter {
   OpParameter op_parameter_;
   PadQuantArg pad_quant_arg_;
   int paddings_[MAX_PAD_SIZE];
+  int padding_length;
   int pad_mode_;
   float constant_value_;
+  int mirror_offset_;
+  int in_strides[DEFAULT_PAD_NDIMS];
+  int out_strides[DEFAULT_PAD_NDIMS];
 } PadParameter;
 
 #endif  // MINDSPORE_LITE_NNACL_PAD_PARAMETER_H_
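GetInputFlattenIndex above divides by out_strides and multiplies by in_strides, so both arrays must hold row-major strides over the same DEFAULT_PAD_NDIMS axes. A hedged sketch of how they could be filled (the kernel's actual CalculateStrides lives in the suppressed pad.cc diff and may differ; kPadNDims stands in for DEFAULT_PAD_NDIMS):

    enum { kPadNDims = 4 };  // stand-in for DEFAULT_PAD_NDIMS

    // Row-major strides: the innermost axis has stride 1, and each outer
    // axis's stride is the product of all dimensions inside it.
    void FillStrides(const int *shape, int *strides) {
      strides[kPadNDims - 1] = 1;
      for (int i = kPadNDims - 2; i >= 0; --i) {
        strides[i] = strides[i + 1] * shape[i + 1];
      }
    }
    // e.g. input shape {1, 2, 3, 4} -> in_strides {24, 12, 4, 1}; a padded
    // output shape {1, 2, 5, 6} -> out_strides {60, 30, 6, 1}.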

@@ -64,8 +64,6 @@ int Pad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs)
     return RET_NULL_PTR;
   }
-
-  auto paddings = GetPaddings();
 
   auto input = inputs.front();
   if (input == nullptr) {
     return RET_NULL_PTR;
@@ -79,6 +77,27 @@ int Pad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs)
   if (!GetInferFlag()) {
     return RET_OK;
   }
+
+  std::vector<int> paddings;
+  if (GetPaddingMode() == static_cast<int>(schema::PaddingMode_CONSTANT)) {
+    paddings = GetPaddings();
+  } else {
+    // mirror pad
+    MS_ASSERT(inputs.size() == 2);
+    auto paddings_tensor = inputs.at(1);
+    int rank = static_cast<int>(inputs.front()->shape().size());
+    MS_ASSERT(paddings_tensor->ElementsNum() == 2 * rank);
+    int *paddings_data = reinterpret_cast<int *>(paddings_tensor->MutableData());
+    if (paddings_data == nullptr) {
+      return RET_INFER_ERR;
+    }
+    paddings.clear();
+    for (auto i = 0; i < rank; ++i) {
+      paddings.emplace_back(paddings_data[i * 2]);
+      paddings.emplace_back(paddings_data[i * 2 + 1]);
+    }
+  }
+
   auto input_shape = input->shape();
   std::vector<int> output_shape;
   MS_ASSERT(input->shape().size() <= 4);
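Whichever branch fills paddings, the shape arithmetic that follows is the same: axis i grows by paddings[2 * i] on the left and paddings[2 * i + 1] on the right. A small sketch of that step (PaddedShape is a hypothetical helper, not part of this patch):

    #include <vector>

    std::vector<int> PaddedShape(const std::vector<int> &in_shape, const std::vector<int> &paddings) {
      std::vector<int> out_shape;
      for (size_t i = 0; i < in_shape.size(); ++i) {
        out_shape.push_back(in_shape[i] + paddings[2 * i] + paddings[2 * i + 1]);
      }
      return out_shape;
    }
    // e.g. NHWC input {1, 4, 4, 3} with paddings {0, 0, 1, 1, 2, 2, 0, 0}
    // yields {1, 6, 8, 3}.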

@@ -156,8 +156,9 @@ int StridedSlice::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb)
 }
 #endif
 
 namespace {
-constexpr int kStridedSliceOutputNum = 1;
-constexpr int kStridedSliceInputNum = 1;
+constexpr size_t kStridedSliceOutputNum = 1;
+constexpr size_t kStridedSliceInputNum = 1;
+constexpr size_t kStridedSliceMultiInputNum = 4;
 }  // namespace
 void StridedSlice::ApplyNewAxisMask() {
@@ -231,7 +232,7 @@ int StridedSlice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs)
     MS_LOG(ERROR) << "Invalid output size:" << outputs.size();
     return RET_PARAM_INVALID;
   }
-  if (inputs.size() != kStridedSliceInputNum) {
+  if (inputs.size() != kStridedSliceInputNum && inputs.size() != kStridedSliceMultiInputNum) {
     MS_LOG(ERROR) << "Invalid input size " << inputs.size();
     return RET_PARAM_INVALID;
   }
@@ -244,13 +245,33 @@ int StridedSlice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs)
   MS_ASSERT(input != nullptr);
   auto input_shape = input->shape();
   std::vector<int> output_shape;
-  ndim_ = static_cast<int>(GetBegin().size());
-  for (int i = 0; i < ndim_; i++) {
-    in_shape_.emplace_back(input_shape.at(i));
-    begins_.emplace_back((GetBegin())[i]);
-    ends_.emplace_back((GetEnd())[i]);
-    strides_.emplace_back((GetStride())[i]);
-  }
+  if (inputs.size() == kStridedSliceInputNum) {
+    ndim_ = static_cast<int>(GetBegin().size());
+    for (int i = 0; i < ndim_; i++) {
+      in_shape_.emplace_back(input_shape.at(i));
+      begins_.emplace_back((GetBegin())[i]);
+      ends_.emplace_back((GetEnd())[i]);
+      strides_.emplace_back((GetStride())[i]);
+    }
+  } else {
+    auto begin_tensor = inputs.at(1);
+    int *begin_data = reinterpret_cast<int *>(begin_tensor->MutableData());
+    auto end_tensor = inputs.at(2);
+    int *end_data = reinterpret_cast<int *>(end_tensor->MutableData());
+    auto stride_tensor = inputs.at(3);
+    int *stride_data = reinterpret_cast<int *>(stride_tensor->MutableData());
+    if (begin_data == nullptr || end_data == nullptr || stride_data == nullptr) {
+      return RET_INFER_ERR;
+    }
+    ndim_ = begin_tensor->ElementsNum();
+    for (int i = 0; i < ndim_; ++i) {
+      in_shape_.emplace_back(input_shape.at(i));
+      begins_.emplace_back(begin_data[i]);
+      ends_.emplace_back(end_data[i]);
+      strides_.emplace_back(stride_data[i]);
+    }
+  }
   // set all mask to original input shape
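With four inputs, begin/end/stride arrive as int tensors of ndim_ elements each instead of primitive attributes; either way, before the masks are applied each output dimension spans ceil((end - begin) / stride) elements. An illustrative calculation under those assumptions (positive strides only; the real InferShape applies the new-axis/shrink masks afterwards):

    #include <cstdio>

    // Ceiling division of the slice span; valid for positive strides.
    int SlicedDim(int begin, int end, int stride) { return (end - begin + stride - 1) / stride; }

    int main() {
      // inputs[0]: data {8, 8}; inputs[1..3]: begin {0, 2}, end {8, 6}, stride {2, 1}
      printf("output shape: {%d, %d}\n", SlicedDim(0, 8, 2), SlicedDim(2, 6, 1));  // {4, 4}
      return 0;
    }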

@@ -601,24 +601,24 @@ OpParameter *PopulatePadParameter(const mindspore::lite::PrimitiveC *primitive)
   pad_param->op_parameter_.type_ = primitive->Type();
   auto pad_node = reinterpret_cast<mindspore::lite::Pad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
   pad_param->pad_mode_ = pad_node->GetPaddingMode();
-  if (pad_param->pad_mode_ == schema::PaddingMode_CONSTANT) {
+  if (pad_param->pad_mode_ == static_cast<int>(schema::PaddingMode_CONSTANT)) {
     pad_param->constant_value_ = pad_node->GetConstantValue();
-  } else {
-    MS_LOG(ERROR) << "Invalid padding mode: " << pad_param->pad_mode_;
-    free(pad_param);
-    return nullptr;
-  }
-  auto size = pad_node->GetPaddings().size();
-  if (size > MAX_PAD_SIZE) {
-    MS_LOG(ERROR) << "Invalid padding size: " << size;
-    free(pad_param);
-    return nullptr;
-  }
-  for (size_t i = 0; i < size; i++) {
-    pad_param->paddings_[MAX_PAD_SIZE - size + i] = pad_node->GetPaddings()[i];
+    auto size = pad_node->GetPaddings().size();
+    if (size > MAX_PAD_SIZE) {
+      MS_LOG(ERROR) << "Invalid padding size: " << size;
+      free(pad_param);
+      return nullptr;
+    }
+    for (size_t i = 0; i < MAX_PAD_SIZE - size; ++i) {
+      pad_param->paddings_[i] = 0;
+    }
+    for (size_t i = 0; i < size; i++) {
+      pad_param->paddings_[MAX_PAD_SIZE - size + i] = pad_node->GetPaddings()[i];
+    }
+    pad_param->padding_length = MAX_PAD_SIZE;
   }
   return reinterpret_cast<OpParameter *>(pad_param);
 }
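The two loops right-align the primitive's paddings inside paddings_, so a low-rank pad lands on the innermost axes of the fixed 4-D runtime shape while the leading axes get zero padding. Worked through by hand, assuming MAX_PAD_SIZE is 2 * DEFAULT_PAD_NDIMS = 8:

    #include <cstdio>

    enum { kMaxPadSize = 8 };  // assumed value of MAX_PAD_SIZE

    int main() {
      int paddings[kMaxPadSize];
      const int src[] = {1, 1, 2, 2};  // a rank-2 pad taken from the primitive
      const size_t size = sizeof(src) / sizeof(src[0]);
      // Same right-alignment as PopulatePadParameter: zero the leading
      // slots, copy the real pads into the trailing ones.
      for (size_t i = 0; i < kMaxPadSize - size; ++i) paddings[i] = 0;
      for (size_t i = 0; i < size; ++i) paddings[kMaxPadSize - size + i] = src[i];
      for (size_t i = 0; i < kMaxPadSize; ++i) printf("%d ", paddings[i]);
      printf("\n");  // prints: 0 0 0 0 1 1 2 2
      return 0;
    }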

File diff suppressed because it is too large.

@@ -38,14 +38,24 @@ class PadCPUKernel : public LiteKernel {
   int ReSize() override;
   int Run() override;
   virtual int RunImpl(int task_id);
+  int RunMirrorPadImpl(int task_id);
+
+ private:
+  int HandleMirrorPad();
+  int CheckPaddings(int *paddings, int length, int *input_shape, int mode);
+  int CopyPaddingFromInput();
+  void CalculateStrides();
+  int ExtendShape(int *shape, int length, const int *ori_shape, int rank);
+  int ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length);
 
  protected:
-  const PadParameter *pad_param_;
-  int in_[4] = {1, 1, 1, 1};
-  int out_[4] = {1, 1, 1, 1};
+  PadParameter *pad_param_;
+  int in_[4];
+  int out_[4];
 };
 
 int PadImpl(void *cdata, int task_id);
+int MirrorPadImpl(void *cdata, int task_id);
 }  // namespace mindspore::kernel
 
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_PAD_H_
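RunMirrorPadImpl and the MirrorPadImpl callback are defined in the suppressed pad.cc diff, so the dispatch below is only a guess at the usual Lite pattern: each task_id takes one contiguous slice of the flattened output and hands it to MirrorPad as a [begin, end) range (the unit/output_size names are invented, and the nnacl pad header from this patch is assumed to be included):

    #include <algorithm>

    // Hypothetical slice dispatch; the real RunMirrorPadImpl may differ.
    void RunMirrorPadSlice(int task_id, int thread_num, int output_size,
                           const float *in, float *out, const int *in_shape,
                           const PadParameter *param) {
      int unit = (output_size + thread_num - 1) / thread_num;  // ceil split
      int begin = task_id * unit;
      int end = std::min(begin + unit, output_size);
      if (begin < end) {
        MirrorPad(in, out, in_shape, param, begin, end);
      }
    }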
