From d1f7ed115a28e44f672e0c408d21c533577beba4 Mon Sep 17 00:00:00 2001 From: zhaodezan Date: Mon, 8 Mar 2021 14:58:35 +0800 Subject: [PATCH] change many malloc to one --- mindspore/lite/nnacl/infer/common_infer.c | 28 ++------ mindspore/lite/nnacl/infer/common_infer.h | 8 +-- mindspore/lite/nnacl/infer/merge_infer.c | 2 +- mindspore/lite/nnacl/infer/select_infer.c | 2 +- mindspore/lite/nnacl/infer/switch_infer.c | 8 +-- .../nnacl/infer/tensorlist_fromtensor_infer.c | 30 ++++---- .../nnacl/infer/tensorlist_getitem_infer.c | 6 +- .../nnacl/infer/tensorlist_reserve_infer.c | 30 ++++---- .../nnacl/infer/tensorlist_setitem_infer.c | 56 ++++++--------- .../lite/nnacl/infer/tensorlist_stack_infer.c | 2 +- mindspore/lite/src/common/tensor_util.cc | 72 +++---------------- .../infer/tensorlist_fromtensor_infer_test.cc | 6 +- .../infer/tensorlist_getitem_infer_test.cc | 37 +++++----- .../infer/tensorlist_reserve_infer_test.cc | 2 +- .../infer/tensorlist_setitem_infer_test.cc | 54 +++++++------- .../infer/tensorlist_stack_infer_test.cc | 34 +++++---- 16 files changed, 139 insertions(+), 238 deletions(-) diff --git a/mindspore/lite/nnacl/infer/common_infer.c b/mindspore/lite/nnacl/infer/common_infer.c index 8ec609136e..0a21247c95 100644 --- a/mindspore/lite/nnacl/infer/common_infer.c +++ b/mindspore/lite/nnacl/infer/common_infer.c @@ -17,17 +17,6 @@ #include #include -int FreeTensorListData(TensorListC *tensor_list) { - // del each tensor in tensors_ and clear tensors_ - if (tensor_list->element_num_ == 0) { - return NNACL_OK; - } - for (int i = 0; i < tensor_list->element_num_; ++i) { - tensor_list->tensors_[i] = NULL; - } - return NNACL_OK; -} - int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tensor_shape) { // This function will create a new tensors_ // Your must to set shape(param2: tensor_shape) and data_type_(tensors_data_type_ = param1: dtype) of each tensor in @@ -40,21 +29,16 @@ int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tenso return NNACL_ERR; } tensor_list->tensors_data_type_ = dtype; - tensor_list->tensors_ = (TensorC **)malloc(tensor_list->element_num_ * sizeof(TensorC *)); // free in infer_manager + tensor_list->tensors_ = (TensorC *)malloc(tensor_list->element_num_ * sizeof(TensorC)); // free in infer_manager if (tensor_list->tensors_ == NULL) { return NNACL_NULL_PTR; } - memset(tensor_list->tensors_, 0, tensor_list->element_num_ * sizeof(TensorC *)); + memset(tensor_list->tensors_, 0, tensor_list->element_num_ * sizeof(TensorC)); for (int i = 0; i < tensor_list->element_num_; ++i) { - TensorC *tensor_ptr = (TensorC *)malloc(sizeof(TensorC)); - if (tensor_ptr == NULL) { - return NNACL_ERR; - } - memset(tensor_ptr, 0, sizeof(TensorC)); - tensor_ptr->format_ = Format_NHWC; - tensor_ptr->data_type_ = dtype; - ShapeSet(tensor_ptr->shape_, &(tensor_ptr->shape_size_), tensor_shape->shape_[i], tensor_shape->shape_size_[i]); - tensor_list->tensors_[i] = tensor_ptr; + tensor_list->tensors_[i].format_ = Format_NHWC; + tensor_list->tensors_[i].data_type_ = dtype; + ShapeSet(tensor_list->tensors_[i].shape_, &(tensor_list->tensors_[i].shape_size_), tensor_shape->shape_[i], + tensor_shape->shape_size_[i]); } return NNACL_OK; } diff --git a/mindspore/lite/nnacl/infer/common_infer.h b/mindspore/lite/nnacl/infer/common_infer.h index 792dd98470..b7b39267b1 100644 --- a/mindspore/lite/nnacl/infer/common_infer.h +++ b/mindspore/lite/nnacl/infer/common_infer.h @@ -137,12 +137,12 @@ typedef struct TensorListC { int data_type_; int format_; - TensorC 
**tensors_; - size_t element_num_; int tensors_data_type_; // element_data_type_, keep same as c++ - int element_shape_[MAX_SHAPE_SIZE]; - size_t element_shape_size_; int max_elements_num_; + int element_shape_[8]; + size_t element_num_; + size_t element_shape_size_; + TensorC *tensors_; } TensorListC; typedef struct VectorC { diff --git a/mindspore/lite/nnacl/infer/merge_infer.c b/mindspore/lite/nnacl/infer/merge_infer.c index 5af32daf81..713650747d 100644 --- a/mindspore/lite/nnacl/infer/merge_infer.c +++ b/mindspore/lite/nnacl/infer/merge_infer.c @@ -42,7 +42,7 @@ int MergeInfer(const TensorC *const *inputs, size_t inputs_size, TensorC **outpu output_tensorlist->element_num_ = input_tensorlist->element_num_; for (size_t j = 0; j < output_tensorlist->element_num_; j++) { - memcpy(output_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + memcpy(&output_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC)); } } else { SetShapeTensor(outputs[i], inputs[i]); diff --git a/mindspore/lite/nnacl/infer/select_infer.c b/mindspore/lite/nnacl/infer/select_infer.c index 311af22f7d..68bd38353c 100644 --- a/mindspore/lite/nnacl/infer/select_infer.c +++ b/mindspore/lite/nnacl/infer/select_infer.c @@ -43,7 +43,7 @@ int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * output_tensorlist->element_num_ = input_tensorlist->element_num_; for (size_t j = 0; j < output_tensorlist->element_num_; j++) { - memcpy(output_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + memcpy(&output_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC)); } } else { SetShapeTensor(output, input); diff --git a/mindspore/lite/nnacl/infer/switch_infer.c b/mindspore/lite/nnacl/infer/switch_infer.c index b51af00b92..642fa75679 100644 --- a/mindspore/lite/nnacl/infer/switch_infer.c +++ b/mindspore/lite/nnacl/infer/switch_infer.c @@ -51,8 +51,8 @@ int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * // note: need delete below? 
for (size_t j = 0; j < output_false_tensorlist->element_num_; j++) { - memcpy(output_true_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); - memcpy(output_false_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + memcpy(&output_true_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC)); + memcpy(&output_false_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC)); } } else { @@ -89,8 +89,8 @@ int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * output_true_tensorlist->element_num_ = input_tensorlist->element_num_; for (size_t j = 0; j < output_false_tensorlist->element_num_; j++) { - memcpy(output_true_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); - memcpy(output_false_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + memcpy(&output_true_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC)); + memcpy(&output_false_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC)); } } else { diff --git a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c index 694491f819..a3cb64460f 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c @@ -41,35 +41,29 @@ int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_s int *ele_shape_ptr = (int *)(input1->data_); TensorListC *output = (TensorListC *)(outputs[0]); - vvector *tensor_shape = (vvector *)malloc(sizeof(vvector)); - if (tensor_shape == NULL) { + vvector tensor_shape; + tensor_shape.size_ = dim0; + tensor_shape.shape_ = (int **)malloc(tensor_shape.size_ * sizeof(int *)); + if (tensor_shape.shape_ == NULL) { return NNACL_NULL_PTR; } - tensor_shape->size_ = dim0; - tensor_shape->shape_ = (int **)malloc(tensor_shape->size_ * sizeof(int *)); - if (tensor_shape->shape_ == NULL) { - free(tensor_shape); - return NNACL_NULL_PTR; - } - tensor_shape->shape_size_ = (int *)malloc(tensor_shape->size_ * sizeof(int)); - if (tensor_shape->shape_size_ == NULL) { - free(tensor_shape->shape_); - free(tensor_shape); + tensor_shape.shape_size_ = (int *)malloc(tensor_shape.size_ * sizeof(int)); + if (tensor_shape.shape_size_ == NULL) { + free(tensor_shape.shape_); return NNACL_NULL_PTR; } for (size_t i = 0; i < dim0; i++) { - tensor_shape->shape_[i] = (int *)(input0->shape_ + 1); - tensor_shape->shape_size_[i] = input0->shape_size_ - 1; + tensor_shape.shape_[i] = (int *)(input0->shape_ + 1); + tensor_shape.shape_size_[i] = input0->shape_size_ - 1; } ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, GetElementNum(input1)); output->element_num_ = dim0; output->data_type_ = kObjectTypeTensorType; output->format_ = Format_NHWC; - MallocTensorListData(output, input0->data_type_, tensor_shape); - free(tensor_shape->shape_); - free(tensor_shape->shape_size_); - free(tensor_shape); + MallocTensorListData(output, input0->data_type_, &tensor_shape); + free(tensor_shape.shape_); + free(tensor_shape.shape_size_); return NNACL_OK; } diff --git a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c index 9c84fc65fe..36354bd6ba 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c @@ -37,7 +37,7 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, 
size_t inputs_size if (index < 0 || index > (input0->element_num_ - 1)) { return NNACL_ERR; } - TensorC *tensor_index = input0->tensors_[index]; + TensorC *tensor_index = &input0->tensors_[index]; TensorC *output = outputs[0]; if (tensor_index->data_type_ != kTypeUnknown) { output->data_type_ = tensor_index->data_type_; @@ -60,7 +60,7 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size } if (!TensorListIsFullyDefined(element_shape, element_shape_size)) { for (int i = 0; i < input0->element_num_; ++i) { - TensorC *input = input0->tensors_[i]; + TensorC *input = &input0->tensors_[i]; if (input->data_type_ != kTypeUnknown) { status = TensorListMergeShape(element_shape, &element_shape_size, input->shape_, input->shape_size_); if (status != NNACL_OK) { @@ -75,6 +75,6 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size output->data_type_ = input0->tensors_data_type_; SetShapeArray(output, element_shape, element_shape_size); } - output->format_ = input0->tensors_[index]->format_; + output->format_ = input0->tensors_[index].format_; return NNACL_OK; } diff --git a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c index cf6adc1a24..e53e3c3474 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c @@ -50,30 +50,24 @@ int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, GetElementNum(input0)); output->element_num_ = num_elements; - vvector *tmp_shape = (vvector *)malloc(sizeof(vvector)); - if (tmp_shape == NULL) { + vvector tmp_shape; + tmp_shape.size_ = num_elements; + tmp_shape.shape_ = (int **)malloc(tmp_shape.size_ * sizeof(int *)); + if (tmp_shape.shape_ == NULL) { return NNACL_NULL_PTR; } - tmp_shape->size_ = num_elements; - tmp_shape->shape_ = (int **)malloc(tmp_shape->size_ * sizeof(int *)); - if (tmp_shape->shape_ == NULL) { - free(tmp_shape); - return NNACL_NULL_PTR; - } - tmp_shape->shape_size_ = (int *)malloc(tmp_shape->size_ * sizeof(int)); - if (tmp_shape->shape_size_ == NULL) { - free(tmp_shape->shape_); - free(tmp_shape); + tmp_shape.shape_size_ = (int *)malloc(tmp_shape.size_ * sizeof(int)); + if (tmp_shape.shape_size_ == NULL) { + free(tmp_shape.shape_); return NNACL_NULL_PTR; } for (size_t i = 0; i < num_elements; i++) { - tmp_shape->shape_size_[i] = 0; - tmp_shape->shape_[i] = NULL; + tmp_shape.shape_size_[i] = 0; + tmp_shape.shape_[i] = NULL; } - MallocTensorListData(output, kTypeUnknown, tmp_shape); - free(tmp_shape->shape_size_); - free(tmp_shape->shape_); - free(tmp_shape); + MallocTensorListData(output, kTypeUnknown, &tmp_shape); + free(tmp_shape.shape_size_); + free(tmp_shape.shape_); return NNACL_OK; } diff --git a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c index e7ee7d310e..ec88854ba7 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c @@ -70,46 +70,35 @@ int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size input0->element_shape_size_); } - vvector *out_shape = (vvector *)malloc(sizeof(vvector)); - if (out_shape == NULL) { + vvector out_shape; + out_shape.size_ = 0; + out_shape.shape_ = (int **)malloc((input0->element_num_ + 1) * sizeof(int *)); + if (out_shape.shape_ == NULL) { return NNACL_NULL_PTR; } - 
out_shape->size_ = 0; - out_shape->shape_ = (int **)malloc((input0->element_num_ + 1) * sizeof(int *)); - if (out_shape->shape_ == NULL) { - free(out_shape); - return NNACL_NULL_PTR; - } - out_shape->shape_size_ = (int *)malloc((input0->element_num_ + 1) * sizeof(int)); - if (out_shape->shape_size_ == NULL) { - free(out_shape->shape_); - free(out_shape); + out_shape.shape_size_ = (int *)malloc((input0->element_num_ + 1) * sizeof(int)); + if (out_shape.shape_size_ == NULL) { + free(out_shape.shape_); return NNACL_NULL_PTR; } if (index == 0 && input0->element_num_ == 0) { // uninitialized tensorlist - out_shape->shape_[out_shape->size_] = (int *)(value_tensor->shape_); - out_shape->shape_size_[out_shape->size_] = value_tensor->shape_size_; - out_shape->size_++; + out_shape.shape_[out_shape.size_] = (int *)(value_tensor->shape_); + out_shape.shape_size_[out_shape.size_] = value_tensor->shape_size_; + out_shape.size_++; output0->element_num_ = 1; } else { output0->element_num_ = input0->element_num_; for (int i = 0; i < input0->element_num_; ++i) { - TensorC *src_ptr = input0->tensors_[i]; - if (src_ptr == NULL) { - free(out_shape->shape_); - free(out_shape->shape_size_); - free(out_shape); - return NNACL_ERR; - } + TensorC *src_ptr = &input0->tensors_[i]; if (src_ptr->data_type_ != kTypeUnknown) { - out_shape->shape_[out_shape->size_] = src_ptr->shape_; - out_shape->shape_size_[out_shape->size_] = src_ptr->shape_size_; - out_shape->size_++; + out_shape.shape_[out_shape.size_] = src_ptr->shape_; + out_shape.shape_size_[out_shape.size_] = src_ptr->shape_size_; + out_shape.size_++; } else { - out_shape->shape_[out_shape->size_] = NULL; - out_shape->shape_size_[out_shape->size_] = 0; - out_shape->size_++; + out_shape.shape_[out_shape.size_] = NULL; + out_shape.shape_size_[out_shape.size_] = 0; + out_shape.size_++; } } } @@ -118,11 +107,10 @@ int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size input0->tensors_data_type_ = value_tensor->data_type_; } - out_shape->shape_[index] = (int *)(value_tensor->shape_); - out_shape->shape_size_[index] = value_tensor->shape_size_; - MallocTensorListData(output0, input0->tensors_data_type_, out_shape); - free(out_shape->shape_); - free(out_shape->shape_size_); - free(out_shape); + out_shape.shape_[index] = (int *)(value_tensor->shape_); + out_shape.shape_size_[index] = value_tensor->shape_size_; + MallocTensorListData(output0, input0->tensors_data_type_, &out_shape); + free(out_shape.shape_); + free(out_shape.shape_size_); return NNACL_OK; } diff --git a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c index 0881cb67ff..6519a05f1f 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c @@ -50,7 +50,7 @@ int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size, } if (!TensorListIsFullyDefined(input0->element_shape_, input0->element_shape_size_)) { for (int i = 0; i < input0->element_num_; ++i) { - TensorC *tensor_ele = input0->tensors_[i]; + TensorC *tensor_ele = &input0->tensors_[i]; if (tensor_ele->data_type_ != kTypeUnknown) { status = TensorListMergeShape(output_shape, &output_shape_size, tensor_ele->shape_, tensor_ele->shape_size_); if (status == NNACL_ERR) { diff --git a/mindspore/lite/src/common/tensor_util.cc b/mindspore/lite/src/common/tensor_util.cc index 7c00a156a0..a20d1cfdfc 100644 --- a/mindspore/lite/src/common/tensor_util.cc +++ b/mindspore/lite/src/common/tensor_util.cc @@ 
-87,10 +87,6 @@ void FreeAllTensorC(std::vector *tensors_in) { } void FreeTensorListC(TensorListC *tensorlist_c) { - for (size_t i = 0; i < tensorlist_c->element_num_; i++) { - free(tensorlist_c->tensors_[i]); - tensorlist_c->tensors_[i] = nullptr; - } if (tensorlist_c->tensors_ != nullptr) { free(tensorlist_c->tensors_); tensorlist_c->tensors_ = nullptr; @@ -132,18 +128,13 @@ int TensorList2TensorListC(TensorList *src, TensorListC *dst) { dst->format_ = src->format(); dst->element_num_ = src->shape().empty() ? 0 : src->tensors().size(); - dst->tensors_ = reinterpret_cast(malloc(dst->element_num_ * sizeof(TensorC *))); + dst->tensors_ = reinterpret_cast(malloc(dst->element_num_ * sizeof(TensorC))); if (dst->tensors_ == nullptr) { return RET_ERROR; } - memset(dst->tensors_, 0, dst->element_num_ * sizeof(TensorC *)); + memset(dst->tensors_, 0, dst->element_num_ * sizeof(TensorC)); for (size_t i = 0; i < dst->element_num_; i++) { - dst->tensors_[i] = reinterpret_cast(malloc(sizeof(TensorC))); - if (dst->tensors_[i] == nullptr) { - return NNACL_ERR; - } - memset(dst->tensors_[i], 0, sizeof(TensorC)); - Tensor2TensorC(src->tensors().at(i), dst->tensors_[i]); + Tensor2TensorC(src->tensors().at(i), &dst->tensors_[i]); } dst->tensors_data_type_ = src->tensors_data_type(); @@ -163,7 +154,7 @@ void TensorListC2TensorList(TensorListC *src, TensorList *dst) { // Set Tensors for (size_t i = 0; i < src->element_num_; i++) { - TensorC2Tensor(src->tensors_[i], dst->GetTensor(i)); + TensorC2Tensor(&src->tensors_[i], dst->GetTensor(i)); } dst->set_element_shape(std::vector(src->element_shape_, src->element_shape_ + src->element_shape_size_)); @@ -183,28 +174,13 @@ int GenerateMergeOutTensorC(const std::vector &inputs, std::vect output_tensorlist->element_num_ = inputs[i]->shape().empty() ? 0 : inputs[i]->shape().at(0); if (output_tensorlist->element_num_ != 0) { output_tensorlist->tensors_ = - reinterpret_cast(malloc(output_tensorlist->element_num_ * sizeof(TensorC *))); + reinterpret_cast(malloc(output_tensorlist->element_num_ * sizeof(TensorC))); if (output_tensorlist->tensors_ == nullptr) { free(output_tensorlist); output_tensorlist = nullptr; return RET_ERROR; } - memset(output_tensorlist->tensors_, 0, output_tensorlist->element_num_ * sizeof(TensorC *)); - for (size_t j = 0; j < output_tensorlist->element_num_; j++) { - output_tensorlist->tensors_[j] = reinterpret_cast(malloc(sizeof(TensorC))); - if (output_tensorlist->tensors_[j] == nullptr) { - for (size_t k = 0; k < j; k++) { - free(output_tensorlist->tensors_[k]); - output_tensorlist->tensors_[k] = nullptr; - } - free(output_tensorlist->tensors_); - output_tensorlist->tensors_ = nullptr; - free(output_tensorlist); - output_tensorlist = nullptr; - return RET_ERROR; - } - memset(output_tensorlist->tensors_[j], 0, sizeof(TensorC)); - } + memset(output_tensorlist->tensors_, 0, output_tensorlist->element_num_ * sizeof(TensorC)); } out_tensor_c->push_back(reinterpret_cast(output_tensorlist)); @@ -239,26 +215,13 @@ int GenerateSwitchOutTensorC(const std::vector &inputs, std::vec output_tensorlist1->element_num_ = inputs[i + 1]->shape().empty() ? 
0 : inputs[i + 1]->shape().at(0); if (output_tensorlist1->element_num_ != 0) { output_tensorlist1->tensors_ = - reinterpret_cast(malloc(output_tensorlist1->element_num_ * sizeof(TensorC *))); + reinterpret_cast(malloc(output_tensorlist1->element_num_ * sizeof(TensorC))); if (output_tensorlist1->tensors_ == nullptr) { free(output_tensorlist1); output_tensorlist1 = nullptr; return RET_ERROR; } - memset(output_tensorlist1->tensors_, 0, output_tensorlist1->element_num_ * sizeof(TensorC *)); - for (size_t j = 0; j < output_tensorlist1->element_num_; j++) { - output_tensorlist1->tensors_[j] = reinterpret_cast(malloc(sizeof(TensorC))); - if (output_tensorlist1->tensors_[j] == nullptr) { - for (size_t k = 0; k < j; k++) { - free(output_tensorlist1->tensors_[k]); - output_tensorlist1->tensors_[k] = nullptr; - } - free(output_tensorlist1->tensors_); - output_tensorlist1->tensors_ = nullptr; - return RET_ERROR; - } - memset(output_tensorlist1->tensors_[j], 0, sizeof(TensorC)); - } + memset(output_tensorlist1->tensors_, 0, output_tensorlist1->element_num_ * sizeof(TensorC)); } out_tensor_c->at(i) = reinterpret_cast(output_tensorlist1); @@ -271,28 +234,13 @@ int GenerateSwitchOutTensorC(const std::vector &inputs, std::vec output_tensorlist2->element_num_ = inputs[i + 1]->shape().empty() ? 0 : inputs[i + 1]->shape().at(0); if (output_tensorlist2->element_num_ != 0) { output_tensorlist2->tensors_ = - reinterpret_cast(malloc(output_tensorlist2->element_num_ * sizeof(TensorC *))); + reinterpret_cast(malloc(output_tensorlist2->element_num_ * sizeof(TensorC))); if (output_tensorlist2->tensors_ == nullptr) { free(output_tensorlist2); output_tensorlist2 = nullptr; return RET_ERROR; } - memset(output_tensorlist2->tensors_, 0, output_tensorlist2->element_num_ * sizeof(TensorC *)); - for (size_t j = 0; j < output_tensorlist2->element_num_; j++) { - output_tensorlist2->tensors_[j] = reinterpret_cast(malloc(sizeof(TensorC))); - if (output_tensorlist2->tensors_[j] == nullptr) { - for (size_t k = 0; k < j; k++) { - free(output_tensorlist2->tensors_[k]); - output_tensorlist2->tensors_[k] = nullptr; - } - free(output_tensorlist2->tensors_); - output_tensorlist2->tensors_ = nullptr; - free(output_tensorlist2); - output_tensorlist2 = nullptr; - return RET_ERROR; - } - memset(output_tensorlist2->tensors_[j], 0, sizeof(TensorC)); - } + memset(output_tensorlist2->tensors_, 0, output_tensorlist2->element_num_ * sizeof(TensorC)); } out_tensor_c->at(i + outputs->size() / 2) = reinterpret_cast(output_tensorlist2); diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc index d4f165bbee..bc3d159bb4 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc @@ -58,9 +58,9 @@ TEST_F(TensorlistFromtensorInferTest, TensorlistFromtensorInferTest0) { ASSERT_EQ(out->tensors_data_type_, kNumberTypeInt32); // ASSERT_EQ(outputs[0]->format_, Format_NHWC); for (size_t i = 0; i < out->element_num_; i++) { - ASSERT_EQ(out->tensors_[i]->shape_size_, 2); - ASSERT_EQ(out->tensors_[i]->shape_[0], 6); - ASSERT_EQ(out->tensors_[i]->shape_[1], 5); + ASSERT_EQ(out->tensors_[i].shape_size_, 2); + ASSERT_EQ(out->tensors_[i].shape_[0], 6); + ASSERT_EQ(out->tensors_[i].shape_[1], 5); } delete parameter; for (size_t i = 0; i < inputs_size; i++) { diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc 
b/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc index c82ac53291..abf83424cb 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc @@ -30,27 +30,22 @@ TEST_F(TensorlistGetItemInferTest, TensorlistGetItemInferTest0) { std::vector inputs(inputs_size, NULL); TensorListC *input0 = reinterpret_cast(malloc(sizeof(TensorListC))); input0->element_num_ = 3; - input0->tensors_ = reinterpret_cast(malloc(input0->element_num_ * sizeof(TensorC *))); - input0->tensors_[0] = reinterpret_cast(malloc(sizeof(TensorC))); - input0->tensors_[0]->shape_size_ = 2; - input0->tensors_[0]->shape_[0] = 1; - input0->tensors_[0]->shape_[1] = 2; - input0->tensors_[0]->data_type_ = kNumberTypeInt32; - // input0->tensors_[0]->format_ = Format_NHWC; - input0->tensors_[1] = reinterpret_cast(malloc(sizeof(TensorC))); - input0->tensors_[1]->shape_size_ = 3; - input0->tensors_[1]->shape_[0] = 3; - input0->tensors_[1]->shape_[1] = 4; - input0->tensors_[1]->shape_[2] = 5; - input0->tensors_[1]->data_type_ = kNumberTypeInt32; - // input0->tensors_[1]->format_ = Format_NHWC; - input0->tensors_[2] = reinterpret_cast(malloc(sizeof(TensorC))); - input0->tensors_[2]->shape_size_ = 4; - input0->tensors_[2]->shape_[0] = 6; - input0->tensors_[2]->shape_[1] = 7; - input0->tensors_[2]->shape_[2] = 8; - input0->tensors_[2]->shape_[3] = 9; - input0->tensors_[2]->data_type_ = kNumberTypeInt32; + input0->tensors_ = reinterpret_cast(malloc(input0->element_num_ * sizeof(TensorC))); + input0->tensors_[0].shape_size_ = 2; + input0->tensors_[0].shape_[0] = 1; + input0->tensors_[0].shape_[1] = 2; + input0->tensors_[0].data_type_ = kNumberTypeInt32; + input0->tensors_[1].shape_size_ = 3; + input0->tensors_[1].shape_[0] = 3; + input0->tensors_[1].shape_[1] = 4; + input0->tensors_[1].shape_[2] = 5; + input0->tensors_[1].data_type_ = kNumberTypeInt32; + input0->tensors_[2].shape_size_ = 4; + input0->tensors_[2].shape_[0] = 6; + input0->tensors_[2].shape_[1] = 7; + input0->tensors_[2].shape_[2] = 8; + input0->tensors_[2].shape_[3] = 9; + input0->tensors_[2].data_type_ = kNumberTypeInt32; // input0->tensors_[2]->format_ = Format_NHWC; inputs[0] = reinterpret_cast(input0); inputs[0]->data_type_ = kObjectTypeTensorType; diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc index 0389a81078..9398b0b2f7 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc @@ -57,7 +57,7 @@ TEST_F(TensorlistReserveInferTest, TensorlistReserveInferTest0) { ASSERT_EQ(out->tensors_data_type_, kTypeUnknown); // ASSERT_EQ(outputs[0]->format_, Format_NHWC); for (size_t i = 0; i < out->element_num_; i++) { - ASSERT_EQ(out->tensors_[i]->shape_size_, 0); + ASSERT_EQ(out->tensors_[i].shape_size_, 0); } delete parameter; for (size_t i = 0; i < inputs_size; i++) { diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc index 09d25da9b5..23a958e102 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc @@ -29,29 +29,27 @@ TEST_F(TensorlistSetItemInferTest, TensorlistSetItemInferTest0) { std::vector inputs(inputs_size, NULL); TensorListC *input0 = new TensorListC; 
input0->element_num_ = 3; - input0->tensors_ = reinterpret_cast(malloc(input0->element_num_ * sizeof(TensorC *))); + input0->tensors_ = reinterpret_cast(malloc(input0->element_num_ * sizeof(TensorC))); input0->element_shape_size_ = 2; input0->element_shape_[0] = 2; input0->element_shape_[1] = 4; input0->tensors_data_type_ = kNumberTypeInt32; input0->data_type_ = kObjectTypeTensorType; - input0->tensors_[0] = new TensorC; - input0->tensors_[0]->shape_size_ = 2; - input0->tensors_[0]->shape_[0] = 2; - input0->tensors_[0]->shape_[1] = 4; - input0->tensors_[0]->data_type_ = kNumberTypeInt32; - // input0->tensors_[0]->format_ = Format_NHWC; - input0->tensors_[1] = new TensorC; - input0->tensors_[1]->shape_size_ = 2; - input0->tensors_[1]->shape_[0] = 2; - input0->tensors_[1]->shape_[1] = 4; - input0->tensors_[1]->data_type_ = kNumberTypeInt32; - // input0->tensors_[1]->format_ = Format_NHWC; - input0->tensors_[2] = new TensorC; - input0->tensors_[2]->shape_size_ = 2; - input0->tensors_[2]->shape_[0] = 2; - input0->tensors_[2]->shape_[1] = 4; - input0->tensors_[2]->data_type_ = kNumberTypeInt32; + + input0->tensors_[0].shape_size_ = 2; + input0->tensors_[0].shape_[0] = 2; + input0->tensors_[0].shape_[1] = 4; + input0->tensors_[0].data_type_ = kNumberTypeInt32; + + input0->tensors_[1].shape_size_ = 2; + input0->tensors_[1].shape_[0] = 2; + input0->tensors_[1].shape_[1] = 4; + input0->tensors_[1].data_type_ = kNumberTypeInt32; + + input0->tensors_[2].shape_size_ = 2; + input0->tensors_[2].shape_[0] = 2; + input0->tensors_[2].shape_[1] = 4; + input0->tensors_[2].data_type_ = kNumberTypeInt32; // input0->tensors_[2]->format_ = Format_NHWC; inputs[0] = reinterpret_cast(input0); @@ -67,6 +65,8 @@ TEST_F(TensorlistSetItemInferTest, TensorlistSetItemInferTest0) { inputs[2]->shape_[0] = 5; inputs[2]->shape_[1] = 6; inputs[2]->data_type_ = kNumberTypeInt32; + std::vector inputs2_data = {3}; + inputs[2]->data_ = inputs2_data.data(); std::vector outputs(1, NULL); outputs[0] = reinterpret_cast(new TensorListC); @@ -82,15 +82,15 @@ TEST_F(TensorlistSetItemInferTest, TensorlistSetItemInferTest0) { ASSERT_EQ(res->element_shape_[1], 4); ASSERT_EQ(res->tensors_data_type_, kNumberTypeInt32); ASSERT_EQ(res->data_type_, kObjectTypeTensorType); - ASSERT_EQ(res->tensors_[0]->shape_size_, 2); - ASSERT_EQ(res->tensors_[0]->shape_[0], 2); - ASSERT_EQ(res->tensors_[0]->shape_[1], 4); - ASSERT_EQ(res->tensors_[1]->shape_size_, 2); - ASSERT_EQ(res->tensors_[1]->shape_[0], 2); - ASSERT_EQ(res->tensors_[1]->shape_[1], 4); - ASSERT_EQ(res->tensors_[2]->shape_size_, 2); - ASSERT_EQ(res->tensors_[2]->shape_[0], 5); - ASSERT_EQ(res->tensors_[2]->shape_[1], 6); + ASSERT_EQ(res->tensors_[0].shape_size_, 2); + ASSERT_EQ(res->tensors_[0].shape_[0], 2); + ASSERT_EQ(res->tensors_[0].shape_[1], 4); + ASSERT_EQ(res->tensors_[1].shape_size_, 2); + ASSERT_EQ(res->tensors_[1].shape_[0], 2); + ASSERT_EQ(res->tensors_[1].shape_[1], 4); + ASSERT_EQ(res->tensors_[2].shape_size_, 2); + ASSERT_EQ(res->tensors_[2].shape_[0], 5); + ASSERT_EQ(res->tensors_[2].shape_[1], 6); // ASSERT_EQ(outputs[0]->format_, Format_NHWC); diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc index ec03bdc827..9dd2acb472 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc @@ -29,28 +29,26 @@ TEST_F(TensorlistStackInferTest, TensorlistStackInferTest0) { std::vector 
inputs(inputs_size, NULL); TensorListC *input0 = new TensorListC; input0->element_num_ = 3; - input0->tensors_ = reinterpret_cast(malloc(input0->element_num_ * sizeof(TensorC *))); + input0->tensors_ = reinterpret_cast(malloc(input0->element_num_ * sizeof(TensorC))); input0->element_shape_size_ = 2; input0->element_shape_[0] = 2; input0->element_shape_[1] = 4; input0->tensors_data_type_ = kNumberTypeInt32; - input0->tensors_[0] = new TensorC; - input0->tensors_[0]->shape_size_ = 2; - input0->tensors_[0]->shape_[0] = 2; - input0->tensors_[0]->shape_[1] = 4; - input0->tensors_[0]->data_type_ = kNumberTypeInt32; - // input0->tensors_[0]->format_ = Format_NHWC; - input0->tensors_[1] = new TensorC; - input0->tensors_[1]->shape_size_ = 2; - input0->tensors_[1]->shape_[0] = 2; - input0->tensors_[1]->shape_[1] = 4; - input0->tensors_[1]->data_type_ = kNumberTypeInt32; - // input0->tensors_[1]->format_ = Format_NHWC; - input0->tensors_[2] = new TensorC; - input0->tensors_[2]->shape_size_ = 2; - input0->tensors_[2]->shape_[0] = 2; - input0->tensors_[2]->shape_[1] = 4; - input0->tensors_[2]->data_type_ = kNumberTypeInt32; + + input0->tensors_[0].shape_size_ = 2; + input0->tensors_[0].shape_[0] = 2; + input0->tensors_[0].shape_[1] = 4; + input0->tensors_[0].data_type_ = kNumberTypeInt32; + + input0->tensors_[1].shape_size_ = 2; + input0->tensors_[1].shape_[0] = 2; + input0->tensors_[1].shape_[1] = 4; + input0->tensors_[1].data_type_ = kNumberTypeInt32; + + input0->tensors_[2].shape_size_ = 2; + input0->tensors_[2].shape_[0] = 2; + input0->tensors_[2].shape_[1] = 4; + input0->tensors_[2].data_type_ = kNumberTypeInt32; // input0->tensors_[2]->format_ = Format_NHWC; inputs[0] = reinterpret_cast(input0); inputs[0]->data_type_ = kObjectTypeTensorType;
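
The standalone sketch below (not part of the patch) illustrates the allocation pattern this change moves to: TensorListC stops holding an array of TensorC pointers that are malloc'd one element at a time and instead holds one contiguous TensorC array obtained from a single malloc and released with a single free. The struct and function names in the sketch are simplified, hypothetical stand-ins rather than the real nnacl definitions.

/*
 * Simplified, hypothetical types -- not the actual TensorC/TensorListC.
 * Before this patch: the list stored ElemC ** and malloc'd each element.
 * After this patch:  the list stores ElemC * and allocates all elements
 * with one malloc, so cleanup is a single free with no per-element loop.
 */
#include <stdlib.h>
#include <string.h>

typedef struct {
  int data_type_;
  int shape_[8];
  size_t shape_size_;
} ElemC; /* stand-in for TensorC */

typedef struct {
  size_t element_num_;
  ElemC *elements_; /* was: ElemC **elements_ */
} ListC; /* stand-in for TensorListC */

/* One allocation covers every element; each element is zero-initialized
 * in place instead of being malloc'd individually. */
static int ListAlloc(ListC *list, size_t n) {
  list->element_num_ = n;
  list->elements_ = (ElemC *)malloc(n * sizeof(ElemC));
  if (list->elements_ == NULL) {
    return -1;
  }
  memset(list->elements_, 0, n * sizeof(ElemC));
  return 0;
}

/* One free releases the whole list. */
static void ListFree(ListC *list) {
  free(list->elements_);
  list->elements_ = NULL;
  list->element_num_ = 0;
}

int main(void) {
  ListC list;
  if (ListAlloc(&list, 3) != 0) {
    return 1;
  }
  /* Elements are now addressed as &list.elements_[i] when a pointer is needed. */
  list.elements_[0].data_type_ = 1;
  ListFree(&list);
  return 0;
}

Because each element is no longer a separately allocated pointer, call sites that need a TensorC * take the address of the array slot instead, which is why the patch rewrites tensors_[j] as &tensors_[j] in the memcpy calls of merge_infer.c, select_infer.c and switch_infer.c, and why the per-element free loops in tensor_util.cc are removed.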