!12981 fix tensorlist excessive mallocs, transform two-level pointer to one level

From: @zhaodezan
Reviewed-by: @hangangqiang
Signed-off-by:
pull/12981/MERGE
mindspore-ci-bot (4 years ago), committed by Gitee
commit 0513c0390f
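
For readers skimming the diff below: the change replaces the per-element heap allocation of TensorC objects inside a TensorListC with one flat array, so a list needs a single malloc/free instead of element_num_ + 1. A minimal sketch of the before/after allocation pattern, using simplified stand-in types rather than the real NNACL headers:

/* Minimal sketch of the allocation change; simplified stand-in types, not the real NNACL code. */
#include <stdlib.h>
#include <string.h>

typedef struct { int data_type_; } TensorC;                          /* stand-in for TensorC */
typedef struct { TensorC **tensors_; size_t element_num_; } OldList; /* before: two-level pointer */
typedef struct { TensorC *tensors_; size_t element_num_; } NewList;  /* after: one flat array */

static int AllocOld(OldList *list, size_t n) {   /* n + 1 mallocs, n + 1 frees */
  list->element_num_ = n;
  list->tensors_ = (TensorC **)malloc(n * sizeof(TensorC *));
  if (list->tensors_ == NULL) return -1;
  for (size_t i = 0; i < n; ++i) {
    list->tensors_[i] = (TensorC *)calloc(1, sizeof(TensorC));
    if (list->tensors_[i] == NULL) return -1;    /* partial cleanup omitted in this sketch */
  }
  return 0;
}

static int AllocNew(NewList *list, size_t n) {   /* exactly one malloc, released with one free */
  list->element_num_ = n;
  list->tensors_ = (TensorC *)malloc(n * sizeof(TensorC));
  if (list->tensors_ == NULL) return -1;
  memset(list->tensors_, 0, n * sizeof(TensorC));
  return 0;
}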

@@ -17,17 +17,6 @@
#include <stdlib.h>
#include <string.h>
int FreeTensorListData(TensorListC *tensor_list) {
// del each tensor in tensors_ and clear tensors_
if (tensor_list->element_num_ == 0) {
return NNACL_OK;
}
for (int i = 0; i < tensor_list->element_num_; ++i) {
tensor_list->tensors_[i] = NULL;
}
return NNACL_OK;
}
int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tensor_shape) {
// This function will create a new tensors_
// Your must to set shape(param2: tensor_shape) and data_type_(tensors_data_type_ = param1: dtype) of each tensor in
@@ -40,21 +29,16 @@ int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tenso
return NNACL_ERR;
}
tensor_list->tensors_data_type_ = dtype;
tensor_list->tensors_ = (TensorC **)malloc(tensor_list->element_num_ * sizeof(TensorC *)); // free in infer_manager
tensor_list->tensors_ = (TensorC *)malloc(tensor_list->element_num_ * sizeof(TensorC)); // free in infer_manager
if (tensor_list->tensors_ == NULL) {
return NNACL_NULL_PTR;
}
memset(tensor_list->tensors_, 0, tensor_list->element_num_ * sizeof(TensorC *));
memset(tensor_list->tensors_, 0, tensor_list->element_num_ * sizeof(TensorC));
for (int i = 0; i < tensor_list->element_num_; ++i) {
TensorC *tensor_ptr = (TensorC *)malloc(sizeof(TensorC));
if (tensor_ptr == NULL) {
return NNACL_ERR;
}
memset(tensor_ptr, 0, sizeof(TensorC));
tensor_ptr->format_ = Format_NHWC;
tensor_ptr->data_type_ = dtype;
ShapeSet(tensor_ptr->shape_, &(tensor_ptr->shape_size_), tensor_shape->shape_[i], tensor_shape->shape_size_[i]);
tensor_list->tensors_[i] = tensor_ptr;
tensor_list->tensors_[i].format_ = Format_NHWC;
tensor_list->tensors_[i].data_type_ = dtype;
ShapeSet(tensor_list->tensors_[i].shape_, &(tensor_list->tensors_[i].shape_size_), tensor_shape->shape_[i],
tensor_shape->shape_size_[i]);
}
return NNACL_OK;
}

@@ -137,12 +137,12 @@ typedef struct TensorListC {
int data_type_;
int format_;
TensorC **tensors_;
size_t element_num_;
int tensors_data_type_; // element_data_type_, keep same as c++
int element_shape_[MAX_SHAPE_SIZE];
size_t element_shape_size_;
int max_elements_num_;
int element_shape_[8];
size_t element_num_;
size_t element_shape_size_;
TensorC *tensors_;
} TensorListC;
typedef struct VectorC {

@@ -42,7 +42,7 @@ int MergeInfer(const TensorC *const *inputs, size_t inputs_size, TensorC **outpu
output_tensorlist->element_num_ = input_tensorlist->element_num_;
for (size_t j = 0; j < output_tensorlist->element_num_; j++) {
memcpy(output_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC));
memcpy(&output_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC));
}
} else {
SetShapeTensor(outputs[i], inputs[i]);

@@ -43,7 +43,7 @@ int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
output_tensorlist->element_num_ = input_tensorlist->element_num_;
for (size_t j = 0; j < output_tensorlist->element_num_; j++) {
memcpy(output_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC));
memcpy(&output_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC));
}
} else {
SetShapeTensor(output, input);

@@ -51,8 +51,8 @@ int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
// note: need delete below?
for (size_t j = 0; j < output_false_tensorlist->element_num_; j++) {
memcpy(output_true_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC));
memcpy(output_false_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC));
memcpy(&output_true_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC));
memcpy(&output_false_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC));
}
} else {
@@ -89,8 +89,8 @@ int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
output_true_tensorlist->element_num_ = input_tensorlist->element_num_;
for (size_t j = 0; j < output_false_tensorlist->element_num_; j++) {
memcpy(output_true_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC));
memcpy(output_false_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC));
memcpy(&output_true_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC));
memcpy(&output_false_tensorlist->tensors_[j], &input_tensorlist->tensors_[j], sizeof(TensorC));
}
} else {
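
The Merge/Select/Switch hunks above all follow the same pattern: element j is now a struct stored in place, so the copy takes its address instead of dereferencing a stored pointer. A minimal sketch of that pattern, with simplified stand-in types:

/* Sketch: copying elements between flat-array tensor lists (simplified stand-in types). */
#include <string.h>

typedef struct { int data_type_; size_t shape_size_; int shape_[8]; } TensorC; /* stand-in */
typedef struct { TensorC *tensors_; size_t element_num_; } TensorListSketch;   /* stand-in */

static void CopyElements(TensorListSketch *dst, const TensorListSketch *src) {
  dst->element_num_ = src->element_num_;
  for (size_t j = 0; j < src->element_num_; j++) {
    /* struct-to-struct copy; plain assignment would work equally well here */
    memcpy(&dst->tensors_[j], &src->tensors_[j], sizeof(TensorC));
  }
}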

@@ -41,35 +41,29 @@ int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_s
int *ele_shape_ptr = (int *)(input1->data_);
TensorListC *output = (TensorListC *)(outputs[0]);
vvector *tensor_shape = (vvector *)malloc(sizeof(vvector));
if (tensor_shape == NULL) {
vvector tensor_shape;
tensor_shape.size_ = dim0;
tensor_shape.shape_ = (int **)malloc(tensor_shape.size_ * sizeof(int *));
if (tensor_shape.shape_ == NULL) {
return NNACL_NULL_PTR;
}
tensor_shape->size_ = dim0;
tensor_shape->shape_ = (int **)malloc(tensor_shape->size_ * sizeof(int *));
if (tensor_shape->shape_ == NULL) {
free(tensor_shape);
return NNACL_NULL_PTR;
}
tensor_shape->shape_size_ = (int *)malloc(tensor_shape->size_ * sizeof(int));
if (tensor_shape->shape_size_ == NULL) {
free(tensor_shape->shape_);
free(tensor_shape);
tensor_shape.shape_size_ = (int *)malloc(tensor_shape.size_ * sizeof(int));
if (tensor_shape.shape_size_ == NULL) {
free(tensor_shape.shape_);
return NNACL_NULL_PTR;
}
for (size_t i = 0; i < dim0; i++) {
tensor_shape->shape_[i] = (int *)(input0->shape_ + 1);
tensor_shape->shape_size_[i] = input0->shape_size_ - 1;
tensor_shape.shape_[i] = (int *)(input0->shape_ + 1);
tensor_shape.shape_size_[i] = input0->shape_size_ - 1;
}
ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, GetElementNum(input1));
output->element_num_ = dim0;
output->data_type_ = kObjectTypeTensorType;
output->format_ = Format_NHWC;
MallocTensorListData(output, input0->data_type_, tensor_shape);
free(tensor_shape->shape_);
free(tensor_shape->shape_size_);
free(tensor_shape);
MallocTensorListData(output, input0->data_type_, &tensor_shape);
free(tensor_shape.shape_);
free(tensor_shape.shape_size_);
return NNACL_OK;
}
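
The same idea is applied to the temporary vvector descriptors in the infer functions above and below: the descriptor itself moves to the stack, and only its two internal arrays are heap-allocated and freed. A sketch of that pattern, with the vvector layout inferred from the diff (shape_, shape_size_, size_), not the canonical nnacl definition:

/* Sketch of the stack-allocated vvector pattern used by the new infer code. */
#include <stdlib.h>

typedef struct {
  int **shape_;      /* per-element shape pointers */
  int *shape_size_;  /* per-element shape lengths  */
  size_t size_;      /* number of elements         */
} vvector;

static int BuildShapes(size_t n) {
  vvector tmp;                                   /* descriptor lives on the stack: no malloc/free for it */
  tmp.size_ = n;
  tmp.shape_ = (int **)malloc(n * sizeof(int *));
  if (tmp.shape_ == NULL) return -1;
  tmp.shape_size_ = (int *)malloc(n * sizeof(int));
  if (tmp.shape_size_ == NULL) {
    free(tmp.shape_);
    return -1;
  }
  /* ... fill tmp.shape_ / tmp.shape_size_ and pass &tmp to a MallocTensorListData-style consumer ... */
  free(tmp.shape_);
  free(tmp.shape_size_);
  return 0;
}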

@@ -37,7 +37,7 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size
if (index < 0 || index > (input0->element_num_ - 1)) {
return NNACL_ERR;
}
TensorC *tensor_index = input0->tensors_[index];
TensorC *tensor_index = &input0->tensors_[index];
TensorC *output = outputs[0];
if (tensor_index->data_type_ != kTypeUnknown) {
output->data_type_ = tensor_index->data_type_;
@@ -60,7 +60,7 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size
}
if (!TensorListIsFullyDefined(element_shape, element_shape_size)) {
for (int i = 0; i < input0->element_num_; ++i) {
TensorC *input = input0->tensors_[i];
TensorC *input = &input0->tensors_[i];
if (input->data_type_ != kTypeUnknown) {
status = TensorListMergeShape(element_shape, &element_shape_size, input->shape_, input->shape_size_);
if (status != NNACL_OK) {
@@ -75,6 +75,6 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size
output->data_type_ = input0->tensors_data_type_;
SetShapeArray(output, element_shape, element_shape_size);
}
output->format_ = input0->tensors_[index]->format_;
output->format_ = input0->tensors_[index].format_;
return NNACL_OK;
}

@@ -50,30 +50,24 @@ int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size
ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, GetElementNum(input0));
output->element_num_ = num_elements;
vvector *tmp_shape = (vvector *)malloc(sizeof(vvector));
if (tmp_shape == NULL) {
vvector tmp_shape;
tmp_shape.size_ = num_elements;
tmp_shape.shape_ = (int **)malloc(tmp_shape.size_ * sizeof(int *));
if (tmp_shape.shape_ == NULL) {
return NNACL_NULL_PTR;
}
tmp_shape->size_ = num_elements;
tmp_shape->shape_ = (int **)malloc(tmp_shape->size_ * sizeof(int *));
if (tmp_shape->shape_ == NULL) {
free(tmp_shape);
return NNACL_NULL_PTR;
}
tmp_shape->shape_size_ = (int *)malloc(tmp_shape->size_ * sizeof(int));
if (tmp_shape->shape_size_ == NULL) {
free(tmp_shape->shape_);
free(tmp_shape);
tmp_shape.shape_size_ = (int *)malloc(tmp_shape.size_ * sizeof(int));
if (tmp_shape.shape_size_ == NULL) {
free(tmp_shape.shape_);
return NNACL_NULL_PTR;
}
for (size_t i = 0; i < num_elements; i++) {
tmp_shape->shape_size_[i] = 0;
tmp_shape->shape_[i] = NULL;
tmp_shape.shape_size_[i] = 0;
tmp_shape.shape_[i] = NULL;
}
MallocTensorListData(output, kTypeUnknown, tmp_shape);
free(tmp_shape->shape_size_);
free(tmp_shape->shape_);
free(tmp_shape);
MallocTensorListData(output, kTypeUnknown, &tmp_shape);
free(tmp_shape.shape_size_);
free(tmp_shape.shape_);
return NNACL_OK;
}

@@ -70,46 +70,35 @@ int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size
input0->element_shape_size_);
}
vvector *out_shape = (vvector *)malloc(sizeof(vvector));
if (out_shape == NULL) {
vvector out_shape;
out_shape.size_ = 0;
out_shape.shape_ = (int **)malloc((input0->element_num_ + 1) * sizeof(int *));
if (out_shape.shape_ == NULL) {
return NNACL_NULL_PTR;
}
out_shape->size_ = 0;
out_shape->shape_ = (int **)malloc((input0->element_num_ + 1) * sizeof(int *));
if (out_shape->shape_ == NULL) {
free(out_shape);
return NNACL_NULL_PTR;
}
out_shape->shape_size_ = (int *)malloc((input0->element_num_ + 1) * sizeof(int));
if (out_shape->shape_size_ == NULL) {
free(out_shape->shape_);
free(out_shape);
out_shape.shape_size_ = (int *)malloc((input0->element_num_ + 1) * sizeof(int));
if (out_shape.shape_size_ == NULL) {
free(out_shape.shape_);
return NNACL_NULL_PTR;
}
if (index == 0 && input0->element_num_ == 0) { // uninitialized tensorlist
out_shape->shape_[out_shape->size_] = (int *)(value_tensor->shape_);
out_shape->shape_size_[out_shape->size_] = value_tensor->shape_size_;
out_shape->size_++;
out_shape.shape_[out_shape.size_] = (int *)(value_tensor->shape_);
out_shape.shape_size_[out_shape.size_] = value_tensor->shape_size_;
out_shape.size_++;
output0->element_num_ = 1;
} else {
output0->element_num_ = input0->element_num_;
for (int i = 0; i < input0->element_num_; ++i) {
TensorC *src_ptr = input0->tensors_[i];
if (src_ptr == NULL) {
free(out_shape->shape_);
free(out_shape->shape_size_);
free(out_shape);
return NNACL_ERR;
}
TensorC *src_ptr = &input0->tensors_[i];
if (src_ptr->data_type_ != kTypeUnknown) {
out_shape->shape_[out_shape->size_] = src_ptr->shape_;
out_shape->shape_size_[out_shape->size_] = src_ptr->shape_size_;
out_shape->size_++;
out_shape.shape_[out_shape.size_] = src_ptr->shape_;
out_shape.shape_size_[out_shape.size_] = src_ptr->shape_size_;
out_shape.size_++;
} else {
out_shape->shape_[out_shape->size_] = NULL;
out_shape->shape_size_[out_shape->size_] = 0;
out_shape->size_++;
out_shape.shape_[out_shape.size_] = NULL;
out_shape.shape_size_[out_shape.size_] = 0;
out_shape.size_++;
}
}
}
@@ -118,11 +107,10 @@ int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size
input0->tensors_data_type_ = value_tensor->data_type_;
}
out_shape->shape_[index] = (int *)(value_tensor->shape_);
out_shape->shape_size_[index] = value_tensor->shape_size_;
MallocTensorListData(output0, input0->tensors_data_type_, out_shape);
free(out_shape->shape_);
free(out_shape->shape_size_);
free(out_shape);
out_shape.shape_[index] = (int *)(value_tensor->shape_);
out_shape.shape_size_[index] = value_tensor->shape_size_;
MallocTensorListData(output0, input0->tensors_data_type_, &out_shape);
free(out_shape.shape_);
free(out_shape.shape_size_);
return NNACL_OK;
}

@@ -50,7 +50,7 @@ int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size,
}
if (!TensorListIsFullyDefined(input0->element_shape_, input0->element_shape_size_)) {
for (int i = 0; i < input0->element_num_; ++i) {
TensorC *tensor_ele = input0->tensors_[i];
TensorC *tensor_ele = &input0->tensors_[i];
if (tensor_ele->data_type_ != kTypeUnknown) {
status = TensorListMergeShape(output_shape, &output_shape_size, tensor_ele->shape_, tensor_ele->shape_size_);
if (status == NNACL_ERR) {

@@ -87,10 +87,6 @@ void FreeAllTensorC(std::vector<TensorC *> *tensors_in) {
}
void FreeTensorListC(TensorListC *tensorlist_c) {
for (size_t i = 0; i < tensorlist_c->element_num_; i++) {
free(tensorlist_c->tensors_[i]);
tensorlist_c->tensors_[i] = nullptr;
}
if (tensorlist_c->tensors_ != nullptr) {
free(tensorlist_c->tensors_);
tensorlist_c->tensors_ = nullptr;
@@ -132,18 +128,13 @@ int TensorList2TensorListC(TensorList *src, TensorListC *dst) {
dst->format_ = src->format();
dst->element_num_ = src->shape().empty() ? 0 : src->tensors().size();
dst->tensors_ = reinterpret_cast<TensorC **>(malloc(dst->element_num_ * sizeof(TensorC *)));
dst->tensors_ = reinterpret_cast<TensorC *>(malloc(dst->element_num_ * sizeof(TensorC)));
if (dst->tensors_ == nullptr) {
return RET_ERROR;
}
memset(dst->tensors_, 0, dst->element_num_ * sizeof(TensorC *));
memset(dst->tensors_, 0, dst->element_num_ * sizeof(TensorC));
for (size_t i = 0; i < dst->element_num_; i++) {
dst->tensors_[i] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC)));
if (dst->tensors_[i] == nullptr) {
return NNACL_ERR;
}
memset(dst->tensors_[i], 0, sizeof(TensorC));
Tensor2TensorC(src->tensors().at(i), dst->tensors_[i]);
Tensor2TensorC(src->tensors().at(i), &dst->tensors_[i]);
}
dst->tensors_data_type_ = src->tensors_data_type();
@@ -163,7 +154,7 @@ void TensorListC2TensorList(TensorListC *src, TensorList *dst) {
// Set Tensors
for (size_t i = 0; i < src->element_num_; i++) {
TensorC2Tensor(src->tensors_[i], dst->GetTensor(i));
TensorC2Tensor(&src->tensors_[i], dst->GetTensor(i));
}
dst->set_element_shape(std::vector<int>(src->element_shape_, src->element_shape_ + src->element_shape_size_));
@@ -183,28 +174,13 @@ int GenerateMergeOutTensorC(const std::vector<lite::Tensor *> &inputs, std::vect
output_tensorlist->element_num_ = inputs[i]->shape().empty() ? 0 : inputs[i]->shape().at(0);
if (output_tensorlist->element_num_ != 0) {
output_tensorlist->tensors_ =
reinterpret_cast<TensorC **>(malloc(output_tensorlist->element_num_ * sizeof(TensorC *)));
reinterpret_cast<TensorC *>(malloc(output_tensorlist->element_num_ * sizeof(TensorC)));
if (output_tensorlist->tensors_ == nullptr) {
free(output_tensorlist);
output_tensorlist = nullptr;
return RET_ERROR;
}
memset(output_tensorlist->tensors_, 0, output_tensorlist->element_num_ * sizeof(TensorC *));
for (size_t j = 0; j < output_tensorlist->element_num_; j++) {
output_tensorlist->tensors_[j] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC)));
if (output_tensorlist->tensors_[j] == nullptr) {
for (size_t k = 0; k < j; k++) {
free(output_tensorlist->tensors_[k]);
output_tensorlist->tensors_[k] = nullptr;
}
free(output_tensorlist->tensors_);
output_tensorlist->tensors_ = nullptr;
free(output_tensorlist);
output_tensorlist = nullptr;
return RET_ERROR;
}
memset(output_tensorlist->tensors_[j], 0, sizeof(TensorC));
}
memset(output_tensorlist->tensors_, 0, output_tensorlist->element_num_ * sizeof(TensorC));
}
out_tensor_c->push_back(reinterpret_cast<TensorC *const>(output_tensorlist));
@@ -239,26 +215,13 @@ int GenerateSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, std::vec
output_tensorlist1->element_num_ = inputs[i + 1]->shape().empty() ? 0 : inputs[i + 1]->shape().at(0);
if (output_tensorlist1->element_num_ != 0) {
output_tensorlist1->tensors_ =
reinterpret_cast<TensorC **>(malloc(output_tensorlist1->element_num_ * sizeof(TensorC *)));
reinterpret_cast<TensorC *>(malloc(output_tensorlist1->element_num_ * sizeof(TensorC)));
if (output_tensorlist1->tensors_ == nullptr) {
free(output_tensorlist1);
output_tensorlist1 = nullptr;
return RET_ERROR;
}
memset(output_tensorlist1->tensors_, 0, output_tensorlist1->element_num_ * sizeof(TensorC *));
for (size_t j = 0; j < output_tensorlist1->element_num_; j++) {
output_tensorlist1->tensors_[j] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC)));
if (output_tensorlist1->tensors_[j] == nullptr) {
for (size_t k = 0; k < j; k++) {
free(output_tensorlist1->tensors_[k]);
output_tensorlist1->tensors_[k] = nullptr;
}
free(output_tensorlist1->tensors_);
output_tensorlist1->tensors_ = nullptr;
return RET_ERROR;
}
memset(output_tensorlist1->tensors_[j], 0, sizeof(TensorC));
}
memset(output_tensorlist1->tensors_, 0, output_tensorlist1->element_num_ * sizeof(TensorC));
}
out_tensor_c->at(i) = reinterpret_cast<TensorC *const>(output_tensorlist1);
@@ -271,28 +234,13 @@ int GenerateSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, std::vec
output_tensorlist2->element_num_ = inputs[i + 1]->shape().empty() ? 0 : inputs[i + 1]->shape().at(0);
if (output_tensorlist2->element_num_ != 0) {
output_tensorlist2->tensors_ =
reinterpret_cast<TensorC **>(malloc(output_tensorlist2->element_num_ * sizeof(TensorC *)));
reinterpret_cast<TensorC *>(malloc(output_tensorlist2->element_num_ * sizeof(TensorC)));
if (output_tensorlist2->tensors_ == nullptr) {
free(output_tensorlist2);
output_tensorlist2 = nullptr;
return RET_ERROR;
}
memset(output_tensorlist2->tensors_, 0, output_tensorlist2->element_num_ * sizeof(TensorC *));
for (size_t j = 0; j < output_tensorlist2->element_num_; j++) {
output_tensorlist2->tensors_[j] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC)));
if (output_tensorlist2->tensors_[j] == nullptr) {
for (size_t k = 0; k < j; k++) {
free(output_tensorlist2->tensors_[k]);
output_tensorlist2->tensors_[k] = nullptr;
}
free(output_tensorlist2->tensors_);
output_tensorlist2->tensors_ = nullptr;
free(output_tensorlist2);
output_tensorlist2 = nullptr;
return RET_ERROR;
}
memset(output_tensorlist2->tensors_[j], 0, sizeof(TensorC));
}
memset(output_tensorlist2->tensors_, 0, output_tensorlist2->element_num_ * sizeof(TensorC));
}
out_tensor_c->at(i + outputs->size() / 2) = reinterpret_cast<TensorC *const>(output_tensorlist2);
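
Because the output tensor lists in GenerateMergeOutTensorC/GenerateSwitchOutTensorC now need only one allocation, the nested "free everything allocated so far" error paths collapse into a single free, as the two hunks above show. A sketch of the simplified failure handling, again with simplified stand-in types:

/* Sketch: with a single allocation, the error path needs no nested cleanup loops. */
#include <stdlib.h>
#include <string.h>

typedef struct { int data_type_; } TensorC;                                  /* stand-in */
typedef struct { TensorC *tensors_; size_t element_num_; } TensorListSketch; /* stand-in */

static TensorListSketch *NewOutputList(size_t n) {
  TensorListSketch *list = (TensorListSketch *)calloc(1, sizeof(TensorListSketch));
  if (list == NULL) return NULL;
  list->element_num_ = n;
  if (n != 0) {
    list->tensors_ = (TensorC *)malloc(n * sizeof(TensorC));
    if (list->tensors_ == NULL) {  /* one failure path: free the list object and return */
      free(list);
      return NULL;
    }
    memset(list->tensors_, 0, n * sizeof(TensorC));
  }
  return list;
}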

@@ -58,9 +58,9 @@ TEST_F(TensorlistFromtensorInferTest, TensorlistFromtensorInferTest0) {
ASSERT_EQ(out->tensors_data_type_, kNumberTypeInt32);
// ASSERT_EQ(outputs[0]->format_, Format_NHWC);
for (size_t i = 0; i < out->element_num_; i++) {
ASSERT_EQ(out->tensors_[i]->shape_size_, 2);
ASSERT_EQ(out->tensors_[i]->shape_[0], 6);
ASSERT_EQ(out->tensors_[i]->shape_[1], 5);
ASSERT_EQ(out->tensors_[i].shape_size_, 2);
ASSERT_EQ(out->tensors_[i].shape_[0], 6);
ASSERT_EQ(out->tensors_[i].shape_[1], 5);
}
delete parameter;
for (size_t i = 0; i < inputs_size; i++) {

@@ -30,27 +30,22 @@ TEST_F(TensorlistGetItemInferTest, TensorlistGetItemInferTest0) {
std::vector<TensorC *> inputs(inputs_size, NULL);
TensorListC *input0 = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC)));
input0->element_num_ = 3;
input0->tensors_ = reinterpret_cast<TensorC **>(malloc(input0->element_num_ * sizeof(TensorC *)));
input0->tensors_[0] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC)));
input0->tensors_[0]->shape_size_ = 2;
input0->tensors_[0]->shape_[0] = 1;
input0->tensors_[0]->shape_[1] = 2;
input0->tensors_[0]->data_type_ = kNumberTypeInt32;
// input0->tensors_[0]->format_ = Format_NHWC;
input0->tensors_[1] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC)));
input0->tensors_[1]->shape_size_ = 3;
input0->tensors_[1]->shape_[0] = 3;
input0->tensors_[1]->shape_[1] = 4;
input0->tensors_[1]->shape_[2] = 5;
input0->tensors_[1]->data_type_ = kNumberTypeInt32;
// input0->tensors_[1]->format_ = Format_NHWC;
input0->tensors_[2] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC)));
input0->tensors_[2]->shape_size_ = 4;
input0->tensors_[2]->shape_[0] = 6;
input0->tensors_[2]->shape_[1] = 7;
input0->tensors_[2]->shape_[2] = 8;
input0->tensors_[2]->shape_[3] = 9;
input0->tensors_[2]->data_type_ = kNumberTypeInt32;
input0->tensors_ = reinterpret_cast<TensorC *>(malloc(input0->element_num_ * sizeof(TensorC)));
input0->tensors_[0].shape_size_ = 2;
input0->tensors_[0].shape_[0] = 1;
input0->tensors_[0].shape_[1] = 2;
input0->tensors_[0].data_type_ = kNumberTypeInt32;
input0->tensors_[1].shape_size_ = 3;
input0->tensors_[1].shape_[0] = 3;
input0->tensors_[1].shape_[1] = 4;
input0->tensors_[1].shape_[2] = 5;
input0->tensors_[1].data_type_ = kNumberTypeInt32;
input0->tensors_[2].shape_size_ = 4;
input0->tensors_[2].shape_[0] = 6;
input0->tensors_[2].shape_[1] = 7;
input0->tensors_[2].shape_[2] = 8;
input0->tensors_[2].shape_[3] = 9;
input0->tensors_[2].data_type_ = kNumberTypeInt32;
// input0->tensors_[2]->format_ = Format_NHWC;
inputs[0] = reinterpret_cast<TensorC *>(input0);
inputs[0]->data_type_ = kObjectTypeTensorType;

@@ -57,7 +57,7 @@ TEST_F(TensorlistReserveInferTest, TensorlistReserveInferTest0) {
ASSERT_EQ(out->tensors_data_type_, kTypeUnknown);
// ASSERT_EQ(outputs[0]->format_, Format_NHWC);
for (size_t i = 0; i < out->element_num_; i++) {
ASSERT_EQ(out->tensors_[i]->shape_size_, 0);
ASSERT_EQ(out->tensors_[i].shape_size_, 0);
}
delete parameter;
for (size_t i = 0; i < inputs_size; i++) {

@@ -29,29 +29,27 @@ TEST_F(TensorlistSetItemInferTest, TensorlistSetItemInferTest0) {
std::vector<TensorC *> inputs(inputs_size, NULL);
TensorListC *input0 = new TensorListC;
input0->element_num_ = 3;
input0->tensors_ = reinterpret_cast<TensorC **>(malloc(input0->element_num_ * sizeof(TensorC *)));
input0->tensors_ = reinterpret_cast<TensorC *>(malloc(input0->element_num_ * sizeof(TensorC)));
input0->element_shape_size_ = 2;
input0->element_shape_[0] = 2;
input0->element_shape_[1] = 4;
input0->tensors_data_type_ = kNumberTypeInt32;
input0->data_type_ = kObjectTypeTensorType;
input0->tensors_[0] = new TensorC;
input0->tensors_[0]->shape_size_ = 2;
input0->tensors_[0]->shape_[0] = 2;
input0->tensors_[0]->shape_[1] = 4;
input0->tensors_[0]->data_type_ = kNumberTypeInt32;
// input0->tensors_[0]->format_ = Format_NHWC;
input0->tensors_[1] = new TensorC;
input0->tensors_[1]->shape_size_ = 2;
input0->tensors_[1]->shape_[0] = 2;
input0->tensors_[1]->shape_[1] = 4;
input0->tensors_[1]->data_type_ = kNumberTypeInt32;
// input0->tensors_[1]->format_ = Format_NHWC;
input0->tensors_[2] = new TensorC;
input0->tensors_[2]->shape_size_ = 2;
input0->tensors_[2]->shape_[0] = 2;
input0->tensors_[2]->shape_[1] = 4;
input0->tensors_[2]->data_type_ = kNumberTypeInt32;
input0->tensors_[0].shape_size_ = 2;
input0->tensors_[0].shape_[0] = 2;
input0->tensors_[0].shape_[1] = 4;
input0->tensors_[0].data_type_ = kNumberTypeInt32;
input0->tensors_[1].shape_size_ = 2;
input0->tensors_[1].shape_[0] = 2;
input0->tensors_[1].shape_[1] = 4;
input0->tensors_[1].data_type_ = kNumberTypeInt32;
input0->tensors_[2].shape_size_ = 2;
input0->tensors_[2].shape_[0] = 2;
input0->tensors_[2].shape_[1] = 4;
input0->tensors_[2].data_type_ = kNumberTypeInt32;
// input0->tensors_[2]->format_ = Format_NHWC;
inputs[0] = reinterpret_cast<TensorC *>(input0);
@@ -67,6 +65,8 @@ TEST_F(TensorlistSetItemInferTest, TensorlistSetItemInferTest0) {
inputs[2]->shape_[0] = 5;
inputs[2]->shape_[1] = 6;
inputs[2]->data_type_ = kNumberTypeInt32;
std::vector<int> inputs2_data = {3};
inputs[2]->data_ = inputs2_data.data();
std::vector<TensorC *> outputs(1, NULL);
outputs[0] = reinterpret_cast<TensorC *>(new TensorListC);
@@ -82,15 +82,15 @@ TEST_F(TensorlistSetItemInferTest, TensorlistSetItemInferTest0) {
ASSERT_EQ(res->element_shape_[1], 4);
ASSERT_EQ(res->tensors_data_type_, kNumberTypeInt32);
ASSERT_EQ(res->data_type_, kObjectTypeTensorType);
ASSERT_EQ(res->tensors_[0]->shape_size_, 2);
ASSERT_EQ(res->tensors_[0]->shape_[0], 2);
ASSERT_EQ(res->tensors_[0]->shape_[1], 4);
ASSERT_EQ(res->tensors_[1]->shape_size_, 2);
ASSERT_EQ(res->tensors_[1]->shape_[0], 2);
ASSERT_EQ(res->tensors_[1]->shape_[1], 4);
ASSERT_EQ(res->tensors_[2]->shape_size_, 2);
ASSERT_EQ(res->tensors_[2]->shape_[0], 5);
ASSERT_EQ(res->tensors_[2]->shape_[1], 6);
ASSERT_EQ(res->tensors_[0].shape_size_, 2);
ASSERT_EQ(res->tensors_[0].shape_[0], 2);
ASSERT_EQ(res->tensors_[0].shape_[1], 4);
ASSERT_EQ(res->tensors_[1].shape_size_, 2);
ASSERT_EQ(res->tensors_[1].shape_[0], 2);
ASSERT_EQ(res->tensors_[1].shape_[1], 4);
ASSERT_EQ(res->tensors_[2].shape_size_, 2);
ASSERT_EQ(res->tensors_[2].shape_[0], 5);
ASSERT_EQ(res->tensors_[2].shape_[1], 6);
// ASSERT_EQ(outputs[0]->format_, Format_NHWC);

@@ -29,28 +29,26 @@ TEST_F(TensorlistStackInferTest, TensorlistStackInferTest0) {
std::vector<TensorC *> inputs(inputs_size, NULL);
TensorListC *input0 = new TensorListC;
input0->element_num_ = 3;
input0->tensors_ = reinterpret_cast<TensorC **>(malloc(input0->element_num_ * sizeof(TensorC *)));
input0->tensors_ = reinterpret_cast<TensorC *>(malloc(input0->element_num_ * sizeof(TensorC)));
input0->element_shape_size_ = 2;
input0->element_shape_[0] = 2;
input0->element_shape_[1] = 4;
input0->tensors_data_type_ = kNumberTypeInt32;
input0->tensors_[0] = new TensorC;
input0->tensors_[0]->shape_size_ = 2;
input0->tensors_[0]->shape_[0] = 2;
input0->tensors_[0]->shape_[1] = 4;
input0->tensors_[0]->data_type_ = kNumberTypeInt32;
// input0->tensors_[0]->format_ = Format_NHWC;
input0->tensors_[1] = new TensorC;
input0->tensors_[1]->shape_size_ = 2;
input0->tensors_[1]->shape_[0] = 2;
input0->tensors_[1]->shape_[1] = 4;
input0->tensors_[1]->data_type_ = kNumberTypeInt32;
// input0->tensors_[1]->format_ = Format_NHWC;
input0->tensors_[2] = new TensorC;
input0->tensors_[2]->shape_size_ = 2;
input0->tensors_[2]->shape_[0] = 2;
input0->tensors_[2]->shape_[1] = 4;
input0->tensors_[2]->data_type_ = kNumberTypeInt32;
input0->tensors_[0].shape_size_ = 2;
input0->tensors_[0].shape_[0] = 2;
input0->tensors_[0].shape_[1] = 4;
input0->tensors_[0].data_type_ = kNumberTypeInt32;
input0->tensors_[1].shape_size_ = 2;
input0->tensors_[1].shape_[0] = 2;
input0->tensors_[1].shape_[1] = 4;
input0->tensors_[1].data_type_ = kNumberTypeInt32;
input0->tensors_[2].shape_size_ = 2;
input0->tensors_[2].shape_[0] = 2;
input0->tensors_[2].shape_[1] = 4;
input0->tensors_[2].data_type_ = kNumberTypeInt32;
// input0->tensors_[2]->format_ = Format_NHWC;
inputs[0] = reinterpret_cast<TensorC *>(input0);
inputs[0]->data_type_ = kObjectTypeTensorType;
