!7685 support scalar

Merge pull request !7685 from hangq/master
pull/7685/MERGE
mindspore-ci-bot committed 4 years ago via Gitee
commit c962ccbe07

@@ -1,31 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_LITE_COMMON_OP_UTILS_H_
-#define MINDSPORE_LITE_COMMON_OP_UTILS_H_
-#include <functional>
-#include <string>
-#include "schema/model_generated.h"
-namespace mindspore {
-namespace lite {
-inline schema::PrimitiveType GetOpType(const schema::CNode &opDef) { return opDef.primitive()->value_type(); }
-inline std::string GetOpTypeName(const schema::CNode &opDef) { return schema::EnumNamePrimitiveType(GetOpType(opDef)); }
-}  // namespace lite
-}  // namespace mindspore
-#endif  // MINDSPORE_LITE_COMMON_OP_UTILS_H_

@@ -44,7 +44,7 @@ int Executor::Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor *> &out_
   }
   kernel::LiteKernelUtil::InitTensorRefCount(kernels);
   for (auto out_tensor : out_tensors) {  // increase RefCount of output tensors, such that Run will not free them
-    out_tensor->SetRefCount(out_tensor->RefCount() + 1);
+    out_tensor->set_ref_count(out_tensor->ref_count() + 1);
   }
   for (auto *kernel : kernels) {
@@ -101,7 +101,7 @@ int Executor::TransformTensorLayoutFp32(Tensor *tensor, schema::Format dst_forma
     return RET_ERROR;
   }
   PackNC4HW4ToNHWCFp32(src_data, dst_data, tensor->Batch(), tensor->Height() * tensor->Width(), tensor->Channel());
-  tensor->SetData(dst_data);
+  tensor->set_data(dst_data);
   tensor->SetFormat(dst_format);
   allocator->Free(src_data);
   return RET_OK;

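Context for the executor hunk above (not part of the diff): output tensors get their reference count bumped because each kernel decrements its outputs' counts after running and frees the backing buffer at zero. A minimal sketch of that lifecycle, using the renamed accessors and otherwise simplified:

```cpp
// Simplified sketch, not the actual Executor::Run body.
// Tensors consumed by N downstream kernels start with ref_count == N (see
// InitTensorRefCount below); each consumer decrements after use, and FreeData
// runs at zero. Pinning graph outputs with +1 keeps their buffers alive for
// the caller after the last kernel finishes.
void RunSketch(std::vector<Tensor *> &out_tensors, std::vector<kernel::LiteKernel *> &kernels) {
  for (auto *t : out_tensors) t->set_ref_count(t->ref_count() + 1);  // pin outputs
  for (auto *k : kernels) {
    k->Run();                   // execute the node
    k->DecOutTensorRefCount();  // frees intermediates whose count reaches zero
  }
}
```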
@@ -39,14 +39,14 @@ void LiteKernel::FreeWorkspace() {
 void LiteKernel::InitOutTensorRefCount() {
   for (auto *tensor : this->out_tensors_) {
-    tensor->SetRefCount(this->out_kernels_.size());
+    tensor->set_ref_count(this->out_kernels_.size());
   }
 }
 int LiteKernel::DecOutTensorRefCount() {
   for (auto *tensor : this->out_tensors_) {
-    tensor->decRefCount();
-    if (0 >= tensor->RefCount()) {
+    tensor->DecRefCount();
+    if (0 >= tensor->ref_count()) {
       auto ret = tensor->FreeData();
       if (0 != ret) {
         MS_LOG(ERROR) << "Free tensor data failed";
@@ -190,8 +190,7 @@ std::vector<lite::Tensor *> LiteKernelUtil::SubgraphInputTensors(const std::vect
   for (const auto &kernel : input_kernels) {
     for (const auto &tensor : kernel->in_tensors()) {
       auto iter = std::find(all_output_tensors.begin(), all_output_tensors.end(), tensor);
-      if (iter == all_output_tensors.end() &&
-          !(tensor->category() == mindspore::lite::Tensor::CONST && tensor->data_c() != nullptr)) {
+      if (iter == all_output_tensors.end() && !tensor->IsConst()) {
         input_tensors.emplace_back(tensor);
       }
     }

@@ -61,11 +61,12 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
       MS_LOG(ERROR) << i << "th tensor in model is nullptr";
       return RET_NULL_PTR;
     }
+    auto src_category = TensorCategory(srcTensor);
     std::vector<int> shape;
     if (srcTensor->dims() == nullptr) {
       MS_LOG(DEBUG) << "Dims of " << i << "th tensor is nullptr";
     } else {
-      if (TensorCategory(srcTensor) == Tensor::Category::CONST) {
+      if (src_category == Tensor::Category::CONST_TENSOR) {
         if (srcTensor->dataType() == kObjectTypeString && srcTensor->data() != nullptr) {
           shape.push_back(srcTensor->data()->size());
         } else {
@@ -76,18 +77,13 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
       }
     }
     int dataType = srcTensor->dataType();
-    auto *dstTensor =
-      new (std::nothrow) Tensor(TypeId(dataType), shape, srcTensor->format(), TensorCategory(srcTensor));
+    auto *dstTensor = new (std::nothrow) Tensor(TypeId(dataType), shape, srcTensor->format(), src_category);
     if (dstTensor == nullptr) {
       MS_LOG(ERROR) << "new " << i << "th tensor failed";
       return RET_NULL_PTR;
     }
-    if (TensorCategory(srcTensor) == Tensor::Category::CONST && srcTensor->data() != nullptr &&
-        srcTensor->data()->size() > 0) {
-      if (shape.empty()) {
-        shape.push_back(1);
-        dstTensor->set_shape(shape);
-      }
+    if ((src_category == Tensor::Category::CONST_TENSOR || src_category == Tensor::Category::CONST_SCALAR) &&
+        srcTensor->data() != nullptr && srcTensor->data()->size() > 0) {
       MS_ASSERT(dstTensor->Size() == srcTensor->data()->size());
       if (WeightTensorNeedCopy(model, i)) {
         auto dst_data = dstTensor->MutableData();
@@ -99,7 +95,7 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
         memcpy(dst_data, srcTensor->data()->data(), dstTensor->Size());
         copyed_tensor_idxes_.emplace_back(i);
       } else {
-        dstTensor->SetData(const_cast<unsigned char *>(srcTensor->data()->data()));
+        dstTensor->set_data(const_cast<unsigned char *>(srcTensor->data()->data()));
       }
     }
     auto quant_params = srcTensor->quantParams();
@@ -395,7 +391,7 @@ void LiteSession::BindThread(bool if_bind) {
     MS_LOG(ERROR) << "Device list is empty.";
     return;
   }
-  if (this->context_->IsCpuEnabled()) {
+  if (!this->context_->IsCpuEnabled()) {
     return;
   }
   auto cpu_device_info = this->context_->GetCpuInfo();
@@ -415,9 +411,8 @@ LiteSession::~LiteSession() {
     auto *tensor = tensors_.at(i);
     MS_ASSERT(tensor != nullptr);
     // data of weight tensor of node in packed_op can not be to free, we will free weight data when freeing meta_graph
-    if (tensor->category() == Tensor::Category::CONST && !IsContain(this->inputs_, tensor) &&
-        !IsContain(copyed_tensor_idxes_, i)) {
-      tensor->SetData(nullptr);
+    if (tensor->IsConst() && !IsContain(this->inputs_, tensor) && !IsContain(copyed_tensor_idxes_, i)) {
+      tensor->set_data(nullptr);
     }
     delete tensor;
   }

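The ConvertTensors hunks above are the heart of this PR: the old code papered over scalar weights by rewriting their shape to {1}, while the new code gives them their own CONST_SCALAR category and keeps the empty shape. Note also the BindThread hunk fixes an inverted condition (it previously returned early when the CPU *was* enabled). A hedged illustration of the new classification, using the TensorCategory overload added in tensor.h further down:

```cpp
// A value-node tensor with no dims whose payload is exactly one element wide
// is now classified as a constant scalar rather than reshaped to {1}.
auto category = TensorCategory(schema::NodeType::NodeType_ValueNode,
                               /*shape_num=*/0,
                               /*data_type=*/kNumberTypeFloat32,
                               /*data_size=*/sizeof(float));
// category == Tensor::Category::CONST_SCALAR
```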
@@ -50,14 +50,14 @@ kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector<lite::T
       MS_LOG(ERROR) << "dequant data is nullptr.";
       return nullptr;
     }
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto kernel = new (std::nothrow) FullconnectionCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (!kernel) {
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -69,13 +69,13 @@ kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector<lite::T
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;
 }

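The remaining kernel-creator hunks (the FP16, FP32, and INT8 variants below) are the same mechanical SetData -> set_data rename applied to one recurring pattern: a dequantized weight buffer is swapped in for kernel construction, and the original pointer is restored on every exit path. A self-contained sketch of that pattern, with a hypothetical stand-in for lite::Tensor:

```cpp
#include <cstdlib>

// Hypothetical minimal stand-in for lite::Tensor, just enough for the pattern.
struct TensorLike {
  void *data = nullptr;
  void *data_c() const { return data; }
  void set_data(void *d) { data = d; }
  void FreeData() { free(data); data = nullptr; }
};

// Every creator below follows this shape: swap in the dequantized weights,
// build the kernel, then free the temporary copy and restore the original
// (still-quantized) data pointer, on failure paths and on success alike.
void CreateWithDequant(TensorLike *weight, void *dequant_weight, bool dequant_flag) {
  void *restore_data = weight->data_c();
  if (dequant_flag) weight->set_data(dequant_weight);
  // ... construct and Init the kernel here ...
  if (dequant_flag) {
    weight->FreeData();              // drop the dequantized copy
    weight->set_data(restore_data);  // put the original weights back
  }
}
```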
@@ -150,7 +150,7 @@ kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *>
       return nullptr;
     }
     weight_tensor->set_data_type(kNumberTypeFloat32);
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
@@ -165,7 +165,7 @@ kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *>
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -177,13 +177,13 @@ kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *>
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;
 }

@@ -192,7 +192,7 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
       return nullptr;
     }
     weight_tensor->set_data_type(kNumberTypeFloat32);
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
@@ -224,7 +224,7 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
     MS_LOG(DEBUG) << "Create conv fp16 kernel failed.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -236,13 +236,13 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
              << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;
 }

@@ -214,7 +214,7 @@ kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor
       return nullptr;
     }
     weight_tensor->set_data_type(kNumberTypeFloat32);
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto kernel = new (std::nothrow) DeconvolutionDepthwiseFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
@@ -222,7 +222,7 @@ kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -234,13 +234,13 @@ kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;
 }

@@ -226,7 +226,7 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *>
       return nullptr;
     }
     weight_tensor->set_data_type(kNumberTypeFloat32);
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   kernel::LiteKernel *kernel;
@@ -242,7 +242,7 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *>
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -254,13 +254,13 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *>
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;
 }

@@ -247,14 +247,14 @@ kernel::LiteKernel *CpuFullConnectionFp16KernelCreator(const std::vector<lite::T
       return nullptr;
     }
     weight_tensor->set_data_type(kNumberTypeFloat32);
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto *kernel = new (std::nothrow) FullconnectionFP16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -266,13 +266,13 @@ kernel::LiteKernel *CpuFullConnectionFp16KernelCreator(const std::vector<lite::T
     delete kernel;
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;
 }

@@ -260,14 +260,14 @@ kernel::LiteKernel *CpuMatmulFp16KernelCreator(const std::vector<lite::Tensor *>
       return nullptr;
     }
     weight_tensor->set_data_type(kNumberTypeFloat32);
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto *kernel = new (std::nothrow) MatmulFP16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -279,13 +279,13 @@ kernel::LiteKernel *CpuMatmulFp16KernelCreator(const std::vector<lite::Tensor *>
     delete kernel;
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;
 }

@@ -225,8 +225,8 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
     new_inputs.emplace_back(in_tensor);
     // nwe weight
-    auto filter_tensor = new (std::nothrow)
-      lite::Tensor(inputs.at(kWeightIndex)->data_type(), filter_shape, Format_NHWC, lite::Tensor::Category::CONST);
+    auto filter_tensor = new (std::nothrow) lite::Tensor(inputs.at(kWeightIndex)->data_type(), filter_shape,
+                                                         Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
     filter_tensor->MallocData();
     int copy_length = kernel_h * kernel_w * new_in_channel * new_out_channel;
     memcpy(filter_tensor->data_c(), origin_weight + i * copy_length, copy_length * sizeof(float));
@@ -235,7 +235,7 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
     // if has bias, set new bias
     if (has_bias) {
       auto bias_tensor = new (std::nothrow)
-        lite::Tensor(inputs.at(kBiasIndex)->data_type(), bias_shape, Format_NHWC, lite::Tensor::Category::CONST);
+        lite::Tensor(inputs.at(kBiasIndex)->data_type(), bias_shape, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
       bias_tensor->MallocData();
       memcpy(bias_tensor->data_c(), origin_bias + i * new_out_channel, new_out_channel * sizeof(float));
       new_inputs.emplace_back(bias_tensor);
@@ -293,7 +293,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
       free(op_parameter);
       return nullptr;
     }
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   kernel::LiteKernel *kernel;
@@ -307,7 +307,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(op_parameter);
     return nullptr;
@@ -319,14 +319,14 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;

@@ -132,7 +132,7 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *>
       free(opParameter);
       return nullptr;
     }
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
@@ -146,7 +146,7 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *>
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (weight_tensor->data_type() == kNumberTypeInt8 || weight_tensor->data_type() == kNumberTypeInt16) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -158,14 +158,14 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *>
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (weight_tensor->data_type() == kNumberTypeInt8 || weight_tensor->data_type() == kNumberTypeInt16) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (weight_tensor->data_type() == kNumberTypeInt8 || weight_tensor->data_type() == kNumberTypeInt16) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;

@@ -243,7 +243,7 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *>
       free(opParameter);
       return nullptr;
     }
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   kernel::LiteKernel *kernel;
@@ -259,7 +259,7 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *>
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -271,14 +271,14 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *>
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;

@@ -205,7 +205,7 @@ kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor
       free(opParameter);
       return nullptr;
     }
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto kernel =
     new (std::nothrow) kernel::DeconvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
@@ -213,7 +213,7 @@ kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -225,13 +225,13 @@ kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (dequant_flag) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (dequant_flag) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
   }
   return kernel;
 }

@@ -217,7 +217,7 @@ kernel::LiteKernel *CpuMatmulInt8KernelCreator(const std::vector<lite::Tensor *>
       free(opParameter);
       return nullptr;
     }
-    weight_tensor->SetData(dequant_weight);
+    weight_tensor->set_data(dequant_weight);
   }
   auto input_tensor = inputs.at(kInputIndex);
@@ -230,7 +230,7 @@ kernel::LiteKernel *CpuMatmulInt8KernelCreator(const std::vector<lite::Tensor *>
     MS_LOG(ERROR) << "kernel is nullptr.";
     if (is_const_quant_weight) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     free(opParameter);
     return nullptr;
@@ -242,14 +242,14 @@ kernel::LiteKernel *CpuMatmulInt8KernelCreator(const std::vector<lite::Tensor *>
              << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
     if (is_const_quant_weight) {
       weight_tensor->FreeData();
-      weight_tensor->SetData(restore_data);
+      weight_tensor->set_data(restore_data);
     }
     return nullptr;
   }
   if (is_const_quant_weight) {
     weight_tensor->FreeData();
-    weight_tensor->SetData(restore_data);
+    weight_tensor->set_data(restore_data);
  }
  return kernel;

@@ -71,7 +71,7 @@ int ArithmeticOpenCLKernel::InitBuffer() {
   for (auto in_tensor_ : in_tensors_) {
     auto nhwc_shape = GetNHWCShape(in_tensor_->shape());
     inputs_nhwc_shapes_.push_back(nhwc_shape);
-    if (in_tensor_->category() != lite::Tensor::Category::CONST || in_tensor_->data_c() == nullptr) {
+    if (!in_tensor_->IsConst()) {
       inputs_weight_ptrs_.push_back(nullptr);
     } else {
       auto allocator = ocl_runtime_->GetAllocator();

@@ -63,7 +63,7 @@ int ScaleOpenCLKernel::InitBuffer() {
   if (!element_flag_) {
     return RET_OK;
   }
-  if (in_tensors_[1]->category() == lite::Tensor::Category::CONST && in_tensors_[1]->data_c() != nullptr) {
+  if (in_tensors_[1]->IsConst()) {
     auto allocator = ocl_runtime_->GetAllocator();
     std::vector<size_t> img_size;
     GetImageSize(0, &img_size);

@@ -209,7 +209,7 @@ int SubGraphOpenCLKernel::MallocTensorWithReuse() {
       std::vector<size_t> img_size;
       op_kernel->GetImageSize(i, &img_size);
       auto data_ptr = allocator_->Malloc(output->Size(), img_size);
-      output->SetData(data_ptr);
+      output->set_data(data_ptr);
     } else {
       output->MallocData(allocator_);
     }

@@ -46,7 +46,7 @@ int OpenCLExecutor::Run(std::vector<Tensor *> &inputs, std::vector<Tensor *> &ou
       std::vector<size_t> img_size;
       op_kernel->GetImageSize(i, &img_size);
       auto data_ptr = allocator_->Malloc(output->Size(), img_size);
-      output->SetData(data_ptr);
+      output->set_data(data_ptr);
     } else {
       output->MallocData(allocator_);
     }

@@ -315,7 +315,7 @@ void Scheduler::SetKernelTensorDataType(kernel::LiteKernel *kernel) {
     }
   } else if (kernel->desc().data_type == kNumberTypeFloat32) {
     for (auto tensor : kernel->in_tensors()) {
-      if (tensor->category() != Tensor::Category::CONST && tensor->data_type() == kNumberTypeFloat16) {
+      if (!tensor->IsConst() && tensor->data_type() == kNumberTypeFloat16) {
        tensor->set_data_type(kNumberTypeFloat32);
      }
    }

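The IsConst hunks above (lite_kernel.cc, the two OpenCL kernels, and the scheduler) all replace the same hand-written predicate. The helper, defined in the tensor.cc hunk below, also counts the new scalar category as constant:

```cpp
// Old, spelled out at each call site:
//   tensor->category() == lite::Tensor::Category::CONST && tensor->data_c() != nullptr
// New, centralized in Tensor (and CONST_SCALAR-aware):
bool Tensor::IsConst() {
  return (this->category_ == CONST_TENSOR || this->category_ == CONST_SCALAR) && this->data_ != nullptr;
}
```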
@@ -54,7 +54,7 @@ int Tensor::CopyTensorData(const Tensor &srcTensor) {
     }
   }
   memcpy(this->data_, srcTensor.data_, data_size);
-  return 0;
+  return RET_OK;
 }
 int Tensor::CopyTensor(const Tensor &srcTensor, bool copyData) {
@@ -69,7 +69,7 @@ int Tensor::CopyTensor(const Tensor &srcTensor, bool copyData) {
       return RET_ERROR;
     }
   }
-  return 0;
+  return RET_OK;
 }
 Tensor::~Tensor() {
@@ -102,7 +102,7 @@ bool Tensor::operator==(const Tensor &tensor) {
 int32_t Tensor::Batch() const {
   if (this->shape_.size() != 4 && this->shape_.size() != 2) {
     MS_LOG(ERROR) << "Unsupported tensor shape: " << this->shape().size();
-    return -1;
+    return RET_ERROR;
   }
   switch (this->format_) {
     case schema::Format::Format_NHWC:
@@ -123,14 +123,14 @@ int32_t Tensor::Batch() const {
       return this->shape_[1];
     default:
       MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_);
-      return -1;
+      return RET_ERROR;
   }
 }
 int32_t Tensor::Channel() const {
   if (this->shape_.size() != 4 && this->shape_.size() != 2) {
     MS_LOG(ERROR) << "Unsupported tensor shape: " << this->shape().size();
-    return -1;
+    return RET_ERROR;
   }
   switch (this->format_) {
     case schema::Format::Format_NCHW:
@@ -150,14 +150,14 @@ int32_t Tensor::Channel() const {
     case schema::Format::Format_CHWK:
       return this->shape_[0];
     default:
-      return -1;
+      return RET_ERROR;
   }
 }
 int32_t Tensor::Height() const {
   if (this->shape_.size() != 4 && this->shape_.size() != 2) {
     MS_LOG(ERROR) << "Unsupported tensor shape: " << this->shape().size();
-    return -1;
+    return RET_ERROR;
   }
   switch (this->format_) {
     case schema::Format::Format_NCHW:
@@ -177,7 +177,7 @@ int32_t Tensor::Height() const {
       return this->shape_[0];
     default:
       MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_);
-      return -1;
+      return RET_ERROR;
   }
 }
@@ -203,11 +203,28 @@ int32_t Tensor::Width() const {
     case schema::Format::Format_HW4:
       return this->shape_[1];
     default:
-      return -1;
+      return RET_ERROR;
   }
 }
+size_t Tensor::Size() const {
+  size_t size = DataTypeSize(this->data_type_);
+  size *= (format_ == schema::Format::Format_NC4HW4 || format_ == schema::Format::Format_NHWC4) ? ElementsC4Num()
+                                                                                                : ElementsNum();
+  return size;
+}
+int Tensor::ElementsNum() const {
+  if (this->category_ == CONST_SCALAR) {
+    return 1;
+  }
+  return std::accumulate(shape_.begin(), shape_.end(), 1LL, std::multiplies<int>());
+}
 int32_t Tensor::ElementsC4Num() const {
+  if (this->category_ == CONST_SCALAR) {
+    return 1;
+  }
   int32_t result = 0;
   if (this->shape_.size() == 4) {
     result = Batch() * Height() * Width() * ((Channel() + 3) / 4 * 4);
@@ -217,6 +234,16 @@ int32_t Tensor::ElementsC4Num() const {
   return result;
 }
+int Tensor::DimensionSize(size_t index) const {
+  int dim_size = -1;
+  if (index < shape_.size()) {
+    dim_size = shape_[index];
+  } else {
+    MS_LOG(ERROR) << "Dimension index is wrong: " << index;
+  }
+  return dim_size;
+}
 std::string Tensor::ToString() const {
   std::ostringstream oss;
   oss << "schema::Format: " << EnumNameFormat(this->format_);
@@ -287,7 +314,7 @@ std::string Tensor::ToString() const {
 int Tensor::MallocData(mindspore::lite::Allocator *allocator) {
   if (nullptr != this->data_) {
-    return 0;
+    return RET_OK;
   }
   if (allocator != nullptr) {
     allocator_ = allocator;
@@ -299,15 +326,15 @@ int Tensor::MallocData(mindspore::lite::Allocator *allocator) {
   }
   if (nullptr == this->data_) {
     MS_LOG(ERROR) << "Malloc tensor data failed, size=" << this->Size();
-    return -1;
+    return RET_ERROR;
   }
-  return 0;
+  return RET_OK;
 }
 int Tensor::FreeData() {
   if (nullptr == this->data_) {
-    return 0;
+    return RET_OK;
   }
   if (nullptr == allocator_) {
     free(this->data_);
@@ -316,7 +343,7 @@ int Tensor::FreeData() {
     allocator_->Free(this->data_);
     this->data_ = nullptr;
   }
-  return 0;
+  return RET_OK;
 }
 void *Tensor::MutableData() {
@@ -330,6 +357,12 @@ void *Tensor::MutableData() {
   return this->data_;
 }
+bool Tensor::IsConst() {
+  return (this->category_ == CONST_TENSOR || this->category_ == CONST_SCALAR) && this->data_ != nullptr;
+}
+bool Tensor::IsScalar() { return this->category_ == CONST_SCALAR && this->data_ != nullptr; }
 void Tensor::AddQuantParam(const QuantArg &quant_arg) { this->quant_params_.push_back(quant_arg); }
 std::vector<QuantArg> Tensor::GetQuantParams() const { return this->quant_params_; }

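Net effect of the tensor.cc changes above: a CONST_SCALAR tensor keeps an empty shape but still reports one element and a nonzero byte size. A hedged example (constructor arguments as used elsewhere in this diff):

```cpp
lite::Tensor scalar(kNumberTypeFloat32, /*shape=*/{}, schema::Format::Format_NHWC,
                    lite::Tensor::Category::CONST_SCALAR);
// scalar.ElementsNum() == 1       -- special-cased for CONST_SCALAR
// scalar.Size() == sizeof(float)  -- DataTypeSize(kNumberTypeFloat32) * 1
// scalar.IsScalar() only becomes true once data is attached, since it also
// checks data_ != nullptr.
```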
@@ -42,8 +42,9 @@ struct QuantArg {
 class Tensor : public mindspore::tensor::MSTensor {
  public:
   enum Category {
-    CONST,  // weight tensor
-    VAR     // activation tensor
+    CONST_TENSOR,  // weight tensor
+    CONST_SCALAR,  // weight scalar
+    VAR            // activation tensor
   };
   Tensor() = default;
@@ -70,19 +71,9 @@ class Tensor : public mindspore::tensor::MSTensor {
   void set_shape(const std::vector<int> &shape) { shape_ = shape; }
-  int DimensionSize(size_t index) const override {
-    int dim_size = -1;
-    if (index < shape_.size()) {
-      dim_size = shape_[index];
-    } else {
-      MS_LOG(ERROR) << "Dimension index is wrong: " << index;
-    }
-    return dim_size;
-  }
+  int DimensionSize(size_t index) const override;
-  int ElementsNum() const override {
-    return std::accumulate(shape_.begin(), shape_.end(), 1LL, std::multiplies<int>());
-  }
+  int ElementsNum() const override;
   int32_t Batch() const;
@@ -94,58 +85,7 @@ class Tensor : public mindspore::tensor::MSTensor {
   int32_t ElementsC4Num() const;
-  size_t Size() const override {
-    size_t size = 0;
-    switch (this->data_type_) {
-      case kNumberTypeFloat64:
-        size = sizeof(double);
-        break;
-      case kNumberTypeFloat:
-      case kNumberTypeFloat32:
-        size = sizeof(float);
-        break;
-      case kNumberTypeInt8:
-        size = sizeof(int8_t);
-        break;
-      case kNumberTypeUInt8:
-        size = sizeof(uint8_t);
-        break;
-      case kNumberTypeFloat16:
-        size = sizeof(int16_t);
-        break;
-      case kNumberTypeInt16:
-        size = sizeof(int16_t);
-        break;
-      case kNumberTypeInt32:
-        size = sizeof(int32_t);
-        break;
-      case kNumberTypeInt64:
-        size = sizeof(int64_t);
-        break;
-      case kNumberTypeUInt16:
-        size = sizeof(uint16_t);
-        break;
-      case kNumberTypeUInt32:
-        size = sizeof(uint32_t);
-        break;
-      case kNumberTypeUInt64:
-        size = sizeof(uint64_t);
-        break;
-      case kNumberTypeBool:
-        size = sizeof(bool);
-        break;
-      case kObjectTypeString:
-        size = sizeof(char);
-        break;
-      default:
-        MS_LOG(ERROR) << "Not support the type: " << this->data_type_;
-        return 0;
-    }
-    size *= (format_ == schema::Format::Format_NC4HW4 || format_ == schema::Format::Format_NHWC4) ? ElementsC4Num()
-                                                                                                  : ElementsNum();
-    return size;
-  }
+  size_t Size() const override;
   void set_allocator(mindspore::lite::Allocator *allocator) { allocator_ = allocator; }
@@ -157,7 +97,7 @@ class Tensor : public mindspore::tensor::MSTensor {
   void *data_c() const { return data_; }
-  void SetData(void *data) { this->data_ = data; }
+  void set_data(void *data) { this->data_ = data; }
   Category category() { return this->category_; }
@@ -165,11 +105,11 @@ class Tensor : public mindspore::tensor::MSTensor {
   schema::Format GetFormat() { return this->format_; }
-  size_t RefCount() { return this->refCount; }
+  size_t ref_count() { return this->ref_count_; }
-  void SetRefCount(size_t refCount) { this->refCount = refCount; }
+  void set_ref_count(size_t ref_count) { this->ref_count_ = ref_count; }
-  void decRefCount() { this->refCount--; }
+  void DecRefCount() { this->ref_count_--; }
   std::string ToString() const;
@@ -177,6 +117,10 @@ class Tensor : public mindspore::tensor::MSTensor {
   std::vector<QuantArg> GetQuantParams() const;
+  bool IsConst();
+  bool IsScalar();
   void Prepare() {
     if (allocator_ != nullptr) {
       data_ = allocator_->Prepare(data_);
@@ -190,17 +134,63 @@ class Tensor : public mindspore::tensor::MSTensor {
   std::vector<int> shape_;
   schema::Format format_;
   Category category_;
-  size_t refCount = 0;
+  size_t ref_count_ = 0;
   std::vector<QuantArg> quant_params_;
   mindspore::lite::Allocator *allocator_ = nullptr;
 };
-inline Tensor::Category TensorCategory(const schema::Tensor *tensor) {
-  return (tensor->nodeType() == schema::NodeType::NodeType_ValueNode) ? Tensor::Category::CONST : Tensor::Category::VAR;
-}
+inline size_t DataTypeSize(const TypeId type) {
+  switch (type) {
+    case kNumberTypeFloat64:
+      return sizeof(double);
+    case kNumberTypeFloat:
+    case kNumberTypeFloat32:
+      return sizeof(float);
+    case kNumberTypeInt8:
+      return sizeof(int8_t);
+    case kNumberTypeUInt8:
+      return sizeof(uint8_t);
+    case kNumberTypeFloat16:
+    case kNumberTypeInt16:
+      return sizeof(int16_t);
+    case kNumberTypeInt32:
+      return sizeof(int32_t);
+    case kNumberTypeInt64:
+      return sizeof(int64_t);
+    case kNumberTypeUInt16:
+      return sizeof(uint16_t);
+    case kNumberTypeUInt32:
+      return sizeof(uint32_t);
+    case kNumberTypeUInt64:
+      return sizeof(uint64_t);
+    case kNumberTypeBool:
+      return sizeof(bool);
+    case kObjectTypeString:
+      return sizeof(char);
+    default:
+      MS_LOG(ERROR) << "Not support the type: " << type;
+      return 0;
+  }
+}
+inline Tensor::Category TensorCategory(const schema::NodeType node_type, const size_t shape_num, const TypeId data_type,
+                                       const size_t data_size) {
+  return (node_type == schema::NodeType::NodeType_ValueNode)
+           ? (shape_num == 0 && data_size == DataTypeSize(data_type) ? Tensor::Category::CONST_SCALAR
+                                                                     : Tensor::Category::CONST_TENSOR)
+           : Tensor::Category::VAR;
+}
-inline Tensor::Category TensorCategory(const schema::NodeType type) {
-  return (type == schema::NodeType::NodeType_ValueNode) ? Tensor::Category::CONST : Tensor::Category::VAR;
-}
+inline Tensor::Category TensorCategory(const schema::Tensor *tensor) {
+  if (tensor == nullptr) {
+    MS_LOG(ERROR) << "tensor is nullptr";
+    return Tensor::VAR;
+  }
+  auto shape_num = tensor->dims() == nullptr ? 0 : tensor->dims()->size();
+  auto data_size = tensor->data() == nullptr ? 0 : tensor->data()->size();
+  return TensorCategory(tensor->nodeType(), shape_num, TypeId(tensor->dataType()), data_size);
+}
 std::vector<tensor::MSTensor *> TensorVectorCast(const std::vector<Tensor *> &src);
 }  // namespace lite
 }  // namespace mindspore

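With the per-type switch factored out into DataTypeSize(), Size() reduces to element size times element count. A quick hedged check of the arithmetic:

```cpp
// Assuming the definitions above: a {2, 3} fp32 tensor occupies
// DataTypeSize(kNumberTypeFloat32) * ElementsNum() = 4 * 6 = 24 bytes;
// NC4HW4/NHWC4 formats substitute ElementsC4Num() for the element count.
lite::Tensor t(kNumberTypeFloat32, {2, 3}, schema::Format::Format_NHWC, lite::Tensor::Category::VAR);
// t.Size() == 24
```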
@@ -49,8 +49,8 @@ TEST_F(TestStridedSlice, StridedSlice) {
   lite::Tensor out_tensor(kNumberTypeFloat32, {1, 1, 2});
   float input_data[] = {0.2390374, 0.92039955, 0.05051243, 0.49574447, 0.8355223, 0.02647042, 0.08811307, 0.4566604};
   float output_data[2] = {0};
-  in_tensor.SetData(input_data);
-  out_tensor.SetData(output_data);
+  in_tensor.set_data(input_data);
+  out_tensor.set_data(output_data);
   std::vector<lite::Tensor *> inputs = {&in_tensor};
   std::vector<lite::Tensor *> outputs = {&out_tensor};
@@ -73,8 +73,8 @@ TEST_F(TestStridedSlice, StridedSlice) {
   float expect[2] = {0.2390374, 0.05051243};
   CompareOutputData(output_data, expect, 2, 0.000001);
-  in_tensor.SetData(nullptr);
-  out_tensor.SetData(nullptr);
+  in_tensor.set_data(nullptr);
+  out_tensor.set_data(nullptr);
 }
 TEST_F(TestStridedSlice, StridedSliceInt8) {
@@ -82,8 +82,8 @@ TEST_F(TestStridedSlice, StridedSliceInt8) {
   lite::Tensor out_tensor(kNumberTypeInt8, {2, 3, 4});
   int8_t input_data[] = {-12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
   int8_t output_data[4] = {0};
-  in_tensor.SetData(input_data);
-  out_tensor.SetData(output_data);
+  in_tensor.set_data(input_data);
+  out_tensor.set_data(output_data);
   std::vector<lite::Tensor *> inputs = {&in_tensor};
   std::vector<lite::Tensor *> outputs = {&out_tensor};
@@ -121,7 +121,7 @@ TEST_F(TestStridedSlice, StridedSliceInt8) {
     EXPECT_EQ(output_data[i], expect[i]);
   }
-  in_tensor.SetData(nullptr);
-  out_tensor.SetData(nullptr);
+  in_tensor.set_data(nullptr);
+  out_tensor.set_data(nullptr);
 }
 }  // namespace mindspore

@@ -43,8 +43,8 @@ class TestReduceFp16 : public mindspore::CommonTest {
 };
 void TestReduceFp16::TearDown() {
-  in_tensor_.SetData(nullptr);
-  out_tensor_.SetData(nullptr);
+  in_tensor_.set_data(nullptr);
+  out_tensor_.set_data(nullptr);
 }
 void TestReduceFp16::Prepare(const std::vector<int> &input_shape, const std::vector<int> &output_shape,
@@ -54,8 +54,8 @@ void TestReduceFp16::Prepare(const std::vector<int> &input_shape, const std::vec
   in_tensor_.set_shape(input_shape);
   out_tensor_.set_data_type(kNumberTypeFloat32);
   out_tensor_.set_shape(output_shape);
-  in_tensor_.SetData(input_data);
-  out_tensor_.SetData(output_data);
+  in_tensor_.set_data(input_data);
+  out_tensor_.set_data(output_data);
   bool keep_axis = false;

Some files were not shown because too many files have changed in this diff.