!9391 [MSLITE][Develop] fix code review

From: @sunsuodong
Reviewed-by: @zhanghaibo5, @zhang_xue_tong
Signed-off-by: @zhang_xue_tong
pull/9391/MERGE
Committed by mindspore-ci-bot via Gitee
commit acdcd32fb7

@@ -19,13 +19,16 @@
 typedef struct DepthToSpaceParameter {
   OpParameter op_parameter_;
+  // primitive parameter
   int32_t block_size_;
+  // shape correlative
   int32_t in_stride_dim0_;
   int32_t in_stride_dim1_;
   int32_t in_stride_dim2_;
   int32_t out_stride_dim0_;
   int32_t out_stride_dim1_;
   int32_t out_stride_dim2_;
+  // other parameter
   uint8_t data_type_size_;
 } DepthToSpaceParameter;

@@ -71,7 +71,7 @@ int Assign::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Ten
     return RET_ERROR;
   }
-  if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum()) {
+  if (inputs.at(0)->ElementsNum() != inputs.at(1)->ElementsNum()) {
     MS_LOG(ERROR) << "error input data size!";
     return RET_ERROR;
   }
@@ -79,8 +79,8 @@ int Assign::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Ten
   if (!outputs.empty()) {
     auto *out = outputs.front();
     MS_ASSERT(out != nullptr);
-    out->set_data_type(inputs[0]->data_type());
-    out->set_format(inputs[0]->format());
+    out->set_data_type(inputs.at(0)->data_type());
+    out->set_format(inputs.at(0)->format());
     out->set_shape({1});
   }
   return RET_OK;
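Note (reviewer annotation, not part of the diff): the recurring operator[] -> at() change throughout this patch swaps unchecked std::vector element access for bounds-checked access. A minimal standalone sketch of the difference, with illustrative data:

#include <iostream>
#include <stdexcept>
#include <vector>

int main() {
  std::vector<int> inputs = {1, 2, 3};
  // operator[] performs no bounds check; an out-of-range index is undefined behavior.
  // at() checks the index and throws std::out_of_range, so the mistake is observable.
  try {
    std::cout << inputs.at(10) << std::endl;
  } catch (const std::out_of_range &e) {
    std::cout << "caught: " << e.what() << std::endl;
  }
  return 0;
}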

@@ -73,16 +73,16 @@ Registry AssignAddRegistry(schema::PrimitiveType_AssignAdd, AssignAddCreator);
 #endif
 int AssignAdd::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  Tensor *x = inputs_[0];
-  Tensor *y = inputs_[1];
-  Tensor *out = outputs_[0];
+  Tensor *x = inputs_.at(0);
+  Tensor *y = inputs_.at(1);
+  Tensor *out = outputs_.at(0);
   std::vector<int> x_shape = x->shape();
   if (x->data_type() != y->data_type()) {
     MS_LOG(ERROR) << "no matched shape of x and y";
     return RET_ERROR;
   }
   std::vector<int> output_shape(x_shape.size());
-  for (int i = 0; i < static_cast<int>(x_shape.size()); i++) {
+  for (size_t i = 0; i < x_shape.size(); i++) {
     output_shape[i] = x_shape[i];
   }
   out->set_shape(output_shape);
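Note (reviewer annotation, not part of the diff): the loop-counter change from int plus a static_cast to size_t matches the unsigned type returned by std::vector::size(), so the copy loop needs no cast and no signed/unsigned comparison. A minimal sketch of the pattern, with an illustrative helper name:

#include <cstddef>
#include <vector>

// Copies a shape vector element by element; size_t matches x_shape.size().
std::vector<int> CopyShape(const std::vector<int> &x_shape) {
  std::vector<int> output_shape(x_shape.size());
  for (size_t i = 0; i < x_shape.size(); i++) {
    output_shape[i] = x_shape[i];
  }
  return output_shape;
}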

@@ -100,8 +100,8 @@ PrimitiveC *BinaryCrossEntropyCreator(const schema::Primitive *primitive) {
 Registry BinaryCrossEntropyRegistry(schema::PrimitiveType_BinaryCrossEntropy, BinaryCrossEntropyCreator);
 #endif
 int BinaryCrossEntropy::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  Tensor *x = inputs_[0];
-  Tensor *out = outputs_[0];
+  Tensor *x = inputs_.at(0);
+  Tensor *out = outputs_.at(0);
   out->set_format(x->format());
   out->set_data_type(x->data_type());
   int reduction = GetReduction();

@@ -89,10 +89,10 @@ void ConvertConvWeight(const ParameterPtr &param_node) {
     return;
   }
-  size_t filter_k = weight->tensor_shape()[0];
-  size_t filter_c = weight->tensor_shape()[1];
-  size_t filter_h = weight->tensor_shape()[2];
-  size_t filter_w = weight->tensor_shape()[3];
+  size_t filter_k = weight->tensor_shape().at(0);
+  size_t filter_c = weight->tensor_shape().at(1);
+  size_t filter_h = weight->tensor_shape().at(2);
+  size_t filter_w = weight->tensor_shape().at(3);
   T *p1Buff = nullptr;
   T *p2Buff = nullptr;
   for (size_t k = 0; k < filter_k; ++k) {
@@ -145,26 +145,26 @@ void Conv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT
     attr->format = schema::Format::Format_NUM_OF_FORMAT;
   }
   auto pad_list = CastToInt(prim.GetAttr("pad_list"));
-  attr->padUp = pad_list[0];
-  attr->padDown = pad_list[1];
-  attr->padLeft = pad_list[2];
-  attr->padRight = pad_list[3];
+  attr->padUp = pad_list.at(0);
+  attr->padDown = pad_list.at(1);
+  attr->padLeft = pad_list.at(2);
+  attr->padRight = pad_list.at(3);
   auto dilation = CastToInt(prim.GetAttr("dilation"));
 #ifdef SUPPORT_TRAIN
-  attr->dilateH = dilation[2];
-  attr->dilateW = dilation[3];
+  attr->dilateH = dilation.at(2);
+  attr->dilateW = dilation.at(3);
 #else
-  attr->dilateH = dilation[0];
-  attr->dilateW = dilation[1];
+  attr->dilateH = dilation.at(0);
+  attr->dilateW = dilation.at(1);
 #endif
   auto kernel_size = CastToInt(prim.GetAttr("kernel_size"));
-  attr->kernelH = kernel_size[0];
-  attr->kernelW = kernel_size[1];
+  attr->kernelH = kernel_size.at(0);
+  attr->kernelW = kernel_size.at(1);
   auto stride = CastToInt(prim.GetAttr("stride"));
-  attr->strideH = stride[2];
-  attr->strideW = stride[3];
+  attr->strideH = stride.at(2);
+  attr->strideW = stride.at(3);
   auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode"));
   if (pad_mode == "valid") {
@@ -229,22 +229,22 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive
     attr->format = schema::Format::Format_NUM_OF_FORMAT;
   }
   auto pad_list = CastToInt(prim.GetAttr("pad_list"));
-  attr->padUp = pad_list[0];
-  attr->padDown = pad_list[1];
-  attr->padLeft = pad_list[2];
-  attr->padRight = pad_list[3];
+  attr->padUp = pad_list.at(0);
+  attr->padDown = pad_list.at(1);
+  attr->padLeft = pad_list.at(2);
+  attr->padRight = pad_list.at(3);
   auto dilation = CastToInt(prim.GetAttr("dilation"));
-  attr->dilateH = dilation[2];
-  attr->dilateW = dilation[3];
+  attr->dilateH = dilation.at(2);
+  attr->dilateW = dilation.at(3);
   auto kernel_size = CastToInt(prim.GetAttr("kernel_size"));
-  attr->kernelH = kernel_size[0];
-  attr->kernelW = kernel_size[1];
+  attr->kernelH = kernel_size.at(0);
+  attr->kernelW = kernel_size.at(1);
   auto stride = CastToInt(prim.GetAttr("stride"));
-  attr->strideH = stride[2];
-  attr->strideW = stride[3];
+  attr->strideH = stride.at(2);
+  attr->strideW = stride.at(3);
   attr->channelOut = CastToInt(prim.GetAttr("out_channel")).front();

@@ -82,10 +82,10 @@ void ConvertConvWeight(const ParameterPtr &param_node) {
     return;
   }
-  size_t filter_k = weight->tensor_shape()[0];
-  size_t filter_c = weight->tensor_shape()[1];
-  size_t filter_h = weight->tensor_shape()[2];
-  size_t filter_w = weight->tensor_shape()[3];
+  size_t filter_k = weight->tensor_shape().at(0);
+  size_t filter_c = weight->tensor_shape().at(1);
+  size_t filter_h = weight->tensor_shape().at(2);
+  size_t filter_w = weight->tensor_shape().at(3);
   T *p1Buff = nullptr;
   T *p2Buff = nullptr;
   for (size_t k = 0; k < filter_k; ++k) {
@@ -137,22 +137,22 @@ void DeConv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::Primitiv
     attr->format = schema::Format::Format_NUM_OF_FORMAT;
   }
   auto pad_list = CastToInt(prim.GetAttr("pad_list"));
-  attr->padUp = pad_list[0];
-  attr->padDown = pad_list[1];
-  attr->padLeft = pad_list[2];
-  attr->padRight = pad_list[3];
+  attr->padUp = pad_list.at(0);
+  attr->padDown = pad_list.at(1);
+  attr->padLeft = pad_list.at(2);
+  attr->padRight = pad_list.at(3);
   auto dilation = CastToInt(prim.GetAttr("dilation"));
-  attr->dilateH = dilation[0];
-  attr->dilateW = dilation[1];
+  attr->dilateH = dilation.at(0);
+  attr->dilateW = dilation.at(1);
   auto kernel_size = CastToInt(prim.GetAttr("kernel_size"));
-  attr->kernelH = kernel_size[0];
-  attr->kernelW = kernel_size[1];
+  attr->kernelH = kernel_size.at(0);
+  attr->kernelW = kernel_size.at(1);
   auto stride = CastToInt(prim.GetAttr("stride"));
-  attr->strideH = stride[0];
-  attr->strideW = stride[1];
+  attr->strideH = stride.at(0);
+  attr->strideW = stride.at(1);
   auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode"));
   if (pad_mode == "valid") {
@@ -204,22 +204,22 @@ void DeConv2D::PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::Primi
     attr->format = schema::Format_NUM_OF_FORMAT;
   }
   auto pad_list = CastToInt(prim.GetAttr("pad_list"));
-  attr->padUp = pad_list[0];
-  attr->padDown = pad_list[1];
-  attr->padLeft = pad_list[2];
-  attr->padRight = pad_list[3];
+  attr->padUp = pad_list.at(0);
+  attr->padDown = pad_list.at(1);
+  attr->padLeft = pad_list.at(2);
+  attr->padRight = pad_list.at(3);
   auto dilation = CastToInt(prim.GetAttr("dilation"));
-  attr->dilateH = dilation[0];
-  attr->dilateW = dilation[1];
+  attr->dilateH = dilation.at(0);
+  attr->dilateW = dilation.at(1);
   auto kernel_size = CastToInt(prim.GetAttr("kernel_size"));
-  attr->kernelH = kernel_size[0];
-  attr->kernelW = kernel_size[1];
+  attr->kernelH = kernel_size.at(0);
+  attr->kernelW = kernel_size.at(1);
   auto stride = CastToInt(prim.GetAttr("stride"));
-  attr->strideH = stride[0];
-  attr->strideW = stride[1];
+  attr->strideH = stride.at(0);
+  attr->strideW = stride.at(1);
   attr->channelOut = CastToInt(prim.GetAttr("out_channel")).front();

@@ -87,19 +87,19 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNode
     attr->format = schema::Format::Format_NUM_OF_FORMAT;
   }
   auto pad_list = CastToInt(prim.GetAttr("pads"));
-  attr->padUp = pad_list[0];
-  attr->padDown = pad_list[1];
-  attr->padLeft = pad_list[2];
-  attr->padRight = pad_list[3];
+  attr->padUp = pad_list.at(0);
+  attr->padDown = pad_list.at(1);
+  attr->padLeft = pad_list.at(2);
+  attr->padRight = pad_list.at(3);
   auto dilation = CastToInt(prim.GetAttr("dilation"));
-  attr->dilateH = dilation[0];
-  attr->dilateW = dilation[1];
+  attr->dilateH = dilation.at(0);
+  attr->dilateW = dilation.at(1);
   if (utils::isa<ValueSequeue>(prim.GetAttr("kernel_size"))) {
     auto kernel_size = CastToInt(prim.GetAttr("kernel_size"));
-    attr->kernelH = kernel_size[0];
-    attr->kernelW = kernel_size[1];
+    attr->kernelH = kernel_size.at(0);
+    attr->kernelW = kernel_size.at(1);
   } else {
     auto kernel_size = CastToInt(prim.GetAttr("kernel_size")).front();
     attr->kernelH = kernel_size;
@@ -107,8 +107,8 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNode
   }
   auto stride = CastToInt(prim.GetAttr("stride"));
-  attr->strideH = stride[2];
-  attr->strideW = stride[3];
+  attr->strideH = stride.at(2);
+  attr->strideW = stride.at(3);
   auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode"));
   if (pad_mode == "valid") {
@@ -252,11 +252,11 @@ int DepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector
   std::vector<int> out_shape{input->shape()};
   out_shape.at(1) = output_h;
   out_shape.at(2) = output_w;
-  if (GetChannelMultiplier() * input_channel != weight->shape()[0]) {
+  if (GetChannelMultiplier() * input_channel != weight->shape().at(0)) {
     MS_LOG(ERROR) << "Conv depthwise only support group equals output channel.";
     return 1;
   }
-  out_shape.at(3) = weight->shape()[0] * weight->shape()[3];  // in_channel * out_channel
+  out_shape.at(3) = weight->shape().at(0) * weight->shape().at(3);  // in_channel * out_channel
   output->set_shape(out_shape);
   return 0;

@@ -38,7 +38,7 @@ OpParameter *PopulateConstantOfShapeParameter(const mindspore::lite::PrimitiveC
   if (value.empty() || value.size() > 1) {
     MS_LOG(ERROR) << "The value of constant of shape is empty or more than 1.";
   } else {
-    param->value_ = attr->GetValue()[0];
+    param->value_ = attr->GetValue().at(0);
   }
   param->data_type_ = attr->GetDataType();
   return reinterpret_cast<OpParameter *>(param);

@@ -304,7 +304,7 @@ int StridedSlice::HandleAxesInputExist(const std::vector<lite::Tensor *> &inputs
   std::vector<int> axes;
   if (axes_data == nullptr) {
     for (int i = 0; i < begin_ndim; ++i) {
-      axes[i] = i;
+      axes.push_back(i);
     }
   } else {
     axes.assign(axes_data, axes_data + begin_ndim);
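Note (reviewer annotation, not part of the diff): this hunk fixes a real bug rather than a style issue. axes is declared empty, so axes[i] = i writes past the end of the vector (undefined behavior); push_back grows the vector before each write. A minimal sketch with an illustrative helper name:

#include <vector>

// Builds the default axes list 0..begin_ndim-1 when no axes input is given.
std::vector<int> DefaultAxes(int begin_ndim) {
  std::vector<int> axes;
  axes.reserve(begin_ndim);
  for (int i = 0; i < begin_ndim; ++i) {
    axes.push_back(i);  // operator[] on the empty vector here would be out of bounds
  }
  return axes;
}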

@@ -59,7 +59,7 @@ int ArgMinMaxBaseCPUKernel::ReSize() {
     MS_LOG(ERROR) << "Invalid topk " << param->topk_;
     return RET_PARAM_INVALID;
   }
-  param->topk_ = MSMIN(param->topk_, in_shape[axis]);
+  param->topk_ = MSMIN(param->topk_, in_shape.at(axis));
   ComputeStrides(in_shape.data(), param->in_strides_, in_shape.size());
   auto out_shape = out_tensors_.at(0)->shape();
   ComputeStrides(out_shape.data(), param->out_strides_, out_shape.size());

@@ -30,7 +30,7 @@ using mindspore::schema::PrimitiveType_BatchToSpaceND;
 namespace mindspore::kernel {
 int BatchToSpaceBaseCPUKernel::Init() {
-  if (in_tensors_[0]->format() != schema::Format::Format_NHWC) {
+  if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) {
     MS_LOG(ERROR) << "batch_to_space only support NHWC now!";
     return RET_FORMAT_ERR;
   }
@@ -44,7 +44,7 @@ int BatchToSpaceBaseCPUKernel::Init() {
 }
 int BatchToSpaceBaseCPUKernel::ReSize() {
-  auto shape = in_tensors_[0]->shape();
+  auto shape = in_tensors_.at(0)->shape();
   if (shape.size() != 4) {
     MS_LOG(ERROR) << "Unsupport shape size: " << shape.size();
     return RET_ERROR;

@@ -33,7 +33,7 @@ namespace mindspore::kernel {
 int DepthToSpaceBaseCPUKernel::Init() { return RET_OK; }
 int DepthToSpaceBaseCPUKernel::ReSize() {
-  if (in_tensors_[0]->format() != schema::Format::Format_NHWC) {
+  if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) {
     MS_LOG(ERROR) << "depth_to_space only support NHWC now!";
     return RET_FORMAT_ERR;
   }
@@ -42,18 +42,18 @@ int DepthToSpaceBaseCPUKernel::ReSize() {
     MS_LOG(ERROR) << "Input block_size should > 0!";
     return RET_PARAM_INVALID;
   }
-  auto shape_size = in_tensors_[0]->shape().size();
+  auto shape_size = in_tensors_.at(0)->shape().size();
   if (shape_size != DIMENSION_4D) {
     MS_LOG(ERROR) << "Input shape size should be " << DIMENSION_4D;
     return RET_PARAM_INVALID;
   }
   int32_t in_strides[DIMENSION_4D];
-  ComputeStrides(const_cast<int *>(in_tensors_[0]->shape().data()), in_strides, shape_size);
+  ComputeStrides(const_cast<int *>(in_tensors_.at(0)->shape().data()), in_strides, shape_size);
   param->in_stride_dim0_ = in_strides[0];
   param->in_stride_dim1_ = in_strides[1];
   param->in_stride_dim2_ = in_strides[2];
   int32_t out_strides[DIMENSION_4D];
-  ComputeStrides(const_cast<int *>(out_tensors_[0]->shape().data()), out_strides, shape_size);
+  ComputeStrides(const_cast<int *>(out_tensors_.at(0)->shape().data()), out_strides, shape_size);
   param->out_stride_dim0_ = out_strides[0];
   param->out_stride_dim1_ = out_strides[1];
   param->out_stride_dim2_ = out_strides[2];
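Note (reviewer annotation, not part of the diff): the kernel stores only the first three strides of each 4D NHWC shape in DepthToSpaceParameter. A minimal sketch of row-major stride computation, assuming ComputeStrides follows the usual convention (the real helper lives in the shared nnacl code):

#include <cstddef>
#include <cstdint>

// Row-major strides: strides[i] is the product of all dimensions to the right of i.
// For an NHWC shape {N, H, W, C} this yields {H*W*C, W*C, C, 1}.
void ComputeStridesSketch(const int *shape, int32_t *strides, size_t ndim) {
  int32_t stride = 1;
  for (size_t i = ndim; i > 0; --i) {
    strides[i - 1] = stride;
    stride *= shape[i - 1];
  }
}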

@@ -44,8 +44,8 @@ class DequantUtil {
       return nullptr;
     }
     if (input_tensor->shape().size() == kPerBatch &&
-        input_tensor->quant_params().size() == static_cast<size_t>(input_tensor->shape()[0])) {  // per batch matmul
-      auto per_batch_size = input_tensor->shape()[0];
+        input_tensor->quant_params().size() == static_cast<size_t>(input_tensor->shape().at(0))) {  // per batch matmul
+      auto per_batch_size = input_tensor->shape().at(0);
       auto quant_param = input_tensor->quant_params();
       for (int i = 0; i < per_batch_size; i++) {
         auto param = quant_param.at(i);

@@ -156,8 +156,8 @@ int DetectionPostProcessBaseCPUKernel::Run() {
   auto output_scores = reinterpret_cast<float *>(out_tensors_.at(2)->MutableData());
   auto output_num = reinterpret_cast<float *>(out_tensors_.at(3)->MutableData());
-  num_boxes_ = in_tensors_.at(0)->shape()[1];
-  num_classes_with_bg_ = in_tensors_.at(1)->shape()[2];
+  num_boxes_ = in_tensors_.at(0)->shape().at(1);
+  num_classes_with_bg_ = in_tensors_.at(1)->shape().at(2);
   params_->decoded_boxes_ = context_->allocator->Malloc(num_boxes_ * 4 * sizeof(float));
   if (params_->decoded_boxes_ == nullptr) {
     MS_LOG(ERROR) << "malloc params->decoded_boxes_ failed.";

@@ -74,9 +74,9 @@ int ArithmeticCompareFP16CPUKernel::Init() {
 }
 int ArithmeticCompareFP16CPUKernel::ReSize() {
-  param_->in_elements_num0_ = in_tensors_[0]->ElementsNum();
-  param_->in_elements_num1_ = in_tensors_[1]->ElementsNum();
-  param_->out_elements_num_ = out_tensors_[0]->ElementsNum();
+  param_->in_elements_num0_ = in_tensors_.at(0)->ElementsNum();
+  param_->in_elements_num1_ = in_tensors_.at(1)->ElementsNum();
+  param_->out_elements_num_ = out_tensors_.at(0)->ElementsNum();
   if (param_->in_elements_num0_ == 1 || param_->in_elements_num1_ == 1) {
     param_->broadcasting_ = false;

@@ -135,9 +135,9 @@ int ArithmeticFP16CPUKernel::PreProcess() {
 }
 int ArithmeticFP16CPUKernel::ReSize() {
-  param_->in_elements_num0_ = in_tensors_[0]->ElementsNum();
-  param_->in_elements_num1_ = in_tensors_[1]->ElementsNum();
-  param_->out_elements_num_ = out_tensors_[0]->ElementsNum();
+  param_->in_elements_num0_ = in_tensors_.at(0)->ElementsNum();
+  param_->in_elements_num1_ = in_tensors_.at(1)->ElementsNum();
+  param_->out_elements_num_ = out_tensors_.at(0)->ElementsNum();
   if (param_->in_elements_num0_ == 1 || param_->in_elements_num1_ == 1) {
     param_->broadcasting_ = false;

@@ -44,7 +44,7 @@ ArithmeticSelfFp16Func ArithmeticSelfFp16CPUKernel::GetArithmeticSelfFp16Fun(int
     {mindspore::schema::PrimitiveType_Ceil, ElementCeilFp16},
     {mindspore::schema::PrimitiveType_Round, ElementRoundFp16},
     {mindspore::schema::PrimitiveType_Neg, ElementNegativeFp16}};
-  for (size_t i = 0; i < sizeof(type_func_table); i++) {
+  for (size_t i = 0; i < sizeof(type_func_table) / sizeof(TYPE_FUNC_INFO); i++) {
     if (type_func_table[i].primitive_type_ == primitive_type) {
       return type_func_table[i].func_;
     }
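Note (reviewer annotation, not part of the diff): another genuine bug fix. sizeof(type_func_table) is the size of the array in bytes, not the number of entries, so the original loop indexed far past the end of the table; dividing by the element size gives the entry count. A minimal sketch with illustrative types:

#include <cstdio>

struct TypeFuncInfo {
  int primitive_type_;
  const char *name_;
};

int main() {
  TypeFuncInfo table[] = {{1, "abs"}, {2, "ceil"}, {3, "neg"}};
  // sizeof(table) == 3 * sizeof(TypeFuncInfo) bytes; divide by the element size
  // (or use std::size in C++17) to get the number of entries.
  const auto count = sizeof(table) / sizeof(table[0]);
  printf("%zu entries\n", count);  // prints: 3 entries
  return 0;
}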

@@ -48,7 +48,7 @@ int CastFp16CPUKernel::Init() {
 }
 int CastFp16CPUKernel::ReSize() {
-  data_num_ = in_tensors_[0]->ElementsNum();
+  data_num_ = in_tensors_.at(0)->ElementsNum();
   if (data_num_ == 0) {
     return RET_OK;
   }

@@ -102,7 +102,7 @@ int ConcatFp16CPUKernel::Run() {
   std::vector<std::vector<int>> shapes;
   for (size_t i = 0; i < input_num; ++i) {
-    const auto in_tensor = in_tensors_[i];
+    const auto in_tensor = in_tensors_.at(i);
     if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {
       auto in_tensor_data = reinterpret_cast<float *>(in_tensor->MutableData());
       Float32ToFloat16(in_tensor_data, fp16_inputs_[i], in_tensor->ElementsNum());

@@ -42,7 +42,7 @@ int ConvolutionDepthwiseFp16CPUKernel::InitWeightBias() {
   // init weight: o, h, w, i; o == group, i == 1
   ConvolutionBaseFP16CPUKernel::GetExecuteFilter();
-  auto weight_tensor = in_tensors_[kWeightIndex];
+  auto weight_tensor = in_tensors_.at(kWeightIndex);
   int channel = weight_tensor->Batch();
   int pack_weight_size = channel * weight_tensor->Height() * weight_tensor->Width();

@@ -64,7 +64,7 @@ int ConvolutionDepthwiseSWFp16CPUKernel::InitPackedInputOutput() {
 int ConvolutionDepthwiseSWFp16CPUKernel::InitWeightBias() {
   // init weight: o, h, w, i; o == group, i == 1
-  auto weight_tensor = in_tensors_[kWeightIndex];
+  auto weight_tensor = in_tensors_.at(kWeightIndex);
   int OC8 = UP_DIV(weight_tensor->Batch(), C8NUM);
   auto origin_weight = reinterpret_cast<float *>(weight_tensor->MutableData());
   int pack_weight_size = C8NUM * OC8 * weight_tensor->Height() * weight_tensor->Width();

@@ -77,7 +77,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::InitPackedInputOutput() {
 int DeconvolutionDepthwiseFp16CPUKernel::InitWeightBias() {
   // init weight: o, h, w, i; o == group, i == 1
-  auto weight_tensor = in_tensors_[kWeightIndex];
+  auto weight_tensor = in_tensors_.at(kWeightIndex);
   int OC8 = UP_DIV(weight_tensor->Batch(), C8NUM);
   auto origin_weight = reinterpret_cast<float *>(weight_tensor->MutableData());
   int pack_weight_size = C8NUM * OC8 * weight_tensor->Height() * weight_tensor->Width();

@@ -64,7 +64,7 @@ int DeConvolutionFp16CPUKernel::InitWeightBias() {
   }
   memset(bias_data_, 0, UP_ROUND(output_channel, C4NUM) * sizeof(float16_t));
   if (in_tensors_.size() == 3) {
-    Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->MutableData()),
+    Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(2)->MutableData()),
                      reinterpret_cast<float16_t *>(bias_data_), output_channel);
   }
@@ -75,7 +75,7 @@ int DeConvolutionFp16CPUKernel::InitWeightBias() {
     return RET_ERROR;
   }
   memset(execute_weight_, 0, weight_pack_size);
-  PackNHWCFp32ToC8HWN8Fp16(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), execute_weight_, input_channel,
+  PackNHWCFp32ToC8HWN8Fp16(reinterpret_cast<float *>(in_tensors_.at(1)->MutableData()), execute_weight_, input_channel,
                            kernel_w * kernel_h, output_channel);
   return RET_OK;
 }

@@ -239,7 +239,7 @@ int DeConvWgPostFp16Run(void *cdata, int task_id) {
 }
 int DeConvWinogradFp16CPUKernel::InitComputeParam() {
-  auto weight_tensor = in_tensors_[1];
+  auto weight_tensor = in_tensors_.at(1);
   conv_param_->input_channel_ = weight_tensor->Batch();
   conv_param_->output_channel_ = weight_tensor->Channel();

@@ -28,7 +28,6 @@ using mindspore::schema::PrimitiveType_LessEqual;
 using mindspore::schema::PrimitiveType_NotEqual;
 namespace mindspore::kernel {
 int ArithmeticCompareCPUKernel::BroadcastRun(void *input0, void *input1, void *output, int dim, int out_count,
                                              int out_thread_stride) {
   if (dim > break_pos_) {

