From: @zhaozhenlong
Reviewed-by: @zhanghaibo5, @hangangqiang
Signed-off-by: @hangangqiang
pull/9289/MERGE
Committed by mindspore-ci-bot via Gitee (4 years ago)
commit 3eb4c14d86
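
This change replaces unchecked operator[] element access with bounds-checked std::vector::at() across the lite ops and CPU kernels, so a bad index fails with a defined, catchable error instead of undefined behavior (it also fixes a copy-paste MS_ASSERT in Lstm::InferShape that checked input twice instead of weight_i). A minimal standalone sketch of the difference, illustrative only and not code from this patch:

#include <iostream>
#include <stdexcept>
#include <vector>

int main() {
  std::vector<int> shape = {1, 224, 224, 3};  // hypothetical NHWC tensor shape
  std::cout << shape.at(3) << std::endl;      // in range: prints 3
  try {
    std::cout << shape.at(4) << std::endl;    // out of range: throws
  } catch (const std::out_of_range &e) {
    std::cout << "caught: " << e.what() << std::endl;
  }
  // shape[4] would compile and silently read past the buffer:
  // undefined behavior rather than a catchable std::out_of_range.
  return 0;
}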

@@ -51,8 +51,8 @@ int ExpandDims::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
     return RET_ERROR;
   }
   // use axis instead of dim
-  if (inputs[1]->isa<ValueNode>()) {
-    auto axis_tensor = inputs[1]->cast<ValueNodePtr>();
+  if (inputs.at(1)->isa<ValueNode>()) {
+    auto axis_tensor = inputs.at(1)->cast<ValueNodePtr>();
     int axis = CastToInt(axis_tensor->value()).front();
     attr->dim = axis;
   } else {

@@ -76,7 +76,7 @@ int Fill::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
   std::vector<int> output_shape;
   for (size_t i = 0; i < GetDims().size(); i++) {
-    output_shape.push_back(GetDims()[i]);
+    output_shape.push_back(GetDims().at(i));
   }
   output->set_shape(output_shape);
   return RET_OK;

@@ -45,10 +45,10 @@ int Flatten::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
   auto input_shape = input->shape();
   std::vector<int> output_shape(2);
-  output_shape[0] = input_shape[0];
-  output_shape[1] = 1;
+  output_shape.at(0) = input_shape.at(0);
+  output_shape.at(1) = 1;
   for (size_t i = 1; i < input_shape.size(); i++) {
-    output_shape[1] *= input_shape[i];
+    output_shape.at(1) *= input_shape.at(i);
   }
   output->set_shape(output_shape);
   return RET_OK;

@@ -44,10 +44,10 @@ int FlattenGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *>
   auto input_shape = input->shape();
   std::vector<int> output_shape(2);
-  output_shape[0] = input_shape[0];
-  output_shape[1] = 1;
+  output_shape.at(0) = input_shape.at(0);
+  output_shape.at(1) = 1;
   for (size_t i = 1; i < input_shape.size(); i++) {
-    output_shape[1] *= input_shape[i];
+    output_shape.at(1) *= input_shape.at(i);
   }
   output->set_shape(output_shape);
   return RET_OK;

@@ -65,7 +65,7 @@ int FullConnection::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<
   MS_ASSERT(this->primitive_ != nullptr);
   auto input0 = inputs_.front();
   MS_ASSERT(input0 != nullptr);
-  auto input1 = inputs_[1];
+  auto input1 = inputs_.at(1);
   MS_ASSERT(input1 != nullptr);
   auto output = outputs_.front();
   MS_ASSERT(output != nullptr);
@@ -83,34 +83,34 @@ int FullConnection::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<
   int new_k = 1;
   if (GetUseAxis()) {
     for (size_t i = GetAxis(); i < input0->shape().size(); ++i) {
-      new_k *= input0->shape()[i];
+      new_k *= input0->shape().at(i);
     }
-    if (new_k != input1->shape()[1]) {
+    if (new_k != input1->shape().at(1)) {
       MS_LOG(ERROR) << "Input1 size invalid";
       return RET_INPUT_TENSOR_ERROR;
     }
   } else {
-    new_k = input1->shape()[1];
+    new_k = input1->shape().at(1);
   }
   if (GetHasBias()) {
-    if (inputs_[2]->shape()[0] != input1->shape()[0]) {
+    if (inputs_.at(2)->shape().at(0) != input1->shape().at(0)) {
       MS_LOG(ERROR) << "bias size invalid";
       return RET_INPUT_TENSOR_ERROR;
     }
   }
-  std::vector<int> out_shape{inputs_[0]->shape()};
+  std::vector<int> out_shape{inputs_.at(0)->shape()};
   if (GetUseAxis()) {
     out_shape.resize(GetAxis() + 1);
-    out_shape[GetAxis()] = input1->shape()[0];
+    out_shape.at(GetAxis()) = input1->shape().at(0);
   } else {
     int total = 1;
     for (size_t i = 0; i < input0->shape().size(); ++i) {
-      total *= input0->shape()[i];
+      total *= input0->shape().at(i);
     }
     out_shape.resize(2);
     auto batch_size = total / new_k;
-    out_shape[0] = batch_size;
-    out_shape[1] = input1->shape()[0];
+    out_shape.at(0) = batch_size;
+    out_shape.at(1) = input1->shape().at(0);
   }
   output->set_shape(out_shape);
   output->set_data_type(input0->data_type());

@@ -57,8 +57,8 @@ int Gather::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
     gather_attr = nullptr;
     return RET_ERROR;
   }
-  if (inputs[2]->isa<ValueNode>()) {
-    ValueNodePtr axis_tensor = inputs[2]->cast<ValueNodePtr>();
+  if (inputs.at(2)->isa<ValueNode>()) {
+    ValueNodePtr axis_tensor = inputs.at(2)->cast<ValueNodePtr>();
     int axis = CastToInt(axis_tensor->value()).front();
     gather_attr->axis = axis;
   } else {
@@ -137,7 +137,7 @@ int Gather::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
   std::vector<int> out_shape{in_shape};
   out_shape.erase(out_shape.begin() + axis);
   for (int i = indices_rank - 1; i >= 0; --i) {
-    out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
+    out_shape.insert(out_shape.begin() + axis, indices_shape.at(i));
   }
   output->set_shape(out_shape);
   return RET_OK;

@@ -72,17 +72,17 @@ int GatherNd::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> ou
   int in_rank = in_shape.size();
   auto indices_shape = indices->shape();
   int indices_rank = indices_shape.size();
-  if (indices_shape[indices_rank - 1] > in_rank) {
+  if (indices_shape.at(indices_rank - 1) > in_rank) {
     MS_LOG(ERROR) << "Input of indices data is error!";
     return RET_ERROR;
   }
   std::vector<int> out_shape;
   int i = 0;
   for (i = 0; i < indices_rank - 1; ++i) {
-    out_shape.emplace_back(indices_shape[i]);
+    out_shape.emplace_back(indices_shape.at(i));
   }
-  for (i = indices_shape[indices_rank - 1]; i < in_rank; ++i) {
-    out_shape.emplace_back(in_shape[i]);
+  for (i = indices_shape.at(indices_rank - 1); i < in_rank; ++i) {
+    out_shape.emplace_back(in_shape.at(i));
   }
   output->set_shape(out_shape);
   return RET_OK;

@@ -97,7 +97,7 @@ int LayerNorm::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite:
   }
   size_t first_index = input_shape.size() - normalized_shape.size();
   for (size_t i = first_index; i < input_shape.size(); ++i) {
-    if (input_shape[i] != normalized_shape[i - first_index]) {
+    if (input_shape.at(i) != normalized_shape.at(i - first_index)) {
      MS_LOG(INFO) << "normalized_shape attr invalid";
      return RET_PARAM_INVALID;
    }

@@ -59,13 +59,13 @@ int Lstm::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
   }
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
-  auto weight_i = inputs_[1];
-  MS_ASSERT(input != nullptr);
+  auto weight_i = inputs_.at(1);
+  MS_ASSERT(weight_i != nullptr);
   auto output = outputs_.front();
   MS_ASSERT(output != nullptr);
   for (int i = 0; i < kLstmOutputNum; i++) {
-    outputs_[i]->set_data_type(input->data_type());
-    outputs_[i]->set_format(input->format());
+    outputs_.at(i)->set_data_type(input->data_type());
+    outputs_.at(i)->set_format(input->format());
   }
   if (!infer_flag()) {
     return RET_OK;

@@ -125,7 +125,7 @@ int MatMul::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
     del_end = true;
   }
   for (size_t i = 0; i < (a_shape.size() - 2) && i < (b_shape.size() - 2); ++i) {
-    if (a_shape[a_shape.size() - 3 - i] != b_shape[b_shape.size() - 3 - i]) {
+    if (a_shape.at(a_shape.size() - 3 - i) != b_shape.at(b_shape.size() - 3 - i)) {
      MS_LOG(ERROR) << "Op MatMul's dimensions must be equal";
      return RET_INPUT_TENSOR_ERROR;
    }

@@ -103,7 +103,7 @@ int Mean::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
   for (size_t i = 0; i < in_shape.size(); i++) {
     bool reduce_axis = false;
     for (size_t idx = 0; idx < num_axes; ++idx) {
-      if (static_cast<size_t>(axes[idx]) == i) {
+      if (static_cast<size_t>(axes.at(idx)) == i) {
        reduce_axis = true;
        break;
      }
@@ -113,7 +113,7 @@ int Mean::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
        out_shape.push_back(1);
      }
    } else {
-      out_shape.push_back(in_shape[i]);
+      out_shape.push_back(in_shape.at(i));
    }
  }
  output->set_shape(out_shape);

@@ -72,8 +72,8 @@ PrimitiveC *OnesLikeCreator(const schema::Primitive *primitive) {
 Registry OnesLikeRegistry(schema::PrimitiveType_OnesLike, OnesLikeCreator);
 #endif
 int OnesLike::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  Tensor *x = inputs_[0];
-  Tensor *out = outputs_[0];
+  Tensor *x = inputs_.at(0);
+  Tensor *out = outputs_.at(0);
   std::vector<int> x_shape = x->shape();
   std::vector<int> output_shape(x_shape.size());
   output_shape.assign(x_shape.begin(), x_shape.end());

@@ -110,7 +110,7 @@ int Pad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs)
   MS_ASSERT(input->shape().size() <= 4);
   for (size_t i = 0; i < input_shape.size(); i++) {
     auto paddings_index = i;
-    auto shape = input_shape[i] + paddings[2 * paddings_index] + paddings[2 * paddings_index + 1];
+    auto shape = input_shape.at(i) + paddings.at(2 * paddings_index) + paddings.at(2 * paddings_index + 1);
     output_shape.push_back(shape);
   }

@@ -111,12 +111,12 @@ int Pooling::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
   }
   auto kernel_size = CastToInt(prim.GetAttr("ksize"));
-  attr->windowH = kernel_size[2];
-  attr->windowW = kernel_size[3];
+  attr->windowH = kernel_size.at(2);
+  attr->windowW = kernel_size.at(3);
   auto stride = CastToInt(prim.GetAttr("strides"));
-  attr->strideH = stride[2];
-  attr->strideW = stride[3];
+  attr->strideH = stride.at(2);
+  attr->strideW = stride.at(3);
   this->primitive_->value.value = attr;
   if (this->primitive_->value.value == nullptr) {
     MS_LOG(ERROR) << "primitive value is nullptr";

@@ -100,12 +100,12 @@ int PoolingGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
   }
   auto kernel_size = CastToInt(prim.GetAttr("ksize"));
-  attr->windowH = kernel_size[2];
-  attr->windowW = kernel_size[3];
+  attr->windowH = kernel_size.at(2);
+  attr->windowW = kernel_size.at(3);
   auto stride = CastToInt(prim.GetAttr("strides"));
-  attr->strideH = stride[2];
-  attr->strideW = stride[3];
+  attr->strideH = stride.at(2);
+  attr->strideW = stride.at(3);
   this->primitive_->value.value = attr;
   if (this->primitive_->value.value == nullptr) {
     MS_LOG(ERROR) << "primitive value is nullptr";

@@ -103,14 +103,14 @@ Registry PowerRegistry(schema::PrimitiveType_Power, PowerCreator);
 int Power::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
-  auto x_tensor = inputs[0];
+  auto x_tensor = inputs.at(0);
   MS_ASSERT(x_tensor != nullptr);
   Tensor *exp_tensor = nullptr;
   if (inputs.size() == 2) {
-    exp_tensor = inputs[1];
+    exp_tensor = inputs.at(1);
     MS_ASSERT(exp_tensor != nullptr);
   }
-  auto output_tensor = outputs[0];
+  auto output_tensor = outputs.at(0);
   MS_ASSERT(output_tensor != nullptr);
   output_tensor->set_data_type(x_tensor->data_type());
   output_tensor->set_format(x_tensor->format());
@@ -119,7 +119,7 @@ int Power::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> output
   }
   if (exp_tensor != nullptr) {
     if ((exp_tensor->shape().size() > 1 && exp_tensor->shape() != x_tensor->shape()) ||
-        (exp_tensor->shape().size() == 1 && exp_tensor->shape()[0] != 1) ||
+        (exp_tensor->shape().size() == 1 && exp_tensor->shape().at(0) != 1) ||
         exp_tensor->data_type() != x_tensor->data_type()) {
       MS_LOG(ERROR) << "Power inputs shape or type is not equal!";
       return RET_INPUT_TENSOR_ERROR;

@@ -331,7 +331,7 @@ void PrimitiveC::GetAttrDataFromInput(const AnfNodePtr &inputNode, std::vector<i
   auto tuple = val->cast<ValueTuplePtr>();
   MS_ASSERT(tuple != nullptr);
   for (size_t i = 0; i < tuple->size(); i++) {
-    auto elem = tuple->value()[i];
+    auto elem = tuple->value().at(i);
     MS_ASSERT(elem != nullptr);
     data->emplace_back(CastToInt(elem).front());
   }
@@ -349,7 +349,7 @@ void PrimitiveC::set_input_quant_params(const std::vector<std::vector<schema::Qu
 void PrimitiveC::set_input_quant_param(const size_t &index, const std::vector<schema::QuantParamT> &input_quant_param) {
   MS_ASSERT(index < this->input_quant_param_.size());
-  this->input_quant_param_[index] = input_quant_param;
+  this->input_quant_param_.at(index) = input_quant_param;
 }
 void PrimitiveC::set_output_quant_params(const std::vector<std::vector<schema::QuantParamT>> &output_quant_param) {
@@ -359,7 +359,7 @@ void PrimitiveC::set_output_quant_params(const std::vector<std::vector<schema::Q
 void PrimitiveC::set_output_quant_param(const size_t &index,
                                         const std::vector<schema::QuantParamT> &output_quant_param) {
   MS_ASSERT(index < this->output_quant_param_.size());
-  this->output_quant_param_[index] = output_quant_param;
+  this->output_quant_param_.at(index) = output_quant_param;
 }
 bool PrimitiveC::IsInputQuantParamsInited() {

@@ -58,11 +58,11 @@ int PriorBoxCPUKernel::Init() {
 int PriorBoxCPUKernel::ReSize() { return GeneratePriorBox(); }
 int PriorBoxCPUKernel::GeneratePriorBox() {
-  const int fmap_w = in_tensors_[0]->Width();
-  const int fmap_h = in_tensors_[0]->Height();
-  const int image_w = prior_box_param_->image_size_w > 0 ? prior_box_param_->image_size_w : in_tensors_[1]->Width();
-  const int image_h = prior_box_param_->image_size_h > 0 ? prior_box_param_->image_size_h : in_tensors_[1]->Height();
+  const int fmap_w = in_tensors_.at(0)->Width();
+  const int fmap_h = in_tensors_.at(0)->Height();
+  const int image_w = prior_box_param_->image_size_w > 0 ? prior_box_param_->image_size_w : in_tensors_.at(1)->Width();
+  const int image_h = prior_box_param_->image_size_h > 0 ? prior_box_param_->image_size_h : in_tensors_.at(1)->Height();
   const float step_w =
     prior_box_param_->step_w > 0.0f ? prior_box_param_->step_w : static_cast<float>(image_w) / fmap_w;

@@ -54,10 +54,10 @@ void FullconnectionFP16CPUKernel::FreeTmpBuffer() {
 int FullconnectionFP16CPUKernel::ReSize() {
   FreeTmpBuffer();
   int row = 1;
-  for (size_t i = 0; i < out_tensors_[0]->shape().size() - 1; ++i) row *= (out_tensors_[0]->shape())[i];
+  for (size_t i = 0; i < out_tensors_.at(0)->shape().size() - 1; ++i) row *= (out_tensors_.at(0)->shape())[i];
   fc_param_->row_ = row;
-  fc_param_->col_ = out_tensors_[0]->shape().back();
-  fc_param_->deep_ = (in_tensors_[1]->shape())[1];
+  fc_param_->col_ = out_tensors_.at(0)->shape().back();
+  fc_param_->deep_ = (in_tensors_.at(1)->shape()).at(1);
   fc_param_->row_16_ = UP_ROUND(fc_param_->row_, C16NUM);
   fc_param_->col_8_ = UP_ROUND(fc_param_->col_, C8NUM);
   thread_count_ = MSMIN(thread_count_, UP_DIV(fc_param_->col_, C8NUM));
@@ -89,21 +89,21 @@ int FullconnectionFP16CPUKernel::ReSize() {
   }
   memset(b_pack_ptr_, 0, b_pack_col * fc_param_->deep_ * sizeof(float16_t));
-  fc_param_->b_const_ = (in_tensors_[1]->data_c() != nullptr);
+  fc_param_->b_const_ = (in_tensors_.at(1)->data_c() != nullptr);
   if (fc_param_->b_const_) {
-    if (in_tensors_[1]->data_type() == kNumberTypeFloat32) {
+    if (in_tensors_.at(1)->data_type() == kNumberTypeFloat32) {
       if (is_vector_input_) {
-        Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_,
+        Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_,
                          fc_param_->col_ * fc_param_->deep_);
       } else {
-        InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+        InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
       }
     } else {
       if (is_vector_input_) {
-        memcpy(b_pack_ptr_, reinterpret_cast<float16_t *>(in_tensors_[1]->data_c()),
+        memcpy(b_pack_ptr_, reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c()),
                fc_param_->col_ * fc_param_->deep_ * sizeof(float16_t));
       } else {
-        InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+        InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
       }
     }
     b_ptr_ = b_pack_ptr_;
@@ -116,10 +116,10 @@ int FullconnectionFP16CPUKernel::ReSize() {
      return RET_MEMORY_FAILED;
    }
    memset(bias_ptr_, 0, b_pack_col * sizeof(float16_t));
-    Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->data_c()), bias_ptr_, fc_param_->col_);
+    Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(2)->data_c()), bias_ptr_, fc_param_->col_);
  }
-  if (out_tensors_[0]->data_type() == kNumberTypeFloat32) {
+  if (out_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
    output_fp16_ =
      reinterpret_cast<float16_t *>(ctx_->allocator->Malloc(fc_param_->row_ * fc_param_->col_ * sizeof(float16_t)));
    if (output_fp16_ == nullptr) {
@@ -183,43 +183,43 @@ int FcFP16Run(void *cdata, int task_id) {
 }
 int FullconnectionFP16CPUKernel::Run() {
-  auto out_tensor = out_tensors_[0];
+  auto out_tensor = out_tensors_.at(0);
   if (out_tensor->data_type() == kNumberTypeFloat32) {
     output_ptr_ = output_fp16_;
   } else {
     output_ptr_ = reinterpret_cast<float16_t *>(out_tensor->data_c());
   }
-  if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
+  if (in_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
     if (is_vector_input_) {
-      Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[0]->data_c()), a_pack_ptr_, fc_param_->deep_);
+      Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(0)->data_c()), a_pack_ptr_, fc_param_->deep_);
     } else {
-      InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
     }
     a_ptr_ = a_pack_ptr_;
   } else {
     if (is_vector_input_) {
-      a_ptr_ = reinterpret_cast<float16_t *>(in_tensors_[0]->data_c());
+      a_ptr_ = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data_c());
     } else {
-      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
       a_ptr_ = a_pack_ptr_;
     }
   }
   if (!fc_param_->b_const_) {
-    if (in_tensors_[1]->data_type() == kNumberTypeFloat32) {
+    if (in_tensors_.at(1)->data_type() == kNumberTypeFloat32) {
       if (is_vector_input_) {
-        Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_,
+        Float32ToFloat16(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_,
                          fc_param_->col_ * fc_param_->deep_);
       } else {
-        InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+        InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
       }
       b_ptr_ = b_pack_ptr_;
     } else {
       if (is_vector_input_) {
-        b_ptr_ = reinterpret_cast<float16_t *>(in_tensors_[1]->data_c());
+        b_ptr_ = reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c());
       } else {
-        InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+        InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
         b_ptr_ = b_pack_ptr_;
       }
     }

@@ -28,7 +28,7 @@ using mindspore::schema::PrimitiveType_Conv2D;
 namespace mindspore::kernel {
 int GroupConvolutionFP16CPUKernel::Init() {
   for (int i = 0; i < group_num_; ++i) {
-    auto ret = group_convs_[i]->Init();
+    auto ret = group_convs_.at(i)->Init();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Sub kernel init failed.";
       return ret;
@@ -40,7 +40,7 @@ int GroupConvolutionFP16CPUKernel::Init() {
 int GroupConvolutionFP16CPUKernel::ReSize() {
   for (int i = 0; i < group_num_; ++i) {
-    auto ret = group_convs_[i]->ReSize();
+    auto ret = group_convs_.at(i)->ReSize();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Sub kernel resize failed.";
       return RET_ERROR;
@@ -94,7 +94,7 @@ int GroupConvolutionFP16CPUKernel::PreProcess() {
     int in_w = conv_param_->input_w_;
     int in_c = conv_param_->input_channel_;
     in_shape = {in_batch, in_h, in_w, in_c};
-    auto sub_kernel_in_tensor = group_convs_[i]->in_tensors().front();
+    auto sub_kernel_in_tensor = group_convs_.at(i)->in_tensors().front();
     sub_kernel_in_tensor->set_shape(in_shape);
     ret = sub_kernel_in_tensor->MallocData();
     if (ret != RET_OK) {
@@ -141,9 +141,9 @@ int GroupConvolutionFP16CPUKernel::SeparateInput(int group_id) {
   int in_plane = in_h * in_w;
   int sub_in_channel = conv_param_->input_channel_;
   int ori_in_channel = sub_in_channel * group_num_;
-  auto sub_in_data = group_convs_[group_id]->in_tensors().front()->data_c();
+  auto sub_in_data = group_convs_.at(group_id)->in_tensors().front()->data_c();
   auto in_data_type = in_tensors_.front()->data_type();
-  auto sub_in_data_type = group_convs_[group_id]->in_tensors().front()->data_type();
+  auto sub_in_data_type = group_convs_.at(group_id)->in_tensors().front()->data_type();
   if (in_data_type != sub_in_data_type) {
     MS_LOG(ERROR) << "data type of sub conv kernel input should be the same as origin input's.";
     return RET_ERROR;
@@ -183,7 +183,7 @@ void GroupConvolutionFP16CPUKernel::PostConcat(int group_id) {
   int out_plane = out_h * out_w;
   int sub_out_channel = conv_param_->output_channel_;
   int ori_out_channel = sub_out_channel * group_num_;
-  auto sub_out_data = reinterpret_cast<float16_t *>(group_convs_[group_id]->out_tensors().front()->data_c());
+  auto sub_out_data = reinterpret_cast<float16_t *>(group_convs_.at(group_id)->out_tensors().front()->data_c());
   MS_ASSERT(sub_out_data);
   float16_t *src_ptr = sub_out_data;
   float16_t *dst_ptr = ori_out_data_ + group_id * sub_out_channel;
@@ -206,7 +206,7 @@ int GroupConvolutionFP16CPUKernel::Run() {
       return ret;
     }
     // sub kernels run
-    ret = group_convs_[i]->Run();
+    ret = group_convs_.at(i)->Run();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "sub kernel " << i << " execute failed.";
       return ret;

@@ -262,7 +262,7 @@ int MatmulFP16Run(void *cdata, int task_id) {
 }
 int MatmulFP16CPUKernel::Run() {
-  auto out_tensor = out_tensors_[0];
+  auto out_tensor = out_tensors_.at(0);
   auto ret = MallocFp16Output();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Matmul MallocFp16Output failed";
@@ -280,10 +280,10 @@ int MatmulFP16CPUKernel::Run() {
       MS_LOG(ERROR) << "Matmul fp16 malloc matrix A buffer failed";
       return RET_ERROR;
     }
-    if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
-      InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+    if (in_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
+      InitMatrixA(reinterpret_cast<float *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
     } else {
-      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->data_c()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_.at(0)->data_c()), a_pack_ptr_);
     }
   }
   if (!params_->b_const_) {
if (!params_->b_const_) { if (!params_->b_const_) {
@@ -292,10 +292,10 @@ int MatmulFP16CPUKernel::Run() {
       MS_LOG(ERROR) << "Matmul fp16 malloc matrix B buffer failed";
       return RET_ERROR;
     }
-    if (in_tensors_[1]->data_type() == kNumberTypeFloat32) {
-      InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+    if (in_tensors_.at(1)->data_type() == kNumberTypeFloat32) {
+      InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
     } else {
-      InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_[1]->data_c()), b_pack_ptr_);
+      InitMatrixB(reinterpret_cast<float16_t *>(in_tensors_.at(1)->data_c()), b_pack_ptr_);
     }
   }
   for (int i = 0; i < params_->batch; ++i) {

@@ -115,14 +115,14 @@ int QuantDTypeCastFP16Run(void *cdata, int task_id) {
 }
 int QuantDTypeCastFp16CPUKernel::Run() {
-  if (in_tensors_[0]->data_type() == TypeId::kNumberTypeInt8 &&
-      out_tensors_[0]->data_type() == TypeId::kNumberTypeFloat16) {
-    int8_ptr_ = reinterpret_cast<int8_t *>(in_tensors_[0]->data_c());
-    float16_ptr_ = reinterpret_cast<float16_t *>(out_tensors_[0]->data_c());
-  } else if (in_tensors_[0]->data_type() == TypeId::kNumberTypeFloat16 &&
-             out_tensors_[0]->data_type() == TypeId::kNumberTypeInt8) {
-    float16_ptr_ = reinterpret_cast<float16_t *>(in_tensors_[0]->data_c());
-    int8_ptr_ = reinterpret_cast<int8_t *>(out_tensors_[0]->data_c());
+  if (in_tensors_.at(0)->data_type() == TypeId::kNumberTypeInt8 &&
+      out_tensors_.at(0)->data_type() == TypeId::kNumberTypeFloat16) {
+    int8_ptr_ = reinterpret_cast<int8_t *>(in_tensors_.at(0)->data_c());
+    float16_ptr_ = reinterpret_cast<float16_t *>(out_tensors_.at(0)->data_c());
+  } else if (in_tensors_.at(0)->data_type() == TypeId::kNumberTypeFloat16 &&
+             out_tensors_.at(0)->data_type() == TypeId::kNumberTypeInt8) {
+    float16_ptr_ = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data_c());
+    int8_ptr_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->data_c());
   } else {
     MS_LOG(ERROR) << "QuantDTypeCastFp16 not support input or output type";
     return RET_ERROR;

@@ -48,14 +48,14 @@ int ExpandDimsCPUKernel::DoExpandDims(int task_id) {
     return RET_OK;
   }
   int offset = task_id * thread_sz_stride_;
-  if (this->in_tensors_[0]->data_type() == kNumberTypeFloat32) {
+  if (this->in_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
     int ret = ExpandDims(reinterpret_cast<float *>(in_ptr_) + offset, reinterpret_cast<float *>(out_ptr_) + offset,
                          size * sizeof(float));
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "ExpandDimsRun error task_id[" << task_id << "] error_code[" << ret << "]";
       return ret;
     }
-  } else if (this->in_tensors_[0]->data_type() == kNumberTypeInt8) {
+  } else if (this->in_tensors_.at(0)->data_type() == kNumberTypeInt8) {
     int ret = ExpandDims(reinterpret_cast<int8_t *>(in_ptr_) + offset, reinterpret_cast<int8_t *>(out_ptr_) + offset,
                          size * sizeof(int8_t));
     if (ret != RET_OK) {

@@ -35,17 +35,17 @@ int FlattenCPUKernel::Init() {
 }
 int FlattenCPUKernel::ReSize() {
-  auto output_shape = out_tensors_[0]->shape();
+  auto output_shape = out_tensors_.at(0)->shape();
   flatten_param_->size = sizeof(float);
   for (size_t i = 0; i < output_shape.size(); i++) {
-    flatten_param_->size *= output_shape[i];
+    flatten_param_->size *= output_shape.at(i);
   }
   return RET_OK;
 }
 int FlattenCPUKernel::Run() {
-  auto input = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
-  auto output = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
+  auto input = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto output = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   Flatten(input, output, flatten_param_);
   return RET_OK;
 }

@@ -44,12 +44,12 @@ void FullconnectionCPUKernel::FreeBuf() {
 int FullconnectionCPUKernel::ReSize() {
   FreeBuf();
   int row = 1;
-  for (size_t i = 0; i < out_tensors_[0]->shape().size() - 1; ++i) {
-    row *= (out_tensors_[0]->shape())[i];
+  for (size_t i = 0; i < out_tensors_.at(0)->shape().size() - 1; ++i) {
+    row *= (out_tensors_.at(0)->shape())[i];
   }
   fc_param_->row_ = row;
-  fc_param_->col_ = out_tensors_[0]->shape().back();
-  fc_param_->deep_ = (in_tensors_[1]->shape())[1];
+  fc_param_->col_ = out_tensors_.at(0)->shape().back();
+  fc_param_->deep_ = (in_tensors_.at(1)->shape()).at(1);
   fc_param_->row_12_ = UP_ROUND(fc_param_->row_, C12NUM);
   fc_param_->col_8_ = UP_ROUND(fc_param_->col_, C8NUM);
@@ -98,14 +98,14 @@ int FullconnectionCPUKernel::ReSize() {
   }
   memset(b_pack_ptr_, 0, col_tmp * fc_param_->deep_ * sizeof(float));
-  fc_param_->a_const_ = (in_tensors_[0]->data_c() != nullptr);
-  fc_param_->b_const_ = (in_tensors_[1]->data_c() != nullptr);
+  fc_param_->a_const_ = (in_tensors_.at(0)->data_c() != nullptr);
+  fc_param_->b_const_ = (in_tensors_.at(1)->data_c() != nullptr);
   if (fc_param_->a_const_) {
-    InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->MutableData()), a_pack_ptr_);
+    InitMatrixA(reinterpret_cast<float *>(in_tensors_.at(0)->MutableData()), a_pack_ptr_);
     a_ptr_ = a_pack_ptr_;
   }
   if (fc_param_->b_const_) {
-    InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), b_pack_ptr_);
+    InitMatrixB(reinterpret_cast<float *>(in_tensors_.at(1)->MutableData()), b_pack_ptr_);
     b_ptr_ = b_pack_ptr_;
   }
   return RET_OK;

Some files were not shown because too many files have changed in this diff.