modify host_kernels

pull/49/head
zhangzhenghai 5 years ago
parent 139d3f6147
commit 2b23e1b5a9

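The change below is mechanical across every hunk: failure paths that previously logged a warning with GELOGW now log an error with GELOGE, whose first argument is the status code the function is about to return. A minimal before/after sketch, assuming simplified macro shapes (the project's real macros also record module, file, and line; treat these definitions as illustrative only):

// Assumed shapes, for illustration only:
//   GELOGW(fmt, ...)          -> log at warning level
//   GELOGE(status, fmt, ...)  -> log at error level, carrying the status code

// Before: warning only; the returned status never reaches the log.
if (op_desc_ptr == nullptr) {
  GELOGW("Op_desc_ptr must not be null.");
  return PARAM_INVALID;
}

// After: error level, with the status code in the log record.
if (op_desc_ptr == nullptr) {
  GELOGE(PARAM_INVALID, "Op_desc_ptr must not be null.");
  return PARAM_INVALID;
}
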
@@ -133,24 +133,25 @@ Status AddKernel::BCastAdd(const OpDescPtr &op_desc_ptr, const std::vector<Const
Status AddKernel::AddCheck(const OpDescPtr &op_desc_ptr, const std::vector<ConstGeTensorPtr> &input) {
if (op_desc_ptr == nullptr) {
GELOGW("Op_desc_ptr must not be null.");
GELOGE(PARAM_INVALID, "Op_desc_ptr must not be null.");
return PARAM_INVALID;
}
// check how many inputs
if ((input.size() != kAddInputSize) || (op_desc_ptr->GetOutputsSize() != kAddOutputSize)) {
GELOGW("The number of input for add must be %zu, output number must be %zu.", kAddInputSize, kAddOutputSize);
GELOGE(PARAM_INVALID, "The number of input for add must be %zu, output number must be %zu.", kAddInputSize,
kAddOutputSize);
return PARAM_INVALID;
}
// input vector elements must not be null
if ((input[kAddFirstInput] == nullptr) || (input[kAddSecondInput] == nullptr)) {
GELOGW("Input vector elements must not be null.");
GELOGE(PARAM_INVALID, "Input vector elements must not be null.");
return PARAM_INVALID;
}
// Inputs must have the same datatype.
DataType data_type_0 = input[kAddFirstInput]->GetTensorDesc().GetDataType();
DataType data_type_1 = input[kAddSecondInput]->GetTensorDesc().GetDataType();
if (data_type_0 != data_type_1) {
GELOGW("Data type of inputs for add not matched, data_type_0:%s, data_type_1:%s",
GELOGE(PARAM_INVALID, "Data type of inputs for add not matched, data_type_0:%s, data_type_1:%s",
TypeUtils::DataTypeToSerialString(data_type_0).c_str(),
TypeUtils::DataTypeToSerialString(data_type_1).c_str());
return PARAM_INVALID;
@@ -191,7 +192,7 @@ Status AddKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<ConstGe
}
if (ret != SUCCESS) {
GELOGW("Greater broadcasting failed.");
GELOGE(ret, "Greater broadcasting failed.");
return NOT_CHANGED;
}
return SUCCESS;

@@ -55,6 +55,7 @@ Status BroadcastArgsKernel::Compute(const OpDescPtr op_desc_ptr, const std::vect
vector<int64_t> x2_dims;
const auto &op_in_desc = op_desc_ptr->MutableInputDesc(0);
GE_CHECK_NOTNULL(op_in_desc);
DataType data_type = op_in_desc->GetDataType();
bool result = (OpUtils::GetShapeDataFromConstTensor(input[0], data_type, x1_dims) == SUCCESS) &&
(OpUtils::GetShapeDataFromConstTensor(input[1], data_type, x2_dims) == SUCCESS);

@@ -41,7 +41,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
// validate attrs
int N = 0;
if (!(AttrUtils::GetInt(op_desc_ptr, "N", N))) {
GELOGW("Attr %s is not exist.", "N");
GELOGE(PARAM_INVALID, "Attr %s is not exist.", "N");
return NOT_CHANGED;
}
// follow IR def, the first input is concat_dim
@@ -50,7 +50,8 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
int32_t concat_dim = *(const_cast<int32_t *>(reinterpret_cast<const int32_t *>(input_0->GetData().data())));
// validate inputs
if (static_cast<int>(input.size()) != (N + kNumOne) || input.size() <= kConcatOffsetInputIndexOne) {
GELOGW("The number of input for concat offset must be equal with %d, and must be more than one.", (N + kNumOne));
GELOGE(PARAM_INVALID, "The number of input for concat offset must be equal with %d, and must be more than one.",
(N + kNumOne));
return NOT_CHANGED;
}
@@ -58,7 +59,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
GeShape output_shape = input[kConcatOffsetInputIndexOne]->GetTensorDesc().GetShape();
int64_t output_size = output_shape.GetShapeSize();
if (concat_dim >= output_size) {
GELOGW("Concat dim is biger than the size of output_shape.");
GELOGE(PARAM_INVALID, "Concat dim is biger than the size of output_shape.");
return NOT_CHANGED;
}
GELOGI("Output shape size is %ld", output_size);
@@ -78,7 +79,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("Failed to fold node %s, out of memeory", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "Failed to fold node %s, out of memeory", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}
@@ -86,7 +87,7 @@ Status ConcatOffsetKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Con
output_ptr->MutableTensorDesc().SetShape(output_shape);
GE_IF_BOOL_EXEC(output_ptr->SetData(reinterpret_cast<uint8_t *>(buf.get()),
static_cast<size_t>(sizeof(DT_INT32) * output_size)) != GRAPH_SUCCESS,
GELOGW("set data failed");
GELOGE(INTERNAL_ERROR, "set data failed");
return NOT_CHANGED);
v_output.push_back(output_ptr);
// calculate offset

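Several hunks (here, and later in reformat_kernel.cc and rsqrt_kernel.cc) pack the whole failure branch, log statement plus early return, into a single GE_IF_BOOL_EXEC argument. A minimal sketch of the assumed macro shape; the project's actual definition may differ:

// Assumed definition, for illustration: execute the statements when expr is true.
#define GE_IF_BOOL_EXEC(expr, exec_expr) \
  if (expr) {                            \
    exec_expr;                           \
  }

// Usage mirroring the hunk above: the log and the return together form the
// second macro argument (semicolons inside a macro argument are legal; only
// top-level commas separate arguments).
GE_IF_BOOL_EXEC(output_ptr->SetData(buf, len) != GRAPH_SUCCESS,
                GELOGE(INTERNAL_ERROR, "set data failed");
                return NOT_CHANGED);
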
@@ -63,11 +63,11 @@ Status DynamicStitchKernel::Compute(const OpDescPtr op_desc_ptr, const vector<Co
Status DynamicStitchKernel::ValidateParams(const OpDescPtr &op_desc_ptr, const std::vector<ConstGeTensorPtr> &input) {
if (op_desc_ptr == nullptr) {
GELOGW("Input op_desc is nullptr.");
GELOGE(PARAM_INVALID, "Input op_desc is nullptr.");
return PARAM_INVALID;
}
if (op_desc_ptr->GetOutputsSize() == 0) {
GELOGW("Current output_desc is empty.");
GELOGE(PARAM_INVALID, "Current output_desc is empty.");
return PARAM_INVALID;
}
// validate input
@@ -78,7 +78,7 @@ Status DynamicStitchKernel::ValidateParams(const OpDescPtr &op_desc_ptr, const s
}
for (const auto &in : input) {
if (in == nullptr) {
GELOGW("input is nullptr.");
GELOGE(PARAM_INVALID, "input is nullptr.");
return PARAM_INVALID;
}
}
@@ -150,7 +150,7 @@ Status DynamicStitchKernel::GenData(const vector<ConstGeTensorPtr> &input, GeTen
// 2.allocate memory for output
std::unique_ptr<uint8_t[]> buf(new (std::nothrow) uint8_t[allowance]);
if (buf == nullptr) {
GELOGW("new buffer failed");
GELOGE(MEMALLOC_FAILED, "new buffer failed");
return INTERNAL_ERROR;
}
// 3.copy data from input_data along with the sequence of input_indices
@@ -164,7 +164,7 @@ Status DynamicStitchKernel::GenData(const vector<ConstGeTensorPtr> &input, GeTen
output_ptr->MutableTensorDesc().SetShape(merged_shape);
Status ret = output_ptr->SetData(buf.get(), allowance);
if (ret != GRAPH_SUCCESS) {
GELOGW("set data failed");
GELOGE(INTERNAL_ERROR, "set data failed");
return NOT_CHANGED;
}
return SUCCESS;

@@ -38,7 +38,7 @@ const size_t kShapeMaxDims = 1;
} // namespace
Status EmptyKernel::EmptyCheck(const OpDescPtr &op_desc_ptr, const std::vector<ConstGeTensorPtr> &input) {
if (op_desc_ptr == nullptr) {
GELOGW("Parameter's invalid, Input opDescPtr is nullptr.");
GELOGE(PARAM_INVALID, "Parameter's invalid, Input opDescPtr is nullptr.");
return PARAM_INVALID;
}
// check input size
@@ -46,19 +46,20 @@ Status EmptyKernel::EmptyCheck(const OpDescPtr &op_desc_ptr, const std::vector<C
((op_desc_ptr->GetAllInputsDesc().size() != kEmptyInputsSize) || (input.size() != kEmptyInputsSize) ||
(op_desc_ptr->GetAllOutputsDesc().size() != kEmptyOutputsSize));
if (size_check) {
GELOGW("Input/Output size error. InDesc size:%zu, OutDesc size:%zu, in size:%zu ",
GELOGE(PARAM_INVALID, "Input/Output size error. InDesc size:%zu, OutDesc size:%zu, in size:%zu ",
op_desc_ptr->GetAllInputsDesc().size(), op_desc_ptr->GetAllOutputsDesc().size(), input.size());
return PARAM_INVALID;
}
if (input.at(kEmptyFirstInput) == nullptr) {
GELOGW("Parameter's invalid, first input is nullptr.");
GELOGE(PARAM_INVALID, "Parameter's invalid, first input is nullptr.");
return PARAM_INVALID;
}
ConstGeTensorPtr shape = input.at(kEmptyFirstInput);
// Check if the dimension is 1-D
if (shape->GetTensorDesc().GetShape().GetDimNum() > kShapeMaxDims) {
GELOGW("Check if the dimension is 1-D failed, dims:%zu", shape->GetTensorDesc().GetShape().GetDimNum());
GELOGE(PARAM_INVALID, "Check if the dimension is 1-D failed, dims:%zu",
shape->GetTensorDesc().GetShape().GetDimNum());
return PARAM_INVALID;
}
return SUCCESS;
@@ -83,7 +84,7 @@ Status EmptyKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Const
} else if (shape_type == DT_INT64) {
ret = KernelUtils::CalcDims<int64_t>(shape, shape_vec, total_data_size);
} else {
GELOGW("shape type must be DT_INT32 or DT_INT64.");
GELOGE(PARAM_INVALID, "shape type must be DT_INT32 or DT_INT64.");
return NOT_CHANGED;
}

@@ -66,7 +66,7 @@ Status ExpanddimsKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vec
auto output_tensor_desc = op_desc_ptr->GetOutputDesc(kExpandDimsIndexZero);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}

@@ -260,7 +260,7 @@ Status FloorDivKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Co
auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}

@@ -122,7 +122,7 @@ Status FloorModKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Co
GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(kFloorModFirstOutput));
if (output_ptr == nullptr) {
GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}

@@ -274,7 +274,7 @@ Status GatherV2Kernel::SaveIndicesByDataType(ConstGeTensorPtr indices_tensor_ptr
auto indices_ptr = const_cast<int32_t *>(reinterpret_cast<const int32_t *>(indices_tensor_ptr->GetData().data()));
for (int64_t i = 0; i < indices_shape.GetShapeSize(); i++) {
if (*(indices_ptr + i) < 0 || *(indices_ptr + i) >= x_shape.GetDim(axis)) {
GELOGW("indices %ld value is not in range [0, %ld)", i, x_shape.GetDim(axis));
GELOGE(NOT_CHANGED, "indices %ld value is not in range [0, %ld)", i, x_shape.GetDim(axis));
return NOT_CHANGED;
}
indicates_.push_back(*(indices_ptr + i));
@@ -284,7 +284,7 @@ Status GatherV2Kernel::SaveIndicesByDataType(ConstGeTensorPtr indices_tensor_ptr
auto indices_ptr = const_cast<int64_t *>(reinterpret_cast<const int64_t *>(indices_tensor_ptr->GetData().data()));
for (int64_t i = 0; i < indices_shape.GetShapeSize(); i++) {
if (*(indices_ptr + i) < 0 || *(indices_ptr + i) >= x_shape.GetDim(axis)) {
GELOGW("indices %ld value is not in range [0, %ld)", i, x_shape.GetDim(axis));
GELOGE(NOT_CHANGED, "indices %ld value is not in range [0, %ld)", i, x_shape.GetDim(axis));
return NOT_CHANGED;
}
indicates_.push_back(*(indices_ptr + i));
@@ -296,19 +296,19 @@ Status GatherV2Kernel::SaveIndicesByDataType(ConstGeTensorPtr indices_tensor_ptr
Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeTensorPtr> &input,
vector<GeTensorPtr> &v_output) const {
if (op_desc_ptr == nullptr) {
GELOGW("input opdesc is nullptr.");
GELOGE(NOT_CHANGED, "input opdesc is nullptr.");
return NOT_CHANGED;
}
if (input.size() != kGatherV2InpotNum) {
GELOGW("The number of input for GatherV2 must be %zu.", kGatherV2InpotNum);
GELOGE(NOT_CHANGED, "The number of input for GatherV2 must be %zu.", kGatherV2InpotNum);
return NOT_CHANGED;
}
bool is_null = (input[kGatherV2InputIndexZero] == nullptr || input[kGatherV2InputIndexOne] == nullptr ||
input[kGatherV2InputIndexTwo] == nullptr);
if (is_null) {
GELOGW("some input is nullptr.");
GELOGE(NOT_CHANGED, "some input is nullptr.");
return NOT_CHANGED;
}
ConstGeTensorPtr tensor0 = input.at(kGatherV2InputIndexZero);
@@ -318,7 +318,7 @@ Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeT
bool size_is_zero =
((tensor0->GetData().size() == 0) || (tensor1->GetData().size() == 0) || (tensor2->GetData().size() == 0));
if (size_is_zero) {
GELOGW("some input size is zero.");
GELOGE(NOT_CHANGED, "some input size is zero.");
return NOT_CHANGED;
}
@@ -326,13 +326,13 @@ Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeT
auto axis_shape = tensor2->GetTensorDesc().GetShape();
// axis must be scalar
if (axis_shape.GetDimNum() != 0) {
GELOGW("axis must be scalar but its shape is %zu", axis_shape.GetDimNum());
GELOGE(NOT_CHANGED, "axis must be scalar but its shape is %zu", axis_shape.GetDimNum());
return NOT_CHANGED;
}
auto axis_data_type = tensor2->GetTensorDesc().GetDataType();
bool is_valid_axis_data_type = axis_data_type == DT_INT32 || axis_data_type == DT_INT64;
if (!is_valid_axis_data_type) {
GELOGW("axis datatype must be DT_INT32 or DT_INT64");
GELOGE(NOT_CHANGED, "axis datatype must be DT_INT32 or DT_INT64");
return NOT_CHANGED;
}
@@ -340,11 +340,11 @@ Status GatherV2Kernel::Check(const OpDescPtr &op_desc_ptr, const vector<ConstGeT
auto indices_data_type = tensor1->GetTensorDesc().GetDataType();
bool is_valid_indices_data_type = indices_data_type == DT_INT32 || indices_data_type == DT_INT64;
if (!is_valid_indices_data_type) {
GELOGW("indices datatype must be DT_INT32 or DT_INT64");
GELOGE(NOT_CHANGED, "indices datatype must be DT_INT32 or DT_INT64");
return NOT_CHANGED;
}
if (indices_shape.GetDimNum() > kMaxIndicatesDims) {
GELOGW("indices input only support 0 or 1 dims");
GELOGE(NOT_CHANGED, "indices input only support 0 or 1 dims");
return NOT_CHANGED;
}
return SUCCESS;
@@ -372,7 +372,7 @@ Status GatherV2Kernel::Compute(const OpDescPtr op_desc_ptr, const vector<ConstGe
GELOGI("Enter GatherV2Kernel Process.");
Status ret = Check(op_desc_ptr, input, v_output);
if (ret != SUCCESS) {
GELOGW("param check failed.");
GELOGE(NOT_CHANGED, "param check failed.");
return NOT_CHANGED;
}
GELOGI("GatherV2Kernel[%s] start Process.", op_desc_ptr->GetName().c_str());
@@ -390,13 +390,13 @@ Status GatherV2Kernel::Compute(const OpDescPtr op_desc_ptr, const vector<ConstGe
axis = axis >= 0 ? axis : axis + x_shape.GetDimNum();
// check axis value
if (axis < 0 || (axis + 1) > static_cast<int64_t>(x_shape.GetDimNum())) {
GELOGW("axis is invalid");
GELOGE(NOT_CHANGED, "axis is invalid");
return NOT_CHANGED;
}
auto indices_data_type = tensor1->GetTensorDesc().GetDataType();
ret = SaveIndicesByDataType(tensor1, x_shape, indices_shape, indices_data_type, static_cast<size_t>(axis));
if (ret != SUCCESS) {
GELOGW("Save indeices by data type failed!");
GELOGE(NOT_CHANGED, "Save indeices by data type failed!");
return ret;
}
@@ -420,7 +420,7 @@ Status GatherV2Kernel::Compute(const OpDescPtr op_desc_ptr, const vector<ConstGe
GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(0));
if (output_ptr == nullptr) {
GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}
output_ptr->MutableTensorDesc().SetShape(GeShape(y_shape));

@@ -63,7 +63,7 @@ Status PackKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<ge
Status PackKernel::ValidateKernelParams(const ge::OpDescPtr &op_desc_ptr,
const std::vector<ge::ConstGeTensorPtr> &input) {
if (op_desc_ptr == nullptr) {
GELOGW("input opdesc is nullptr.");
GELOGE(PARAM_INVALID, "input opdesc is nullptr.");
return PARAM_INVALID;
}
if (!(AttrUtils::GetInt(op_desc_ptr, PACK_ATTR_NAME_NUM, n_))) {
@@ -71,15 +71,16 @@ Status PackKernel::ValidateKernelParams(const ge::OpDescPtr &op_desc_ptr,
GELOGD("Attr %s is not set, default value %ld is used.", PACK_ATTR_NAME_NUM.c_str(), n_);
}
if (!(AttrUtils::GetInt(op_desc_ptr, ATTR_NAME_AXIS, axis_))) {
GELOGW("Attr %s is not exist.", ATTR_NAME_AXIS.c_str());
GELOGE(PARAM_INVALID, "Attr %s is not exist.", ATTR_NAME_AXIS.c_str());
return PARAM_INVALID;
}
if (input.empty()) {
GELOGW("The number of input for Pack should be %ld, in fact it is %zu ", n_, input.size());
GELOGE(PARAM_INVALID, "The number of input for Pack should be %ld, in fact it is %zu ", n_, input.size());
return NOT_CHANGED;
}
if (input.size() != static_cast<size_t>(n_)) {
GELOGW("The number of input for Pack should be %d, in fact it is %ld ", static_cast<int>(n_), input.size());
GELOGE(PARAM_INVALID, "The number of input for Pack should be %d, in fact it is %ld ", static_cast<int>(n_),
input.size());
return PARAM_INVALID;
}
data_type_ = op_desc_ptr->GetInputDesc(0).GetDataType();

@@ -110,14 +110,14 @@ Status PermuteKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Con
return NOT_CHANGED;
}
if (!KernelUtils::CheckSizeForTransOp(const_weight_ptr, op_desc_ptr)) {
GELOGW("CheckSize failed, input size is not equal to weight size");
GELOGE(FAILED, "CheckSize failed, input size is not equal to weight size");
return NOT_CHANGED;
}
const uint8_t *src_data = const_weight_ptr->GetData().data();
formats::TransResult trans_result;
auto ret = formats::TransposeWithShapeCheck(src_data, src_shape, data_shape, src_data_type, perm_list, trans_result);
if (ret != SUCCESS) {
GELOGW("Failed to Transpose from %s to %s, shape %s to %s, perm_list %s, data type %s",
GELOGE(INTERNAL_ERROR, "Failed to Transpose from %s to %s, shape %s to %s, perm_list %s, data type %s",
TypeUtils::FormatToSerialString(src_format).c_str(), TypeUtils::FormatToSerialString(data_format).c_str(),
formats::ShapeToString(src_shape).c_str(), formats::ShapeToString(data_shape).c_str(),
formats::ShapeToString(perm_list).c_str(), TypeUtils::DataTypeToSerialString(src_data_type).c_str());

@@ -49,7 +49,7 @@ Status RankKernel::Compute(const NodePtr &node, std::vector<GeTensorPtr> &v_outp
auto ndims = input_shape->GetShape().GetDimNum();
GeTensorDesc tensor_desc(op_desc->GetOutputDesc(0));
GeTensorPtr output_ptr;
output_ptr = MakeShared<ge::GeTensor>(tensor_desc, reinterpret_cast<uint8_t *>(&ndims), GetSizeByDataType(DT_INT32));
output_ptr = MakeShared<ge::GeTensor>(tensor_desc, reinterpret_cast<uint8_t *>(&ndims), sizeof(ndims));
if (output_ptr == nullptr) {
GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed");
return MEMALLOC_FAILED;

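One caveat on the rank_kernel.cc hunk above: the new code sizes the tensor data with sizeof(ndims). If GetDimNum() returns size_t (an assumption here), that is 8 bytes on LP64 platforms while the output is described as DT_INT32 (4 bytes), so the byte count and the declared element type disagree; the old GetSizeByDataType(DT_INT32) at least matched the declared type. A hedged sketch of a width-safe variant:

// Illustrative only: store the rank in a variable whose width matches DT_INT32.
int32_t rank = static_cast<int32_t>(input_shape->GetShape().GetDimNum());
output_ptr = MakeShared<ge::GeTensor>(tensor_desc, reinterpret_cast<uint8_t *>(&rank),
                                      sizeof(rank));  // 4 bytes, consistent with DT_INT32
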
@@ -51,7 +51,7 @@ Status ReduceProdKernel::ReduceProdCheck(const ge::OpDescPtr &op_desc_ptr,
op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}
GELOGW("Unexpected ReduceProd node, node input size: %zu, node name: %s", input.size(),
GELOGE(PARAM_INVALID, "Unexpected ReduceProd node, node input size: %zu, node name: %s", input.size(),
op_desc_ptr->GetName().c_str());
return PARAM_INVALID;
}
@@ -60,13 +60,13 @@ Status ReduceProdKernel::ReduceProdCheck(const ge::OpDescPtr &op_desc_ptr,
GE_CHECK_NOTNULL(data_tensor);
GE_CHECK_NOTNULL(axis_tensor);
if (axis_tensor->GetTensorDesc().GetShape().GetDimNum() > kReduceProdMaxAxisRank) {
GELOGW("Axis must be at most rank 1, node node: %s", op_desc_ptr->GetName().c_str());
GELOGE(PARAM_INVALID, "Axis must be at most rank 1, node node: %s", op_desc_ptr->GetName().c_str());
return PARAM_INVALID;
}
DataType data_type = data_tensor->GetTensorDesc().GetDataType();
if (kReduceProdSupportedType.find(data_type) == kReduceProdSupportedType.end()) {
GELOGW("ReduceProdKernel data type %s not support, node name: %s",
GELOGE(PARAM_INVALID, "ReduceProdKernel data type %s not support, node name: %s",
TypeUtils::DataTypeToSerialString(data_type).c_str(), op_desc_ptr->GetName().c_str());
return PARAM_INVALID;
}
@@ -83,7 +83,7 @@ Status ReduceProdKernel::AxisCal(const std::vector<ge::ConstGeTensorPtr> &input)
int32_t *axis = const_cast<int32_t *>(reinterpret_cast<const int32_t *>(axis_tensor->GetData().GetData()));
GE_CHECK_NOTNULL(axis);
if (static_cast<size_t>(*axis) >= data_dim_size) {
GELOGW("axis is out of rank of data_dims, axis is %d.", *axis);
GELOGE(PARAM_INVALID, "axis is out of rank of data_dims, axis is %d.", *axis);
return PARAM_INVALID;
}
axis_dim_ = data_dims[static_cast<size_t>(*axis)];
@@ -98,13 +98,13 @@ Status ReduceProdKernel::AxisCal(const std::vector<ge::ConstGeTensorPtr> &input)
// data_dims is the vector of dims, element in data_dims isn't negative.
if (axis_appear) {
if (data_dims[i] != 0 && end_dim_ > (INT64_MAX / data_dims[i])) {
GELOGW("Product is overflow. multiplier 1: %ld. multiplier 2: %ld.", end_dim_, data_dims[i]);
GELOGE(INTERNAL_ERROR, "Product is overflow. multiplier 1: %ld. multiplier 2: %ld.", end_dim_, data_dims[i]);
return INTERNAL_ERROR;
}
end_dim_ *= data_dims[i];
} else {
if (data_dims[i] != 0 && head_dim_ > (INT64_MAX / data_dims[i])) {
GELOGW("Product is overflow. multiplier 1: %ld. multiplier 2: %ld.", head_dim_, data_dims[i]);
GELOGE(INTERNAL_ERROR, "Product is overflow. multiplier 1: %ld. multiplier 2: %ld.", head_dim_, data_dims[i]);
return INTERNAL_ERROR;
}
head_dim_ *= data_dims[i];
@@ -122,7 +122,7 @@ Status ReduceProdKernel::DataCal(const std::vector<ge::ConstGeTensorPtr> &input,
size_t data_num = data_tensor->GetData().size() / sizeof(int32_t);
unique_ptr<int32_t[]> buf(new (std::nothrow) int32_t[data_num]());
if (buf == nullptr) {
GELOGW("new buf failed");
GELOGE(MEMALLOC_FAILED, "new buf failed");
return INTERNAL_ERROR;
}
@@ -190,12 +190,12 @@ Status ReduceProdKernel::ComputeNoAxis(const ge::OpDescPtr &op_desc_ptr, const s
ConstGeTensorPtr data_tensor = input.at(kReduceProdDataIndex);
GE_CHECK_NOTNULL(data_tensor);
if (data_tensor->GetData().size() == 0) {
GELOGW("ReduceProdKernel data size of inputs is 0, node node: %s", op_desc_ptr->GetName().c_str());
GELOGE(PARAM_INVALID, "ReduceProdKernel data size of inputs is 0, node node: %s", op_desc_ptr->GetName().c_str());
return PARAM_INVALID;
}
DataType data_type = data_tensor->GetTensorDesc().GetDataType();
if (kReduceProdSupportedType.find(data_type) == kReduceProdSupportedType.end()) {
GELOGW("ReduceProdKernel data type %s not support, node name: %s",
GELOGE(PARAM_INVALID, "ReduceProdKernel data type %s not support, node name: %s",
TypeUtils::DataTypeToSerialString(data_type).c_str(), op_desc_ptr->GetName().c_str());
return PARAM_INVALID;
}
@@ -206,7 +206,7 @@ Status ReduceProdKernel::ComputeNoAxis(const ge::OpDescPtr &op_desc_ptr, const s
size_t data_num = data_tensor->GetData().size() / sizeof(int32_t);
unique_ptr<int32_t[]> buf(new (std::nothrow) int32_t[data_num]());
if (buf == nullptr) {
GELOGW("new buf failed");
GELOGE(MEMALLOC_FAILED, "new buf failed");
return INTERNAL_ERROR;
}
@@ -235,7 +235,7 @@ Status ReduceProdKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vec
GELOGI("ReduceProdKernel in.");
Status ret = ReduceProdCheck(op_desc_ptr, input);
if (ret != SUCCESS && ret != NOT_CHANGED) {
GELOGW("ReduceProdKernel input is invalid, failed to fold node.");
GELOGE(PARAM_INVALID, "ReduceProdKernel input is invalid, failed to fold node.");
return NOT_CHANGED;
}
@@ -243,7 +243,7 @@ Status ReduceProdKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vec
auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}

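The overflow guards in the AxisCal hunk above test a product before performing it. The same pattern in isolation, valid for the non-negative dims the surrounding comment guarantees (a hypothetical helper, not part of the kernel):

#include <cstdint>

// Returns false instead of overflowing; mirrors the AxisCal guard.
// For a >= 0 and b >= 0: a * b overflows iff b != 0 and a > INT64_MAX / b.
bool SafeMulNonNegative(int64_t a, int64_t b, int64_t &out) {
  if (b != 0 && a > INT64_MAX / b) {
    return false;  // product would exceed INT64_MAX
  }
  out = a * b;
  return true;
}
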
@@ -56,7 +56,7 @@ Status ReFormatKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Co
ConstGeTensorPtr const_weight_ptr = input[kReformatFirstInput];
if (const_weight_ptr == nullptr) {
GELOGW("Parameter's invalid, Input_0 is nullptr.");
GELOGE(PARAM_INVALID, "Parameter's invalid, Input_0 is nullptr.");
return NOT_CHANGED;
}
@@ -75,17 +75,18 @@ Status ReFormatKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Co
return NOT_CHANGED;
}
if (!KernelUtils::CheckSizeForTransOp(const_weight_ptr, op_desc_ptr)) {
GELOGW("CheckSize failed, input size(shape %s) is not equal to weight size(shape %s)",
GELOGE(FAILED, "CheckSize failed, input size(shape %s) is not equal to weight size(shape %s)",
formats::ShapeToString(src_shape).c_str(),
formats::ShapeToString(const_weight_ptr->GetTensorDesc().GetShape()).c_str());
return NOT_CHANGED;
}
GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(kReformatFirstOutput));
if (output_ptr == nullptr) {
GELOGW("Create shared ptr for GeTensor failed");
GELOGE(INTERNAL_ERROR, "Create shared ptr for GeTensor failed");
return NOT_CHANGED;
}
GE_IF_BOOL_EXEC(output_ptr->SetData(input.at(0)->GetData()) != GRAPH_SUCCESS, GELOGW("set data failed");
GE_IF_BOOL_EXEC(output_ptr->SetData(input.at(0)->GetData()) != GRAPH_SUCCESS,
GELOGE(INTERNAL_ERROR, "set data failed");
return NOT_CHANGED);
v_output.emplace_back(output_ptr);
GELOGD("ReFormatKernel success.");

@@ -67,7 +67,7 @@ Status ReshapeKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector
auto output_tensor_desc = op_desc_ptr->GetOutputDesc(kOutputDescFirstIndex);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}

@@ -64,7 +64,7 @@ Status RsqrtKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Const
if (data_count > 0) {
unique_ptr<float[]> buf(new (std::nothrow) float[data_count]());
if (buf == nullptr) {
GELOGW("new buf failed");
GELOGE(MEMALLOC_FAILED, "new buf failed");
return NOT_CHANGED;
}
@@ -81,13 +81,13 @@ Status RsqrtKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Const
auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("MakeShared GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "MakeShared GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}
output_ptr->MutableTensorDesc().SetDataType(DT_FLOAT);
GE_IF_BOOL_EXEC(output_ptr->SetData(reinterpret_cast<uint8_t *>(buf.get()), data_size) != GRAPH_SUCCESS,
GELOGW("set data failed");
GELOGE(INTERNAL_ERROR, "set data failed");
return NOT_CHANGED);
output_ptr->MutableTensorDesc().SetShape(x_shape);
v_output.push_back(output_ptr);

@@ -129,7 +129,7 @@ Status SliceDKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Cons
auto output_tensor_desc = op_desc_ptr->GetOutputDesc(0);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "Failed to fold node %s, out of memory", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}
@@ -143,14 +143,8 @@ Status SliceDKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Cons
void *data = reinterpret_cast<void *>(const_cast<uint8_t *>(x_tensor->GetData().data()));
int64_t x_data_size = x_tensor->GetTensorDesc().GetShape().GetShapeSize();
Status ret = CheckOutputDims(size_list, op_desc_ptr);
if (ret != SUCCESS) {
return ret;
}
ret = OpUtils::SetOutputSliceData(data, x_data_size, x_data_type, x_dims, begin_list, size_list, output_ptr.get(),
stride_list);
Status ret = OpUtils::SetOutputSliceData(data, x_data_size, x_data_type, x_dims, begin_list, size_list,
output_ptr.get(), stride_list);
if (ret != SUCCESS) {
GELOGW("Set output data of SliceD failed.");
return NOT_CHANGED;
@@ -161,16 +161,5 @@ Status SliceDKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<Cons
return SUCCESS;
}
Status SliceDKernel::CheckOutputDims(const std::vector<int64_t> &output_dims, const OpDescPtr attr) {
// check dim not all less than 0
for (auto dim : output_dims) {
if (dim > 0) {
return SUCCESS;
}
}
GELOGW("all output dim <=0, can't be processed. op_name : %s", attr->GetName().c_str());
return NOT_CHANGED;
}
REGISTER_KERNEL(SLICED, SliceDKernel);
} // namespace ge

@@ -29,7 +29,6 @@ class SliceDKernel : public Kernel {
private:
Status SliceDCheck(const OpDescPtr &op_desc_ptr, const std::vector<ConstGeTensorPtr> &input,
std::vector<int64_t> &begin_list, std::vector<int64_t> &size_list);
Status CheckOutputDims(const std::vector<int64_t> &output_dims, const OpDescPtr attr);
};
} // namespace ge

@@ -21,8 +21,8 @@
#include "common/types.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
#include "graph/utils/type_utils.h"
#include "host_kernels/kernel_utils.h"
#include "graph/utils/type_utils.h"
#include "inc/kernel_factory.h"
namespace ge {

@@ -365,7 +365,7 @@ Status SsdPriorboxKernel::Compute(const NodePtr &node, std::vector<GeTensorPtr>
// make TensorDesc
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("Create shared ptr for GeTensor failed");
GELOGE(INTERNAL_ERROR, "Create shared ptr for GeTensor failed");
return NOT_CHANGED;
}
GE_IF_BOOL_EXEC(output_ptr->SetData(reinterpret_cast<uint8_t *>(output_data.get()),

@@ -46,31 +46,31 @@ Status StridedSliceKernel::CheckAndGetAttr(const OpDescPtr &attr, const std::vec
int64_t shrink_axis_mask = 0;
if (attr == nullptr) {
GELOGW("input opdescptr is nullptr.");
GELOGE(PARAM_INVALID, "input opdescptr is nullptr.");
return PARAM_INVALID;
}
if (input.size() != kStridedSliceInputSize) {
GELOGW("The number of input for strided slice must be %zu.", kStridedSliceInputSize);
GELOGE(PARAM_INVALID, "The number of input for strided slice must be %zu.", kStridedSliceInputSize);
return PARAM_INVALID;
}
if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_BEGIN_MASK, begin_mask)) {
GELOGW("get begin_mask attr failed.");
GELOGE(PARAM_INVALID, "get begin_mask attr failed.");
return PARAM_INVALID;
}
if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_END_MASK, end_mask)) {
GELOGW("get end_mask attr failed.");
GELOGE(PARAM_INVALID, "get end_mask attr failed.");
return PARAM_INVALID;
}
if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_ELLIPSIS_MASK, ellipsis_mask)) {
GELOGW("get ellipsis_mask attr failed.");
GELOGE(PARAM_INVALID, "get ellipsis_mask attr failed.");
return PARAM_INVALID;
}
if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_NEW_AXIS_MASK, new_axis_mask)) {
GELOGW("get new_axis_mask attr failed.");
GELOGE(PARAM_INVALID, "get new_axis_mask attr failed.");
return PARAM_INVALID;
}
if (!AttrUtils::GetInt(attr, STRIDE_SLICE_ATTR_SHRINK_AXIS_MASK, shrink_axis_mask)) {
GELOGW("get shrink_axis_mask attr failed.");
GELOGE(PARAM_INVALID, "get shrink_axis_mask attr failed.");
return PARAM_INVALID;
}
if ((ellipsis_mask != 0) || (new_axis_mask != 0)) {
@@ -98,7 +98,7 @@ Status StridedSliceKernel::CheckAndGetAttr(const OpDescPtr &attr, const std::vec
ConstGeTensorPtr weight2 = input[kStridedSliceInputIndex2];
ConstGeTensorPtr weight3 = input[kStridedSliceInputIndex3];
if (CheckWeight(weight0, weight1, weight2, weight3) != SUCCESS) {
GELOGW("Check And Get Attr failed.");
GELOGE(PARAM_INVALID, "Check And Get Attr failed.");
return PARAM_INVALID;
}
@@ -168,17 +168,6 @@ void StridedSliceKernel::GetOutputDims(uint32_t dims_size, const std::vector<int
}
}
Status StridedSliceKernel::CheckOutputDims(const std::vector<int64_t> &output_dims, const OpDescPtr attr) {
// check dim not all less than 0
for (auto dim : output_dims) {
if (dim > 0) {
return SUCCESS;
}
}
GELOGW("all output dim <=0, can't be processed. op_name : %s", attr->GetName().c_str());
return NOT_CHANGED;
}
Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<ge::ConstGeTensorPtr> &input,
vector<ge::GeTensorPtr> &v_output) {
GELOGI("StridedSliceKernel in.");
@@ -202,7 +191,7 @@ Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<g
const int32_t *end = reinterpret_cast<const int32_t *>(weight2->GetData().data());
const int32_t *stride = reinterpret_cast<const int32_t *>(weight3->GetData().data());
if ((begin == nullptr) || (end == nullptr) || (stride == nullptr)) {
GELOGW("input weight tensor is nullptr.");
GELOGE(PARAM_INVALID, "input weight tensor is nullptr.");
return NOT_CHANGED;
}
@@ -248,22 +237,16 @@ Status StridedSliceKernel::Compute(const ge::OpDescPtr attr, const std::vector<g
auto output_tensor_desc = attr->GetOutputDesc(0);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("MakeShared GeTensor failed, node name %s.", attr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "MakeShared GeTensor failed, node name %s.", attr->GetName().c_str());
return NOT_CHANGED;
}
void *data = reinterpret_cast<void *>(const_cast<uint8_t *>(weight0->GetData().data()));
GE_CHECK_NOTNULL(data);
ret = CheckOutputDims(output_dims, attr);
if (ret != SUCCESS) {
return ret;
}
ret = OpUtils::SetOutputSliceData(data, static_cast<int64_t>(data_size), args.data_type, input_dims, begin_vec,
output_dims, output_ptr.get(), stride_vec);
if (ret != SUCCESS) {
GELOGW("SetOutputSliceData failed.");
GELOGE(INTERNAL_ERROR, "SetOutputSliceData failed.");
return NOT_CHANGED;
}

@@ -44,7 +44,6 @@ class StridedSliceKernel : public Kernel {
int32_t &end_i, int32_t &dim_i) const;
void GetOutputDims(uint32_t dims_size, const std::vector<int64_t> &output_dims, const Attr &args,
vector<int64_t> &v_dims);
Status CheckOutputDims(const std::vector<int64_t> &output_dims, const OpDescPtr attr);
};
} // namespace ge
#endif // GE_GRAPH_PASSES_FOLDING_KERNEL_STRIDED_SLICE_KERNEL_H_

@@ -162,7 +162,7 @@ Status SubKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<ge:
auto output_tensor_desc = op_desc_ptr->GetOutputDesc(kSubFirstOutput);
GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
if (output_ptr == nullptr) {
GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
return NOT_CHANGED;
}

@@ -113,7 +113,7 @@ Status TransdataKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<C
return NOT_CHANGED;
}
if (formats::TransFormat(trans_args, trans_result) != SUCCESS) {
GELOGW("Failed to trans formats from %s to %s, shape %s to %s, data type %s",
GELOGE(INTERNAL_ERROR, "Failed to trans formats from %s to %s, shape %s to %s, data type %s",
TypeUtils::FormatToSerialString(src_format).c_str(), TypeUtils::FormatToSerialString(data_format).c_str(),
formats::ShapeToString(src_shape).c_str(), formats::ShapeToString(data_shape).c_str(),
TypeUtils::DataTypeToSerialString(src_data_type).c_str());

@@ -132,14 +132,14 @@ Status TransposeKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<C
return NOT_CHANGED;
}
if (!KernelUtils::CheckSizeForTransOp(const_weight_ptr, op_desc_ptr)) {
GELOGW("CheckSize failed, input size is not equal to weight size");
GELOGE(FAILED, "CheckSize failed, input size is not equal to weight size");
return NOT_CHANGED;
}
const uint8_t *src_data = const_weight_ptr->GetData().data();
formats::TransResult trans_result;
auto ret = formats::TransposeWithShapeCheck(src_data, src_shape, data_shape, src_data_type, perm_list, trans_result);
if (ret != SUCCESS) {
GELOGW("Failed to Transpose from %s to %s, shape %s to %s, perm_list %s, data type %s",
GELOGE(INTERNAL_ERROR, "Failed to Transpose from %s to %s, shape %s to %s, perm_list %s, data type %s",
TypeUtils::FormatToSerialString(src_format).c_str(), TypeUtils::FormatToSerialString(data_format).c_str(),
formats::ShapeToString(src_shape).c_str(), formats::ShapeToString(data_shape).c_str(),
formats::ShapeToString(perm_list).c_str(), TypeUtils::DataTypeToSerialString(src_data_type).c_str());
