!12016 remove unused logs

From: @jjfeing
Reviewed-by: @zhoufeng54, @chujinjin
Signed-off-by: @chujinjin
pull/12016/MERGE
Committed by mindspore-ci-bot via Gitee, 4 years ago
commit f9080da723

@@ -184,7 +184,6 @@ void SetNodeAttr(const std::shared_ptr<AnfNode> &anf_node, mindspore::NodeDef *p
ParseAttrValue(type, attr_name, value, node_attr);
}
}
MS_LOG(INFO) << "Set node attr end!";
}
void SetNodeInputs(const std::shared_ptr<AnfNode> &anf_node, mindspore::NodeDef *proto) {
@@ -256,7 +255,6 @@ void SetNodeOutputs(const std::shared_ptr<AnfNode> &anf_node, mindspore::NodeDef
void SetNodedefProto(const std::shared_ptr<AnfNode> &anf_node, mindspore::NodeDef *proto) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(proto);
MS_LOG(INFO) << "SetNodedefProto entry";
std::string op_name = AnfAlgo::GetCNodeName(anf_node);
if (op_name == kInitDataSetQueue) {
op_name = kInitData;
@@ -269,14 +267,12 @@ void SetNodedefProto(const std::shared_ptr<AnfNode> &anf_node, mindspore::NodeDe
SetNodeOutputs(anf_node, proto);
// set node attr
SetNodeAttr(anf_node, proto);
MS_LOG(INFO) << "SetNodedefProto end!";
}
bool CreateNodeDefBytes(const std::shared_ptr<AnfNode> &anf_node,
const std::shared_ptr<AicpuOpKernelMod> &kernel_mod_ptr) {
MS_EXCEPTION_IF_NULL(kernel_mod_ptr);
MS_EXCEPTION_IF_NULL(anf_node);
MS_LOG(INFO) << "CreateNodeDefBytes entry";
mindspore::NodeDef proto;
SetNodedefProto(anf_node, &proto);
@@ -286,7 +282,6 @@ bool CreateNodeDefBytes(const std::shared_ptr<AnfNode> &anf_node,
return false;
}
kernel_mod_ptr->SetNodeDef(nodeDefStr);
MS_LOG(INFO) << "CreateNodeDefBytes end!";
return true;
}
@@ -381,8 +376,6 @@ bool CreateExtInfo(const std::shared_ptr<AnfNode> &anf_node, const std::shared_p
return true;
}
MS_LOG(INFO) << "CreateExtInfo start, " << anf_node->fullname_with_scope();
uint64_t ext_info_head_len = kExtInfoHeadSize;
std::string ext_info;
size_t input_num = AnfAlgo::GetInputTensorNum(anf_node);
@@ -428,11 +421,11 @@ KernelModPtr AicpuOpBuild(const std::shared_ptr<AnfNode> &anf_node) {
kernel_mod_ptr->SetAnfNode(anf_node);
kernel_mod_ptr->SetNodeName(op_name);
if (!CreateNodeDefBytes(anf_node, kernel_mod_ptr)) {
MS_LOG(EXCEPTION) << "Create nodeDefBytes faild!";
MS_LOG(EXCEPTION) << "Create nodeDefBytes failed!";
}
if (!CreateExtInfo(anf_node, kernel_mod_ptr)) {
MS_LOG(EXCEPTION) << "Create nodeDefBytes faild!";
MS_LOG(EXCEPTION) << "Create nodeDefBytes failed!";
}
if (!SetIOSize(anf_node, kernel_mod_ptr)) {

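The hunks above strip per-call MS_LOG(INFO) lines from the AICPU kernel-build path while keeping the WARNING and EXCEPTION logs. A minimal, self-contained C++ sketch of severity-gated logging, with an illustrative LogLine class standing in for MindSpore's actual MS_LOG machinery:

#include <iostream>
#include <sstream>

enum class LogLevel { kDebug = 0, kInfo, kWarning, kError };

// Runtime floor: raising it silences noisy per-node INFO output
// without touching WARNING or ERROR paths.
static LogLevel g_log_floor = LogLevel::kWarning;

class LogLine {
 public:
  explicit LogLine(LogLevel level) : enabled_(level >= g_log_floor) {}
  ~LogLine() {
    if (enabled_) std::cerr << buf_.str() << std::endl;
  }
  template <typename T>
  LogLine &operator<<(const T &value) {
    if (enabled_) buf_ << value;
    return *this;
  }

 private:
  bool enabled_;
  std::ostringstream buf_;
};

int main() {
  LogLine(LogLevel::kInfo) << "Set node attr end!";            // filtered out
  LogLine(LogLevel::kError) << "Create nodeDefBytes failed!";  // printed
  return 0;
}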
@@ -74,7 +74,6 @@ bool KernelPack::ReadFromJsonFileHelper(std::ifstream &kernelbin) {
return false;
}
kernel_->len = binsize;
MS_LOG(INFO) << "kernel len:" << kernel_->len;
(void)kernelbin.seekg(0, std::ios::beg);
(void)kernelbin.read(kernel_->contents, SizeToLong(kernel_->len));
return true;
@@ -183,7 +182,6 @@ void KernelPack::ParseKernelJson(const nlohmann::json &js) {
}
std::vector<size_t> sizes = js.at("parameters");
for (auto size : sizes) {
MS_LOG(INFO) << "parameter " << size;
kernel_json_info_.parameters.push_back(size);
}
}
@@ -191,7 +189,6 @@ void KernelPack::ParseKernelJson(const nlohmann::json &js) {
auto workspace = js.at("workspace");
std::vector<size_t> sizes = workspace.at("size");
for (auto size : sizes) {
MS_LOG(INFO) << "workspace_size_list " << size;
kernel_json_info_.workspaces.push_back(size);
}
}
@@ -243,7 +240,6 @@ bool KernelPack::LoadKernelMeta(const std::string &json_f, const std::string &pr
return false;
}
MS_LOG(INFO) << "kernelbin_name:" << bin_f;
if (!ReadFromJsonFileHelper(kernelbin)) {
return false;
}

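ReadFromJsonFileHelper above sizes the kernel binary and reads it in one shot. The same seek/tell/read pattern in a self-contained sketch; ReadWholeFile is a hypothetical helper, not MindSpore code:

#include <fstream>
#include <string>
#include <vector>

// Read an entire binary file into memory; an empty vector signals failure.
std::vector<char> ReadWholeFile(const std::string &path) {
  std::ifstream in(path, std::ios::binary);
  if (!in.is_open()) return {};
  (void)in.seekg(0, std::ios::end);  // measure the file first
  std::streamoff size = in.tellg();
  if (size <= 0) return {};
  std::vector<char> buf(static_cast<size_t>(size));
  (void)in.seekg(0, std::ios::beg);  // then rewind and read it whole
  (void)in.read(buf.data(), size);
  return in ? buf : std::vector<char>{};
}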
@@ -52,7 +52,6 @@ static size_t GenFusionJsonHash(const nlohmann::json &fusion_json) {
}
std::map<int64_t, KernelModPtr> KernelFusion(const std::vector<FusionScopeInfo> &fusion_scopes) {
MS_LOG(INFO) << "kernel fusion build start, scope size:" << fusion_scopes.size();
std::map<int64_t, KernelModPtr> kernel_mod_ret;
auto build_manger = std::make_shared<ParallelBuildManager>();
MS_EXCEPTION_IF_NULL(build_manger);
@@ -65,7 +64,6 @@ std::map<int64_t, KernelModPtr> KernelFusion(const std::vector<FusionScopeInfo>
}
// gen kernel_name & check cache
size_t hash_id = GenFusionJsonHash(fusion_op);
MS_LOG(INFO) << "Fusion op hash id: " << hash_id;
auto context_ptr = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(context_ptr);
auto device_id = context_ptr->get_param<uint32_t>(MS_CTX_DEVICE_ID);
@@ -82,7 +80,6 @@ std::map<int64_t, KernelModPtr> KernelFusion(const std::vector<FusionScopeInfo>
// search cache
auto kernel_pack = TbeUtils::SearchCache(json_name, tbe::kProcessorAiCore);
if (kernel_pack != nullptr) {
MS_LOG(INFO) << "Use cached kernel, kernel json name: " << json_name;
auto kernel_mod =
build_manger->GenKernelMod(json_name, tbe::kProcessorAiCore, input_size_list, output_size_list, kernel_pack);
if (kernel_mod != nullptr) {

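KernelFusion above derives a cache key via GenFusionJsonHash and consults the compile cache before building. One plausible way to key a JSON blob, assuming nlohmann/json is available; the real GenFusionJsonHash may combine more state:

#include <functional>
#include <string>
#include <nlohmann/json.hpp>

// Hash the serialized fusion json; dump() produces the same string for
// equal objects, so identical fusion scopes map to identical keys.
size_t GenJsonHash(const nlohmann::json &fusion_json) {
  return std::hash<std::string>{}(fusion_json.dump());
}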
@@ -71,7 +71,7 @@ static std::string ImplTypeToStr(OpImplyType impl_type) {
case kAICPU:
return kAiCPU;
default:
return "unknow";
return "unknown";
}
}
bool OpLib::RegOp(const std::string &json_string, const std::string &impl_path) {
@@ -146,7 +146,7 @@ bool OpLib::RegOpFromLocalInfo() {
has_load = true;
std::string dir = common::GetEnv("MINDSPORE_OP_INFO_PATH");
if (dir.empty()) {
MS_LOG(INFO) << "MindSpore op info path does not been setted. use op info from python pass.";
MS_LOG(INFO) << "MindSpore op info path does not been set. use op info from python pass.";
return true;
}
char real_path[PATH_MAX] = {0};
@@ -220,7 +220,7 @@ bool OpLib::DecodeOpInfo(const nlohmann::json &obj, const mindspore::kernel::OpI
}
}
if (CheckRepetition(op_info)) {
MS_LOG(WARNING) << "This op info has been already registed. op name: " << op_info->op_name()
MS_LOG(WARNING) << "This op info has been already registered. op name: " << op_info->op_name()
<< ", impl type: " << ImplTypeToStr(op_info->imply_type())
<< ", impl path: " << op_info->impl_path();
return true;
@@ -273,7 +273,7 @@ bool OpLib::DecodeDtypeFormat(const nlohmann::json &dtype_format, const std::sha
op_io->set_dtypes(dtype);
op_io->set_formats(format);
} catch (const std::exception &e) {
MS_LOG(ERROR) << "DecodeDtypeFormat falied" << e.what();
MS_LOG(ERROR) << "DecodeDtypeFormat failed" << e.what();
ret = false;
}
return ret;
@@ -378,8 +378,6 @@ bool OpLib::GetRefInfo(const std::shared_ptr<OpInfo> &op_info) {
return false;
}
op_info->add_ref_pair(out_index, in_index);
MS_LOG(INFO) << "add ref info, op name is " << op_info->op_name() << ", outindex is " << out_index
<< ", in_index is " << in_index;
}
}
}

@@ -227,7 +227,6 @@ void TbeAdapter::CastAttrJsonPass(const mindspore::AnfNodePtr &anf_node,
attr_obj["valid"] = true;
attr_obj["name"] = attr_name;
attrs_json->push_back(attr_obj);
MS_LOG(INFO) << "CastAttrJsonPass done.";
}
void TbeAdapter::GenTopKV2IndicesTensorInfo(const std::shared_ptr<mindspore::AnfNode> &anf_node,

@@ -74,7 +74,6 @@ bool TbeDynamicShapeUtil::GetDynamicShapeAttr(const CNodePtr &cnode) {
MS_EXCEPTION_IF_NULL(cnode);
auto is_dynamic_shape = AnfAlgo::HasNodeAttr(kAttrIsDynamicShape, cnode);
if (!is_dynamic_shape) {
MS_LOG(INFO) << "Node(" << cnode->fullname_with_scope() << ") does not has is_dynamic_shape attribute.";
return false;
}
is_dynamic_shape = AnfAlgo::GetNodeAttr<bool>(cnode, kAttrIsDynamicShape);

@@ -97,7 +97,6 @@ constexpr auto kJIsDynamicShape = "is_dynamic_shape";
bool IsNeedChangeDefaultFormat(const CNodePtr &cnode) {
MS_EXCEPTION_IF_NULL(cnode);
MS_LOG(INFO) << "Check if need change default format";
if (AnfAlgo::HasNodeAttr("io_format", cnode->cast<CNodePtr>())) {
auto attr = AnfAlgo::GetNodeAttr<std::string>(cnode, "io_format");
return attr == kOpFormat_NCDHW;
@@ -149,8 +148,8 @@ bool TbeKernelJsonCreator::GenTbeSingleKernelJson(const std::shared_ptr<mindspor
(*kernel_json)[kJOpInfo] = op_info_json;
(*kernel_json)[kJFullName] = anf_node->fullname_with_scope();
MS_LOG(INFO) << "Operate type:" << creater_type_ << ", full scope name is :" << anf_node->fullname_with_scope()
<< ", json info name is : " << json_name_ << ", kernel json:" << kernel_json->dump();
MS_LOG(DEBUG) << "Operate type:" << creater_type_ << ", full scope name is :" << anf_node->fullname_with_scope()
<< ", json info name is : " << json_name_ << ", kernel json:" << kernel_json->dump();
return true;
}
@@ -465,26 +464,29 @@ bool TbeKernelJsonCreator::GenTbeAttrJson(const std::shared_ptr<AnfNode> &anf_no
string TbeKernelJsonCreator::GetSocVersion() {
// Get default soc version.
const int kSocVersionLen = 50;
char soc_version[kSocVersionLen] = {0};
auto ret = rtGetSocVersion(soc_version, kSocVersionLen);
if (ret != RT_ERROR_NONE) {
MS_LOG(EXCEPTION) << "GetSocVersion failed.";
}
MS_LOG(INFO) << "Default SocVersion is " << soc_version;
// Get soc version from env value.
const char *soc_version_env = getenv(kSOC_VERSION);
if (soc_version_env != nullptr) {
if (std::strcmp(soc_version, soc_version_env) != 0) {
MS_LOG(WARNING) << "SocVerison change to " << soc_version_env;
ret = rtSetSocVersion(soc_version_env);
if (ret != RT_ERROR_NONE) {
MS_LOG(EXCEPTION) << "SetSocVersion to " << soc_version_env << " failed, errorno: " << ret;
static std::string version;
if (version.empty()) {
const int kSocVersionLen = 50;
char soc_version[kSocVersionLen] = {0};
auto ret = rtGetSocVersion(soc_version, kSocVersionLen);
if (ret != RT_ERROR_NONE) {
MS_LOG(EXCEPTION) << "GetSocVersion failed.";
}
// Get soc version from env value.
const char *soc_version_env = getenv(kSOC_VERSION);
if (soc_version_env != nullptr) {
if (std::strcmp(soc_version, soc_version_env) != 0) {
MS_LOG(WARNING) << "SocVerison will be change.";
ret = rtSetSocVersion(soc_version_env);
if (ret != RT_ERROR_NONE) {
MS_LOG(EXCEPTION) << "SetSocVersion failed, errorno: " << ret;
}
return soc_version_env;
}
return soc_version_env;
}
version = soc_version;
}
return soc_version;
return version;
}
void TbeKernelJsonCreator::ParseAttrValue(const std::string &type, const mindspore::ValuePtr &value,
@@ -650,7 +652,6 @@ void GetInputSizeList(const nlohmann::json &input_json, std::vector<size_t> *inp
size_t size_i = 1;
if (input_json[i][m][kJValid] == false) {
std::string input_name = input_json[i][m][kJName];
MS_LOG(INFO) << "Input name:" << input_name << "is optional, valid is false.";
continue;
}
for (size_t j = 0; j < input_json[i][m][kJShape].size(); ++j) {
@@ -746,7 +747,7 @@ bool TbeKernelBuild::GenFusionScopeJson(const std::vector<mindspore::AnfNodePtr>
for (const auto &data_input : layer) {
nlohmann::json data_str;
if (!GenFusionDataInputJson(data_input, spec_data_input, &data_str, &index)) {
MS_LOG(INFO) << "Fusion error: gen fusion datainput json faild.";
MS_LOG(INFO) << "Fusion error: gen fusion data input json failed.";
return false;
}
data_list.push_back(data_str);
@@ -831,7 +832,7 @@ void TbeKernelBuild::GenSuffixDescJson(nlohmann::json *output_desc) {
(*output_desc)[kJValidShape] = nlohmann::json::array();
}
// anf_node: this node is used to get output desc(type\foramt\shape ...)
// anf_node: this node is used to get output desc(type\format\shape ...)
// node_out_idx: node output index
// desc_output_idx: this index use to add json
// nlohmann::json *output_desc: for return
@@ -912,7 +913,6 @@ bool TbeKernelBuild::GetSpecInputLayers(const std::string &op_name,
MS_LOG(INFO) << "Fusion error: node(" << op_name << " )'s input is null. ";
return false;
}
MS_LOG(INFO) << "Fusion info: op_name: " << op_name << "input layer size: " << reorder_layer.size();
if (op_name == kReluGradV2OpName) {
(*spec_data_input)[reorder_layer[0]] = kFusionReLUGradV2;
} else if (op_name == kAddNOpName) {
@@ -948,21 +948,15 @@ bool TbeKernelBuild::GetInputLayers(const std::vector<mindspore::AnfNodePtr> &in
MS_LOG(INFO) << "Fusion error: fusion compute node must be cnode";
return false;
}
MS_LOG(INFO) << "Fusion info: compute name: " << compute_node->fullname_with_scope();
for (size_t i = 1; i < ccompute_node->inputs().size(); ++i) {
auto input = ccompute_node->input(i);
auto find_iter = std::find(input_nodes.begin(), input_nodes.end(), input);
if (find_iter != input_nodes.end()) {
MS_LOG(INFO) << "Fusion info: add compute node's [" << i << "] input: " << input->fullname_with_scope();
layer.emplace_back((*find_iter));
} else {
MS_LOG(INFO) << "Fusion warning: this input [" << i << "] may be pre compute(" << input->fullname_with_scope()
<< ") node's output.";
}
}
TbeAdapter::FusionDataOrderPass(op_name, layer, &reorder_layer);
if (need_spec) {
MS_LOG(INFO) << "Fusion info: match conv2d backprop input + ... patten.";
if (!GetSpecInputLayers(op_name, reorder_layer, spec_data_input)) {
return false;
}
@@ -986,7 +980,6 @@ bool TbeKernelBuild::GenFusionDataInputJson(const std::shared_ptr<mindspore::Anf
std::vector<nlohmann::json> output_desc_list;
// if data_input is null, this is optional input.
if (!data_input) {
MS_LOG(INFO) << "Fusion info: data input is optional node";
auto name = std::string(kOptional) + std::to_string(*index);
(*data_str)[kJName] = name;
nlohmann::json output_desc;
@@ -1003,7 +996,6 @@ bool TbeKernelBuild::GenFusionDataInputJson(const std::shared_ptr<mindspore::Anf
auto kernel_idx = AnfAlgo::VisitKernel(data_input, 0);
auto real_node = kernel_idx.first;
size_t real_idx = kernel_idx.second;
MS_LOG(INFO) << "Fusion info: Real name: " << real_node->fullname_with_scope() << ". index:" << real_idx;
// kJOutputDesc
nlohmann::json output_desc;
GenDescJson(real_node, real_idx, real_idx, &output_desc, fusion_data_type);
@@ -1109,7 +1101,6 @@ bool TbeKernelBuild::GenFusionComputeInputJson(const mindspore::CNodePtr &cnode,
auto kernel_idx = AnfAlgo::VisitKernel(input, 0);
auto real_node = kernel_idx.first;
size_t real_idx = kernel_idx.second;
MS_LOG(INFO) << "Fusion info: real name: " << real_node->fullname_with_scope() << ". index:" << real_idx;
nlohmann::json input_desc;
GenDescJson(real_node, real_idx, real_idx, &input_desc);
if (is_dynamic_input) {
@@ -1122,7 +1113,6 @@ bool TbeKernelBuild::GenFusionComputeInputJson(const mindspore::CNodePtr &cnode,
size_t optional_num = GetOptionalInput(cnode, is_dynamic_input);
if (optional_num > 0) {
// 3. optional input
MS_LOG(INFO) << "Fusion info: node has optional input.";
for (size_t i = 0; i < optional_num; ++i) {
nlohmann::json optional_input_desc;
optional_input_desc[kJName] = std::string(kOptional) + std::to_string(*index);
@@ -1141,7 +1131,6 @@ std::vector<size_t> TbeKernelBuild::GetDescOutputIndex(const std::vector<int64_t
std::vector<size_t> desc_output_index = {};
for (size_t idx = 0; idx < output_used_nums.size(); ++idx) {
auto output_use_num_item = output_used_nums[idx];
MS_LOG(INFO) << "Fusion info: output used num[" << idx << "] = " << output_use_num_item;
desc_output_index.emplace_back(idx);
if (output_use_num_item > 1) {
desc_output_index.emplace_back(idx);
@@ -1156,7 +1145,6 @@ bool TbeKernelBuild::GenFusionComputeOutputJson(const mindspore::CNodePtr &cnode
auto output_size = AnfAlgo::GetOutputTensorNum(cnode);
if (AnfAlgo::HasNodeAttr(kAttrOutputUsedNum, cnode)) {
auto output_used_nums = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(cnode, kAttrOutputUsedNum);
MS_LOG(INFO) << "Fusion info: this node's output has been reused, node name: " << cnode->fullname_with_scope();
if (output_used_nums.size() != output_size) {
MS_LOG(INFO) << "Fusion error: output tenor num(" << output_size << ")"
<< " is not match output used num(" << output_used_nums.size() << ")";
@@ -1238,7 +1226,6 @@ bool TbeKernelBuild::GetIOSize(const nlohmann::json &fusion_op_list,
}
auto ret = GetIOSizeImpl(data_output);
input_size_list->push_back(ret);
MS_LOG(INFO) << "Fusion info: input node name " << op[kJName] << ", size: " << ret;
}
}
}
@@ -1248,13 +1235,11 @@ bool TbeKernelBuild::GetIOSize(const nlohmann::json &fusion_op_list,
auto real_node = kernel_idx.first;
size_t real_idx = kernel_idx.second;
auto full_name = real_node->fullname_with_scope();
MS_LOG(INFO) << "Fusion info: real output node name: " << full_name << ", real output index: " << real_idx;
for (const auto &op : fusion_op_list) {
if (op[kJName] == full_name) {
auto op_output_desces = op[kJOutputDesc];
if (output_node != real_node) {
// tuple_get item
MS_LOG(INFO) << "Fusion info: output is a tuple get_item node";
auto output_desc = op_output_desces[real_idx];
if (output_desc[kJShape].empty()) {
MS_LOG(INFO) << "Fusion error: output_desc's shape is empty. real_index " << real_idx;
@@ -1262,17 +1247,13 @@ bool TbeKernelBuild::GetIOSize(const nlohmann::json &fusion_op_list,
}
auto ret = GetIOSizeImpl(output_desc);
output_size_list->push_back(ret);
MS_LOG(INFO) << "Fusion info: scope output index " << real_idx << ", size: " << ret;
} else {
MS_LOG(INFO) << "Fusion info: output is self.";
for (const auto &output_desc : op_output_desces) {
if (output_desc[kJShape].empty()) {
MS_LOG(INFO) << "Fusion info: output_desc's shape is empty, may be this node output";
continue;
}
auto ret = GetIOSizeImpl(output_desc);
output_size_list->push_back(ret);
MS_LOG(INFO) << "Fusion info: scope output size: " << ret;
}
}
}

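The GetSocVersion hunk above moves the runtime query behind a function-local static, so rtGetSocVersion is consulted once per process instead of on every call. A reduced sketch of the pattern; ProbeSocVersion is a hypothetical stand-in for the driver call, and the env override is simplified (no rtSetSocVersion step):

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>

// Hypothetical stand-in for rtGetSocVersion.
static void ProbeSocVersion(char *buf, size_t len) { (void)std::snprintf(buf, len, "Ascend910"); }

const std::string &GetSocVersion() {
  static std::string version;  // computed once, then reused
  if (version.empty()) {
    char soc[50] = {0};
    ProbeSocVersion(soc, sizeof(soc));
    // Mirror the kSOC_VERSION override seen in the diff.
    const char *env = std::getenv("SOC_VERSION");
    version = (env != nullptr) ? env : soc;
  }
  return version;
}

Note that in the patch itself, the env-override branches return soc_version_env before version is assigned, so only the probed value is actually memoized; the sketch above caches both cases.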
@@ -47,7 +47,7 @@ bool TbeOpParallelBuild(const std::vector<AnfNodePtr> &anf_nodes) {
TbeKernelJsonCreator creator(SINGLE_BUILD);
if (!creator.GenTbeSingleKernelJson(anf_node, &kernel_json)) {
MS_LOG(ERROR) << "GenTbeSingleKernelJson failed";
TbeUtils::SaveJsonInfo(kernel_json["op_info"]["kernel_name"], kernel_json["op_info"].dump());
TbeUtils::SaveJsonInfo(kernel_json["op_info"]["kernel_name"], kernel_json.dump());
return false;
}
// get size
@@ -59,8 +59,6 @@ bool TbeOpParallelBuild(const std::vector<AnfNodePtr> &anf_nodes) {
auto IsDynamicShape = tbe::TbeDynamicShapeUtil::GetDynamicShapeAttr(anf_node);
if (build_manger->SearchInCache(json_name, processor, input_size_list, output_size_list, anf_node.get()) &&
!IsDynamicShape) {
MS_LOG(INFO) << "Node:" << anf_node->fullname_with_scope() << " Use cached kernel, kernel json name:."
<< json_name;
continue;
}
// same op not need build, but need wait build finish to set kernel mode
@@ -70,7 +68,7 @@ bool TbeOpParallelBuild(const std::vector<AnfNodePtr> &anf_nodes) {
}
(void)processed_kernel.insert(json_name);
// op build
TbeUtils::SaveJsonInfo(kernel_json["op_info"]["kernel_name"], kernel_json["op_info"].dump());
TbeUtils::SaveJsonInfo(kernel_json["op_info"]["kernel_name"], kernel_json.dump());
auto task_id = build_manger->StartCompileOp(kernel_json);
build_manger->SaveTaskInfo(task_id, anf_node, json_name, input_size_list, output_size_list);
}
@@ -207,7 +205,6 @@ bool ParallelBuildManager::SearchInCache(const std::string &json_name, const std
const std::vector<size_t> &output_size_list, mindspore::AnfNode *node) const {
auto cached_kernel_pack = TbeUtils::SearchCache(json_name, processor);
if (cached_kernel_pack != nullptr) {
MS_LOG(INFO) << "Find cached kernel, kernel json name" << json_name;
auto kernel_mod_ptr = GenKernelMod(json_name, processor, input_size_list, output_size_list, cached_kernel_pack);
MS_EXCEPTION_IF_NULL(kernel_mod_ptr);
AnfAlgo::SetKernelMod(kernel_mod_ptr, node);

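Both SaveJsonInfo call sites above switch from dumping only the "op_info" subtree to dumping the whole kernel_json. With nlohmann/json the difference is just which node dump() is invoked on; a small illustration (field values are invented):

#include <iostream>
#include <nlohmann/json.hpp>

int main() {
  nlohmann::json kernel_json;
  kernel_json["op_info"]["kernel_name"] = "te_relu_1";
  kernel_json["full_name"] = "Default/ReLU-op1";  // illustrative sibling field

  std::cout << kernel_json["op_info"].dump() << "\n";  // subtree only: {"kernel_name":"te_relu_1"}
  std::cout << kernel_json.dump() << "\n";             // whole document, both fields
  return 0;
}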
@@ -34,7 +34,6 @@ bool TbeKernelBroadCastSelecter::GetShapeInfo(SupportFormat *support_format) {
input_shapes_.clear();
output_shapes_.clear();
if (AnfAlgo::HasNodeAttr(kAttrDynInputSizes, cnode_ptr_)) {
MS_LOG(INFO) << "This broadcast node has dynamic input.";
auto dynamic_size_vec = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(cnode_ptr_, kAttrDynInputSizes);
if (dynamic_size_vec.empty() || dynamic_size_vec[0] < 2) {
MS_LOG(EXCEPTION) << "dynamic attr set error, please check.";

@@ -57,15 +57,12 @@ void TbeKernelSelect::TbeMetadataInfoEx() {
auto op_info_ptr = tbe::TbeDynamicShapeUtil::FindOp(node_name_, cnode_ptr_);
if (!op_info_ptr) {
MS_LOG(INFO) << "Warning: Cann't find tbe core opinfo, node type: " << node_name_;
return;
}
if (!TbePropertyChecker::CheckTbeProperties(cnode_ptr_)) {
MS_LOG(INFO) << "Warning: node(" << cnode_ptr_->fullname_with_scope() << ") not support tbe aicore.";
return;
}
MS_LOG(INFO) << "Start to tbe metadata info. node type: " << node_name_
<< ", node name: " << cnode_ptr_->fullname_with_scope();
OpPattern pattern = op_info_ptr->op_pattern();
if (pattern == kCommonPattern) {
GetCommonPatternKernelInfo(*op_info_ptr);
@@ -82,11 +79,9 @@ void TbeKernelSelect::TbeMetadataInfoEx() {
}
// check support
FilterInVaildKernelInfo(*op_info_ptr);
MS_LOG(INFO) << "End get kernel build info size: " << kernel_info_list_->size() << ", after tbe select.";
}
void TbeKernelSelect::GetCommonPatternKernelInfo(const OpInfo &op_info) {
MS_LOG(INFO) << "start.";
// get dynamic inputs
auto primitive = AnfAlgo::GetCNodePrimitive(cnode_ptr_);
MS_EXCEPTION_IF_NULL(primitive);
@@ -132,20 +127,15 @@ void TbeKernelSelect::GetCommonPatternKernelInfo(const OpInfo &op_info) {
builder.SetOutputsReshapeType(outputs_reshape_type);
kernel_info_list_->emplace_back(builder.Build());
}
MS_LOG(INFO) << "end.";
}
void TbeKernelSelect::GetDynamicFormatPatternKernelInfo(const OpInfo &op_info) {
MS_LOG(INFO) << "start.";
//
OpInfo op_info_new;
CreateNewOpInfo(op_info, &op_info_new);
GetCommonPatternKernelInfo(op_info_new);
MS_LOG(INFO) << "end.";
}
void TbeKernelSelect::GetAgnosticPatternKernelInfo(const OpInfo &op_info) {
MS_LOG(INFO) << "start.";
if (op_info.inputs_ptr().size() != 1) {
MS_LOG(EXCEPTION) << "AgnosticPattern only support one input.";
}
@@ -161,62 +151,36 @@ void TbeKernelSelect::GetAgnosticPatternKernelInfo(const OpInfo &op_info) {
output_item.assign(op_info.outputs_ptr().size(), format);
support_format.input_format.emplace_back(input_item);
support_format.output_format.emplace_back(output_item);
PrintSupportedFormat(support_format);
OpInfo op_info_new;
CreateNewOpInfo(op_info, support_format, &op_info_new);
GetCommonPatternKernelInfo(op_info_new);
MS_LOG(INFO) << "end.";
}
void TbeKernelSelect::GetBroadcastPatternKernelInfo(const OpInfo &op_info) {
MS_LOG(INFO) << "start.";
auto broadcast_selecter = TbeKernelBroadCastSelecter(cnode_ptr_);
SupportFormat support_format;
broadcast_selecter.GetShapeInfo(&support_format);
if (!broadcast_selecter.IsBroadCastSupport5HD(&support_format)) {
MS_LOG(INFO) << "Node(" << node_name_ << ") does not support 5HD.";
}
if (!broadcast_selecter.IsBroadCastSupportFracZ(&support_format)) {
MS_LOG(INFO) << "Node(" << node_name_ << ") does not support FracZ.";
}
if (!broadcast_selecter.IsBroadCastSupportC1HWNCoC0(&support_format)) {
MS_LOG(INFO) << "Node(" << node_name_ << ") does not support C1HWNCoC0.";
}
if (!broadcast_selecter.IsBroadCastSupportFracNZ(&support_format)) {
MS_LOG(INFO) << "Node(" << node_name_ << ") does not support FracNZ.";
}
if (!broadcast_selecter.IsBroadCastSupportNDC1HWC0(&support_format)) {
MS_LOG(INFO) << "Node(" << node_name_ << ") does not support NDC1HWC0.";
}
PrintSupportedFormat(support_format);
(void)broadcast_selecter.IsBroadCastSupport5HD(&support_format);
(void)broadcast_selecter.IsBroadCastSupportFracZ(&support_format);
(void)broadcast_selecter.IsBroadCastSupportC1HWNCoC0(&support_format);
(void)broadcast_selecter.IsBroadCastSupportFracNZ(&support_format);
(void)broadcast_selecter.IsBroadCastSupportNDC1HWC0(&support_format);
OpInfo op_info_new;
CreateNewOpInfo(op_info, support_format, &op_info_new);
GetCommonPatternKernelInfo(op_info_new);
MS_LOG(INFO) << "end.";
}
void TbeKernelSelect::GetReducePatternKernelInfo(const OpInfo &op_info) {
MS_LOG(INFO) << "start.";
auto reduce_selecter = TbeKernelReduceSelecter(cnode_ptr_);
SupportFormat support_format;
reduce_selecter.GetShapeInfo(&support_format);
if (!reduce_selecter.IsReduceSupport5HD(&support_format)) {
MS_LOG(INFO) << "Node (" << node_name_ << ") reduce not support 5HD.";
}
if (reduce_selecter.IsReduceSupportFracZ(&support_format)) {
MS_LOG(INFO) << "Node (" << node_name_ << ") reduce not support FracZ.";
}
if (reduce_selecter.IsReduceSupportC1HWNCoC0(&support_format)) {
MS_LOG(INFO) << "Node (" << node_name_ << ") reduce not support C1HWNCoC0.";
}
if (reduce_selecter.IsReduceSupportFracNZ(&support_format)) {
MS_LOG(INFO) << "Node (" << node_name_ << ") reduce not support FracNZ.";
}
PrintSupportedFormat(support_format);
(void)reduce_selecter.IsReduceSupport5HD(&support_format);
(void)reduce_selecter.IsReduceSupportFracZ(&support_format);
(void)reduce_selecter.IsReduceSupportC1HWNCoC0(&support_format);
(void)reduce_selecter.IsReduceSupportFracNZ(&support_format);
OpInfo op_info_new;
CreateNewOpInfo(op_info, support_format, &op_info_new);
GetCommonPatternKernelInfo(op_info_new);
MS_LOG(INFO) << "end.";
}
void TbeKernelSelect::FilterInVaildKernelInfo(const OpInfo &op_info) {
@@ -227,12 +191,10 @@ void TbeKernelSelect::FilterInVaildKernelInfo(const OpInfo &op_info) {
std::vector<std::shared_ptr<KernelBuildInfo>> new_kernel_info_list;
for (auto iter = kernel_info_list_->begin(); iter != kernel_info_list_->end(); ++iter) {
if (!FilterInVaildShape(iter)) {
MS_LOG(INFO) << "Filter invaild shape, filter item info: " << (*iter)->ToString();
continue;
}
if (op_info.need_check_supported()) {
if (!TbeCheckSupported(iter)) {
MS_LOG(INFO) << "Check support shape, filter item info: " << (*iter)->ToString();
continue;
}
}
@@ -249,7 +211,6 @@ bool TbeKernelSelect::FilterInVaildShape(
auto shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode_ptr_, i);
const auto &format = kernel_build_info_inputs_format[i];
if (!IsShapeMatchFormat(shape, format)) {
MS_LOG(INFO) << "The " << i << "th input check failed.";
return false;
}
}
@@ -258,7 +219,6 @@
auto shape = AnfAlgo::GetOutputInferShape(cnode_ptr_, j);
const auto &format = kernel_build_info_outputs_format[j];
if (!IsShapeMatchFormat(shape, format)) {
MS_LOG(INFO) << "The " << j << "th input check failed.";
return false;
}
}
@@ -286,14 +246,12 @@ bool TbeKernelSelect::IsShapeMatchFormat(const std::vector<size_t> &shape, const
// not support format:
// 1 NCDHW with shape size != 5
if (format == kOpFormat_NCDHW && shape.size() != kShape5dDims) {
MS_LOG(INFO) << "Warning: Shape format check failed, format: " << format << ", size: " << shape.size();
return false;
}
return true;
}
bool TbeKernelSelect::TbeCheckSupported(const KernelBuildInfoIter &kernel_build_info_iter) {
MS_LOG(INFO) << "Check support start.";
MS_EXCEPTION_IF_NULL((*kernel_build_info_iter));
// replace kernel_info with current kernel info
auto kernel_build_info_tmp = AnfAlgo::GetSelectKernelBuildInfo(cnode_ptr_);
@@ -369,7 +327,7 @@ bool TbeKernelSelect::GenBuilderItem(bool is_input, size_t kernel_build_info_ind
real_io_tensor_index += real_io_tensor_num;
}
} else if (io_param_type == kParamTypeRequre || io_param_type == kParamTypeOptional) {
// requre or optional io
// require or optional io
device_types->emplace_back(tbe::DtypeToTypeId(kernel_build_info_dtype));
formats->emplace_back(kernel_build_info_format);
reshape_types->emplace_back(reshape_type);
@@ -378,10 +336,7 @@ bool TbeKernelSelect::GenBuilderItem(bool is_input, size_t kernel_build_info_ind
MS_LOG(EXCEPTION) << "op info's param type is not match: " << io_param_type;
}
}
if (io_info_index != io_info_num) {
MS_LOG(INFO) << "Warning: io_info_index(" << io_info_index << ") != io_info_num(" << io_info_num
<< "), this node may has optional input/output.";
}
if (real_io_tensor_index != real_io_tensor_num) {
std::string io_type = is_input ? "inputs " : "outputs";
MS_LOG(INFO) << node_name_ << "'s " << io_type << "op io info num: " << io_info_num
@@ -484,7 +439,6 @@ std::string TbeKernelSelect::OpSelectFormat() {
if (res_json_str.find("TBEException") != std::string::npos) {
MS_LOG(EXCEPTION) << "Dynamic op select failed: " << res_json_str << ", input args: " << kernel_json.dump();
}
MS_LOG(INFO) << "Dynamic select foramt response result:" << res_json_str;
return res_json_str;
}

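In the broadcast and reduce pattern hunks above, the per-format "does not support" INFO branches collapse into bare calls whose boolean results are discarded with a (void) cast; the SupportFormat out-parameter already records everything the caller needs. A minimal sketch of the idiom (the types here are invented):

#include <string>
#include <vector>

struct SupportFormat { std::vector<std::string> formats; };

// Records support in *sf and also reports it; some callers only want the side effect.
bool IsSupport5HD(SupportFormat *sf) {
  sf->formats.push_back("NC1HWC0");
  return true;
}

void CollectFormats(SupportFormat *sf) {
  // (void) documents that the return value is deliberately unused and
  // silences [[nodiscard]]-style diagnostics.
  (void)IsSupport5HD(sf);
}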
@@ -76,8 +76,6 @@ void TbeUtils::LoadCache() {
KernelMeta *bin_map = KernelMeta::GetInstance();
if (bin_map != nullptr && !bin_map->ReadIndex(kCceKernelMeta)) {
MS_LOG(INFO) << "Cache initialize failed[" << kCceKernelMeta << "]";
} else {
MS_LOG(INFO) << "Cache initialize to " << kCceKernelMeta;
}
has_load = true;
}
@@ -214,7 +212,6 @@ bool KernelMeta::ReadIndex(const std::string &bin_dir) {
}
(void)closedir(dir);
MS_LOG(INFO) << "Cache kernel initialized, kernel size: " << kernel_index_map_.size();
return true;
}
@@ -223,7 +220,6 @@ KernelPackPtr KernelMeta::GetKernelPack(const std::string &kernel_name, const st
// 1. pack has been created
auto kernel_pack_iter = kernel_pack_map_.find(kernel_name);
if (kernel_pack_iter != kernel_pack_map_.end()) {
MS_LOG(INFO) << "kernel pack [" << kernel_name << "]has been created.";
ret = kernel_pack_iter->second;
} else {
// 2. kernel file has been create, but pack does not been created.
@@ -237,7 +233,7 @@ KernelPackPtr KernelMeta::GetKernelPack(const std::string &kernel_name, const st
kernel_pack_map_[kernel_name] = ret;
auto iter = kernel_index_map_.find(kernel_name);
if (iter == kernel_index_map_.end()) {
MS_LOG(INFO) << "kernel name [" << kernel_name << "] has been ceated first.";
MS_LOG(INFO) << "kernel name [" << kernel_name << "] has been created first.";
kernel_index_map_[kernel_name] = cce_json;
}
}

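GetKernelPack above checks kernel_pack_map_ first and only constructs a pack on a miss, recording it for later lookups. The same find-or-insert shape in a few self-contained lines; KernelPack and the json path are stand-ins for the real types:

#include <map>
#include <memory>
#include <string>

struct KernelPack { std::string json_path; };
using KernelPackPtr = std::shared_ptr<KernelPack>;

static std::map<std::string, KernelPackPtr> kernel_pack_map;

KernelPackPtr GetPack(const std::string &kernel_name) {
  auto it = kernel_pack_map.find(kernel_name);
  if (it != kernel_pack_map.end()) return it->second;  // 1. pack already created
  auto pack = std::make_shared<KernelPack>();          // 2. create it the first time
  pack->json_path = kernel_name + ".json";
  kernel_pack_map[kernel_name] = pack;
  return pack;
}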
@@ -833,7 +833,6 @@ std::vector<CNodePtr> AscendControlParser::RecurseGraph(NotNull<KernelGraphPtr>
}
}
graph->set_execution_order(execution_order);
graph->PrintGraphExecuteOrder();
return execution_order;
}

@@ -124,9 +124,7 @@ void SetStreamDistinctionLabel(const KernelGraphPtr &graph, uint32_t label, bool
std::vector<CNodePtr> GetCNodes(const std::vector<AnfNodePtr> &anf_nodes) {
std::vector<CNodePtr> cnodes = {};
size_t i = 0;
for (const auto &anf : anf_nodes) {
MS_LOG(INFO) << "Apply_list[" << i++ << "] = " << anf->DebugString();
MS_EXCEPTION_IF_NULL(anf);
if (anf->isa<CNode>()) {
cnodes.push_back(anf->cast<CNodePtr>());
@@ -1387,7 +1385,6 @@ void AscendSession::RecurseSelectKernelInfo(NotNull<KernelGraphPtr> graph,
} else if (status == device::ascend::kStatusReducePrecision) {
(*reduce_precision_count)++;
}
MS_LOG(INFO) << "Select ApplyKernel: " << cnode->DebugString();
}
auto context_ptr = MsContext::GetInstance();

@@ -1514,7 +1514,6 @@ void SessionBasic::AddParameterToGraphInputs(const std::vector<AnfNodePtr> &para
MS_LOG(INFO) << "Can't find parameter:" << parameter->DebugString();
continue;
}
MS_LOG(INFO) << "Graph[" << graph->graph_id() << "],parameter:" << parameter->DebugString();
graph_inputs->push_back(backend_parameter);
}
}

@@ -117,11 +117,6 @@ uint8_t *AscendMemoryManager::MallocStaticMem(size_t size, bool communication_me
node->AddStaticMemorySize(align_size);
}
auto device_mem_pool_offset = AscendMemoryPool::GetInstance().device_mem_pool_offset();
MS_LOG(INFO) << "Malloc Memory: Static, total[" << device_mem_size_ << "] (dynamic[" << total_dynamic_size_
<< "] memory pool[" << device_mem_size_ - device_mem_pool_offset << "])"
<< " malloc [" << align_size << "] communication_mem: " << communication_mem;
if (communication_mem) {
// create protect area [kMemAlignSize -- data -- kMemAlignSize]
uint8_t *alloc_address = reinterpret_cast<uint8_t *>(AscendMemoryPool::GetInstance().AllocTensorMem(align_size));

@@ -138,9 +138,8 @@ void FeedTeOpConstTensor(const NotNull<CNodePtr> &cnode, const std::map<uint32_t
void OpTilingCalculater::Init() {
MS_LOG(INFO) << "Start init OpTilingCalculater";
tiling_func_map_ = optiling::OpTilingRegistryInterf::RegisteredOpInterf();
MS_LOG(INFO) << "tiling_func_map_ size:" << tiling_func_map_.size();
for (const auto &iter : tiling_func_map_) {
MS_LOG(INFO) << "Register tiling func:" << iter.first;
if (tiling_func_map_.empty()) {
MS_LOG(EXCEPTION) << "Get register tiling func failed.";
}
}

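OpTilingCalculater::Init above stops logging every registered tiling function and instead fails fast when the registry comes back empty. The same check against a generic function map; RegisteredOpInterf is stubbed here, not the real optiling interface:

#include <functional>
#include <map>
#include <stdexcept>
#include <string>

using TilingFunc = std::function<int(int)>;

// Stand-in for optiling::OpTilingRegistryInterf::RegisteredOpInterf().
static std::map<std::string, TilingFunc> RegisteredOpInterf() { return {}; }

void Init() {
  auto tiling_func_map = RegisteredOpInterf();
  // One emptiness check replaces a per-entry INFO log.
  if (tiling_func_map.empty()) {
    throw std::runtime_error("Get register tiling func failed.");
  }
}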
@@ -171,7 +171,6 @@ static bool IsAtomicNode(const CNodePtr &kernel_node) {
size_t workspace_num = kernel_mod->GetWorkspaceSizeList().size();
size_t param_num = parameters_indexs.size();
size_t total_num = input_num + workspace_num + output_num;
MS_LOG(INFO) << "parameters size: " << param_num << ", input & workspace & output num: " << total_num;
size_t pad_index = param_num;
for (; pad_index < total_num; ++pad_index) {
parameters_indexs.emplace_back(0);
@@ -179,7 +178,7 @@ static bool IsAtomicNode(const CNodePtr &kernel_node) {
// process input
for (size_t j = 0; j < input_num; ++j) {
if (parameters_indexs.at(j) == 1) {
MS_LOG(EXCEPTION) << "Atomic addr clean does't support clean input address, input index: " << j;
MS_LOG(EXCEPTION) << "Atomic addr clean doesn't support clean input address, input index: " << j;
}
}
// process output

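IsAtomicNode above pads parameters_indexs with zeros until it covers input + workspace + output entries. The emplace_back loop is equivalent to a resize with a fill value; a tiny sketch with invented sizes:

#include <vector>

int main() {
  std::vector<int> parameters_indexs = {1, 0, 1};  // illustrative attr values
  const size_t total_num = 6;                      // input + workspace + output
  // Same effect as the pad_index loop in the hunk above.
  if (parameters_indexs.size() < total_num) {
    parameters_indexs.resize(total_num, 0);
  }
  return 0;
}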
@@ -363,8 +363,6 @@ void KernelRuntime::AssignStaticMemoryInput(const session::KernelGraph *graph) {
if (mem_manager_->MallocMem(kStaticMem, tensor_size, device_address, graph->graph_id()) == nullptr) {
MS_LOG(EXCEPTION) << "Cannot alloc address when flag is: " << kStaticMem << ", tensor size is: " << tensor_size;
}
MS_LOG(INFO) << "Malloc Input for graph " << graph->graph_id() << ", node: " << item->fullname_with_scope()
<< " index: " << index << " size: " << tensor_size;
AnfAlgo::SetOutputAddr(device_address, index, item.get());
}
}
@@ -419,9 +417,9 @@ void KernelRuntime::UpdateRefNodeOutputMem(const session::KernelGraph *graph) {
MS_EXCEPTION_IF_NULL(origin_node_output_addr);
auto cur_node_output_addr = AnfAlgo::GetMutableOutputAddr(kernel, i);
if (origin_node_output_addr.get() != cur_node_output_addr.get()) {
MS_LOG(INFO) << "REF address is not same, ref node output need address update";
MS_LOG(INFO) << "REF origin op is " << origin_pair.first->DebugString() << ", output index is "
<< origin_pair.second << ", cur op is " << kernel->DebugString() << ", out index is " << i;
MS_LOG(DEBUG) << "REF address is not same, ref node output need address update";
MS_LOG(DEBUG) << "REF origin op is " << origin_pair.first->DebugString() << ", output index is "
<< origin_pair.second << ", cur op is " << kernel->DebugString() << ", out index is " << i;
AnfAlgo::SetOutputAddr(origin_node_output_addr, i, kernel.get());
}
}
@@ -597,7 +595,6 @@ void KernelRuntime::AssignNodeOutputMem(MemType type, const AnfNodePtr &node, in
MS_EXCEPTION_IF_NULL(kernel_mod);
auto output_sizes = kernel_mod->GetOutputSizeList();
if (output_sizes.empty()) {
MS_LOG(INFO) << "This kernel[" << node->DebugString() << "] has no output size.";
return;
}
for (size_t i = 0; i < output_sizes.size(); ++i) {
@@ -679,7 +676,7 @@ void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) {
for (auto &value_node : graph->graph_value_nodes()) {
MS_EXCEPTION_IF_NULL(value_node);
if (NodeOutputDeviceAddressExist(value_node, 0)) {
MS_LOG(INFO) << "value_node[" << value_node->DebugString() << "] address already exist";
MS_LOG(DEBUG) << "value_node[" << value_node->DebugString() << "] address already exist";
continue;
}
auto &node_value = value_node->value();
