!8854 Fix some redundant codes

From: @chenfei52
Reviewed-by: 
Signed-off-by:
pull/8854/MERGE
mindspore-ci-bot, 4 years ago, committed by Gitee
commit 617c175a98

@@ -872,9 +872,6 @@ void AscendSession::CreateMultiBranchOutput(NotNull<KernelGraphPtr> graph, NotNu
     if (memo->find(child_graph) != memo->end()) {
       continue;
     }
-    if (child_graph->get_output_null()) {
-      continue;
-    }
     AscendControlParser::InsertMultipleAssignToGraph(NOT_NULL(child_graph), nullptr,
                                                      NOT_NULL(child_graph->output()), NOT_NULL(output_param));
   }

@@ -69,9 +69,6 @@ std::vector<AnfNodePtr> GetCallRealOutputs(const AnfNodePtr &call_node) {
   std::vector<AnfNodePtr> real_inputs;
   auto child_graphs = AnfAlgo::GetCallSwitchKernelGraph(node->cast<CNodePtr>());
   for (const auto &child_graph : child_graphs) {
-    if (child_graph->get_output_null()) {
-      continue;
-    }
     auto real_input = child_graph->output();
     auto child_real_inputs = GetCallRealOutputs(real_input);
     std::copy(child_real_inputs.begin(), child_real_inputs.end(), std::back_inserter(real_inputs));

@@ -38,13 +38,7 @@ namespace session {
 using AnfWithOutIndex = std::pair<AnfNodePtr, size_t>;
 class KernelGraph : public FuncGraph {
  public:
-  KernelGraph()
-      : graph_id_(0),
-        start_label_(nullptr),
-        end_goto_(nullptr),
-        null_output_(false),
-        current_epoch_(0),
-        is_dynamic_shape_(false) {
+  KernelGraph() : graph_id_(0), start_label_(nullptr), end_goto_(nullptr), current_epoch_(0), is_dynamic_shape_(false) {
     inputs_ = std::make_shared<std::vector<AnfNodePtr>>();
     execution_order_ = {};
     executable_ = true;
@@ -75,7 +69,6 @@ class KernelGraph : public FuncGraph {
     parent_graph_ = graph.parent_graph_;
     start_label_ = graph.start_label_;
     end_goto_ = graph.end_goto_;
-    null_output_ = graph.null_output_;
     front_to_internal_outputs_map_ = graph.front_to_internal_outputs_map_;
     internal_outputs_to_front_map_ = graph.internal_outputs_to_front_map_;
     internal_outputs_tensor_map_ = graph.internal_outputs_tensor_map_;
@@ -190,8 +183,6 @@ class KernelGraph : public FuncGraph {
   CNodePtr get_start_label() { return start_label_; }
   void set_end_goto(const CNodePtr &end_goto) { end_goto_ = end_goto; }
   CNodePtr get_end_goto() { return end_goto_; }
-  bool get_output_null() { return null_output_; }
-  void set_output_null(bool is_output_null) { null_output_ = is_output_null; }
   void PrintGraphExecuteOrder() const;
   const std::map<std::string, std::pair<AnfNodePtr, int>> &summary_nodes() const { return summary_nodes_; }
   void set_summary_nodes(const std::map<std::string, std::pair<AnfNodePtr, int>> &nodes) { summary_nodes_ = nodes; }
@@ -343,7 +334,6 @@ class KernelGraph : public FuncGraph {
   CNodePtr start_label_;
   CNodePtr end_goto_;
-  bool null_output_{false};
   std::unordered_map<AnfNodePtr, AnfNodePtr> front_to_internal_outputs_map_;
   std::unordered_map<AnfNodePtr, std::unordered_map<int, std::pair<AnfNodePtr, bool>>> internal_outputs_to_front_map_;
   std::unordered_map<AnfNodePtr, std::unordered_map<int, tensor::TensorPtr>> internal_outputs_tensor_map_;

@@ -20,25 +20,22 @@
 #include "c_ops/primitive_c.h"
 #include "ir/manager.h"
-#include "ir/param_info.h"
 #include "backend/kernel_compiler/common_utils.h"
 #include "base/core_ops.h"
 #include "common/trans.h"
 #include "utils/config_manager.h"
 #include "backend/session/anf_runtime_algorithm.h"
-#include "backend/session/executor.h"
 #include "backend/session/executor_manager.h"
 #include "backend/optimizer/common/common_backend_optimization.h"
 #include "backend/optimizer/common/helper.h"
 #include "runtime/device/kernel_runtime_manager.h"
 #include "utils/ms_utils.h"
-#include "ir/dtype.h"
 #include "ir/anf.h"
 #include "ir/func_graph_cloner.h"
 #include "utils/utils.h"
+#include "debug/anf_ir_dump.h"
 #if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
 #include "ps/worker.h"
-#include "ps/common.h"
 #include "ps/util.h"
 #endif
@@ -665,8 +662,8 @@ std::vector<AnfNodePtr> SessionBasic::CreateSwitchOrPartialNode(const CNodePtr &
   MS_EXCEPTION_IF_NULL(attr_input);
   auto cnode_input = graph->GetBackendAnfByFrontAnf(attr_input);
   if (cnode_input == nullptr) {
-    MS_LOG(EXCEPTION) << "CNode input[0] is CNode:" << attr_input->DebugString()
-                      << ", but input[0] has not been created.";
+    MS_LOG(ERROR) << "CNode input[0] is CNode:" << attr_input->DebugString() << ", but input[0] has not been created.";
+    return {};
   }
   // if the node is partial, insert the inputs of partial to the call
   if (AnfAlgo::CheckPrimitiveType(cnode_input, prim::kPrimPartial)) {
@@ -682,7 +679,9 @@ std::vector<AnfNodePtr> SessionBasic::CreateSwitchOrPartialNode(const CNodePtr &
   } else if (AnfAlgo::CheckPrimitiveType(cnode_input, prim::kPrimSwitch)) {
     return CreateCallSwitchInputs(cnode, graph);
   }
-  MS_LOG(EXCEPTION) << "CNode input[0] must be partial or switch.";
+  MS_LOG(ERROR) << "CNode:" << cnode->DebugString() << " input[0]" << cnode_input->DebugString()
+                << "must be partial or switch.";
+  return {};
 }

 std::vector<AnfNodePtr> SessionBasic::CreateValueNode(const CNodePtr &cnode, KernelGraph *graph) {
@@ -752,6 +751,10 @@ CNodePtr SessionBasic::CreateNewCNode(CNodePtr cnode, KernelGraph *graph) {
     // 1. take the args of call to the partial node, as the real_args to call switch's or switch_layer's child graph
     // 2. the call in frontend is map to the partial/switch/switch_layer in backend and haven't been created
     cnode_inputs = CreateSwitchOrPartialNode(cnode, graph);
+    if (cnode_inputs.empty()) {
+      MS_LOG_ERROR << "Create switch or partial failed, cnode:" << cnode->DebugString();
+      return nullptr;
+    }
   } else {
     // get primitive of old node
     auto prim = AnfAlgo::GetCNodePrimitive(cnode);
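
The two CreateSwitchOrPartialNode hunks and the CreateNewCNode hunk above share one pattern: the helper no longer aborts with MS_LOG(EXCEPTION) but logs an error and returns an empty result, and its caller checks for that result and propagates failure as nullptr. Below is a minimal, self-contained C++ sketch of that pattern; the names (CreateInputs, CreateNode) are illustrative placeholders, not MindSpore APIs.

// Sketch only: placeholder types and names, not the MindSpore implementation.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Helper that used to throw: now it logs and returns an empty vector on failure.
std::vector<std::string> CreateInputs(bool input_created) {
  if (!input_created) {
    std::cerr << "input[0] has not been created" << std::endl;
    return {};  // empty result signals failure to the caller
  }
  return {"partial", "arg0", "arg1"};
}

// Caller (standing in for CreateNewCNode): checks the result and returns nullptr
// instead of aborting deep inside graph construction.
std::shared_ptr<std::vector<std::string>> CreateNode(bool input_created) {
  auto inputs = CreateInputs(input_created);
  if (inputs.empty()) {
    std::cerr << "create switch or partial failed" << std::endl;
    return nullptr;
  }
  return std::make_shared<std::vector<std::string>>(std::move(inputs));
}

int main() {
  std::cout << (CreateNode(true) != nullptr) << std::endl;   // prints 1: success
  std::cout << (CreateNode(false) != nullptr) << std::endl;  // prints 0: failure propagated
  return 0;
}

The point of the refactor is that the decision about how to fail (dump diagnostics, raise, or skip) moves up to a single call site, which is what the ConstructKernelGraph hunks further below do.
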
@@ -877,14 +880,16 @@ KernelGraphPtr SessionBasic::ConstructKernelGraph(const AnfNodePtrList &lst, con
   return graph;
 }

-void SessionBasic::CreateCNodeKernelGraph(const AnfNodePtr node, KernelGraphPtr graph) {
+bool SessionBasic::CreateCNodeOfKernelGraph(const AnfNodePtr &node, KernelGraph *graph) {
   MS_EXCEPTION_IF_NULL(node);
   MS_EXCEPTION_IF_NULL(graph);
   auto cnode = node->cast<CNodePtr>();
   MS_EXCEPTION_IF_NULL(cnode);
   // create a new cnode object
-  auto new_cnode = CreateNewCNode(cnode, graph.get());
-  MS_EXCEPTION_IF_NULL(new_cnode);
+  auto new_cnode = CreateNewCNode(cnode, graph);
+  if (new_cnode == nullptr) {
+    return false;
+  }
   new_cnode->set_abstract(cnode->abstract());
   std::string fullname;
   if (cnode->input(kAnfPrimitiveIndex)->isa<CNode>()) {
@@ -898,6 +903,7 @@ void SessionBasic::CreateCNodeKernelGraph(const AnfNodePtr node, KernelGraphPtr
   if (AnfAlgo::CheckPrimitiveType(new_cnode, prim::kPrimReturn)) {
     graph->set_return(new_cnode);
   }
+  return true;
 }

 std::shared_ptr<KernelGraph> SessionBasic::ConstructKernelGraph(const FuncGraphPtr &func_graph,
@@ -909,11 +915,10 @@ std::shared_ptr<KernelGraph> SessionBasic::ConstructKernelGraph(const FuncGraphP
   MS_EXCEPTION_IF_NULL(graph);
   front_backend_graph_map_[func_graph] = graph;
   MS_LOG(INFO) << "Create graph: " << graph->graph_id();
-  bool is_trace_back = false;
   for (const auto &node : node_list) {
     MS_EXCEPTION_IF_NULL(node);
     MS_LOG(DEBUG) << "Start create new cnode, node = " << node->DebugString();
+    // Create parameter
     if (node->isa<Parameter>()) {
       auto graph_inputs = graph->MutableInputs();
       MS_EXCEPTION_IF_NULL(graph_inputs);
@@ -921,25 +926,28 @@
       graph_inputs->push_back(new_parameter);
       graph->FrontBackendlMapAdd(node, new_parameter);
       continue;
-    } else if (node->isa<ValueNode>()) {
+    }
+    // Create value node
+    if (node->isa<ValueNode>()) {
+      // Create common value node
       if (!IsValueNode<FuncGraph>(node)) {
-        // if input is a common value node,
         (void)CreateNewValueNode(node, graph.get());
-      } else {
-        // if input is a ValueNode<FuncGraph>
-        FuncGraphPtr child_graph = AnfAlgo::GetValueNodeFuncGraph(node);
-        if (front_backend_graph_map_.find(child_graph) == front_backend_graph_map_.end()) {
-          (void)ConstructKernelGraph(child_graph, all_out_graph);
-        }
-        (void)CreateValueNodeKernelGraph(node, graph.get());
+        continue;
       }
+      // Create child kernel graph according ValueNode<FuncGraph>
+      FuncGraphPtr child_graph = AnfAlgo::GetValueNodeFuncGraph(node);
+      if (front_backend_graph_map_.find(child_graph) == front_backend_graph_map_.end()) {
+        (void)ConstructKernelGraph(child_graph, all_out_graph);
+      }
+      (void)CreateValueNodeKernelGraph(node, graph.get());
       continue;
-    } else {
-      CreateCNodeKernelGraph(node, graph);
+    }
+    // Create cnode
+    if (!CreateCNodeOfKernelGraph(node, graph.get())) {
+      DumpIR("contruct_kernel_graph_fail.ir", func_graph);
+      MS_LOG_EXCEPTION << "construct func graph " << func_graph->ToString() << "fail!";
     }
   }
-  // if a graph jump back unconditionally, return op of this graph will never be executed, so output is null.
-  graph->set_output_null(is_trace_back);
   AddParameterToGraphInputs(func_graph->parameters(), graph.get());
   graph->SetExecOrderByDefault();
   if (ExistSummaryNode(graph.get())) {
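
The restructured loop above is where the propagated failure finally surfaces: CreateCNodeOfKernelGraph now returns a bool, and ConstructKernelGraph is the single place that dumps the IR and raises. A self-contained sketch of that division of responsibility follows; CreateNode, DumpGraph, and ConstructGraph are stand-ins for illustration, not the real MindSpore functions.

// Sketch only: stand-ins for CreateCNodeOfKernelGraph / DumpIR / ConstructKernelGraph.
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Per-node creation reports failure instead of terminating the process itself.
bool CreateNode(const std::string &node) { return node != "bad"; }

// Diagnostic dump performed once, at the point where the failure is handled.
void DumpGraph(const std::string &file_name) { std::cerr << "dump graph to " << file_name << std::endl; }

// The single call site that decides how to fail: dump first, then raise.
void ConstructGraph(const std::vector<std::string> &nodes) {
  for (const auto &node : nodes) {
    if (!CreateNode(node)) {
      DumpGraph("construct_kernel_graph_fail.ir");
      throw std::runtime_error("construct graph fail at node: " + node);
    }
  }
}

int main() {
  try {
    ConstructGraph({"a", "b", "bad"});
  } catch (const std::exception &e) {
    std::cout << e.what() << std::endl;  // construct graph fail at node: bad
  }
  return 0;
}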

@@ -82,7 +82,7 @@ class SessionBasic : public std::enable_shared_from_this<SessionBasic> {
   virtual void RegisterSummaryCallBackFunc(const CallBackFunc &callback);
-  void CreateCNodeKernelGraph(const AnfNodePtr node, KernelGraphPtr graph);
+  bool CreateCNodeOfKernelGraph(const AnfNodePtr &node, KernelGraph *graph);
   std::shared_ptr<KernelGraph> ConstructKernelGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs);
   std::shared_ptr<KernelGraph> ConstructKernelGraph(const FuncGraphPtr &func_graph,

@@ -244,7 +244,6 @@ EvalResultPtr MixedPrecisionCastEvaluator::Run(AnalysisEnginePtr engine, const C
                       << " args size should equal to inputs size minus 1, but args size " << args_conf_list.size()
                       << ", inputs size " << out_node_inputs.size();
   }
-  AnfNodePtrList args_inputs{out_node_inputs.begin() + 1, out_node_inputs.end()};
   (void)std::transform(args_conf_list.begin(), args_conf_list.end(), std::back_inserter(args_spec_list),
                        [](const ConfigPtr &ref) -> AbstractBasePtr { return ref->GetEvaluatedValue()->abstract(); });

@@ -134,12 +134,7 @@ AnfNodePtr FuncGraphSpecializer::ReplicateDisconnectedNode(const AnfNodePtr &nod
   auto inputs = c_node->inputs();
   std::vector<AnfNodePtr> new_inputs;
   (void)std::transform(inputs.begin(), inputs.end(), std::back_inserter(new_inputs),
-                       [this](const AnfNodePtr &inp) -> AnfNodePtr {
-                         if (inp->isa<ValueNode>()) {
-                           return inp;
-                         }
-                         return ReplicateDisconnectedNode(inp);
-                       });
+                       [this](const AnfNodePtr &inp) -> AnfNodePtr { return ReplicateDisconnectedNode(inp); });
   auto c_new_node = new_node->cast<CNodePtr>();
   MS_EXCEPTION_IF_NULL(c_new_node);
   c_new_node->set_inputs(new_inputs);
