!1172 [AutoParallel] Elementwise operators implicit semantics handling by rec's parser

Merge pull request !1172 from Chong/support_squeeze_and_reduce
pull/1172/MERGE
mindspore-ci-bot committed via Gitee, 5 years ago
commit e42631c127
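
This merge makes the recursive-programming (rec) auto-parallel parser aware of operators whose output shape differs implicitly from their input shape. It adds a kRecReduce operator type and remaps REDUCE_SUM/REDUCE_MAX/REDUCE_MIN/REDUCE_MEAN to it (previously they were folded into kRecCast), introduces Eliminate_Aux/EliminateGraph to splice elementwise, squeeze, reduce, and cast nodes out of the cost graph before strategy search while recording the removals in eli_list and the survivor renumbering in index_list, and reworks GenerateStrategy to propagate sharding strategies forward and backward through the eliminated operators, adjusting them where a Squeeze or Reduce changes tensor rank.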

@@ -27,23 +27,29 @@
namespace mindspore {
namespace parallel {
void GenerateStrategy(std::shared_ptr<Graph> graph, const std::vector<std::shared_ptr<OperatorInfo>> &ops);
std::vector<int32_t> PrepareMatMul(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_nodes,
const size_t iter_op_inputs);
void GenerateStrategy(std::shared_ptr<Graph> graph, const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::shared_ptr<std::vector<std::vector<size_t>>> eli_list,
const std::vector<std::vector<std::string>> &input_tensor_names,
const std::shared_ptr<std::vector<size_t>> index_list);
std::vector<std::vector<int32_t>> PrepareMatMul(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
std::vector<std::vector<int32_t>> PrepareVirtualDataset(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops);
std::vector<std::vector<int32_t>> PrepareBiasAdd(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
std::vector<std::vector<int32_t>> PrepareOneHot(std::vector<int32_t> s);
std::vector<int32_t> MakeRecSearchStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::shared_ptr<Graph> &graph, const size_t iter_ops,
const size_t iter_op_inputs);
std::vector<int32_t> MakeDataParallelStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, const size_t iter_op_inputs);
std::vector<int32_t> PrepareStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops,
const size_t iter_op_inputs);
std::vector<std::vector<int32_t>> MakeRecSearchStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
std::vector<std::vector<int32_t>> MakeDataParallelStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops);
std::vector<std::vector<int32_t>> PrepareStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
void GeneratePartitionedOperatorStrategy(const std::shared_ptr<Graph> graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::shared_ptr<std::vector<size_t>> index_list);
int FindIndexOfOperatorIncoming(const std::vector<std::vector<std::string>> &input_tensor_names, const size_t iter_ops);
std::vector<int32_t> CopyIncomingOperatorOutputStrategy(const std::shared_ptr<Graph> graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
@@ -56,12 +62,27 @@ std::vector<int32_t> ModifyStrategyIfSqueezeIncoming(const std::vector<std::shar
std::vector<int32_t> GetDimList(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops);
std::vector<int32_t> ModifyStrategyIfReduceIncoming(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const int incoming_op_index, std::vector<int32_t> s);
std::vector<int32_t> CopyIncomingOperatorInputStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const int incoming_op_index, const size_t iter_ops,
const std::shared_ptr<std::vector<size_t>> no_stra_op_list);
std::vector<std::vector<int32_t>> GenerateStrategiesFromStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
void GenerateEliminatedOperatorStrategyForward(std::shared_ptr<Graph> graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::shared_ptr<std::vector<std::vector<size_t>>> eli_list,
const std::vector<std::vector<std::string>> &input_tensor_names,
const std::shared_ptr<std::vector<size_t>> index_list,
const std::shared_ptr<std::vector<size_t>> no_stra_op_list);
std::vector<int32_t> ModifyStrategyIfSqueezeOutgoing(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
std::vector<int32_t> ModifyStrategyIfReduceOutgoing(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
std::vector<int32_t> CopyOutgoingOperatorInputStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::vector<std::vector<std::string>> &input_tensor_names,
const size_t iter_ops);
void GenerateEliminatedOperatorStrategyBackward(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::vector<std::vector<std::string>> &input_tensor_names,
const std::shared_ptr<std::vector<size_t>> no_stra_op_list);
} // namespace parallel
} // namespace mindspore
#endif // PARALLEL_AUTO_PARALLEL_REC_GENERATE_STRATEGY_H_
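
The squeeze/reduce helpers declared above carry the PR's core idea: a strategy inherited across a rank-changing operator must be reshaped to match the lower-rank tensor. A minimal self-contained sketch of the squeeze case (DropSqueezedDims is an illustrative stand-in, not one of the functions above; GetDimList presumably supplies the squeezed axes):

#include <cstdint>
#include <vector>

// Illustrative only: drop the strategy entries for axes removed by a
// Squeeze-like incoming operator, so the remaining entries line up with
// the current operator's lower-rank input shape.
std::vector<int32_t> DropSqueezedDims(const std::vector<int32_t> &s, const std::vector<int32_t> &dim_list) {
  std::vector<int32_t> s_squeezed;
  for (int32_t i = 0; i < static_cast<int32_t>(s.size()); i++) {
    bool squeezed = false;
    for (int32_t dim : dim_list) {
      if (dim == i) squeezed = true;
    }
    if (!squeezed) s_squeezed.push_back(s[i]);
  }
  return s_squeezed;
}
// e.g. DropSqueezedDims({2, 1, 4, 1}, {1, 3}) == {2, 4}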

@@ -46,7 +46,8 @@ enum OperatorType {
kRecMul,
kRecDiv,
kRecSqueeze,
kRecCast
kRecCast,
kRecReduce
};
enum InfoType { kApplication, kConstant };

@@ -20,6 +20,7 @@
#include <memory>
#include <string>
#include <vector>
#include <set>
#include "ir/value.h"
#include "parallel/auto_parallel/rec_core/rec_graph.h"
@@ -161,5 +162,71 @@ size_t GetIndexInInputTensorNames(const std::vector<std::vector<std::string>> &i
  MS_LOG(INFO) << "Get index failed, using SIZE_MAX instead";
  return SIZE_MAX;
}
void Eliminate_Aux(const size_t node_index, const std::shared_ptr<Graph> graph,
                   const std::shared_ptr<std::vector<std::vector<size_t>>> eli_list) {
  // Record the eliminated node together with its outputs.
  std::vector<size_t> eli;
  eli.push_back(node_index);
  for (size_t i = 0; i < (size_t)graph->nodes[node_index].node_out.size(); i++) {
    eli.push_back(graph->nodes[node_index].node_out[i]);
  }
  eli_list->push_back(eli);
  // Splice the node out on the input side: each input now feeds the node's outputs directly.
  for (auto input_index : graph->nodes[node_index].node_in) {
    auto it = find(graph->nodes[input_index].node_out.begin(), graph->nodes[input_index].node_out.end(), node_index);
    if (it != graph->nodes[input_index].node_out.end()) {
      graph->nodes[input_index].node_out.erase(it);
      for (auto output_index : graph->nodes[node_index].node_out) {
        graph->nodes[input_index].node_out.push_back(output_index);
      }
    }
  }
  // And on the output side: each output now reads from the node's inputs directly.
  for (auto output_index : graph->nodes[node_index].node_out) {
    auto it = find(graph->nodes[output_index].node_in.begin(), graph->nodes[output_index].node_in.end(), node_index);
    if (it != graph->nodes[output_index].node_in.end()) {
      graph->nodes[output_index].node_in.erase(it);
      for (auto input_index : graph->nodes[node_index].node_in) {
        graph->nodes[output_index].node_in.push_back(input_index);
      }
    }
  }
}
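
// Worked example (hypothetical three-node chain, not from this commit):
// eliminating B in A(0) -> B(1) -> C(2) via Eliminate_Aux(1, graph, eli_list)
//   * appends {1, 2} to eli_list (the node and its outputs),
//   * rewires A: node_out {1} becomes {2},
//   * rewires C: node_in  {1} becomes {0},
// so A feeds C directly once B is spliced out.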
std::shared_ptr<Graph> EliminateGraph(const std::shared_ptr<Graph> graph,
                                      const std::shared_ptr<std::vector<std::vector<size_t>>> eli_list,
                                      const std::shared_ptr<std::vector<size_t>> index_list) {
  MS_EXCEPTION_IF_NULL(graph);
  // Operator types whose strategies are derived from their neighbours rather than searched.
  const std::set<OperatorType> type_list = {
    OperatorType::kRecOneHot, OperatorType::kRecReLU, OperatorType::kRecLog, OperatorType::kRecExp,
    OperatorType::kRecAdd, OperatorType::kRecElmWiseOp, OperatorType::kRecBiasAdd, OperatorType::kRecSub,
    OperatorType::kRecMul, OperatorType::kRecDiv, OperatorType::kRecSqueeze, OperatorType::kRecReduce,
    OperatorType::kRecCast};
  for (size_t node_index = 0; node_index < (size_t)graph->nodes.size(); node_index++) {
    auto type = graph->nodes[node_index].apply.op_type;
    if (type_list.find(type) != type_list.end()) {
      Eliminate_Aux(node_index, graph, eli_list);
    }
  }
  // Start from the identity mapping, then renumber the survivors.
  index_list->reserve(graph->nodes.size());
  for (size_t i = 0; i < (size_t)graph->nodes.size(); i++) {
    index_list->push_back(i);
  }
  for (size_t i = 0; i < (size_t)eli_list->size(); i++) {
    if (eli_list->at(i)[0] >= index_list->size()) {
      MS_LOG(EXCEPTION) << "Failure: Operators' elements out of range.";
    }
    // Mark the eliminated slot and shift every later index down by one.
    index_list->at(eli_list->at(i)[0]) = SIZE_MAX;
    for (size_t j = eli_list->at(i)[0] + 1; j < (size_t)index_list->size(); j++) {
      index_list->at(j)--;
    }
  }
  // Keep only the surviving nodes; marked slots may have been decremented below
  // SIZE_MAX by later eliminations, hence the > SIZE_MAX / 2 test.
  std::shared_ptr<Graph> new_graph(new Graph);
  for (size_t i = 0; i < (size_t)graph->nodes.size(); i++) {
    if (index_list->at(i) > SIZE_MAX / 2) {
      continue;
    }
    new_graph->nodes.push_back(graph->nodes[i]);
  }
  return new_graph;
}
} // namespace parallel
} // namespace mindspore
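
The renumbering in EliminateGraph is worth spelling out: index_list starts as the identity map, each eliminated node's slot is set to SIZE_MAX, and every later slot is decremented; since a SIZE_MAX slot can itself be decremented by subsequent eliminations, survivors are detected with > SIZE_MAX / 2 rather than == SIZE_MAX. A standalone sketch of the same bookkeeping (BuildIndexList is illustrative, not part of the commit):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: renumber surviving nodes after eliminating the `dead` ones.
std::vector<size_t> BuildIndexList(size_t node_count, const std::vector<size_t> &dead) {
  std::vector<size_t> index_list(node_count);
  for (size_t i = 0; i < node_count; i++) index_list[i] = i;
  for (size_t d : dead) {
    index_list[d] = SIZE_MAX;                                       // mark eliminated
    for (size_t j = d + 1; j < node_count; j++) index_list[j]--;    // shift the rest down
  }
  return index_list;
}
// BuildIndexList(4, {1, 3}) == {0, SIZE_MAX, 1, SIZE_MAX}:
// nodes 0 and 2 survive as new indices 0 and 1.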

@@ -50,10 +50,10 @@ const std::map<std::string, OperatorType> DictOpType{
{DIV, OperatorType::kRecElmWiseOp},
{SQUEEZE, OperatorType::kRecSqueeze},
{CAST, OperatorType::kRecCast},
{REDUCE_SUM, OperatorType::kRecCast},
{REDUCE_MAX, OperatorType::kRecCast},
{REDUCE_MIN, OperatorType::kRecCast},
{REDUCE_MEAN, OperatorType::kRecCast}};
{REDUCE_SUM, OperatorType::kRecReduce},
{REDUCE_MAX, OperatorType::kRecReduce},
{REDUCE_MIN, OperatorType::kRecReduce},
{REDUCE_MEAN, OperatorType::kRecReduce}};
const TensorParam MakeTensor(int n, int c, int h, int w);
@@ -72,6 +72,13 @@ void MakeEdge(const std::vector<std::vector<std::string>> &input_tensor_names, s
size_t GetIndexInInputTensorNames(const std::vector<std::vector<std::string>> &input_tensor_names,
const std::string &input_name);
void Eliminate_Aux(const size_t node_index, const std::shared_ptr<Graph> graph,
const std::shared_ptr<std::vector<std::vector<size_t>>> eli_list);
std::shared_ptr<Graph> EliminateGraph(const std::shared_ptr<Graph> graph,
const std::shared_ptr<std::vector<std::vector<size_t>>> eli_list,
const std::shared_ptr<std::vector<size_t>> index_list);
} // namespace parallel
} // namespace mindspore
#endif // PARALLEL_AUTO_PARALLEL_REC_PARSE_GRAPH_H_

@@ -1158,11 +1158,12 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
for (auto it = tuple_getitem_list.begin(); it != tuple_getitem_list.end();) {
input_tensor_names = RecInputTensorNames(it++, input_tensor_names);
}
std::shared_ptr<std::vector<size_t>> ops_nodes_list(new std::vector<size_t>);
std::shared_ptr<Graph> graph = ParseGraph(ops, input_tensor_names);
std::shared_ptr<std::vector<std::vector<size_t>>> eli_list(new std::vector<std::vector<size_t>>);
std::shared_ptr<std::vector<size_t>> index_list(new std::vector<size_t>);
graph = EliminateGraph(graph, eli_list, index_list);
size_t num_device = g_device_manager->DeviceNum();
double device_memory = entire_costgraph->GetDeviceMemory();
if (PartitionForAllDevices(num_device, device_memory, graph) == SUCCESS) {
@@ -1172,7 +1173,7 @@ Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const
return FAILED;
}
GenerateStrategy(graph, ops);
GenerateStrategy(graph, ops, eli_list, input_tensor_names, index_list);
if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
MS_LOG(INFO) << "Init selected strategy succeeded.";
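
Taken together, the new rec search flow in this last hunk is: ParseGraph builds the cost graph from ops and input_tensor_names; EliminateGraph removes the elementwise, squeeze, reduce, and cast nodes, filling eli_list and index_list; PartitionForAllDevices searches strategies on the reduced graph; and GenerateStrategy maps the result back onto every operator, including the eliminated ones.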
