!3904 Revert 'Pull Request !3103 : change type of Shape from int32 to int64'

Merge pull request !3904 from suteng/revert-merge-3103-master
pull/3904/MERGE
mindspore-ci-bot authored 5 years ago, committed by Gitee
commit 277cfc2caf

@ -402,36 +402,31 @@ AbstractBasePtr InferImplDropoutGenMask(const AnalysisEnginePtr &, const Primiti
for (std::size_t i = 0; i < x_shape->size(); ++i) {
auto value_track = x_shape_data[i]->GetValueTrack();
MS_EXCEPTION_IF_NULL(value_track);
int64_t e_value = 0;
if (value_track->isa<Int64Imm>()) {
e_value = GetValue<int64_t>(value_track);
} else if (value_track->isa<Int32Imm>()) {
e_value = static_cast<int64_t>(GetValue<int>(value_track));
} else {
MS_LOG(EXCEPTION) << "DropOutGenMask input x_shape elements is not int64 or int32, but "
<< value_track->ToString() << ".";
if (!value_track->isa<Int32Imm>()) {
MS_LOG(EXCEPTION) << "DropOutGenMask input x_shape elements is not int32, but " << value_track->ToString() << ".";
}
int e_value = GetValue<int>(value_track);
if (e_value <= 0) {
MS_LOG(EXCEPTION) << "DropOutGenMask product of x_shape should be > 0";
}
if (std::numeric_limits<int64_t>::max() / count / e_value < 1) {
if (std::numeric_limits<int>::max() / count / e_value < 1) {
MS_LOG(EXCEPTION) << "integer multiply integer overflow";
}
count = count * e_value;
}
// convert to bytes(8 bits) mask, using round up
int64_t n128s = count / 128;
int n128s = count / 128;
if ((count % 128) != 0) {
n128s++;
}
int64_t bytes_count = n128s * 16;
std::vector<int64_t> shape_y{bytes_count};
int bytes_count = n128s * 16;
std::vector<int> shape_y{bytes_count};
primitive->set_attr("T", kInt32);
return std::make_shared<AbstractTensor>(std::make_shared<AbstractScalar>(kAnyValue, kUInt8),
std::make_shared<Shape>(std::vector<int64_t>{shape_y}));
std::make_shared<Shape>(std::vector<int>{shape_y}));
}
} // namespace abstract
} // namespace mindspore
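
Note: the hunk above reverts only the integer width used in this computation (int64_t back to plain int); the sizing logic itself is unchanged. As a minimal standalone sketch (not the MindSpore code, with a hypothetical function name), the mask size is derived by multiplying the shape elements under an overflow guard, rounding the element count up to whole 128-bit blocks, and allocating 16 bytes per block:

#include <cstdint>
#include <limits>
#include <stdexcept>
#include <vector>

// Hypothetical standalone form of the DropOutGenMask sizing logic shown above.
int64_t DropoutMaskBytes(const std::vector<int64_t> &x_shape) {
  int64_t count = 1;
  for (int64_t e : x_shape) {
    if (e <= 0) {
      throw std::runtime_error("product of x_shape should be > 0");
    }
    // Same overflow guard as above: bail out before count * e can overflow.
    if (std::numeric_limits<int64_t>::max() / count / e < 1) {
      throw std::runtime_error("integer multiply integer overflow");
    }
    count *= e;
  }
  int64_t n128s = count / 128;  // number of whole 128-bit blocks
  if (count % 128 != 0) {
    n128s++;                    // round up for a partial block
  }
  return n128s * 16;            // 16 bytes per 128-bit block
}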

@ -1580,7 +1580,7 @@ Status CostGraph::InitSelectedStrategy() {
if (stra.empty()) {
MS_LOG(EXCEPTION) << "Infer strategy by tensor_info failed";
}
Strategys stra_inputs = {stra};
std::vector<Dimensions> stra_inputs = {stra};
StrategyPtr reshape_stra =
std::make_shared<Strategy>((*pre_iter)->prev_operator()->strategy()->GetInputStage(), stra_inputs);
reshape_info->set_strategy(reshape_stra);

@ -31,60 +31,68 @@ void GenerateStrategy(const std::shared_ptr<Graph> &graph, const std::vector<std
const std::shared_ptr<std::vector<std::vector<size_t>>> &eli_list,
const std::vector<std::vector<std::string>> &input_tensor_names,
const std::shared_ptr<std::vector<size_t>> &index_list);
Strategys PrepareMatMul(const std::shared_ptr<Graph> &graph, const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
Strategys PrepareBiasAdd(const std::shared_ptr<Dimensions> &s);
Strategys PrepareOneHot(const std::shared_ptr<Graph> &graph, const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
Strategys PrepareGatherV2(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops, Dimensions s);
Strategys PrepareL2Normalize(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops,
Dimensions s);
Strategys MakeRecSearchStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_graph,
const size_t iter_ops);
Strategys CheckBroadcast(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops, Dimensions s);
Dimensions ApplyBroadcast(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops, Dimensions s,
size_t target_tensor_dim, size_t refer_tensor_dim, bool braoadcast_first_tensor);
Strategys CheckDivisible(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops, Dimensions s);
Strategys MakeDataParallelStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_graph,
const size_t iter_ops);
Strategys PrepareStrategy(const std::shared_ptr<Graph> &graph, const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
std::vector<std::vector<int32_t>> PrepareMatMul(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
std::vector<std::vector<int32_t>> PrepareBiasAdd(const std::shared_ptr<std::vector<int32_t>> &s);
std::vector<std::vector<int32_t>> PrepareOneHot(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
std::vector<std::vector<int32_t>> PrepareGatherV2(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
std::vector<std::vector<int32_t>> PrepareL2Normalize(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
std::vector<std::vector<int32_t>> CheckBroadcast(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
std::vector<int32_t> ApplyBroadcast(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops,
std::vector<int32_t> s, size_t target_tensor_dim, size_t refer_tensor_dim,
bool braoadcast_first_tensor);
std::vector<std::vector<int32_t>> CheckDivisible(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
std::vector<std::vector<int32_t>> MakeRecSearchStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
std::vector<std::vector<int32_t>> MakeDataParallelStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
std::vector<std::vector<int32_t>> PrepareStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_graph, const size_t iter_ops);
void GeneratePartitionedOperatorStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::shared_ptr<std::vector<size_t>> &index_list);
size_t FindIndexOfOperatorIncoming(const std::vector<std::vector<std::string>> &input_tensor_names,
const size_t iter_ops);
Dimensions CopyIncomingOperatorOutputStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, const size_t iter_graph);
Dimensions PrepareIncomingOperatorInputStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t incoming_op_index);
Dimensions GetAxisList(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const int iter_ops);
Dimensions ModifyStrategyIfSqueezeIncoming(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t incoming_op_index, Dimensions s);
std::vector<int32_t> CopyIncomingOperatorOutputStrategy(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, const size_t iter_graph);
std::vector<int32_t> PrepareIncomingOperatorInputStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t incoming_op_index);
std::vector<int32_t> GetAxisList(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const int iter_ops);
std::vector<int32_t> ModifyStrategyIfSqueezeIncoming(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t incoming_op_index, std::vector<int32_t> s);
bool GetKeepDims(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops);
Dimensions GetDimList(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops);
Dimensions ModifyStrategyIfReduceIncoming(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t incoming_op_index, Dimensions s);
Dimensions GetDimListFromAttrs(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops);
Dimensions ModifyStrategyIfArgIncoming(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t incoming_op_index, Dimensions s);
Dimensions CopyIncomingOperatorInputStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, const size_t incoming_op_index);
Strategys GenerateStrategiesFromStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops,
Dimensions basic_stra);
std::vector<int32_t> GetDimList(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops);
std::vector<int32_t> ModifyStrategyIfReduceIncoming(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t incoming_op_index, std::vector<int32_t> s);
std::vector<int32_t> GetDimListFromAttrs(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops);
std::vector<int32_t> ModifyStrategyIfArgIncoming(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t incoming_op_index, std::vector<int32_t> s);
std::vector<int32_t> CopyIncomingOperatorInputStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, const size_t incoming_op_index);
std::vector<std::vector<int32_t>> GenerateStrategiesFromStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops,
std::vector<int32_t> basic_stra);
void GenerateEliminatedOperatorStrategyForward(const std::shared_ptr<Graph> &graph,
const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::vector<std::vector<std::string>> &input_tensor_names,
const std::shared_ptr<std::vector<size_t>> &index_list,
const std::shared_ptr<std::vector<size_t>> &no_stra_op_list);
Dimensions ModifyStrategyIfSqueezeOutgoing(const std::vector<std::shared_ptr<OperatorInfo>> &ops, const size_t iter_ops,
Dimensions s);
Dimensions CopyOutgoingOperatorInputStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::vector<std::vector<std::string>> &input_tensor_names,
const size_t iter_ops);
std::vector<int32_t> ModifyStrategyIfSqueezeOutgoing(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const size_t iter_ops, std::vector<int32_t> s);
std::vector<int32_t> CopyOutgoingOperatorInputStrategy(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::vector<std::vector<std::string>> &input_tensor_names,
const size_t iter_ops);
void GenerateEliminatedOperatorStrategyBackward(const std::vector<std::shared_ptr<OperatorInfo>> &ops,
const std::vector<std::vector<std::string>> &input_tensor_names,
const std::shared_ptr<std::vector<size_t>> &no_stra_op_list);
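
Note: the declarations above only swap the Strategys and Dimensions aliases for their spelled-out forms. Judging from the rest of this diff (for example the Shape alias in the device_matrix.h hunk further down), the aliases resolve roughly as follows after the revert; this is a sketch of the assumed definitions, not a copy of the actual header:

#include <cstdint>
#include <vector>

// Assumed pre-!3103 aliases; before the revert the element type was int64_t.
using Dimensions = std::vector<int32_t>;     // per-dimension split factors of one tensor
using Strategys  = std::vector<Dimensions>;  // one Dimensions entry per operator input
using Shape      = std::vector<int32_t>;     // see the device_matrix.h hunk below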

@ -29,7 +29,7 @@
namespace mindspore {
namespace parallel {
static std::map<std::string, Shape> param_shapes;
static std::map<std::string, std::vector<int>> param_shapes;
std::vector<std::string> PARALLEL_MODE_LIST = {STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL, SEMI_AUTO_PARALLEL,
AUTO_PARALLEL};
@ -173,7 +173,7 @@ void ParallelParameterContextRestoreInNoTraining(const FuncGraphPtr &func_graph,
MS_LOG(WARNING) << "Can not found the shape for parameter " << param_node->name();
return;
}
Shape shape = iter->second;
std::vector<int> shape = iter->second;
std::shared_ptr<abstract::BaseShape> base_shape = std::make_shared<abstract::Shape>(shape);
ptr->set_shape(base_shape);
MS_LOG(DEBUG) << "The parameter name is " << param_node->name() << ", the shape is " << shape;
@ -189,10 +189,7 @@ void ParallelParameterContextCkptInTraining(const FuncGraphPtr &func_graph, cons
return;
}
std::vector<int> shape_int = dyn_cast<abstract::Shape>(ptr->GetShapeTrack())->shape();
Shape shape;
(void)std::transform(shape_int.begin(), shape_int.end(), std::back_inserter(shape),
[](const int &value) { return static_cast<int64_t>(value); });
std::vector<int> shape = dyn_cast<abstract::Shape>(ptr->GetShapeTrack())->shape();
auto ret = param_shapes.try_emplace(param_node->name(), shape);
if (!ret.second) {
MS_LOG(EXCEPTION) << "The shape for parameter name " << param_node->name() << " is existed";

@ -159,7 +159,7 @@ std::string ShapeToString(const Shape &shape) {
return str + "]";
}
std::string ListToString(const RankList &list) {
std::string ListToString(const std::vector<int32_t> &list) {
std::string str = "[";
for (auto &element : list) {
str += std::to_string(element) + ", ";
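
Note: this hunk only changes the parameter type of ListToString from RankList to std::vector<int32_t>, which the device_matrix.h hunk below shows is the same type. Assembled from the fragments visible here, a self-contained version of the helper would look roughly like this (a sketch; the exact formatting in the source may differ):

#include <cstdint>
#include <string>
#include <vector>

// Joins the ranks into a "[a, b, c, ]"-style string, as in the hunk above.
std::string ListToString(const std::vector<int32_t> &list) {
  std::string str = "[";
  for (auto &element : list) {
    str += std::to_string(element) + ", ";
  }
  return str + "]";
}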

@ -27,7 +27,7 @@
namespace mindspore {
namespace parallel {
using RankList = std::vector<int32_t>;
using Shape = std::vector<int64_t>;
using Shape = std::vector<int32_t>;
class DeviceMatrix {
public:
@ -48,7 +48,7 @@ class DeviceMatrix {
};
std::string ShapeToString(const Shape &shape);
std::string ListToString(const RankList &list);
std::string ListToString(const std::vector<int32_t> &list);
} // namespace parallel
} // namespace mindspore

@ -45,13 +45,13 @@ py::dict GetParameterLayout(const FuncGraphPtr &graph) {
auto tensor_map = tensor_layout->tensor_map().array();
auto slice_shape = tensor_layout->slice_shape().array();
int32_t _field_size = tensor_layout->get_field_size();
Shape field_size;
std::vector<int32_t> field_size;
if (_field_size != 0) {
field_size.push_back(_field_size);
} else {
field_size = {0};
}
std::vector<Shape> layout = {device_arrangement, tensor_map, slice_shape, field_size};
std::vector<std::vector<int32_t>> layout = {device_arrangement, tensor_map, slice_shape, field_size};
dict[py::str(name)] = layout;
MS_LOG(INFO) << "GetParameterLayout name = " << name << ", layout " << tensor_layout->ToString();
}

@ -130,7 +130,7 @@ Status Softmax::CheckStrategy(const StrategyPtr &strategy) {
return FAILED;
}
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> stra = strategy->GetInputDim();
Dimensions input_strategy = stra.at(0);
for (auto &element : axis_) {
@ -181,7 +181,7 @@ Status Softmax::GetAttrs() {
MS_LOG(ERROR) << name_ << " : The axis tuple is empty.";
return FAILED;
}
MS_LOG(INFO) << name_ << " : The axis is tuple, value is " << ListToString(axis_);
MS_LOG(INFO) << name_ << " : The axis is tuple, value is " << ShapeToString(axis_);
} else {
MS_LOG(ERROR) << name_ << " : The value of axis is not int or tuple int.";
return FAILED;
@ -258,7 +258,7 @@ Status Softmax::GenerateStrategies(int32_t stage_id) {
}
Status ActivationBase::InferDevMatrixShape() {
Strategys stra = strategy_->GetInputDim();
std::vector<Dimensions> stra = strategy_->GetInputDim();
Dimensions input_strategy = stra.at(0);
dev_matrix_shape_ = input_strategy;
@ -296,11 +296,11 @@ Status ActivationBase::InferForwardCommunication() {
}
Status ActivationBase::InferTensorMap() {
Shape tensor_map_index;
std::vector<int32_t> tensor_map_index;
size_t size = inputs_shape_.at(0).size();
// such as 4: tensor_map_index [3,2,1,0]
for (size_t i = 0; i < size; ++i) {
tensor_map_index.push_back((int64_t)(size - i - 1));
tensor_map_index.push_back((int32_t)(size - i - 1));
}
inputs_tensor_map_.push_back(tensor_map_index);
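
Note: the loop above is an instance of a pattern that recurs throughout this diff: the default tensor map for a rank-n tensor is simply the dimension indices in reverse order, so a rank-4 input gets [3, 2, 1, 0]. A minimal sketch of that construction (hypothetical helper name, element type as reverted):

#include <cstddef>
#include <cstdint>
#include <vector>

// Builds the reversed-index tensor map used above, e.g. rank 4 -> {3, 2, 1, 0}.
std::vector<int32_t> IdentityTensorMap(size_t rank) {
  std::vector<int32_t> tensor_map;
  for (size_t i = 0; i < rank; ++i) {
    tensor_map.push_back(static_cast<int32_t>(rank - i - 1));
  }
  return tensor_map;
}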
@ -425,7 +425,7 @@ Status ExpandDimsInfo::InferTensorMap() {
// for example: if the dimension of input is 3, and the axis is 2,
// then the input_tensor_map is [2, 1, 0], the output_tensor_map is [2, 1, -1, 0]
Shape input_tensor_map, output_tensor_map;
std::vector<int32_t> input_tensor_map, output_tensor_map;
size_t size = inputs_shape_[0].size();
for (size_t i = 0; i < size; ++i) {
input_tensor_map.push_back(SizeToInt(size - i - 1));
@ -607,7 +607,7 @@ Status SqueezeInfo::InferReplaceOps(const StrategyPtr &strategy) {
Status SqueezeInfo::InferTensorMap() {
// for example: if the shape of input is [32, 32, 1], and the axis is (2, ),
// then the input_tensor_map is [2, 1, 0], the output_tensor_map is [2, 1]
Shape input_tensor_map, output_tensor_map;
std::vector<int32_t> input_tensor_map, output_tensor_map;
if (inputs_shape_.empty()) {
MS_LOG(ERROR) << name_ << ": The inputs shape is empty";
return FAILED;

@ -54,9 +54,9 @@ Shapes ArithmeticBase::InferExpendShape() {
return input_shapes;
}
Strategys ExpendStrategy(const StrategyPtr &strategy) {
Strategys expend_strategy;
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> ExpendStrategy(const StrategyPtr &strategy) {
std::vector<Dimensions> expend_strategy;
std::vector<Dimensions> stra = strategy->GetInputDim();
Dimensions sub_a_strategy = stra.at(0);
Dimensions sub_b_strategy = stra.at(1);
size_t input_a_size = sub_a_strategy.size();
@ -83,7 +83,7 @@ Status ArithmeticBase::CheckStrategy(const StrategyPtr &strategy) {
return FAILED;
}
Shapes input_shapes = InferExpendShape();
Strategys expend_strategy = ExpendStrategy(strategy);
std::vector<Dimensions> expend_strategy = ExpendStrategy(strategy);
Dimensions sub_a_strategy = expend_strategy.at(0);
Dimensions sub_b_strategy = expend_strategy.at(1);
Shape input_a_shape = input_shapes.at(0);
@ -103,7 +103,7 @@ Status ArithmeticBase::CheckStrategy(const StrategyPtr &strategy) {
}
Status ArithmeticBase::InferDevMatrixShape() {
Strategys expend_strategy = ExpendStrategy(strategy_);
std::vector<Dimensions> expend_strategy = ExpendStrategy(strategy_);
Dimensions sub_a_strategy = expend_strategy.at(0);
Dimensions sub_b_strategy = expend_strategy.at(1);
Shape dev_shape;
@ -123,7 +123,7 @@ TensorMap SetExpendTensorMap(const Shape &strategy, const Shape &dev_matrix_shap
TensorMap tensor_map_index;
for (size_t i = 0; i < strategy.size(); ++i) {
if (strategy[i] == dev_matrix_shape[i]) {
tensor_map_index.push_back((int64_t)(LAST_INDEX(strategy.size()) - i));
tensor_map_index.push_back((int32_t)(LAST_INDEX(SizeToUint(strategy.size())) - i));
} else {
tensor_map_index.push_back(-1);
}
@ -159,15 +159,15 @@ void ArithmeticBase::ReComputeBatchSplitFlagList() {
}
Status ArithmeticBase::InferTensorMap() {
Shape tensor_map_index;
Strategys expend_strategy = ExpendStrategy(strategy_);
std::vector<int32_t> tensor_map_index;
std::vector<Dimensions> expend_strategy = ExpendStrategy(strategy_);
Dimensions sub_a_expend_strategy = expend_strategy.at(0);
Dimensions sub_b_expend_strategy = expend_strategy.at(1);
Strategys stra = strategy_->GetInputDim();
Dimensions sub_a_strategy = stra.at(0);
Dimensions sub_b_strategy = stra.at(1);
for (size_t i = 0; i < sub_a_expend_strategy.size(); ++i) {
tensor_map_index.push_back((int64_t)(LAST_INDEX(sub_a_expend_strategy.size()) - i));
tensor_map_index.push_back((int32_t)(LAST_INDEX(SizeToUint(sub_a_expend_strategy.size())) - i));
}
Shape dev_shape;
@ -261,7 +261,7 @@ Status ArithmeticBase::InferTensorInfo() {
// infer slice shape
Shapes inputs_slice_shape, outputs_slice_shape;
Strategys expend_strategy = ExpendStrategy(strategy_);
std::vector<Dimensions> expend_strategy = ExpendStrategy(strategy_);
Dimensions sub_a_expend_strategy = expend_strategy.at(0);
Dimensions sub_b_expend_strategy = expend_strategy.at(1);
Strategys inputs_strategy = strategy_->GetInputDim();
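
Note: SetExpendTensorMap in the hunk above encodes the broadcasting rule used by ArithmeticBase: a tensor dimension whose split factor matches the corresponding device-matrix dimension is mapped to that device axis (counted from the end), and any other dimension is marked -1 (not split). A standalone sketch of that rule, with a hypothetical free-function signature and assuming LAST_INDEX(n) is n - 1:

#include <cstddef>
#include <cstdint>
#include <vector>

// Maps each strategy dimension to a device-matrix axis, or -1 if unsplit.
std::vector<int32_t> ExpendTensorMap(const std::vector<int32_t> &strategy,
                                     const std::vector<int32_t> &dev_matrix_shape) {
  std::vector<int32_t> tensor_map;
  const int32_t last_index = static_cast<int32_t>(strategy.size()) - 1;
  for (size_t i = 0; i < strategy.size(); ++i) {
    if (strategy[i] == dev_matrix_shape[i]) {
      tensor_map.push_back(last_index - static_cast<int32_t>(i));
    } else {
      tensor_map.push_back(-1);
    }
  }
  return tensor_map;
}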

@ -43,13 +43,13 @@ Status BatchParallelInfo::CheckStrategy(const StrategyPtr &strategy) {
dev_num_ = dev_num;
size_t strategy_size = strategy->GetInputNumber();
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> stra = strategy->GetInputDim();
for (size_t i = 0; i < strategy_size; ++i) {
Shape sub_strategy = stra.at(i);
size_t strategy_len = sub_strategy.size();
bool flag = false;
for (size_t j = 0; j < strategy_len; ++j) {
int64_t strategy_value = sub_strategy.at(j);
int32_t strategy_value = sub_strategy.at(j);
if (strategy_value > 1) {
if (flag || strategy_value != dev_num_) {
if (is_auto_parallel_) {
@ -95,7 +95,7 @@ Status BatchParallelInfo::InferTensorMap() {
return FAILED;
}
for (size_t i = 0; i < inputs_shape_.size(); i++) {
Shape tensor_map_index;
std::vector<int32_t> tensor_map_index;
for (size_t j = 0; j < inputs_shape_[i].size(); ++j) {
if (strategy_->GetInputDim()[i][j] == dev_num_ && j == 0) {
tensor_map_index.push_back(0);
@ -106,7 +106,7 @@ Status BatchParallelInfo::InferTensorMap() {
inputs_tensor_map_.push_back(tensor_map_index);
}
for (size_t i = 0; i < outputs_shape_.size(); i++) {
Shape tensor_map_index;
std::vector<int32_t> tensor_map_index;
for (size_t j = 0; j < outputs_shape_[i].size(); ++j) {
if (i == 0 && j == 0) {
tensor_map_index.push_back(0);
@ -123,7 +123,7 @@ Strategys BatchParallelInfo::GetOutputsStrategy() {
Strategys outputs_strategy;
for (size_t i = 0; i < outputs_shape_.size(); ++i) {
Dimensions strategy;
std::vector<int32_t> strategy;
for (size_t j = 0; j < outputs_shape_[i].size(); ++j) {
if (i == 0 && j == 0) {
strategy.push_back(dev_num_);
@ -201,7 +201,7 @@ Status BatchParallelInfo::GenerateStrategies(int32_t stage_id) {
is_auto_parallel_ = true;
size_t total_dev_num = g_device_manager->GetDeviceListByStageId(stage_id).size();
StrategyPtr sp;
Strategys strategy;
std::vector<Dimensions> strategy;
for (size_t i = 0; i < inputs_shape_.size(); i++) {
Shape temp(inputs_shape_[i].size(), 1);
if (split_flag_list_[i]) {

@ -36,11 +36,11 @@ Status BiasAddInfo::CheckStrategy(const StrategyPtr &strategy) {
}
return FAILED;
}
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> stra = strategy->GetInputDim();
Dimensions sub_a_strategy = stra.at(0);
Dimensions sub_b_strategy = stra.at(1);
int64_t channel_a_strategy = sub_a_strategy.at(1);
int64_t channel_b_strategy = sub_b_strategy.at(0);
int32_t channel_a_strategy = sub_a_strategy.at(1);
int32_t channel_b_strategy = sub_b_strategy.at(0);
if (channel_a_strategy != channel_b_strategy) {
if (is_auto_parallel_) {
MS_LOG(DEBUG) << name_ << " : Invalid strategy.";
@ -53,7 +53,7 @@ Status BiasAddInfo::CheckStrategy(const StrategyPtr &strategy) {
}
Status BiasAddInfo::InferDevMatrixShape() {
Strategys stra = strategy_->GetInputDim();
std::vector<Dimensions> stra = strategy_->GetInputDim();
Dimensions sub_a_strategy = stra.at(0);
dev_matrix_shape_ = sub_a_strategy;
return SUCCESS;
@ -67,13 +67,13 @@ void BiasAddInfo::ReComputeBatchSplitFlagList() {
Status BiasAddInfo::InferTensorMap() {
TensorMap sub_a_tensor_map;
TensorMap sub_b_tensor_map;
Strategys stra = strategy_->GetInputDim();
std::vector<Dimensions> stra = strategy_->GetInputDim();
Dimensions sub_a_strategy = stra.at(0);
size_t sub_a_strategy_size = sub_a_strategy.size();
for (size_t i = 0; i < sub_a_strategy_size; ++i) {
sub_a_tensor_map.push_back((int32_t)(LAST_INDEX(sub_a_strategy_size) - i));
sub_a_tensor_map.push_back((int32_t)(LAST_INDEX(SizeToUint(sub_a_strategy_size)) - i));
}
sub_b_tensor_map.push_back((int32_t)(LAST_INDEX(sub_a_strategy_size) - 1));
sub_b_tensor_map.push_back((int32_t)(LAST_INDEX(SizeToUint(sub_a_strategy_size)) - 1));
inputs_tensor_map_.push_back(sub_a_tensor_map);
inputs_tensor_map_.push_back(sub_b_tensor_map);
@ -213,7 +213,7 @@ Status BiasAddInfo::GenerateStrategies(int32_t stage_id) {
MS_LOG(INFO) << name_ << " : Generate strategies with broadcast success.";
for (auto &sp : sp_vector) {
Strategys tmp_strategy;
std::vector<Dimensions> tmp_strategy;
Dimensions input0_strategy = sp->GetInputDim()[0];
tmp_strategy.push_back(input0_strategy); // input0

@ -38,7 +38,7 @@ Status DropoutDoMaskInfo::CheckStrategy(const StrategyPtr &strategy) {
return FAILED;
}
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> stra = strategy->GetInputDim();
if (stra.size() != 1) {
MS_LOG(ERROR) << name_ << ": Invalid strategy size " << stra.size() << ", it must be 1";
return FAILED;
@ -68,7 +68,7 @@ Status DropoutDoMaskInfo::InferDevMatrixShape() {
return FAILED;
}
Strategys strategy = strategy_->GetInputDim();
std::vector<Dimensions> strategy = strategy_->GetInputDim();
if (strategy.empty()) {
MS_LOG(ERROR) << name_ << ": The strategy is empty";
return FAILED;
@ -84,7 +84,7 @@ Status DropoutDoMaskInfo::InferTensorMap() {
return FAILED;
}
Shape tensor_map_index;
std::vector<int32_t> tensor_map_index;
size_t size = inputs_shape_[0].size();
// if the dimension of input is 4, and tensor_map_index is [3, 2, 1, 0]
for (size_t i = 0; i < size; ++i) {
@ -169,13 +169,13 @@ Status DropoutDoMaskInfo::GenerateStrategies(int32_t stage_id) {
return SUCCESS;
}
std::shared_ptr<Strategys> DropoutDoMaskInfo::GenerateBatchStrategies() {
std::shared_ptr<std::vector<std::vector<int32_t>>> DropoutDoMaskInfo::GenerateBatchStrategies() {
CheckGlobalDeviceManager();
size_t dev_num = g_device_manager->GetDeviceListByStageId(0).size();
Dimensions strategy(inputs_shape_[0].size() - 1, 1);
(void)strategy.insert(strategy.begin(), SizeToInt(dev_num));
Strategys strategy_v = {strategy};
return std::make_shared<Strategys>(strategy_v);
std::vector<Dimensions> strategy_v = {strategy};
return std::make_shared<std::vector<std::vector<int32_t>>>(strategy_v);
}
Status DropoutDoMaskInfo::Init(const StrategyPtr &strategy) {
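
Note: GenerateBatchStrategies above builds a plain data-parallel strategy: the first (batch) dimension is split across all devices and every other dimension is left unsplit. A standalone sketch of that construction (hypothetical function name, element type as reverted):

#include <cstddef>
#include <cstdint>
#include <vector>

// Splits only the batch dimension; assumes input_rank >= 1.
// Example: rank 4 on 8 devices -> {{8, 1, 1, 1}}.
std::vector<std::vector<int32_t>> MakeBatchStrategy(size_t input_rank, size_t dev_num) {
  std::vector<int32_t> strategy(input_rank - 1, 1);                  // trailing dims unsplit
  strategy.insert(strategy.begin(), static_cast<int32_t>(dev_num));  // batch dim split by dev_num
  return {strategy};
}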

@ -40,7 +40,7 @@ class DropoutDoMaskInfo : public OperatorInfo {
Status GenerateStrategies(int32_t stage_id) override;
Status SetCostUnderStrategy(const StrategyPtr &strategy) override;
Status InitForCostModel(const StrategyPtr &strategy) override;
std::shared_ptr<Strategys> GenerateBatchStrategies() override;
std::shared_ptr<std::vector<std::vector<int32_t>>> GenerateBatchStrategies() override;
std::vector<Operator> GetDropoutGenMaskReplaceOp(const CNodePtr &cnode);
protected:

@ -109,7 +109,7 @@ Status GatherV2Info::CheckStrategy(const StrategyPtr &strategy) {
}
Status GatherV2Info::InferDevMatrixShape() {
Strategys stra = strategy_->GetInputDim();
std::vector<Dimensions> stra = strategy_->GetInputDim();
dev_matrix_shape_ = stra.at(0);
return SUCCESS;
}
@ -129,8 +129,8 @@ Status GatherV2Info::InferTensorMap() {
<< outputs_shape_.size();
return FAILED;
}
Shape tensor_map_in;
Shape tensor_map_out;
std::vector<int32_t> tensor_map_in;
std::vector<int32_t> tensor_map_out;
size_t size = inputs_shape_.at(0).size();
// such as 4: tensor_map_index [3,2,1,0]
for (size_t i = 0; i < size; ++i) {
@ -149,7 +149,7 @@ Status GatherV2Info::InferTensorMap() {
return FAILED;
}
Shape tensor_map_in_index;
std::vector<int32_t> tensor_map_in_index;
if (index_size_ >= 1) {
tensor_map_in_index.push_back(SizeToInt(size - axis_ - 1));
}
@ -323,7 +323,7 @@ Status GatherV2Info::SetCostUnderStrategy(const StrategyPtr &strategy) {
return SUCCESS;
}
std::shared_ptr<Strategys> GatherV2Info::GenerateBatchStrategies() {
std::shared_ptr<std::vector<std::vector<int32_t>>> GatherV2Info::GenerateBatchStrategies() {
if (inputs_shape_.size() != GATHER_V2_INPUTS_SIZE) {
MS_LOG(EXCEPTION) << name_ << ": inputs shape size must be " << GATHER_V2_INPUTS_SIZE << ", but is "
<< inputs_shape_.size();
@ -343,8 +343,8 @@ std::shared_ptr<Strategys> GatherV2Info::GenerateBatchStrategies() {
for (size_t i = 1; i < inputs_shape_[0].size(); i++) {
strategy.push_back(1);
}
Strategys strategy_v = {strategy};
return std::make_shared<Strategys>(strategy_v);
std::vector<Dimensions> strategy_v = {strategy};
return std::make_shared<std::vector<std::vector<int32_t>>>(strategy_v);
}
} // namespace parallel
} // namespace mindspore

@ -50,7 +50,7 @@ class GatherV2Info : public OperatorInfo {
Status GenerateStrategies(int32_t stage_id) override;
Status SetCostUnderStrategy(const StrategyPtr &strategy) override;
std::shared_ptr<Strategys> GenerateBatchStrategies() override;
std::shared_ptr<std::vector<std::vector<int32_t>>> GenerateBatchStrategies() override;
protected:
Status CheckStrategy(const StrategyPtr &strategy) override;

@ -73,8 +73,8 @@ Status GatherV2PInfo::GetAttrs() {
MS_LOG(ERROR) << "Failure: Size of manual_split element must be 2.";
return FAILED;
}
param_split_shapes_.push_back(static_cast<int64_t>(GetValue<int>(value_vector[0])));
index_offsets_.push_back(static_cast<int64_t>(GetValue<int>(value_vector[1])));
param_split_shapes_.push_back(static_cast<int32_t>(GetValue<int>(value_vector[0])));
index_offsets_.push_back(static_cast<int32_t>(GetValue<int>(value_vector[1])));
} else {
MS_LOG(ERROR) << "Failure: Manual split strategy's format is wrong! Need ValueSequeue";
return FAILED;
@ -93,14 +93,14 @@ Status GatherV2PInfo::GetAttrs() {
Status GatherV2PInfo::CheckManualSplit() {
auto param_shape = inputs_shape_.at(0);
int64_t split_shape_sum = std::accumulate(param_split_shapes_.begin(), param_split_shapes_.end(), 0,
[](int64_t s, int64_t shape) { return s + shape; });
int32_t split_shape_sum = std::accumulate(param_split_shapes_.begin(), param_split_shapes_.end(), 0,
[](int32_t s, int32_t shape) { return s + shape; });
if (split_shape_sum < param_shape.at(0)) {
MS_LOG(ERROR) << "Failure: Sum of splited shapes should not be smaller than param_shape.";
return FAILED;
}
if (std::any_of(index_offsets_.begin(), index_offsets_.end(), [](const int64_t &offset) { return offset < 0; })) {
if (std::any_of(index_offsets_.begin(), index_offsets_.end(), [](const int32_t &offset) { return offset < 0; })) {
MS_LOG(ERROR) << "Failure: Index offset must not less than 0.";
return FAILED;
}
@ -269,8 +269,8 @@ Status GatherV2PInfo::InferTensorMap() {
size_t param_size = inputs_shape_.at(0).size();
size_t index_size = inputs_shape_.at(1).size();
size_t total_size = param_size + index_size;
Shape tensor_map_index;
Shape tensor_map_params;
std::vector<int32_t> tensor_map_index;
std::vector<int32_t> tensor_map_params;
auto param_strategy = strategy_->GetInputDim().at(0);
if (param_strategy.at(IntToSize(axis_)) != 1) {
tensor_map_index.insert(tensor_map_index.begin(), index_size, -1);
@ -288,7 +288,7 @@ Status GatherV2PInfo::InferTensorMap() {
}
// infer output tensor map
Shape tensor_map_out;
std::vector<int32_t> tensor_map_out;
if (param_strategy.at(IntToSize(axis_)) == 1) {
// param_strategy(axis) == 1
for (size_t i = 0; i < param_size; ++i) {
@ -427,8 +427,8 @@ Status GatherV2PInfo::InferGroup() {
return SUCCESS;
}
RankList GetRankFromGroup(const Group &group) {
RankList rank_list;
std::vector<int32_t> GetRankFromGroup(const Group &group) {
std::vector<int32_t> rank_list;
auto device_list = group.GetDevicesList();
for (auto &device : device_list) {
rank_list.insert(rank_list.end(), device.rank() % 8);
@ -634,7 +634,7 @@ Status GatherV2PInfo::GenerateStrategies(int32_t stage_id) {
return SUCCESS;
}
std::shared_ptr<Strategys> GatherV2PInfo::GenerateBatchStrategies() {
std::shared_ptr<std::vector<std::vector<int32_t>>> GatherV2PInfo::GenerateBatchStrategies() {
CheckGlobalDeviceManager();
size_t dev_num = g_device_manager->GetDeviceListByStageId(0).size();
Dimensions param_strategy(inputs_shape_[0].size(), 1);
@ -643,8 +643,8 @@ std::shared_ptr<Strategys> GatherV2PInfo::GenerateBatchStrategies() {
for (size_t i = 1; i < inputs_shape_[1].size(); i++) {
index_strategy.push_back(1);
}
Strategys strategy_v = {param_strategy, index_strategy};
return std::make_shared<Strategys>(strategy_v);
std::vector<Dimensions> strategy_v = {param_strategy, index_strategy};
return std::make_shared<std::vector<std::vector<int32_t>>>(strategy_v);
}
} // namespace parallel
} // namespace mindspore

@ -45,7 +45,7 @@ class GatherV2PInfo : public OperatorInfo {
Status GenerateStrategies(int32_t stage_id) override;
Status SetCostUnderStrategy(const StrategyPtr &strategy) override;
ReplaceGraphPtr replace_graph(const CNodePtr &cnode) override;
std::shared_ptr<Strategys> GenerateBatchStrategies() override;
std::shared_ptr<std::vector<std::vector<int32_t>>> GenerateBatchStrategies() override;
protected:
Status CheckStrategy(const StrategyPtr &strategy) override;
@ -67,13 +67,13 @@ class GatherV2PInfo : public OperatorInfo {
std::string target_ = DEVICE;
std::string replace_op_name_ = GATHERV2;
int32_t bias_;
int64_t index_offset_;
int32_t index_offset_;
int32_t slice_size_;
Shape out_dev_matrix_shape_;
Group group_;
bool manual_split_ = false;
std::vector<int64_t> param_split_shapes_;
std::vector<int64_t> index_offsets_;
std::vector<int32_t> param_split_shapes_;
std::vector<int32_t> index_offsets_;
};
class SparseGatherV2Info : public GatherV2PInfo {

@ -118,7 +118,7 @@ Status GetNextInfo::Init(const StrategyPtr &strategy) {
}
Status GetNextInfo::CheckStrategy(const StrategyPtr &strategy) {
Strategys stras = strategy->GetInputDim();
std::vector<Dimensions> stras = strategy->GetInputDim();
for (Dimensions stra : stras) {
if (stra.size() != 0) {
if (is_auto_parallel_) {
@ -254,7 +254,7 @@ Status GetNextInfo::SetCostUnderStrategy(const StrategyPtr &strategy) {
Status GetNextInfo::GenerateStrategies(int32_t stage_id) {
is_auto_parallel_ = true;
Strategys stra;
std::vector<Dimensions> stra;
StrategyPtr sp = std::make_shared<Strategy>(stage_id, stra);
if (SetCostUnderStrategy(sp) == SUCCESS) {
MS_LOG(INFO) << name_ << " : Successfully generated strategy.";

@ -37,7 +37,7 @@ Status L2NormalizeInfo::CheckStrategy(const StrategyPtr &strategy) {
return FAILED;
}
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> stra = strategy->GetInputDim();
Dimensions input_strategy = stra.at(0);
int32_t axis_index = axis_;
if (axis_ < 0) {

@ -49,7 +49,7 @@ Status LayerNormInfo::GetAttrs() {
Status LayerNormInfo::CheckStrategy(const StrategyPtr &strategy) {
MS_EXCEPTION_IF_NULL(strategy);
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> stra = strategy->GetInputDim();
if (stra.size() != LAYER_NORM_INPUT_SIZE) {
MS_LOG(ERROR) << name_ << ": Invalid strategy size " << stra.size();
return FAILED;
@ -104,7 +104,7 @@ Status LayerNormInfo::InferDevMatrixShape() {
MS_LOG(ERROR) << name_ << ": The strategy is null";
return FAILED;
}
Strategys stra = strategy_->GetInputDim();
std::vector<Dimensions> stra = strategy_->GetInputDim();
if (stra.empty()) {
MS_LOG(ERROR) << name_ << ": The strategy is empty";
return FAILED;
@ -228,7 +228,7 @@ Status LayerNormInfo::GenerateGammaAndBetaStrategies(const std::vector<StrategyP
MS_LOG(ERROR) << name_ << ": Invalid strategy";
return FAILED;
}
Strategys tmp_strategy;
std::vector<Dimensions> tmp_strategy;
Dimensions input_strategy = sp->GetInputDim()[0];
Dimensions gamma_strategy = input_strategy;
(void)gamma_strategy.erase(gamma_strategy.begin(),

@ -38,7 +38,7 @@ Status SoftmaxCrossEntropyWithLogitsInfo::CheckStrategy(const mindspore::paralle
return FAILED;
}
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> stra = strategy->GetInputDim();
Dimensions input_strategy = stra.at(0);
Dimensions label_strategy = stra.at(1);
if (input_strategy != label_strategy) {
@ -52,8 +52,8 @@ Status SoftmaxCrossEntropyWithLogitsInfo::CheckStrategy(const mindspore::paralle
axis_index = static_cast<int32_t>(input_dim) + axis_;
}
int64_t input_axis_strategy = input_strategy.at(IntToSize(axis_index));
int64_t label_axis_strategy = label_strategy.at(IntToSize(axis_index));
int32_t input_axis_strategy = input_strategy.at(IntToSize(axis_index));
int32_t label_axis_strategy = label_strategy.at(IntToSize(axis_index));
// Dimension corresponding to axis is un-splittable
if ((input_axis_strategy != MIN_SLICE_NUM) && (label_axis_strategy != MIN_SLICE_NUM)) {
if (is_auto_parallel_) {
@ -82,21 +82,21 @@ Status SoftmaxCrossEntropyWithLogitsInfo::GetAttrs() {
}
Status SoftmaxCrossEntropyWithLogitsInfo::InferDevMatrixShape() {
Strategys stra = strategy_->GetInputDim();
std::vector<Dimensions> stra = strategy_->GetInputDim();
Dimensions input_strategy = stra.at(0);
dev_matrix_shape_ = input_strategy;
return SUCCESS;
}
Status SoftmaxCrossEntropyWithLogitsInfo::InferTensorMap() {
Shape tensor_map_index;
std::vector<int32_t> tensor_map_index;
size_t size = inputs_shape_[0].size();
// such as 4: tensor_map_index [3,2,1,0]
for (size_t i = 0; i < size; ++i) {
tensor_map_index.push_back((int64_t)(size - i - 1));
tensor_map_index.push_back((int32_t)(size - i - 1));
}
Shape first_output_tensor_map = {tensor_map_index[0]};
std::vector<int32_t> first_output_tensor_map = {tensor_map_index[0]};
inputs_tensor_map_.push_back(tensor_map_index); // input
inputs_tensor_map_.push_back(tensor_map_index); // label
outputs_tensor_map_.push_back(first_output_tensor_map); // output-0

@ -158,7 +158,7 @@ Status MatMul::CheckStrategy(const StrategyPtr &strategy) {
return FAILED;
}
Strategys stra = strategy->GetInputDim();
std::vector<Dimensions> stra = strategy->GetInputDim();
Dimensions mat_a_strategy = stra.at(0);
Dimensions mat_b_strategy = stra.at(1);
@ -207,7 +207,7 @@ Status MatMul::CheckStrategy(const StrategyPtr &strategy) {
}
Status MatMulBase::InferDevMatrixShape() {
Strategys stra = strategy_->GetInputDim();
std::vector<Dimensions> stra = strategy_->GetInputDim();
Dimensions mat_a_strategy = stra.at(0);
Dimensions mat_b_strategy = stra.at(1);
@ -279,10 +279,10 @@ Status MatMulBase::InferTensorMap() {
size = dev_matrix_shape_.size() - 1;
}
Shape tensor_map_index;
std::vector<int32_t> tensor_map_index;
// such as 5: tensor_map_index [4,3,2,1,0]
for (size_t i = 0; i < size; ++i) {
tensor_map_index.push_back((int64_t)(LAST_INDEX(size) - i));
tensor_map_index.push_back((int32_t)(LAST_INDEX(size) - i));
}
// infer output tensor map: [4,3,2,0], delete the second-from-end element
@ -309,7 +309,7 @@ Status MatMulBase::InferTensorMap() {
mat_b_tensor_map.begin() + static_cast<different_type>(LAST_INDEX(size) - mat_b_dimension_));
if (transpose_b_) {
// swap the last two elements
int64_t last_value = mat_b_tensor_map.back();
int32_t last_value = mat_b_tensor_map.back();
mat_b_tensor_map.pop_back();
(void)mat_b_tensor_map.insert(
mat_b_tensor_map.begin() + static_cast<different_type>(LAST_INDEX(mat_b_tensor_map.size())), last_value);
@ -436,7 +436,7 @@ Status MatMulBase::GenerateStrategies(int32_t stage_id) {
return FAILED;
}
CheckGlobalDeviceManager();
RankList dev_list = g_device_manager->GetDeviceListByStageId(stage_id);
std::vector<int32_t> dev_list = g_device_manager->GetDeviceListByStageId(stage_id);
size_t dev_num = dev_list.size();
Shape input0_shape = inputs_shape_[0], input1_shape = inputs_shape_[1];
if (transpose_a_) {
@ -503,14 +503,13 @@ Status MatMulBase::GenerateStrategies(int32_t stage_id) {
Status MatMulBase::PrepareStrategy(int32_t stage_id, size_t dev_num,
mindspore::parallel::Dimensions combined_partitions, size_t input0_shape_size,
size_t input1_shape_size, mindspore::parallel::StrategyPtr *const sp) {
int64_t product =
std::accumulate(combined_partitions.begin(), combined_partitions.end(), 1, std::multiplies<int64_t>());
int32_t product = std::accumulate(combined_partitions.begin(), combined_partitions.end(), 1, std::multiplies<int>());
if (!FULLY_USE_DEVICES) {
if (LongToSize(product) > dev_num) {
if (IntToSize(product) > dev_num) {
return FAILED;
}
} else {
if (LongToSize(product) != dev_num) {
if (IntToSize(product) != dev_num) {
return FAILED;
}
}
@ -551,7 +550,7 @@ Status MatMulBase::PrepareStrategy(int32_t stage_id, size_t dev_num,
MS_LOG(ERROR) << name_ << " : Swap last two elements failed.";
}
}
Strategys stras;
std::vector<Dimensions> stras;
stras.push_back(input0_partitions);
stras.push_back(input1_partitions);
(*sp) = std::make_shared<Strategy>(stage_id, stras);
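
Note: the check reverted in PrepareStrategy above multiplies all partition sizes and compares the product with the device count: when FULLY_USE_DEVICES is set the product must equal dev_num, otherwise it only has to fit within it. A small sketch of that check (standalone form, names assumed, positive partition sizes assumed):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Returns true when the partition product is compatible with the device count.
bool PartitionsFitDevices(const std::vector<int32_t> &partitions, size_t dev_num,
                          bool fully_use_devices) {
  const int64_t product = std::accumulate(partitions.begin(), partitions.end(),
                                          static_cast<int64_t>(1), std::multiplies<int64_t>());
  if (fully_use_devices) {
    return static_cast<size_t>(product) == dev_num;
  }
  return static_cast<size_t>(product) <= dev_num;
}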

@ -77,7 +77,7 @@ Status OneHotInfo::CheckStrategy(const StrategyPtr &strategy) {
}
Status OneHotInfo::InferDevMatrixShape() {
Strategys stra = strategy_->GetInputDim();
std::vector<Dimensions> stra = strategy_->GetInputDim();
Dimensions input_strategy = stra.at(0);
// Now input only support 1-D tensor, so the output is a 2-D tensor
@ -96,16 +96,16 @@ Status OneHotInfo::InferDevMatrixShape() {
}
Status OneHotInfo::InferTensorMap() {
Shape input_tensor_map_index, output_tensor_map_index;
std::vector<int32_t> input_tensor_map_index, output_tensor_map_index;
size_t size = outputs_shape_[0].size();
// such as 2: tensor_map_index [1,0]
if (axis_ == 0) {
for (size_t i = 0; i < size; ++i) {
output_tensor_map_index.push_back((int64_t)(i));
output_tensor_map_index.push_back((int32_t)(i));
}
} else {
for (size_t i = 0; i < size; ++i) {
output_tensor_map_index.push_back((int64_t)(LAST_INDEX(size) - i));
output_tensor_map_index.push_back((int32_t)(LAST_INDEX(size) - i));
}
}
outputs_tensor_map_.push_back(output_tensor_map_index);
@ -299,13 +299,13 @@ Status OneHotInfo::SetCostUnderStrategy(const StrategyPtr &strategy) {
return SUCCESS;
}
std::shared_ptr<Strategys> OneHotInfo::GenerateBatchStrategies() {
std::shared_ptr<std::vector<std::vector<int32_t>>> OneHotInfo::GenerateBatchStrategies() {
CheckGlobalDeviceManager();
size_t dev_num = g_device_manager->GetDeviceListByStageId(0).size();
Dimensions strategy = {SizeToInt(dev_num), 1};
Dimensions empty_strategy;
Strategys strategy_v = {strategy, empty_strategy, empty_strategy};
return std::make_shared<Strategys>(strategy_v);
std::vector<Dimensions> strategy_v = {strategy, empty_strategy, empty_strategy};
return std::make_shared<std::vector<std::vector<int32_t>>>(strategy_v);
}
} // namespace parallel
} // namespace mindspore

@ -41,7 +41,7 @@ class OneHotInfo : public OperatorInfo {
Status GenerateStrategies(int32_t stage_id) override;
Status SetCostUnderStrategy(const StrategyPtr &strategy) override;
ReplaceGraphPtr replace_graph(const CNodePtr &cnode) override;
std::shared_ptr<Strategys> GenerateBatchStrategies() override;
std::shared_ptr<std::vector<std::vector<int32_t>>> GenerateBatchStrategies() override;
protected:
Status CheckStrategy(const StrategyPtr &strategy) override;

Some files were not shown because too many files have changed in this diff.
