!3198 Synchronize the latest Ascend software suite (18 Jul 2020) and merge branches
Merge pull request !3198 from yanghaoran/code_sync_0718
commit 6f8863b65d
@@ -1 +1 @@
-Subproject commit 18cf690152add623ffbddfbbb4674d1b34484ca7
+Subproject commit 103f2d1019dc50d781d7a964551d9f1f50b3b009
@@ -0,0 +1,71 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "backend/optimizer/ascend/ir_fission/tensor_scatter_update_fission.h"
#include <vector>
#include <memory>
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/optimizer/common/helper.h"

namespace mindspore {
namespace opt {
namespace {
CNodePtr CreateTensorMove(const FuncGraphPtr &graph, const CNodePtr &tensor_scatter_update) {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(tensor_scatter_update);
  std::vector<AnfNodePtr> inputs = {NewValueNode(std::make_shared<Primitive>(kTensorMoveOpName)),
                                    tensor_scatter_update->input(1)};
  auto tensor_move = graph->NewCNode(inputs);
  MS_EXCEPTION_IF_NULL(tensor_move);
  tensor_move->set_scope(tensor_scatter_update->scope());
  tensor_move->set_abstract(tensor_scatter_update->abstract());
  AnfAlgo::SetNodeAttr(kAttrUseLocking, MakeValue(false), tensor_move);
  return tensor_move;
}

CNodePtr CreateScatterNdUpdate(const FuncGraphPtr &graph, const CNodePtr &tensor_scatter_update,
                               const CNodePtr &tensor_move) {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(tensor_scatter_update);
  MS_EXCEPTION_IF_NULL(tensor_move);
  std::vector<AnfNodePtr> inputs = {NewValueNode(std::make_shared<Primitive>(kScatterNdUpdateOpName)), tensor_move,
                                    tensor_scatter_update->input(2), tensor_scatter_update->input(3)};
  auto scatter_nd_update = graph->NewCNode(inputs);
  MS_EXCEPTION_IF_NULL(scatter_nd_update);
  scatter_nd_update->set_scope(tensor_scatter_update->scope());
  scatter_nd_update->set_abstract(tensor_scatter_update->abstract());
  return scatter_nd_update;
}
}  // namespace

const BaseRef TensorScatterUpdateFission::DefinePattern() const {
  VarPtr Xs = std::make_shared<SeqVar>();
  auto prim = std::make_shared<Primitive>(kTensorScatterUpdateOpName);
  return VectorRef({prim, Xs});
}

const AnfNodePtr TensorScatterUpdateFission::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                     const EquivPtr &) const {
  MS_EXCEPTION_IF_NULL(func_graph);
  MS_EXCEPTION_IF_NULL(node);
  auto tensor_scatter_update = node->cast<CNodePtr>();
  if (tensor_scatter_update == nullptr || tensor_scatter_update->size() != 4) {
    return nullptr;
  }
  auto tensor_move = CreateTensorMove(func_graph, tensor_scatter_update);
  return CreateScatterNdUpdate(func_graph, tensor_scatter_update, tensor_move);
}
}  // namespace opt
}  // namespace mindspore
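Aside, not part of this diff: a minimal standalone sketch of the semantics the fission above relies on. TensorScatterUpdate has value semantics (it returns a new tensor), while ScatterNdUpdate writes into its first input in place, so the pass copies the input via TensorMove before scattering. The vector-based helpers below are illustrative stand-ins, not MindSpore APIs.

#include <cstddef>
#include <iostream>
#include <vector>

// In-place scatter over a flat buffer: out[indices[i]] = updates[i] (1-D case for brevity).
void ScatterNdUpdateInPlace(std::vector<float> *out, const std::vector<std::size_t> &indices,
                            const std::vector<float> &updates) {
  for (std::size_t i = 0; i < indices.size(); ++i) {
    (*out)[indices[i]] = updates[i];
  }
}

// Value-semantics variant mirroring TensorScatterUpdate: copy first (the TensorMove step),
// then scatter into the copy, leaving the original input untouched.
std::vector<float> TensorScatterUpdateSketch(const std::vector<float> &input,
                                             const std::vector<std::size_t> &indices,
                                             const std::vector<float> &updates) {
  std::vector<float> out = input;
  ScatterNdUpdateInPlace(&out, indices, updates);
  return out;
}

int main() {
  std::vector<float> input = {1, 2, 3, 4};
  std::vector<float> result = TensorScatterUpdateSketch(input, {1, 3}, {20, 40});
  for (float v : result) std::cout << v << ' ';  // prints: 1 20 3 40
  std::cout << '\n';                             // `input` is still {1, 2, 3, 4}
  return 0;
}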
@@ -0,0 +1,33 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_
#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_

#include "backend/optimizer/common/optimizer.h"

namespace mindspore {
namespace opt {
class TensorScatterUpdateFission : public PatternProcessPass {
 public:
  explicit TensorScatterUpdateFission(bool multigraph = true)
      : PatternProcessPass("tensor_scatter_update_fission", multigraph) {}
  ~TensorScatterUpdateFission() override = default;
  const BaseRef DefinePattern() const override;
  const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
};
}  // namespace opt
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_
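Aside, not part of this diff: a PatternProcessPass such as this is normally registered with one of the Ascend backend pass managers, roughly along the lines of pm->AddPass(std::make_shared<TensorScatterUpdateFission>()). The actual registration site is not shown in this excerpt, so treat that call site as an assumption.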
File diff suppressed because it is too large
@@ -0,0 +1,93 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_
#define MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_

#include <memory>

#include "ir/pattern_matcher.h"
#include "optimizer/irpass.h"
#include "optimizer/optimizer.h"

namespace mindspore {
namespace opt {
namespace irpass {
// {prim::kPrimMakeRef, X, Y, Z} -> Y
class MakeRefEliminater : public OptimizerCaller {
 public:
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    PatternNode<AnfNodePtr> x, y, z;
    MATCH_REPLACE(node, PPrimitive(prim::kPrimMakeRef, x, y, z), y);
    return nullptr;
  }
};

// {prim::kPrimGetRefValue, Parameter} -> Parameter
// {prim::kPrimGetRefOrigin, Parameter} -> Parameter
class GetRefParamEliminater : public OptimizerCaller {
 public:
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    PatternNode<AnfNodePtr> x;
    MATCH_REPLACE_IF(node, PPrimitive(prim::kPrimGetRefValue, x), x, x.CheckFunc(IsParam, node));
    MATCH_REPLACE_IF(node, PPrimitive(prim::kPrimGetRefOrigin, x), x, x.CheckFunc(IsParam, node));
    return nullptr;
  }
};

// {prim::kPrimGetRefKey, {prim::kPrimMakeRef, X, Y, Z}} -> X
// {prim::kPrimGetRefValue, {prim::kPrimMakeRef, X, Y, Z}} -> Y
// {prim::kPrimGetRefOrigin, {prim::kPrimMakeRef, X, Y, Z}} -> Z
class GetMakeRefEliminater : public OptimizerCaller {
 public:
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    PatternNode<AnfNodePtr> x, y, z;
    MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefKey, PPrimitive(prim::kPrimMakeRef, x, y, z)), x);
    MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefValue, PPrimitive(prim::kPrimMakeRef, x, y, z)), y);
    MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefOrigin, PPrimitive(prim::kPrimMakeRef, x, y, z)), z);
    return nullptr;
  }
};

// IsValueNode<RefKey>
class ReplaceRefkeyByParam : public OptimizerCaller {
 public:
  AnfNodePtr operator()(const OptimizerPtr &optimizer, const AnfNodePtr &node) override {
    auto RefKeyLambda = [&node, &optimizer]() -> AnfNodePtr {
      auto refkey = GetValueNode<RefKeyPtr>(node);
      auto resource = std::dynamic_pointer_cast<pipeline::Resource>(optimizer->resource());
      MS_EXCEPTION_IF_NULL(resource);

      auto top_graph = resource->func_graph();
      MS_EXCEPTION_IF_NULL(top_graph);

      for (const auto &tnode : top_graph->parameters()) {
        auto para = tnode->cast<ParameterPtr>();
        if (para != nullptr && para->name() == refkey->tag()) {
          return para;
        }
      }
      return nullptr;
    };
    PatternNode<AnfNodePtr> x;
    MATCH_REPLACE_LAMBDA_IF(node, x, RefKeyLambda, x.CheckFunc(IsValueNode<RefKey>, node));
    return nullptr;
  }
};
}  // namespace irpass
}  // namespace opt
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_
@@ -0,0 +1,175 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "parallel/graph_util/generate_graph.h"

#include <algorithm>
#include <memory>
#include <string>
#include <utility>

using mindspore::tensor::Tensor;

namespace mindspore {
namespace parallel {
std::string GetOpPythonPath(const OperatorName &op_name) {
  // almost all ops are defined in two main paths
  const std::string ops_module = OP_PATH;
  const std::string inner_ops_module = INNER_OP_PATH;
  py::module mod = py::module::import(common::SafeCStr(ops_module));
  py::module inner_mod = py::module::import(common::SafeCStr(inner_ops_module));
  if (!py::hasattr(inner_mod, common::SafeCStr(op_name))) {
    if (!py::hasattr(mod, common::SafeCStr(op_name))) {
      MS_LOG(EXCEPTION) << ops_module << " or " << inner_ops_module << " don't have op:" << op_name;
    }
    return ops_module;
  }
  return inner_ops_module;
}

ValuePtr CreatOpInstance(const OperatorAttrs &attrs, const OperatorName &op_name, const std::string &instance_name) {
  std::string op_path = GetOpPythonPath(op_name);
  py::module mod = py::module::import(common::SafeCStr(op_path));
  if (!py::hasattr(mod, common::SafeCStr(op_name))) {
    MS_LOG(ERROR) << "Failure: op_path:" << op_path << " don't have attr " << op_name;
    return nullptr;
  }
  std::vector<py::object> arg_list;
  (void)std::transform(attrs.begin(), attrs.end(), std::back_inserter(arg_list),
                       [](const Attr &attr) { return ValuePtrToPyData(attr.second); });
  py::object obj =
    parse::python_adapter::CallPyFn(GET_OP_FUNCTION_PATH, GET_OP_FUNCTION, op_name, op_path, instance_name, arg_list);
  ValuePtr op_instance = nullptr;
  bool succ = parse::ConvertData(obj, &op_instance);
  if (!succ) {
    MS_LOG(ERROR) << "Failure:get Python op " << op_path << " from " << op_name << " fail";
    return nullptr;
  }
  return op_instance;
}

AnfNodePtr ValuePtrToAnfNodePtr(const ValuePtr &value_ptr) {
  auto value_node = NewValueNode(value_ptr);
  MS_EXCEPTION_IF_NULL(value_node);
  return value_node->cast<AnfNodePtr>();
}

static std::unordered_map<int32_t, AnfNodePtr> int_tensor_map = {};
AnfNodePtr CreateInt32Tensor(int32_t value) {
  auto it = int_tensor_map.find(value);
  if (it != int_tensor_map.end()) {
    return it->second;
  }
  mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<tensor::Tensor>(py::int_(value), kInt32);
  ValuePtr value_ptr = MakeValue(tensor_ptr);
  auto anf_node_ptr = ValuePtrToAnfNodePtr(value_ptr);
  int_tensor_map[value] = anf_node_ptr;
  return anf_node_ptr;
}

AnfNodePtr CreatTypeInt(int32_t value) {
  ValuePtr value_ptr = MakeValue(std::make_shared<Int>(value));
  return ValuePtrToAnfNodePtr(value_ptr);
}

AnfNodePtr CreatInt32Imm(int32_t value) {
  ValuePtr value_ptr = MakeValue(std::make_shared<Int32Imm>(value));
  return ValuePtrToAnfNodePtr(value_ptr);
}

std::string GetInstanceNameByCNode(const CNodePtr &cnode) {
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
  if (!prim) {
    MS_LOG(EXCEPTION) << "The first input of the cnode is not a PrimitivePtr.";
  }
  std::string instance_name = prim->instance_name();
  return HashInstanceName(instance_name);
}

std::string HashInstanceName(const std::string &name) {
  auto using_hash_name = common::GetEnv(USING_HASH_NAME);
  std::string instance_name;
  if ((using_hash_name.empty()) || (using_hash_name == "on")) {
    instance_name = HashName(name);
  } else {
    instance_name = name;
  }
  return instance_name;
}

Status GenerateGraph::Init(const CNodePtr &cnode) {
  if (!cnode) {
    MS_LOG(ERROR) << "Init:cnode is nullptr";
    return FAILED;
  }
  cnode_ = cnode;
  func_graph_ = cnode->func_graph();
  if (!func_graph_) {
    MS_LOG(ERROR) << "Init:func_graph_ is nullptr";
    return FAILED;
  }
  manager_ = func_graph_->manager();
  if (!manager_) {
    MS_LOG(ERROR) << "Init:manager_ is nullptr";
    return FAILED;
  }
  scope_ = cnode_->scope();
  if (!scope_) {
    MS_LOG(ERROR) << "Init:scope_ is nullptr";
    return FAILED;
  }
  virtual_input_node_ = std::make_shared<AnfNode>(nullptr);
  virtual_input_node_->set_scope(scope_);
  instance_name_base_ = GetInstanceNameByCNode(cnode_);
  name_idx_ = 0;
  return SUCCESS;
}

AnfNodePtr GenerateGraph::PushBack(const std::vector<AnfNodePtr> &inputs) {
  CNodePtr cnode = func_graph_->NewCNode(inputs);  // using NewCNode to creat anfnode
  MS_EXCEPTION_IF_NULL(cnode);
  cnode->set_scope(scope_);
  if (inputs.size() < 2) {
    MS_LOG(EXCEPTION) << "inputs.size() must be more than 1";
  }
  (void)manager_->Replace(inputs.at(1), cnode);  // using Replace function to insert cnode after inputs[0]
  auto new_anf_node_ptr = cnode->cast<AnfNodePtr>();
  MS_EXCEPTION_IF_NULL(new_anf_node_ptr);
  return new_anf_node_ptr;
}

AnfNodePtr GenerateGraph::NewOpInst(const OperatorName &op_name, const OperatorAttrs &attrs) {
  name_idx_++;
  ValuePtr pyop_instance = CreatOpInstance(attrs, op_name, instance_name_base_ + op_name + std::to_string(name_idx_));
  if (pyop_instance == nullptr) {
    MS_LOG(EXCEPTION) << "Failure:" << op_name << " CreatOpInstance failed";
  }
  auto value_node = NewValueNode(pyop_instance);
  return value_node->cast<AnfNodePtr>();
}

AnfNodePtr GenerateGraph::NewOpInst(const OperatorName &op_name) {
  name_idx_++;
  OperatorAttrs attrs;
  ValuePtr pyop_instance = CreatOpInstance(attrs, op_name, instance_name_base_ + std::to_string(name_idx_));
  if (pyop_instance == nullptr) {
    MS_LOG(EXCEPTION) << "Failure:" << op_name << " CreatOpInstance failed";
  }
  auto value_node = NewValueNode(pyop_instance);
  return value_node->cast<AnfNodePtr>();
}
}  // namespace parallel
}  // namespace mindspore
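Aside, not part of this diff: a minimal standalone sketch of the HashInstanceName toggle above. The HashName helper and the USING_HASH_NAME constant are not shown in this excerpt, so the sketch assumes the environment variable is literally named USING_HASH_NAME and uses std::hash as a stand-in for HashName.

#include <cstdlib>
#include <functional>
#include <iostream>
#include <string>

// Stand-in for the HashName helper that this diff does not show.
std::string HashNameStandIn(const std::string &name) {
  return std::to_string(std::hash<std::string>{}(name));
}

// Mirrors HashInstanceName: hash the name when the toggle is unset or set to "on",
// otherwise keep the raw instance name.
std::string HashInstanceNameSketch(const std::string &name) {
  const char *env = std::getenv("USING_HASH_NAME");  // assumed variable name
  const std::string using_hash_name = (env == nullptr) ? "" : env;
  return (using_hash_name.empty() || using_hash_name == "on") ? HashNameStandIn(name) : name;
}

int main() {
  std::cout << HashInstanceNameSketch("gather_v2") << '\n';
  return 0;
}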
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,39 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""array_ops"""

from .. import operations as P
from ..operations import _grad_ops as G
from ..operations import _inner_ops as inner
from ..composite.multitype_ops.zeros_like_impl import zeros_like
from .grad_base import bprop_getters


@bprop_getters.register(inner.StridedSliceAICPU)
def get_bprop_strided_slice_aicpu(self):
    """Generate bprop for StridedSlice"""
    shape_op = P.Shape()
    input_grad = G.StridedSliceGradAICPU(self.begin_mask,
                                         self.end_mask,
                                         self.ellipsis_mask,
                                         self.new_axis_mask,
                                         self.shrink_axis_mask)

    def bprop(x, begin, end, strides, out, dout):
        dx = input_grad(dout, shape_op(x), begin, end, strides)
        return dx, zeros_like(begin), zeros_like(end), zeros_like(strides)

    return bprop
Some files were not shown because too many files have changed in this diff.