!3229 modify syn_code bug

Merge pull request !3229 from changzherui/mod_syc_code
pull/3229/MERGE
mindspore-ci-bot authored 5 years ago, committed by Gitee
commit 30e27049a1

@@ -31,13 +31,13 @@ std::string GetOpPythonPath(const OperatorName &op_name) {
  const std::string inner_ops_module = INNER_OP_PATH;
  py::module mod = py::module::import(common::SafeCStr(ops_module));
  py::module inner_mod = py::module::import(common::SafeCStr(inner_ops_module));
-  if (!py::hasattr(mod, common::SafeCStr(op_name))) {
-    if (!py::hasattr(inner_mod, common::SafeCStr(op_name))) {
+  if (!py::hasattr(inner_mod, common::SafeCStr(op_name))) {
+    if (!py::hasattr(mod, common::SafeCStr(op_name))) {
      MS_LOG(EXCEPTION) << ops_module << " or " << inner_ops_module << " don't have op:" << op_name;
    }
-    return inner_ops_module;
+    return ops_module;
  }
-  return ops_module;
+  return inner_ops_module;
}
ValuePtr CreatOpInstance(const OperatorAttrs &attrs, const OperatorName &op_name, const std::string &instance_name) {
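
Note on the hunk above: it swaps the probe order in GetOpPythonPath, so after the change the inner operations module is checked first and an op registered in both modules resolves to INNER_OP_PATH instead of OP_PATH. Below is a minimal standalone sketch of the post-patch resolution logic using pybind11's embedded interpreter; the hard-coded module strings are stand-ins for the real OP_PATH/INNER_OP_PATH constants, and running it assumes a Python environment where MindSpore is importable.

#include <pybind11/embed.h>
#include <iostream>
#include <stdexcept>
#include <string>

namespace py = pybind11;

// Sketch of the post-patch lookup: prefer the inner module, fall back to the
// public one, and fail loudly when neither defines the op.
std::string ResolveOpModule(const std::string &op_name) {
  // Stand-ins for OP_PATH / INNER_OP_PATH (assumed module names, not the real constants).
  const std::string ops_module = "mindspore.ops.operations";
  const std::string inner_ops_module = "mindspore.ops.operations._inner_ops";
  py::module mod = py::module::import(ops_module.c_str());
  py::module inner_mod = py::module::import(inner_ops_module.c_str());
  if (!py::hasattr(inner_mod, op_name.c_str())) {
    if (!py::hasattr(mod, op_name.c_str())) {
      throw std::runtime_error(ops_module + " or " + inner_ops_module + " don't have op: " + op_name);
    }
    return ops_module;  // only the public module defines it
  }
  return inner_ops_module;  // inner module wins when both define the op
}

int main() {
  py::scoped_interpreter guard{};  // start an embedded Python interpreter
  std::cout << ResolveOpModule("TensorAdd") << std::endl;
  return 0;
}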

File diff suppressed because it is too large

@@ -1,93 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_
#define MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_

#include <memory>

#include "ir/pattern_matcher.h"
#include "optimizer/irpass.h"
#include "optimizer/optimizer.h"

namespace mindspore {
namespace opt {
namespace irpass {
// {prim::kPrimMakeRef, X, Y, Z} -> Y
class MakeRefEliminater : public OptimizerCaller {
 public:
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    PatternNode<AnfNodePtr> x, y, z;
    MATCH_REPLACE(node, PPrimitive(prim::kPrimMakeRef, x, y, z), y);
    return nullptr;
  }
};

// {prim::kPrimGetRefValue, Parameter} -> Parameter
// {prim::kPrimGetRefOrigin, Parameter} -> Parameter
class GetRefParamEliminater : public OptimizerCaller {
 public:
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    PatternNode<AnfNodePtr> x;
    MATCH_REPLACE_IF(node, PPrimitive(prim::kPrimGetRefValue, x), x, x.CheckFunc(IsParam, node));
    MATCH_REPLACE_IF(node, PPrimitive(prim::kPrimGetRefOrigin, x), x, x.CheckFunc(IsParam, node));
    return nullptr;
  }
};

// {prim::kPrimGetRefKey, {prim::kPrimMakeRef, X, Y, Z}} -> X
// {prim::kPrimGetRefValue, {prim::kPrimMakeRef, X, Y, Z}} -> Y
// {prim::kPrimGetRefOrigin, {prim::kPrimMakeRef, X, Y, Z}} -> Z
class GetMakeRefEliminater : public OptimizerCaller {
 public:
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    PatternNode<AnfNodePtr> x, y, z;
    MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefKey, PPrimitive(prim::kPrimMakeRef, x, y, z)), x);
    MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefValue, PPrimitive(prim::kPrimMakeRef, x, y, z)), y);
    MATCH_REPLACE(node, PPrimitive(prim::kPrimGetRefOrigin, PPrimitive(prim::kPrimMakeRef, x, y, z)), z);
    return nullptr;
  }
};

// IsValueNode<RefKey>
class ReplaceRefkeyByParam : public OptimizerCaller {
 public:
  AnfNodePtr operator()(const OptimizerPtr &optimizer, const AnfNodePtr &node) override {
    auto RefKeyLambda = [&node, &optimizer]() -> AnfNodePtr {
      auto refkey = GetValueNode<RefKeyPtr>(node);
      auto resource = std::dynamic_pointer_cast<pipeline::Resource>(optimizer->resource());
      MS_EXCEPTION_IF_NULL(resource);
      auto top_graph = resource->func_graph();
      MS_EXCEPTION_IF_NULL(top_graph);
      for (const auto &tnode : top_graph->parameters()) {
        auto para = tnode->cast<ParameterPtr>();
        if (para != nullptr && para->name() == refkey->tag()) {
          return para;
        }
      }
      return nullptr;
    };
    PatternNode<AnfNodePtr> x;
    MATCH_REPLACE_LAMBDA_IF(node, x, RefKeyLambda, x.CheckFunc(IsValueNode<RefKey>, node));
    return nullptr;
  }
};
}  // namespace irpass
}  // namespace opt
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_OPTIMIZER_IRPASS_REF_ELIMINATE_H_
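
For readers unfamiliar with the pattern-matcher DSL in the deleted header: each MATCH_REPLACE(node, pattern, target) call tries to unify node against the pattern and, on success, returns the subtree bound to target. The following toy illustration of the GetMakeRefEliminater rules uses a hand-rolled node type; it stands in for MindSpore's AnfNode/PatternNode machinery and is not the real API.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy stand-in for an IR node: a leaf name or an application (op, args...).
struct Node {
  std::string op;    // empty for leaves
  std::string name;  // leaf payload
  std::vector<std::shared_ptr<Node>> args;
};
using NodePtr = std::shared_ptr<Node>;

NodePtr Leaf(const std::string &n) { return std::make_shared<Node>(Node{"", n, {}}); }
NodePtr App(const std::string &op, std::vector<NodePtr> args) {
  return std::make_shared<Node>(Node{op, "", std::move(args)});
}

// The GetMakeRefEliminater rules, written out by hand:
//   GetRefKey(MakeRef(X, Y, Z))    -> X
//   GetRefValue(MakeRef(X, Y, Z))  -> Y
//   GetRefOrigin(MakeRef(X, Y, Z)) -> Z
NodePtr EliminateGetMakeRef(const NodePtr &node) {
  if (node->args.size() == 1 && node->args[0]->op == "MakeRef" && node->args[0]->args.size() == 3) {
    const auto &ref = node->args[0]->args;
    if (node->op == "GetRefKey") return ref[0];
    if (node->op == "GetRefValue") return ref[1];
    if (node->op == "GetRefOrigin") return ref[2];
  }
  return nullptr;  // pattern did not match; caller keeps the original node
}

int main() {
  auto make_ref = App("MakeRef", {Leaf("key"), Leaf("value"), Leaf("origin")});
  auto get_value = App("GetRefValue", {make_ref});
  auto replaced = EliminateGetMakeRef(get_value);
  std::cout << (replaced ? replaced->name : "no match") << std::endl;  // prints "value"
  return 0;
}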

@@ -1,175 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "parallel/graph_util/generate_graph.h"

#include <algorithm>
#include <memory>
#include <string>
#include <utility>

using mindspore::tensor::Tensor;

namespace mindspore {
namespace parallel {
std::string GetOpPythonPath(const OperatorName &op_name) {
  // almost all ops are defined in two main paths
  const std::string ops_module = OP_PATH;
  const std::string inner_ops_module = INNER_OP_PATH;
  py::module mod = py::module::import(common::SafeCStr(ops_module));
  py::module inner_mod = py::module::import(common::SafeCStr(inner_ops_module));
  if (!py::hasattr(inner_mod, common::SafeCStr(op_name))) {
    if (!py::hasattr(mod, common::SafeCStr(op_name))) {
      MS_LOG(EXCEPTION) << ops_module << " or " << inner_ops_module << " don't have op:" << op_name;
    }
    return ops_module;
  }
  return inner_ops_module;
}

ValuePtr CreatOpInstance(const OperatorAttrs &attrs, const OperatorName &op_name, const std::string &instance_name) {
  std::string op_path = GetOpPythonPath(op_name);
  py::module mod = py::module::import(common::SafeCStr(op_path));
  if (!py::hasattr(mod, common::SafeCStr(op_name))) {
    MS_LOG(ERROR) << "Failure: op_path:" << op_path << " don't have attr " << op_name;
    return nullptr;
  }
  std::vector<py::object> arg_list;
  (void)std::transform(attrs.begin(), attrs.end(), std::back_inserter(arg_list),
                       [](const Attr &attr) { return ValuePtrToPyData(attr.second); });
  py::object obj =
    parse::python_adapter::CallPyFn(GET_OP_FUNCTION_PATH, GET_OP_FUNCTION, op_name, op_path, instance_name, arg_list);
  ValuePtr op_instance = nullptr;
  bool succ = parse::ConvertData(obj, &op_instance);
  if (!succ) {
    MS_LOG(ERROR) << "Failure:get Python op " << op_path << " from " << op_name << " fail";
    return nullptr;
  }
  return op_instance;
}

AnfNodePtr ValuePtrToAnfNodePtr(const ValuePtr &value_ptr) {
  auto value_node = NewValueNode(value_ptr);
  MS_EXCEPTION_IF_NULL(value_node);
  return value_node->cast<AnfNodePtr>();
}

static std::unordered_map<int32_t, AnfNodePtr> int_tensor_map = {};
AnfNodePtr CreateInt32Tensor(int32_t value) {
  auto it = int_tensor_map.find(value);
  if (it != int_tensor_map.end()) {
    return it->second;
  }
  mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<tensor::Tensor>(py::int_(value), kInt32);
  ValuePtr value_ptr = MakeValue(tensor_ptr);
  auto anf_node_ptr = ValuePtrToAnfNodePtr(value_ptr);
  int_tensor_map[value] = anf_node_ptr;
  return anf_node_ptr;
}

AnfNodePtr CreatTypeInt(int32_t value) {
  ValuePtr value_ptr = MakeValue(std::make_shared<Int>(value));
  return ValuePtrToAnfNodePtr(value_ptr);
}

AnfNodePtr CreatInt32Imm(int32_t value) {
  ValuePtr value_ptr = MakeValue(std::make_shared<Int32Imm>(value));
  return ValuePtrToAnfNodePtr(value_ptr);
}

std::string GetInstanceNameByCNode(const CNodePtr &cnode) {
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
  if (!prim) {
    MS_LOG(EXCEPTION) << "The first input of the cnode is not a PrimitivePtr.";
  }
  std::string instance_name = prim->instance_name();
  return HashInstanceName(instance_name);
}

std::string HashInstanceName(const std::string &name) {
  auto using_hash_name = common::GetEnv(USING_HASH_NAME);
  std::string instance_name;
  if ((using_hash_name.empty()) || (using_hash_name == "on")) {
    instance_name = HashName(name);
  } else {
    instance_name = name;
  }
  return instance_name;
}

Status GenerateGraph::Init(const CNodePtr &cnode) {
  if (!cnode) {
    MS_LOG(ERROR) << "Init:cnode is nullptr";
    return FAILED;
  }
  cnode_ = cnode;
  func_graph_ = cnode->func_graph();
  if (!func_graph_) {
    MS_LOG(ERROR) << "Init:func_graph_ is nullptr";
    return FAILED;
  }
  manager_ = func_graph_->manager();
  if (!manager_) {
    MS_LOG(ERROR) << "Init:manager_ is nullptr";
    return FAILED;
  }
  scope_ = cnode_->scope();
  if (!scope_) {
    MS_LOG(ERROR) << "Init:scope_ is nullptr";
    return FAILED;
  }
  virtual_input_node_ = std::make_shared<AnfNode>(nullptr);
  virtual_input_node_->set_scope(scope_);
  instance_name_base_ = GetInstanceNameByCNode(cnode_);
  name_idx_ = 0;
  return SUCCESS;
}

AnfNodePtr GenerateGraph::PushBack(const std::vector<AnfNodePtr> &inputs) {
  CNodePtr cnode = func_graph_->NewCNode(inputs);  // using NewCNode to creat anfnode
  MS_EXCEPTION_IF_NULL(cnode);
  cnode->set_scope(scope_);
  if (inputs.size() < 2) {
    MS_LOG(EXCEPTION) << "inputs.size() must be more than 1";
  }
  (void)manager_->Replace(inputs.at(1), cnode);  // using Replace function to insert cnode after inputs[0]
  auto new_anf_node_ptr = cnode->cast<AnfNodePtr>();
  MS_EXCEPTION_IF_NULL(new_anf_node_ptr);
  return new_anf_node_ptr;
}

AnfNodePtr GenerateGraph::NewOpInst(const OperatorName &op_name, const OperatorAttrs &attrs) {
  name_idx_++;
  ValuePtr pyop_instance = CreatOpInstance(attrs, op_name, instance_name_base_ + op_name + std::to_string(name_idx_));
  if (pyop_instance == nullptr) {
    MS_LOG(EXCEPTION) << "Failure:" << op_name << " CreatOpInstance failed";
  }
  auto value_node = NewValueNode(pyop_instance);
  return value_node->cast<AnfNodePtr>();
}

AnfNodePtr GenerateGraph::NewOpInst(const OperatorName &op_name) {
  name_idx_++;
  OperatorAttrs attrs;
  ValuePtr pyop_instance = CreatOpInstance(attrs, op_name, instance_name_base_ + std::to_string(name_idx_));
  if (pyop_instance == nullptr) {
    MS_LOG(EXCEPTION) << "Failure:" << op_name << " CreatOpInstance failed";
  }
  auto value_node = NewValueNode(pyop_instance);
  return value_node->cast<AnfNodePtr>();
}
}  // namespace parallel
}  // namespace mindspore
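
One detail worth noting in the deleted file: CreateInt32Tensor memoizes the value node for each int32 constant in int_tensor_map, so repeated constants share a single node instead of being re-materialized. The same memoization pattern in isolation, as a sketch with a stand-in ValueNode type (not MindSpore's real node classes):

#include <iostream>
#include <memory>
#include <unordered_map>

// Stand-in for an IR value node wrapping a constant.
struct ValueNode {
  int32_t value;
};
using ValueNodePtr = std::shared_ptr<ValueNode>;

// Same memoization pattern as CreateInt32Tensor: each distinct int32 constant
// is materialized once and the shared node is reused afterwards.
ValueNodePtr GetInt32Node(int32_t value) {
  static std::unordered_map<int32_t, ValueNodePtr> cache;
  auto it = cache.find(value);
  if (it != cache.end()) {
    return it->second;
  }
  auto node = std::make_shared<ValueNode>(ValueNode{value});
  cache[value] = node;
  return node;
}

int main() {
  auto a = GetInt32Node(7);
  auto b = GetInt32Node(7);
  std::cout << std::boolalpha << (a == b) << std::endl;  // true: the node is shared
  return 0;
}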

@@ -330,7 +330,6 @@ __all__ = [
    "ApplyCenteredRMSProp",
    "SpaceToBatchND",
    "BatchToSpaceND",
-   "ReverseSequence",
    "SquareSumAll",
    "BitwiseAnd",
    "BitwiseOr",
@@ -346,12 +345,10 @@ __all__ = [
    "ApproximateEqual",
    "InplaceUpdate",
    "InTopK",
-   "CropAndResize",
    "LRN",
    "Mod",
    "PopulationCount",
    "ParallelConcat",
-   "EmbeddingLookup",
    "Push",
    "Pull"
]
