parent 6c4ee3f3d1
commit 87668d6ea2
@@ -1,14 +0,0 @@
file(GLOB_RECURSE _PREDICT_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
        "predict.cc"
        "generator/utils/ir_model_util.cc"
        "converter/*.cc"
        "converter/attr_utils/*.cc"
        "converter/lite_model/*.cc"
        "converter/lite_model/operations/*.cc"
        )

if (ENABLE_D)
    file(GLOB_RECURSE _D_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "generator/ir/*.cc")
    list(APPEND _PREDICT_SRC_LIST ${_D_SRC_LIST})
endif ()
add_library(_mindspore_predict_obj OBJECT ${_PREDICT_SRC_LIST})
@@ -1,229 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/attr_utils/convert_util.h"

namespace mindspore {
namespace predict {
namespace utils {
TypePtr GetTypePtr(const AnfNodePtr &anf_node) {
  MS_EXCEPTION_IF_NULL(anf_node);
  TypePtr type_ptr = anf_node->Type();
  MS_EXCEPTION_IF_NULL(type_ptr);
  if (type_ptr->isa<TensorType>()) {
    auto tensor_ptr = type_ptr->cast<TensorTypePtr>();
    MS_EXCEPTION_IF_NULL(tensor_ptr);
    TypePtr elem = tensor_ptr->element();
    return elem;
  } else if (type_ptr->isa<Tuple>()) {
    auto tuple_ptr = type_ptr->cast<TuplePtr>();
    MS_EXCEPTION_IF_NULL(tuple_ptr);
    auto tuple_i = (*tuple_ptr)[0];
    MS_EXCEPTION_IF_NULL(tuple_i);
    if (tuple_i->isa<TensorType>()) {
      auto tensor_ptr = tuple_i->cast<TensorTypePtr>();
      MS_EXCEPTION_IF_NULL(tensor_ptr);
      TypePtr elem = tensor_ptr->element();
      MS_EXCEPTION_IF_NULL(elem);
      return elem;
    } else if (tuple_i->isa<Number>()) {
      return type_ptr;
    } else {
      MS_LOG(EXCEPTION) << "unsupported type: " << type_ptr->ToString();
    }
  } else if (type_ptr->isa<Number>()) {
    return type_ptr;
  }
  std::string type_name = type_ptr->ToString();
  MS_LOG(EXCEPTION)
    << "The output type of node should be a tensor type a number or a tuple of tensor type, but this is: "
    << type_name;
}

MsDataType GetMSDataType(TypeId ori_data_type) {
  MsDataType dst_data_type;
  switch (ori_data_type) {
    case kNumberTypeFloat16:
      dst_data_type = mindspore::predict::DataType_DT_FLOAT16;
      return dst_data_type;
    case kNumberTypeFloat32:
      dst_data_type = mindspore::predict::DataType_DT_FLOAT;
      return dst_data_type;
    case kNumberTypeInt8:
      dst_data_type = mindspore::predict::DataType_DT_INT8;
      return dst_data_type;
    case kNumberTypeInt32:
      dst_data_type = mindspore::predict::DataType_DT_INT32;
      return dst_data_type;
    case kNumberTypeUInt8:
      dst_data_type = mindspore::predict::DataType_DT_UINT8;
      return dst_data_type;
    case kNumberTypeUInt32:
      dst_data_type = mindspore::predict::DataType_DT_UINT32;
      return dst_data_type;
    case kTypeUnknown:
      dst_data_type = mindspore::predict::DataType_DT_UNDEFINED;
      return dst_data_type;
    default:
      MS_LOG(EXCEPTION) << "Ms don't support this DataType";
  }
}

MsFormat GetMsFormat(const std::string &format_str) {
  if (format_str == kOpFormat_DEFAULT) {
    MsFormat ms_format = predict::Format_NCHW;
    return ms_format;
  } else {
    // all middle format default to NCHW
    return predict::Format_NCHW;
  }
}

TensorPtr GetParaAscendTensor(const AnfNodePtr &anf_node) {
  MS_EXCEPTION_IF_NULL(anf_node);
  if (!anf_node->isa<Parameter>()) {
    return nullptr;
  }
  auto device_type_id = AnfAlgo::GetOutputDeviceDataType(anf_node, 0);
  // device type_ptr
  auto device_type_ptr = GetTypePtr(anf_node);
  // device shape
  auto shape = AnfAlgo::GetOutputDeviceShape(anf_node, 0);
  std::vector<int> tensor_shape;
  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(tensor_shape), SizeToInt);
  // device format
  auto format = AnfAlgo::GetOutputFormat(anf_node, 0);
  // device tensor
  TensorPtr device_tensor = std::make_shared<tensor::Tensor>(device_type_id, tensor_shape);
  // device info
  device_tensor->SetDeviceInfo(format, device_type_ptr);
  return device_tensor;
}

TensorPtr GetParaCpuTensor(const AnfNodePtr &anf_node) {
  MS_EXCEPTION_IF_NULL(anf_node);
  if (!(anf_node->isa<Parameter>())) {
    return nullptr;
  } else {
    auto ori_type_id = AnfAlgo::GetOutputInferDataType(anf_node, 0);
    auto ori_type_ptr = GetTypePtr(anf_node);
    auto ori_shape = AnfAlgo::GetOutputInferShape(anf_node, 0);
    std::vector<int> tensor_shape;
    (void)std::transform(ori_shape.begin(), ori_shape.end(), std::back_inserter(tensor_shape), SizeToInt);
    auto ori_format = AnfAlgo::GetOutputFormat(anf_node, 0);
    TensorPtr cpu_tensor = std::make_shared<tensor::Tensor>(ori_type_id, tensor_shape);
    cpu_tensor->SetDeviceInfo(ori_format, ori_type_ptr);
    return cpu_tensor;
  }
}

TensorPtr GetValueTensor(const ValueNodePtr &const_node) {
  MS_EXCEPTION_IF_NULL(const_node);
  auto value_ptr = const_node->value();
  MS_EXCEPTION_IF_NULL(value_ptr);
  if (!value_ptr->isa<tensor::Tensor>()) {
    return nullptr;
  }
  TensorPtr tensor = value_ptr->cast<TensorPtr>();
  MS_EXCEPTION_IF_NULL(tensor);
  auto data_type = tensor->Dtype();
  MS_EXCEPTION_IF_NULL(data_type);
  auto type_id = data_type->type_id();
  auto shape = tensor->shape();
  TensorPtr tensor_constant = std::make_shared<tensor::Tensor>(type_id, shape);
  tensor_constant->SetDeviceInfo(tensor->device_info().format_, tensor->device_info().data_type_);
  return tensor_constant;
}

TensorPtr GetKernelCpuTensor(const CNodePtr &c_node_ptr, size_t inx) {
  if (c_node_ptr == nullptr || inx >= AnfAlgo::GetOutputTensorNum(c_node_ptr)) {
    MS_LOG(ERROR) << "GetKernelCpuTensor failed";
    return nullptr;
  }
  auto ori_shape = AnfAlgo::GetOutputInferShape(c_node_ptr, inx);
  auto ori_type_id = AnfAlgo::GetOutputInferDataType(c_node_ptr, inx);
  std::vector<int> tensor_shape;
  (void)std::transform(ori_shape.begin(), ori_shape.end(), std::back_inserter(tensor_shape), SizeToInt);
  auto ori_output_type = GetTypePtr(c_node_ptr);
  TensorPtr device_tensor = std::make_shared<tensor::Tensor>(ori_type_id, tensor_shape);
  auto format = AnfAlgo::GetOutputFormat(c_node_ptr, inx);
  device_tensor->SetDeviceInfo(format, ori_output_type);
  return device_tensor;
}

TensorPtr GetKernelAscendTensor(const CNodePtr &c_node_ptr, size_t inx) {
  if (c_node_ptr == nullptr || inx >= AnfAlgo::GetOutputTensorNum(c_node_ptr)) {
    MS_LOG(ERROR) << "GetKernelAscendTensor failed";
    return nullptr;
  }
  auto shape = AnfAlgo::GetOutputDeviceShape(c_node_ptr, inx);
  std::vector<int> tensor_shape;
  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(tensor_shape), SizeToInt);
  auto format = AnfAlgo::GetOutputFormat(c_node_ptr, inx);
  auto type_id = AnfAlgo::GetOutputDeviceDataType(c_node_ptr, inx);
  auto output_type_ptr = GetTypePtr(c_node_ptr);
  TensorPtr device_tensor = std::make_shared<tensor::Tensor>(type_id, tensor_shape);
  device_tensor->SetDeviceInfo(format, output_type_ptr);
  return device_tensor;
}

TensorPtr GetOutputTensor(const AnfNodePtr &out_node, size_t inx) {
  MS_EXCEPTION_IF_NULL(out_node);
  auto shape = AnfAlgo::GetOutputInferShape(out_node, inx);
  std::vector<int> tensor_shape;
  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(tensor_shape), SizeToInt);
  auto type_id = AnfAlgo::GetOutputInferDataType(out_node, inx);
  auto output_type_ptr = GetTypePtr(out_node);
  auto format = AnfAlgo::GetOutputFormat(out_node, inx);
  TensorPtr output_tensor = std::make_shared<tensor::Tensor>(type_id, tensor_shape);
  output_tensor->SetDeviceInfo(format, output_type_ptr);
  return output_tensor;
}

bool FindNodeInMap(const std::unordered_map<MsKernelKey, int> &node_map, const AnfNodePtr &node) {
  return std::any_of(node_map.begin(), node_map.end(),
                     [node](const std::pair<MsKernelKey, int> &kernel_key) { return kernel_key.first == node.get(); });
}

bool SaveDeviceModelUtil(const std::shared_ptr<GraphDefT> &new_ms_graph_ptr, const std::string &save_path_name,
                         SubGraphDefT *sub_graph) {
  MS_EXCEPTION_IF_NULL(new_ms_graph_ptr);
  MS_EXCEPTION_IF_NULL(sub_graph);
  // save mindspore schema to file
  new_ms_graph_ptr->name = "default_graph";
  std::unique_ptr<mindspore::predict::SubGraphDefT> sub_graph_ptr(sub_graph);
  new_ms_graph_ptr->subgraphs.emplace_back(std::move(sub_graph_ptr));
  // get flatbuffer builder
  flatbuffers::FlatBufferBuilder builder(1024);
  auto offset = mindspore::predict::GraphDef::Pack(builder, new_ms_graph_ptr.get());
  builder.Finish(offset);
  auto size = builder.GetSize();
  if (size == 0) {
    MS_LOG(ERROR) << "builder has no size";
    return false;
  }
  auto content = builder.GetBufferPointer();
  std::ofstream output(save_path_name);
  if (!output.is_open()) {
    MS_LOG(EXCEPTION) << "mindspore.mindspoire output failed";
  }
  (void)output.write((const char *)content, size);
  output.close();
  return true;
}
}  // namespace utils
}  // namespace predict
}  // namespace mindspore
@@ -1,60 +0,0 @@
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_PREDICT_CONVERTER_ATTR_UTILS_CONVERT_UTIL_H_
#define MINDSPORE_CCSRC_PREDICT_CONVERTER_ATTR_UTILS_CONVERT_UTIL_H_

#include <vector>
#include <utility>
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <string>
#include <fstream>
#include "ir/tensor.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "predict/schema/inner/ms_generated.h"

using TensorPtr = mindspore::tensor::TensorPtr;
using TensorPtrList = std::vector<mindspore::tensor::TensorPtr>;
using AllOutputTensors = std::unordered_map<int, TensorPtrList>;
using OpDefT = mindspore::predict::OpDefT;
using GraphDefT = mindspore::predict::GraphDefT;
using TensorDefT = mindspore::predict::TensorDefT;
using SubGraphDefT = mindspore::predict::SubGraphDefT;
using SubGraphPtr = std::unique_ptr<mindspore::predict::SubGraphDefT>;
using MsDataType = mindspore::predict::DataType;
using MsFormat = mindspore::predict::Format;
using MsKernelKey = void *;
namespace mindspore {
namespace predict {
namespace utils {
TypePtr GetTypePtr(const AnfNodePtr &anf_node);
MsDataType GetMSDataType(TypeId ori_data_type);
MsFormat GetMsFormat(const std::string &format_str);
TensorPtr GetParaAscendTensor(const AnfNodePtr &anf_node);
TensorPtr GetParaCpuTensor(const AnfNodePtr &anf_node);
TensorPtr GetValueTensor(const ValueNodePtr &const_node);
TensorPtr GetKernelCpuTensor(const CNodePtr &c_node_ptr, size_t inx);
TensorPtr GetKernelAscendTensor(const CNodePtr &c_node_ptr, size_t inx);
TensorPtr GetOutputTensor(const AnfNodePtr &out_node, size_t inx);
bool FindNodeInMap(const std::unordered_map<MsKernelKey, int> &Nodemap, const AnfNodePtr &node);
bool SaveDeviceModelUtil(const std::shared_ptr<GraphDefT> &new_ms_graph_ptr, const std::string &save_path_name,
                         SubGraphDefT *sub_graph_def_t);
}  // namespace utils
}  // namespace predict
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_PREDICT_CONVERTER_ATTR_UTILS_CONVERT_UTIL_H_
@@ -1,65 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_PREDICT_CONVERTER_CPU_ATTR_UTILS_OP_ATTR_TYPE_H_
#define MINDSPORE_CCSRC_PREDICT_CONVERTER_CPU_ATTR_UTILS_OP_ATTR_TYPE_H_
namespace mindspore {
namespace predict {
namespace convert {
typedef enum CpuOpType {
  CPU_OP_PAD = 0,
  CPU_OP_MAXIMUM,
  CPU_OP_CONCAT,
  CPU_OP_SOFTMAX,
  CPU_OP_ACTIVATION,
  CPU_OP_CONV2D,
  CPU_OP_FUSEDBATCHNORM,
  CPU_OP_CAFFEBATCHNORM,
  CPU_OP_SQUEEZE,
  CPU_OP_BIASADD,
  CPU_OP_POOLING,
  CPU_OP_DEPTHWISECONV2D,
  CPU_OP_DEDEPTHWISECONV2D,
  CPU_OP_RESIZE,
  CPU_OP_DETECTIONPOSTPROCESS,
  CPU_OP_FULLCONNECTION,
  CPU_OP_MEAN,
  CPU_OP_DECONV2D,
  CPU_OP_SCALE,
  CPU_OP_ELTWISE,
  CPU_OP_ADD,
  CPU_OP_SLICE,
  CPU_OP_MUL,
  CPU_OP_EXP,
  CPU_OP_RESHAPE,
  CPU_OP_POWER,
  CPU_OP_ARGMAX,
  CPU_OP_ARGMAX_NETOUTPUT,
  CPU_OP_MATMUL,
  CPU_OP_CAFFEPRELU,
  CPU_OP_STRIDEDSLICE,
  CPU_OP_STACK,
  CPU_OP_RANGE,
  CPU_OP_EXPANDDIMS,
  CPU_OP_TILE,
  CPU_OP_CAST,
  CPU_OP_CAFFECROP,
  CPU_OP_PRESERVEED = 37
} CpuOpType_t;
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_PREDICT_CONVERTER_CPU_ATTR_UTILS_OP_ATTR_TYPE_H_
@@ -1,49 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/executor_tensor.h"

namespace mindspore {
namespace executor {
int TensorCache::addExTensor(int tensor_key, const TensorPtr &tensor, int refCount, const std::vector<int> &host_shape,
                             ExTensorType stable, bool inc) {
  MS_EXCEPTION_IF_NULL(tensor);
  TensorPtr tmp_tensor = tensor;
  ExTensorPtr ex_tensor_ptr =
    std::make_shared<ExTensor>(tensor_key, tmp_tensor, refCount, nodeIndex, host_shape, stable);
  int pre_index = ex_tensor_ptr->index_;
  if (inc) {
    nodeIndex++;
  }
  // no need to judge, just add to map directly
  tensors[tensor_key].push_back(ex_tensor_ptr);
  return pre_index;
}

std::vector<ExTensorPtr> TensorCache::findTensor(int key) {
  std::vector<ExTensorPtr> ex_tensors;
  auto iter = tensors.find(key);
  if (iter != tensors.end()) {
    return iter->second;
  } else {
    MS_LOG(INFO) << "can not find any tensorlist";
    return ex_tensors;
  }
}

void TensorCache::deleteTensor(int key) { (void)tensors.erase(key); }
}  // namespace executor
}  // namespace mindspore
@@ -1,70 +0,0 @@
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_PREDICT_CONVERTER_EXECUTOR_TENSOR_H_
#define MINDSPORE_CCSRC_PREDICT_CONVERTER_EXECUTOR_TENSOR_H_

#include <vector>
#include <memory>
#include <unordered_map>
#include <utility>
#include "ir/tensor.h"

namespace mindspore {
namespace executor {
using TensorPtr = tensor::TensorPtr;
static constexpr int MS_MAX_REFCOUNT = 999;
enum ExTensorType { INPUTDATA, WEIGHTS, CONSTANT, KERNEL, OUTPUT };
class ExTensor {
 public:
  int key_;
  TensorPtr device_tensor_ptr_;
  int ref_count_;
  int index_;
  std::vector<int> host_shape_;
  ExTensorType stable_;
  ExTensor(int key, TensorPtr tensor_ptr, int ref_count, int index, std::vector<int> host_shape,
           ExTensorType ex_tensor_type)
      : key_(key),
        device_tensor_ptr_(std::move(tensor_ptr)),
        ref_count_(ref_count),
        index_(index),
        host_shape_(std::move(host_shape)),
        stable_(ex_tensor_type) {}
  ~ExTensor() { host_shape_.clear(); }
};
using ExTensorPtr = std::shared_ptr<ExTensor>;
class TensorCache {
 public:
  TensorCache() = default;

  ~TensorCache() { tensors.clear(); }

  int addExTensor(int tensor_key, const TensorPtr &tensor, int refCount, const std::vector<int> &host_shape,
                  ExTensorType stable, bool inc = true);
  // just adjust for dynamic tensor
  std::vector<ExTensorPtr> findTensor(int key);
  void deleteTensor(int key);
  const std::unordered_map<int, std::vector<ExTensorPtr>> &GetCachedTensor() const { return tensors; }

 private:
  std::unordered_map<int, std::vector<ExTensorPtr>> tensors;
  int nodeIndex = 0;
};
using TensorCachePtr = std::shared_ptr<TensorCache>;
}  // namespace executor
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_PREDICT_CONVERTER_EXECUTOR_TENSOR_H_
File diff suppressed because it is too large
@@ -1,118 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_PREDICT_CONVERTER_KERNEL_TO_MS_H_
#define MINDSPORE_CCSRC_PREDICT_CONVERTER_KERNEL_TO_MS_H_

#include <string>
#include <unordered_map>
#include <memory>
#include <vector>
#include <utility>
#include "backend/session/kernel_graph.h"
#include "predict/converter/executor_tensor.h"
#include "predict/schema/inner/ms_generated.h"
#include "predict/converter/attr_utils/convert_util.h"

static constexpr size_t kTupleGetItemIndex = 2;
namespace mindspore {
namespace executor {
using KernelGraphPtr = std::shared_ptr<mindspore::session::KernelGraph>;
enum ConvertMode { kConvertCpuMode, kConvertAscendMode, kConvertUnused };
enum TargetMode { kCPUTarget, kGPUTarget, kUnknowTarget };
class Kernel2Ms {
 public:
  static Kernel2Ms &GetInstance();

  Kernel2Ms(const Kernel2Ms &) = delete;

  Kernel2Ms &operator=(const Kernel2Ms &) = delete;

  bool KernelGraph2MsGraph(const KernelGraphPtr &kernel_graph_ptr);

  bool KernelInput2MS(const std::vector<TensorPtr> &input_tensors);

  ConvertMode convert_mode() const { return convert_mode_; }

  void set_convert_mode(ConvertMode convert_mode) { convert_mode_ = convert_mode; }

  TargetMode device_target() const { return device_target_; }

  void set_device_target(TargetMode device_target) { device_target_ = device_target; }

  bool SaveDeviceModel(const std::shared_ptr<GraphDefT> &new_ms_graph_ptr, const std::string &save_path_name);

 private:
  Kernel2Ms() : graph_index_(0) {}

  void ReleaseContextRes();

  ~Kernel2Ms() = default;

  bool SetAllTensors(const TensorCachePtr &tensor_cache, SubGraphDefT *sub_graph_def_t);

  bool SetOpInputIdx(const CNodePtr &c_node_ptr, const TensorCachePtr &tensor_cache, OpDefT *ms_node);

  bool SetOpOutputIdx(const CNodePtr &c_node_ptr, const TensorPtr &output_tensor, const TensorCachePtr &tensor_cache,
                      int ref_count, size_t order_index, OpDefT *ms_node);

  bool SetGraphOutputIdx(const KernelGraphPtr &kernel_graph_ptr, const TensorCachePtr &tensor_cache,
                         SubGraphDefT *sub_graph_def_t, AllOutputTensors *all_output_tensors);

  void TransformGraphIndx();

  void GetRealInpoutsPtr(const AnfNodePtr &node, std::vector<AnfNodePtr> *real_inputs,
                         std::vector<size_t> *real_output_idx);

  bool InitGraphIndx(const KernelGraphPtr &kernel_graph_ptr);

  bool InitGraphInputsIndx(const KernelGraphPtr &kernel_graph_ptr);

  bool InitGraphValueNodesIndx(const KernelGraphPtr &kernel_graph_ptr);

  bool InitGraphOpsIndx(const KernelGraphPtr &kernel_graph_ptr);

  bool InitGraphOutputsIndx(const KernelGraphPtr &kernel_graph_ptr);

  bool SetGraphInputTensors(const KernelGraphPtr &kernel_graph_ptr, const TensorCachePtr &tensor_cache,
                            SubGraphDefT *sub_graph_def_t);

  bool SetGraphValueTensors(const KernelGraphPtr &kernel_graph_ptr, const TensorCachePtr &tensor_cache);

  bool SetGraphOpTensors(const KernelGraphPtr &kernel_graph_ptr, const TensorCachePtr &tensor_cache,
                         SubGraphDefT *sub_graph_def_t);
  std::vector<uint32_t> GetAllInputWeightIdxs() const { return input_weight_idxs_; }
  std::vector<uint32_t> GetAllInputIdxs() const { return all_input_idxs_; }

  bool CheckInputSizes(const std::vector<TensorPtr> &input_tensors, const std::vector<uint32_t> &all_input_idxs);

  bool SetMemResue() const;
  SubGraphPtr sub_ms_graph_;
  AllOutputTensors all_output_tensors_;
  std::vector<OpDefT *> tmp_op_nodes_;
  std::unordered_map<MsKernelKey, int> node_indexs_;
  std::unordered_map<int, MsKernelKey> index_nodes_;
  int graph_index_ = 0;
  TensorCachePtr tensor_cache_ptr_ = nullptr;
  ConvertMode convert_mode_ = kConvertCpuMode;
  TargetMode device_target_ = kCPUTarget;
  std::vector<uint32_t> input_weight_idxs_;
  std::vector<uint32_t> all_input_idxs_;
};
using Kernel2MsPtr = std::shared_ptr<Kernel2Ms>;
}  // namespace executor
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_PREDICT_CONVERTER_KERNEL_TO_MS_H_
@@ -1,110 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"
#include "./securec.h"

namespace mindspore {
namespace predict {
namespace convert {
// forward declare
bool Conv2dPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool MatMulPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool BiasAddPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool ReshapePacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool ActivationPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool PoolingPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool FusedBatchNormPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool AddPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool CastPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool MeanPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool SoftmaxPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool ScalePacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool AddFoldPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool ArgMaxPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool BatchNormFoldPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool FakeQuantWithMinMaxPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool FakeQuantWithMinMaxPerChannelPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool MulPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool MulFoldPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);
bool SqueezePacker(const CNodePtr &c_node_ptr, OpDefT *ms_op);

OpAttrFactory::OpAttrFactory() {
  pack_funs_ = {{"Conv2D", Conv2dPacker},
                {"MatMul", MatMulPacker},
                {"BiasAdd", BiasAddPacker},
                {"Reshape", ReshapePacker},
                {"Activation", ActivationPacker},
                {"ReLU", ActivationPacker},
                {"ReLU6", ActivationPacker},
                {"EReLU", ActivationPacker},
                {"LeakyReLU", ActivationPacker},
                {"Sigmoid", ActivationPacker},
                {"Softsign", ActivationPacker},
                {"Softplus", ActivationPacker},
                {"Tanh", ActivationPacker},
                {"HSwish", ActivationPacker},
                {"HSigmoid", ActivationPacker},
                {"MaxPool", PoolingPacker},
                {"MaxPool2D", PoolingPacker},
                {"MeanPool", PoolingPacker},
                {"GlobalPool", PoolingPacker},
                {"FusedBatchNorm", FusedBatchNormPacker},
                {"FusedBatchNormGrad", FusedBatchNormPacker},
                {"Cast", CastPacker},
                {"TensorAdd", AddPacker},
                {"SoftMax", SoftmaxPacker},
                {"SimpleMean", MeanPacker},
                {"ReduceMean", MeanPacker},
                {"AddFold", AddFoldPacker},
                {"ArgMax", ArgMaxPacker},
                {"BatchNorm", BatchNormFoldPacker},
                {"FakeQuantPerLayer", FakeQuantWithMinMaxPacker},
                {"FakeQuantPerChannel", FakeQuantWithMinMaxPerChannelPacker},
                {"Mul", MulPacker},
                {"MulFold", MulFoldPacker},
                {"Squeeze", SqueezePacker}};
}
OpAttrPackFun OpAttrFactory::GetPackFun(const std::string &opType) {
  if (pack_funs_.find(opType) == pack_funs_.end()) {
    MS_LOG(WARNING) << "Op Attr pack fun [" << opType << "] not found.";
    return nullptr;
  }
  return pack_funs_[opType];
}

mindspore::predict::Format GetAttrFormat(const std::string &format) {
  if (format == kOpFormat_NCHW) {
    return predict::Format::Format_NCHW;
  } else if (format == kOpFormat_NHWC) {
    return predict::Format::Format_NHWC;
  } else {
    return predict::Format::Format_NUM_OF_FORMAT;
  }
}

mindspore::predict::PadMode GetAttrPadMode(const std::string &pad_mode) {
  if (pad_mode == "same") {
    return mindspore::predict::PadMode::PadMode_SAME;
  } else if (pad_mode == "valid") {
    return mindspore::predict::PadMode::PadMode_VALID;
  } else {
    return mindspore::predict::PadMode::PadMode_NOTSET;
  }
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,58 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_PREDICT_CONVERTER_OP_ATTR_PACKER_H_
#define MINDSPORE_CCSRC_PREDICT_CONVERTER_OP_ATTR_PACKER_H_

#include <utility>
#include <string>
#include <unordered_map>
#include "backend/session/anf_runtime_algorithm.h"
#include "predict/schema/inner/ms_generated.h"

static constexpr size_t kNIndex = 0;
static constexpr size_t kCIndex = 1;
static constexpr size_t kHIndex = 2;
static constexpr size_t kWIndex = 3;
static constexpr size_t kNCHWSize = 4;
namespace mindspore {
namespace predict {
namespace convert {
using OpAttrPackFun = bool (*)(const CNodePtr &c_node_ptr, OpDefT *ms_op);
class OpAttrFactory {
 public:
  static OpAttrFactory *GetInstance() {
    static OpAttrFactory instance;
    return &instance;
  }
  OpAttrFactory(const OpAttrFactory &) = delete;
  OpAttrFactory &operator=(const OpAttrFactory &) = delete;
  OpAttrPackFun GetPackFun(const std::string &op_type);
  ~OpAttrFactory() { pack_funs_.clear(); }
  OpAttrFactory();

 private:
  std::unordered_map<std::string, OpAttrPackFun> pack_funs_;
};

mindspore::predict::Format GetAttrFormat(const std::string &format);

mindspore::predict::PadMode GetAttrPadMode(const std::string &pad_mode);
}  // namespace convert
}  // namespace predict
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_PREDICT_CONVERTER_CPU_OP_INFO_OP_ATTR_FACTORY_H_
@@ -1,59 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool ActivationPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<ActivationT> attr(new ActivationT());
  MS_EXCEPTION_IF_NULL(attr);
  if (AnfAlgo::GetCNodeName(c_node_ptr) == "ReLU") {
    attr->type = predict::ActivationType::ActivationType_RELU;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "Sigmoid") {
    attr->type = predict::ActivationType::ActivationType_SIGMOID;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "ReLU6") {
    attr->type = predict::ActivationType::ActivationType_RELU6;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "ELU") {
    attr->type = predict::ActivationType::ActivationType_ELU;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "Leaky_ReLU") {
    attr->type = predict::ActivationType::ActivationType_LEAKY_RELU;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "ABS") {
    attr->type = predict::ActivationType::ActivationType_ABS;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "ReLU1") {
    attr->type = predict::ActivationType::ActivationType_RELU1;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "Softsign") {
    attr->type = predict::ActivationType::ActivationType_SOFTSIGN;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "Softplus") {
    attr->type = predict::ActivationType::ActivationType_SOFTPLUS;
  } else if (AnfAlgo::GetCNodeName(c_node_ptr) == "Tanh") {
    attr->type = predict::ActivationType::ActivationType_TANH;
  } else {
    attr->type = predict::ActivationType::ActivationType_UNKNOW;
    MS_LOG(WARNING) << "unknow Activation";
  }
  ms_op->name = c_node_ptr->fullname_with_scope();
  ms_op->attr.type = OpT_Activation;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,35 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool AddPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<AddT> attr(new AddT());
  MS_EXCEPTION_IF_NULL(attr);
  ms_op->name = c_node_ptr->fullname_with_scope();
  ms_op->attr.type = OpT_Add;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,34 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool AddFoldPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<AddFoldT> attr(new AddFoldT());
  MS_EXCEPTION_IF_NULL(attr);
  ms_op->attr.type = OpT_AddFold;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,34 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool ArgMaxPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<ArgMaxT> attr(new ArgMaxT());
  MS_EXCEPTION_IF_NULL(attr);
  ms_op->attr.type = OpT_ArgMax;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,34 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool BatchNormFoldPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<BatchNormFoldT> attr(new BatchNormFoldT());
  MS_EXCEPTION_IF_NULL(attr);
  ms_op->attr.type = OpT_BatchNormFold;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,37 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <memory>
#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool BiasAddPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<BiasAddT> attr(new BiasAddT());
  MS_EXCEPTION_IF_NULL(attr);
  attr->axis = {1};
  ms_op->name = c_node_ptr->fullname_with_scope();
  ms_op->attr.type = OpT_BiasAdd;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,37 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool CastPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<CastT> attr(new CastT());
  MS_EXCEPTION_IF_NULL(attr);
  attr->srcT = 0;
  attr->dstT = 0;
  ms_op->name = c_node_ptr->fullname_with_scope();
  ms_op->attr.type = OpT_Cast;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,63 +0,0 @@
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool Conv2dPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  int kernel_group_value = AnfAlgo::GetNodeAttr<int>(c_node_ptr, "group");
  int kernel_channel_value = AnfAlgo::GetNodeAttr<int>(c_node_ptr, "out_channel");
  std::vector<int> kernel_size_value = AnfAlgo::GetNodeAttr<std::vector<int>>(c_node_ptr, "kernel_size");
  std::string kernel_pad_mode_value = AnfAlgo::GetNodeAttr<std::string>(c_node_ptr, "pad_mode");
  int kernel_pad_value = AnfAlgo::GetNodeAttr<int>(c_node_ptr, "pad");
  auto kernel_stride_value = AnfAlgo::GetNodeAttr<std::vector<int>>(c_node_ptr, "stride");
  auto kernel_dilation_value = AnfAlgo::GetNodeAttr<std::vector<int>>(c_node_ptr, "dilation");
  std::string kernel_data_format_value = AnfAlgo::GetNodeAttr<std::string>(c_node_ptr, "data_format");
  std::unique_ptr<Conv2DT> attr(new Conv2DT());
  MS_EXCEPTION_IF_NULL(attr);
  attr->format = GetAttrFormat(kernel_data_format_value);
  attr->group = kernel_group_value;
  auto in_shape = AnfAlgo::GetPrevNodeOutputInferShape(c_node_ptr, 1);
  if (in_shape.size() != kNCHWSize) {
    return false;
  }
  attr->channelIn = SizeToInt(in_shape[1]);
  attr->channelOut = kernel_channel_value;
  attr->kernelW = kernel_size_value[0];
  attr->kernelH = kernel_size_value[1];
  attr->strideW = kernel_stride_value[0];
  attr->strideH = kernel_stride_value[1];
  attr->padMode = GetAttrPadMode(kernel_pad_mode_value);
  attr->padUp = kernel_pad_value;
  attr->padDown = kernel_pad_value;
  attr->padLeft = kernel_pad_value;
  attr->padRight = kernel_pad_value;
  attr->dilateW = kernel_dilation_value[0];
  attr->dilateH = kernel_dilation_value[1];
  attr->hasBias = false;
  ms_op->name = c_node_ptr->fullname_with_scope();
  ms_op->attr.type = OpT_Conv2D;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,34 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool FakeQuantWithMinMaxPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<FakeQuantWithMinMaxT> attr(new FakeQuantWithMinMaxT());
  MS_EXCEPTION_IF_NULL(attr);
  ms_op->attr.type = OpT_FakeQuantWithMinMax;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
@@ -1,34 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "predict/converter/lite_model/op_attr_packer.h"

namespace mindspore {
namespace predict {
namespace convert {
bool FakeQuantWithMinMaxPerChannelPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) {
  if (c_node_ptr == nullptr || ms_op == nullptr) {
    return false;
  }
  std::unique_ptr<FakeQuantWithMinMaxPerChannelT> attr(new FakeQuantWithMinMaxPerChannelT());
  MS_EXCEPTION_IF_NULL(attr);
  ms_op->attr.type = OpT_FakeQuantWithMinMaxPerChannel;
  ms_op->attr.value = attr.release();
  return true;
}
}  // namespace convert
}  // namespace predict
}  // namespace mindspore
Some files were not shown because too many files have changed in this diff.