!8684 [MS][LITE] remove internal
From: @jianghui58  Reviewed-by: @zhanghaibo5, @zhang_xue_tong  Signed-off-by: pull/8684/MERGE
commit de60d1d98f
internal/CMakeLists.txt
@ -1,47 +0,0 @@
cmake_minimum_required(VERSION 3.14)
project (Lite_Internal)
set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../)
set(CMAKE_CXX_COMPILER ${CMAKE_C_COMPILER})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
include_directories(${TOP_DIR})
add_compile_definitions(ENABLE_NNACL_INFER_SHAPE)

file(GLOB KERNEL_SRC
    ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/arithmetic_common.c
    ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/activation_fp32.c
    ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/arithmetic_self_fp32.c
    ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/arithmetic_fp32.c
    ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/matmul_fp32.c
    ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/reduce_fp32.c
    ${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/fp32/*.cc
    ${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/common/*.cc
    )
if (SUPPORT_TRAIN)
    file(GLOB TRAIN_KERNEL_SRC
        ${KERNEL_SRC}
        ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32_grad/activation_grad.c
        ${CMAKE_CURRENT_SOURCE_DIR}/src/kernel/fp32_grad/*.cc
        )
endif ()

list(REMOVE_ITEM KERNEL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/opt_op_handler.c)

set(CCSRC
    ${CMAKE_CURRENT_SOURCE_DIR}/src/common/vector.cc
    ${CMAKE_CURRENT_SOURCE_DIR}/src/common/string.cc
    ${CMAKE_CURRENT_SOURCE_DIR}/src/lite_session.cc
    ${CMAKE_CURRENT_SOURCE_DIR}/src/allocator.cc
    ${CMAKE_CURRENT_SOURCE_DIR}/src/ms_tensor.cc
    )

if (PLATFORM_ARM64)
    # assembly
    file(GLOB ASSEMBLY_SRC
        ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/MatmulFp32Opt.S
        ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/MatVecMulFp32.S
        ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/MatmulFp32.S)
    set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C)
    set(KERNEL_SRC ${KERNEL_SRC} ${ASSEMBLY_SRC})
    add_library(mslite_internal SHARED ${CCSRC} ${KERNEL_SRC} ${TRAIN_KERNEL_SRC})
endif()
internal/include/context.h
@ -1,40 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_
#define MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_

/// \brief CpuBindMode defined for holding bind cpu strategy argument.
typedef enum {
  NO_BIND = 0,    /**< no bind */
  HIGHER_CPU = 1, /**< bind higher cpu first */
  MID_CPU = 2     /**< bind middle cpu first */
} CpuBindMode;

/// \brief DeviceType defined for holding user's preferred backend.
typedef enum {
  DT_CPU, /**< CPU device type */
  DT_GPU, /**< GPU device type */
  DT_NPU  /**< NPU device type, not supported yet */
} DeviceType;

/// \brief Context defined for holding environment variables during runtime.
typedef struct {
  bool float16_priority = false; /**< prefer float16 inference when enabled */
  DeviceType device_type_ = DT_CPU;
  int thread_num_ = 2; /**< thread number config for thread pool */
} Context;
#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_
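For reference, a minimal sketch of how this removed Context was filled in before creating a session; the field values are illustrative defaults, not taken from this PR:

// Illustrative only: configure the internal Context declared above.
Context ctx;
ctx.device_type_ = DT_CPU;      // CPU is the backend the internal runtime targets
ctx.thread_num_ = 2;            // size of the kernel thread pool
ctx.float16_priority = false;   // keep fp32 inference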
internal/include/errorcode.h
@ -1,55 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_
#define MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_

/// \brief STATUS defined for holding error code in MindSpore Lite.
using STATUS = int;

/* Success */
constexpr int RET_OK = 0; /**< No error occurs. */

/* Common error code, range: [-1, -100] */
constexpr int RET_ERROR = -1;         /**< Common error code. */
constexpr int RET_NULL_PTR = -2;      /**< NULL pointer returned. */
constexpr int RET_PARAM_INVALID = -3; /**< Invalid parameter. */
constexpr int RET_NO_CHANGE = -4;     /**< No change. */
constexpr int RET_SUCCESS_EXIT = -5;  /**< No error but exit. */
constexpr int RET_MEMORY_FAILED = -6; /**< Failed to create memory. */

/* Executor error code, range: [-101, -200] */
constexpr int RET_OUT_OF_TENSOR_RANGE = -101; /**< Failed to check range. */
constexpr int RET_INPUT_TENSOR_ERROR = -102;  /**< Failed to check input tensor. */
constexpr int RET_REENTRANT_ERROR = -103;     /**< An executor is already running. */

/* Graph error code, range: [-201, -300] */
constexpr int RET_GRAPH_FILE_ERR = -201; /**< Failed to verify graph file. */

/* Node error code, range: [-301, -400] */
constexpr int RET_NOT_FIND_OP = -301;        /**< Failed to find operator. */
constexpr int RET_INVALID_OP_NAME = -302;    /**< Invalid operator name. */
constexpr int RET_INVALID_OP_ATTR = -303;    /**< Invalid operator attr. */
constexpr int RET_OP_EXECUTE_FAILURE = -304; /**< Failed to execute operator. */

/* Tensor error code, range: [-401, -500] */
constexpr int RET_FORMAT_ERR = -401; /**< Failed to check tensor format. */

/* InferShape error code, range: [-501, -600] */
constexpr int RET_INFER_ERR = -501;     /**< Failed to infer shape. */
constexpr int RET_INFER_INVALID = -502; /**< Invalid infer shape before runtime. */

#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_
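The bracketed ranges above partition the status space by subsystem. Hypothetical helpers that follow that documented convention (they were not part of the removed header):

// Hypothetical range checks keyed to the comment blocks above; illustrative only.
inline bool IsCommonError(STATUS status) { return status <= RET_ERROR && status >= -100; }
inline bool IsExecutorError(STATUS status) { return status <= -101 && status >= -200; }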
internal/include/lite_session.h
@ -1,91 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H
#define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H

#include "internal/include/ms_tensor.h"
#include "internal/include/model.h"
#include "internal/include/context.h"
#include "internal/include/lite_utils.h"

/// \brief LiteSession defines a session in MindSpore Lite for compiling a Model and running inference.
typedef struct LiteSession {
  /// \brief Static method to create a LiteSession pointer.
  ///
  /// \param[in] context Define the context of the session to be created.
  ///
  /// \return Pointer of MindSpore Lite LiteSession.
  static LiteSession *CreateSession(Context *context);

  /// \brief Compile MindSpore Lite model.
  ///
  /// \note CompileGraph should be called before RunGraph.
  ///
  /// \param[in] model Define the model to be compiled.
  ///
  /// \return STATUS as an error code of compiling the graph, STATUS is defined in errorcode.h.
  int CompileGraph(Model *model);

  /// \brief Get input MindSpore Lite MSTensors of model.
  ///
  /// \return The vector of MindSpore Lite MSTensor.
  TensorPtrVector GetInputs() const;

  /// \brief Get input MindSpore Lite MSTensors of model by node name.
  ///
  /// \param[in] node_name Define node name.
  ///
  /// \return The vector of MindSpore Lite MSTensor.
  TensorPtrVector GetInputsByName(const String &node_name) const;

  /// \brief Get output MindSpore Lite MSTensors of model by node name.
  ///
  /// \param[in] node_name Define node name.
  ///
  /// \return The vector of MindSpore Lite MSTensor.
  TensorPtrVector GetOutputsByNodeName(const String &node_name) const;

  /// \brief Get output MindSpore Lite MSTensors of model.
  ///
  /// \return The vector of MindSpore Lite MSTensor.
  TensorPtrVector GetOutputs() const;

  /// \brief Get names of the output tensors of the model compiled by this session.
  ///
  /// \return The vector of strings as output tensor names in order.
  StringVector GetOutputTensorNames() const;

  /// \brief Get output MindSpore Lite MSTensor of model by tensor name.
  ///
  /// \param[in] tensor_name Define tensor name.
  ///
  /// \return Pointer of MindSpore Lite MSTensor.
  MSTensor *GetOutputByTensorName(const String &tensor_name) const;

  /// \brief Run the compiled graph.
  ///
  /// \note RunGraph should be called after CompileGraph.
  int RunGraph();

  /// \brief Resize input shapes.
  ///
  /// \param[in] inputs Define the input tensors of the model to resize.
  /// \param[in] dims Define the new shapes of the inputs, aligned with them.
  ///
  /// \return STATUS as an error code of resizing inputs, STATUS is defined in errorcode.h.
  int Resize(const TensorPtrVector &inputs, const Int32VectorVector &dims);
} LiteSession;

#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H
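A sketch of the call sequence the comments above prescribe (CompileGraph before RunGraph). ReadModelFile is a hypothetical loader and error handling is trimmed; everything else uses only declarations from the removed headers:

// Illustrative end-to-end use of the removed internal API.
char *model_buf = nullptr;
size_t model_size = 0;
ReadModelFile("model.ms", &model_buf, &model_size);   // hypothetical helper
Model *model = Model::Import(model_buf, model_size);  // declared in model.h further down
Context ctx;
LiteSession *session = LiteSession::CreateSession(&ctx);
if (session->CompileGraph(model) != RET_OK) { /* handle error */ }
TensorPtrVector inputs = session->GetInputs();
// ... write input data into inputs[i]->data_ ...
if (session->RunGraph() != RET_OK) { /* handle error */ }
TensorPtrVector outputs = session->GetOutputs();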
internal/include/lite_utils.h
@ -1,32 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
#define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
#include "internal/include/string.h"
#include "internal/include/vector.h"

struct MSTensor;
struct Node;
using TensorPtr = MSTensor *;
using TensorPtrVector = Vector<MSTensor *>;
using Uint32Vector = Vector<uint32_t>;
using StringVector = Vector<String>;
using ShapeVector = Vector<int>;
using NodePtrVector = Vector<struct Node *>;
using Int32Vector = Vector<int>;
using Int32VectorVector = Vector<Int32Vector>;
#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
internal/include/model.h
@ -1,249 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */
#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
#define MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
#include "internal/include/lite_utils.h"
#include "nnacl/op_base.h"

using PrimitiveC = OpParameter;
enum NodeType {
  NodeType_ValueNode = 0,
  NodeType_Parameter = 1,
  NodeType_CNode = 2,
  NodeType_MIN = NodeType_ValueNode,
  NodeType_MAX = NodeType_CNode
};

enum KernelType : int {
  KernelType_Concat = 0,
  KernelType_SoftMax, KernelType_Activation, KernelType_Conv2D, KernelType_FusedBatchNorm,
  KernelType_BatchNorm, KernelType_BiasAdd, KernelType_Pooling, KernelType_ROIPooling,
  KernelType_DepthwiseConv2D, KernelType_DeDepthwiseConv2D, KernelType_Resize, KernelType_DetectionPostProcess,
  KernelType_FullConnection, KernelType_Mean, KernelType_DeConv2D, KernelType_Scale,
  KernelType_Reshape, KernelType_Eltwise, KernelType_NetOutput, KernelType_Add,
  KernelType_Sub, KernelType_MatMul, KernelType_StridedSlice, KernelType_Power,
  KernelType_Slice, KernelType_Stack, KernelType_Mul, KernelType_RealDiv,
  KernelType_Pad, KernelType_Maximum, KernelType_Minimum, KernelType_PReLU,
  KernelType_LeakyReLU, KernelType_ArgMax, KernelType_ArgMin, KernelType_Exp,
  KernelType_Crop, KernelType_Range, KernelType_Rsqrt, KernelType_ExpandDims,
  KernelType_Tile, KernelType_Cast, KernelType_Shape, KernelType_Nchw2Nhwc,
  KernelType_Nhwc2Nchw, KernelType_QuantDTypeCast, KernelType_Split, KernelType_Permute,
  KernelType_FakeQuantWithMinMaxVars, KernelType_Equal, KernelType_Less, KernelType_Greater,
  KernelType_NotEqual, KernelType_LessEqual, KernelType_GreaterEqual, KernelType_Min,
  KernelType_Floor, KernelType_Abs, KernelType_Neg, KernelType_Cos,
  KernelType_Sin, KernelType_Sqrt, KernelType_Square, KernelType_Constant,
  KernelType_Log, KernelType_Tan, KernelType_Atan, KernelType_Asin,
  KernelType_Clip, KernelType_Transpose, KernelType_Squeeze, KernelType_Unsqueeze,
  KernelType_Upsample, KernelType_Dropout, KernelType_Broadcast, KernelType_BroadcastTo,
  KernelType_Lrn, KernelType_ZerosLike, KernelType_TopK, KernelType_SpaceToDepth,
  KernelType_SpaceToBatch, KernelType_SparseToDense, KernelType_ReverseSequence, KernelType_Rank,
  KernelType_Gather, KernelType_GatherNd, KernelType_Fill, KernelType_Elu,
  KernelType_DepthToSpace, KernelType_BatchToSpace, KernelType_AddN, KernelType_Ceil,
  KernelType_EmbeddingLookup, KernelType_EmbeddingLookupSparse, KernelType_FloorDiv, KernelType_FloorMod,
  KernelType_L2Norm, KernelType_LocalResponseNormalization, KernelType_MatrixDiag, KernelType_Reduce,
  KernelType_Reverse, KernelType_Round, KernelType_Select, KernelType_Scatter,
  KernelType_ScatterND, KernelType_ConstantOfShape, KernelType_Unique, KernelType_Unstack,
  KernelType_LogicalAnd, KernelType_LogicalOr, KernelType_LogicalXor, KernelType_LogicalNot,
  KernelType_OnnxInt8Quantize, KernelType_OnnxInt8Dequantize, KernelType_FakeQuantWithMinMax,
  KernelType_FakeQuantWithMinMaxPerChannel, KernelType_BatchNormFold, KernelType_MulFold, KernelType_AddFold,
  KernelType_SquaredDifference, KernelType_Flatten, KernelType_FlattenGrad, KernelType_TupleGetItem,
  KernelType_Div, KernelType_Where, KernelType_OneHot, KernelType_Lstm,
  KernelType_Conv2DGradFilter, KernelType_Conv2DGradInput, KernelType_PoolingGrad, KernelType_BNGrad,
  KernelType_BNGradInput, KernelType_ApplyMomentum, KernelType_BiasGrad, KernelType_SoftmaxCrossEntropy,
  KernelType_AddGrad, KernelType_SubGrad, KernelType_MulGrad, KernelType_DivGrad,
  KernelType_PowerGrad, KernelType_ActivationGrad, KernelType_PriorBox, KernelType_SpaceToBatchND,
  KernelType_Depend, KernelType_Return, KernelType_MakeTuple, KernelType_ToFormat,
  KernelType_Proposal, KernelType_Custom, KernelType_BlackBox, KernelType_NegGrad,
  KernelType_LogGrad, KernelType_BatchToSpaceND, KernelType_END,
};

enum ActivationType {
  NO_ACTIVATION = 0, RELU = 1, SIGMOID = 2, RELU6 = 3,
  ELU = 4, LEAKY_RELU = 5, ABS = 6, RELU1 = 7,
  SOFTSIGN = 8, SOFTPLUS = 9, TANH = 10, SELU = 11,
  HSWISH = 12, HSIGMOID = 13, THRESHOLDRELU = 14, LINEAR = 15,
  UNKNOW = 16
};

enum ReduceMode {
  ReduceMode_ReduceMean = 0,
  ReduceMode_ReduceMax = 1,
  ReduceMode_ReduceMin = 2,
  ReduceMode_ReduceProd = 3,
  ReduceMode_ReduceSum = 4,
  ReduceMode_ReduceSumSquare = 5,
  ReduceMode_ReduceASum = 6,
  ReduceMode_MIN = ReduceMode_ReduceMean,
  ReduceMode_MAX = ReduceMode_ReduceASum
};

typedef struct Node {
  String name_;
  NodeType node_type_;
  PrimitiveC *primitive_;
  Uint32Vector input_indices_;
  Uint32Vector output_indices_;
} Node;

typedef struct Model {
  String name_;
  String version_;
  TensorPtrVector all_tensors_;
  Uint32Vector input_indices_;
  Uint32Vector output_indices_;
  NodePtrVector nodes_;
  char *buf;

  /// \brief Static method to create a Model pointer.
  ///
  /// \param[in] model_buf Define the buffer read from a model file.
  /// \param[in] size Define the byte size of the model buffer.
  ///
  /// \return Pointer of MindSpore Lite Model.
  static Model *Import(const char *model_buf, size_t size);

  /// \brief Free all temporary buffers.
  void Free();
} Model;

#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
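The structs above imply an index-based graph encoding: a Node does not own tensors, its input_indices_/output_indices_ index into the Model's flat all_tensors_ table. A minimal sketch of that lookup, assuming the index is in range:

// Illustrative: resolve a node's n-th input through Model::all_tensors_.
MSTensor *NthInput(const Model &model, const Node &node, size_t n) {
  uint32_t tensor_index = node.input_indices_[n];  // graph edges are stored as indices
  return model.all_tensors_[tensor_index];
}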
internal/include/ms_tensor.h
@ -1,151 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_
#define MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_

#include "internal/include/lite_utils.h"

enum TypeId : int {
  kTypeUnknown = 0,
  kMetaTypeBegin = kTypeUnknown,
  kMetaTypeType,  // Type
  kMetaTypeAnything,
  kMetaTypeObject,
  kMetaTypeTypeType,  // TypeType
  kMetaTypeProblem,
  kMetaTypeExternal,
  kMetaTypeNone,
  kMetaTypeNull,
  kMetaTypeEllipsis,
  kMetaTypeEnd,
  //
  // Object types
  //
  kObjectTypeBegin = kMetaTypeEnd,
  kObjectTypeNumber,
  kObjectTypeString,
  kObjectTypeList,
  kObjectTypeTuple,
  kObjectTypeSlice,
  kObjectTypeKeyword,
  kObjectTypeTensorType,
  kObjectTypeRowTensorType,
  kObjectTypeSparseTensorType,
  kObjectTypeUndeterminedType,
  kObjectTypeClass,
  kObjectTypeDictionary,
  kObjectTypeFunction,
  kObjectTypeJTagged,
  kObjectTypeSymbolicKeyType,
  kObjectTypeEnvType,
  kObjectTypeRefKey,
  kObjectTypeRef,
  kObjectTypeEnd,
  //
  // Number Types
  //
  kNumberTypeBegin = kObjectTypeEnd,
  kNumberTypeBool,
  kNumberTypeInt,
  kNumberTypeInt8,
  kNumberTypeInt16,
  kNumberTypeInt32,
  kNumberTypeInt64,
  kNumberTypeUInt,
  kNumberTypeUInt8,
  kNumberTypeUInt16,
  kNumberTypeUInt32,
  kNumberTypeUInt64,
  kNumberTypeFloat,
  kNumberTypeFloat16,
  kNumberTypeFloat32,
  kNumberTypeFloat64,
  kNumberTypeEnd
};

enum Format {
  Format_NCHW = 0,
  Format_NHWC = 1,
  Format_NHWC4 = 2,
  Format_HWKC = 3,
  Format_HWCK = 4,
  Format_KCHW = 5,
  Format_CKHW = 6,
  Format_KHWC = 7,
  Format_CHWK = 8,
  Format_HW = 9,
  Format_HW4 = 10,
  Format_NC = 11,
  Format_NC4 = 12,
  Format_NC4HW4 = 100,
  Format_NUM_OF_FORMAT = 101,
  Format_MIN = Format_NCHW,
  Format_MAX = Format_NUM_OF_FORMAT
};

typedef struct MSTensor {
  enum Category {
    CONST,  // weight tensor
    VAR     // activation tensor
  };
  void *data_ = NULL;
  void *device_data_ = NULL;
  TypeId data_type_;
  Format format_ = Format_NHWC;
  Category category_ = VAR;
  ShapeVector shape_;
  size_t refCount = 0;

  int32_t Batch() const;

  int32_t Channel() const;

  int32_t Height() const;

  int32_t Width() const;

  /// \brief Get the size of the dimension of the MindSpore Lite MSTensor indexed by the parameter index.
  ///
  /// \param[in] index Define the index of the dimension returned.
  ///
  /// \return Size of the dimension of the MindSpore Lite MSTensor.
  int DimensionSize(size_t index) const;

  /// \brief Get the number of elements in the MSTensor.
  ///
  /// \return Number of elements in the MSTensor.
  int ElementsNum() const;

  int ElementsC4Num() const;

  /// \brief Get the byte size of the data in the MSTensor.
  ///
  /// \return Byte size of the data in the MSTensor.
  size_t Size() const;

  static void *operator new(size_t sz);

  static void *operator new[](size_t sz);

  static void operator delete(void *ptr, size_t sz);

  static void operator delete[](void *ptr, size_t sz);
} MSTensor;

MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape);
void DestroyTensor(MSTensor *ptr);
#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_
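A short sketch of the factory pair declared above; the shape values are illustrative:

// Illustrative: create and release an fp32 NHWC tensor.
ShapeVector shape;
shape.push_back(1);   // N
shape.push_back(32);  // H
shape.push_back(32);  // W
shape.push_back(3);   // C
MSTensor *tensor = CreateTensor(kNumberTypeFloat32, shape);
size_t bytes = tensor->Size();  // 1*32*32*3 elements * 4 bytes = 12288
DestroyTensor(tensor);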
internal/include/string.h
@ -1,100 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */
#ifndef MINDSPORE_LITE_INTERNAL_SRC_STRING_H_
#define MINDSPORE_LITE_INTERNAL_SRC_STRING_H_
#include <string.h>
#include <stdint.h>

typedef struct String {
 public:
  String();
  String(size_t count, char ch);
  String(const char *s, size_t count);
  explicit String(const char *s);
  String(const String &other);
  String(const String &other, size_t pos, size_t count = npos);

  ~String();

  String &operator=(const String &str);
  String &operator=(const char *str);

  char &at(size_t pos);
  const char &at(size_t pos) const;
  inline char &operator[](size_t pos);
  inline const char &operator[](size_t pos) const;
  char *data() noexcept;
  const char *data() const noexcept;
  const char *c_str() const noexcept;

  // capacity
  bool empty() const noexcept;
  size_t size() const noexcept;
  size_t length() const noexcept;

  // operations
  void clear() noexcept;
  String &append(size_t count, const char ch);
  String &append(const String &str);
  String &append(const char *s);
  String &operator+=(const String &str);
  String &operator+=(const char *str);
  String &operator+=(const char ch);
  int compare(const String &str) const;
  int compare(const char *str) const;

  String substr(size_t pos = 0, size_t count = npos) const;

  static const size_t npos = -1;

 private:
  size_t size_;
  char *buffer_;
} String;

bool operator==(const String &lhs, const String &rhs);
bool operator==(const String &lhs, const char *rhs);
bool operator==(const char *lhs, const String rhs);

bool operator!=(const String &lhs, const String &rhs);
bool operator!=(const String &lhs, const char *rhs);
bool operator!=(const char *lhs, const String rhs);

bool operator<(const String &lhs, const String &rhs);
bool operator<(const String &lhs, const char *rhs);
bool operator<(const char *lhs, const String rhs);

bool operator>(const String &lhs, const String &rhs);
bool operator>(const String &lhs, const char *rhs);
bool operator>(const char *lhs, const String rhs);

bool operator<=(const String &lhs, const String &rhs);
bool operator<=(const String &lhs, const char *rhs);
bool operator<=(const char *lhs, const String rhs);

bool operator>=(const String &lhs, const String &rhs);
bool operator>=(const String &lhs, const char *rhs);
bool operator>=(const char *lhs, const String rhs);

String to_String(int32_t value);
String to_String(int64_t value);
String to_String(uint32_t value);
String to_String(uint64_t value);
String to_String(float value);
String to_String(double value);
String to_String(long double value);

#endif  // MINDSPORE_LITE_INTERNAL_SRC_STRING_H_
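The class mirrors a small subset of std::string so the internal build can avoid the STL; a usage sketch built only from the declarations above:

// Illustrative use of the minimal String replacement.
String name("conv");
name += "_1";                        // append a C string
if (name == "conv_1") { /* ... */ }  // comparison operators declared above
String prefix = name.substr(0, 4);   // "conv"
String index_text = to_String(42);   // integer-to-String helper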
internal/include/vector.h
@ -1,117 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */
#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H
#define MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#define DEFAULT_CAPACITY 4

struct MSTensor;
struct Node;

template <typename T>
class Vector {
 public:
  Vector();

  explicit Vector(size_t size);

  Vector(size_t size, const T &value);

  Vector(const Vector<T> &vector);

  ~Vector();

  void clear();

  void push_back(const T &elem);

  void push_back(T &&);

  void pop_back();

  void insert(const T &elem, size_t index);

  T *begin();

  const T *begin() const;

  T *end();

  const T *end() const;

  T &front();

  const T &front() const;

  T &back();

  const T &back() const;

  T &at(size_t index);

  const T &at(size_t index) const;

  T &operator[](size_t index);

  const T &operator[](size_t index) const;

  T *data();

  const T *data() const;

  size_t size() const;

  size_t capacity() const;

  bool empty() const;

  void erase(size_t index);

  void resize(size_t size);

  void reserve(size_t capacity);

  Vector<T> &operator=(const Vector<T> &v);

 private:
  size_t size_;
  size_t elem_size_;
  size_t capacity_;
  T *data_;
};

template <typename T>
bool operator==(const Vector<T> &lhs, const Vector<T> &rhs) {
  if (lhs.size() != rhs.size()) {
    return false;
  }
  // use size_t to match size() and avoid a signed/unsigned comparison
  for (size_t i = 0; i < lhs.size(); ++i) {
    if (lhs[i] != rhs[i]) {
      return false;
    }
  }
  return true;
}

template <typename T>
bool operator!=(const Vector<T> &lhs, const Vector<T> &rhs) {
  return !(lhs == rhs);
}
#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H
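Likewise a minimal std::vector stand-in; a usage sketch with an int element type:

// Illustrative use of the minimal Vector replacement.
Vector<int> dims;
dims.reserve(4);  // grow capacity once up front
dims.push_back(1);
dims.push_back(224);
dims.push_back(224);
dims.push_back(3);
for (size_t i = 0; i < dims.size(); ++i) {
  // dims[i] ...
}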
internal/src/allocator.cc
@ -1,220 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#include "internal/src/allocator.h"
#include <stdlib.h>
#include "internal/src/lite_log.h"

namespace mindspore::lite {
namespace {
constexpr size_t kMaxMallocSize = 2000 * 1024 * 1024;
constexpr int kBlockSize = 1024;
constexpr size_t kBlockLimit = (kBlockSize << (kBlockRange - 1));

int SizeToIndex(size_t size) {
  if (size > kBlockLimit) {
    return -1;
  }
  int index = 0;
  for (int i = 0; i < kBlockRange; ++i) {
    if ((size & (kBlockSize << i))) {
      index = i;
    }
  }
  if (size > (size_t)(kBlockSize << index)) {
    index += 1;
  }
  return index;
}

void PopMemNode(MemNode **head) {
  if (*head == nullptr) {
    return;
  }
  MemNode *next = (*head)->next_;
  (*head) = next;
  if (*head != nullptr) {
    (*head)->pre_ = nullptr;
  }
}

void PushMemNode(MemNode **head, MemNode *node) {
  if (node == nullptr) {
    return;
  }
  if (*head == nullptr) {
    *head = node;
    return;
  }
  (*head)->pre_ = node;
  node->next_ = *head;
  node->pre_ = nullptr;
  *head = node;
}

void RemoveMemNode(MemNode **head, MemNode *node) {
  if (node == nullptr) {
    return;
  }
  if ((*head) == node) {
    *head = node->next_;
    if (*head != nullptr) {
      (*head)->pre_ = nullptr;
    }
  } else {
    MemNode *node_pre = node->pre_;
    node_pre->next_ = node->next_;
    node->next_ = nullptr;
    node->pre_ = nullptr;
  }
}

void FreeNodesList(MemNode *head) {
  MemNode *node = head;
  while (node != nullptr) {
    MemNode *next = node->next_;
    free(node);
    node = next;
  }
}
}  // namespace

Allocator::Allocator() {
  for (int i = 0; i < kBlockRange; ++i) {
    allocated_list_[i] = nullptr;
    free_list_[i] = nullptr;
  }
}

Allocator::~Allocator() { Clear(); }

void Allocator::SetContext(const AllocatorContext &ctx) { lock_flag_ = ctx.lock_flag_; }

void Allocator::Lock() {
  if (lock_flag_) {
    pthread_mutex_lock(&lock_);
  }
}

void Allocator::UnLock() {
  if (lock_flag_) {
    pthread_mutex_unlock(&lock_);
  }
}

void *Allocator::Malloc(size_t size) {
  if (size > kMaxMallocSize) {
    LITE_ERROR_LOG("MallocData out of max_size, size: %zu", size);
    return nullptr;
  }
  void *result = nullptr;
  int index = SizeToIndex(size);
  if (index < 0) {
    // oversized request: allocate directly and track it on large_mem_list_
    MemNode *node = (MemNode *)malloc(sizeof(MemNode) + size);
    if (node == nullptr) {
      LITE_ERROR_LOG("malloc large MemNode fail, size: %zu", (size + sizeof(MemNode)));
      return result;
    }
    node->size_ = size;
    result = (char *)node + sizeof(MemNode);
    Lock();
    PushMemNode(&large_mem_list_, node);
    UnLock();
    return result;
  }
  Lock();
  size_t size_apply = (kBlockSize << index);
  if (free_list_[index] != nullptr) {
    MemNode *free_node = free_list_[index];
    PopMemNode(&free_list_[index]);
    PushMemNode(&allocated_list_[index], free_node);
    result = (char *)free_node + sizeof(MemNode);
    UnLock();
    return result;
  } else {
    MemNode *new_node = (MemNode *)malloc(sizeof(MemNode) + size_apply);
    if (new_node == nullptr) {
      UnLock();
      LITE_LOG_ERROR("malloc MemNode fail!");
      return nullptr;
    }
    new_node->size_ = size;
    PushMemNode(&allocated_list_[index], new_node);
    result = (char *)new_node + sizeof(MemNode);
    UnLock();
    return result;
  }
}

void Allocator::Free(void *buf) {
  if (buf == nullptr) {
    return;
  }

  MemNode *node = (MemNode *)((char *)buf - sizeof(MemNode));
  size_t buf_size = node->size_;
  Lock();
  if (buf_size > kBlockLimit) {
    RemoveMemNode(&large_mem_list_, node);
    free(node);
  } else {
    // bucketed blocks are recycled onto the free list instead of released
    int index = SizeToIndex(buf_size);
    RemoveMemNode(&allocated_list_[index], node);
    PushMemNode(&free_list_[index], node);
  }
  UnLock();
}

size_t Allocator::GetTotalSize() {
  Lock();
  size_t total_size = 0;
  for (int i = 0; i < kBlockRange; ++i) {
    MemNode *node = allocated_list_[i];
    while (node != nullptr) {
      total_size += node->size_;
      node = node->next_;
    }

    node = free_list_[i];
    while (node != nullptr) {
      total_size += node->size_;
      node = node->next_;
    }
  }
  MemNode *node = large_mem_list_;
  while (node != nullptr) {
    total_size += node->size_;
    node = node->next_;
  }
  UnLock();
  return total_size;
}

void Allocator::Clear() {
  Lock();
  for (int i = 0; i < kBlockRange; ++i) {
    FreeNodesList(allocated_list_[i]);
    allocated_list_[i] = nullptr;

    FreeNodesList(free_list_[i]);
    free_list_[i] = nullptr;
  }
  FreeNodesList(large_mem_list_);
  UnLock();
}
}  // namespace mindspore::lite
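SizeToIndex rounds a request up to a power-of-two block between 1 KiB (index 0) and 256 KiB (index 8); anything larger bypasses the buckets through large_mem_list_. A standalone restatement of that rule for checking by hand, e.g. a 3000-byte request lands in the 4096-byte bucket:

// Restates the bucket rule of SizeToIndex above; illustrative, not part of the file.
size_t BucketBlockSize(size_t size) {
  size_t block = 1024;               // kBlockSize
  while (block < size) block <<= 1;  // round up to the next power-of-two block
  return block;                      // 1000 -> 1024, 3000 -> 4096, 262144 -> 262144
}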
internal/src/allocator.h
@ -1,60 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#ifndef MINDSPORE_LITE_INTERNAL_SRC_ALLOCATOR_H_
#define MINDSPORE_LITE_INTERNAL_SRC_ALLOCATOR_H_

#include <stddef.h>
#include <pthread.h>
#include "internal/include/string.h"

namespace mindspore::lite {
constexpr int kBlockRange = 9;

typedef struct AllocatorContext {
  bool lock_flag_;
} AllocatorContext;

typedef struct MemNode {
  MemNode *pre_ = nullptr;
  MemNode *next_ = nullptr;
  size_t size_ = 0;
} MemNode;

class Allocator {
 public:
  Allocator();
  ~Allocator();
  void SetContext(const AllocatorContext &ctx);
  void *Malloc(size_t size);
  void Free(void *ptr);
  void Clear();
  size_t GetTotalSize();

 private:
  void Lock();
  void UnLock();

  bool lock_flag_ = false;
  pthread_mutex_t lock_ = PTHREAD_MUTEX_INITIALIZER;
  MemNode *large_mem_list_ = nullptr;
  MemNode *allocated_list_[kBlockRange];
  MemNode *free_list_[kBlockRange];
};
}  // namespace mindspore::lite

#endif  // MINDSPORE_LITE_INTERNAL_SRC_ALLOCATOR_H_
(Two file diffs suppressed because they are too large.)
internal/src/kernel/common/common_infershape.cc
@ -1,31 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "internal/src/lite_log.h"

int DoCommonInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors) {
  TensorPtr input = in_tensors.at(0);
  MS_ASSERT(input != nullptr);
  TensorPtr output = out_tensors.at(0);
  MS_ASSERT(output != nullptr);
  // the common case: output inherits format, dtype, and shape from the first input
  output->format_ = input->format_;
  output->data_type_ = input->data_type_;
  output->shape_ = input->shape_;
  return RET_OK;
}
internal/src/kernel/common/common_infershape.h
@ -1,24 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_COMMON_INFERSHAPE_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_COMMON_INFERSHAPE_H_

#include "internal/include/model.h"

int DoCommonInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors);

#endif  // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_COMMON_INFERSHAPE_H_
internal/src/kernel/fp32/activation.cc
@ -1,54 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#include "internal/src/kernel/fp32/activation.h"
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "nnacl/fp32/activation_fp32.h"
#include "internal/src/lite_log.h"
#include "nnacl/errorcode.h"

int DoActivationInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
  return DoCommonInferShape(in_tensors, out_tensors);
}

int DoActivation(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
                 mindspore::lite::Allocator *allocator) {
  ActivationParameter *param = (ActivationParameter *)node->primitive_;
  int ret = RET_OK;
  size_t length = in_tensors[0]->ElementsNum();
  float *input_addr = (float *)in_tensors[0]->data_;
  float *output_addr = (float *)out_tensors[0]->data_;
  if (param->type_ == ActivationType::RELU) {
    ret = Fp32Relu(input_addr, length, output_addr);
  } else if (param->type_ == ActivationType::SIGMOID) {
    ret = Sigmoid(input_addr, length, output_addr);
  } else if (param->type_ == ActivationType::RELU6) {
    ret = Fp32Relu6(input_addr, length, output_addr);
  } else if (param->type_ == ActivationType::LEAKY_RELU) {
    float alpha = param->alpha_;
    ret = LRelu(input_addr, length, output_addr, alpha);
  } else {
    LITE_ERROR_LOG("Unsupported activation type: %d", param->type_);
    return RET_PARAM_INVALID;
  }
  if (ret != NNACL_OK) {
    LITE_ERROR_LOG("do activation(%d) fail! ret: %d", param->type_, ret);
    return RET_ERROR;
  }
  return RET_OK;
}
internal/src/kernel/fp32/activation.h
@ -1,27 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ACTIVATION_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ACTIVATION_H_

#include "internal/include/model.h"
#include "internal/src/allocator.h"

int DoActivationInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);
int DoActivation(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
                 mindspore::lite::Allocator *allocator);

#endif  // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ACTIVATION_H_
internal/src/kernel/fp32/arithmetic.cc
@ -1,197 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */
#include "internal/src/kernel/fp32/arithmetic.h"
#include "internal/src/lite_log.h"
#include "internal/include/errorcode.h"
#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/include/lite_utils.h"
#include "nnacl/arithmetic_common.h"
#include "nnacl/fp32/arithmetic_fp32.h"

typedef int (*ArithmeticRun)(const float *input0, const float *input1, float *output, const int element_size);
typedef int (*ArithmeticOptRun)(const float *input0, const float *input1, float *output, const int element_size,
                                const ArithmeticParameter *param);

int BroadcastRun(float *input0, float *input1, float *output, int dim, int out_count, int break_pos,
                 ArithmeticRun arithmetic_run, ArithmeticParameter *params) {
  if (dim > break_pos) {
    return arithmetic_run(input0, input1, output, out_count);
  }
  for (int i = 0; i < params->out_shape_[dim]; ++i) {
    int pos0_ = params->in_shape0_[dim] == 1 ? 0 : i;
    int pos1_ = params->in_shape1_[dim] == 1 ? 0 : i;
    int error_code =
      BroadcastRun(input0 + pos0_ * params->in_strides0_[dim], input1 + pos1_ * params->in_strides1_[dim],
                   output + i * params->out_strides_[dim], dim + 1, out_count, break_pos, arithmetic_run, params);
    if (error_code != RET_OK) {
      return error_code;
    }
  }
  return RET_OK;
}

int CalBroadCasting(const TensorPtrVector &in_tensors, int *outside, int *break_pos, ArithmeticParameter *params) {
  params->broadcasting_ = false;
  for (size_t i = 0; i < params->ndim_; ++i) {
    if (params->in_shape0_[i] != params->in_shape1_[i]) {
      if (params->in_shape0_[i] == 1) {
        params->out_shape_[i] = params->in_shape1_[i];
      } else if (params->in_shape1_[i] == 1) {
        params->out_shape_[i] = params->in_shape0_[i];
      } else {
        LITE_LOG_ERROR("shapes of input tensors cannot be broadcast");
        return RET_INPUT_TENSOR_ERROR;
      }
      params->broadcasting_ = true;
    } else {
      params->out_shape_[i] = params->in_shape0_[i];
    }
  }
  if (params->broadcasting_) {
    *outside = 1;
    // use a signed index: ndim_ is size_t and an unsigned counter would underflow at 0
    for (int i = static_cast<int>(params->ndim_) - 1; i >= 0; --i) {
      if (params->in_shape0_[i] != params->in_shape1_[i]) {
        *break_pos = i;
        break;
      }
      (*outside) *= params->out_shape_[i];
    }
    ComputeStrides(params->in_shape0_, params->in_strides0_, params->ndim_);
    ComputeStrides(params->in_shape1_, params->in_strides1_, params->ndim_);
    ComputeStrides(params->out_shape_, params->out_strides_, params->ndim_);
  }
  return RET_OK;
}

int RunArithmetic(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, ArithmeticRun arithmetic_run,
                  ArithmeticOptRun arithmetic_opt_run, int outside, int break_pos, ArithmeticParameter *params) {
  int error_code = RET_OK;
  int count = out_tensors[0]->ElementsNum();
  float *input0_data = reinterpret_cast<float *>(in_tensors[0]->data_);
  float *input1_data1 = reinterpret_cast<float *>(in_tensors[1]->data_);
  float *output_data = reinterpret_cast<float *>(out_tensors[0]->data_);
  if (params->broadcasting_) {
    error_code = BroadcastRun(input0_data, input1_data1, output_data, 0, outside, break_pos, arithmetic_run, params);
  } else if (arithmetic_opt_run != NULL) {
    error_code = arithmetic_opt_run(input0_data, input1_data1, output_data, count, params);
  } else {
    error_code = arithmetic_run(input0_data, input1_data1, output_data, count);
  }
  if (error_code != RET_OK) {
    return error_code;
  }
  return RET_OK;
}

int DoArithmeticInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
  if (in_tensors.size() != 2 || in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
    LITE_LOG_ERROR("input tensor number is incorrect or input data is NULL!");
    return RET_INPUT_TENSOR_ERROR;
  }
  if (out_tensors.size() != 1) {
    LITE_LOG_ERROR("output tensor number is incorrect!");
    return RET_ERROR;
  }

  int in_datatype[2] = {in_tensors[0]->data_type_, in_tensors[1]->data_type_};
  int in_format[2] = {static_cast<int>(in_tensors[0]->format_), static_cast<int>(in_tensors[1]->format_)};
  size_t dim_size[2] = {in_tensors[0]->shape_.size(), in_tensors[1]->shape_.size()};
  int *in_shape[2] = {in_tensors[0]->shape_.data(), in_tensors[1]->shape_.data()};
  int out_format;
  int out_datatype;
  int ret = ArithmeticInferShape(in_shape, dim_size, out_tensors[0]->shape_.data(), in_format, &out_format, in_datatype,
                                 &out_datatype, param);
  if (ret != NNACL_OK) {
    LITE_ERROR_LOG("arithmetic infershape failed! ret: %d", ret);
    return RET_ERROR;
  }
  out_tensors[0]->format_ = static_cast<Format>(out_format);
  out_tensors[0]->data_type_ = static_cast<TypeId>(out_datatype);
  return RET_OK;
}

int ChooseKernel(const int kernel_type, ArithmeticRun *arithmetic_run, ArithmeticParameter *params) {
  if (kernel_type == KernelType::KernelType_Mul) {
    if (params->activation_type_ == ActivationType::RELU) {
      *arithmetic_run = ElementMulRelu;
    } else if (params->activation_type_ == ActivationType::RELU6) {
      *arithmetic_run = ElementMulRelu6;
    } else {
      *arithmetic_run = ElementMul;
    }
  } else {
    LITE_LOG_INFO("unsupported operator type");
    return RET_ERROR;
  }
  return RET_OK;
}

int ChooseOptKernel(const int kernel_type, ArithmeticOptRun *arithmetic_opt_run, ArithmeticParameter *params) {
  if (kernel_type == KernelType::KernelType_Mul) {
    if (params->activation_type_ == ActivationType::RELU) {
      *arithmetic_opt_run = ElementOptMulRelu;
    } else if (params->activation_type_ == ActivationType::RELU6) {
      *arithmetic_opt_run = ElementOptMulRelu6;
    } else {
      *arithmetic_opt_run = ElementOptMul;
    }
  } else {
    LITE_LOG_INFO("kernel does not have an opt version");
  }
  return RET_OK;
}

int DoArithmetic(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
                 mindspore::lite::Allocator *allocator) {
  if (in_tensors.size() != 2 || in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
    LITE_LOG_ERROR("input tensor number is incorrect or input data is NULL!");
    return RET_INPUT_TENSOR_ERROR;
  }
  if (out_tensors.size() != 1 || out_tensors[0]->data_ == NULL) {
    LITE_LOG_ERROR("output tensor number is incorrect or output data is NULL!");
    return RET_ERROR;
  }
  if (allocator == NULL) {
    LITE_LOG_ERROR("allocator is NULL!");
    return RET_ERROR;
  }
  ArithmeticParameter *params = reinterpret_cast<ArithmeticParameter *>(node->primitive_);

  ArithmeticRun arithmetic_run = NULL;
  int kernel_type = params->op_parameter_.type_;
  int status = ChooseKernel(kernel_type, &arithmetic_run, params);
  if (status != RET_OK) {
    return status;
  }
  int outside = 0;
  int break_pos = 0;
  // when one of the inputs has only one element
  params->in_elements_num0_ = in_tensors[0]->ElementsNum();
  params->in_elements_num1_ = in_tensors[1]->ElementsNum();
  params->out_elements_num_ = out_tensors[0]->ElementsNum();
  ArithmeticOptRun arithmetic_opt_run = NULL;
  if (params->in_elements_num0_ == 1 || params->in_elements_num1_ == 1) {
    params->broadcasting_ = false;
    ChooseOptKernel(kernel_type, &arithmetic_opt_run, params);
  } else {
    int ret = CalBroadCasting(in_tensors, &outside, &break_pos, params);
    if (ret != RET_OK) {
      return ret;
    }
  }
  return RunArithmetic(in_tensors, out_tensors, arithmetic_run, arithmetic_opt_run, outside, break_pos, params);
}
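BroadcastRun recurses down the dimensions until the first mismatching one (break_pos), re-using offset 0 on whichever input has extent 1, then hands the innermost contiguous run to the element kernel. The stride arrays it consumes are plain row-major strides; a restatement of the contract nnacl's ComputeStrides fulfills, as used above (illustrative, not the nnacl source):

// Illustrative restatement of the row-major stride computation the kernel relies on.
void ComputeStridesSketch(const int *shape, int *strides, size_t ndim) {
  int stride = 1;
  for (int i = static_cast<int>(ndim) - 1; i >= 0; --i) {
    strides[i] = stride;  // elements skipped when dimension i advances by one
    stride *= shape[i];
  }
}
// Example: shape {2, 4, 3} -> strides {12, 3, 1}.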
internal/src/kernel/fp32/arithmetic.h
@ -1,29 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */
#ifndef INTERNAL_SRC_RUNTIME_KERNEL_ARITHMETIC_H_
#define INTERNAL_SRC_RUNTIME_KERNEL_ARITHMETIC_H_

#include "internal/include/model.h"
#include "internal/include/lite_utils.h"
#include "internal/src/allocator.h"
#include "nnacl/arithmetic_common.h"

int DoArithmeticInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);

int DoArithmetic(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
                 mindspore::lite::Allocator *allocator);

#endif  // INTERNAL_SRC_RUNTIME_KERNEL_ARITHMETIC_H_
internal/src/kernel/fp32/arithmetic_self.cc
@ -1,47 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#include "internal/src/kernel/fp32/arithmetic_self.h"
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/errorcode.h"
#include "internal/include/ms_tensor.h"
#include "internal/src/lite_log.h"
#include "nnacl/fp32/arithmetic_self_fp32.h"

int DoArithmeticSelfInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
                               OpParameter *param) {
  return DoCommonInferShape(in_tensors, out_tensors);
}

int DoArithmeticSelf(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
                     mindspore::lite::Allocator *allocator) {
  size_t data_size = in_tensors[0]->ElementsNum();
  OpParameter *param = node->primitive_;
  int ret;
  if (param->type_ == KernelType::KernelType_Log) {
    ret = ElementLog((float *)in_tensors[0]->data_, (float *)out_tensors[0]->data_, data_size);
  } else if (param->type_ == KernelType::KernelType_Neg) {
    ret = ElementNegative((float *)in_tensors[0]->data_, (float *)out_tensors[0]->data_, data_size);
  } else {
    LITE_ERROR_LOG("Unsupported kernel type: %d", param->type_);
    return RET_PARAM_INVALID;
  }
  if (ret != NNACL_OK) {
    LITE_ERROR_LOG("do arithmetic %d fail! ret: %d", param->type_, ret);
    return RET_ERROR;
  }
  return RET_OK;
}
internal/src/kernel/fp32/arithmetic_self.h
@ -1,28 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */

#ifndef MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ARITHMETIC_SELF_H_
#define MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ARITHMETIC_SELF_H_

#include "internal/include/model.h"
#include "internal/src/allocator.h"

int DoArithmeticSelfInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors,
                               OpParameter *param);
int DoArithmeticSelf(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
                     mindspore::lite::Allocator *allocator);

#endif  // MINDSPORE_LITE_INTERNAL_SRC_KERNEL_FP32_ARITHMETIC_SELF_H_
internal/src/kernel/fp32/bias_add.cc
@ -1,71 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */
#include "internal/src/kernel/fp32/bias_add.h"
#include "internal/src/kernel/common/common_infershape.h"
#include "internal/include/model.h"
#include "internal/include/ms_tensor.h"
#include "internal/include/lite_utils.h"
#include "internal/src/lite_log.h"
#include "internal/include/errorcode.h"
#include "nnacl/arithmetic_common.h"
#include "nnacl/fp32/arithmetic_fp32.h"

int DoBiasAddInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param) {
  return DoCommonInferShape(in_tensors, out_tensors);
}

int DoBiasAdd(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
              mindspore::lite::Allocator *allocator) {
  if (in_tensors.size() != 2 || in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
    LITE_LOG_ERROR("input tensor number is incorrect or input data is NULL!");
    return RET_INPUT_TENSOR_ERROR;
  }
  if (out_tensors.size() != 1 || out_tensors[0]->data_ == NULL) {
    LITE_LOG_ERROR("output tensor number is incorrect or output data is NULL!");
    return RET_ERROR;
  }
  if (allocator == NULL) {
    LITE_LOG_ERROR("allocator is NULL!");
    return RET_ERROR;
  }
  ArithmeticParameter *params = reinterpret_cast<ArithmeticParameter *>(node->primitive_);

  // broadcast the bias over every dimension except the last (channel) one
  ShapeVector dims = in_tensors[0]->shape_;
  params->ndim_ = dims.size();
  for (size_t i = 0; i < params->ndim_; i++) {
    params->in_shape0_[i] = dims[i];
    params->in_shape1_[i] = 1;
    params->out_shape_[i] = dims[i];
  }
  params->in_shape1_[params->ndim_ - 1] = dims[params->ndim_ - 1];

  float *in = reinterpret_cast<float *>(in_tensors[0]->data_);
  float *bias = reinterpret_cast<float *>(in_tensors[1]->data_);
  float *out = reinterpret_cast<float *>(out_tensors[0]->data_);
  size_t data_size = in_tensors[0]->ElementsNum();
  float *tile_in = reinterpret_cast<float *>(allocator->Malloc(data_size * sizeof(float)));
  float *tile_bias = reinterpret_cast<float *>(allocator->Malloc(data_size * sizeof(float)));
  if (tile_in == NULL || tile_bias == NULL) {
    LITE_LOG_ERROR("Memory allocation failed!");
    allocator->Free(tile_in);
    allocator->Free(tile_bias);
    return RET_ERROR;
  }
  BroadcastAdd(in, bias, tile_in, tile_bias, out, data_size, params);
  allocator->Free(tile_in);
  allocator->Free(tile_bias);
  return RET_OK;
}
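DoBiasAdd reshapes the problem so the generic broadcast add can consume it: the bias is treated as extent 1 in every dimension except the last. For an NHWC input the shapes it builds look like this (values illustrative):

// Illustrative: the broadcast shapes DoBiasAdd derives for input {N, H, W, C}.
//   in_shape0_ = {N, H, W, C}   the activation data
//   in_shape1_ = {1, 1, 1, C}   the bias, broadcast over all leading dims
//   out_shape_ = {N, H, W, C}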
internal/src/kernel/fp32/bias_add.h
@ -1,28 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 * Licensed under the Apache License, Version 2.0 (header identical to the one above).
 */
#ifndef INTERNAL_SRC_RUNTIME_KERNEL_BIAS_H_
#define INTERNAL_SRC_RUNTIME_KERNEL_BIAS_H_

#include "internal/include/model.h"
#include "internal/include/lite_utils.h"
#include "internal/src/allocator.h"

int DoBiasAddInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, OpParameter *param);

int DoBiasAdd(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
              mindspore::lite::Allocator *allocator);

#endif  // INTERNAL_SRC_RUNTIME_KERNEL_BIAS_H_