From 65092b549c9be7d517b2ddb2b0f7df8be9011406 Mon Sep 17 00:00:00 2001
From: xulei2020
Date: Sat, 20 Mar 2021 16:19:09 +0800
Subject: [PATCH] add code

---
 .../ccsrc/minddata/dataset/core/data_type.h    |  4 +++
 .../ccsrc/minddata/dataset/core/tensor.cc      |  6 ++--
 .../dataset/kernels/data/data_utils.cc         | 29 +++++++++++++++----
 mindspore/core/base/float16.h                  |  2 ++
 mindspore/lite/CMakeLists.txt                  |  3 ++
 .../lite/tools/optimizer/common/gllo_utils.cc  |  3 ++
 6 files changed, 40 insertions(+), 7 deletions(-)

diff --git a/mindspore/ccsrc/minddata/dataset/core/data_type.h b/mindspore/ccsrc/minddata/dataset/core/data_type.h
index 8877bdbcdd..c9d7c1c2b9 100644
--- a/mindspore/ccsrc/minddata/dataset/core/data_type.h
+++ b/mindspore/ccsrc/minddata/dataset/core/data_type.h
@@ -257,10 +257,12 @@ inline DataType DataType::FromCType() {
   return DataType(DataType::DE_FLOAT32);
 }
 
+#ifndef ENABLE_MD_LITE_X86_64
 template <>
 inline DataType DataType::FromCType<float16>() {
   return DataType(DataType::DE_FLOAT16);
 }
+#endif
 
 template <>
 inline DataType DataType::FromCType() {
@@ -327,10 +329,12 @@ inline bool DataType::IsLooselyCompatible() const {
   return type_ == DataType::DE_FLOAT32;
 }
 
+#ifndef ENABLE_MD_LITE_X86_64
 template <>
 inline bool DataType::IsLooselyCompatible<float16>() const {
   return type_ == DataType::DE_FLOAT16;
 }
+#endif
 
 template <>
 inline bool DataType::IsLooselyCompatible() const {
diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.cc b/mindspore/ccsrc/minddata/dataset/core/tensor.cc
index e2325b66dd..a4cdf18874 100644
--- a/mindspore/ccsrc/minddata/dataset/core/tensor.cc
+++ b/mindspore/ccsrc/minddata/dataset/core/tensor.cc
@@ -396,9 +396,9 @@ void Tensor::PrintItemAt(const std::vector<dsize_t> &index, std::ostream &out) const {
     CASE_PRINT(DataType::DE_INT64, int64_t)
 
     CASE_PRINT(DataType::DE_UINT64, uint64_t)
-
+#ifndef ENABLE_MD_LITE_X86_64
     CASE_PRINT(DataType::DE_FLOAT16, float16)
-
+#endif
     CASE_PRINT(DataType::DE_FLOAT32, float)
 
     CASE_PRINT(DataType::DE_FLOAT64, double)
@@ -825,12 +825,14 @@ Status Tensor::GetFloatAt(T *o, const std::vector<dsize_t> &index) const {
     RETURN_STATUS_UNEXPECTED(err);
   }
   switch (type_.value()) {
+#ifndef ENABLE_MD_LITE_X86_64
    case DataType::DE_FLOAT16: {
      float16 *ptr = nullptr;
      RETURN_IF_NOT_OK(GetItemPtr<float16>(&ptr, index));
      *o = static_cast<T>(*ptr);
      break;
    }
+#endif
    case DataType::DE_FLOAT32: {
      float *ptr = nullptr;
      RETURN_IF_NOT_OK(GetItemPtr<float>(&ptr, index));
diff --git a/mindspore/ccsrc/minddata/dataset/kernels/data/data_utils.cc b/mindspore/ccsrc/minddata/dataset/kernels/data/data_utils.cc
index 71771e3cff..e78f77de66 100644
--- a/mindspore/ccsrc/minddata/dataset/kernels/data/data_utils.cc
+++ b/mindspore/ccsrc/minddata/dataset/kernels/data/data_utils.cc
@@ -281,9 +281,11 @@ void CastFrom(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
     case DataType::DE_UINT64:
       Cast<T, uint64_t>(input, output);
       break;
+#ifndef ENABLE_MD_LITE_X86_64
     case DataType::DE_FLOAT16:
       Cast<T, float16>(input, output);
       break;
+#endif
     case DataType::DE_FLOAT32:
       Cast<T, float>(input, output);
       break;
@@ -328,9 +330,11 @@ Status TypeCast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
     case DataType::DE_UINT64:
       CastFrom<uint64_t>(input, output);
       break;
+#ifndef ENABLE_MD_LITE_X86_64
     case DataType::DE_FLOAT16:
       CastFrom<float16>(input, output);
       break;
+#endif
     case DataType::DE_FLOAT32:
       CastFrom<float>(input, output);
       break;
@@ -344,6 +348,7 @@ Status TypeCast(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
   return Status::OK();
 }
 
+#ifndef ENABLE_MD_LITE_X86_64
 Status ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
   // initiate new tensor for type cast
   DataType new_type = DataType("float16");
@@ -367,6 +372,9 @@ Status ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
 
   return Status::OK();
 }
+#else
+Status ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) { return Status::OK(); }
+#endif
 
 Status PadEnd(const std::shared_ptr<Tensor> &src, std::shared_ptr<Tensor> *dst, const std::vector<dsize_t> &pad_shape,
              const std::shared_ptr<Tensor> &pad_val) {
@@ -410,9 +418,13 @@ Status PadEndNumeric(const std::shared_ptr<Tensor> &src, std::shared_ptr<Tensor> *dst,
      RETURN_IF_NOT_OK((*dst)->Fill(pad_val));
    } else if (tensor_type == DataType::DE_INT16) {
      RETURN_IF_NOT_OK((*dst)->Fill<int16_t>(pad_val));
-    } else if (tensor_type == DataType::DE_FLOAT16) {
+    }
+#ifndef ENABLE_MD_LITE_X86_64
+    else if (tensor_type == DataType::DE_FLOAT16) {  // NOLINT
      RETURN_IF_NOT_OK((*dst)->Fill<float16>(static_cast<float16>(pad_val)));
-    } else if (tensor_type == DataType::DE_UINT16) {
+    }
+#endif
+    else if (tensor_type == DataType::DE_UINT16) {  // NOLINT
      RETURN_IF_NOT_OK((*dst)->Fill<uint16_t>(pad_val));
    } else if (tensor_type == DataType::DE_INT32) {
      RETURN_IF_NOT_OK((*dst)->Fill<int32_t>(pad_val));
@@ -570,9 +582,11 @@ Status Mask(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
     case DataType::DE_INT64:
       RETURN_IF_NOT_OK(MaskHelper<int64_t>(input, *output, casted_value, op));
       break;
+#ifndef ENABLE_MD_LITE_X86_64
     case DataType::DE_FLOAT16:
       RETURN_IF_NOT_OK(MaskHelper<float16>(input, *output, casted_value, op));
       break;
+#endif
     case DataType::DE_FLOAT32:
       RETURN_IF_NOT_OK(MaskHelper<float>(input, *output, casted_value, op));
       break;
@@ -732,6 +746,7 @@ struct UniqueOpHashMap {
 };
 
 #else
+#ifndef ENABLE_MD_LITE_X86_64
 struct gn_hash {
   size_t operator()(const float16 &f) const { return static_cast<size_t>(f); }
 };
@@ -740,7 +755,7 @@ template <>
 struct UniqueOpHashMap<float16> {
   using map_type = std::unordered_map<float16, int32_t, gn_hash>;
 };
-
+#endif
 #endif
 
 template <>
@@ -809,9 +824,13 @@ Status Unique(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output,
    RETURN_IF_NOT_OK(UniqueHelper(input, output, output_idx, output_cnt));
  } else if (input->type() == DataType::DE_UINT8) {
    RETURN_IF_NOT_OK(UniqueHelper<uint8_t>(input, output, output_idx, output_cnt));
-  } else if (input->type() == DataType::DE_FLOAT16) {
+  }
+#ifndef ENABLE_MD_LITE_X86_64
+  else if (input->type() == DataType::DE_FLOAT16) {  // NOLINT
    RETURN_IF_NOT_OK(UniqueHelper<float16>(input, output, output_idx, output_cnt));
-  } else if (input->type() == DataType::DE_FLOAT32) {
+  }
+#endif
+  else if (input->type() == DataType::DE_FLOAT32) {  // NOLINT
    RETURN_IF_NOT_OK(UniqueHelper<float>(input, output, output_idx, output_cnt));
  } else if (input->type() == DataType::DE_FLOAT64) {
    RETURN_IF_NOT_OK(UniqueHelper<double>(input, output, output_idx, output_cnt));
diff --git a/mindspore/core/base/float16.h b/mindspore/core/base/float16.h
index 9c23e0395a..a4c9fdaebf 100644
--- a/mindspore/core/base/float16.h
+++ b/mindspore/core/base/float16.h
@@ -23,6 +23,7 @@ using float16 = float16_t;
 inline float half_to_float(float16 h) { return static_cast<float>(h); }
 
 #else
+#ifndef ENABLE_MD_LITE_X86_64
 #include <functional>
 #include "Eigen/Core"
 
@@ -30,4 +31,5 @@ using float16 = Eigen::half;
 using HalfToFloat = std::function<float(float16)>;
 const inline HalfToFloat half_to_float = Eigen::half_impl::half_to_float;
 #endif
+#endif
 #endif  // MINDSPORE_CORE_BASE_FLOAT16_H_
diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt
index cae5836b2a..ddcc2afdeb 100644
--- a/mindspore/lite/CMakeLists.txt
+++ b/mindspore/lite/CMakeLists.txt
@@ -260,6 +260,9 @@ endif()
 
 if(BUILD_MINDDATA STREQUAL "lite" OR BUILD_MINDDATA STREQUAL "full" OR BUILD_MINDDATA STREQUAL "wrapper")
     add_compile_definitions(ENABLE_ANDROID)
+    if(NOT PLATFORM_ARM32 AND NOT PLATFORM_ARM64)
+        add_compile_definitions(ENABLE_MD_LITE_X86_64)
+    endif()
     add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/minddata)
 endif()
 
diff --git a/mindspore/lite/tools/optimizer/common/gllo_utils.cc b/mindspore/lite/tools/optimizer/common/gllo_utils.cc
index 2cadc425fb..dc3edb7e66 100644
--- a/mindspore/lite/tools/optimizer/common/gllo_utils.cc
+++ b/mindspore/lite/tools/optimizer/common/gllo_utils.cc
@@ -20,11 +20,14 @@
 #include 
 #include 
 #include 
+#include "Eigen/Core"
 #include "ops/fusion/conv2d_fusion.h"
 #include "src/common/common.h"
 #include "frontend/operator/ops.h"
 #include "backend/optimizer/common/helper.h"
 
+using float16 = Eigen::half;
+
 namespace mindspore {
 namespace opt {
 namespace {
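
Note (illustrative sketch, not part of the patch): the CMake hunk defines ENABLE_MD_LITE_X86_64 only for non-ARM (x86_64) lite builds of minddata, and each #ifndef ENABLE_MD_LITE_X86_64 block above compiles the float16/Eigen-dependent path out of that build. A minimal, self-contained example of the same guard pattern follows; the Tensor and Status types here are placeholder stubs, not the MindSpore classes, and the function bodies are stand-ins.

    // Sketch of the conditional-compilation pattern used by the patch.
    // Tensor/Status are stub types so the example compiles on its own.
    #include <memory>

    struct Status {
      static Status OK() { return {}; }
    };
    struct Tensor {};

    #ifndef ENABLE_MD_LITE_X86_64
    // Full builds keep a real float16 conversion path here.
    Status ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
      // ... element-wise cast to float16 would go here ...
      (void)input;
      (void)output;
      return Status::OK();
    }
    #else
    // x86_64 lite builds have no float16/Eigen support, so the function
    // collapses to a no-op stub, mirroring the data_utils.cc change.
    Status ToFloat16(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
      (void)input;
      (void)output;
      return Status::OK();
    }
    #endif

Compiling with -DENABLE_MD_LITE_X86_64, which add_compile_definitions(ENABLE_MD_LITE_X86_64) passes to every minddata translation unit, selects the stub branch.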