From 50dcb79bdf2afba446a5b03b5b26be9fee8f6338 Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Thu, 30 Jul 2020 16:29:17 -0400 Subject: [PATCH 01/13] added MindData lite --- .gitmodules | 10 + build.sh | 46 +++++ .../ccsrc/minddata/dataset/api/CMakeLists.txt | 2 + .../ccsrc/minddata/dataset/api/de_tensor.cc | 188 ++++++++++++++++++ .../ccsrc/minddata/dataset/api/execute.cc | 55 +++++ .../ccsrc/minddata/dataset/core/client.h | 3 + .../ccsrc/minddata/dataset/core/tensor.cc | 4 + .../ccsrc/minddata/dataset/core/tensor.h | 9 + .../minddata/dataset/include/de_tensor.h | 53 +++++ .../ccsrc/minddata/dataset/include/execute.h | 51 +++++ .../ccsrc/minddata/dataset/include/tensor.h | 24 ++- .../kernels/image/resize_with_bbox_op.cc | 1 - mindspore/lite/CMakeLists.txt | 27 +++ mindspore/lite/minddata/CMakeLists.txt | 44 ++++ mindspore/lite/src/CMakeLists.txt | 4 + mindspore/lite/test/CMakeLists.txt | 10 + mindspore/lite/test/dataset/de_tensor_test.cc | 98 +++++++++ mindspore/lite/test/dataset/eager_test.cc | 165 +++++++++++++++ third_party/eigen | 1 + third_party/libjpeg-turbo | 1 + third_party/opencv | 1 + 21 files changed, 789 insertions(+), 8 deletions(-) create mode 100644 mindspore/ccsrc/minddata/dataset/api/de_tensor.cc create mode 100644 mindspore/ccsrc/minddata/dataset/api/execute.cc create mode 100644 mindspore/ccsrc/minddata/dataset/include/de_tensor.h create mode 100644 mindspore/ccsrc/minddata/dataset/include/execute.h create mode 100644 mindspore/lite/minddata/CMakeLists.txt create mode 100644 mindspore/lite/test/dataset/de_tensor_test.cc create mode 100644 mindspore/lite/test/dataset/eager_test.cc create mode 160000 third_party/eigen create mode 160000 third_party/libjpeg-turbo create mode 160000 third_party/opencv diff --git a/.gitmodules b/.gitmodules index 80eac2de7d..9ac5eb15ed 100644 --- a/.gitmodules +++ b/.gitmodules @@ -24,3 +24,13 @@ [submodule "third_party/OpenCL-Headers"] path = third_party/OpenCL-Headers url = https://github.com/KhronosGroup/OpenCL-Headers.git +[submodule "third_party/opencv"] + path = third_party/opencv + url = https://github.com/opencv/opencv.git +[submodule "third_party/eigen"] + path = third_party/eigen + url = https://gitlab.com/libeigen/eigen.git +[submodule "third_party/libjpeg-turbo"] + path = third_party/libjpeg-turbo + url = https://github.com/libjpeg-turbo/libjpeg-turbo.git + ignore = dirty diff --git a/build.sh b/build.sh index bd11f4fa04..af8cd3164a 100755 --- a/build.sh +++ b/build.sh @@ -519,6 +519,50 @@ build_opencl() { fi } +build_opencv() { + cd ${BASEPATH} + if [[ "${INC_BUILD}" == "off" ]]; then + git submodule update --init --recursive third_party/opencv + cd ${BASEPATH}/third_party/opencv + rm -rf build && mkdir -p build && cd build && cmake ${CMAKE_MINDDATA_ARGS} -DBUILD_SHARED_LIBS=ON -DBUILD_ANDROID_PROJECTS=OFF \ + -DBUILD_LIST=core,imgcodecs,imgproc -DBUILD_ZLIB=ON .. && make -j$THREAD_NUM + fi +} + +build_jpeg_turbo() { + cd ${BASEPATH} + if [[ "${INC_BUILD}" == "off" ]]; then + git submodule update --init --recursive third_party/libjpeg-turbo + cd ${BASEPATH}/third_party/libjpeg-turbo + rm -rf build && mkdir -p build && cd build && cmake ${CMAKE_MINDDATA_ARGS} -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX="${BASEPATH}/third_party/libjpeg-turbo" .. 
&& make -j$THREAD_NUM && make install + fi +} + +build_eigen() { + cd ${BASEPATH} + git submodule update --init --recursive third_party/eigen +} + +build_minddata_lite_deps() +{ + echo "start build minddata lite project" + if [[ "${LITE_PLATFORM}" == "arm64" ]]; then + CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" -DANDROID_NATIVE_API_LEVEL="19" \ + -DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="arm64-v8a" -DANDROID_TOOLCHAIN_NAME="aarch64-linux-android-clang" \ + -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE}" + elif [[ "${LITE_PLATFORM}" == "arm32" ]]; then + CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" -DANDROID_NATIVE_API_LEVEL="19" \ + -DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="armeabi-v7a" -DANDROID_TOOLCHAIN_NAME="clang" \ + -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE}" + else + CMAKE_MINDDATA_ARGS="-DCMAKE_BUILD_TYPE=${BUILD_TYPE} " + fi + build_opencv + build_eigen + build_jpeg_turbo +} + build_lite() { echo "start build mindspore lite project" @@ -533,6 +577,8 @@ build_lite() build_flatbuffer build_gtest + build_minddata_lite_deps + cd "${BASEPATH}/mindspore/lite" if [[ "${INC_BUILD}" == "off" ]]; then rm -rf build diff --git a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt index ae0b9cc28e..93de69aad2 100644 --- a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt @@ -13,4 +13,6 @@ add_library(cpp-API OBJECT iterator.cc transforms.cc samplers.cc + de_tensor.cc + execute.cc ) diff --git a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc new file mode 100644 index 0000000000..a2f61c899d --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc @@ -0,0 +1,188 @@ +#include "minddata/dataset/include/de_tensor.h" +#include "minddata/dataset/core/constants.h" +#include "minddata/dataset/core/data_type.h" +#include "mindspore/core/ir/dtype/type_id.h" +#include "utils/hashing.h" +#include "mindspore/lite/src/ir/tensor.h" + +namespace mindspore { +namespace tensor { +dataset::DataType MSTypeToDEType(TypeId data_type) { + switch (data_type) { + case kNumberTypeBool: + return dataset::DataType(dataset::DataType::DE_BOOL); + case kNumberTypeInt8: + return dataset::DataType(dataset::DataType::DE_INT8); + case kNumberTypeUInt8: + return dataset::DataType(dataset::DataType::DE_UINT8); + case kNumberTypeInt16: + return dataset::DataType(dataset::DataType::DE_INT16); + case kNumberTypeUInt16: + return dataset::DataType(dataset::DataType::DE_UINT16); + case kNumberTypeInt32: + return dataset::DataType(dataset::DataType::DE_INT32); + case kNumberTypeUInt32: + return dataset::DataType(dataset::DataType::DE_UINT32); + case kNumberTypeInt64: + return dataset::DataType(dataset::DataType::DE_INT64); + case kNumberTypeUInt64: + return dataset::DataType(dataset::DataType::DE_UINT64); + case kNumberTypeFloat16: + return dataset::DataType(dataset::DataType::DE_FLOAT16); + case kNumberTypeFloat32: + return dataset::DataType(dataset::DataType::DE_FLOAT32); + case kNumberTypeFloat64: + return dataset::DataType(dataset::DataType::DE_FLOAT64); + default: + // maybe throw? 
+ return dataset::DataType(dataset::DataType::DE_UNKNOWN); + } +} + +TypeId DETypeToMSType(dataset::DataType data_type) { + switch (data_type.value()) { + case dataset::DataType::DE_BOOL: + return mindspore::TypeId::kNumberTypeBool; + case dataset::DataType::DE_INT8: + return mindspore::TypeId::kNumberTypeInt8; + case dataset::DataType::DE_UINT8: + return mindspore::TypeId::kNumberTypeUInt8; + case dataset::DataType::DE_INT16: + return mindspore::TypeId::kNumberTypeInt16; + case dataset::DataType::DE_UINT16: + return mindspore::TypeId::kNumberTypeUInt16; + case dataset::DataType::DE_INT32: + return mindspore::TypeId::kNumberTypeInt32; + case dataset::DataType::DE_UINT32: + return mindspore::TypeId::kNumberTypeUInt32; + case dataset::DataType::DE_INT64: + return mindspore::TypeId::kNumberTypeInt64; + case dataset::DataType::DE_UINT64: + return mindspore::TypeId::kNumberTypeUInt64; + case dataset::DataType::DE_FLOAT16: + return mindspore::TypeId::kNumberTypeFloat16; + case dataset::DataType::DE_FLOAT32: + return mindspore::TypeId::kNumberTypeFloat32; + case dataset::DataType::DE_FLOAT64: + return mindspore::TypeId::kNumberTypeFloat64; + default: + // maybe throw? + return kTypeUnknown; + } +} + +MSTensor *DETensor::CreateTensor(TypeId data_type, const std::vector &shape) { + return new DETensor(data_type, shape); +} + +MSTensor *DETensor::CreateTensor(const std::string &path) { + std::shared_ptr t; + (void) dataset::Tensor::CreateFromFile(path, &t); + return new DETensor(std::move(t)); +} + +DETensor::DETensor(TypeId data_type, const std::vector &shape) { + std::vector t_shape; + t_shape.reserve(shape.size()); + std::transform(shape.begin(), shape.end(), + std::back_inserter(t_shape), + [](int s) -> dataset::dsize_t {return static_cast(s);}); + dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), MSTypeToDEType(data_type), &this->tensor_impl_); +} + +DETensor::DETensor(std::shared_ptr tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); } + +MSTensor *DETensor::ConvertToLiteTensor() { + // static MSTensor::CreateTensor is only for the LiteTensor + MSTensor *tensor = MSTensor::CreateTensor(this->data_type(), this->shape()); + MS_ASSERT(tensor->Size() == this->Size()); + memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size()); + return tensor; +} + +std::shared_ptr DETensor::tensor() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + return this->tensor_impl_; +} + +TypeId DETensor::data_type() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + return DETypeToMSType(this->tensor_impl_->type()); +} + +TypeId DETensor::set_data_type(TypeId data_type) { + MS_ASSERT(this->tensor_impl_ != nullptr); + if (data_type != this->data_type()) { + std::shared_ptr temp; + dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type), this->tensor_impl_->GetBuffer(), &temp); + this->tensor_impl_ = temp; + } + return data_type; +} + +std::vector DETensor::shape() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + std::vector t_shape = this->tensor_impl_->shape().AsVector(); + std::vector shape; + shape.reserve(t_shape.size()); + std::transform(t_shape.begin(), t_shape.end(), + std::back_inserter(shape), + [](dataset::dsize_t s) -> int {return static_cast(s);}); + return shape; +} + +size_t DETensor::set_shape(const std::vector &shape) { + MS_ASSERT(this->tensor_impl_ != nullptr); + std::vector t_shape; + t_shape.reserve(shape.size()); + std::transform(shape.begin(), shape.end(), + std::back_inserter(t_shape), + [](int s) -> 
dataset::dsize_t {return static_cast(s);}); + dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape)); + //TODO: what if t_shape has different size? + return shape.size(); +} + +int DETensor::DimensionSize(size_t index) const { + MS_ASSERT(this->tensor_impl_ != nullptr); + int dim_size = -1; + auto shape = this->shape(); + if (index < shape.size()) { + dim_size = shape[index]; + } else { + MS_LOG(ERROR) << "Dimension index is wrong: " << index; + } + return dim_size; +} + +int DETensor::ElementsNum() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + return this->tensor_impl_->Size(); +} + +std::size_t DETensor::hash() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + auto shape = this->shape(); + std::size_t hash_value = std::hash{}(SizeToInt(this->data_type())); + hash_value = hash_combine(hash_value, std::hash{}(shape.size())); + // hash all elements may costly, so only take at most 4 elements into account based on + // some experiments. + for (size_t i = 0; (i < shape.size()) && (i < 4); ++i) { + hash_value = hash_combine(hash_value, (std::hash{}(shape[i]))); + } + return hash_value; +} + +size_t DETensor::Size() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + return this->tensor_impl_->SizeInBytes(); +} + +void *DETensor::MutableData() const { + MS_ASSERT(this->tensor_impl_ != nullptr); + // TODO: friend the DETensor? + return this->tensor_impl_->GetMutableBuffer(); +} + +} // namespace tensor +} // namespace mindspore \ No newline at end of file diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc new file mode 100644 index 0000000000..78acf38469 --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -0,0 +1,55 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "minddata/dataset/include/execute.h" +#include "minddata/dataset/include/de_tensor.h" +#include "minddata/dataset/include/tensor.h" +#include "minddata/dataset/kernels/tensor_op.h" + +namespace mindspore { +namespace dataset { +namespace api { + +Execute::Execute(const std::shared_ptr &op) : op_(std::move(op)) {} + +std::shared_ptr Execute::operator()(std::shared_ptr input){ + // Build the op + if (op_ == nullptr) { + MS_LOG(ERROR) << "Input TensorOperation is not valid"; + return nullptr; + } + + std::shared_ptr de_input = std::dynamic_pointer_cast(input)->tensor(); + if (de_input == nullptr) { + MS_LOG(ERROR) << "Input Tensor is not valid"; + return nullptr; + } + std::shared_ptr transform = op_->Build(); + std::shared_ptr de_output; + Status rc = transform->Compute(de_input, &de_output); + + if (rc.IsError()) { + // execution failed + MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString(); + return nullptr; + } + return std::shared_ptr(new tensor::DETensor(std::move(de_output))); +} + + +} // namespace api +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/core/client.h b/mindspore/ccsrc/minddata/dataset/core/client.h index b538bb20e1..3de90cfeb2 100644 --- a/mindspore/ccsrc/minddata/dataset/core/client.h +++ b/mindspore/ccsrc/minddata/dataset/core/client.h @@ -25,8 +25,11 @@ #include "minddata/dataset/core/tensor_shape.h" #include "minddata/dataset/engine/data_schema.h" #include "minddata/dataset/engine/dataset_iterator.h" + +#ifndef ENABLE_ANDROID #include "minddata/dataset/engine/datasetops/source/mindrecord_op.h" #include "minddata/dataset/engine/datasetops/source/tf_reader_op.h" +#endif #ifdef ENABLE_PYTHON #include "minddata/dataset/engine/datasetops/barrier_op.h" diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.cc b/mindspore/ccsrc/minddata/dataset/core/tensor.cc index b8717c26fa..cd7e6dd75f 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.cc +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.cc @@ -213,6 +213,7 @@ Status Tensor::CreateFromNpArray(const py::array &arr, std::shared_ptr * } #endif +#ifndef ENABLE_ANDROID Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, TensorPtr *out) { const TensorAlloc *alloc = GlobalContext::Instance()->tensor_allocator(); *out = std::allocate_shared(*alloc, TensorShape({static_cast(bytes_list.value_size())}), @@ -255,6 +256,7 @@ Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const (*out)->Reshape(shape); return Status::OK(); } +#endif Status Tensor::CreateFromFile(const std::string &path, std::shared_ptr *out) { std::ifstream fs; @@ -269,6 +271,7 @@ Status Tensor::CreateFromFile(const std::string &path, std::shared_ptr * return Status::OK(); } +#ifndef ENABLE_ANDROID Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, const DataType &type, dsize_t pad_size, TensorPtr *out) { RETURN_IF_NOT_OK(Tensor::CreateEmpty(shape, type, out)); @@ -298,6 +301,7 @@ Status Tensor::CreateFromByteList(const dataengine::BytesList &bytes_list, const return Status::OK(); } +#endif // Memcpy the given strided array's used part to consecutive memory // Consider a 3-d array diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.h b/mindspore/ccsrc/minddata/dataset/core/tensor.h index 29d1785ad1..89c69e318b 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.h @@ -38,12 +38,18 @@ #include 
"minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_shape.h" #include "minddata/dataset/util/status.h" +#include "minddata/dataset/include/de_tensor.h" +#ifndef ENABLE_ANDROID #include "proto/example.pb.h" +#endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { +namespace tensor { +class DETensor; +} // namespace tensor namespace dataset { class Tensor; template @@ -55,6 +61,7 @@ using offset_t = uint32_t; // type of offset va using TensorPtr = std::shared_ptr; class Tensor { + friend class tensor::DETensor; public: Tensor() = delete; Tensor(const Tensor &other) = delete; @@ -117,6 +124,7 @@ class Tensor { static Status CreateFromNpArray(const py::array &arr, TensorPtr *out); #endif +#ifndef ENABLE_ANDROID /// Create a tensor of type DE_STRING from a BytesList. /// \param[in] bytes_list protobuf's Bytelist /// \param[in] shape shape of the outout tensor @@ -134,6 +142,7 @@ class Tensor { /// \return Status Code static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, const DataType &type, dsize_t pad_size, TensorPtr *out); +#endif /// Create a Tensor from a given list of values. /// \tparam type of the values to be inserted. diff --git a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h new file mode 100644 index 0000000000..5b9a36ef1f --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h @@ -0,0 +1,53 @@ + +#ifndef DATASET_INCLUDE_DETENSOR_H_ +#define DATASET_INCLUDE_DETENSOR_H_ +#include "include/ms_tensor.h" +#include "minddata/dataset/include/tensor.h" +#include "minddata/dataset/util/status.h" +namespace mindspore { +namespace tensor { +class DETensor : public MSTensor { + public: + // brief Create a MSTensor pointer. + // + // param data_type DataTypeId of tensor to be created. + // param shape Shape of tensor to be created. + // return MSTensor pointer. + static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); + + static MSTensor *CreateTensor(const std::string &path); + + DETensor(TypeId data_type, const std::vector &shape); + + explicit DETensor(std::shared_ptr tensor_ptr); + + ~DETensor() = default; + + MSTensor *ConvertToLiteTensor(); + + std::shared_ptr tensor() const; + + TypeId data_type() const override; + + TypeId set_data_type(const TypeId data_type) override; + + std::vector shape() const override; + + size_t set_shape(const std::vector &shape) override; + + int DimensionSize(size_t index) const override; + + int ElementsNum() const override; + + std::size_t hash() const override; + + size_t Size() const override; + + void *MutableData() const override; + + protected: + std::shared_ptr tensor_impl_; +}; +} // namespace tensor +} // namespace mindspore +#endif // DATASET_INCLUDE_DETENSOR_H_ \ No newline at end of file diff --git a/mindspore/ccsrc/minddata/dataset/include/execute.h b/mindspore/ccsrc/minddata/dataset/include/execute.h new file mode 100644 index 0000000000..4d686757ef --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/include/execute.h @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef DATASET_API_EXECUTE_H_ +#define DATASET_API_EXECUTE_H_ + +#include +#include +#include "minddata/dataset/core/constants.h" +#include "minddata/dataset/include/de_tensor.h" +#include "minddata/dataset/include/transforms.h" + +namespace mindspore { +namespace dataset { + +class TensorOp; + +namespace api { + +class Execute { + public: + /// \brief Constructor + Execute(const std::shared_ptr &op); + + /// \brief callable function to execute the TensorOperation in eager mode + /// \param[inout] input - the tensor to be transformed + /// \return - the output tensor, nullptr if Compute fails + std::shared_ptr operator()(std::shared_ptr input); + + private: + std::shared_ptr op_; +}; + + +} // namespace api +} // namespace dataset +} // namespace mindspore +#endif // DATASET_API_EXECUTE_H_ diff --git a/mindspore/ccsrc/minddata/dataset/include/tensor.h b/mindspore/ccsrc/minddata/dataset/include/tensor.h index c40f8346c7..89c69e318b 100644 --- a/mindspore/ccsrc/minddata/dataset/include/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/include/tensor.h @@ -38,12 +38,18 @@ #include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_shape.h" #include "minddata/dataset/util/status.h" +#include "minddata/dataset/include/de_tensor.h" +#ifndef ENABLE_ANDROID #include "proto/example.pb.h" +#endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { +namespace tensor { +class DETensor; +} // namespace tensor namespace dataset { class Tensor; template @@ -55,6 +61,7 @@ using offset_t = uint32_t; // type of offset va using TensorPtr = std::shared_ptr; class Tensor { + friend class tensor::DETensor; public: Tensor() = delete; Tensor(const Tensor &other) = delete; @@ -117,6 +124,7 @@ class Tensor { static Status CreateFromNpArray(const py::array &arr, TensorPtr *out); #endif +#ifndef ENABLE_ANDROID /// Create a tensor of type DE_STRING from a BytesList. /// \param[in] bytes_list protobuf's Bytelist /// \param[in] shape shape of the outout tensor @@ -134,6 +142,7 @@ class Tensor { /// \return Status Code static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, const DataType &type, dsize_t pad_size, TensorPtr *out); +#endif /// Create a Tensor from a given list of values. /// \tparam type of the values to be inserted. 
@@ -649,13 +658,6 @@ class Tensor { unsigned char *data_end_ = nullptr; private: -#ifdef ENABLE_PYTHON - /// Helper function to create a tensor from Numpy array of strings - /// \param[in] arr Numpy array - /// \param[out] out Created Tensor - /// \return Status - static Status CreateFromNpString(py::array arr, TensorPtr *out); -#endif /// Copy raw data of a array based on shape and strides to the destination pointer /// \param dst [out] Pointer to the destination array where the content is to be copied /// \param[in] src Pointer to the source of strided array to be copied @@ -668,6 +670,14 @@ class Tensor { /// const of the size of the offset variable static constexpr uint8_t kOffsetSize = sizeof(offset_t); + +#ifdef ENABLE_PYTHON + /// Helper function to create a tensor from Numpy array of strings + /// \param[in] arr Numpy array + /// \param[out] out Created Tensor + /// \return Status + static Status CreateFromNpString(py::array arr, TensorPtr *out); +#endif }; template <> inline Tensor::TensorIterator Tensor::end() { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc index 9df2d8a25e..8d40514f1b 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/resize_with_bbox_op.cc @@ -20,7 +20,6 @@ #include "minddata/dataset/kernels/image/resize_op.h" #include "minddata/dataset/kernels/image/image_utils.h" #include "minddata/dataset/core/cv_tensor.h" -#include "minddata/dataset/core/pybind_support.h" #include "minddata/dataset/core/tensor.h" #include "minddata/dataset/kernels/tensor_op.h" #include "minddata/dataset/util/status.h" diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt index 887da2c9de..cf48032b2d 100644 --- a/mindspore/lite/CMakeLists.txt +++ b/mindspore/lite/CMakeLists.txt @@ -33,6 +33,7 @@ option(BUILD_CONVERTER "if build converter" on) option(ENABLE_FP16 "if build fp16 ops" off) option(SUPPORT_GPU "if support gpu" off) option(OFFLINE_COMPILE "if offline compile OpenCL kernel" off) +option(BUILD_MINDDATA "" on) if (BUILD_DEVICE) add_compile_definitions(BUILD_DEVICE) @@ -116,6 +117,32 @@ if (BUILD_DEVICE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8.2-a+dotprod+fp16") endif () endif() +endif() + +if (BUILD_MINDDATA) + # opencv + set(OpenCV_DIR ${TOP_DIR}/third_party/opencv/build) + find_package(OpenCV REQUIRED) + include_directories(${OpenCV_INCLUDE_DIRS}) + # eigen + include_directories(${TOP_DIR}/third_party/eigen/) + # jpeg-turbo + add_library(jpeg-turbo SHARED IMPORTED) + set_target_properties(jpeg-turbo PROPERTIES + IMPORTED_LOCATION ${TOP_DIR}/third_party/libjpeg-turbo/lib/libturbojpeg.so + ) + add_library(jpeg SHARED IMPORTED) + set_target_properties(jpeg PROPERTIES + IMPORTED_LOCATION ${TOP_DIR}/third_party/libjpeg-turbo/lib/libjpeg.so + ) + include_directories(${TOP_DIR}/third_party/libjpeg-turbo/include) + + add_compile_definitions(ENABLE_ANDROID) + add_compile_definitions(ENABLE_EAGER) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/minddata) +endif() + +if (BUILD_DEVICE) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/benchmark) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test) diff --git a/mindspore/lite/minddata/CMakeLists.txt b/mindspore/lite/minddata/CMakeLists.txt new file mode 100644 index 0000000000..7ffbcdd9c9 --- /dev/null +++ b/mindspore/lite/minddata/CMakeLists.txt @@ -0,0 +1,44 @@ 
+set(MINDDATA_DIR ${CCSRC_DIR}/minddata/dataset) +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-deprecated-declarations") +set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s") + +AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/core MINDDATA_CORE_SRC_FILES) +list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES "${MINDDATA_DIR}/core/client.cc") + +AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels MINDDATA_KERNELS_SRC_FILES) +list(REMOVE_ITEM MINDDATA_KERNELS_SRC_FILES "${MINDDATA_DIR}/kernels/py_func_op.cc") + +AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels/image MINDDATA_KERNELS_IMAGE_SRC_FILES) + +AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/kernels/data MINDDATA_KERNELS_DATA_SRC_FILES) + +add_library(minddata-eager OBJECT + ${MINDDATA_DIR}/api/de_tensor.cc + ${MINDDATA_DIR}/api/execute.cc + ) + +add_library(minddata-lite SHARED + ${MINDDATA_CORE_SRC_FILES} + ${MINDDATA_KERNELS_SRC_FILES} + ${MINDDATA_KERNELS_IMAGE_SRC_FILES} + ${MINDDATA_KERNELS_DATA_SRC_FILES} + ${MINDDATA_DIR}/util/status.cc + ${MINDDATA_DIR}/util/memory_pool.cc + ${MINDDATA_DIR}/util/path.cc + ${MINDDATA_DIR}/api/transforms.cc + ${CORE_DIR}/utils/log_adapter.cc + ${CCSRC_DIR}/gvar/logging_level.cc + ) + +target_link_libraries(minddata-lite + securec + jpeg-turbo + jpeg + opencv_core + opencv_imgcodecs + opencv_imgproc + mindspore::json + ) \ No newline at end of file diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index 23ef963dad..438e3da634 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -80,5 +80,9 @@ target_link_libraries(mindspore-lite ) add_subdirectory(runtime/kernel/arm) +if (BUILD_MINDDATA) + target_link_libraries(mindspore-lite minddata-eager minddata-lite log) +endif () + add_subdirectory(ops) diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt index 14dd8a5115..dd909416c2 100644 --- a/mindspore/lite/test/CMakeLists.txt +++ b/mindspore/lite/test/CMakeLists.txt @@ -129,6 +129,15 @@ if (SUPPORT_GPU) ${LITE_DIR}/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc ) endif() +### minddata lite +if (BUILD_MINDDATA) + include_directories(${CCSRC_DIR}/minddata) + set(DATASET_TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/dataset) + set(TEST_MINDDATA_SRC + ${DATASET_TEST_DIR}/de_tensor_test.cc + ${DATASET_TEST_DIR}/eager_test.cc + ) +endif() ### runtime framework file(GLOB_RECURSE OPS_SRC ${LITE_DIR}/src/ops/*.cc) set(TEST_LITE_SRC @@ -245,6 +254,7 @@ file(GLOB_RECURSE TEST_CASE_KERNEL_SRC set(TEST_SRC ${TEST_LITE_SRC} + ${TEST_MINDDATA_SRC} ${TEST_CASE_KERNEL_SRC} ${TEST_DIR}/common/common_test.cc ${TEST_DIR}/main.cc diff --git a/mindspore/lite/test/dataset/de_tensor_test.cc b/mindspore/lite/test/dataset/de_tensor_test.cc new file mode 100644 index 0000000000..a96b7885ba --- /dev/null +++ b/mindspore/lite/test/dataset/de_tensor_test.cc @@ -0,0 +1,98 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include "common/common_test.h" +#include "gtest/gtest.h" +#include "securec.h" +#include "dataset/core/tensor.h" +#include "dataset/core/cv_tensor.h" +#include "dataset/core/data_type.h" +#include "mindspore/lite/src/ir/tensor.h" + +using namespace mindspore::dataset; + +class MindDataTestTensorDE : public UT::Common { + public: + MindDataTestTensorDE() {} +}; + +TEST_F(MindDataTestTensorDE, MSTensorBasic) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + ASSERT_EQ(t == std::dynamic_pointer_cast(ms_tensor)->tensor(), true); +} + +TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + std::shared_ptr lite_ms_tensor = std::shared_ptr( + std::dynamic_pointer_cast(ms_tensor)->ConvertToLiteTensor()); + // check if the lite_ms_tensor is the derived LiteTensor + mindspore::tensor::LiteTensor * lite_tensor = static_cast(lite_ms_tensor.get()); + ASSERT_EQ(lite_tensor != nullptr, true); +} + +TEST_F(MindDataTestTensorDE, MSTensorShape) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + ASSERT_EQ(ms_tensor->DimensionSize(0) == 2, true); + ASSERT_EQ(ms_tensor->DimensionSize(1) == 3, true); + ms_tensor->set_shape(std::vector{3,2}); + ASSERT_EQ(ms_tensor->DimensionSize(0) == 3, true); + ASSERT_EQ(ms_tensor->DimensionSize(1) == 2, true); + ms_tensor->set_shape(std::vector{6}); + ASSERT_EQ(ms_tensor->DimensionSize(0) == 6, true); +} + +TEST_F(MindDataTestTensorDE, MSTensorSize) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + ASSERT_EQ(ms_tensor->ElementsNum() == 6, true); + ASSERT_EQ(ms_tensor->Size() == 24, true); +} + +TEST_F(MindDataTestTensorDE, MSTensorDataType) { + std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeFloat32, true); + ms_tensor->set_data_type(mindspore::TypeId::kNumberTypeInt32); + ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeInt32, true); + ASSERT_EQ(std::dynamic_pointer_cast(ms_tensor)->tensor()->type() == DataType::DE_INT32, true); +} + +TEST_F(MindDataTestTensorDE, MSTensorMutableData) { + std::vector x = {2.5, 2.5, 2.5, 2.5}; + std::shared_ptr t; + Tensor::CreateTensor(&t, x, TensorShape({2, 2})); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + float *data = static_cast(ms_tensor->MutableData()); + std::vector tensor_vec(data, data + ms_tensor->ElementsNum()); + ASSERT_EQ(x == tensor_vec, true); + // TODO: add set_data_type after implmenting it +} + +TEST_F(MindDataTestTensorDE, MSTensorHash) { + std::vector x = {2.5, 2.5, 2.5, 2.5}; + std::shared_ptr t; + Tensor::CreateTensor(&t, x, TensorShape({2, 2})); + auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); +#ifdef ENABLE_ARM64 + ASSERT_EQ(ms_tensor->hash() == 11093771382437, true); // arm64 +#else + ASSERT_EQ(ms_tensor->hash() == 
11093825635904, true); +#endif +} \ No newline at end of file diff --git a/mindspore/lite/test/dataset/eager_test.cc b/mindspore/lite/test/dataset/eager_test.cc new file mode 100644 index 0000000000..5b28c44d47 --- /dev/null +++ b/mindspore/lite/test/dataset/eager_test.cc @@ -0,0 +1,165 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include "common/common_test.h" +#include "gtest/gtest.h" +#include "securec.h" +#include "minddata/dataset/core/tensor.h" +#include "minddata/dataset/core/config_manager.h" +#include "minddata/dataset/include/datasets.h" +#include "minddata/dataset/include/execute.h" +#include "minddata/dataset/util/path.h" + +using namespace mindspore::dataset; +using namespace mindspore::dataset::api; +using namespace mindspore; + +class MindDataTestEager : public UT::Common { + public: + MindDataTestEager() {} +}; + +TEST_F(MindDataTestEager, Test1) { + std::string in_dir = "/sdcard/data/testPK/data/class1"; + Path base_dir = Path(in_dir); + MS_LOG(WARNING) << base_dir.toString() << "."; + if (!base_dir.IsDirectory() || !base_dir.Exists()) { + MS_LOG(INFO) << "Input dir is not a directory or doesn't exist" << "."; + } + auto t_start = std::chrono::high_resolution_clock::now(); + // check if output_dir exists and create it if it does not exist + + // iterate over in dir and create json for all images + auto dir_it = Path::DirIterator::OpenDirectory(&base_dir); + while (dir_it->hasNext()) { + Path v = dir_it->next(); + MS_LOG(WARNING) << v.toString() << "."; + std::shared_ptr image = std::shared_ptr(tensor::DETensor::CreateTensor(v.toString())); + + image = Execute(vision::Decode())(image); + EXPECT_TRUE(image != nullptr); + image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image); + EXPECT_TRUE(image != nullptr); + image = Execute(vision::Resize({224, 224}))(image); + EXPECT_TRUE(image != nullptr); + EXPECT_TRUE(image->DimensionSize(0) == 224); + EXPECT_TRUE(image->DimensionSize(1) == 224); + } + auto t_end = std::chrono::high_resolution_clock::now(); + double elapsed_time_ms = std::chrono::duration(t_end-t_start).count(); + MS_LOG(INFO) << "duration: " << elapsed_time_ms << " ms\n"; +} + +/* +TEST_F(MindDataTestEager, Test2) { + // string dir for image folder + std::string in_dir = datasets_root_path_ + "/testPK/data"; + // run dataset with decode = on + std::shared_ptr ds = ImageFolder(in_dir, true, RandomSampler(false)); + std::shared_ptr normalize_op = vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}); + EXPECT_TRUE(normalize_op != nullptr); + std::shared_ptr resize_op = vision::Resize({224, 224}); + EXPECT_TRUE(resize_op != nullptr); + ds = ds->Map({normalize_op, resize_op}); + EXPECT_TRUE(ds != nullptr); + // Create an iterator over the result of the above dataset + // This will trigger the creation of the Execution Tree and launch it. 
+ std::shared_ptr iter = ds->CreateIterator(); + EXPECT_TRUE(iter != nullptr); + + // Iterate the dataset and get each row + std::unordered_map> row; + iter->GetNextRow(&row); + + uint64_t i = 0; + while (row.size() != 0) { + i++; + auto image = row["image"]; + MS_LOG(INFO) << "Tensor image shape: " << image->shape(); + iter->GetNextRow(&row); + } + MS_LOG(WARNING) << i << "."; + iter->Stop(); + +} + +TEST_F(MindDataTestEager, Test3) { + // string dir for image folder + ConfigManager cm = ConfigManager(); + cm.set_num_parallel_workers(1); + std::string in_dir = datasets_root_path_ + "/testPK/data"; + // run dataset with decode = on + std::shared_ptr ds = ImageFolder(in_dir, true, RandomSampler(false)); + std::shared_ptr normalize_op = vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}); + EXPECT_TRUE(normalize_op != nullptr); + std::shared_ptr resize_op = vision::Resize({224, 224}); + EXPECT_TRUE(resize_op != nullptr); + ds = ds->Map({normalize_op, resize_op}); + EXPECT_TRUE(ds != nullptr); + // Create an iterator over the result of the above dataset + // This will trigger the creation of the Execution Tree and launch it. + std::shared_ptr iter = ds->CreateIterator(); + EXPECT_TRUE(iter != nullptr); + + // Iterate the dataset and get each row + std::unordered_map> row; + iter->GetNextRow(&row); + + uint64_t i = 0; + while (row.size() != 0) { + i++; + auto image = row["image"]; + MS_LOG(INFO) << "Tensor image shape: " << image->shape(); + iter->GetNextRow(&row); + } + MS_LOG(WARNING) << i << "."; + iter->Stop(); + +} + +TEST_F(MindDataTestEager, Test4) { + // string dir for image folder + ConfigManager cm = ConfigManager(); + cm.set_num_parallel_workers(1); + std::string in_dir = datasets_root_path_ + "/testPK/data"; + // run dataset with decode = on + std::shared_ptr ds = ImageFolder(in_dir, true, RandomSampler(false)); + // Create an iterator over the result of the above dataset + // This will trigger the creation of the Execution Tree and launch it. 
+ std::shared_ptr iter = ds->CreateIterator(); + EXPECT_TRUE(iter != nullptr); + + // Iterate the dataset and get each row + std::unordered_map> row; + iter->GetNextRow(&row); + + uint64_t i = 0; + while (row.size() != 0) { + i++; + auto image = row["image"]; + image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image); + EXPECT_TRUE(image != nullptr); + image = Execute(vision::Resize({224, 224}))(image); + EXPECT_TRUE(image != nullptr); + + MS_LOG(INFO) << "Tensor image shape: " << image->shape(); + iter->GetNextRow(&row); + } + MS_LOG(WARNING) << i << "."; + iter->Stop(); + +} +*/ diff --git a/third_party/eigen b/third_party/eigen new file mode 160000 index 0000000000..daf9bbeca2 --- /dev/null +++ b/third_party/eigen @@ -0,0 +1 @@ +Subproject commit daf9bbeca26e98da2eed0058835cbb04e0a30ad8 diff --git a/third_party/libjpeg-turbo b/third_party/libjpeg-turbo new file mode 160000 index 0000000000..b443c541b9 --- /dev/null +++ b/third_party/libjpeg-turbo @@ -0,0 +1 @@ +Subproject commit b443c541b9a6fdcac214f9f003de0aa13e480ac1 diff --git a/third_party/opencv b/third_party/opencv new file mode 160000 index 0000000000..bda89a6469 --- /dev/null +++ b/third_party/opencv @@ -0,0 +1 @@ +Subproject commit bda89a6469aa79ecd8713967916bd754bff1d931 From 6ace46d516bdf263ae112d41362f49e25ffa267f Mon Sep 17 00:00:00 2001 From: Eric Date: Thu, 30 Jul 2020 22:12:55 -0400 Subject: [PATCH 02/13] Added preprocessor, cmake directory pending --- mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc | 6 ++++++ mindspore/ccsrc/minddata/dataset/engine/opt/pass.h | 4 ++++ .../dataset/engine/opt/pre/cache_transform_pass.cc | 11 +++++++++++ .../dataset/engine/opt/pre/cache_transform_pass.h | 5 +++++ .../minddata/dataset/engine/opt/util/printer_pass.cc | 3 ++- .../minddata/dataset/engine/opt/util/printer_pass.h | 2 ++ 6 files changed, 30 insertions(+), 1 deletion(-) diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc index 07ee10307a..4a2041e63d 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc @@ -34,10 +34,14 @@ #include "minddata/dataset/engine/datasetops/source/cifar_op.h" #include "minddata/dataset/engine/datasetops/source/coco_op.h" #include "minddata/dataset/engine/datasetops/source/manifest_op.h" +#ifndef ENABLE_ANDROID #include "minddata/dataset/engine/datasetops/source/mindrecord_op.h" +#endif #include "minddata/dataset/engine/datasetops/source/mnist_op.h" #include "minddata/dataset/engine/datasetops/source/random_data_op.h" +#ifndef ENABLE_ANDROID #include "minddata/dataset/engine/datasetops/source/tf_reader_op.h" +#endif #include "minddata/dataset/engine/datasetops/source/voc_op.h" #ifdef ENABLE_PYTHON #include "minddata/dataset/engine/datasetops/filter_op.h" @@ -136,6 +140,7 @@ Status NodePass::RunOnNode(std::shared_ptr node, bool *modified) { return RunOnNode(std::static_pointer_cast(node), modified); } +#ifndef ENABLE_ANDROID Status NodePass::RunOnNode(std::shared_ptr node, bool *modified) { // Fallback to base class visitor by default return RunOnNode(std::static_pointer_cast(node), modified); @@ -145,6 +150,7 @@ Status NodePass::RunOnNode(std::shared_ptr node, bool *modified) { // Fallback to base class visitor by default return RunOnNode(std::static_pointer_cast(node), modified); } +#endif #ifdef ENABLE_PYTHON Status NodePass::RunOnNode(std::shared_ptr node, bool *modified) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pass.h 
b/mindspore/ccsrc/minddata/dataset/engine/opt/pass.h index f736c46229..f154b6c205 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pass.h +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pass.h @@ -37,9 +37,11 @@ class SkipOp; class ShuffleOp; +#ifndef ENABLE_ANDROID class MindRecordOp; class TFReaderOp; +#endif #ifdef ENABLE_PYTHON class FilterOp; @@ -158,9 +160,11 @@ class NodePass : public Pass { virtual Status RunOnNode(std::shared_ptr node, bool *modified); +#ifndef ENABLE_ANDROID virtual Status RunOnNode(std::shared_ptr node, bool *modified); virtual Status RunOnNode(std::shared_ptr node, bool *modified); +#endif #ifdef ENABLE_PYTHON virtual Status RunOnNode(std::shared_ptr node, bool *modified); diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc index 1667be4e96..8a463aecfa 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc @@ -25,10 +25,17 @@ #include "minddata/dataset/engine/datasetops/source/cifar_op.h" #include "minddata/dataset/engine/datasetops/source/coco_op.h" #include "minddata/dataset/engine/datasetops/source/image_folder_op.h" + +#ifndef ENABLE_ANDROID #include "minddata/dataset/engine/datasetops/source/mindrecord_op.h" +#endif + #include "minddata/dataset/engine/datasetops/source/mnist_op.h" #include "minddata/dataset/engine/datasetops/source/random_data_op.h" + +#ifndef ENABLE_ANDROID #include "minddata/dataset/engine/datasetops/source/tf_reader_op.h" +#endif #ifdef ENABLE_PYTHON #include "minddata/dataset/engine/datasetops/source/generator_op.h" @@ -123,6 +130,7 @@ Status CacheTransformPass::CachePass::NonMappableCacheLeafSetup(std::shared_ptr< return Status::OK(); } +#ifndef ENABLE_ANDROID // Perform leaf node cache transform identification Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr node, bool *modified) { if (is_caching_) { @@ -132,6 +140,7 @@ Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr node } return NonMappableCacheLeafSetup(std::static_pointer_cast(node)); } +#endif // Perform leaf node cache transform identification Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr node, bool *modified) { @@ -163,10 +172,12 @@ Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr node, return MappableCacheLeafSetup(std::static_pointer_cast(node)); } +#ifndef ENABLE_ANDROID // Perform leaf node cache transform identification Status CacheTransformPass::CachePass::RunOnNode(std::shared_ptr node, bool *modified) { return MappableCacheLeafSetup(std::static_pointer_cast(node)); } +#endif #ifdef ENABLE_PYTHON // Perform leaf node cache transform identification diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.h b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.h index d16334f3bb..970461d48f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.h +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.h @@ -58,11 +58,14 @@ class CacheTransformPass : public TreePass { /// \return Status The error code return Status RunOnNode(std::shared_ptr node, bool *modified) override; +#ifndef ENABLE_ANDROID + /// \brief Perform leaf node cache tranform identifications /// \param[in] node The node being visited /// \param[inout] modified Indicator if the node was changed at all /// \return Status The error code 
return Status RunOnNode(std::shared_ptr node, bool *modified) override; +#endif /// \brief Perform leaf node cache tranform identifications /// \param[in] node The node being visited @@ -120,11 +123,13 @@ class CacheTransformPass : public TreePass { /// \return Status The error code return Status RunOnNode(std::shared_ptr node, bool *modified) override; +#ifndef ENABLE_ANDROID /// \brief Perform leaf node cache tranform identifications /// \param[in] node The node being visited /// \param[inout] modified Indicator if the node was changed at all /// \return Status The error code return Status RunOnNode(std::shared_ptr node, bool *modified) override; +#endif /// \brief Getter std::vector, std::shared_ptr>> cache_pairs() { return cache_pairs_; } diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.cc index eb74d8fcc3..02f7bf8dfa 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.cc @@ -60,7 +60,7 @@ Status PrinterPass::RunOnNode(std::shared_ptr node, bool *modified) { std::cout << "Visiting ShuffleOp" << '\n'; return Status::OK(); } - +#ifndef ENABLE_ANDROID Status PrinterPass::RunOnNode(std::shared_ptr node, bool *modified) { *modified = false; std::cout << "Visiting MindRecordOp" << '\n'; @@ -72,6 +72,7 @@ Status PrinterPass::RunOnNode(std::shared_ptr node, bool *modified) std::cout << "Visiting TFReaderOp" << '\n'; return Status::OK(); } +#endif #ifdef ENABLE_PYTHON Status PrinterPass::RunOnNode(std::shared_ptr node, bool *modified) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h b/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h index bc84c0bd90..31b444cebd 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h @@ -39,9 +39,11 @@ class PrinterPass : public NodePass { Status RunOnNode(std::shared_ptr node, bool *modified) override; +#ifndef ENABLE_ANDROID Status RunOnNode(std::shared_ptr node, bool *modified) override; Status RunOnNode(std::shared_ptr node, bool *modified) override; +#endif #ifdef ENABLE_PYTHON Status RunOnNode(std::shared_ptr node, bool *modified) override; From 719823291b13cad14d4066d385449dbcd0e72da4 Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Fri, 31 Jul 2020 10:41:04 -0400 Subject: [PATCH 03/13] rebased again --- build.sh | 6 ++++++ mindspore/lite/test/CMakeLists.txt | 9 ++++++++- .../lite/test/{ => ut/src}/dataset/de_tensor_test.cc | 8 ++++---- mindspore/lite/test/{ => ut/src}/dataset/eager_test.cc | 2 +- 4 files changed, 19 insertions(+), 6 deletions(-) rename mindspore/lite/test/{ => ut/src}/dataset/de_tensor_test.cc (93%) rename mindspore/lite/test/{ => ut/src}/dataset/eager_test.cc (99%) diff --git a/build.sh b/build.sh index af8cd3164a..4191bb8f35 100755 --- a/build.sh +++ b/build.sh @@ -519,6 +519,11 @@ build_opencl() { fi } +build_gtest() { + cd ${BASEPATH} + git submodule update --init --recursive third_party/googletest +} + build_opencv() { cd ${BASEPATH} if [[ "${INC_BUILD}" == "off" ]]; then @@ -561,6 +566,7 @@ build_minddata_lite_deps() build_opencv build_eigen build_jpeg_turbo + build_gtest } build_lite() diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt index dd909416c2..b17c5662ab 100644 --- a/mindspore/lite/test/CMakeLists.txt +++ b/mindspore/lite/test/CMakeLists.txt @@ -132,7 +132,7 @@ endif() 
### minddata lite if (BUILD_MINDDATA) include_directories(${CCSRC_DIR}/minddata) - set(DATASET_TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/dataset) + set(DATASET_TEST_DIR ${TEST_DIR}/ut/src/dataset) set(TEST_MINDDATA_SRC ${DATASET_TEST_DIR}/de_tensor_test.cc ${DATASET_TEST_DIR}/eager_test.cc @@ -294,6 +294,13 @@ endif () add_executable(lite-test ${TEST_SRC}) target_link_libraries(lite-test dl ${SECUREC_LIBRARY} ${GTEST_LIBRARY} mindspore::json) +if (BUILD_MINDDATA) + target_link_libraries(lite-test + minddata-lite + minddata-eager + log + ) +endif() if (BUILD_CONVERTER) target_link_libraries(lite-test anf_exporter_mid diff --git a/mindspore/lite/test/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc similarity index 93% rename from mindspore/lite/test/dataset/de_tensor_test.cc rename to mindspore/lite/test/ut/src/dataset/de_tensor_test.cc index a96b7885ba..431216cb94 100644 --- a/mindspore/lite/test/dataset/de_tensor_test.cc +++ b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc @@ -25,7 +25,7 @@ using namespace mindspore::dataset; -class MindDataTestTensorDE : public UT::Common { +class MindDataTestTensorDE : public mindspore::Common { public: MindDataTestTensorDE() {} }; @@ -42,7 +42,7 @@ TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) { std::shared_ptr lite_ms_tensor = std::shared_ptr( std::dynamic_pointer_cast(ms_tensor)->ConvertToLiteTensor()); // check if the lite_ms_tensor is the derived LiteTensor - mindspore::tensor::LiteTensor * lite_tensor = static_cast(lite_ms_tensor.get()); + mindspore::lite::tensor::LiteTensor * lite_tensor = static_cast(lite_ms_tensor.get()); ASSERT_EQ(lite_tensor != nullptr, true); } @@ -77,7 +77,7 @@ TEST_F(MindDataTestTensorDE, MSTensorDataType) { TEST_F(MindDataTestTensorDE, MSTensorMutableData) { std::vector x = {2.5, 2.5, 2.5, 2.5}; std::shared_ptr t; - Tensor::CreateTensor(&t, x, TensorShape({2, 2})); + Tensor::CreateFromVector(x, TensorShape({2, 2}), &t); auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); float *data = static_cast(ms_tensor->MutableData()); std::vector tensor_vec(data, data + ms_tensor->ElementsNum()); @@ -88,7 +88,7 @@ TEST_F(MindDataTestTensorDE, MSTensorMutableData) { TEST_F(MindDataTestTensorDE, MSTensorHash) { std::vector x = {2.5, 2.5, 2.5, 2.5}; std::shared_ptr t; - Tensor::CreateTensor(&t, x, TensorShape({2, 2})); + Tensor::CreateFromVector(x, TensorShape({2, 2}), &t); auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); #ifdef ENABLE_ARM64 ASSERT_EQ(ms_tensor->hash() == 11093771382437, true); // arm64 diff --git a/mindspore/lite/test/dataset/eager_test.cc b/mindspore/lite/test/ut/src/dataset/eager_test.cc similarity index 99% rename from mindspore/lite/test/dataset/eager_test.cc rename to mindspore/lite/test/ut/src/dataset/eager_test.cc index 5b28c44d47..2a173a8a0f 100644 --- a/mindspore/lite/test/dataset/eager_test.cc +++ b/mindspore/lite/test/ut/src/dataset/eager_test.cc @@ -27,7 +27,7 @@ using namespace mindspore::dataset; using namespace mindspore::dataset::api; using namespace mindspore; -class MindDataTestEager : public UT::Common { +class MindDataTestEager : public mindspore::Common { public: MindDataTestEager() {} }; From aec85d299bd37a7308abce8b49c1071465feeac1 Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 10:36:11 -0400 Subject: [PATCH 04/13] applied minor fixes --- build.sh | 6 -- .../ccsrc/minddata/dataset/api/execute.cc | 2 +- .../minddata/dataset/include/de_tensor.h | 55 +++++----- 
.../test/ut/src/dataset/de_tensor_test.cc | 2 +- .../lite/test/ut/src/dataset/eager_test.cc | 101 ------------------ 5 files changed, 31 insertions(+), 135 deletions(-) diff --git a/build.sh b/build.sh index 4191bb8f35..af8cd3164a 100755 --- a/build.sh +++ b/build.sh @@ -519,11 +519,6 @@ build_opencl() { fi } -build_gtest() { - cd ${BASEPATH} - git submodule update --init --recursive third_party/googletest -} - build_opencv() { cd ${BASEPATH} if [[ "${INC_BUILD}" == "off" ]]; then @@ -566,7 +561,6 @@ build_minddata_lite_deps() build_opencv build_eigen build_jpeg_turbo - build_gtest } build_lite() diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc index 78acf38469..33eb117cb6 100644 --- a/mindspore/ccsrc/minddata/dataset/api/execute.cc +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -46,7 +46,7 @@ std::shared_ptr Execute::operator()(std::shared_ptr(new tensor::DETensor(std::move(de_output))); + return std::make_shared(std::move(de_output)); } diff --git a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h index 5b9a36ef1f..8980de7abc 100644 --- a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h +++ b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h @@ -1,6 +1,5 @@ - -#ifndef DATASET_INCLUDE_DETENSOR_H_ -#define DATASET_INCLUDE_DETENSOR_H_ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_ #include "include/ms_tensor.h" #include "minddata/dataset/include/tensor.h" #include "minddata/dataset/util/status.h" @@ -8,46 +7,50 @@ namespace mindspore { namespace tensor { class DETensor : public MSTensor { public: - // brief Create a MSTensor pointer. - // - // param data_type DataTypeId of tensor to be created. - // param shape Shape of tensor to be created. - // return MSTensor pointer. - static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); + /// \brief Create a MSTensor pointer. + /// \param[data_type] DataTypeId of tensor to be created. + /// \param[shape] Shape of tensor to be created. + /// \return - MSTensor pointer. + static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); - static MSTensor *CreateTensor(const std::string &path); + /// \brief Create a MSTensor pointer. + /// \param[path] Path file to be read. + /// \return - MSTensor pointer. + static MSTensor *CreateTensor(const std::string &path); - DETensor(TypeId data_type, const std::vector &shape); + DETensor(TypeId data_type, const std::vector &shape); - explicit DETensor(std::shared_ptr tensor_ptr); + explicit DETensor(std::shared_ptr tensor_ptr); - ~DETensor() = default; + ~DETensor() = default; - MSTensor *ConvertToLiteTensor(); + /// \brief Create a duplicate instance, convert the DETensor to the LiteTensor. + /// \return - MSTensor pointer. 
+ MSTensor *ConvertToLiteTensor(); - std::shared_ptr tensor() const; + std::shared_ptr tensor() const; - TypeId data_type() const override; + TypeId data_type() const override; - TypeId set_data_type(const TypeId data_type) override; + TypeId set_data_type(const TypeId data_type) override; - std::vector shape() const override; + std::vector shape() const override; - size_t set_shape(const std::vector &shape) override; + size_t set_shape(const std::vector &shape) override; - int DimensionSize(size_t index) const override; + int DimensionSize(size_t index) const override; - int ElementsNum() const override; + int ElementsNum() const override; - std::size_t hash() const override; + std::size_t hash() const override; - size_t Size() const override; + size_t Size() const override; - void *MutableData() const override; + void *MutableData() const override; protected: - std::shared_ptr tensor_impl_; + std::shared_ptr tensor_impl_; }; } // namespace tensor } // namespace mindspore -#endif // DATASET_INCLUDE_DETENSOR_H_ \ No newline at end of file +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_ \ No newline at end of file diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc index 431216cb94..13120405eb 100644 --- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc +++ b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/test/ut/src/dataset/eager_test.cc b/mindspore/lite/test/ut/src/dataset/eager_test.cc index 2a173a8a0f..6cacfffdfa 100644 --- a/mindspore/lite/test/ut/src/dataset/eager_test.cc +++ b/mindspore/lite/test/ut/src/dataset/eager_test.cc @@ -62,104 +62,3 @@ TEST_F(MindDataTestEager, Test1) { double elapsed_time_ms = std::chrono::duration(t_end-t_start).count(); MS_LOG(INFO) << "duration: " << elapsed_time_ms << " ms\n"; } - -/* -TEST_F(MindDataTestEager, Test2) { - // string dir for image folder - std::string in_dir = datasets_root_path_ + "/testPK/data"; - // run dataset with decode = on - std::shared_ptr ds = ImageFolder(in_dir, true, RandomSampler(false)); - std::shared_ptr normalize_op = vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}); - EXPECT_TRUE(normalize_op != nullptr); - std::shared_ptr resize_op = vision::Resize({224, 224}); - EXPECT_TRUE(resize_op != nullptr); - ds = ds->Map({normalize_op, resize_op}); - EXPECT_TRUE(ds != nullptr); - // Create an iterator over the result of the above dataset - // This will trigger the creation of the Execution Tree and launch it. 
- std::shared_ptr iter = ds->CreateIterator(); - EXPECT_TRUE(iter != nullptr); - - // Iterate the dataset and get each row - std::unordered_map> row; - iter->GetNextRow(&row); - - uint64_t i = 0; - while (row.size() != 0) { - i++; - auto image = row["image"]; - MS_LOG(INFO) << "Tensor image shape: " << image->shape(); - iter->GetNextRow(&row); - } - MS_LOG(WARNING) << i << "."; - iter->Stop(); - -} - -TEST_F(MindDataTestEager, Test3) { - // string dir for image folder - ConfigManager cm = ConfigManager(); - cm.set_num_parallel_workers(1); - std::string in_dir = datasets_root_path_ + "/testPK/data"; - // run dataset with decode = on - std::shared_ptr ds = ImageFolder(in_dir, true, RandomSampler(false)); - std::shared_ptr normalize_op = vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}); - EXPECT_TRUE(normalize_op != nullptr); - std::shared_ptr resize_op = vision::Resize({224, 224}); - EXPECT_TRUE(resize_op != nullptr); - ds = ds->Map({normalize_op, resize_op}); - EXPECT_TRUE(ds != nullptr); - // Create an iterator over the result of the above dataset - // This will trigger the creation of the Execution Tree and launch it. - std::shared_ptr iter = ds->CreateIterator(); - EXPECT_TRUE(iter != nullptr); - - // Iterate the dataset and get each row - std::unordered_map> row; - iter->GetNextRow(&row); - - uint64_t i = 0; - while (row.size() != 0) { - i++; - auto image = row["image"]; - MS_LOG(INFO) << "Tensor image shape: " << image->shape(); - iter->GetNextRow(&row); - } - MS_LOG(WARNING) << i << "."; - iter->Stop(); - -} - -TEST_F(MindDataTestEager, Test4) { - // string dir for image folder - ConfigManager cm = ConfigManager(); - cm.set_num_parallel_workers(1); - std::string in_dir = datasets_root_path_ + "/testPK/data"; - // run dataset with decode = on - std::shared_ptr ds = ImageFolder(in_dir, true, RandomSampler(false)); - // Create an iterator over the result of the above dataset - // This will trigger the creation of the Execution Tree and launch it. 
- std::shared_ptr iter = ds->CreateIterator(); - EXPECT_TRUE(iter != nullptr); - - // Iterate the dataset and get each row - std::unordered_map> row; - iter->GetNextRow(&row); - - uint64_t i = 0; - while (row.size() != 0) { - i++; - auto image = row["image"]; - image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image); - EXPECT_TRUE(image != nullptr); - image = Execute(vision::Resize({224, 224}))(image); - EXPECT_TRUE(image != nullptr); - - MS_LOG(INFO) << "Tensor image shape: " << image->shape(); - iter->GetNextRow(&row); - } - MS_LOG(WARNING) << i << "."; - iter->Stop(); - -} -*/ From 83499884ae28a0df22439cbda4ae5099abb2f347 Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 11:31:08 -0400 Subject: [PATCH 05/13] adapted test for x86 build --- mindspore/lite/src/CMakeLists.txt | 5 ++++- mindspore/lite/test/CMakeLists.txt | 4 +++- mindspore/lite/test/run_test.sh | 6 ++++++ mindspore/lite/test/ut/src/dataset/de_tensor_test.cc | 6 +----- mindspore/lite/test/ut/src/dataset/eager_test.cc | 4 ++++ 5 files changed, 18 insertions(+), 7 deletions(-) mode change 100644 => 100755 mindspore/lite/test/run_test.sh diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index 438e3da634..c41ad846e0 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -81,7 +81,10 @@ target_link_libraries(mindspore-lite add_subdirectory(runtime/kernel/arm) if (BUILD_MINDDATA) - target_link_libraries(mindspore-lite minddata-eager minddata-lite log) + target_link_libraries(mindspore-lite minddata-eager minddata-lite) + if (PLATFORM_ARM32 OR PLATFORM_ARM64) + target_link_libraries(mindspore-lite log) + endif() endif () add_subdirectory(ops) diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt index b17c5662ab..a5d46bad2f 100644 --- a/mindspore/lite/test/CMakeLists.txt +++ b/mindspore/lite/test/CMakeLists.txt @@ -298,8 +298,10 @@ if (BUILD_MINDDATA) target_link_libraries(lite-test minddata-lite minddata-eager - log ) + if (PLATFORM_ARM32 OR PLATFORM_ARM64) + target_link_libraries(lite-test log) + endif() endif() if (BUILD_CONVERTER) target_link_libraries(lite-test diff --git a/mindspore/lite/test/run_test.sh b/mindspore/lite/test/run_test.sh old mode 100644 new mode 100755 index 738df29b9c..be4073bc2b --- a/mindspore/lite/test/run_test.sh +++ b/mindspore/lite/test/run_test.sh @@ -7,6 +7,12 @@ mkdir -pv ${CUR_DIR}/do_test cd ${CUR_DIR}/do_test cp ${BUILD_DIR}/test/lite-test ./ cp -r ${CUR_DIR}/ut/src/runtime/kernel/arm/test_data/* ./ +## prepare data for dataset +TEST_DATA_DIR=${CUR_DIR}/../../../tests/ut/data/dataset/ +cp -fr $TEST_DATA_DIR/testPK ./data + +./lite-test --gtest_filter="*MindDataTestTensorDE*" +./lite-test --gtest_filter="*MindDataTestEager*" ./lite-test --gtest_filter="*TestHebing*" diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc index 13120405eb..0a497a65a5 100644 --- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc +++ b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc @@ -90,9 +90,5 @@ TEST_F(MindDataTestTensorDE, MSTensorHash) { std::shared_ptr t; Tensor::CreateFromVector(x, TensorShape({2, 2}), &t); auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); -#ifdef ENABLE_ARM64 - ASSERT_EQ(ms_tensor->hash() == 11093771382437, true); // arm64 -#else - ASSERT_EQ(ms_tensor->hash() == 11093825635904, true); -#endif + ASSERT_EQ(ms_tensor->hash() == 11093771382437, true); } \ No 
newline at end of file diff --git a/mindspore/lite/test/ut/src/dataset/eager_test.cc b/mindspore/lite/test/ut/src/dataset/eager_test.cc index 6cacfffdfa..3a36a5194f 100644 --- a/mindspore/lite/test/ut/src/dataset/eager_test.cc +++ b/mindspore/lite/test/ut/src/dataset/eager_test.cc @@ -33,7 +33,11 @@ class MindDataTestEager : public mindspore::Common { }; TEST_F(MindDataTestEager, Test1) { +#ifdef ENABLE_ARM64 || ENABLE_ARM32 std::string in_dir = "/sdcard/data/testPK/data/class1"; +#else + std::string in_dir = "data/testPK/data/class1"; +#endif Path base_dir = Path(in_dir); MS_LOG(WARNING) << base_dir.toString() << "."; if (!base_dir.IsDirectory() || !base_dir.Exists()) { From 2258fd4c4ce8ef96c3308d6fb822ac97fda6c0ea Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 11:58:00 -0400 Subject: [PATCH 06/13] added debug symbols in debug mode --- mindspore/lite/minddata/CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mindspore/lite/minddata/CMakeLists.txt b/mindspore/lite/minddata/CMakeLists.txt index 7ffbcdd9c9..fcf6d721a9 100644 --- a/mindspore/lite/minddata/CMakeLists.txt +++ b/mindspore/lite/minddata/CMakeLists.txt @@ -3,7 +3,10 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-deprecated-declarations") -set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s") +set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb") +if (CMAKE_BUILD_TYPE EQUAL "DEBUG") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s") +endif() AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/core MINDDATA_CORE_SRC_FILES) list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES "${MINDDATA_DIR}/core/client.cc") From 6639149e580baa9527a79755db8c7714ae0dfe70 Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 13:36:01 -0400 Subject: [PATCH 07/13] fixed cpplint format --- .../ccsrc/minddata/dataset/api/de_tensor.cc | 5 +- .../ccsrc/minddata/dataset/api/execute.cc | 2 +- .../dataset/engine/opt/util/printer_pass.h | 2 +- .../minddata/dataset/include/de_tensor.h | 73 ++++++++++++------- .../ccsrc/minddata/dataset/include/execute.h | 2 +- .../test/ut/src/dataset/de_tensor_test.cc | 38 +++++----- .../lite/test/ut/src/dataset/eager_test.cc | 30 ++++---- 7 files changed, 89 insertions(+), 63 deletions(-) diff --git a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc index a2f61c899d..9f6148e724 100644 --- a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc +++ b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc @@ -114,7 +114,8 @@ TypeId DETensor::set_data_type(TypeId data_type) { MS_ASSERT(this->tensor_impl_ != nullptr); if (data_type != this->data_type()) { std::shared_ptr temp; - dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type), this->tensor_impl_->GetBuffer(), &temp); + dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type), + this->tensor_impl_->GetBuffer(), &temp); this->tensor_impl_ = temp; } return data_type; @@ -139,7 +140,6 @@ size_t DETensor::set_shape(const std::vector &shape) { std::back_inserter(t_shape), [](int s) -> dataset::dsize_t {return static_cast(s);}); dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape)); - //TODO: what if t_shape has different size? 
return shape.size(); } @@ -180,7 +180,6 @@ size_t DETensor::Size() const { void *DETensor::MutableData() const { MS_ASSERT(this->tensor_impl_ != nullptr); - // TODO: friend the DETensor? return this->tensor_impl_->GetMutableBuffer(); } diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc index 33eb117cb6..6e00aaa25b 100644 --- a/mindspore/ccsrc/minddata/dataset/api/execute.cc +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -25,7 +25,7 @@ namespace api { Execute::Execute(const std::shared_ptr &op) : op_(std::move(op)) {} -std::shared_ptr Execute::operator()(std::shared_ptr input){ +std::shared_ptr Execute::operator()(std::shared_ptr input) { // Build the op if (op_ == nullptr) { MS_LOG(ERROR) << "Input TensorOperation is not valid"; diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h b/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h index 31b444cebd..d469554a93 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/util/printer_pass.h @@ -39,7 +39,7 @@ class PrinterPass : public NodePass { Status RunOnNode(std::shared_ptr node, bool *modified) override; -#ifndef ENABLE_ANDROID +#ifndef ENABLE_ANDROID Status RunOnNode(std::shared_ptr node, bool *modified) override; Status RunOnNode(std::shared_ptr node, bool *modified) override; diff --git a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h index 8980de7abc..749b9d35d9 100644 --- a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h +++ b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h @@ -1,5 +1,24 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + #ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_ #define MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_ +#include +#include +#include #include "include/ms_tensor.h" #include "minddata/dataset/include/tensor.h" #include "minddata/dataset/util/status.h" @@ -7,50 +26,50 @@ namespace mindspore { namespace tensor { class DETensor : public MSTensor { public: - /// \brief Create a MSTensor pointer. - /// \param[data_type] DataTypeId of tensor to be created. - /// \param[shape] Shape of tensor to be created. - /// \return - MSTensor pointer. - static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); + /// \brief Create a MSTensor pointer. + /// \param[data_type] DataTypeId of tensor to be created. + /// \param[shape] Shape of tensor to be created. + /// \return - MSTensor pointer. + static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); - /// \brief Create a MSTensor pointer. - /// \param[path] Path file to be read. - /// \return - MSTensor pointer. - static MSTensor *CreateTensor(const std::string &path); + /// \brief Create a MSTensor pointer. + /// \param[path] Path file to be read. + /// \return - MSTensor pointer. 
+ static MSTensor *CreateTensor(const std::string &path); - DETensor(TypeId data_type, const std::vector &shape); + DETensor(TypeId data_type, const std::vector &shape); - explicit DETensor(std::shared_ptr tensor_ptr); + explicit DETensor(std::shared_ptr tensor_ptr); - ~DETensor() = default; + ~DETensor() = default; - /// \brief Create a duplicate instance, convert the DETensor to the LiteTensor. - /// \return - MSTensor pointer. - MSTensor *ConvertToLiteTensor(); + /// \brief Create a duplicate instance, convert the DETensor to the LiteTensor. + /// \return - MSTensor pointer. + MSTensor *ConvertToLiteTensor(); - std::shared_ptr tensor() const; + std::shared_ptr tensor() const; - TypeId data_type() const override; + TypeId data_type() const override; - TypeId set_data_type(const TypeId data_type) override; + TypeId set_data_type(const TypeId data_type) override; - std::vector shape() const override; + std::vector shape() const override; - size_t set_shape(const std::vector &shape) override; + size_t set_shape(const std::vector &shape) override; - int DimensionSize(size_t index) const override; + int DimensionSize(size_t index) const override; - int ElementsNum() const override; + int ElementsNum() const override; - std::size_t hash() const override; + std::size_t hash() const override; - size_t Size() const override; + size_t Size() const override; - void *MutableData() const override; + void *MutableData() const override; protected: - std::shared_ptr tensor_impl_; + std::shared_ptr tensor_impl_; }; } // namespace tensor } // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_ \ No newline at end of file +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_API_DETENSOR_H_ diff --git a/mindspore/ccsrc/minddata/dataset/include/execute.h b/mindspore/ccsrc/minddata/dataset/include/execute.h index 4d686757ef..b3a7ea297f 100644 --- a/mindspore/ccsrc/minddata/dataset/include/execute.h +++ b/mindspore/ccsrc/minddata/dataset/include/execute.h @@ -33,7 +33,7 @@ namespace api { class Execute { public: /// \brief Constructor - Execute(const std::shared_ptr &op); + explicit Execute(const std::shared_ptr &op); /// \brief callable function to execute the TensorOperation in eager mode /// \param[inout] input - the tensor to be transformed diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc index 0a497a65a5..a2b6bd5f89 100644 --- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc +++ b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc @@ -17,13 +17,18 @@ #include #include "common/common_test.h" #include "gtest/gtest.h" -#include "securec.h" +#include "./securec.h" #include "dataset/core/tensor.h" #include "dataset/core/cv_tensor.h" #include "dataset/core/data_type.h" #include "mindspore/lite/src/ir/tensor.h" -using namespace mindspore::dataset; +using MSTensor = mindspore::tensor::MSTensor; +using DETensor = mindspore::tensor::DETensor; +using LiteTensor = mindspore::lite::tensor::LiteTensor; +using Tensor = mindspore::dataset::Tensor; +using DataType = mindspore::dataset::DataType; +using TensorShape = mindspore::dataset::TensorShape; class MindDataTestTensorDE : public mindspore::Common { public: @@ -32,26 +37,26 @@ class MindDataTestTensorDE : public mindspore::Common { TEST_F(MindDataTestTensorDE, MSTensorBasic) { std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); - ASSERT_EQ(t == 
std::dynamic_pointer_cast(ms_tensor)->tensor(), true); + auto ms_tensor = std::shared_ptr(new DETensor(t)); + ASSERT_EQ(t == std::dynamic_pointer_cast(ms_tensor)->tensor(), true); } TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) { std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); - std::shared_ptr lite_ms_tensor = std::shared_ptr( - std::dynamic_pointer_cast(ms_tensor)->ConvertToLiteTensor()); + auto ms_tensor = std::shared_ptr(new DETensor(t)); + std::shared_ptr lite_ms_tensor = std::shared_ptr( + std::dynamic_pointer_cast(ms_tensor)->ConvertToLiteTensor()); // check if the lite_ms_tensor is the derived LiteTensor - mindspore::lite::tensor::LiteTensor * lite_tensor = static_cast(lite_ms_tensor.get()); + LiteTensor * lite_tensor = static_cast(lite_ms_tensor.get()); ASSERT_EQ(lite_tensor != nullptr, true); } TEST_F(MindDataTestTensorDE, MSTensorShape) { std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + auto ms_tensor = std::shared_ptr(new DETensor(t)); ASSERT_EQ(ms_tensor->DimensionSize(0) == 2, true); ASSERT_EQ(ms_tensor->DimensionSize(1) == 3, true); - ms_tensor->set_shape(std::vector{3,2}); + ms_tensor->set_shape(std::vector{3, 2}); ASSERT_EQ(ms_tensor->DimensionSize(0) == 3, true); ASSERT_EQ(ms_tensor->DimensionSize(1) == 2, true); ms_tensor->set_shape(std::vector{6}); @@ -60,35 +65,34 @@ TEST_F(MindDataTestTensorDE, MSTensorShape) { TEST_F(MindDataTestTensorDE, MSTensorSize) { std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + auto ms_tensor = std::shared_ptr(new DETensor(t)); ASSERT_EQ(ms_tensor->ElementsNum() == 6, true); ASSERT_EQ(ms_tensor->Size() == 24, true); } TEST_F(MindDataTestTensorDE, MSTensorDataType) { std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + auto ms_tensor = std::shared_ptr(new DETensor(t)); ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeFloat32, true); ms_tensor->set_data_type(mindspore::TypeId::kNumberTypeInt32); ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeInt32, true); - ASSERT_EQ(std::dynamic_pointer_cast(ms_tensor)->tensor()->type() == DataType::DE_INT32, true); + ASSERT_EQ(std::dynamic_pointer_cast(ms_tensor)->tensor()->type() == DataType::DE_INT32, true); } TEST_F(MindDataTestTensorDE, MSTensorMutableData) { std::vector x = {2.5, 2.5, 2.5, 2.5}; std::shared_ptr t; Tensor::CreateFromVector(x, TensorShape({2, 2}), &t); - auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + auto ms_tensor = std::shared_ptr(new DETensor(t)); float *data = static_cast(ms_tensor->MutableData()); std::vector tensor_vec(data, data + ms_tensor->ElementsNum()); ASSERT_EQ(x == tensor_vec, true); - // TODO: add set_data_type after implmenting it } TEST_F(MindDataTestTensorDE, MSTensorHash) { std::vector x = {2.5, 2.5, 2.5, 2.5}; std::shared_ptr t; Tensor::CreateFromVector(x, TensorShape({2, 2}), &t); - auto ms_tensor = std::shared_ptr(new mindspore::tensor::DETensor(t)); + auto ms_tensor = std::shared_ptr(new DETensor(t)); ASSERT_EQ(ms_tensor->hash() == 11093771382437, true); -} \ No newline at end of file +} diff --git 
a/mindspore/lite/test/ut/src/dataset/eager_test.cc b/mindspore/lite/test/ut/src/dataset/eager_test.cc index 3a36a5194f..ffc271a981 100644 --- a/mindspore/lite/test/ut/src/dataset/eager_test.cc +++ b/mindspore/lite/test/ut/src/dataset/eager_test.cc @@ -16,16 +16,20 @@ #include #include "common/common_test.h" #include "gtest/gtest.h" -#include "securec.h" +#include "./securec.h" #include "minddata/dataset/core/tensor.h" #include "minddata/dataset/core/config_manager.h" #include "minddata/dataset/include/datasets.h" #include "minddata/dataset/include/execute.h" #include "minddata/dataset/util/path.h" -using namespace mindspore::dataset; -using namespace mindspore::dataset::api; -using namespace mindspore; +using MSTensor = mindspore::tensor::MSTensor; +using DETensor = mindspore::tensor::DETensor; +using mindspore::dataset::api::vision::Decode; +using mindspore::dataset::api::vision::Normalize; +using mindspore::dataset::api::vision::Resize; +using Execute = mindspore::dataset::api::Execute; +using Path = mindspore::dataset::Path; class MindDataTestEager : public mindspore::Common { public: @@ -33,7 +37,7 @@ class MindDataTestEager : public mindspore::Common { }; TEST_F(MindDataTestEager, Test1) { -#ifdef ENABLE_ARM64 || ENABLE_ARM32 +#if defined(ENABLE_ARM64) || defined(ENABLE_ARM32) std::string in_dir = "/sdcard/data/testPK/data/class1"; #else std::string in_dir = "data/testPK/data/class1"; @@ -47,20 +51,20 @@ TEST_F(MindDataTestEager, Test1) { // check if output_dir exists and create it if it does not exist // iterate over in dir and create json for all images - auto dir_it = Path::DirIterator::OpenDirectory(&base_dir); + auto dir_it = Path::DirIterator::OpenDirectory(&base_dir); while (dir_it->hasNext()) { Path v = dir_it->next(); MS_LOG(WARNING) << v.toString() << "."; - std::shared_ptr image = std::shared_ptr(tensor::DETensor::CreateTensor(v.toString())); - - image = Execute(vision::Decode())(image); + std::shared_ptr image = std::shared_ptr(DETensor::CreateTensor(v.toString())); + + image = Execute(Decode())(image); EXPECT_TRUE(image != nullptr); - image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image); + image = Execute(Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image); EXPECT_TRUE(image != nullptr); - image = Execute(vision::Resize({224, 224}))(image); + image = Execute(Resize({224, 224}))(image); EXPECT_TRUE(image != nullptr); - EXPECT_TRUE(image->DimensionSize(0) == 224); - EXPECT_TRUE(image->DimensionSize(1) == 224); + EXPECT_EQ(image->DimensionSize(0), 224); + EXPECT_EQ(image->DimensionSize(1), 224); } auto t_end = std::chrono::high_resolution_clock::now(); double elapsed_time_ms = std::chrono::duration(t_end-t_start).count(); From f92d68456dec30cdcfa16f49eedfbacfcaa89c5c Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 13:52:23 -0400 Subject: [PATCH 08/13] fixed clang format --- .../ccsrc/minddata/dataset/api/de_tensor.cc | 239 +++++++++--------- .../ccsrc/minddata/dataset/api/execute.cc | 1 - .../ccsrc/minddata/dataset/include/execute.h | 1 - 3 files changed, 117 insertions(+), 124 deletions(-) diff --git a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc index 9f6148e724..23ac1dd1c8 100644 --- a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc +++ b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc @@ -8,169 +8,164 @@ namespace mindspore { namespace tensor { dataset::DataType MSTypeToDEType(TypeId data_type) { - switch (data_type) { - case kNumberTypeBool: - return 
dataset::DataType(dataset::DataType::DE_BOOL); - case kNumberTypeInt8: - return dataset::DataType(dataset::DataType::DE_INT8); - case kNumberTypeUInt8: - return dataset::DataType(dataset::DataType::DE_UINT8); - case kNumberTypeInt16: - return dataset::DataType(dataset::DataType::DE_INT16); - case kNumberTypeUInt16: - return dataset::DataType(dataset::DataType::DE_UINT16); - case kNumberTypeInt32: - return dataset::DataType(dataset::DataType::DE_INT32); - case kNumberTypeUInt32: - return dataset::DataType(dataset::DataType::DE_UINT32); - case kNumberTypeInt64: - return dataset::DataType(dataset::DataType::DE_INT64); - case kNumberTypeUInt64: - return dataset::DataType(dataset::DataType::DE_UINT64); - case kNumberTypeFloat16: - return dataset::DataType(dataset::DataType::DE_FLOAT16); - case kNumberTypeFloat32: - return dataset::DataType(dataset::DataType::DE_FLOAT32); - case kNumberTypeFloat64: - return dataset::DataType(dataset::DataType::DE_FLOAT64); - default: - // maybe throw? - return dataset::DataType(dataset::DataType::DE_UNKNOWN); - } + switch (data_type) { + case kNumberTypeBool: + return dataset::DataType(dataset::DataType::DE_BOOL); + case kNumberTypeInt8: + return dataset::DataType(dataset::DataType::DE_INT8); + case kNumberTypeUInt8: + return dataset::DataType(dataset::DataType::DE_UINT8); + case kNumberTypeInt16: + return dataset::DataType(dataset::DataType::DE_INT16); + case kNumberTypeUInt16: + return dataset::DataType(dataset::DataType::DE_UINT16); + case kNumberTypeInt32: + return dataset::DataType(dataset::DataType::DE_INT32); + case kNumberTypeUInt32: + return dataset::DataType(dataset::DataType::DE_UINT32); + case kNumberTypeInt64: + return dataset::DataType(dataset::DataType::DE_INT64); + case kNumberTypeUInt64: + return dataset::DataType(dataset::DataType::DE_UINT64); + case kNumberTypeFloat16: + return dataset::DataType(dataset::DataType::DE_FLOAT16); + case kNumberTypeFloat32: + return dataset::DataType(dataset::DataType::DE_FLOAT32); + case kNumberTypeFloat64: + return dataset::DataType(dataset::DataType::DE_FLOAT64); + default: + return dataset::DataType(dataset::DataType::DE_UNKNOWN); + } } TypeId DETypeToMSType(dataset::DataType data_type) { - switch (data_type.value()) { - case dataset::DataType::DE_BOOL: - return mindspore::TypeId::kNumberTypeBool; - case dataset::DataType::DE_INT8: - return mindspore::TypeId::kNumberTypeInt8; - case dataset::DataType::DE_UINT8: - return mindspore::TypeId::kNumberTypeUInt8; - case dataset::DataType::DE_INT16: - return mindspore::TypeId::kNumberTypeInt16; - case dataset::DataType::DE_UINT16: - return mindspore::TypeId::kNumberTypeUInt16; - case dataset::DataType::DE_INT32: - return mindspore::TypeId::kNumberTypeInt32; - case dataset::DataType::DE_UINT32: - return mindspore::TypeId::kNumberTypeUInt32; - case dataset::DataType::DE_INT64: - return mindspore::TypeId::kNumberTypeInt64; - case dataset::DataType::DE_UINT64: - return mindspore::TypeId::kNumberTypeUInt64; - case dataset::DataType::DE_FLOAT16: - return mindspore::TypeId::kNumberTypeFloat16; - case dataset::DataType::DE_FLOAT32: - return mindspore::TypeId::kNumberTypeFloat32; - case dataset::DataType::DE_FLOAT64: - return mindspore::TypeId::kNumberTypeFloat64; - default: - // maybe throw? 
- return kTypeUnknown; - } + switch (data_type.value()) { + case dataset::DataType::DE_BOOL: + return mindspore::TypeId::kNumberTypeBool; + case dataset::DataType::DE_INT8: + return mindspore::TypeId::kNumberTypeInt8; + case dataset::DataType::DE_UINT8: + return mindspore::TypeId::kNumberTypeUInt8; + case dataset::DataType::DE_INT16: + return mindspore::TypeId::kNumberTypeInt16; + case dataset::DataType::DE_UINT16: + return mindspore::TypeId::kNumberTypeUInt16; + case dataset::DataType::DE_INT32: + return mindspore::TypeId::kNumberTypeInt32; + case dataset::DataType::DE_UINT32: + return mindspore::TypeId::kNumberTypeUInt32; + case dataset::DataType::DE_INT64: + return mindspore::TypeId::kNumberTypeInt64; + case dataset::DataType::DE_UINT64: + return mindspore::TypeId::kNumberTypeUInt64; + case dataset::DataType::DE_FLOAT16: + return mindspore::TypeId::kNumberTypeFloat16; + case dataset::DataType::DE_FLOAT32: + return mindspore::TypeId::kNumberTypeFloat32; + case dataset::DataType::DE_FLOAT64: + return mindspore::TypeId::kNumberTypeFloat64; + default: + return kTypeUnknown; + } } MSTensor *DETensor::CreateTensor(TypeId data_type, const std::vector &shape) { - return new DETensor(data_type, shape); + return new DETensor(data_type, shape); } MSTensor *DETensor::CreateTensor(const std::string &path) { - std::shared_ptr t; - (void) dataset::Tensor::CreateFromFile(path, &t); - return new DETensor(std::move(t)); + std::shared_ptr t; + (void)dataset::Tensor::CreateFromFile(path, &t); + return new DETensor(std::move(t)); } DETensor::DETensor(TypeId data_type, const std::vector &shape) { - std::vector t_shape; - t_shape.reserve(shape.size()); - std::transform(shape.begin(), shape.end(), - std::back_inserter(t_shape), - [](int s) -> dataset::dsize_t {return static_cast(s);}); - dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), MSTypeToDEType(data_type), &this->tensor_impl_); + std::vector t_shape; + t_shape.reserve(shape.size()); + std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape), + [](int s) -> dataset::dsize_t { return static_cast(s); }); + dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), MSTypeToDEType(data_type), &this->tensor_impl_); } DETensor::DETensor(std::shared_ptr tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); } MSTensor *DETensor::ConvertToLiteTensor() { - // static MSTensor::CreateTensor is only for the LiteTensor - MSTensor *tensor = MSTensor::CreateTensor(this->data_type(), this->shape()); - MS_ASSERT(tensor->Size() == this->Size()); - memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size()); - return tensor; + // static MSTensor::CreateTensor is only for the LiteTensor + MSTensor *tensor = MSTensor::CreateTensor(this->data_type(), this->shape()); + MS_ASSERT(tensor->Size() == this->Size()); + memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size()); + return tensor; } std::shared_ptr DETensor::tensor() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_; + MS_ASSERT(this->tensor_impl_ != nullptr); + return this->tensor_impl_; } TypeId DETensor::data_type() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return DETypeToMSType(this->tensor_impl_->type()); + MS_ASSERT(this->tensor_impl_ != nullptr); + return DETypeToMSType(this->tensor_impl_->type()); } TypeId DETensor::set_data_type(TypeId data_type) { - MS_ASSERT(this->tensor_impl_ != nullptr); - if (data_type != this->data_type()) { - std::shared_ptr temp; - 
dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type), - this->tensor_impl_->GetBuffer(), &temp); - this->tensor_impl_ = temp; - } - return data_type; + MS_ASSERT(this->tensor_impl_ != nullptr); + if (data_type != this->data_type()) { + std::shared_ptr temp; + dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), MSTypeToDEType(data_type), + this->tensor_impl_->GetBuffer(), &temp); + this->tensor_impl_ = temp; + } + return data_type; } std::vector DETensor::shape() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - std::vector t_shape = this->tensor_impl_->shape().AsVector(); - std::vector shape; - shape.reserve(t_shape.size()); - std::transform(t_shape.begin(), t_shape.end(), - std::back_inserter(shape), - [](dataset::dsize_t s) -> int {return static_cast(s);}); - return shape; + MS_ASSERT(this->tensor_impl_ != nullptr); + std::vector t_shape = this->tensor_impl_->shape().AsVector(); + std::vector shape; + shape.reserve(t_shape.size()); + std::transform(t_shape.begin(), t_shape.end(), std::back_inserter(shape), + [](dataset::dsize_t s) -> int { return static_cast(s); }); + return shape; } size_t DETensor::set_shape(const std::vector &shape) { - MS_ASSERT(this->tensor_impl_ != nullptr); - std::vector t_shape; - t_shape.reserve(shape.size()); - std::transform(shape.begin(), shape.end(), - std::back_inserter(t_shape), - [](int s) -> dataset::dsize_t {return static_cast(s);}); - dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape)); - return shape.size(); + MS_ASSERT(this->tensor_impl_ != nullptr); + std::vector t_shape; + t_shape.reserve(shape.size()); + std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape), + [](int s) -> dataset::dsize_t { return static_cast(s); }); + dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape)); + return shape.size(); } int DETensor::DimensionSize(size_t index) const { - MS_ASSERT(this->tensor_impl_ != nullptr); - int dim_size = -1; - auto shape = this->shape(); - if (index < shape.size()) { - dim_size = shape[index]; - } else { - MS_LOG(ERROR) << "Dimension index is wrong: " << index; - } - return dim_size; + MS_ASSERT(this->tensor_impl_ != nullptr); + int dim_size = -1; + auto shape = this->shape(); + if (index < shape.size()) { + dim_size = shape[index]; + } else { + MS_LOG(ERROR) << "Dimension index is wrong: " << index; + } + return dim_size; } int DETensor::ElementsNum() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->Size(); + MS_ASSERT(this->tensor_impl_ != nullptr); + return this->tensor_impl_->Size(); } std::size_t DETensor::hash() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - auto shape = this->shape(); - std::size_t hash_value = std::hash{}(SizeToInt(this->data_type())); - hash_value = hash_combine(hash_value, std::hash{}(shape.size())); - // hash all elements may costly, so only take at most 4 elements into account based on - // some experiments. - for (size_t i = 0; (i < shape.size()) && (i < 4); ++i) { - hash_value = hash_combine(hash_value, (std::hash{}(shape[i]))); - } - return hash_value; + MS_ASSERT(this->tensor_impl_ != nullptr); + auto shape = this->shape(); + std::size_t hash_value = std::hash{}(SizeToInt(this->data_type())); + hash_value = hash_combine(hash_value, std::hash{}(shape.size())); + // hash all elements may costly, so only take at most 4 elements into account based on + // some experiments. 
+ for (size_t i = 0; (i < shape.size()) && (i < 4); ++i) { + hash_value = hash_combine(hash_value, (std::hash{}(shape[i]))); + } + return hash_value; } size_t DETensor::Size() const { diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc index 6e00aaa25b..bf34529fe9 100644 --- a/mindspore/ccsrc/minddata/dataset/api/execute.cc +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -49,7 +49,6 @@ std::shared_ptr Execute::operator()(std::shared_ptr(std::move(de_output)); } - } // namespace api } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/include/execute.h b/mindspore/ccsrc/minddata/dataset/include/execute.h index b3a7ea297f..c2028fbb12 100644 --- a/mindspore/ccsrc/minddata/dataset/include/execute.h +++ b/mindspore/ccsrc/minddata/dataset/include/execute.h @@ -44,7 +44,6 @@ class Execute { std::shared_ptr op_; }; - } // namespace api } // namespace dataset } // namespace mindspore From a4ef5b26a17092b33071c9401c51ea365a3d322c Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 13:55:54 -0400 Subject: [PATCH 09/13] fixed cpplint format --- .../ccsrc/minddata/dataset/api/de_tensor.cc | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc index 23ac1dd1c8..efb2dbdf97 100644 --- a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc +++ b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc @@ -1,3 +1,19 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + #include "minddata/dataset/include/de_tensor.h" #include "minddata/dataset/core/constants.h" #include "minddata/dataset/core/data_type.h" @@ -179,4 +195,4 @@ void *DETensor::MutableData() const { } } // namespace tensor -} // namespace mindspore \ No newline at end of file +} // namespace mindspore From a53678894a70acaafb682a2d8bbff70435c05a34 Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 14:02:34 -0400 Subject: [PATCH 10/13] fixed shell format --- build.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/build.sh b/build.sh index af8cd3164a..4718c45b75 100755 --- a/build.sh +++ b/build.sh @@ -548,15 +548,15 @@ build_minddata_lite_deps() { echo "start build minddata lite project" if [[ "${LITE_PLATFORM}" == "arm64" ]]; then - CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" -DANDROID_NATIVE_API_LEVEL="19" \ - -DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="arm64-v8a" -DANDROID_TOOLCHAIN_NAME="aarch64-linux-android-clang" \ - -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE}" + CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DANDROID_NATIVE_API_LEVEL=19 \ + -DANDROID_NDK=${ANDROID_NDK} -DANDROID_ABI=arm64-v8a -DANDROID_TOOLCHAIN_NAME=aarch64-linux-android-clang \ + -DANDROID_STL=c++_shared -DCMAKE_BUILD_TYPE=${BUILD_TYPE}" elif [[ "${LITE_PLATFORM}" == "arm32" ]]; then - CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" -DANDROID_NATIVE_API_LEVEL="19" \ - -DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="armeabi-v7a" -DANDROID_TOOLCHAIN_NAME="clang" \ - -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE}" + CMAKE_MINDDATA_ARGS="-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DANDROID_NATIVE_API_LEVEL=19 \ + -DANDROID_NDK=${ANDROID_NDK} -DANDROID_ABI=armeabi-v7a -DANDROID_TOOLCHAIN_NAME=clang \ + -DANDROID_STL=c++_shared -DCMAKE_BUILD_TYPE=${BUILD_TYPE}" else - CMAKE_MINDDATA_ARGS="-DCMAKE_BUILD_TYPE=${BUILD_TYPE} " + CMAKE_MINDDATA_ARGS="-DCMAKE_BUILD_TYPE=${BUILD_TYPE}" fi build_opencv build_eigen From 6af4e7c2266a5d62db7e4d49cd3405670a55fb7c Mon Sep 17 00:00:00 2001 From: Eric Date: Tue, 4 Aug 2020 14:00:40 -0400 Subject: [PATCH 11/13] Addressing comments --- mindspore/ccsrc/minddata/dataset/api/execute.cc | 2 +- mindspore/ccsrc/minddata/dataset/include/execute.h | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc index bf34529fe9..548bb8866b 100644 --- a/mindspore/ccsrc/minddata/dataset/api/execute.cc +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -23,7 +23,7 @@ namespace mindspore { namespace dataset { namespace api { -Execute::Execute(const std::shared_ptr &op) : op_(std::move(op)) {} +Execute::Execute(std::shared_ptr op) : op_(std::move(op)) {} std::shared_ptr Execute::operator()(std::shared_ptr input) { // Build the op diff --git a/mindspore/ccsrc/minddata/dataset/include/execute.h b/mindspore/ccsrc/minddata/dataset/include/execute.h index c2028fbb12..53d6ee5572 100644 --- a/mindspore/ccsrc/minddata/dataset/include/execute.h +++ b/mindspore/ccsrc/minddata/dataset/include/execute.h @@ -30,10 +30,11 @@ class TensorOp; namespace api { +// class to run tensor operations in eager mode class Execute { public: /// \brief Constructor - explicit Execute(const std::shared_ptr &op); + explicit Execute(std::shared_ptr op); /// \brief 
callable function to execute the TensorOperation in eager mode /// \param[inout] input - the tensor to be transformed From 1f13b3ef6de2e553023f830b1b255e5f25a1973f Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 14:15:05 -0400 Subject: [PATCH 12/13] exclude de_tensor in normal build --- mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt | 2 -- mindspore/ccsrc/minddata/dataset/core/tensor.h | 7 ++++++- mindspore/ccsrc/minddata/dataset/include/tensor.h | 7 ++++++- mindspore/lite/CMakeLists.txt | 1 - 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt index 93de69aad2..ae0b9cc28e 100644 --- a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt @@ -13,6 +13,4 @@ add_library(cpp-API OBJECT iterator.cc transforms.cc samplers.cc - de_tensor.cc - execute.cc ) diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.h b/mindspore/ccsrc/minddata/dataset/core/tensor.h index 89c69e318b..85ed96215d 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.h @@ -38,18 +38,21 @@ #include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_shape.h" #include "minddata/dataset/util/status.h" -#include "minddata/dataset/include/de_tensor.h" #ifndef ENABLE_ANDROID #include "proto/example.pb.h" +#else +#include "minddata/dataset/include/de_tensor.h" #endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { +#ifdef ENABLE_ANDROID namespace tensor { class DETensor; } // namespace tensor +#endif namespace dataset { class Tensor; template @@ -61,7 +64,9 @@ using offset_t = uint32_t; // type of offset va using TensorPtr = std::shared_ptr; class Tensor { +#ifdef ENABLE_ANDROID friend class tensor::DETensor; +#endif public: Tensor() = delete; Tensor(const Tensor &other) = delete; diff --git a/mindspore/ccsrc/minddata/dataset/include/tensor.h b/mindspore/ccsrc/minddata/dataset/include/tensor.h index 89c69e318b..85ed96215d 100644 --- a/mindspore/ccsrc/minddata/dataset/include/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/include/tensor.h @@ -38,18 +38,21 @@ #include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_shape.h" #include "minddata/dataset/util/status.h" -#include "minddata/dataset/include/de_tensor.h" #ifndef ENABLE_ANDROID #include "proto/example.pb.h" +#else +#include "minddata/dataset/include/de_tensor.h" #endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { +#ifdef ENABLE_ANDROID namespace tensor { class DETensor; } // namespace tensor +#endif namespace dataset { class Tensor; template @@ -61,7 +64,9 @@ using offset_t = uint32_t; // type of offset va using TensorPtr = std::shared_ptr; class Tensor { +#ifdef ENABLE_ANDROID friend class tensor::DETensor; +#endif public: Tensor() = delete; Tensor(const Tensor &other) = delete; diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt index cf48032b2d..1a7e4d1588 100644 --- a/mindspore/lite/CMakeLists.txt +++ b/mindspore/lite/CMakeLists.txt @@ -138,7 +138,6 @@ if (BUILD_MINDDATA) include_directories(${TOP_DIR}/third_party/libjpeg-turbo/include) add_compile_definitions(ENABLE_ANDROID) - add_compile_definitions(ENABLE_EAGER) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/minddata) endif() From 19b2216610cbf1bfe569327757c1c2683daebd3c Mon Sep 17 00:00:00 2001 From: ervinzhang Date: Tue, 4 Aug 2020 
14:35:31 -0400 Subject: [PATCH 13/13] fixed cpplint format --- mindspore/ccsrc/minddata/dataset/core/tensor.h | 6 +++--- mindspore/ccsrc/minddata/dataset/include/tensor.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.h b/mindspore/ccsrc/minddata/dataset/core/tensor.h index 85ed96215d..ff58046553 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.h @@ -64,9 +64,6 @@ using offset_t = uint32_t; // type of offset va using TensorPtr = std::shared_ptr; class Tensor { -#ifdef ENABLE_ANDROID - friend class tensor::DETensor; -#endif public: Tensor() = delete; Tensor(const Tensor &other) = delete; @@ -663,6 +660,9 @@ class Tensor { unsigned char *data_end_ = nullptr; private: +#ifdef ENABLE_ANDROID + friend class tensor::DETensor; +#endif /// Copy raw data of a array based on shape and strides to the destination pointer /// \param dst [out] Pointer to the destination array where the content is to be copied /// \param[in] src Pointer to the source of strided array to be copied diff --git a/mindspore/ccsrc/minddata/dataset/include/tensor.h b/mindspore/ccsrc/minddata/dataset/include/tensor.h index 85ed96215d..ff58046553 100644 --- a/mindspore/ccsrc/minddata/dataset/include/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/include/tensor.h @@ -64,9 +64,6 @@ using offset_t = uint32_t; // type of offset va using TensorPtr = std::shared_ptr; class Tensor { -#ifdef ENABLE_ANDROID - friend class tensor::DETensor; -#endif public: Tensor() = delete; Tensor(const Tensor &other) = delete; @@ -663,6 +660,9 @@ class Tensor { unsigned char *data_end_ = nullptr; private: +#ifdef ENABLE_ANDROID + friend class tensor::DETensor; +#endif /// Copy raw data of a array based on shape and strides to the destination pointer /// \param dst [out] Pointer to the destination array where the content is to be copied /// \param[in] src Pointer to the source of strided array to be copied
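
Taken together, the patches above leave MindSpore Lite with a small eager-mode preprocessing path: DETensor wraps a dataset::Tensor behind the MSTensor interface, and Execute applies a single TensorOperation to such a tensor without constructing a dataset pipeline. The C++ sketch below only restates the flow exercised in eager_test.cc; the exact header set, the template arguments, and the helper name LoadAndPreprocess are assumptions made for illustration and are not code from the patches.

// Usage sketch (not part of the patch series): eager preprocessing with the
// DETensor / Execute API added above. Includes, template arguments and the
// helper name LoadAndPreprocess are reconstructed and may differ slightly.
#include <memory>
#include <string>
#include "minddata/dataset/include/datasets.h"
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/de_tensor.h"

using MSTensor = mindspore::tensor::MSTensor;
using DETensor = mindspore::tensor::DETensor;
using Execute = mindspore::dataset::api::Execute;
namespace vision = mindspore::dataset::api::vision;

std::shared_ptr<MSTensor> LoadAndPreprocess(const std::string &image_path) {
  // Read the encoded image file into a DETensor exposed through MSTensor.
  std::shared_ptr<MSTensor> image(DETensor::CreateTensor(image_path));
  // Each Execute wraps one TensorOperation and applies it eagerly, returning
  // a new MSTensor, so calls chain without building an execution tree.
  image = Execute(vision::Decode())(image);
  image = Execute(vision::Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image);
  image = Execute(vision::Resize({224, 224}))(image);
  return image;  // decoded, normalized, resized to 224 x 224
}

As in de_tensor_test.cc, the result can then be downcast to DETensor and handed to ConvertToLiteTensor() when a LiteTensor copy is needed for inference.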