From 7d2fd6e76c56985a78380b7228628374a63b3195 Mon Sep 17 00:00:00 2001 From: lixian Date: Tue, 26 Jan 2021 17:06:34 +0800 Subject: [PATCH] cxx api refactor: tensor/status/model --- cmake/package.cmake | 4 +- cmake/package_lite.cmake | 8 + include/api/cell.h | 28 +- include/api/context.h | 53 ++- include/api/data_type.h | 43 +++ include/api/graph.h | 5 +- include/api/lite_context.h | 77 +++++ include/api/model.h | 23 +- include/api/ops/ops.h | 2 - include/api/serialization.h | 3 +- include/api/status.h | 120 ++++++- include/api/types.h | 79 ++--- mindspore/ccsrc/cxx_api/CMakeLists.txt | 9 +- mindspore/ccsrc/cxx_api/cell.cc | 35 +- mindspore/ccsrc/cxx_api/context.cc | 136 ++++++-- mindspore/ccsrc/cxx_api/factory.h | 4 +- .../ccsrc/cxx_api/graph/acl/acl_env_guard.cc | 8 +- .../ccsrc/cxx_api/graph/acl/acl_env_guard.h | 6 +- .../ccsrc/cxx_api/graph/acl/acl_graph_impl.cc | 85 +++-- .../ccsrc/cxx_api/graph/acl/acl_graph_impl.h | 12 +- .../ccsrc/cxx_api/graph/acl/model_process.cc | 230 +++++++------ .../ccsrc/cxx_api/graph/acl/model_process.h | 21 +- .../cxx_api/graph/ascend/ascend_graph_impl.cc | 296 +++++++++-------- .../cxx_api/graph/ascend/ascend_graph_impl.h | 42 ++- .../ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc | 168 +++++----- .../ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h | 21 +- mindspore/ccsrc/cxx_api/graph/graph.cc | 8 +- mindspore/ccsrc/cxx_api/graph/graph_data.cc | 4 +- mindspore/ccsrc/cxx_api/graph/graph_data.h | 4 +- mindspore/ccsrc/cxx_api/graph/graph_impl.h | 13 +- mindspore/ccsrc/cxx_api/graph/graph_utils.h | 63 ---- .../ccsrc/cxx_api/model/acl/acl_model.cc | 145 ++++++--- mindspore/ccsrc/cxx_api/model/acl/acl_model.h | 23 +- .../cxx_api/model/acl/acl_model_options.cc | 52 ++- .../cxx_api/model/acl/acl_model_options.h | 10 +- .../cxx_api/model/acl/model_converter.cc | 77 +++-- .../ccsrc/cxx_api/model/acl/model_converter.h | 5 +- mindspore/ccsrc/cxx_api/model/model.cc | 40 ++- .../model_converter_utils/multi_process.cc | 23 +- 
.../model_converter_utils/multi_process.h | 3 - .../model_converter_utils/shared_memory.cc | 10 +- .../model_converter_utils/shared_memory.h | 3 - mindspore/ccsrc/cxx_api/model/model_impl.h | 25 +- mindspore/ccsrc/cxx_api/model/ms/ms_model.cc | 123 +++++-- mindspore/ccsrc/cxx_api/model/ms/ms_model.h | 18 +- mindspore/ccsrc/cxx_api/ops/ops.cc | 4 +- mindspore/ccsrc/cxx_api/python_utils.cc | 4 +- mindspore/ccsrc/cxx_api/python_utils.h | 4 +- mindspore/ccsrc/cxx_api/serialization.cc | 30 +- mindspore/ccsrc/cxx_api/types.cc | 205 +++++++----- .../ccsrc/minddata/dataset/CMakeLists.txt | 9 +- .../ccsrc/minddata/dataset/api/CMakeLists.txt | 35 +- .../ccsrc/minddata/dataset/api/de_tensor.cc | 142 -------- .../ccsrc/minddata/dataset/api/execute.cc | 130 ++++---- .../minddata/dataset/api/minddata_eager.cc | 154 --------- .../dataset/include/execute_binding.cc | 45 ++- .../bindings/dataset/kernels/data/bindings.cc | 3 +- .../dataset/kernels/ir/image/bindings.cc | 6 +- .../dataset/callback/py_ds_callback.cc | 4 +- .../minddata/dataset/core/CMakeLists.txt | 1 + .../ccsrc/minddata/dataset/core/de_tensor.cc | 67 ++++ .../ccsrc/minddata/dataset/core/de_tensor.h | 59 ++++ .../ccsrc/minddata/dataset/core/tensor.h | 20 +- .../dataset/engine/cache/CMakeLists.txt | 3 + .../dataset/engine/cache/cache_admin.cc | 3 +- .../dataset/engine/cache/cache_admin_arg.cc | 44 +-- .../dataset/engine/cache/cache_arena.cc | 2 +- .../dataset/engine/cache/cache_client.cc | 17 +- .../dataset/engine/cache/cache_common.h | 2 +- .../dataset/engine/cache/cache_fbb.cc | 2 +- .../dataset/engine/cache/cache_grpc_client.cc | 4 +- .../dataset/engine/cache/cache_ipc.cc | 4 +- .../dataset/engine/cache/cache_main.cc | 21 +- .../dataset/engine/cache/cache_numa.cc | 6 +- .../dataset/engine/cache/cache_pool.cc | 6 +- .../dataset/engine/cache/cache_request.cc | 4 +- .../dataset/engine/cache/cache_server.cc | 58 ++-- .../dataset/engine/cache/cache_server.h | 2 +- .../dataset/engine/cache/cache_service.cc | 20 +- 
.../dataset/engine/cache/perf/cache_msg.cc | 2 +- .../dataset/engine/cache/perf/cache_perf.cc | 2 +- .../engine/cache/perf/cache_perf_run.h | 4 +- .../engine/cache/perf/cache_pipeline.cc | 4 +- .../engine/cache/perf/cache_pipeline_run.h | 6 +- .../dataset/engine/cache/storage_container.cc | 4 +- .../dataset/engine/cache/storage_manager.cc | 4 +- .../minddata/dataset/engine/data_schema.cc | 12 +- .../dataset/engine/dataset_iterator.cc | 2 +- .../dataset/engine/datasetops/barrier_op.cc | 11 +- .../dataset/engine/datasetops/batch_op.cc | 20 +- .../datasetops/bucket_batch_by_length_op.cc | 2 +- .../build_sentence_piece_vocab_op.cc | 8 +- .../engine/datasetops/build_vocab_op.cc | 2 +- .../engine/datasetops/cache_base_op.cc | 6 +- .../engine/datasetops/cache_lookup_op.cc | 6 +- .../engine/datasetops/cache_merge_op.cc | 16 +- .../dataset/engine/datasetops/cache_op.cc | 14 +- .../engine/datasetops/device_queue_op.cc | 31 +- .../engine/datasetops/device_queue_op.h | 1 + .../dataset/engine/datasetops/filter_op.cc | 6 +- .../engine/datasetops/map_op/map_op.cc | 2 +- .../dataset/engine/datasetops/shuffle_op.cc | 4 +- .../engine/datasetops/source/album_op.cc | 4 +- .../engine/datasetops/source/celeba_op.cc | 12 +- .../engine/datasetops/source/cifar_op.cc | 2 +- .../engine/datasetops/source/clue_op.cc | 2 +- .../engine/datasetops/source/coco_op.cc | 2 +- .../engine/datasetops/source/csv_op.cc | 2 +- .../engine/datasetops/source/generator_op.cc | 22 +- .../datasetops/source/image_folder_op.cc | 2 +- .../engine/datasetops/source/manifest_op.cc | 2 +- .../engine/datasetops/source/mindrecord_op.cc | 2 +- .../engine/datasetops/source/mnist_op.cc | 2 +- .../datasetops/source/random_data_op.cc | 6 +- .../source/sampler/python_sampler.cc | 14 +- .../datasetops/source/sampler/sampler.cc | 4 +- .../source/sampler/weighted_random_sampler.cc | 4 +- .../engine/datasetops/source/text_file_op.cc | 2 +- .../engine/datasetops/source/tf_reader_op.cc | 2 +- .../engine/datasetops/source/voc_op.cc | 
2 +- .../dataset/engine/datasetops/zip_op.cc | 6 +- .../minddata/dataset/engine/db_connector.h | 4 +- .../ir/datasetops/source/tf_record_node.cc | 12 +- .../ccsrc/minddata/dataset/engine/opt/pass.cc | 8 +- .../engine/opt/pre/cache_transform_pass.cc | 10 +- .../engine/perf/connector_throughput.cc | 2 +- .../dataset/engine/perf/cpu_sampling.cc | 18 +- .../engine/perf/dataset_iterator_tracing.cc | 2 +- .../engine/perf/device_queue_tracing.cc | 2 +- .../minddata/dataset/engine/perf/profiling.cc | 8 +- .../minddata/dataset/include/de_tensor.h | 82 ----- .../ccsrc/minddata/dataset/include/execute.h | 35 +- .../minddata/dataset/include/minddata_eager.h | 62 ---- .../ccsrc/minddata/dataset/include/status.h | 132 ++------ .../ccsrc/minddata/dataset/include/tensor.h | 23 +- .../ccsrc/minddata/dataset/include/type_id.h | 5 + .../dataset/kernels/data/one_hot_op.cc | 2 +- .../dataset/kernels/image/bounding_box.cc | 10 +- .../dataset/kernels/image/center_crop_op.cc | 2 +- .../minddata/dataset/kernels/image/crop_op.cc | 2 +- .../dataset/kernels/image/decode_op.cc | 2 +- .../dvpp/dvpp_decode_resize_crop_jpeg_op.cc | 212 ++++++------ .../dvpp/dvpp_decode_resize_crop_jpeg_op.h | 120 +++---- .../image/dvpp/utils/ResourceManager.cc | 4 +- .../image/dvpp/utils/ResourceManager.h | 2 +- .../dataset/kernels/image/hwc_to_chw_op.cc | 2 +- .../dataset/kernels/image/image_utils.cc | 12 +- .../dataset/kernels/image/lite_image_utils.cc | 6 +- .../minddata/dataset/kernels/image/pad_op.cc | 2 +- .../image/random_crop_and_resize_op.cc | 2 +- .../dataset/kernels/image/random_crop_op.cc | 6 +- .../kernels/image/random_rotation_op.cc | 2 +- .../dataset/kernels/image/resize_op.cc | 2 +- .../soft_dvpp_decode_resize_jpeg_op.cc | 2 +- .../minddata/dataset/kernels/py_func_op.cc | 14 +- .../minddata/dataset/kernels/tensor_op.cc | 10 +- .../text/kernels/jieba_tokenizer_op.cc | 2 +- .../kernels/sentence_piece_tokenizer_op.cc | 4 +- .../dataset/text/sentence_piece_vocab.cc | 2 +- 
.../ccsrc/minddata/dataset/util/allocator.h | 6 +- .../ccsrc/minddata/dataset/util/arena.cc | 12 +- mindspore/ccsrc/minddata/dataset/util/btree.h | 6 +- .../ccsrc/minddata/dataset/util/buddy.cc | 12 +- .../minddata/dataset/util/circular_pool.cc | 12 +- .../minddata/dataset/util/intrp_resource.h | 2 +- .../minddata/dataset/util/intrp_service.cc | 4 +- .../minddata/dataset/util/memory_pool.cc | 6 +- .../ccsrc/minddata/dataset/util/memory_pool.h | 4 +- mindspore/ccsrc/minddata/dataset/util/queue.h | 3 +- .../ccsrc/minddata/dataset/util/services.h | 2 +- .../ccsrc/minddata/dataset/util/status.cc | 149 --------- .../ccsrc/minddata/dataset/util/status.h | 125 ++------ mindspore/ccsrc/minddata/dataset/util/task.cc | 12 +- .../minddata/dataset/util/task_manager.cc | 6 +- mindspore/core/CMakeLists.txt | 2 - mindspore/core/ir/api_tensor_impl.h | 47 +++ mindspore/core/utils/status.cc | 127 ++++++++ mindspore/lite/include/context.h | 8 +- mindspore/lite/include/errorcode.h | 3 + mindspore/lite/include/ms_tensor.h | 15 +- .../java/app/src/main/native/CMakeLists.txt | 3 +- mindspore/lite/minddata/CMakeLists.txt | 38 ++- mindspore/lite/minddata/wrapper/MDToDApi.cc | 26 +- mindspore/lite/src/CMakeLists.txt | 13 + mindspore/lite/src/cxx_api/cell.cc | 95 ++++++ mindspore/lite/src/cxx_api/graph/graph.cc | 34 ++ mindspore/lite/src/cxx_api/graph/graph_data.h | 44 +++ mindspore/lite/src/cxx_api/lite_context.cc | 303 ++++++++++++++++++ mindspore/lite/src/cxx_api/model/model.cc | 98 ++++++ .../lite/src/cxx_api/model/model_impl.cc | 241 ++++++++++++++ mindspore/lite/src/cxx_api/model/model_impl.h | 56 ++++ mindspore/lite/src/cxx_api/serialization.cc | 74 +++++ .../lite/src/cxx_api/tensor/tensor_impl.cc | 39 +++ .../lite/src/cxx_api/tensor/tensor_impl.h | 140 ++++++++ mindspore/lite/src/cxx_api/types.cc | 199 ++++++++++++ mindspore/lite/src/cxx_api/utils.h | 41 +++ mindspore/lite/src/tensor.cc | 5 + mindspore/lite/src/tensor.h | 8 +- .../test/ut/src/dataset/de_tensor_test.cc | 91 ------ 
.../lite/test/ut/src/dataset/eager_test.cc | 15 +- .../tools/converter/quantizer/quant_cast.h | 2 +- tests/st/cpp/common/common_test.cc | 5 +- tests/st/cpp/data/dataset/apple.jpg | Bin 0 -> 159109 bytes tests/st/cpp/dataset/test_de.cc | 81 +++-- tests/st/cpp/model/test_tensor_add.cc | 43 ++- tests/ut/cpp/cxx_api/context_test.cc | 73 +++++ tests/ut/cpp/cxx_api/status_test.cc | 62 ++++ tests/ut/cpp/cxx_api/types_test.cc | 192 +++++------ tests/ut/cpp/dataset/btree_test.cc | 2 +- tests/ut/cpp/dataset/build_vocab_test.cc | 2 +- .../dataset/c_api_dataset_randomdata_test.cc | 2 +- tests/ut/cpp/dataset/c_api_text_test.cc | 2 +- tests/ut/cpp/dataset/c_api_text_vocab_test.cc | 2 +- tests/ut/cpp/dataset/cache_op_test.cc | 2 +- tests/ut/cpp/dataset/center_crop_op_test.cc | 2 +- tests/ut/cpp/dataset/common/common.h | 3 + tests/ut/cpp/dataset/connector_test.cc | 6 +- tests/ut/cpp/dataset/execute_test.cc | 25 +- tests/ut/cpp/dataset/fill_op_test.cc | 6 +- tests/ut/cpp/dataset/interrupt_test.cc | 4 +- tests/ut/cpp/dataset/memory_pool_test.cc | 2 +- tests/ut/cpp/dataset/queue_test.cc | 6 +- .../dataset/random_crop_with_bbox_op_test.cc | 2 +- tests/ut/cpp/dataset/solarize_op_test.cc | 2 +- tests/ut/cpp/dataset/status_test.cc | 8 +- tests/ut/cpp/dataset/task_manager_test.cc | 6 +- tests/ut/cpp/dataset/tensor_test.cc | 6 +- 227 files changed, 4276 insertions(+), 2777 deletions(-) create mode 100644 include/api/data_type.h create mode 100644 include/api/lite_context.h delete mode 100644 mindspore/ccsrc/cxx_api/graph/graph_utils.h delete mode 100644 mindspore/ccsrc/minddata/dataset/api/de_tensor.cc delete mode 100644 mindspore/ccsrc/minddata/dataset/api/minddata_eager.cc create mode 100644 mindspore/ccsrc/minddata/dataset/core/de_tensor.cc create mode 100644 mindspore/ccsrc/minddata/dataset/core/de_tensor.h delete mode 100644 mindspore/ccsrc/minddata/dataset/include/de_tensor.h delete mode 100644 mindspore/ccsrc/minddata/dataset/include/minddata_eager.h create mode 100644 
mindspore/core/ir/api_tensor_impl.h create mode 100644 mindspore/core/utils/status.cc create mode 100644 mindspore/lite/src/cxx_api/cell.cc create mode 100644 mindspore/lite/src/cxx_api/graph/graph.cc create mode 100644 mindspore/lite/src/cxx_api/graph/graph_data.h create mode 100644 mindspore/lite/src/cxx_api/lite_context.cc create mode 100644 mindspore/lite/src/cxx_api/model/model.cc create mode 100644 mindspore/lite/src/cxx_api/model/model_impl.cc create mode 100644 mindspore/lite/src/cxx_api/model/model_impl.h create mode 100644 mindspore/lite/src/cxx_api/serialization.cc create mode 100644 mindspore/lite/src/cxx_api/tensor/tensor_impl.cc create mode 100644 mindspore/lite/src/cxx_api/tensor/tensor_impl.h create mode 100644 mindspore/lite/src/cxx_api/types.cc create mode 100644 mindspore/lite/src/cxx_api/utils.h delete mode 100644 mindspore/lite/test/ut/src/dataset/de_tensor_test.cc create mode 100644 tests/st/cpp/data/dataset/apple.jpg create mode 100644 tests/ut/cpp/cxx_api/context_test.cc create mode 100644 tests/ut/cpp/cxx_api/status_test.cc diff --git a/cmake/package.cmake b/cmake/package.cmake index b77068157c..87b30bffa8 100644 --- a/cmake/package.cmake +++ b/cmake/package.cmake @@ -65,7 +65,7 @@ install( install( TARGETS mindspore_shared_lib - LIBRARY DESTINATION ${INSTALL_LIB_DIR} + DESTINATION ${INSTALL_LIB_DIR} COMPONENT mindspore ) @@ -327,7 +327,7 @@ install( ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/transforms.h ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/vision.h ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/vision_lite.h - ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/minddata_eager.h + ${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset/include/execute.h DESTINATION ${INSTALL_BASE_DIR}/include/minddata/dataset/include COMPONENT mindspore ) diff --git a/cmake/package_lite.cmake b/cmake/package_lite.cmake index 4dc3e09901..c7a0aaf18d 100644 --- a/cmake/package_lite.cmake +++ 
b/cmake/package_lite.cmake @@ -109,6 +109,8 @@ if(PLATFORM_ARM64) COMPONENT ${RUNTIME_COMPONENT_NAME}) install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) + install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api + COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend* ops*" EXCLUDE) if(ENABLE_TOOLS) install(TARGETS benchmark RUNTIME DESTINATION ${RUNTIME_PKG_NAME}/benchmark COMPONENT ${RUNTIME_COMPONENT_NAME}) endif() @@ -128,6 +130,8 @@ elseif(PLATFORM_ARM32) COMPONENT ${RUNTIME_COMPONENT_NAME}) install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) + install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api + COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE) if(ENABLE_TOOLS) install(TARGETS benchmark RUNTIME DESTINATION ${RUNTIME_PKG_NAME}/benchmark COMPONENT ${RUNTIME_COMPONENT_NAME}) endif() @@ -162,6 +166,8 @@ elseif(WIN32) endif() install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) + install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api + COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE) set(WIN_LIB_DIR_RUN_X86 ${RUNTIME_PKG_NAME}/benchmark) install(FILES ${TOP_DIR}/build/mindspore/src/libmindspore-lite.a DESTINATION ${WIN_LIB_DIR_RUN_X86} COMPONENT ${RUNTIME_COMPONENT_NAME}) @@ -182,6 +188,8 @@ else() endif() install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) + install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api + COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ascend*" EXCLUDE) install(FILES 
${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so DESTINATION ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME}) install(FILES ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.a DESTINATION ${RUNTIME_LIB_DIR} diff --git a/include/api/cell.h b/include/api/cell.h index 096bb8b1a9..3039fa816b 100644 --- a/include/api/cell.h +++ b/include/api/cell.h @@ -24,7 +24,6 @@ #include "include/api/graph.h" namespace mindspore { -namespace api { class InputAndOutput; using Input = InputAndOutput; using Output = InputAndOutput; @@ -35,7 +34,7 @@ class MS_API CellBase { virtual ~CellBase() = default; virtual std::vector Construct(const std::vector &inputs) { return {}; } virtual std::shared_ptr Clone() const = 0; - virtual Status Run(const std::vector &inputs, std::vector *outputs) { return SUCCESS; } + virtual Status Run(const std::vector &inputs, std::vector *outputs) { return kSuccess; } std::vector operator()(const std::vector &inputs) const; }; @@ -57,16 +56,16 @@ class MS_API ParameterCell final : public Cell { ParameterCell(ParameterCell &&); ParameterCell &operator=(ParameterCell &&); - explicit ParameterCell(const Tensor &); - ParameterCell &operator=(const Tensor &); + explicit ParameterCell(const MSTensor &); + ParameterCell &operator=(const MSTensor &); - explicit ParameterCell(Tensor &&); - ParameterCell &operator=(Tensor &&); + explicit ParameterCell(MSTensor &&); + ParameterCell &operator=(MSTensor &&); - Tensor GetTensor() const { return tensor_; } + MSTensor GetTensor() const { return tensor_; } private: - Tensor tensor_; + MSTensor tensor_; }; class MS_API OpCellBase : public CellBase { @@ -99,11 +98,9 @@ class MS_API GraphCell final : public Cell { explicit GraphCell(const std::shared_ptr &); const std::shared_ptr &GetGraph() const { return graph_; } - Status Run(const std::vector &inputs, std::vector *outputs) override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const; - 
Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const; + Status Run(const std::vector &inputs, std::vector *outputs) override; + std::vector GetInputs(); + std::vector GetOutputs(); private: friend class ModelImpl; @@ -119,8 +116,8 @@ class MS_API InputAndOutput { ~InputAndOutput() = default; // no explicit - InputAndOutput(const Tensor &); // NOLINT(runtime/explicit) - InputAndOutput(Tensor &&); // NOLINT(runtime/explicit) + InputAndOutput(const MSTensor &); // NOLINT(runtime/explicit) + InputAndOutput(MSTensor &&); // NOLINT(runtime/explicit) InputAndOutput(const std::shared_ptr &, const std::vector &, int32_t index); @@ -132,6 +129,5 @@ class MS_API InputAndOutput { std::vector prev_; int32_t index_; }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_CELL_H diff --git a/include/api/context.h b/include/api/context.h index 31552c95f4..0aea49dd99 100644 --- a/include/api/context.h +++ b/include/api/context.h @@ -16,26 +16,49 @@ #ifndef MINDSPORE_INCLUDE_API_CONTEXT_H #define MINDSPORE_INCLUDE_API_CONTEXT_H +#include +#include #include #include #include "include/api/types.h" namespace mindspore { -namespace api { -class MS_API Context { - public: - static Context &Instance(); - const std::string &GetDeviceTarget() const; - Context &SetDeviceTarget(const std::string &device_target); - uint32_t GetDeviceID() const; - Context &SetDeviceID(uint32_t device_id); - - private: - Context(); - ~Context(); - class ContextImpl; - std::shared_ptr impl_; +constexpr auto kDeviceTypeAscend310 = "Ascend310"; +constexpr auto kDeviceTypeAscend910 = "Ascend910"; + +struct MS_API Context { + virtual ~Context() = default; + std::map params; +}; + +struct MS_API GlobalContext : public Context { + static std::shared_ptr GetGlobalContext(); + + static void SetGlobalDeviceTarget(const std::string &device_target); + static std::string GetGlobalDeviceTarget(); + + static void SetGlobalDeviceID(const 
uint32_t &device_id); + static uint32_t GetGlobalDeviceID(); +}; + +struct MS_API ModelContext : public Context { + static void SetInsertOpConfigPath(const std::shared_ptr &context, const std::string &cfg_path); + static std::string GetInsertOpConfigPath(const std::shared_ptr &context); + + static void SetInputFormat(const std::shared_ptr &context, const std::string &format); + static std::string GetInputFormat(const std::shared_ptr &context); + + static void SetInputShape(const std::shared_ptr &context, const std::string &shape); + static std::string GetInputShape(const std::shared_ptr &context); + + static void SetOutputType(const std::shared_ptr &context, enum DataType output_type); + static enum DataType GetOutputType(const std::shared_ptr &context); + + static void SetPrecisionMode(const std::shared_ptr &context, const std::string &precision_mode); + static std::string GetPrecisionMode(const std::shared_ptr &context); + + static void SetOpSelectImplMode(const std::shared_ptr &context, const std::string &op_select_impl_mode); + static std::string GetOpSelectImplMode(const std::shared_ptr &context); }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_CONTEXT_H diff --git a/include/api/data_type.h b/include/api/data_type.h new file mode 100644 index 0000000000..a39488a83d --- /dev/null +++ b/include/api/data_type.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_ +#define MINDSPORE_INCLUDE_API_DATA_TYPE_H_ + +namespace mindspore { +enum class DataType : int { + kTypeUnknown = 0, + kObjectTypeString = 12, + kObjectTypeList = 13, + kObjectTypeTuple = 14, + kObjectTypeTensorType = 17, + kNumberTypeBool = 30, + kNumberTypeInt8 = 32, + kNumberTypeInt16 = 33, + kNumberTypeInt32 = 34, + kNumberTypeInt64 = 35, + kNumberTypeUInt8 = 37, + kNumberTypeUInt16 = 38, + kNumberTypeUInt32 = 39, + kNumberTypeUInt64 = 40, + kNumberTypeFloat16 = 42, + kNumberTypeFloat32 = 43, + kNumberTypeFloat64 = 44, + kNumberTypeEnd = 46, + // add new enum here + kInvalidType = INT32_MAX, +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_DATA_TYPE_H_ diff --git a/include/api/graph.h b/include/api/graph.h index 9373f573e6..a9288eb5a1 100644 --- a/include/api/graph.h +++ b/include/api/graph.h @@ -16,6 +16,7 @@ #ifndef MINDSPORE_INCLUDE_API_GRAPH_H #define MINDSPORE_INCLUDE_API_GRAPH_H +#include #include #include #include @@ -24,21 +25,21 @@ #include "include/api/types.h" namespace mindspore { -namespace api { class MS_API Graph { public: class GraphData; explicit Graph(const std::shared_ptr &graph_data); explicit Graph(std::shared_ptr &&graph_data); + explicit Graph(std::nullptr_t); ~Graph(); enum ModelType ModelType() const; + bool operator==(std::nullptr_t) const; private: friend class GraphCell; friend class ModelImpl; std::shared_ptr graph_data_; }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_GRAPH_H diff --git a/include/api/lite_context.h b/include/api/lite_context.h new file mode 100644 index 0000000000..933c6521e0 --- /dev/null +++ b/include/api/lite_context.h @@ -0,0 +1,77 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_LITE_CONTEXT_H +#define MINDSPORE_INCLUDE_API_LITE_CONTEXT_H + +#include +#include +#include +#include +#include "include/api/types.h" + +namespace mindspore { +namespace lite { +/// \brief CpuBindMode defined for holding bind cpu strategy argument. +typedef enum : uint32_t { + NO_BIND = 0, /**< no bind */ + HIGHER_CPU = 1, /**< bind higher cpu first */ + MID_CPU = 2 /**< bind middle cpu first */ +} CpuBindMode; + +class Allocator; +} // namespace lite + +struct MS_API Context { + public: + static void Clear(const std::shared_ptr &contxet); + + static void SetAsDefault(const std::shared_ptr &contxet); + + static void SetVendorName(const std::shared_ptr &contxet, const std::string &name); + static std::string GetVendorName(const std::shared_ptr &contxet); + + static void SetThreadNum(const std::shared_ptr &contxet, int num); + static int GetThreadNum(const std::shared_ptr &contxet); + + static void SetAllocator(const std::shared_ptr &contxet, std::shared_ptr alloc); + static std::shared_ptr GetAllocator(const std::shared_ptr &contxet); + + static void ConfigCPU(const std::shared_ptr &contxet, bool config); + static bool IfCPUEnabled(const std::shared_ptr &contxet); + + static void ConfigCPUFp16(const std::shared_ptr &contxet, bool config); + static bool IfCPUFp16Enabled(const std::shared_ptr &contxet); + + static void SetCPUBindMode(const std::shared_ptr &contxet, lite::CpuBindMode mode); + static lite::CpuBindMode GetCPUBindMode(const std::shared_ptr &contxet); + + static void ConfigGPU(const 
std::shared_ptr &contxet, bool config); + static bool IfGPUEnabled(const std::shared_ptr &contxet); + + static void ConfigGPUFp16(const std::shared_ptr &contxet, bool config); + static bool IfGPUFp16Enabled(const std::shared_ptr &contxet); + + static void ConfigNPU(const std::shared_ptr &contxet, bool config); + static bool IfNPUEnabled(const std::shared_ptr &contxet); + + static void SetNPUFrequency(const std::shared_ptr &contxet, int freq); + static int GetNPUFrequency(const std::shared_ptr &contxet); + + private: + std::map context_; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_LITE_CONTEXT_H diff --git a/include/api/model.h b/include/api/model.h index efd06aedc5..8d401085eb 100644 --- a/include/api/model.h +++ b/include/api/model.h @@ -20,41 +20,36 @@ #include #include #include +#include #include "include/api/status.h" #include "include/api/types.h" #include "include/api/graph.h" #include "include/api/cell.h" namespace mindspore { -namespace api { class ModelImpl; -// todo: minddata c++ interface -class DataSet {}; +struct Context; class MS_API Model { public: - explicit Model(const std::vector &network); - explicit Model(const GraphCell &graph); + explicit Model(const std::vector &network, const std::shared_ptr &model_context = nullptr); + explicit Model(const GraphCell &graph, const std::shared_ptr &model_context = nullptr); ~Model(); Model(const Model &) = delete; void operator=(const Model &) = delete; - Status Build(const std::map &options); + Status Build(); + Status Resize(const std::vector &inputs, const std::vector> &dims); - Status Train(const DataSet &dataset, bool data_sink, std::map *outputs); - Status Eval(const DataSet &dataset, bool data_sink, std::map *outputs); - Status Predict(const std::vector &inputs, std::vector *outputs); + Status Predict(const std::vector &inputs, std::vector *outputs); - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const; - Status 
GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const; + std::vector GetInputs(); + std::vector GetOutputs(); static bool CheckModelSupport(const std::string &device_type, ModelType model_type); private: std::shared_ptr impl_; }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_MODEL_H diff --git a/include/api/ops/ops.h b/include/api/ops/ops.h index 0715bac898..5e56c17377 100644 --- a/include/api/ops/ops.h +++ b/include/api/ops/ops.h @@ -25,7 +25,6 @@ #include "include/api/cell.h" namespace mindspore { -namespace api { struct MS_API Conv2D : public OpCell { Conv2D() : OpCell("Conv2D") {} ~Conv2D() override = default; @@ -45,6 +44,5 @@ struct MS_API Conv2D : public OpCell { std::vector dilation = {1, 1, 1, 1}; int group = 1; }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_OPS_OPS_H diff --git a/include/api/serialization.h b/include/api/serialization.h index 9750337d0d..2c34b826d3 100644 --- a/include/api/serialization.h +++ b/include/api/serialization.h @@ -26,15 +26,14 @@ #include "include/api/graph.h" namespace mindspore { -namespace api { class MS_API Serialization { public: + static Graph LoadModel(const void *model_data, size_t data_size, ModelType model_type); static Graph LoadModel(const std::string &file, ModelType model_type); static Status LoadCheckPoint(const std::string &ckpt_file, std::map *parameters); static Status SetParameters(const std::map ¶meters, Model *model); static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data); static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file); }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H diff --git a/include/api/status.h b/include/api/status.h index c8284fbaa5..131a15372c 100644 --- a/include/api/status.h +++ b/include/api/status.h @@ -17,37 +17,129 @@ #define 
MINDSPORE_INCLUDE_API_STATUS_H #include +#include +#include namespace mindspore { -namespace api { -enum StatusCode { - SUCCESS = 0, - FAILED, - INVALID_INPUTS, - // insert new status code here - UNKNOWN = 0xFFFFFFFF +enum CompCode : uint32_t { + kCore = 0x00000000u, + kMD = 0x10000000u, + kME = 0x20000000u, + kMC = 0x30000000u, + kLite = 0xF0000000u, +}; + +enum StatusCode : uint32_t { + kSuccess = 0, + // Core + kCoreFailed = kCore | 0x1, + + // MD + kMDOutOfMemory = kMD | 1, + kMDShapeMisMatch = kMD | 2, + kMDInterrupted = kMD | 3, + kMDNoSpace = kMD | 4, + kMDPyFuncException = kMD | 5, + kMDDuplicateKey = kMD | 6, + kMDPythonInterpreterFailure = kMD | 7, + kMDTDTPushFailure = kMD | 8, + kMDFileNotExist = kMD | 9, + kMDProfilingError = kMD | 10, + kMDBoundingBoxOutOfBounds = kMD | 11, + kMDBoundingBoxInvalidShape = kMD | 12, + kMDSyntaxError = kMD | 13, + kMDTimeOut = kMD | 14, + kMDBuddySpaceFull = kMD | 15, + kMDNetWorkError = kMD | 16, + kMDNotImplementedYet = kMD | 17, + // Make this error code the last one. Add new error code above it. + kMDUnexpectedError = kMD | 127, + + // ME + kMEFailed = kME | 0x1, + kMEInvalidInput = kME | 0x2, + + // MC + kMCFailed = kMC | 0x1, + kMCDeviceError = kMC | 0x2, + kMCInvalidInput = kMC | 0x3, + kMCInvalidArgs = kMC | 0x4, + + // Lite // Common error code, range: [-1, -100) + kLiteError = kLite | (0x0FFFFFFF & -1), /**< Common error code. */ + kLiteNullptr = kLite | (0x0FFFFFFF & -2), /**< NULL pointer returned.*/ + kLiteParamInvalid = kLite | (0x0FFFFFFF & -3), /**< Invalid parameter.*/ + kLiteNoChange = kLite | (0x0FFFFFFF & -4), /**< No change. */ + kLiteSuccessExit = kLite | (0x0FFFFFFF & -5), /**< No error but exit. */ + kLiteMemoryFailed = kLite | (0x0FFFFFFF & -6), /**< Fail to create memory. */ + kLiteNotSupport = kLite | (0x0FFFFFFF & -7), /**< Fail to support. */ + kLiteThreadPoolError = kLite | (0x0FFFFFFF & -8), /**< Error occur in thread pool. 
*/ + + // Executor error code, range: [-100,-200) + kLiteOutOfTensorRange = kLite | (0x0FFFFFFF & -100), /**< Failed to check range. */ + kLiteInputTensorError = kLite | (0x0FFFFFFF & -101), /**< Failed to check input tensor. */ + kLiteReentrantError = kLite | (0x0FFFFFFF & -102), /**< Exist executor running. */ + + // Graph error code, range: [-200,-300) + kLiteGraphFileError = kLite | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */ + + // Node error code, range: [-300,-400) + kLiteNotFindOp = kLite | (0x0FFFFFFF & -300), /**< Failed to find operator. */ + kLiteInvalidOpName = kLite | (0x0FFFFFFF & -301), /**< Invalid operator name. */ + kLiteInvalidOpAttr = kLite | (0x0FFFFFFF & -302), /**< Invalid operator attr. */ + kLiteOpExecuteFailure = kLite | (0x0FFFFFFF & -303), /**< Failed to execution operator. */ + + // Tensor error code, range: [-400,-500) + kLiteFormatError = kLite | (0x0FFFFFFF & -400), /**< Failed to checking tensor format. */ + + // InferShape error code, range: [-500,-600) + kLiteInferError = kLite | (0x0FFFFFFF & -500), /**< Failed to infer shape. */ + kLiteInferInvalid = kLite | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */ + + // User input param error code, range: [-600, 700) + kLiteInputParamInvalid = kLite | (0x0FFFFFFF & -600), /**< Invalid input param by user. 
*/ }; class Status { public: - Status() : status_code_(FAILED) {} - Status(enum StatusCode status_code, const std::string &status_msg = "") // NOLINT(runtime/explicit) - : status_code_(status_code), status_msg_(status_msg) {} + Status() : status_code_(kSuccess), line_of_code_(-1) {} + Status(enum StatusCode status_code, const std::string &status_msg = "") // NOLINT(runtime/explicit) + : status_code_(status_code), status_msg_(status_msg), line_of_code_(-1) {} + Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = ""); + ~Status() = default; - bool IsSuccess() const { return status_code_ == SUCCESS; } enum StatusCode StatusCode() const { return status_code_; } - std::string StatusMessage() const { return status_msg_; } + const std::string &ToString() const { return status_msg_; } + + int GetLineOfCode() const { return line_of_code_; } + const std::string &GetErrDescription() const { return status_msg_; } + const std::string &SetErrDescription(const std::string &err_description); + + friend std::ostream &operator<<(std::ostream &os, const Status &s); + bool operator==(const Status &other) const { return status_code_ == other.status_code_; } bool operator==(enum StatusCode other_code) const { return status_code_ == other_code; } bool operator!=(const Status &other) const { return status_code_ != other.status_code_; } bool operator!=(enum StatusCode other_code) const { return status_code_ != other_code; } - operator bool() const = delete; + + explicit operator bool() const { return (status_code_ == kSuccess); } + explicit operator int() const { return static_cast(status_code_); } + + static Status OK() { return Status(StatusCode::kSuccess); } + + bool IsOk() const { return (StatusCode() == StatusCode::kSuccess); } + + bool IsError() const { return !IsOk(); } + + static std::string CodeAsString(enum StatusCode c); private: enum StatusCode status_code_; std::string status_msg_; + int line_of_code_; + std::string file_name_; + 
std::string err_description_; }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_STATUS_H diff --git a/include/api/types.h b/include/api/types.h index 7395072869..0f4503e122 100644 --- a/include/api/types.h +++ b/include/api/types.h @@ -16,15 +16,20 @@ #ifndef MINDSPORE_INCLUDE_API_TYPES_H #define MINDSPORE_INCLUDE_API_TYPES_H +#include #include #include #include +#include "include/api/data_type.h" +#ifdef _WIN32 +#define MS_API __declspec(dllexport) +#else #define MS_API __attribute__((visibility("default"))) +#endif namespace mindspore { -namespace api { -enum ModelType { +enum ModelType : uint32_t { kMindIR = 0, kAIR = 1, kOM = 2, @@ -33,52 +38,38 @@ enum ModelType { kUnknownType = 0xFFFFFFFF }; -enum DataType { - kMsUnknown = 0, - kMsBool = 1, - kMsInt8 = 2, - kMsInt16 = 3, - kMsInt32 = 4, - kMsInt64 = 5, - kMsUint8 = 6, - kMsUint16 = 7, - kMsUint32 = 8, - kMsUint64 = 9, - kMsFloat16 = 10, - kMsFloat32 = 11, - kMsFloat64 = 12, - // insert new data type here - kInvalidDataType = 0xFFFFFFFF -}; - -class MS_API Tensor { +class MS_API MSTensor { public: - Tensor(); - Tensor(const std::string &name, DataType type, const std::vector &shape, const void *data, size_t data_len); - ~Tensor(); + class Impl; - const std::string &Name() const; - void SetName(const std::string &name); + static MSTensor CreateTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static MSTensor CreateRefTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; - api::DataType DataType() const; - void SetDataType(api::DataType type); + MSTensor(); + explicit MSTensor(const std::shared_ptr &impl); + MSTensor(const std::string &name, DataType type, const std::vector &shape, const void *data, + size_t data_len); + ~MSTensor(); + const std::string &Name() const; + enum DataType DataType() const; const std::vector &Shape() const; - void 
SetShape(const std::vector &shape); + int64_t ElementNum() const; - const void *Data() const; + std::shared_ptr Data() const; void *MutableData(); size_t DataSize() const; - bool ResizeData(size_t data_len); - bool SetData(const void *data, size_t data_len); + bool IsDevice() const; - int64_t ElementNum() const; - static int GetTypeSize(api::DataType type); - Tensor Clone() const; + MSTensor Clone() const; + bool operator==(std::nullptr_t) const; private: - class Impl; + friend class ModelImpl; + explicit MSTensor(std::nullptr_t); std::shared_ptr impl_; }; @@ -101,21 +92,5 @@ class MS_API Buffer { class Impl; std::shared_ptr impl_; }; - -extern MS_API const char *kDeviceTypeAscend310; -extern MS_API const char *kDeviceTypeAscend910; -extern MS_API const char *kDeviceTypeGpu; - -constexpr auto kModelOptionDumpCfgPath = "mindspore.option.dump_config_file_path"; -constexpr auto kModelOptionInsertOpCfgPath = "mindspore.option.insert_op_config_file_path"; // aipp config file -constexpr auto kModelOptionInputFormat = "mindspore.option.input_format"; // nchw or nhwc -// Mandatory while dynamic batch: e.g. 
"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1" -constexpr auto kModelOptionInputShape = "mindspore.option.input_shape"; -constexpr auto kModelOptionOutputType = "mindspore.option.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32" -constexpr auto kModelOptionPrecisionMode = "mindspore.option.precision_mode"; -// "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16" -constexpr auto kModelOptionOpSelectImplMode = "mindspore.option.op_select_impl_mode"; -// "high_precision" or "high_performance", default as "high_performance" -} // namespace api } // namespace mindspore #endif // MINDSPORE_INCLUDE_API_TYPES_H diff --git a/mindspore/ccsrc/cxx_api/CMakeLists.txt b/mindspore/ccsrc/cxx_api/CMakeLists.txt index 4992308f15..d5e4707593 100644 --- a/mindspore/ccsrc/cxx_api/CMakeLists.txt +++ b/mindspore/ccsrc/cxx_api/CMakeLists.txt @@ -23,7 +23,7 @@ if(ENABLE_D) endif() if(ENABLE_GPU) - file(GLOB_RECURSE API_MS_INFER_SRC ${CMAKE_CURRENT_SOURCE_DIR} "python_utils.cc" "model/ms/*.cc" "graph/gpu/*.cc") + file(GLOB_RECURSE API_MS_INFER_SRC ${CMAKE_CURRENT_SOURCE_DIR} "model/ms/*.cc" "graph/gpu/*.cc") endif() set(MSLIB_SRC ${CMAKE_CURRENT_SOURCE_DIR}/types.cc @@ -45,8 +45,13 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin") target_link_libraries(mindspore_shared_lib PRIVATE ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY} -Wl,-force_load mindspore -Wl,-noall_load proto_input mindspore_gvar mindspore::protobuf) else() - target_link_libraries(mindspore_shared_lib PRIVATE ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY} + if(ENABLE_D OR ENABLE_ACL) + target_link_libraries(mindspore_shared_lib PRIVATE ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY} -Wl,--whole-archive mindspore -Wl,--no-whole-archive proto_input mindspore_gvar mindspore::protobuf) + else() + target_link_libraries(mindspore_shared_lib PRIVATE ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY} + mindspore proto_input mindspore_gvar mindspore::protobuf) + endif() endif() if(ENABLE_CPU) diff 
--git a/mindspore/ccsrc/cxx_api/cell.cc b/mindspore/ccsrc/cxx_api/cell.cc index 7329675c0f..ebf3a4706e 100644 --- a/mindspore/ccsrc/cxx_api/cell.cc +++ b/mindspore/ccsrc/cxx_api/cell.cc @@ -18,7 +18,7 @@ #include "cxx_api/factory.h" #include "cxx_api/graph/graph_impl.h" -namespace mindspore::api { +namespace mindspore { std::vector CellBase::operator()(const std::vector &inputs) const { return Clone()->Construct(inputs); } ParameterCell::ParameterCell(const ParameterCell &cell) : tensor_(cell.tensor_.Clone()) {} @@ -40,23 +40,23 @@ ParameterCell &ParameterCell::operator=(ParameterCell &&cell) { return *this; } -ParameterCell::ParameterCell(const Tensor &tensor) : tensor_(tensor.Clone()) {} +ParameterCell::ParameterCell(const MSTensor &tensor) : tensor_(tensor.Clone()) {} -ParameterCell &ParameterCell::operator=(const Tensor &tensor) { +ParameterCell &ParameterCell::operator=(const MSTensor &tensor) { tensor_ = tensor.Clone(); return *this; } -ParameterCell::ParameterCell(Tensor &&tensor) : tensor_(tensor) {} +ParameterCell::ParameterCell(MSTensor &&tensor) : tensor_(tensor) {} -ParameterCell &ParameterCell::operator=(Tensor &&tensor) { +ParameterCell &ParameterCell::operator=(MSTensor &&tensor) { tensor_ = tensor; return *this; } GraphCell::GraphCell(const Graph &graph) : graph_(std::make_shared(graph)), - executor_(Factory::Instance().Create(Context::Instance().GetDeviceTarget())) { + executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { MS_EXCEPTION_IF_NULL(graph_); MS_EXCEPTION_IF_NULL(executor_); executor_->SetGraph(graph_); @@ -64,7 +64,7 @@ GraphCell::GraphCell(const Graph &graph) GraphCell::GraphCell(const std::shared_ptr &graph) : graph_(graph), - executor_(Factory::Instance().Create(Context::Instance().GetDeviceTarget())) { + executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { MS_EXCEPTION_IF_NULL(graph_); MS_EXCEPTION_IF_NULL(executor_); executor_->SetGraph(graph_); @@ -72,13 +72,13 @@ 
GraphCell::GraphCell(const std::shared_ptr &graph) GraphCell::GraphCell(Graph &&graph) : graph_(std::make_shared(graph)), - executor_(Factory::Instance().Create(Context::Instance().GetDeviceTarget())) { + executor_(Factory::Instance().Create(GlobalContext::GetGlobalDeviceTarget())) { MS_EXCEPTION_IF_NULL(graph_); MS_EXCEPTION_IF_NULL(executor_); executor_->SetGraph(graph_); } -Status GraphCell::Run(const std::vector &inputs, std::vector *outputs) { +Status GraphCell::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(executor_); return executor_->Run(inputs, outputs); } @@ -88,25 +88,24 @@ Status GraphCell::Load() { return executor_->Load(); } -Status GraphCell::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector GraphCell::GetInputs() { MS_EXCEPTION_IF_NULL(executor_); - return executor_->GetInputsInfo(names, shapes, data_types, mem_sizes); + return executor_->GetInputs(); } -Status GraphCell::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector GraphCell::GetOutputs() { MS_EXCEPTION_IF_NULL(executor_); - return executor_->GetOutputsInfo(names, shapes, data_types, mem_sizes); + return executor_->GetOutputs(); } InputAndOutput::InputAndOutput() : cell_(nullptr), prev_(), index_(-1) {} -InputAndOutput::InputAndOutput(const Tensor &tensor) +InputAndOutput::InputAndOutput(const MSTensor &tensor) : cell_(std::make_shared(tensor.Clone())), prev_(), index_(-1) {} -InputAndOutput::InputAndOutput(Tensor &&tensor) : cell_(std::make_shared(tensor)), prev_(), index_(-1) {} +InputAndOutput::InputAndOutput(MSTensor &&tensor) + : cell_(std::make_shared(tensor)), prev_(), index_(-1) {} InputAndOutput::InputAndOutput(const std::shared_ptr &cell, const std::vector &prev, int32_t index) : cell_(cell), prev_(prev), index_(index) {} -} // namespace mindspore::api +} // namespace mindspore diff --git 
a/mindspore/ccsrc/cxx_api/context.cc b/mindspore/ccsrc/cxx_api/context.cc index 6af1915bfb..a9ea4055a0 100644 --- a/mindspore/ccsrc/cxx_api/context.cc +++ b/mindspore/ccsrc/cxx_api/context.cc @@ -16,49 +16,119 @@ #include "include/api/context.h" #include "utils/log_adapter.h" -namespace mindspore::api { -class Context::ContextImpl { - public: - ContextImpl() : device_target_("NotSet"), device_id_(0) {} - ~ContextImpl() = default; - const std::string &GetDeviceTarget() const { return device_target_; } - void SetDeviceTarget(std::string_view device_target) { device_target_ = device_target; } - uint32_t GetDeviceID() const { return device_id_; } - void SetDeviceID(uint32_t device_id) { device_id_ = device_id; } +constexpr auto kGlobalContextDeviceTarget = "mindspore.ascend.globalcontext.device_target"; +constexpr auto kGlobalContextDeviceID = "mindspore.ascend.globalcontext.device_id"; +constexpr auto kModelOptionInsertOpCfgPath = "mindspore.option.insert_op_config_file_path"; // aipp config file +constexpr auto kModelOptionInputFormat = "mindspore.option.input_format"; // nchw or nhwc +constexpr auto kModelOptionInputShape = "mindspore.option.input_shape"; +// Mandatory while dynamic batch: e.g. 
"input_op_name1: n1,c2,h3,w4;input_op_name2: n4,c3,h2,w1" +constexpr auto kModelOptionOutputType = "mindspore.option.output_type"; // "FP32", "UINT8" or "FP16", default as "FP32" +constexpr auto kModelOptionPrecisionMode = "mindspore.option.precision_mode"; +// "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" or "allow_mix_precision", default as "force_fp16" +constexpr auto kModelOptionOpSelectImplMode = "mindspore.option.op_select_impl_mode"; - private: - std::string device_target_; - uint32_t device_id_; -}; +namespace mindspore { +template +static T GetValue(const std::shared_ptr &context, const std::string &key) { + auto iter = context->params.find(key); + if (iter == context->params.end()) { + return T(); + } + const std::any &value = iter->second; + if (value.type() != typeid(T)) { + return T(); + } -Context &Context::Instance() { - static Context context; - return context; + return std::any_cast(value); } -const std::string &Context::GetDeviceTarget() const { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->GetDeviceTarget(); +std::shared_ptr GlobalContext::GetGlobalContext() { + static std::shared_ptr g_context = std::make_shared(); + return g_context; } -Context &Context::SetDeviceTarget(const std::string &device_target) { - MS_EXCEPTION_IF_NULL(impl_); - impl_->SetDeviceTarget(device_target); - return *this; +void GlobalContext::SetGlobalDeviceTarget(const std::string &device_target) { + auto global_context = GetGlobalContext(); + MS_EXCEPTION_IF_NULL(global_context); + global_context->params[kGlobalContextDeviceTarget] = device_target; } -uint32_t Context::GetDeviceID() const { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->GetDeviceID(); +std::string GlobalContext::GetGlobalDeviceTarget() { + auto global_context = GetGlobalContext(); + MS_EXCEPTION_IF_NULL(global_context); + return GetValue(global_context, kGlobalContextDeviceTarget); } -Context &Context::SetDeviceID(uint32_t device_id) { - MS_EXCEPTION_IF_NULL(impl_); - 
impl_->SetDeviceID(device_id); - return *this; +void GlobalContext::SetGlobalDeviceID(const uint32_t &device_id) { + auto global_context = GetGlobalContext(); + MS_EXCEPTION_IF_NULL(global_context); + global_context->params[kGlobalContextDeviceID] = device_id; } -Context::Context() : impl_(std::make_shared()) { MS_EXCEPTION_IF_NULL(impl_); } +uint32_t GlobalContext::GetGlobalDeviceID() { + auto global_context = GetGlobalContext(); + MS_EXCEPTION_IF_NULL(global_context); + return GetValue(global_context, kGlobalContextDeviceID); +} + +void ModelContext::SetInsertOpConfigPath(const std::shared_ptr &context, const std::string &cfg_path) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionInsertOpCfgPath] = cfg_path; +} + +std::string ModelContext::GetInsertOpConfigPath(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionInsertOpCfgPath); +} + +void ModelContext::SetInputFormat(const std::shared_ptr &context, const std::string &format) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionInputFormat] = format; +} + +std::string ModelContext::GetInputFormat(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionInputFormat); +} + +void ModelContext::SetInputShape(const std::shared_ptr &context, const std::string &shape) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionInputShape] = shape; +} + +std::string ModelContext::GetInputShape(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionInputShape); +} + +void ModelContext::SetOutputType(const std::shared_ptr &context, enum DataType output_type) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionOutputType] = output_type; +} + +enum DataType ModelContext::GetOutputType(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionOutputType); +} + +void 
ModelContext::SetPrecisionMode(const std::shared_ptr &context, const std::string &precision_mode) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionPrecisionMode] = precision_mode; +} + +std::string ModelContext::GetPrecisionMode(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionPrecisionMode); +} -Context::~Context() {} -} // namespace mindspore::api +void ModelContext::SetOpSelectImplMode(const std::shared_ptr &context, + const std::string &op_select_impl_mode) { + MS_EXCEPTION_IF_NULL(context); + context->params[kModelOptionOpSelectImplMode] = op_select_impl_mode; +} + +std::string ModelContext::GetOpSelectImplMode(const std::shared_ptr &context) { + MS_EXCEPTION_IF_NULL(context); + return GetValue(context, kModelOptionOpSelectImplMode); +} +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/factory.h b/mindspore/ccsrc/cxx_api/factory.h index 7a7b45e12a..e2bdb96cea 100644 --- a/mindspore/ccsrc/cxx_api/factory.h +++ b/mindspore/ccsrc/cxx_api/factory.h @@ -23,7 +23,7 @@ #include #include "utils/utils.h" -namespace mindspore::api { +namespace mindspore { template class Factory { using U = std::function()>; @@ -79,5 +79,5 @@ class Registrar { #define API_FACTORY_REG(BASE_CLASS, DEVICE_NAME, DERIVE_CLASS) \ static const Registrar g_api_##DERIVE_CLASS##_registrar_##DEVICE_NAME##_reg( \ #DEVICE_NAME, []() { return std::make_shared(); }); -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_FACTORY_H diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc index 9feb614f77..624d1c8832 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.cc @@ -17,8 +17,8 @@ #include "utils/log_adapter.h" #include "acl/acl.h" -namespace mindspore::api { -std::weak_ptr AclEnvGuard::global_acl_env_; +namespace mindspore { +std::shared_ptr 
AclEnvGuard::global_acl_env_; std::mutex AclEnvGuard::global_acl_env_mutex_; AclEnvGuard::AclEnvGuard(std::string_view cfg_file) { @@ -42,7 +42,7 @@ std::shared_ptr AclEnvGuard::GetAclEnv(std::string_view cfg_file) { std::shared_ptr acl_env; std::lock_guard lock(global_acl_env_mutex_); - acl_env = global_acl_env_.lock(); + acl_env = global_acl_env_; if (acl_env != nullptr) { MS_LOG(INFO) << "Acl has been initialized, skip."; } else { @@ -57,4 +57,4 @@ std::shared_ptr AclEnvGuard::GetAclEnv(std::string_view cfg_file) { } return acl_env; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h index df35385d1f..8b4ae76c68 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_env_guard.h @@ -20,7 +20,7 @@ #include #include "acl/acl_base.h" -namespace mindspore::api { +namespace mindspore { class __attribute__((visibility("default"))) AclEnvGuard { public: explicit AclEnvGuard(std::string_view cfg_file); @@ -29,10 +29,10 @@ class __attribute__((visibility("default"))) AclEnvGuard { static std::shared_ptr GetAclEnv(std::string_view cfg_file); private: - static std::weak_ptr global_acl_env_; + static std::shared_ptr global_acl_env_; static std::mutex global_acl_env_mutex_; aclError errno_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_ACL_ACL_ENV_GUARD_H diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc index 262d79bfb7..439161910b 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.cc @@ -16,53 +16,50 @@ #include "cxx_api/graph/acl/acl_graph_impl.h" #include "include/api/context.h" #include "cxx_api/model/acl/model_converter.h" -#include "cxx_api/python_utils.h" #include "utils/log_adapter.h" -namespace 
mindspore::api { +namespace mindspore { API_FACTORY_REG(GraphCell::GraphImpl, Ascend310, AclGraphImpl); AclGraphImpl::AclGraphImpl() : init_flag_(false), load_flag_(false), device_type_("AscendCL"), - device_id_(Context::Instance().GetDeviceID()), + device_id_(GlobalContext::GetGlobalDeviceID()), context_(nullptr), acl_env_(nullptr) {} AclGraphImpl::~AclGraphImpl() { (void)FinalizeEnv(); } -Status AclGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { +Status AclGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; - return FAILED; + return ret; } return model_process_.PredictFromHost(inputs, outputs); } -Status AclGraphImpl::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector AclGraphImpl::GetInputs() { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; - return FAILED; + return {}; } - return model_process_.GetInputsInfo(names, shapes, data_types, mem_sizes); + return model_process_.GetInputs(); } -Status AclGraphImpl::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector AclGraphImpl::GetOutputs() { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Prepare model resource failed."; - return FAILED; + return {}; } - return model_process_.GetOutputsInfo(names, shapes, data_types, mem_sizes); + return model_process_.GetOutputs(); } Status AclGraphImpl::LoadAclModel(Buffer om_data) { @@ -72,44 +69,44 @@ Status AclGraphImpl::LoadAclModel(Buffer om_data) { auto acl_ret = aclmdlLoadFromMem(om_data.Data(), om_data.DataSize(), &acl_model_id); if (acl_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Call aclmdlLoadFromMem failed."; - return FAILED; + return 
kMCDeviceError; } // acl init model resource model_process_.set_model_id(acl_model_id); Status ret = model_process_.PreInitModelResource(); - if (ret != SUCCESS) { + if (ret != kSuccess) { (void)aclmdlUnload(acl_model_id); MS_LOG(ERROR) << "Pre init model resource failed."; - return FAILED; + return ret; } MS_LOG(INFO) << "Load acl model success."; - return SUCCESS; + return kSuccess; } Status AclGraphImpl::InitEnv() { if (init_flag_) { - return SUCCESS; + return kSuccess; } acl_env_ = AclEnvGuard::GetAclEnv(""); if (acl_env_ == nullptr) { MS_LOG(ERROR) << "Acl init failed."; - return FAILED; + return kMCDeviceError; } aclError ret = aclrtSetDevice(device_id_); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Acl open device " << device_id_ << " failed"; - return FAILED; + return kMCDeviceError; } MS_LOG(INFO) << "Open device " << device_id_ << " success"; ret = aclrtCreateContext(&context_, device_id_); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Acl create context failed"; - return FAILED; + return kMCDeviceError; } MS_LOG(INFO) << "Create context success"; @@ -117,7 +114,7 @@ Status AclGraphImpl::InitEnv() { ret = aclrtGetRunMode(&run_mode); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Acl get run mode failed"; - return FAILED; + return kMCDeviceError; } bool is_device = (run_mode == ACL_DEVICE); model_process_.SetIsDevice(is_device); @@ -125,24 +122,24 @@ Status AclGraphImpl::InitEnv() { MS_LOG(INFO) << "Init acl success, device id " << device_id_; init_flag_ = true; - return SUCCESS; + return kSuccess; } Status AclGraphImpl::FinalizeEnv() { if (!init_flag_) { - return SUCCESS; + return kSuccess; } aclError rt_ret = aclrtSetCurrentContext(context_); if (rt_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Set the ascend device context failed"; - return FAILED; + return kMCDeviceError; } Status ret = model_process_.UnLoad(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Unload model inner failed."; - return FAILED; + return ret; } if (context_ 
!= nullptr) { @@ -161,16 +158,16 @@ Status AclGraphImpl::FinalizeEnv() { MS_LOG(INFO) << "End to reset device " << device_id_; init_flag_ = false; - return SUCCESS; + return kSuccess; } Status AclGraphImpl::Load() { // check graph type if (graph_->ModelType() != ModelType::kOM) { Status ret = ConvertToOM(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Load Failed."; - return FAILED; + return ret; } } @@ -180,15 +177,15 @@ Status AclGraphImpl::Load() { // init Status ret = InitEnv(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; - return FAILED; + return ret; } // load model if (!load_flag_) { ret = LoadAclModel(om_data); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Load acl model failed."; return ret; } @@ -198,24 +195,24 @@ Status AclGraphImpl::Load() { aclError rt_ret = aclrtSetCurrentContext(context_); if (rt_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Set the ascend device context failed"; - return FAILED; + return kMCDeviceError; } - return SUCCESS; + return kSuccess; } Status AclGraphImpl::ConvertToOM() { MS_LOG(INFO) << "Start convert to om model."; if (graph_ == nullptr) { MS_LOG(ERROR) << "Invalid graph_ is null."; - return FAILED; + return kMCFailed; } auto &graph_data = GraphImpl::MutableGraphData(); MS_EXCEPTION_IF_NULL(graph_data); if (graph_->ModelType() == ModelType::kOM) { MS_LOG(INFO) << "This model has been built, skip."; - return SUCCESS; + return kSuccess; } else if (graph_->ModelType() == ModelType::kMindIR) { auto func_graph = graph_data->GetFuncGraph(); MS_EXCEPTION_IF_NULL(func_graph); @@ -223,13 +220,13 @@ Status AclGraphImpl::ConvertToOM() { Buffer om_data = model_converter.LoadMindIR(func_graph); if (om_data.Data() == nullptr || om_data.DataSize() == 0) { MS_LOG(ERROR) << "Convert MindIR to OM failed."; - return FAILED; + return kMCFailed; } graph_data = std::make_shared(om_data, ModelType::kOM); MS_LOG(INFO) << "Convert MindIR to OM success."; - return 
SUCCESS; + return kSuccess; } MS_LOG(ERROR) << "Unsupported ModelType " << graph_->ModelType(); - return FAILED; + return kMCFailed; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h index e3085c1b5b..4d185d5fbe 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/acl/acl_graph_impl.h @@ -27,18 +27,16 @@ #include "cxx_api/graph/graph_impl.h" #include "cxx_api/factory.h" -namespace mindspore::api { +namespace mindspore { class AclGraphImpl : public GraphCell::GraphImpl { public: AclGraphImpl(); ~AclGraphImpl() override; - Status Run(const std::vector &inputs, std::vector *outputs) override; + Status Run(const std::vector &inputs, std::vector *outputs) override; Status Load() override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: Status ConvertToOM(); @@ -56,5 +54,5 @@ class AclGraphImpl : public GraphCell::GraphImpl { ModelProcess model_process_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_ACL_ACL_GRAPH_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/graph/acl/model_process.cc b/mindspore/ccsrc/cxx_api/graph/acl/model_process.cc index 5c10b75a79..c153ae7df2 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/model_process.cc +++ b/mindspore/ccsrc/cxx_api/graph/acl/model_process.cc @@ -20,17 +20,19 @@ #include #include "utils/utils.h" -namespace mindspore::api { +namespace mindspore { static DataType TransToApiType(aclDataType data_type) { - static const std::map data_type_map = { - {ACL_FLOAT16, api::kMsFloat16}, {ACL_FLOAT, api::kMsFloat32}, {ACL_DOUBLE, 
api::kMsFloat64}, - {ACL_INT8, api::kMsInt8}, {ACL_INT16, api::kMsInt16}, {ACL_INT32, api::kMsInt32}, - {ACL_INT64, api::kMsInt64}, {ACL_UINT8, api::kMsUint8}, {ACL_UINT16, api::kMsUint16}, - {ACL_UINT32, api::kMsUint32}, {ACL_UINT64, api::kMsUint64}, {ACL_BOOL, api::kMsBool}, + static const std::map data_type_map = { + {ACL_FLOAT16, DataType::kNumberTypeFloat16}, {ACL_FLOAT, DataType::kNumberTypeFloat32}, + {ACL_DOUBLE, DataType::kNumberTypeFloat64}, {ACL_INT8, DataType::kNumberTypeInt8}, + {ACL_INT16, DataType::kNumberTypeInt16}, {ACL_INT32, DataType::kNumberTypeInt32}, + {ACL_INT64, DataType::kNumberTypeInt64}, {ACL_UINT8, DataType::kNumberTypeUInt8}, + {ACL_UINT16, DataType::kNumberTypeUInt16}, {ACL_UINT32, DataType::kNumberTypeUInt32}, + {ACL_UINT64, DataType::kNumberTypeUInt64}, {ACL_BOOL, DataType::kNumberTypeBool}, }; auto it = data_type_map.find(data_type); if (it == data_type_map.end()) { - return api::kInvalidDataType; + return DataType::kTypeUnknown; } else { return it->second; } @@ -51,7 +53,7 @@ inline static void PushbackIfNotNull(U *vec, T &&item) { } static void ConstructTensorDesc(const std::vector &acl_tensor_list, std::vector *names, - std::vector> *shapes, std::vector *data_types, + std::vector> *shapes, std::vector *data_types, std::vector *mem_sizes) { ClearIfNotNull(names); ClearIfNotNull(shapes); @@ -66,41 +68,69 @@ static void ConstructTensorDesc(const std::vector &acl_tensor_lis } } +static std::string ShapeToString(const std::vector &shape) { + std::string result = "["; + for (size_t i = 0; i < shape.size(); ++i) { + result += std::to_string(shape[i]); + if (i + 1 < shape.size()) { + result += ", "; + } + } + result += "]"; + return result; +} + +Status ModelProcess::ConstructTensors(const std::vector &acl_tensor_list, + std::vector *tensor_list) { + MS_EXCEPTION_IF_NULL(tensor_list); + std::vector names; + std::vector> shapes; + std::vector data_types; + std::vector mem_sizes; + + ConstructTensorDesc(acl_tensor_list, &names, &shapes, 
&data_types, &mem_sizes); + tensor_list->clear(); + if (names.size() != acl_tensor_list.size() || shapes.size() != acl_tensor_list.size() || + data_types.size() != acl_tensor_list.size() || mem_sizes.size() != acl_tensor_list.size()) { + MS_LOG(ERROR) << "Inner error, size do not match: names size " << names.size() << " shapes size " << shapes.size() + << " data types size " << data_types.size() << " mem sizes size " << mem_sizes.size() + << " acl_tensor_list size " << acl_tensor_list.size(); + return kMCFailed; + } + + aclrtMemcpyKind kind = is_run_on_device_ ? ACL_MEMCPY_HOST_TO_HOST : ACL_MEMCPY_DEVICE_TO_HOST; + for (size_t i = 0; i < acl_tensor_list.size(); ++i) { + tensor_list->emplace_back(names[i], data_types[i], shapes[i], nullptr, mem_sizes[i]); + auto ret = aclrtMemcpy((*tensor_list)[i].MutableData(), (*tensor_list)[i].DataSize(), + acl_tensor_list[i].device_data, acl_tensor_list[i].buffer_size, kind); + if (ret != ACL_ERROR_NONE) { + MS_LOG(ERROR) << "Memcpy input " << i << " from " << (is_run_on_device_ ? 
"host" : "device") + << " to host failed, memory size " << acl_tensor_list[i].buffer_size; + return kMCFailed; + } + } + + return kSuccess; +} + Status ModelProcess::PreInitModelResource() { model_desc_ = aclmdlCreateDesc(); aclError acl_ret = aclmdlGetDesc(model_desc_, model_id_); if (acl_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Read model desc failed"; - return FAILED; + return kMCDeviceError; } Status ret = InitInputsBuffer(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Create input buffer failed"; - return FAILED; + return ret; } ret = InitOutputsBuffer(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Create output buffer failed"; - return FAILED; + return ret; } - return SUCCESS; -} - -Status ModelProcess::LoadModelFromFile(const std::string &file_name, uint32_t *model_id) { - MS_EXCEPTION_IF_NULL(model_id); - aclError acl_ret = aclmdlLoadFromFile(file_name.c_str(), model_id); - if (acl_ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Read model file failed, file name is " << file_name; - return FAILED; - } - MS_LOG(INFO) << "Load model success " << file_name; - model_id_ = *model_id; - if (PreInitModelResource() != SUCCESS) { - aclmdlUnload(model_id_); - MS_LOG(ERROR) << "Pre init model resource failed, file name is " << file_name; - return FAILED; - } - return SUCCESS; + return kSuccess; } Status ModelProcess::InitInputsBuffer() { @@ -113,8 +143,8 @@ Status ModelProcess::InitInputsBuffer() { if (!is_run_on_device_) { // need to copy input/output to/from device ret = aclrtMalloc(&data_mem_buffer, buffer_size, ACL_MEM_MALLOC_NORMAL_ONLY); if (ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Malloc device input buffer faild , input size " << buffer_size; - return FAILED; + MS_LOG(ERROR) << "Malloc device input buffer failed , input size " << buffer_size; + return kMCDeviceError; } } @@ -125,7 +155,7 @@ Status ModelProcess::InitInputsBuffer() { if (!is_run_on_device_) { aclrtFree(data_mem_buffer); } - return FAILED; + return 
kMCDeviceError; } aclDataType data_type = aclmdlGetInputDataType(model_desc_, i); std::vector shape(dims.dims, dims.dims + dims.dimCount); @@ -137,7 +167,7 @@ Status ModelProcess::InitInputsBuffer() { input_infos_.emplace_back(AclTensorInfo{data_mem_buffer, buffer_size, data_type, shape, input_name}); } MS_LOG(INFO) << "Create model inputs success"; - return SUCCESS; + return kSuccess; } Status ModelProcess::CreateDataBuffer(void **data_mem_buffer, size_t buffer_size, aclmdlDataset *dataset) { @@ -154,14 +184,14 @@ Status ModelProcess::CreateDataBuffer(void **data_mem_buffer, size_t buffer_size if (!is_run_on_device_) { ret = aclrtMalloc(data_mem_buffer, buffer_size, ACL_MEM_MALLOC_NORMAL_ONLY); if (ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Malloc device buffer faild , buffer size " << buffer_size; - return FAILED; + MS_LOG(ERROR) << "Malloc device buffer failed , buffer size " << buffer_size; + return kMCDeviceError; } } else { ret = aclrtMallocHost(data_mem_buffer, buffer_size); if (ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Malloc device buffer faild , buffer size " << buffer_size; - return FAILED; + MS_LOG(ERROR) << "Malloc device buffer failed , buffer size " << buffer_size; + return kMCDeviceError; } } @@ -169,16 +199,16 @@ Status ModelProcess::CreateDataBuffer(void **data_mem_buffer, size_t buffer_size if (data_buffer == nullptr) { MS_LOG(ERROR) << "Create Data Buffer failed"; free_data_buffer(*data_mem_buffer); - return FAILED; + return kMCDeviceError; } ret = aclmdlAddDatasetBuffer(dataset, data_buffer); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "add data buffer failed"; free_data_buffer(*data_mem_buffer); aclDestroyDataBuffer(data_buffer); - return FAILED; + return kMCDeviceError; } - return SUCCESS; + return kSuccess; } Status ModelProcess::InitOutputsBuffer() { @@ -186,7 +216,7 @@ Status ModelProcess::InitOutputsBuffer() { outputs_ = aclmdlCreateDataset(); if (outputs_ == nullptr) { MS_LOG(ERROR) << "Create input dataset failed"; - return 
FAILED; + return kMCDeviceError; } size_t output_size = aclmdlGetNumOutputs(model_desc_); MS_LOG(INFO) << "output_size = " << output_size; @@ -194,9 +224,9 @@ Status ModelProcess::InitOutputsBuffer() { auto buffer_size = aclmdlGetOutputSizeByIndex(model_desc_, i); void *data_mem_buffer = nullptr; - if (CreateDataBuffer(&data_mem_buffer, buffer_size, outputs_) != SUCCESS) { + if (CreateDataBuffer(&data_mem_buffer, buffer_size, outputs_) != kSuccess) { MS_LOG(ERROR) << "add output data buffer failed, buffer size " << buffer_size; - return FAILED; + return kMCDeviceError; } aclmdlIODims dims; ret = aclmdlGetOutputDims(model_desc_, i, &dims); @@ -207,7 +237,7 @@ Status ModelProcess::InitOutputsBuffer() { } else { aclrtFreeHost(data_mem_buffer); } - return FAILED; + return kMCDeviceError; } aclDataType data_type = aclmdlGetOutputDataType(model_desc_, i); std::vector shape(dims.dims, dims.dims + dims.dimCount); @@ -219,7 +249,7 @@ Status ModelProcess::InitOutputsBuffer() { output_infos_.emplace_back(AclTensorInfo{data_mem_buffer, buffer_size, data_type, shape, output_name}); } MS_LOG(INFO) << "Create model output success"; - return SUCCESS; + return kSuccess; } void ModelProcess::DestroyInputsDataset() { @@ -273,50 +303,60 @@ Status ModelProcess::UnLoad() { auto ret = aclmdlUnload(model_id_); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Unload model failed"; - return FAILED; + return kMCDeviceError; } if (model_desc_ != nullptr) { ret = aclmdlDestroyDesc(model_desc_); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Unload model failed"; - return FAILED; + return kMCDeviceError; } model_desc_ = nullptr; } DestroyInputsBuffer(); DestroyOutputsBuffer(); MS_LOG(INFO) << "End unload model " << model_id_; - return SUCCESS; + return kSuccess; } -Status ModelProcess::CheckAndInitInput(const std::vector &inputs) { +Status ModelProcess::CheckAndInitInput(const std::vector &inputs) { aclError ret; inputs_ = aclmdlCreateDataset(); // check inputs if (inputs.size() != 
input_infos_.size()) { - MS_LOG(ERROR) << "inputs count not match, required count " << input_infos_.size() << ", given count " + MS_LOG(ERROR) << "Inputs count not match, required count " << input_infos_.size() << ", given count " << inputs.size(); - return INVALID_INPUTS; + return kMCInvalidInput; } for (size_t i = 0; i < input_infos_.size(); ++i) { + if (inputs[i].Shape() != input_infos_[i].dims) { + MS_LOG(INFO) << "Note: input " << i << " shape not match, required " << ShapeToString(input_infos_[i].dims) + << ", given " << ShapeToString(inputs[i].Shape()); + } + + if (inputs[i].DataType() != TransToApiType(input_infos_[i].data_type)) { + MS_LOG(INFO) << "Note: input " << i << " data type not match, required " + << TransToApiType(input_infos_[i].data_type) << ", given " << inputs[i].DataType(); + } + if (inputs[i].DataSize() != input_infos_[i].buffer_size) { - MS_LOG(ERROR) << "input " << i << " data size not match, required size " << input_infos_[i].buffer_size + MS_LOG(ERROR) << "Input " << i << " data size not match, required size " << input_infos_[i].buffer_size << ", given count " << inputs[i].DataSize(); - return INVALID_INPUTS; + return kMCInvalidInput; } } // copy inputs for (size_t i = 0; i < input_infos_.size(); ++i) { const auto &info = input_infos_[i]; - const auto &input = inputs[i]; - const void *data = input.Data(); + auto input = inputs[i]; + const void *data = input.MutableData(); void *input_buffer = nullptr; if (!is_run_on_device_) { ret = aclrtMemcpy(info.device_data, info.buffer_size, data, input.DataSize(), ACL_MEMCPY_HOST_TO_DEVICE); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Acl memcpy input " << i << " data to device failed, buffer size " << input.DataSize(); - return FAILED; + return kMCDeviceError; } input_buffer = info.device_data; } else { @@ -325,23 +365,23 @@ Status ModelProcess::CheckAndInitInput(const std::vector &inputs) { auto data_buffer = aclCreateDataBuffer(input_buffer, info.buffer_size); if (data_buffer == nullptr) { 
MS_LOG(ERROR) << "Create Data Buffer failed"; - return FAILED; + return kMCDeviceError; } ret = aclmdlAddDatasetBuffer(inputs_, data_buffer); if (ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "add data buffer failed"; aclDestroyDataBuffer(data_buffer); - return FAILED; + return kMCDeviceError; } } - return SUCCESS; + return kSuccess; } -Status ModelProcess::PredictFromHost(const std::vector &inputs, std::vector *outputs) { +Status ModelProcess::PredictFromHost(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); aclError acl_ret; Status ret = CheckAndInitInput(inputs); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "check or init input failed"; DestroyInputsDataset(); return ret; // forward status error @@ -361,50 +401,48 @@ Status ModelProcess::PredictFromHost(const std::vector &inputs, std::vec DestroyInputsDataset(); if (acl_ret != ACL_ERROR_NONE) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return kMCDeviceError; } ret = BuildOutputs(outputs); - if (ret != SUCCESS) { - MS_LOG(ERROR) << "Build outputs faield"; - return FAILED; + if (ret != kSuccess) { + MS_LOG(ERROR) << "Build outputs failed"; + return ret; } - MS_LOG(INFO) << "excute model success"; - return SUCCESS; + MS_LOG(INFO) << "Execute model success"; + return kSuccess; } -Status ModelProcess::BuildOutputs(std::vector *outputs) { +Status ModelProcess::BuildOutputs(std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); - aclError ret; // copy outputs outputs->clear(); - aclrtMemcpyKind kind = is_run_on_device_ ? 
ACL_MEMCPY_HOST_TO_HOST : ACL_MEMCPY_DEVICE_TO_HOST; - for (size_t i = 0; i < output_infos_.size(); ++i) { - const auto &info = output_infos_[i]; - outputs->emplace_back(Buffer()); - auto output = outputs->rbegin(); - if (!output->ResizeData(info.buffer_size)) { - MS_LOG(ERROR) << "new output data buffer failed, data size " << info.buffer_size; - return FAILED; - } - ret = aclrtMemcpy(output->MutableData(), output->DataSize(), info.device_data, info.buffer_size, kind); - if (ret != ACL_ERROR_NONE) { - MS_LOG(ERROR) << "Memcpy output " << i << " from " << (is_run_on_device_ ? "host" : "device") - << " to host failed, memory size " << info.buffer_size; - return FAILED; - } + auto inner_outputs = GetOutputs(); + if (inner_outputs.size() != output_infos_.size()) { + MS_LOG(ERROR) << "Invalid inner outputs size " << inner_outputs.size() << " do not match device output infos size " + << output_infos_.size(); + return kMCFailed; } - return SUCCESS; + (*outputs) = inner_outputs; + return kSuccess; } -Status ModelProcess::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { - ConstructTensorDesc(input_infos_, names, shapes, data_types, mem_sizes); - return SUCCESS; +std::vector ModelProcess::GetInputs() { + Status ret = ConstructTensors(input_infos_, &input_tensors_); + if (ret != kSuccess) { + MS_LOG(ERROR) << "ConstructTensors failed."; + input_tensors_.clear(); + } + + return input_tensors_; } -Status ModelProcess::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { - ConstructTensorDesc(output_infos_, names, shapes, data_types, mem_sizes); - return SUCCESS; +std::vector ModelProcess::GetOutputs() { + Status ret = ConstructTensors(output_infos_, &output_tensors_); + if (ret != kSuccess) { + MS_LOG(ERROR) << "ConstructTensors failed."; + output_tensors_.clear(); + } + + return output_tensors_; } -} // namespace mindspore::api +} // namespace 
mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/acl/model_process.h b/mindspore/ccsrc/cxx_api/graph/acl/model_process.h index e9c3363bd9..7906b17823 100644 --- a/mindspore/ccsrc/cxx_api/graph/acl/model_process.h +++ b/mindspore/ccsrc/cxx_api/graph/acl/model_process.h @@ -25,7 +25,7 @@ #include "include/api/status.h" #include "include/api/types.h" -namespace mindspore::api { +namespace mindspore { struct AclTensorInfo { void *device_data; size_t buffer_size; @@ -45,14 +45,12 @@ class ModelProcess { input_infos_(), output_infos_() {} ~ModelProcess() {} - Status LoadModelFromFile(const std::string &file_name, uint32_t *model_id); + Status UnLoad(); - Status PredictFromHost(const std::vector &inputs, std::vector *outputs); + Status PredictFromHost(const std::vector &inputs, std::vector *outputs); Status PreInitModelResource(); - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const; + std::vector GetInputs(); + std::vector GetOutputs(); // override this method to avoid request/reply data copy void SetIsDevice(bool is_device) { is_run_on_device_ = is_device; } @@ -62,8 +60,9 @@ class ModelProcess { private: Status CreateDataBuffer(void **data_mem_buffer, size_t buffer_size, aclmdlDataset *dataset); - Status CheckAndInitInput(const std::vector &inputs); - Status BuildOutputs(std::vector *outputs); + Status CheckAndInitInput(const std::vector &inputs); + Status ConstructTensors(const std::vector &acl_tensor_list, std::vector *tensor_list); + Status BuildOutputs(std::vector *outputs); Status InitInputsBuffer(); Status InitOutputsBuffer(); @@ -80,7 +79,9 @@ class ModelProcess { aclmdlDataset *outputs_; std::vector input_infos_; std::vector output_infos_; + std::vector input_tensors_; + std::vector output_tensors_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // 
MINDSPORE_CCSRC_CXXAPI_GRAPH_ACL_MODEL_PROCESS_H diff --git a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc index 9490bcf74b..b0c8572bc3 100644 --- a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.cc @@ -25,91 +25,51 @@ #include "backend/session/executor_manager.h" #include "runtime/device/kernel_runtime_manager.h" -namespace mindspore::api { +namespace mindspore { API_FACTORY_REG(GraphCell::GraphImpl, Ascend910, AscendGraphImpl); AscendGraphImpl::AscendGraphImpl() : session_impl_(nullptr), graph_id_(0), device_type_("Ascend"), - device_id_(Context::Instance().GetDeviceID()), + device_id_(GlobalContext::GetGlobalDeviceID()), context_(nullptr), - inputs_(), - outputs_(), + inputs_info_(), + outputs_info_(), input_names_(), output_names_(), - init_flag_(false), load_flag_(false) {} -AscendGraphImpl::~AscendGraphImpl() { (void)FinalizeEnv(); } +AscendGraphImpl::~AscendGraphImpl() {} Status AscendGraphImpl::InitEnv() { - if (init_flag_) { - return SUCCESS; - } - RegAllOp(); - auto ms_context = MsContext::GetInstance(); - if (ms_context == nullptr) { - MS_LOG(ERROR) << "Get Context failed!"; - return FAILED; - } - - ms_context->set_param(MS_CTX_EXECUTION_MODE, kGraphMode); - ms_context->set_param(MS_CTX_DEVICE_ID, device_id_); - ms_context->set_param(MS_CTX_DEVICE_TARGET, kAscendDevice); - if (!context::OpenTsd(ms_context)) { - MS_LOG(ERROR) << "Session init OpenTsd failed!"; - return FAILED; + MS_LOG(INFO) << "Start to init env."; + env_guard_ = MsEnvGuard::GetEnv(device_id_); + if (env_guard_ == nullptr) { + MS_LOG(ERROR) << "Env init failed."; + return kMCDeviceError; } session_impl_ = session::SessionFactory::Get().Create(kDavinciInferenceDevice); if (session_impl_ == nullptr) { MS_LOG(ERROR) << "Session create failed!, please make sure target device:" << kDavinciInferenceDevice << " is available."; - return FAILED; + return 
kMCFailed; } session_impl_->Init(device_id_); - init_flag_ = true; - return SUCCESS; -} - -Status AscendGraphImpl::FinalizeEnv() { - if (!init_flag_) { - return SUCCESS; - } - - MS_LOG_INFO << "Start finalize env"; - session::ExecutorManager::Instance().Clear(); - device::KernelRuntimeManager::Instance().ClearRuntimeResource(); - - auto ms_context = MsContext::GetInstance(); - if (ms_context == nullptr) { - MS_LOG(ERROR) << "Get Context failed!"; - return FAILED; - } - - { - PythonEnvGuard guard; - if (!context::CloseTsd(ms_context)) { - MS_LOG(ERROR) << "CloseTsd failed!"; - return FAILED; - } - } - - init_flag_ = false; - MS_LOG(INFO) << "End finalize env"; - return SUCCESS; + MS_LOG(INFO) << "InitEnv success."; + return kSuccess; } Status AscendGraphImpl::CompileGraph(const std::shared_ptr &funcGraphPtr) { MS_ASSERT(session_impl_ != nullptr); try { graph_id_ = session_impl_->CompileGraph(NOT_NULL(funcGraphPtr)); - return SUCCESS; + return kSuccess; } catch (std::exception &e) { MS_LOG(ERROR) << "CompileGraph failed: " << e.what(); - return FAILED; + return kMCFailed; } } @@ -128,104 +88,104 @@ Status AscendGraphImpl::CheckModelInputs(const std::vector &i MS_ASSERT(session_impl_ != nullptr); std::string error_msg; if (!session_impl_->CheckModelInputs(graph_id_, inputs, &error_msg)) { - return Status(INVALID_INPUTS, error_msg); + return Status(kMCInvalidInput, error_msg); } - return SUCCESS; + return kSuccess; } -Status AscendGraphImpl::ExecuteModel(const std::vector &request, std::vector *reply) { +Status AscendGraphImpl::ExecuteModel(const std::vector &request, std::vector *reply) { MS_EXCEPTION_IF_NULL(reply); if (context_ == nullptr) { MS_LOG(ERROR) << "rtCtx is nullptr"; - return FAILED; + return kMCDeviceError; } rtError_t rt_ret = rtCtxSetCurrent(context_); if (rt_ret != RT_ERROR_NONE) { MS_LOG(ERROR) << "Set Ascend rtCtx failed"; - return FAILED; + return kMCDeviceError; } vector inputs; for (size_t i = 0; i < request.size(); i++) { - auto &item = 
request[i]; - auto input = inputs_[i]; + auto item = request[i]; + auto input = inputs_info_[i]; if (input->Size() != item.DataSize()) { MS_LOG(ERROR) << "Input " << i << " data size " << item.DataSize() << " not match model input data size " << input->Size(); - return FAILED; + return kMCInvalidInput; } - auto ret = memcpy_s(input->data_c(), input->Size(), item.Data(), item.DataSize()); - if (ret != SUCCESS) { - MS_LOG(ERROR) << "Tensor copy failed"; - return FAILED; + auto ret = memcpy_s(input->data_c(), input->Size(), item.MutableData(), item.DataSize()); + if (ret != kSuccess) { + MS_LOG(ERROR) << "MSTensor copy failed"; + return kMCFailed; } inputs.push_back(input); } - vector outputs = RunGraph(inputs); + last_inputs_ = inputs; + std::vector outputs = RunGraph(inputs); if (outputs.empty()) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return kMCFailed; } + last_outputs_ = outputs; reply->clear(); - std::transform(outputs.begin(), outputs.end(), std::back_inserter(*reply), - [](const tensor::TensorPtr &tensor) { return Buffer(tensor->data_c(), tensor->Size()); }); - return SUCCESS; + *reply = GetOutputs(); + return kSuccess; } -Status AscendGraphImpl::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector AscendGraphImpl::GetInputs() { if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; - return ret; + return {}; } } - GraphUtils::ClearIfNotNull(names); - GraphUtils::ClearIfNotNull(shapes); - GraphUtils::ClearIfNotNull(data_types); - GraphUtils::ClearIfNotNull(mem_sizes); - for (size_t i = 0; i < inputs_.size(); i++) { - auto &tensor = inputs_[i]; - GraphUtils::PushbackIfNotNull(names, input_names_[i]); - GraphUtils::PushbackIfNotNull(shapes, tensor->shape()); - GraphUtils::PushbackIfNotNull(data_types, GraphUtils::TransTypeId2InferDataType(tensor->data_type())); - 
GraphUtils::PushbackIfNotNull(mem_sizes, tensor->Size()); + std::vector result(inputs_info_.size()); + for (size_t i = 0; i < inputs_info_.size(); ++i) { + auto &tensor = inputs_info_[i]; + void *data = nullptr; + size_t data_size = tensor->Size(); + if (i < last_inputs_.size()) { + data = last_inputs_[i]->data_c(); + data_size = last_inputs_[i]->Size(); + } + result[i] = + MSTensor(input_names_[i], static_cast(tensor->data_type()), tensor->shape(), data, data_size); } - return SUCCESS; + return result; } -Status AscendGraphImpl::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector AscendGraphImpl::GetOutputs() { if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; - return ret; + return {}; } } - GraphUtils::ClearIfNotNull(names); - GraphUtils::ClearIfNotNull(shapes); - GraphUtils::ClearIfNotNull(data_types); - GraphUtils::ClearIfNotNull(mem_sizes); - for (size_t i = 0; i < outputs_.size(); i++) { - auto &tensor = outputs_[i]; - GraphUtils::PushbackIfNotNull(names, output_names_[i]); - GraphUtils::PushbackIfNotNull(shapes, tensor->shape()); - GraphUtils::PushbackIfNotNull(data_types, GraphUtils::TransTypeId2InferDataType(tensor->data_type())); - GraphUtils::PushbackIfNotNull(mem_sizes, tensor->Size()); + std::vector result(outputs_info_.size()); + for (size_t i = 0; i < outputs_info_.size(); ++i) { + auto &tensor = outputs_info_[i]; + void *data = nullptr; + size_t data_size = tensor->Size(); + if (i < last_outputs_.size()) { + data = last_outputs_[i]->data_c(); + data_size = last_outputs_[i]->Size(); + } + result[i] = + MSTensor(output_names_[i], static_cast(tensor->data_type()), tensor->shape(), data, data_size); } - - return SUCCESS; + return result; } Status AscendGraphImpl::Load() { // check graph type if (graph_->ModelType() != ModelType::kMindIR) { MS_LOG(ERROR) << "Unsupported model type " << 
graph_->ModelType(); - return INVALID_INPUTS; + return kMCInvalidInput; } const auto &graph_data = GraphImpl::MutableGraphData(); @@ -234,34 +194,34 @@ Status AscendGraphImpl::Load() { // init Status ret = InitEnv(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; - return FAILED; + return ret; } // load model if (!load_flag_) { ret = CompileGraph(func_graph); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Compile graph model failed"; - return FAILED; + return ret; } - session_impl_->GetModelInputsInfo(graph_id_, &inputs_, &input_names_); - session_impl_->GetModelOutputsInfo(graph_id_, &outputs_, &output_names_); - if (inputs_.empty() || inputs_.size() != input_names_.size()) { + session_impl_->GetModelInputsInfo(graph_id_, &inputs_info_, &input_names_); + session_impl_->GetModelOutputsInfo(graph_id_, &outputs_info_, &output_names_); + if (inputs_info_.empty() || inputs_info_.size() != input_names_.size()) { MS_LOG_ERROR << "Get model inputs info failed"; - return FAILED; + return kMCInvalidInput; } - if (outputs_.empty() || outputs_.size() != output_names_.size()) { + if (outputs_info_.empty() || outputs_info_.size() != output_names_.size()) { MS_LOG_ERROR << "Get model outputs info failed"; - return FAILED; + return kMCInvalidInput; } // save d context rtError_t rt_ret = rtCtxGetCurrent(&context_); if (rt_ret != RT_ERROR_NONE || context_ == nullptr) { MS_LOG(ERROR) << "the ascend device context is null"; - return FAILED; + return kMCDeviceError; } MS_LOG(INFO) << "Load model success"; @@ -271,44 +231,112 @@ Status AscendGraphImpl::Load() { rtError_t rt_ret = rtCtxSetCurrent(context_); if (rt_ret != RT_ERROR_NONE) { MS_LOG(ERROR) << "Set the ascend device context failed"; - return FAILED; + return kMCDeviceError; } - return SUCCESS; + return kSuccess; } -Status AscendGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { +Status AscendGraphImpl::Run(const std::vector &inputs, std::vector *outputs) 
{ MS_EXCEPTION_IF_NULL(outputs); if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return ret; } } - if (inputs.size() != inputs_.size()) { - MS_LOG(ERROR) << "inputs count not match, required count " << inputs_.size() << ", given count " << inputs.size(); - return INVALID_INPUTS; + if (inputs.size() != inputs_info_.size()) { + MS_LOG(ERROR) << "inputs count not match, required count " << inputs_info_.size() << ", given count " + << inputs.size(); + return kMCInvalidInput; } - for (size_t i = 0; i < inputs_.size(); ++i) { - if (inputs[i].DataSize() != inputs_[i]->Size()) { - MS_LOG(ERROR) << "input " << i << " data size not match, required size " << inputs_[i]->Size() << ", given count " - << inputs[i].DataSize(); - return INVALID_INPUTS; + for (size_t i = 0; i < inputs_info_.size(); ++i) { + if (inputs[i].DataSize() != inputs_info_[i]->Size()) { + MS_LOG(ERROR) << "input " << i << " data size not match, required size " << inputs_info_[i]->Size() + << ", given count " << inputs[i].DataSize(); + return kMCInvalidInput; } } - if (ExecuteModel(inputs, outputs) != SUCCESS) { + + Status ret = ExecuteModel(inputs, outputs); + if (ret != kSuccess) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return ret; } - if (outputs_.size() != outputs->size()) { + if (outputs_info_.size() != outputs->size()) { MS_LOG(ERROR) << "Predict output size " << outputs->size() << " not match output size got from model info " - << outputs_.size(); - return FAILED; + << outputs_info_.size(); + return kMCFailed; + } + + return kSuccess; +} + +AscendGraphImpl::MsEnvGuard::MsEnvGuard(uint32_t device_id) { + MS_LOG(INFO) << "Start to init env."; + device_id_ = device_id; + RegAllOp(); + auto ms_context = MsContext::GetInstance(); + if (ms_context == nullptr) { + MS_LOG(ERROR) << "Get Context failed!"; + errno_ = kMCFailed; + return; + } + + ms_context->set_param(MS_CTX_EXECUTION_MODE, kGraphMode); + 
ms_context->set_param(MS_CTX_DEVICE_ID, device_id_); + ms_context->set_param(MS_CTX_DEVICE_TARGET, kAscendDevice); + auto ret = rtSetDevice(device_id_); + if (ret != RT_ERROR_NONE) { + MS_LOG(EXCEPTION) << "Device " << device_id_ << " call rtSetDevice failed, ret[" << static_cast(ret) << "]"; + } + + MS_LOG(INFO) << "InitEnv success."; + errno_ = kSuccess; +} + +AscendGraphImpl::MsEnvGuard::~MsEnvGuard() { + MS_LOG(INFO) << "Start finalize env"; + session::ExecutorManager::Instance().Clear(); + device::KernelRuntimeManager::Instance().ClearRuntimeResource(); + + auto ms_context = MsContext::GetInstance(); + if (ms_context == nullptr) { + MS_LOG(ERROR) << "Get Context failed!"; + errno_ = kMCFailed; + return; } - return SUCCESS; + auto ret = rtDeviceReset(device_id_); + if (ret != RT_ERROR_NONE) { + MS_LOG(EXCEPTION) << "Device " << device_id_ << " call rtDeviceReset failed, ret[" << static_cast(ret) << "]"; + } + + errno_ = kSuccess; + MS_LOG(INFO) << "End finalize env"; +} + +std::shared_ptr AscendGraphImpl::MsEnvGuard::GetEnv(uint32_t device_id) { + std::shared_ptr acl_env; + std::lock_guard lock(global_ms_env_mutex_); + acl_env = global_ms_env_.lock(); + if (acl_env != nullptr) { + MS_LOG(INFO) << "Env has been initialized, skip."; + } else { + acl_env = std::make_shared(device_id); + if (acl_env->GetErrno() != kSuccess) { + MS_LOG(ERROR) << "Execute aclInit Failed"; + return nullptr; + } + global_ms_env_ = acl_env; + MS_LOG(INFO) << "Env init success"; + } + return acl_env; } -} // namespace mindspore::api + +std::weak_ptr AscendGraphImpl::MsEnvGuard::global_ms_env_; +std::mutex AscendGraphImpl::MsEnvGuard::global_ms_env_mutex_; +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h index fae683558e..c4595dab93 100644 --- a/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/ascend/ascend_graph_impl.h @@ -28,40 +28,56 @@ 
#include "ir/anf.h" #include "cxx_api/model/model_impl.h" #include "runtime/context.h" -#include "cxx_api/graph/graph_utils.h" -namespace mindspore::api { +namespace mindspore { class AscendGraphImpl : public GraphCell::GraphImpl { public: AscendGraphImpl(); ~AscendGraphImpl() override; - Status Run(const std::vector &inputs, std::vector *outputs) override; + Status Run(const std::vector &inputs, std::vector *outputs) override; Status Load() override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: + class MsEnvGuard; + Status InitEnv(); - Status FinalizeEnv(); Status CompileGraph(const std::shared_ptr &funcGraphPtr); Status CheckModelInputs(const std::vector &inputs) const; std::vector RunGraph(const std::vector &inputs); - Status ExecuteModel(const std::vector &inputs, std::vector *outputs); + Status ExecuteModel(const std::vector &inputs, std::vector *outputs); std::shared_ptr session_impl_; uint32_t graph_id_; std::string device_type_; uint32_t device_id_; rtContext_t context_; - std::vector inputs_; - std::vector outputs_; + std::vector inputs_info_; + std::vector outputs_info_; + std::vector last_inputs_; + std::vector last_outputs_; std::vector input_names_; std::vector output_names_; - bool init_flag_; bool load_flag_; + + std::shared_ptr env_guard_; +}; + +class AscendGraphImpl::MsEnvGuard { + public: + explicit MsEnvGuard(uint32_t device_id); + ~MsEnvGuard(); + Status GetErrno() const { return errno_; } + static std::shared_ptr GetEnv(uint32_t device_id); + + private: + static std::weak_ptr global_ms_env_; + static std::mutex global_ms_env_mutex_; + + Status errno_; + uint32_t device_id_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // 
MINDSPORE_CCSRC_CXX_API_GRAPH_MS_ASCEND_GRAPH_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc index 6af3a9ab9b..ff7719a715 100644 --- a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc +++ b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.cc @@ -23,15 +23,15 @@ #include "backend/session/executor_manager.h" #include "runtime/device/kernel_runtime_manager.h" -namespace mindspore::api { +namespace mindspore { API_FACTORY_REG(GraphCell::GraphImpl, GPU, GPUGraphImpl); GPUGraphImpl::GPUGraphImpl() : session_impl_(nullptr), graph_id_(0), - device_id_(Context::Instance().GetDeviceID()), - inputs_(), - outputs_(), + device_id_(GlobalContext::GetGlobalDeviceID()), + inputs_info_(), + outputs_info_(), input_names_(), output_names_(), init_flag_(false), @@ -40,13 +40,13 @@ GPUGraphImpl::GPUGraphImpl() Status GPUGraphImpl::InitEnv() { if (init_flag_) { MS_LOG(WARNING) << "Initialized again, return success."; - return SUCCESS; + return kSuccess; } auto ms_context = MsContext::GetInstance(); if (ms_context == nullptr) { MS_LOG(ERROR) << "Get Context failed!"; - return FAILED; + return kMCFailed; } ms_context->set_param(MS_CTX_EXECUTION_MODE, kGraphMode); ms_context->set_param(MS_CTX_DEVICE_ID, device_id_); @@ -57,18 +57,18 @@ Status GPUGraphImpl::InitEnv() { if (session_impl_ == nullptr) { MS_LOG(ERROR) << "Session create failed!, please make sure target device:" << kGpuInferenceDevice << " is available."; - return FAILED; + return kMCFailed; } session_impl_->Init(device_id_); init_flag_ = true; - return SUCCESS; + return kSuccess; } Status GPUGraphImpl::FinalizeEnv() { if (!init_flag_) { MS_LOG(WARNING) << "Never initialize before, return success"; - return SUCCESS; + return kSuccess; } MS_LOG_INFO << "Start finalize env"; @@ -77,14 +77,14 @@ Status GPUGraphImpl::FinalizeEnv() { init_flag_ = false; MS_LOG(INFO) << "End finalize env"; - return SUCCESS; + return kSuccess; } Status 
GPUGraphImpl::Load() { // check graph type if (graph_->ModelType() != ModelType::kMindIR) { MS_LOG(ERROR) << "Unsupported model type " << graph_->ModelType(); - return INVALID_INPUTS; + return kMCInvalidInput; } const auto &graph_data = GraphImpl::MutableGraphData(); @@ -93,38 +93,38 @@ Status GPUGraphImpl::Load() { // init Status ret = InitEnv(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "InitEnv failed."; - return FAILED; + return kMCDeviceError; } ret = CompileGraph(func_graph); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Compile graph model failed"; - return FAILED; + return kMCFailed; } - session_impl_->GetModelInputsInfo(graph_id_, &inputs_, &input_names_); - session_impl_->GetModelOutputsInfo(graph_id_, &outputs_, &output_names_); - if (inputs_.empty() || inputs_.size() != input_names_.size()) { + session_impl_->GetModelInputsInfo(graph_id_, &inputs_info_, &input_names_); + session_impl_->GetModelOutputsInfo(graph_id_, &outputs_info_, &output_names_); + if (inputs_info_.empty() || inputs_info_.size() != input_names_.size()) { MS_LOG_ERROR << "Get model inputs info failed"; - return FAILED; + return kMCInvalidInput; } - if (outputs_.empty() || outputs_.size() != output_names_.size()) { + if (outputs_info_.empty() || outputs_info_.size() != output_names_.size()) { MS_LOG_ERROR << "Get model outputs info failed"; - return FAILED; + return kMCInvalidInput; } load_flag_ = true; - return SUCCESS; + return kSuccess; } Status GPUGraphImpl::CompileGraph(const std::shared_ptr &funcGraphPtr) { MS_ASSERT(session_impl_ != nullptr); try { graph_id_ = session_impl_->CompileGraph(NOT_NULL(funcGraphPtr)); - return SUCCESS; + return kSuccess; } catch (std::exception &e) { MS_LOG(ERROR) << "CompileGraph failed: " << e.what(); - return FAILED; + return kMCFailed; } } @@ -139,118 +139,118 @@ std::vector GPUGraphImpl::RunGraph(const std::vector &request, std::vector *reply) { +Status GPUGraphImpl::ExecuteModel(const std::vector 
&request, std::vector *reply) { MS_EXCEPTION_IF_NULL(reply); vector inputs; for (size_t i = 0; i < request.size(); i++) { auto &item = request[i]; - auto input = inputs_[i]; + auto input = inputs_info_[i]; if (input->Size() != item.DataSize()) { MS_LOG(ERROR) << "Input " << i << " data size " << item.DataSize() << " not match model input data size " << input->Size(); - return FAILED; + return kMCInvalidInput; } - auto ret = memcpy_s(input->data_c(), input->Size(), item.Data(), item.DataSize()); - if (ret != SUCCESS) { + auto ret = memcpy_s(input->data_c(), input->Size(), item.Data().get(), item.DataSize()); + if (ret != kSuccess) { MS_LOG(ERROR) << "Tensor copy failed"; - return FAILED; + return kMCFailed; } inputs.push_back(input); } - vector outputs = RunGraph(inputs); + last_inputs_ = inputs; + std::vector outputs = RunGraph(inputs); if (outputs.empty()) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return kMCFailed; } + last_outputs_ = outputs; reply->clear(); - std::transform(outputs.begin(), outputs.end(), std::back_inserter(*reply), - [](const tensor::TensorPtr &tensor) { return Buffer(tensor->data_c(), tensor->Size()); }); - return SUCCESS; + *reply = GetOutputs(); + return kSuccess; } -Status GPUGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { +Status GPUGraphImpl::Run(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; return ret; } } - if (inputs.size() != inputs_.size()) { - MS_LOG(ERROR) << "inputs count not match, required count " << inputs_.size() << ", given count " << inputs.size(); - return INVALID_INPUTS; + if (inputs.size() != inputs_info_.size()) { + MS_LOG(ERROR) << "inputs count not match, required count " << inputs_info_.size() << ", given count " + << inputs.size(); + return kMCInvalidInput; } - for (size_t i = 0; i < inputs_.size(); ++i) { - if 
(inputs[i].DataSize() != inputs_[i]->Size()) { - MS_LOG(ERROR) << "input " << i << " data size not match, required size " << inputs_[i]->Size() << ", given count " - << inputs[i].DataSize(); - return INVALID_INPUTS; + for (size_t i = 0; i < inputs_info_.size(); ++i) { + if (inputs[i].DataSize() != inputs_info_[i]->Size()) { + MS_LOG(ERROR) << "input " << i << " data size not match, required size " << inputs_info_[i]->Size() + << ", given count " << inputs[i].DataSize(); + return kMCInvalidInput; } } - if (ExecuteModel(inputs, outputs) != SUCCESS) { + if (ExecuteModel(inputs, outputs) != kSuccess) { MS_LOG(ERROR) << "Execute Model Failed"; - return FAILED; + return kMCFailed; } - if (outputs_.size() != outputs->size()) { + if (outputs_info_.size() != outputs->size()) { MS_LOG(ERROR) << "Predict output size " << outputs->size() << " not match output size got from model info " - << outputs_.size(); - return FAILED; + << outputs_info_.size(); + return kMCFailed; } - return SUCCESS; + return kSuccess; } -Status GPUGraphImpl::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector GPUGraphImpl::GetInputs() { if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; - return ret; + return {}; } } - GraphUtils::ClearIfNotNull(names); - GraphUtils::ClearIfNotNull(shapes); - GraphUtils::ClearIfNotNull(data_types); - GraphUtils::ClearIfNotNull(mem_sizes); - for (size_t i = 0; i < inputs_.size(); i++) { - auto &tensor = inputs_[i]; - GraphUtils::PushbackIfNotNull(names, input_names_[i]); - GraphUtils::PushbackIfNotNull(shapes, tensor->shape()); - GraphUtils::PushbackIfNotNull(data_types, GraphUtils::TransTypeId2InferDataType(tensor->data_type())); - GraphUtils::PushbackIfNotNull(mem_sizes, tensor->Size()); + std::vector result(inputs_info_.size()); + for (size_t i = 0; i < inputs_info_.size(); ++i) { + auto &tensor = inputs_info_[i]; + void 
*data = nullptr; + size_t data_size = tensor->Size(); + if (i < last_inputs_.size()) { + data = last_inputs_[i]->data_c(); + data_size = last_inputs_[i]->Size(); + } + result[i] = + MSTensor(input_names_[i], static_cast(tensor->data_type()), tensor->shape(), data, data_size); } - return SUCCESS; + return result; } -Status GPUGraphImpl::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) { +std::vector GPUGraphImpl::GetOutputs() { if (!load_flag_) { Status ret = Load(); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "PrepareModel failed."; - return ret; + return {}; } } - GraphUtils::ClearIfNotNull(names); - GraphUtils::ClearIfNotNull(shapes); - GraphUtils::ClearIfNotNull(data_types); - GraphUtils::ClearIfNotNull(mem_sizes); - for (size_t i = 0; i < outputs_.size(); i++) { - auto &tensor = outputs_[i]; - GraphUtils::PushbackIfNotNull(names, output_names_[i]); - GraphUtils::PushbackIfNotNull(shapes, tensor->shape()); - GraphUtils::PushbackIfNotNull(data_types, GraphUtils::TransTypeId2InferDataType(tensor->data_type())); - GraphUtils::PushbackIfNotNull(mem_sizes, tensor->Size()); + std::vector result(outputs_info_.size()); + for (size_t i = 0; i < outputs_info_.size(); ++i) { + auto &tensor = outputs_info_[i]; + void *data = nullptr; + size_t data_size = tensor->Size(); + if (i < last_outputs_.size()) { + data = last_outputs_[i]->data_c(); + data_size = last_outputs_[i]->Size(); + } + result[i] = + MSTensor(output_names_[i], static_cast(tensor->data_type()), tensor->shape(), data, data_size); } - - return SUCCESS; + return result; } - -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h index fca0323f82..0058e7fbcd 100644 --- a/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/gpu/gpu_graph_impl.h @@ -25,20 +25,17 @@ #include 
"backend/session/session_basic.h" #include "ir/anf.h" #include "cxx_api/model/model_impl.h" -#include "cxx_api/graph/graph_utils.h" -namespace mindspore::api { +namespace mindspore { class GPUGraphImpl : public GraphCell::GraphImpl { public: GPUGraphImpl(); ~GPUGraphImpl() override = default; - Status Run(const std::vector &inputs, std::vector *outputs) override; + Status Run(const std::vector &inputs, std::vector *outputs) override; Status Load() override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: Status InitEnv(); @@ -46,14 +43,16 @@ class GPUGraphImpl : public GraphCell::GraphImpl { Status CompileGraph(const std::shared_ptr &funcGraphPtr); Status CheckModelInputs(const std::vector &inputs) const; std::vector RunGraph(const std::vector &inputs); - Status ExecuteModel(const std::vector &inputs, std::vector *outputs); + Status ExecuteModel(const std::vector &inputs, std::vector *outputs); std::shared_ptr session_impl_; uint32_t graph_id_; std::string device_type_; uint32_t device_id_; - std::vector inputs_; - std::vector outputs_; + std::vector inputs_info_; + std::vector outputs_info_; + std::vector last_inputs_; + std::vector last_outputs_; std::vector input_names_; std::vector output_names_; bool init_flag_; @@ -63,5 +62,5 @@ class GPUGraphImpl : public GraphCell::GraphImpl { uint32_t batch_size_; uint32_t workspace_size_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_MS_GPU_GRAPH_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/graph/graph.cc b/mindspore/ccsrc/cxx_api/graph/graph.cc index 902bbcabb0..e38266d624 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph.cc +++ b/mindspore/ccsrc/cxx_api/graph/graph.cc @@ -17,15 +17,19 @@ 
#include "cxx_api/graph/graph_data.h" #include "utils/log_adapter.h" -namespace mindspore::api { +namespace mindspore { Graph::Graph(const std::shared_ptr &graph_data) : graph_data_(graph_data) {} Graph::Graph(std::shared_ptr &&graph_data) : graph_data_(graph_data) {} Graph::~Graph() {} +Graph::Graph(std::nullptr_t) : graph_data_(nullptr) {} + +bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; } + ModelType Graph::ModelType() const { MS_EXCEPTION_IF_NULL(graph_data_); return graph_data_->ModelType(); } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/graph_data.cc b/mindspore/ccsrc/cxx_api/graph/graph_data.cc index a1092e21b1..85be64f8ff 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph_data.cc +++ b/mindspore/ccsrc/cxx_api/graph/graph_data.cc @@ -19,7 +19,7 @@ #include "framework/common/helper/model_helper.h" #endif -namespace mindspore::api { +namespace mindspore { Graph::GraphData::GraphData(const FuncGraphPtr &func_graph, enum ModelType model_type) : func_graph_(nullptr), om_data_(), model_type_(ModelType::kUnknownType) { if (model_type != ModelType::kMindIR) { @@ -72,4 +72,4 @@ Buffer Graph::GraphData::GetOMData() const { return om_data_; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/graph/graph_data.h b/mindspore/ccsrc/cxx_api/graph/graph_data.h index 7e7a2ac9c5..7b84ee5efb 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph_data.h +++ b/mindspore/ccsrc/cxx_api/graph/graph_data.h @@ -24,7 +24,7 @@ #include "include/api/types.h" #include "ir/func_graph.h" -namespace mindspore::api { +namespace mindspore { class Graph::GraphData { public: GraphData(); @@ -46,5 +46,5 @@ class Graph::GraphData { Buffer om_data_; enum ModelType model_type_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_DATA_H diff --git a/mindspore/ccsrc/cxx_api/graph/graph_impl.h 
b/mindspore/ccsrc/cxx_api/graph/graph_impl.h index a2c651c4cf..42c843225d 100644 --- a/mindspore/ccsrc/cxx_api/graph/graph_impl.h +++ b/mindspore/ccsrc/cxx_api/graph/graph_impl.h @@ -26,7 +26,7 @@ #include "cxx_api/graph/graph_data.h" #include "utils/utils.h" -namespace mindspore::api { +namespace mindspore { class GraphCell::GraphImpl { public: GraphImpl() = default; @@ -35,17 +35,14 @@ class GraphCell::GraphImpl { std::shared_ptr &MutableGraphData() const { return graph_->graph_data_; } void SetGraph(const std::shared_ptr &graph) { graph_ = graph; } - virtual Status Run(const std::vector &inputs, std::vector *outputs) = 0; + virtual Status Run(const std::vector &inputs, std::vector *outputs) = 0; virtual Status Load() = 0; - virtual Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) = 0; - virtual Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) = 0; + virtual std::vector GetInputs() = 0; + virtual std::vector GetOutputs() = 0; protected: std::shared_ptr graph_; }; -} // namespace mindspore::api - +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/graph/graph_utils.h b/mindspore/ccsrc/cxx_api/graph/graph_utils.h deleted file mode 100644 index 6a087e019d..0000000000 --- a/mindspore/ccsrc/cxx_api/graph/graph_utils.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_UTILS_H -#define MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_UTILS_H -#include -#include -#include "include/api/types.h" -#include "ir/dtype/type_id.h" -#include "utils/log_adapter.h" - -namespace mindspore::api { -class GraphUtils { - public: - static DataType TransTypeId2InferDataType(TypeId type_id) { - const std::map id2type_map{ - {TypeId::kNumberTypeBegin, api::kMsUnknown}, {TypeId::kNumberTypeBool, api::kMsBool}, - {TypeId::kNumberTypeFloat64, api::kMsFloat64}, {TypeId::kNumberTypeInt8, api::kMsInt8}, - {TypeId::kNumberTypeUInt8, api::kMsUint8}, {TypeId::kNumberTypeInt16, api::kMsInt16}, - {TypeId::kNumberTypeUInt16, api::kMsUint16}, {TypeId::kNumberTypeInt32, api::kMsInt32}, - {TypeId::kNumberTypeUInt32, api::kMsUint32}, {TypeId::kNumberTypeInt64, api::kMsInt64}, - {TypeId::kNumberTypeUInt64, api::kMsUint64}, {TypeId::kNumberTypeFloat16, api::kMsFloat16}, - {TypeId::kNumberTypeFloat32, api::kMsFloat32}, - }; - - auto it = id2type_map.find(type_id); - if (it != id2type_map.end()) { - return it->second; - } - - MS_LOG(WARNING) << "Unsupported data id " << type_id; - return api::kMsUnknown; - } - - template - inline static void ClearIfNotNull(T *vec) { - if (vec != nullptr) { - vec->clear(); - } - } - - template - inline static void PushbackIfNotNull(U *vec, T &&item) { - if (vec != nullptr) { - vec->emplace_back(item); - } - } -}; -} // namespace mindspore::api - -#endif // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_UTILS_H diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc b/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc index 57dfb03387..0b299c429f 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model.cc @@ -16,47 +16,53 @@ #include "cxx_api/model/acl/acl_model.h" #include +#include "include/api/context.h" #include "cxx_api/factory.h" -#include 
"cxx_api/python_utils.h" -namespace mindspore::api { +namespace mindspore { API_FACTORY_REG(ModelImpl, Ascend310, AclModel); -Status AclModel::Build(const std::map &options_map) { +Status AclModel::Build() { MS_LOG(INFO) << "Start build model."; MS_EXCEPTION_IF_NULL(graph_); - std::unique_ptr options = std::make_unique(options_map); - std::string options_str = GenerateOptionsStr(options_map); - MS_EXCEPTION_IF_NULL(options); - if (graph_cell_ != nullptr && options_str == options_str_) { + + if (graph_cell_ != nullptr) { MS_LOG(INFO) << "This model has been built, skip."; - return SUCCESS; + return kSuccess; } if (graph_cell_ == nullptr && graph_->ModelType() == ModelType::kOM) { + MS_LOG(INFO) << "Note: Load om model and all build options will be ignored."; graph_cell_ = std::make_shared(graph_); MS_EXCEPTION_IF_NULL(graph_cell_); - if (!options_map.empty()) { - MS_LOG(WARNING) << "All build options will be ignored."; - } - return SUCCESS; + return kSuccess; } - auto func_graph = ModelImpl::GetFuncGraph(); - MS_EXCEPTION_IF_NULL(func_graph); - model_converter_.set_options(options.get()); - auto om_data = model_converter_.LoadMindIR(func_graph); - if (om_data.Data() == nullptr || om_data.DataSize() == 0) { - MS_LOG(ERROR) << "Load MindIR failed."; - return FAILED; + std::unique_ptr options = std::make_unique(model_context_); + MS_EXCEPTION_IF_NULL(options); + std::string options_key = options->GenAclOptionsKey(); + std::shared_ptr graph; + if (auto iter = dynamic_size_graph_map_.find(options_key); iter != dynamic_size_graph_map_.end()) { + MS_LOG(INFO) << "This options has been built, read cache."; + graph = iter->second; + } else { + auto func_graph = ModelImpl::GetFuncGraph(); + MS_EXCEPTION_IF_NULL(func_graph); + model_converter_.set_options(options.get()); + auto om_data = model_converter_.LoadMindIR(func_graph); + if (om_data.Data() == nullptr || om_data.DataSize() == 0) { + MS_LOG(ERROR) << "Load MindIR failed."; + return kMCFailed; + } + graph = 
std::make_shared(std::make_shared(om_data, ModelType::kOM)); + dynamic_size_graph_map_[options_key] = graph; } - auto graph = std::make_shared(std::make_shared(om_data, ModelType::kOM)); MS_EXCEPTION_IF_NULL(graph); auto graph_cell = std::make_shared(graph); MS_EXCEPTION_IF_NULL(graph_cell); auto ret = ModelImpl::Load(graph_cell); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Load failed."; return ret; } @@ -64,64 +70,97 @@ Status AclModel::Build(const std::map &options_map) { // save result graph_cell_ = graph_cell; options_ = std::move(options); - options_str_ = options_str; MS_LOG(INFO) << "Build model success."; - return SUCCESS; + return kSuccess; } -Status AclModel::Train(const DataSet &, std::map *) { - MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; -} +Status AclModel::Resize(const std::vector &inputs, const std::vector> &dims) { + MS_LOG(INFO) << "Start to resize model."; + MS_EXCEPTION_IF_NULL(graph_); + if (graph_->ModelType() == ModelType::kOM) { + MS_LOG(ERROR) << "OM model is not supported to resize model."; + return kMCFailed; + } + + auto origin_inputs = GetInputs(); + if (inputs.size() != origin_inputs.size()) { + MS_LOG(ERROR) << "Invalid inputs size " << inputs.size() << " not match model inputs size " << origin_inputs.size(); + return kMCInvalidInput; + } + + if (inputs.size() != dims.size()) { + MS_LOG(ERROR) << "Invalid dims size " << dims.size() << " not match inputs size " << inputs.size(); + return kMCInvalidInput; + } -Status AclModel::Eval(const DataSet &, std::map *) { - MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + if (model_context_ == nullptr) { + model_context_ = std::make_shared(); + } + + std::string input_shape_option; + for (size_t i = 0; i < inputs.size(); ++i) { + if (inputs[i].Name() != origin_inputs[i].Name()) { + MS_LOG(ERROR) << "Invalid inputs " << i << " name " << inputs[i].Name() << " not match model input name " + << origin_inputs[i].Name(); + return kMCInvalidInput; + } + 
input_shape_option += inputs[i].Name() + ":"; + for (size_t j = 0; j < dims[i].size(); ++j) { + input_shape_option += std::to_string(dims[i][j]); + if (j + 1 < dims[i].size()) { + input_shape_option += ","; + } + } + if (i + 1 < inputs.size()) { + input_shape_option += ";"; + } + } + MS_LOG(INFO) << "Set input size option is " << input_shape_option; + ModelContext::SetInputShape(model_context_, input_shape_option); + auto graph_cell_bak = std::move(graph_cell_); + auto ret = Build(); + if (ret != kSuccess) { + MS_LOG(INFO) << "Resize build failed."; + graph_cell_ = std::move(graph_cell_bak); + return ret; + } + MS_LOG(INFO) << "Resize success."; + return kSuccess; } -Status AclModel::Predict(const std::vector &inputs, std::vector *outputs) { +Status AclModel::Predict(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (graph_ == nullptr) { MS_LOG(ERROR) << "Invalid data, graph_ is null."; - return FAILED; + return kMCFailed; } if (graph_cell_ == nullptr) { MS_LOG(WARNING) << "Model has not been built, it will be built with default options"; - Status ret = Build({}); - if (ret != SUCCESS) { + Status ret = Build(); + if (ret != kSuccess) { MS_LOG(ERROR) << "Build model failed."; - return FAILED; + return ret; } } MS_EXCEPTION_IF_NULL(graph_cell_); Status ret = graph_cell_->Run(inputs, outputs); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Run graph failed."; - return FAILED; + return ret; } - return SUCCESS; + return kSuccess; } -Status AclModel::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector AclModel::GetInputs() { MS_EXCEPTION_IF_NULL(graph_cell_); - return graph_cell_->GetInputsInfo(names, shapes, data_types, mem_sizes); + return graph_cell_->GetInputs(); } -Status AclModel::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector AclModel::GetOutputs() { 
MS_EXCEPTION_IF_NULL(graph_cell_); - return graph_cell_->GetOutputsInfo(names, shapes, data_types, mem_sizes); -} - -std::string AclModel::GenerateOptionsStr(const std::map &options) { - std::string ret; - for (auto &[key, value] : options) { - ret += key + "^" + value + "^^"; - } - return ret; + return graph_cell_->GetOutputs(); } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model.h b/mindspore/ccsrc/cxx_api/model/acl/acl_model.h index 4455eba7d1..f9097779d7 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model.h +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model.h @@ -31,30 +31,25 @@ #include "ir/tensor.h" #include "ir/anf.h" -namespace mindspore::api { +namespace mindspore { class AclModel : public ModelImpl { public: - AclModel() : model_converter_(), options_(nullptr), options_str_() {} + AclModel() : model_converter_(), options_(nullptr) {} ~AclModel() = default; - Status Build(const std::map &options_map) override; + Status Build() override; + Status Resize(const std::vector &inputs, const std::vector> &dims) override; - Status Train(const DataSet &dataset, std::map *outputs) override; - Status Eval(const DataSet &dataset, std::map *outputs) override; - Status Predict(const std::vector &inputs, std::vector *outputs) override; + Status Predict(const std::vector &inputs, std::vector *outputs) override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: - static std::string GenerateOptionsStr(const std::map &options); - std::shared_ptr graph_cell_; ModelConverter model_converter_; std::unique_ptr options_; - std::string options_str_; + std::map> dynamic_size_graph_map_; }; -} // namespace mindspore::api +} 
// namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_ACL_MODEL_H diff --git a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc index 3f6cb0d41e..ffc059f770 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/acl_model_options.cc @@ -18,23 +18,31 @@ #include "utils/log_adapter.h" #include "external/ge/ge_api_types.h" -namespace mindspore::api { -static std::string ParseOption(const std::map &options, const std::string &key) { - auto iter = options.find(key); - if (iter != options.end()) { - return iter->second; +namespace mindspore { +static const std::map kSupportedDtypeOptionMap = {{DataType::kNumberTypeFloat16, "FP16"}, + {DataType::kNumberTypeFloat32, "FP32"}, + {DataType::kNumberTypeUInt8, "UINT8"}}; + +AclModelOptions::AclModelOptions(const std::shared_ptr &context) { + if (context == nullptr) { + return; + } + insert_op_cfg_path = ModelContext::GetInsertOpConfigPath(context); + input_format = ModelContext::GetInputFormat(context); + input_shape = ModelContext::GetInputShape(context); + + auto out_type = ModelContext::GetOutputType(context); + auto iter = kSupportedDtypeOptionMap.find(out_type); + if (out_type == DataType::kTypeUnknown) { + // do nothing + } else if (iter == kSupportedDtypeOptionMap.end()) { + MS_LOG(WARNING) << "Unsupported output type " << out_type << ", use FP32 as default."; + } else { + output_type = iter->second; } - return ""; -} -AclModelOptions::AclModelOptions(const std::map &options) { - // to acl - insert_op_cfg_path = ParseOption(options, kModelOptionInsertOpCfgPath); - input_format = ParseOption(options, kModelOptionInputFormat); - input_shape = ParseOption(options, kModelOptionInputShape); - output_type = ParseOption(options, kModelOptionOutputType); - precision_mode = ParseOption(options, kModelOptionPrecisionMode); - op_select_impl_mode = ParseOption(options, kModelOptionOpSelectImplMode); + precision_mode 
= ModelContext::GetPrecisionMode(context); + op_select_impl_mode = ModelContext::GetOpSelectImplMode(context); } std::tuple, std::map> AclModelOptions::GenAclOptions() @@ -69,4 +77,16 @@ std::tuple, std::map #include #include +#include #include "include/api/types.h" #include "include/api/status.h" +#include "include/api/context.h" -namespace mindspore::api { +namespace mindspore { struct AclModelOptions { - std::string output_node; // todo: at convert.cc::BuildGraph(), no atc options // build options std::string insert_op_cfg_path; std::string input_format; @@ -35,12 +36,13 @@ struct AclModelOptions { std::string op_select_impl_mode; std::string soc_version = "Ascend310"; - explicit AclModelOptions(const std::map &options); + explicit AclModelOptions(const std::shared_ptr &context); ~AclModelOptions() = default; // return tuple std::tuple, std::map> GenAclOptions() const; + std::string GenAclOptionsKey() const; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXXAPI_SESSION_ACL_OPTION_PARSER_H diff --git a/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc b/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc index 28ec34188c..b1dc2e8858 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc @@ -22,9 +22,8 @@ #include "include/api/serialization.h" #include "graph/model.h" #include "cxx_api/model/model_converter_utils/multi_process.h" -#include "cxx_api/python_utils.h" -namespace mindspore::api { +namespace mindspore { namespace { transform::TensorOrderMap GetParams(const FuncGraphPtr &anf_graph) { transform::TensorOrderMap res; @@ -86,25 +85,25 @@ transform::DfGraphPtr ModelConverter::ConvertFuncGraphToAIR(const FuncGraphPtr & para->set_name(name); } - transform::DfGraphConvertor convertor(anf_graph); + transform::DfGraphConvertor converter(anf_graph); std::string net_id = "0"; std::string init_graph = "init_subgraph." 
+ net_id; std::string checkpoint_name = "save." + net_id; - convertor.set_training(false); - (void)convertor.ConvertAllNode().InitParam(GetParams(anf_graph)).BuildGraph(); - (void)convertor.GenerateCheckpointGraph(); - if (convertor.ErrCode() != 0) { + converter.set_training(false); + (void)converter.ConvertAllNode().InitParam(GetParams(anf_graph)).BuildGraph(); + (void)converter.GenerateCheckpointGraph(); + if (converter.ErrCode() != 0) { transform::DfGraphManager::GetInstance().ClearGraph(); - MS_LOG(ERROR) << "Convert df graph failed, err:" << convertor.ErrCode(); + MS_LOG(ERROR) << "Convert df graph failed, err:" << converter.ErrCode(); return nullptr; } - (void)transform::DfGraphManager::GetInstance().AddGraph(anf_graph->ToString(), convertor.GetComputeGraph()); - (void)transform::DfGraphManager::GetInstance().AddGraph(init_graph, convertor.GetInitGraph()); - (void)transform::DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, convertor.GetBroadcastGraph()); + (void)transform::DfGraphManager::GetInstance().AddGraph(anf_graph->ToString(), converter.GetComputeGraph()); + (void)transform::DfGraphManager::GetInstance().AddGraph(init_graph, converter.GetInitGraph()); + (void)transform::DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, converter.GetBroadcastGraph()); transform::Status ret = - transform::DfGraphManager::GetInstance().AddGraph(checkpoint_name, convertor.GetSaveCheckpointGraph()); + transform::DfGraphManager::GetInstance().AddGraph(checkpoint_name, converter.GetSaveCheckpointGraph()); if (ret == transform::Status::SUCCESS) { transform::DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph); } @@ -158,7 +157,7 @@ Buffer ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { auto df_graph = ConvertFuncGraphToAIR(func_graph); if (df_graph == nullptr) { MS_LOG(ERROR) << "Convert FuncGraph to AscendIR failed."; - return FAILED; + return kMCFailed; } ge::Model model; ge::Buffer model_data; @@ -166,14 +165,14 @@ Buffer 
ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { auto ge_ret = model.Save(model_data); if (ge_ret != ge::SUCCESS) { MS_LOG(ERROR) << "Save ge model to buffer failed."; - return FAILED; + return kMCFailed; } // send original model to child auto status = multi_process->SendMsg(model_data.data(), model_data.size()); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Send original model to child process failed"; - return FAILED; + return status; } // receive convert model result from child CreateBufferCall call = [&buffer_ret](size_t msg_len) -> uint8_t * { @@ -181,11 +180,11 @@ Buffer ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { return reinterpret_cast(buffer_ret.MutableData()); }; status = multi_process->ReceiveMsg(call); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Receive result model from child process failed"; - return FAILED; + return status; } - return SUCCESS; + return kSuccess; }; auto child_process = [this](MultiProcess *multi_process) -> Status { MS_EXCEPTION_IF_NULL(multi_process); @@ -196,25 +195,25 @@ Buffer ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { return reinterpret_cast(model.MutableData()); }; auto status = multi_process->ReceiveMsg(call); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Receive original model from parent process failed"; - return FAILED; + return status; } Buffer model_result = LoadAscendIRInner(model); if (model_result.DataSize() == 0) { MS_LOG_ERROR << "Convert model from MindIR to OM failed"; - return FAILED; + return kMCFailed; } // send result model to parent status = multi_process->SendMsg(model_result.Data(), model_result.DataSize()); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Send result model to parent process failed"; - return FAILED; + return status; } - return SUCCESS; + return kSuccess; }; auto status = multi_process.MainProcess(parent_process, child_process); - if 
(!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Convert MindIR model to OM model failed"; } else { MS_LOG_INFO << "Convert MindIR model to OM model success"; @@ -229,9 +228,9 @@ Buffer ModelConverter::LoadAscendIR(const Buffer &model_data) { MS_EXCEPTION_IF_NULL(multi_process); // send original model to child auto status = multi_process->SendMsg(model_data.Data(), model_data.DataSize()); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Send original model to child process failed"; - return FAILED; + return status; } // receive convert model result from child CreateBufferCall call = [&buffer_ret](size_t msg_len) -> uint8_t * { @@ -239,11 +238,11 @@ Buffer ModelConverter::LoadAscendIR(const Buffer &model_data) { return reinterpret_cast(buffer_ret.MutableData()); }; status = multi_process->ReceiveMsg(call); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Receive result model from child process failed"; - return FAILED; + return status; } - return SUCCESS; + return kSuccess; }; auto child_process = [this](MultiProcess *multi_process) -> Status { MS_EXCEPTION_IF_NULL(multi_process); @@ -254,25 +253,25 @@ Buffer ModelConverter::LoadAscendIR(const Buffer &model_data) { return reinterpret_cast(model.MutableData()); }; auto status = multi_process->ReceiveMsg(call); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Receive original model from parent process failed"; - return FAILED; + return status; } Buffer model_result = LoadAscendIRInner(model); if (model_result.DataSize() == 0) { MS_LOG_ERROR << "Convert model from AIR to OM failed"; - return FAILED; + return kMCFailed; } // send result model to parent status = multi_process->SendMsg(model_result.Data(), model_result.DataSize()); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Send result model to parent process failed"; - return FAILED; + return status; } - return SUCCESS; + return kSuccess; }; auto 
status = multi_process.MainProcess(parent_process, child_process); - if (!status.IsSuccess()) { + if (status != kSuccess) { MS_LOG_ERROR << "Convert AIR model to OM model failed"; } else { MS_LOG_INFO << "Convert AIR model to OM model success"; @@ -326,4 +325,4 @@ Buffer ModelConverter::LoadAscendIRInner(const Buffer &model_data) { auto om_data = BuildAirModel(df_graph, init_options, build_options); return om_data; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/acl/model_converter.h b/mindspore/ccsrc/cxx_api/model/acl/model_converter.h index eabc4dd1d3..7e46f142ae 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/model_converter.h +++ b/mindspore/ccsrc/cxx_api/model/acl/model_converter.h @@ -27,7 +27,7 @@ #include "external/ge/ge_ir_build.h" #include "cxx_api/model/acl/acl_model_options.h" -namespace mindspore::api { +namespace mindspore { class ModelConverter { public: ModelConverter() : options_(nullptr) {} @@ -46,6 +46,5 @@ class ModelConverter { Buffer LoadMindIRInner(const FuncGraphPtr &func_graph); Buffer LoadAscendIRInner(const Buffer &model_data); }; -} // namespace mindspore::api - +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXXAPI_SESSION_ACL_MODEL_CONVERTER_H diff --git a/mindspore/ccsrc/cxx_api/model/model.cc b/mindspore/ccsrc/cxx_api/model/model.cc index b3fc97ef21..88d364f7f2 100644 --- a/mindspore/ccsrc/cxx_api/model/model.cc +++ b/mindspore/ccsrc/cxx_api/model/model.cc @@ -19,49 +19,45 @@ #include "cxx_api/factory.h" #include "utils/utils.h" -namespace mindspore::api { -Status Model::Build(const std::map &options) { +namespace mindspore { +Status Model::Build() { MS_EXCEPTION_IF_NULL(impl_); - return impl_->Build(options); + return impl_->Build(); } -Status Model::Train(const DataSet &dataset, bool data_sink, std::map *outputs) { +Status Model::Resize(const std::vector &inputs, const std::vector> &dims) { MS_EXCEPTION_IF_NULL(impl_); - return impl_->Train(dataset, outputs); + return 
impl_->Resize(inputs, dims); } -Status Model::Eval(const DataSet &dataset, bool data_sink, std::map *outputs) { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->Eval(dataset, outputs); -} - -Status Model::Predict(const std::vector &inputs, std::vector *outputs) { +Status Model::Predict(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(impl_); return impl_->Predict(inputs, outputs); } -Status Model::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector Model::GetInputs() { MS_EXCEPTION_IF_NULL(impl_); - return impl_->GetInputsInfo(names, shapes, data_types, mem_sizes); + return impl_->GetInputs(); } -Status Model::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector Model::GetOutputs() { MS_EXCEPTION_IF_NULL(impl_); - return impl_->GetOutputsInfo(names, shapes, data_types, mem_sizes); + return impl_->GetOutputs(); } -Model::Model(const GraphCell &graph_cell) - : impl_(Factory::Instance().Create(Context::Instance().GetDeviceTarget())) { +Model::Model(const GraphCell &graph_cell, const std::shared_ptr &model_context) + : impl_(Factory::Instance().Create(mindspore::GlobalContext::GetGlobalDeviceTarget())) { if (impl_ == nullptr) { - MS_LOG(EXCEPTION) << "Create session type " << Context::Instance().GetDeviceTarget() << " failed"; + MS_LOG(EXCEPTION) << "Create session type " << mindspore::GlobalContext::GetGlobalDeviceTarget() << " failed"; } MS_EXCEPTION_IF_NULL(graph_cell.GetGraph()); impl_->SetGraph(std::make_shared(*graph_cell.GetGraph())); + impl_->SetContext(model_context); } -Model::Model(const std::vector &network) { MS_LOG(EXCEPTION) << "Unsupported feature."; } +Model::Model(const std::vector &network, const std::shared_ptr &model_context) { + MS_LOG(EXCEPTION) << "Unsupported feature."; +} Model::~Model() {} @@ -69,4 +65,4 @@ bool Model::CheckModelSupport(const std::string &device_type, 
ModelType) { return Factory::Instance().CheckModelSupport(device_type); } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.cc b/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.cc index c56ef354d0..50ca5477a8 100644 --- a/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.cc +++ b/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.cc @@ -24,7 +24,6 @@ #include "cxx_api/model/model_converter_utils/shared_memory.h" namespace mindspore { -namespace api { namespace { uint64_t kSharedMemorySize = 100ull << 20; // 100 MB } @@ -40,7 +39,7 @@ Status MultiProcess::MainProcess(ProcessFuncCall parent_process, ProcessFuncCall memory_size_ = kSharedMemorySize; // 100 MB SharedMemory shared_memory; ret = shared_memory.Create(memory_size_); - if (!ret.IsSuccess()) { + if (ret != kSuccess) { MS_LOG_ERROR << "Create shared memory failed"; return ret; } @@ -48,10 +47,10 @@ Status MultiProcess::MainProcess(ProcessFuncCall parent_process, ProcessFuncCall if (pid < 0) { shared_memory.Destroy(); MS_LOG_ERROR << "Fork process to convert model failed"; - return FAILED; + return kMEFailed; } ret = shared_memory.Attach(); - if (!ret.IsSuccess()) { + if (ret != kSuccess) { MS_LOG_ERROR << "Process attach shared memory failed, pid " << pid; return ret; } @@ -87,12 +86,12 @@ Status MultiProcess::ParentProcess(ProcessFuncCall parent_process) { Status ret; try { ret = parent_process(this); - if (!ret.IsSuccess()) { + if (ret != kSuccess) { MS_LOG_ERROR << "Parent process process failed"; } } catch (const std::runtime_error &ex) { MS_LOG_ERROR << "Catch parent process runtime error: " << ex.what(); - ret = FAILED; + ret = kMEFailed; } stopped_ = true; send_msg_->stop = true; @@ -108,7 +107,7 @@ void MultiProcess::ChildProcess(ProcessFuncCall child_process) { std::thread heartbeat_thread(MultiProcess::HeartbeatThreadFunc, this); try { auto ret = child_process(this); 
- if (!ret.IsSuccess()) { + if (ret != kSuccess) { MS_LOG_ERROR << "Child process process failed"; } } catch (const std::runtime_error &ex) { @@ -138,14 +137,14 @@ Status MultiProcess::SendMsg(const void *buffer, uint64_t msg_len) { } if (peer_stopped_) { if (!send_msg_->read_finish_flag) { - return FAILED; + return kMEFailed; } break; } MS_LOG_INFO << "Send end " << cur_offset << ", msg len " << sub_msg_len << ", total len " << msg_len; } MS_LOG_INFO << "End to send message to peer process, msg len " << msg_len; - return SUCCESS; + return kSuccess; } Status MultiProcess::ReceiveMsg(CreateBufferCall create_buffer_call) { @@ -158,7 +157,7 @@ Status MultiProcess::ReceiveMsg(CreateBufferCall create_buffer_call) { usleep(1000); // 1ms } if (peer_stopped_) { - return FAILED; + return kMEFailed; } if (msg_buffer == nullptr) { msg_len = receive_msg_->msg_total_len; @@ -170,7 +169,7 @@ Status MultiProcess::ReceiveMsg(CreateBufferCall create_buffer_call) { receive_msg_->read_finish_flag = true; MS_LOG_INFO << "Receive end, current length " << cur_offset << ", total length " << msg_len << std::endl; } while (msg_len > cur_offset); - return SUCCESS; + return kSuccess; } void MultiProcess::HeartbeatThreadFunc(MultiProcess *multi_process) { multi_process->HeartbeatThreadFuncInner(); } @@ -200,6 +199,4 @@ void MultiProcess::HeartbeatThreadFuncInner() { usleep(100000); // sleep 100 ms } } - -} // namespace api } // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.h b/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.h index a31d9f0a3b..8958c13e62 100644 --- a/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.h +++ b/mindspore/ccsrc/cxx_api/model/model_converter_utils/multi_process.h @@ -21,7 +21,6 @@ #include "include/api/status.h" namespace mindspore { -namespace api { struct MessageFlag { uint64_t heartbeat = 0; uint64_t stop = false; @@ -60,7 +59,5 @@ class MultiProcess { Status 
ParentProcess(ProcessFuncCall parent_process); void ChildProcess(ProcessFuncCall child_process); }; -} // namespace api } // namespace mindspore - #endif // MINDSPORE_CCSRC_CXXAPI_MULTI_PROCESS_H diff --git a/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.cc b/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.cc index 09dabe0f1d..24ef852746 100644 --- a/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.cc +++ b/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.cc @@ -20,26 +20,25 @@ #include "mindspore/core/utils/log_adapter.h" namespace mindspore { -namespace api { Status SharedMemory::Create(uint64_t memory_size) { auto access_mode = S_IRUSR | S_IWUSR | S_IROTH | S_IWOTH | S_IRGRP | S_IWGRP; shm_id_ = shmget(IPC_PRIVATE, memory_size, IPC_CREAT | IPC_EXCL | access_mode); if (shm_id_ == -1) { MS_LOG_ERROR << "Shared memory creation failed. Errno " + std::to_string(errno); - return FAILED; + return kMCFailed; } MS_LOG_INFO << "shmget success, shm id " << shm_id_; - return SUCCESS; + return kSuccess; } Status SharedMemory::Attach() { void *shmat_addr = shmat(shm_id_, nullptr, 0); if (shmat_addr == reinterpret_cast(-1)) { MS_LOG_ERROR << "Shared memory attach failed. 
Errno " + std::to_string(errno); - return FAILED; + return kMCFailed; } shmat_addr_ = reinterpret_cast(shmat_addr); - return SUCCESS; + return kSuccess; } void SharedMemory::Detach() { @@ -63,5 +62,4 @@ void SharedMemory::Destroy() { MS_LOG_ERROR << errMsg; } } -} // namespace api } // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.h b/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.h index 77c9423d58..5200a2d26d 100644 --- a/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.h +++ b/mindspore/ccsrc/cxx_api/model/model_converter_utils/shared_memory.h @@ -20,7 +20,6 @@ #include "include/api/status.h" namespace mindspore { -namespace api { class SharedMemory { public: Status Create(uint64_t memory_size); @@ -33,7 +32,5 @@ class SharedMemory { int shm_id_ = -1; uint8_t *shmat_addr_ = nullptr; }; -} // namespace api } // namespace mindspore - #endif // MINDSPORE_CCSRC_CXXAPI_SHARED_MEMORY_H diff --git a/mindspore/ccsrc/cxx_api/model/model_impl.h b/mindspore/ccsrc/cxx_api/model/model_impl.h index 5ada9782b5..97a308eafa 100644 --- a/mindspore/ccsrc/cxx_api/model/model_impl.h +++ b/mindspore/ccsrc/cxx_api/model/model_impl.h @@ -21,28 +21,26 @@ #include #include #include +#include "include/api/context.h" #include "include/api/model.h" #include "include/api/graph.h" #include "cxx_api/graph/graph_data.h" #include "utils/utils.h" #include "ir/func_graph.h" -namespace mindspore::api { +namespace mindspore { class ModelImpl { public: ModelImpl() = default; virtual ~ModelImpl() = default; - virtual Status Build(const std::map &options) = 0; + virtual Status Build() = 0; + virtual Status Resize(const std::vector &inputs, const std::vector> &dims) = 0; - virtual Status Train(const DataSet &dataset, std::map *outputs) = 0; - virtual Status Eval(const DataSet &dataset, std::map *outputs) = 0; - virtual Status Predict(const std::vector &inputs, std::vector *outputs) = 0; + virtual Status 
Predict(const std::vector &inputs, std::vector *outputs) = 0; - virtual Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const = 0; - virtual Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const = 0; + virtual std::vector GetInputs() = 0; + virtual std::vector GetOutputs() = 0; protected: Status Load(const std::shared_ptr &graph_cell) { @@ -61,11 +59,16 @@ class ModelImpl { } std::shared_ptr graph_; + std::shared_ptr model_context_; private: friend class Model; void SetGraph(const std::shared_ptr &graph) { graph_ = graph; } + void SetContext(const std::shared_ptr &model_context) { + if (model_context != nullptr) { + model_context_ = std::make_shared(*model_context); + } + } }; -} // namespace mindspore::api - +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXX_API_MODEL_MODEL_IMPL_H diff --git a/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc b/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc index 7349aba943..5a4366d0b7 100644 --- a/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc +++ b/mindspore/ccsrc/cxx_api/model/ms/ms_model.cc @@ -16,18 +16,78 @@ #include "cxx_api/model/ms/ms_model.h" #include +#include "include/api/context.h" #include "utils/ms_context.h" #include "cxx_api/factory.h" namespace mindspore { -namespace api { API_FACTORY_REG(ModelImpl, Ascend910, MsModel); API_FACTORY_REG(ModelImpl, GPU, MsModel); -Status MsModel::Build(const std::map &) { +static std::string GenerateShapeKey(const std::vector> &dims) { + std::string shape_key; + for (size_t i = 0; i < dims.size(); ++i) { + shape_key += std::to_string(i) + ":"; + for (size_t j = 0; j < dims[i].size(); ++j) { + shape_key += std::to_string(dims[i][j]); + if (j + 1 < dims[i].size()) { + shape_key += ","; + } + } + if (i + 1 < dims.size()) { + shape_key += ";"; + } + } + return shape_key; +} + +std::shared_ptr MsModel::GenerateGraphCell(const std::vector> &dims) { + 
std::string shape_key = GenerateShapeKey(dims); + if (auto iter = dynamic_size_graph_map_.find(shape_key); iter != dynamic_size_graph_map_.end()) { + MS_LOG(INFO) << "This options has been built, read cache."; + return iter->second; + } + + auto func_graph = ModelImpl::GetFuncGraph(); + MS_EXCEPTION_IF_NULL(func_graph); + + const auto &inputs = func_graph->parameters(); + if (dims.size() != inputs.size()) { + MS_LOG(ERROR) << "Invalid dims size " << dims.size() << " not match model inputs size " << inputs.size(); + return nullptr; + } + for (size_t i = 0; i < dims.size(); ++i) { + const auto ¶m = inputs[i]; + auto shape_ptr = std::dynamic_pointer_cast(param->Shape()); + if (shape_ptr == nullptr) { + MS_LOG(ERROR) << "Inputs " << i << " is not supported to resize, debug string: " << param->DebugString(); + return nullptr; + } + shape_ptr->shape() = dims[i]; + } + + auto graph = std::make_shared(std::make_shared(func_graph, ModelType::kMindIR)); + MS_EXCEPTION_IF_NULL(graph); + auto graph_cell = std::make_shared(graph); + MS_EXCEPTION_IF_NULL(graph_cell); + auto ret = ModelImpl::Load(graph_cell); + if (ret != kSuccess) { + MS_LOG(ERROR) << "Load failed."; + return nullptr; + } + dynamic_size_graph_map_[shape_key] = graph_cell; + return graph_cell; +} + +Status MsModel::Build() { MS_LOG(INFO) << "Start build model."; MS_EXCEPTION_IF_NULL(graph_); + if (graph_cell_ != nullptr) { + MS_LOG(INFO) << "This model has been built, skip."; + return kSuccess; + } + auto func_graph = ModelImpl::GetFuncGraph(); MS_EXCEPTION_IF_NULL(func_graph); @@ -36,7 +96,7 @@ Status MsModel::Build(const std::map &) { auto graph_cell = std::make_shared(graph); MS_EXCEPTION_IF_NULL(graph_cell); auto ret = ModelImpl::Load(graph_cell); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Load failed."; return ret; } @@ -44,55 +104,66 @@ Status MsModel::Build(const std::map &) { // save result graph_cell_ = graph_cell; MS_LOG(INFO) << "Build model success."; - return SUCCESS; + return 
kSuccess; } -Status MsModel::Train(const DataSet &, std::map *) { - MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; -} +Status MsModel::Resize(const std::vector &inputs, const std::vector> &dims) { + MS_LOG(INFO) << "Start to resize model"; + auto origin_inputs = GetInputs(); + if (inputs.size() != origin_inputs.size()) { + MS_LOG(ERROR) << "Invalid inputs size " << inputs.size() << " not match model inputs size " << origin_inputs.size(); + return kMCInvalidInput; + } + + if (inputs.size() != dims.size()) { + MS_LOG(ERROR) << "Invalid dims size " << dims.size() << " not match inputs size " << inputs.size(); + return kMCInvalidInput; + } + + auto graph_cell = GenerateGraphCell(dims); + if (graph_cell == nullptr) { + MS_LOG(ERROR) << "GenerateGraphCell failed."; + return kMCFailed; + } -Status MsModel::Eval(const DataSet &, std::map *) { - MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + MS_LOG(INFO) << "Resize model success."; + graph_cell_ = std::move(graph_cell); + return kSuccess; } -Status MsModel::Predict(const std::vector &inputs, std::vector *outputs) { +Status MsModel::Predict(const std::vector &inputs, std::vector *outputs) { MS_EXCEPTION_IF_NULL(outputs); if (graph_ == nullptr) { MS_LOG(ERROR) << "Invalid data, graph_ is null."; - return FAILED; + return kMCFailed; } if (graph_cell_ == nullptr) { MS_LOG(INFO) << "Model has not been built, it will be built with default options"; - Status ret = Build({}); - if (ret != SUCCESS) { + Status ret = Build(); + if (ret != kSuccess) { MS_LOG(ERROR) << "Build model failed."; - return FAILED; + return ret; } } MS_EXCEPTION_IF_NULL(graph_cell_); Status ret = graph_cell_->Run(inputs, outputs); - if (ret != SUCCESS) { + if (ret != kSuccess) { MS_LOG(ERROR) << "Run graph failed."; - return FAILED; + return ret; } - return SUCCESS; + return kSuccess; } -Status MsModel::GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector 
MsModel::GetInputs() { MS_EXCEPTION_IF_NULL(graph_cell_); - return graph_cell_->GetInputsInfo(names, shapes, data_types, mem_sizes); + return graph_cell_->GetInputs(); } -Status MsModel::GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const { +std::vector MsModel::GetOutputs() { MS_EXCEPTION_IF_NULL(graph_cell_); - return graph_cell_->GetOutputsInfo(names, shapes, data_types, mem_sizes); + return graph_cell_->GetOutputs(); } -} // namespace api } // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/model/ms/ms_model.h b/mindspore/ccsrc/cxx_api/model/ms/ms_model.h index 747ff0da8b..0571b4e409 100644 --- a/mindspore/ccsrc/cxx_api/model/ms/ms_model.h +++ b/mindspore/ccsrc/cxx_api/model/ms/ms_model.h @@ -33,26 +33,24 @@ #endif namespace mindspore { -namespace api { class MsModel : public ModelImpl { public: MsModel() {} ~MsModel() = default; - Status Build(const std::map &options_map) override; + Status Build() override; + Status Resize(const std::vector &inputs, const std::vector> &dims) override; - Status Train(const DataSet &dataset, std::map *outputs) override; - Status Eval(const DataSet &dataset, std::map *outputs) override; - Status Predict(const std::vector &inputs, std::vector *outputs) override; + Status Predict(const std::vector &inputs, std::vector *outputs) override; - Status GetInputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const override; - Status GetOutputsInfo(std::vector *names, std::vector> *shapes, - std::vector *data_types, std::vector *mem_sizes) const override; + std::vector GetInputs() override; + std::vector GetOutputs() override; private: + std::shared_ptr GenerateGraphCell(const std::vector> &dims); + std::shared_ptr graph_cell_; + std::map> dynamic_size_graph_map_; }; -} // namespace api } // namespace mindspore #endif // MINDSPORE_CCSRC_SESSION_SESSION_BASIC_H diff --git a/mindspore/ccsrc/cxx_api/ops/ops.cc 
b/mindspore/ccsrc/cxx_api/ops/ops.cc index 1d028a6d8d..6fe3171af9 100644 --- a/mindspore/ccsrc/cxx_api/ops/ops.cc +++ b/mindspore/ccsrc/cxx_api/ops/ops.cc @@ -15,7 +15,7 @@ */ #include "include/api/ops/ops.h" -namespace mindspore::api { +namespace mindspore { Conv2D::Conv2D(int out_channel, const std::vector &kernel_size, int mode, const std::string &pad_mode, const std::vector &pad, const std::vector &stride, const std::vector &dilation, int group) : OpCell("Conv2D"), @@ -35,4 +35,4 @@ Output Conv2D::operator()(const Input &input1, const Input &input2) const { std::vector Conv2D::Construct(const std::vector &inputs) { return {Output(shared_from_this(), inputs, 1)}; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/python_utils.cc b/mindspore/ccsrc/cxx_api/python_utils.cc index ecf737dcf5..89a0d2793b 100644 --- a/mindspore/ccsrc/cxx_api/python_utils.cc +++ b/mindspore/ccsrc/cxx_api/python_utils.cc @@ -29,7 +29,7 @@ namespace py = pybind11; static std::mutex init_mutex; static bool Initialized = false; -namespace mindspore::api { +namespace mindspore { static void RegAllOpFromPython() { MsContext::GetInstance()->set_param(MS_CTX_EXECUTION_MODE, kGraphMode); Py_Initialize(); @@ -143,4 +143,4 @@ PythonEnvGuard::~PythonEnvGuard() { FinalizePython(); } } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/python_utils.h b/mindspore/ccsrc/cxx_api/python_utils.h index e7c91a203f..dbc6dacb9f 100644 --- a/mindspore/ccsrc/cxx_api/python_utils.h +++ b/mindspore/ccsrc/cxx_api/python_utils.h @@ -16,7 +16,7 @@ #ifndef MINDSPORE_CCSRC_CXXAPI_PYTHON_UTILS_H #define MINDSPORE_CCSRC_CXXAPI_PYTHON_UTILS_H -namespace mindspore::api { +namespace mindspore { void RegAllOp(); bool PythonIsInited(); void InitPython(); @@ -30,5 +30,5 @@ class PythonEnvGuard { private: bool origin_init_status_; }; -} // namespace mindspore::api +} // namespace mindspore #endif // MINDSPORE_CCSRC_CXXAPI_PYTHON_UTILS_H 
diff --git a/mindspore/ccsrc/cxx_api/serialization.cc b/mindspore/ccsrc/cxx_api/serialization.cc index 8a8b26b00b..5ff271d8f1 100644 --- a/mindspore/ccsrc/cxx_api/serialization.cc +++ b/mindspore/ccsrc/cxx_api/serialization.cc @@ -19,7 +19,7 @@ #include "utils/log_adapter.h" #include "mindspore/core/load_mindir/load_model.h" -namespace mindspore::api { +namespace mindspore { static Buffer ReadFile(const std::string &file) { Buffer buffer; if (file.empty()) { @@ -68,6 +68,22 @@ static Buffer ReadFile(const std::string &file) { return buffer; } +Graph Serialization::LoadModel(const void *model_data, size_t data_size, ModelType model_type) { + if (model_type == kMindIR) { + FuncGraphPtr anf_graph = nullptr; + try { + anf_graph = ConvertStreamToFuncGraph(reinterpret_cast(model_data), data_size); + } catch (const std::exception &) { + MS_LOG(EXCEPTION) << "Load MindIR failed."; + } + + return Graph(std::make_shared(anf_graph, kMindIR)); + } else if (model_type == kOM) { + return Graph(std::make_shared(Buffer(model_data, data_size), kOM)); + } + MS_LOG(EXCEPTION) << "Unsupported ModelType " << model_type; +} + Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { Buffer data = ReadFile(file); if (data.Data() == nullptr) { @@ -77,7 +93,7 @@ Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { FuncGraphPtr anf_graph = nullptr; try { anf_graph = ConvertStreamToFuncGraph(reinterpret_cast(data.Data()), data.DataSize()); - } catch (std::exception &e) { + } catch (const std::exception &) { MS_LOG(EXCEPTION) << "Load MindIR failed."; } @@ -90,21 +106,21 @@ Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map *parameters) { MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + return kMEFailed; } Status Serialization::SetParameters(const std::map ¶meters, Model *model) { MS_LOG(ERROR) << "Unsupported feature."; - return 
FAILED; + return kMEFailed; } Status Serialization::ExportModel(const Model &model, ModelType model_type, Buffer *model_data) { MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + return kMEFailed; } Status Serialization::ExportModel(const Model &model, ModelType model_type, const std::string &model_file) { MS_LOG(ERROR) << "Unsupported feature."; - return FAILED; + return kMEFailed; } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/cxx_api/types.cc b/mindspore/ccsrc/cxx_api/types.cc index 98178f108b..38ecf4dee1 100644 --- a/mindspore/ccsrc/cxx_api/types.cc +++ b/mindspore/ccsrc/cxx_api/types.cc @@ -17,17 +17,20 @@ #include #include "securec/include/securec.h" #include "utils/utils.h" +#include "mindspore/core/ir/api_tensor_impl.h" -namespace mindspore::api { -const char *kDeviceTypeAscend310 = "Ascend310"; -const char *kDeviceTypeAscend910 = "Ascend910"; -const char *kDeviceTypeGpu = "GPU"; - -class DataImpl { +namespace mindspore { +class Buffer::Impl { public: - DataImpl() : data_() {} - ~DataImpl() = default; - DataImpl(const void *data, size_t data_len) { SetData(data, data_len); } + Impl() : data_() {} + ~Impl() = default; + Impl(const void *data, size_t data_len) { + if (data != nullptr) { + (void)SetData(data, data_len); + } else { + ResizeData(data_len); + } + } const void *Data() const { return data_.data(); } void *MutableData() { return data_.data(); } @@ -66,132 +69,162 @@ class DataImpl { std::vector data_; }; -class Buffer::Impl : public DataImpl { +class TensorDefaultImpl : public MSTensor::Impl { public: - Impl() : DataImpl() {} - ~Impl() = default; - Impl(const void *data, size_t data_len) : DataImpl(data, data_len) {} -}; + TensorDefaultImpl() : buffer_(), name_(), type_(DataType::kTypeUnknown), shape_() {} + ~TensorDefaultImpl() override = default; + TensorDefaultImpl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : buffer_(data, 
data_len), name_(name), type_(type), shape_(shape) {} + + const std::string &Name() const override { return name_; } + enum DataType DataType() const override { return type_; } + const std::vector &Shape() const override { return shape_; } + + std::shared_ptr Data() const override { + return std::shared_ptr(buffer_.Data(), [](const void *) {}); + } -class Tensor::Impl : public DataImpl { - public: - Impl() : DataImpl(), name_(), type_(DataType::kMsUnknown), shape_() {} - ~Impl() = default; - Impl(const std::string &name, api::DataType type, const std::vector &shape, const void *data, - size_t data_len) - : DataImpl(data, data_len), name_(name), type_(type), shape_(shape) {} + void *MutableData() override { return buffer_.MutableData(); } + size_t DataSize() const override { return buffer_.DataSize(); } - const std::string &Name() const { return name_; } - void SetName(const std::string &name) { name_ = name; } + bool IsDevice() const override { return false; } - api::DataType DataType() const { return type_; } - void SetDataType(api::DataType type) { type_ = type; } + std::shared_ptr Clone() const override { + return std::make_shared(name_, type_, shape_, buffer_.Data(), buffer_.DataSize()); + } - void SetShape(const std::vector &shape) { shape_ = shape; } - const std::vector &Shape() const { return shape_; } + private: + Buffer buffer_; + std::string name_; + enum DataType type_; + std::vector shape_; +}; - int64_t ElementNum() const { - std::vector shapex = Shape(); - return std::accumulate(shapex.begin(), shapex.end(), 1LL, std::multiplies()); +class TensorReferenceImpl : public MSTensor::Impl { + public: + TensorReferenceImpl() : data_(nullptr), data_size_(0), name_(), type_(DataType::kTypeUnknown), shape_() {} + ~TensorReferenceImpl() override = default; + TensorReferenceImpl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : data_(data), data_size_(data_len), name_(name), type_(type), shape_(shape) 
{} + + const std::string &Name() const override { return name_; } + enum DataType DataType() const override { return type_; } + const std::vector &Shape() const override { return shape_; } + + std::shared_ptr Data() const override { + return std::shared_ptr(data_, [](const void *) {}); } - static int GetTypeSize(api::DataType type) { - static const std::map type_size_map = { - {kMsBool, sizeof(bool)}, {kMsFloat64, sizeof(double)}, {kMsInt8, sizeof(int8_t)}, - {kMsUint8, sizeof(uint8_t)}, {kMsInt16, sizeof(int16_t)}, {kMsUint16, sizeof(uint16_t)}, - {kMsInt32, sizeof(int32_t)}, {kMsUint32, sizeof(uint32_t)}, {kMsInt64, sizeof(int64_t)}, - {kMsUint64, sizeof(uint64_t)}, {kMsFloat16, sizeof(uint16_t)}, {kMsFloat32, sizeof(float)}, - }; - auto it = type_size_map.find(type); - if (it != type_size_map.end()) { - return it->second; - } + void *MutableData() override { return const_cast(data_); } + size_t DataSize() const override { return data_size_; } + + bool IsDevice() const override { return false; } - MS_LOG(WARNING) << "Cannot find data type " << type; - return 0; + std::shared_ptr Clone() const override { + return std::make_shared(name_, type_, shape_, data_, data_size_); } - private: + protected: + const void *data_; + size_t data_size_; std::string name_; - api::DataType type_; + enum DataType type_; std::vector shape_; }; -Tensor::Tensor() : impl_(std::make_shared()) {} -Tensor::Tensor(const std::string &name, api::DataType type, const std::vector &shape, const void *data, - size_t data_len) - : impl_(std::make_shared(name, type, shape, data, data_len)) {} -Tensor::~Tensor() = default; - -Tensor Tensor::Clone() const { - MS_EXCEPTION_IF_NULL(impl_); - Tensor ret; - ret.impl_ = std::make_shared(*impl_); - return ret; +MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + try { + std::shared_ptr impl = std::make_shared(name, type, shape, data, data_len); + return 
MSTensor(impl); + } catch (const std::bad_alloc &) { + MS_LOG(ERROR) << "Malloc memory failed."; + return MSTensor(nullptr); + } catch (...) { + MS_LOG(ERROR) << "Unknown error occurred."; + return MSTensor(nullptr); + } } -const std::string &Tensor::Name() const { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->Name(); +MSTensor MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + try { + std::shared_ptr impl = std::make_shared(name, type, shape, data, data_len); + return MSTensor(impl); + } catch (const std::bad_alloc &) { + MS_LOG(ERROR) << "Malloc memory failed."; + return MSTensor(nullptr); + } catch (...) { + MS_LOG(ERROR) << "Unknown error occurred."; + return MSTensor(nullptr); + } } -void Tensor::SetName(const std::string &name) { +MSTensor::MSTensor() : impl_(std::make_shared()) {} +MSTensor::MSTensor(std::nullptr_t) : impl_(nullptr) {} +MSTensor::MSTensor(const std::shared_ptr &impl) : impl_(impl) { MS_EXCEPTION_IF_NULL(impl); } +MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : impl_(std::make_shared(name, type, shape, data, data_len)) {} +MSTensor::~MSTensor() = default; + +bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; } + +MSTensor MSTensor::Clone() const { MS_EXCEPTION_IF_NULL(impl_); - impl_->SetName(name); + MSTensor ret; + ret.impl_ = impl_->Clone(); + return ret; } -DataType Tensor::DataType() const { +const std::string &MSTensor::Name() const { MS_EXCEPTION_IF_NULL(impl_); - return impl_->DataType(); + return impl_->Name(); } -void Tensor::SetDataType(api::DataType type) { +enum DataType MSTensor::DataType() const { MS_EXCEPTION_IF_NULL(impl_); - impl_->SetDataType(type); + return impl_->DataType(); } -const std::vector &Tensor::Shape() const { +const std::vector &MSTensor::Shape() const { MS_EXCEPTION_IF_NULL(impl_); return impl_->Shape(); } 
-void Tensor::SetShape(const std::vector &shape) { +int64_t MSTensor::ElementNum() const { MS_EXCEPTION_IF_NULL(impl_); - impl_->SetShape(shape); + const auto &shape = impl_->Shape(); + if (shape.empty()) { + // element number of scalar is 1 + return 1; + } + + return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); } -const void *Tensor::Data() const { +std::shared_ptr MSTensor::Data() const { MS_EXCEPTION_IF_NULL(impl_); return impl_->Data(); } -void *Tensor::MutableData() { +void *MSTensor::MutableData() { MS_EXCEPTION_IF_NULL(impl_); return impl_->MutableData(); } -size_t Tensor::DataSize() const { +size_t MSTensor::DataSize() const { MS_EXCEPTION_IF_NULL(impl_); return impl_->DataSize(); } -bool Tensor::ResizeData(size_t data_len) { +bool MSTensor::IsDevice() const { MS_EXCEPTION_IF_NULL(impl_); - return impl_->ResizeData(data_len); + return impl_->IsDevice(); } -bool Tensor::SetData(const void *data, size_t data_len) { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->SetData(data, data_len); -} - -int64_t Tensor::ElementNum() const { - MS_EXCEPTION_IF_NULL(impl_); - return impl_->ElementNum(); -} - -int Tensor::GetTypeSize(api::DataType type) { return Impl::GetTypeSize(type); } - Buffer::Buffer() : impl_(std::make_shared()) {} Buffer::Buffer(const void *data, size_t data_len) : impl_(std::make_shared(data, data_len)) {} Buffer::~Buffer() = default; @@ -227,4 +260,4 @@ bool Buffer::SetData(const void *data, size_t data_len) { MS_EXCEPTION_IF_NULL(impl_); return impl_->SetData(data, data_len); } -} // namespace mindspore::api +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/CMakeLists.txt index a772f096f7..ecacf5e4ff 100644 --- a/mindspore/ccsrc/minddata/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/CMakeLists.txt @@ -284,14 +284,7 @@ else() endif() add_dependencies(_c_dataengine mindspore_shared_lib) -if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") - 
set(MINDSPORE_LINK_OBJECT ${CMAKE_BINARY_DIR}/mindspore/ccsrc/cxx_api/CMakeFiles/mindspore_shared_lib.dir/objects.a) - target_link_libraries(_c_dataengine PRIVATE mindspore_shared_lib ${MINDSPORE_LINK_OBJECT}) -else() - if(ENABLE_ACL) - target_link_libraries(_c_dataengine PRIVATE mindspore_shared_lib) - endif() -endif() +target_link_libraries(_c_dataengine PRIVATE mindspore_shared_lib) if(USE_GLOG) target_link_libraries(_c_dataengine PRIVATE mindspore::glog) diff --git a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt index dda0b2bc52..ac0b8a4a66 100644 --- a/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/api/CMakeLists.txt @@ -26,28 +26,13 @@ if(ENABLE_PYTHON) target_include_directories(APItoPython PRIVATE ${pybind11_INCLUDE_DIRS}) endif() - -if(ENABLE_ACL) - add_library(cpp-API OBJECT - config.cc - datasets.cc - execute.cc - iterator.cc - minddata_eager.cc - transforms.cc - samplers.cc - text.cc - vision.cc - ) -else() - add_library(cpp-API OBJECT - config.cc - datasets.cc - execute.cc - iterator.cc - transforms.cc - samplers.cc - text.cc - vision.cc - ) -endif() +add_library(cpp-API OBJECT + config.cc + datasets.cc + execute.cc + iterator.cc + transforms.cc + samplers.cc + text.cc + vision.cc + ) diff --git a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc deleted file mode 100644 index fc92ba30f3..0000000000 --- a/mindspore/ccsrc/minddata/dataset/api/de_tensor.cc +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "minddata/dataset/core/constants.h" -#include "minddata/dataset/core/data_type.h" -#include "minddata/dataset/include/de_tensor.h" -#include "minddata/dataset/include/type_id.h" -#include "mindspore/core/ir/dtype/type_id.h" -#include "mindspore/lite/include/ms_tensor.h" -#include "utils/hashing.h" -#ifndef ENABLE_ANDROID -#include "utils/log_adapter.h" -#else -#include "mindspore/lite/src/common/log_adapter.h" -#endif - -namespace mindspore { -namespace tensor { -MSTensor *DETensor::CreateTensor(TypeId data_type, const std::vector &shape) { - return new DETensor(data_type, shape); -} - -MSTensor *DETensor::CreateTensor(const std::string &path) { - std::shared_ptr t; - (void)dataset::Tensor::CreateFromFile(path, &t); - return new DETensor(std::move(t)); -} - -MSTensor *DETensor::CreateFromMemory(TypeId data_type, const std::vector &shape, void *data) { - std::shared_ptr t; - // prepare shape info - std::vector t_shape; - - std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape), - [](int s) -> dataset::dsize_t { return static_cast(s); }); - - (void)dataset::Tensor::CreateFromMemory(dataset::TensorShape(t_shape), dataset::MSTypeToDEType(data_type), - static_cast(data), &t); - return new DETensor(std::move(t)); -} - -DETensor::DETensor(TypeId data_type, const std::vector &shape) { - std::vector t_shape; - t_shape.reserve(shape.size()); - std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape), - [](int s) -> dataset::dsize_t { return static_cast(s); }); - 
dataset::Tensor::CreateEmpty(dataset::TensorShape(t_shape), dataset::MSTypeToDEType(data_type), &this->tensor_impl_); -} - -DETensor::DETensor(std::shared_ptr tensor_ptr) { this->tensor_impl_ = std::move(tensor_ptr); } - -MSTensor *DETensor::ConvertToLiteTensor() { - // static MSTensor::CreateTensor is only for the LiteTensor - MSTensor *tensor = CreateTensor(this->data_type(), this->shape()); - MS_ASSERT(tensor->Size() == this->Size()); - memcpy_s(tensor->MutableData(), tensor->Size(), this->MutableData(), this->Size()); - return tensor; -} - -std::shared_ptr DETensor::tensor() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_; -} - -TypeId DETensor::data_type() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return dataset::DETypeToMSType(this->tensor_impl_->type()); -} - -TypeId DETensor::set_data_type(TypeId data_type) { - MS_ASSERT(this->tensor_impl_ != nullptr); - if (data_type != this->data_type()) { - std::shared_ptr temp; - dataset::Tensor::CreateFromMemory(this->tensor_impl_->shape(), dataset::MSTypeToDEType(data_type), - this->tensor_impl_->GetBuffer(), &temp); - this->tensor_impl_ = temp; - } - return data_type; -} - -std::vector DETensor::shape() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - std::vector t_shape = this->tensor_impl_->shape().AsVector(); - std::vector shape; - shape.reserve(t_shape.size()); - std::transform(t_shape.begin(), t_shape.end(), std::back_inserter(shape), - [](dataset::dsize_t s) -> int { return static_cast(s); }); - return shape; -} - -size_t DETensor::set_shape(const std::vector &shape) { - MS_ASSERT(this->tensor_impl_ != nullptr); - std::vector t_shape; - t_shape.reserve(shape.size()); - std::transform(shape.begin(), shape.end(), std::back_inserter(t_shape), - [](int s) -> dataset::dsize_t { return static_cast(s); }); - dataset::Status rc = this->tensor_impl_->Reshape(dataset::TensorShape(t_shape)); - return shape.size(); -} - -int DETensor::DimensionSize(size_t index) const { - 
MS_ASSERT(this->tensor_impl_ != nullptr); - int dim_size = -1; - auto shape = this->shape(); - if (index < shape.size()) { - dim_size = shape[index]; - } else { - MS_LOG(ERROR) << "Dimension index is wrong: " << index; - } - return dim_size; -} - -int DETensor::ElementsNum() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->Size(); -} - -size_t DETensor::Size() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->SizeInBytes(); -} - -void *DETensor::MutableData() { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->GetMutableBuffer(); -} - -} // namespace tensor -} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/api/execute.cc b/mindspore/ccsrc/minddata/dataset/api/execute.cc index bfa0566956..b72bcbf741 100644 --- a/mindspore/ccsrc/minddata/dataset/api/execute.cc +++ b/mindspore/ccsrc/minddata/dataset/api/execute.cc @@ -14,12 +14,11 @@ * limitations under the License. */ -#include "minddata/dataset/core/tensor_row.h" -#ifdef ENABLE_ANDROID -#include "minddata/dataset/include/de_tensor.h" -#endif #include "minddata/dataset/include/execute.h" +#include "minddata/dataset/core/de_tensor.h" +#include "minddata/dataset/core/tensor_row.h" #include "minddata/dataset/include/tensor.h" +#include "minddata/dataset/include/type_id.h" #include "minddata/dataset/kernels/tensor_op.h" #ifndef ENABLE_ANDROID #include "utils/log_adapter.h" @@ -30,78 +29,85 @@ namespace mindspore { namespace dataset { -Execute::Execute(std::shared_ptr op) : op_(std::move(op)) {} +Execute::Execute(std::shared_ptr op) { ops_.emplace_back(std::move(op)); } -/// \brief Destructor -Execute::~Execute() = default; +Execute::Execute(std::vector> ops) : ops_(std::move(ops)) {} -#ifdef ENABLE_ANDROID -std::shared_ptr Execute::operator()(std::shared_ptr input) { - // Build the op - if (op_ == nullptr) { - MS_LOG(ERROR) << "Input TensorOperation is not valid"; - return nullptr; - } +Status 
Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output) { + // Validate input tensor + CHECK_FAIL_RETURN_UNEXPECTED(input.DataSize() > 0, "Input Tensor has no data"); + CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided"); - std::shared_ptr de_input = std::dynamic_pointer_cast(input)->tensor(); - if (de_input == nullptr) { - MS_LOG(ERROR) << "Input Tensor is not valid"; - return nullptr; - } - std::shared_ptr transform = op_->Build(); - std::shared_ptr de_output; - Status rc = transform->Compute(de_input, &de_output); - - if (rc.IsError()) { - // execution failed - MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString(); - return nullptr; + // Validate and build runtime ops + std::vector> transforms; + for (int32_t i = 0; i < ops_.size(); i++) { + CHECK_FAIL_RETURN_UNEXPECTED(ops_[i] != nullptr, "Input TensorOperation[" + std::to_string(i) + "] is null"); + RETURN_IF_NOT_OK(ops_[i]->ValidateParams()); + transforms.emplace_back(ops_[i]->Build()); } - return std::make_shared(std::move(de_output)); -} -#endif -std::shared_ptr Execute::operator()(std::shared_ptr input) { - // Build the op - if (op_ == nullptr) { - MS_LOG(ERROR) << "Input TensorOperation is not valid"; - return nullptr; - } + // Convert mindspore::Tensor to dataset::Tensor + std::shared_ptr de_tensor; + Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(input.Shape()), + MSTypeToDEType(static_cast(input.DataType())), + (const uchar *)(input.Data().get()), input.DataSize(), &de_tensor); + RETURN_IF_NOT_OK(rc); - if (input == nullptr) { - MS_LOG(ERROR) << "Input Tensor is not valid"; - return nullptr; - } - // will add validate params once API is set - std::shared_ptr transform = op_->Build(); - std::shared_ptr de_output; - Status rc = transform->Compute(input, &de_output); - - if (rc.IsError()) { - // execution failed - MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString(); - return nullptr; + // Apply 
transforms on tensor + for (auto &t : transforms) { + std::shared_ptr de_output; + RETURN_IF_NOT_OK(t->Compute(de_tensor, &de_output)); + + // For next transform + de_tensor = std::move(de_output); } - return de_output; + + // Convert dataset::Tensor to mindspore::Tensor + CHECK_FAIL_RETURN_UNEXPECTED(de_tensor->HasData(), "Apply transform failed, output tensor has no data"); + *output = mindspore::MSTensor(std::make_shared(de_tensor)); + return Status::OK(); } -Status Execute::operator()(const std::vector> &input_tensor_list, - std::vector> *output_tensor_list) { - CHECK_FAIL_RETURN_UNEXPECTED(op_ != nullptr, "Input TensorOperation is not valid"); +Status Execute::operator()(const std::vector &input_tensor_list, std::vector *output_tensor_list) { + // Validate input tensor CHECK_FAIL_RETURN_UNEXPECTED(!input_tensor_list.empty(), "Input Tensor is not valid"); + for (auto &tensor : input_tensor_list) { + CHECK_FAIL_RETURN_UNEXPECTED(tensor.DataSize() > 0, "Input Tensor has no data"); + } + CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided"); - TensorRow input, output; - std::copy(input_tensor_list.begin(), input_tensor_list.end(), std::back_inserter(input)); - CHECK_FAIL_RETURN_UNEXPECTED(!input.empty(), "Input Tensor is not valid"); + // Validate and build runtime ops + std::vector> transforms; + for (int32_t i = 0; i < ops_.size(); i++) { + CHECK_FAIL_RETURN_UNEXPECTED(ops_[i] != nullptr, "Input TensorOperation[" + std::to_string(i) + "] is null"); + RETURN_IF_NOT_OK(ops_[i]->ValidateParams()); + transforms.emplace_back(ops_[i]->Build()); + } + + TensorRow de_tensor_list; + for (auto &tensor : input_tensor_list) { + std::shared_ptr de_tensor; + Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(tensor.Shape()), + MSTypeToDEType(static_cast(tensor.DataType())), + (const uchar *)(tensor.Data().get()), tensor.DataSize(), &de_tensor); + RETURN_IF_NOT_OK(rc); + de_tensor_list.emplace_back(std::move(de_tensor)); + } - 
std::shared_ptr transform = op_->Build(); - Status rc = transform->Compute(input, &output); - if (rc.IsError()) { - // execution failed - RETURN_STATUS_UNEXPECTED("Operation execution failed : " + rc.ToString()); + // Apply transforms on tensor + for (auto &t : transforms) { + TensorRow de_output_list; + RETURN_IF_NOT_OK(t->Compute(de_tensor_list, &de_output_list)); + // For next transform + de_tensor_list = std::move(de_output_list); } - std::copy(output.begin(), output.end(), std::back_inserter(*output_tensor_list)); + for (auto &tensor : de_tensor_list) { + CHECK_FAIL_RETURN_UNEXPECTED(tensor->HasData(), "Apply transform failed, output tensor has no data"); + auto ms_tensor = mindspore::MSTensor(std::make_shared(tensor)); + output_tensor_list->emplace_back(ms_tensor); + } + CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor is not valid"); return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/api/minddata_eager.cc b/mindspore/ccsrc/minddata/dataset/api/minddata_eager.cc deleted file mode 100644 index 154ec4ab03..0000000000 --- a/mindspore/ccsrc/minddata/dataset/api/minddata_eager.cc +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include - -#include "minddata/dataset/include/minddata_eager.h" -#include "minddata/dataset/include/vision.h" -#include "minddata/dataset/core/tensor.h" -#include "minddata/dataset/kernels/tensor_op.h" -#include "minddata/dataset/util/path.h" - -namespace mindspore { -namespace api { - -MindDataEager::MindDataEager(std::vector> ops) : ops_(ops) {} - -// Helper function to convert Type from DE to MS -DataType ToMSType(dataset::DataType type) { - switch (dataset::DataType::Type(type)) { - case dataset::DataType::DE_BOOL: - return DataType::kMsBool; - case dataset::DataType::DE_UINT8: - return DataType::kMsUint8; - case dataset::DataType::DE_INT32: - return DataType::kMsInt32; - case dataset::DataType::DE_INT64: - return DataType::kMsInt64; - case dataset::DataType::DE_FLOAT32: - return DataType::kMsFloat32; - default: - return DataType::kMsUnknown; - } -} - -// Helper function to convert Type from MS to DE -dataset::DataType ToDEType(DataType type) { - switch (type) { - case DataType::kMsBool: - return dataset::DataType(dataset::DataType::DE_BOOL); - case DataType::kMsUint8: - return dataset::DataType(dataset::DataType::DE_UINT8); - case DataType::kMsInt32: - return dataset::DataType(dataset::DataType::DE_INT32); - case DataType::kMsInt64: - return dataset::DataType(dataset::DataType::DE_INT64); - case DataType::kMsFloat32: - return dataset::DataType(dataset::DataType::DE_FLOAT32); - default: - return dataset::DataType(dataset::DataType::DE_UNKNOWN); - } -} - -Status MindDataEager::LoadImageFromDir(const std::string &image_dir, std::vector> *images) { - // Check target directory - dataset::Path image_dir_(image_dir); - if (!image_dir_.Exists() || !image_dir_.IsDirectory()) { - std::string err_msg = "Target directory: " + image_dir + " does not exist or not a directory."; - MS_LOG(ERROR) << err_msg; - return Status(StatusCode::FAILED, err_msg); - } - if (access(image_dir_.toString().c_str(), R_OK) == -1) { - std::string err_msg = "No access to 
target directory: " + image_dir; - MS_LOG(ERROR) << err_msg; - return Status(StatusCode::FAILED, err_msg); - } - - // Start reading images and constructing tensors - auto path_itr = dataset::Path::DirIterator::OpenDirectory(&image_dir_); - while (path_itr->hasNext()) { - dataset::Path file = path_itr->next(); - std::shared_ptr image; - dataset::Tensor::CreateFromFile(file.toString(), &image); - - std::shared_ptr ms_image = std::make_shared("image", DataType(kMsUint8), image->shape().AsVector(), - image->GetBuffer(), image->SizeInBytes()); - images->push_back(ms_image); - } - - // Check if read images or not - if (images->empty()) { - std::string err_msg = "No images found in target directory: " + image_dir; - MS_LOG(ERROR) << err_msg; - return Status(StatusCode::FAILED, err_msg); - } - - return Status(StatusCode::SUCCESS); -} - -std::shared_ptr MindDataEager::operator()(std::shared_ptr input) { - // Validate ops - if (ops_.empty()) { - MS_LOG(ERROR) << "Input TensorOperation should be provided"; - return nullptr; - } - for (int32_t i = 0; i < ops_.size(); i++) { - if (ops_[i] == nullptr) { - MS_LOG(ERROR) << "Input TensorOperation[" << i << "] is invalid or null"; - return nullptr; - } - } - // Validate input tensor - if (input == nullptr) { - MS_LOG(ERROR) << "Input Tensor should not be null"; - return nullptr; - } - - // Start applying transforms in ops - std::shared_ptr de_input; - dataset::Tensor::CreateFromMemory(dataset::TensorShape(input->Shape()), ToDEType(input->DataType()), - (const uchar *)(input->Data()), &de_input); - - for (int32_t i = 0; i < ops_.size(); i++) { - // Build runtime op and run - std::shared_ptr de_output; - std::shared_ptr transform = ops_[i]->Build(); - dataset::Status rc = transform->Compute(de_input, &de_output); - - // check execution failed - if (rc.IsError()) { - MS_LOG(ERROR) << "Operation execution failed : " << rc.ToString(); - return nullptr; - } - - // For next transform - de_input = std::move(de_output); - } - - // Convert 
DETensor to Tensor - if (!de_input->HasData()) { - MS_LOG(ERROR) << "Apply transform failed, output tensor has no data"; - return nullptr; - } - std::shared_ptr output = - std::make_shared("transfomed", ToMSType(de_input->type()), de_input->shape().AsVector(), - de_input->GetBuffer(), de_input->SizeInBytes()); - return output; -} - -} // namespace api -} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/include/execute_binding.cc b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/include/execute_binding.cc index 56dc7d793e..094a070e69 100644 --- a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/include/execute_binding.cc +++ b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/include/execute_binding.cc @@ -29,25 +29,42 @@ PYBIND_REGISTER(Execute, 0, ([](const py::module *m) { return execute; })) .def("__call__", - [](Execute &self, std::shared_ptr in) { - std::shared_ptr out = self(in); - if (out == nullptr) { - THROW_IF_ERROR([]() { - RETURN_STATUS_UNEXPECTED( - "Failed to execute op in eager mode, please check ERROR log above."); + [](Execute &self, const std::shared_ptr &de_tensor) { + auto ms_tensor = mindspore::MSTensor(std::make_shared(de_tensor)); + Status rc = self(ms_tensor, &ms_tensor); + if (rc.IsError()) { + THROW_IF_ERROR([&rc]() { + RETURN_STATUS_UNEXPECTED("Failed to execute transform op, " + rc.ToString()); }()); } - return out; + std::shared_ptr de_output_tensor; + dataset::Tensor::CreateFromMemory(dataset::TensorShape(ms_tensor.Shape()), + MSTypeToDEType(static_cast(ms_tensor.DataType())), + (const uchar *)(ms_tensor.Data().get()), + ms_tensor.DataSize(), &de_output_tensor); + return de_output_tensor; }) .def("__call__", [](Execute &self, const std::vector> &input_tensor_list) { - std::vector> output_tensor_list; - THROW_IF_ERROR(self(input_tensor_list, &output_tensor_list)); - if (output_tensor_list.empty()) { - THROW_IF_ERROR([]() { - 
RETURN_STATUS_UNEXPECTED("Failed to execute op in eager mode, please check ERROR log above."); - }()); + std::vector ms_input_tensor_list; + std::vector ms_output_tensor_list; + for (auto &tensor : input_tensor_list) { + auto ms_tensor = mindspore::MSTensor(std::make_shared(tensor)); + ms_input_tensor_list.emplace_back(std::move(ms_tensor)); } - return output_tensor_list; + Status rc = self(ms_input_tensor_list, &ms_output_tensor_list); + if (rc.IsError()) { + THROW_IF_ERROR( + [&rc]() { RETURN_STATUS_UNEXPECTED("Failed to execute transform op, " + rc.ToString()); }()); + } + std::vector> de_output_tensor_list; + for (auto &tensor : ms_output_tensor_list) { + std::shared_ptr de_output_tensor; + dataset::Tensor::CreateFromMemory( + dataset::TensorShape(tensor.Shape()), MSTypeToDEType(static_cast(tensor.DataType())), + (const uchar *)(tensor.Data().get()), tensor.DataSize(), &de_output_tensor); + de_output_tensor_list.emplace_back(std::move(de_output_tensor)); + } + return de_output_tensor_list; }); })); } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/data/bindings.cc b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/data/bindings.cc index e58557a511..03044106ee 100644 --- a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/data/bindings.cc +++ b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/data/bindings.cc @@ -84,7 +84,8 @@ PYBIND_REGISTER(SliceOption, 0, ([](const py::module *m) { } if (!c_slice.valid()) { - THROW_IF_ERROR(Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Wrong slice object")); + THROW_IF_ERROR( + Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Wrong slice object")); } return SliceOption(c_slice); })) diff --git a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/ir/image/bindings.cc b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/ir/image/bindings.cc index 
f0d73ca2bb..830696337d 100644 --- a/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/ir/image/bindings.cc +++ b/mindspore/ccsrc/minddata/dataset/api/python/bindings/dataset/kernels/ir/image/bindings.cc @@ -354,7 +354,7 @@ PYBIND_REGISTER( for (auto handle : py_sub.cast()) { py::tuple tp = handle.cast(); if (tp.is_none() || tp.size() != 2) { - THROW_IF_ERROR(Status(StatusCode::kUnexpectedError, "Each tuple in subpolicy should be (op, prob).")); + THROW_IF_ERROR(Status(StatusCode::kMDUnexpectedError, "Each tuple in subpolicy should be (op, prob).")); } std::shared_ptr t_op; if (py::isinstance(tp[0])) { @@ -366,11 +366,11 @@ PYBIND_REGISTER( std::make_shared((tp[0]).cast())); } else { THROW_IF_ERROR( - Status(StatusCode::kUnexpectedError, "op is neither a tensorOp, tensorOperation nor a pyfunc.")); + Status(StatusCode::kMDUnexpectedError, "op is neither a tensorOp, tensorOperation nor a pyfunc.")); } double prob = (tp[1]).cast(); if (prob < 0 || prob > 1) { - THROW_IF_ERROR(Status(StatusCode::kUnexpectedError, "prob needs to be with [0,1].")); + THROW_IF_ERROR(Status(StatusCode::kMDUnexpectedError, "prob needs to be with [0,1].")); } cpp_policy.back().emplace_back(std::make_pair(t_op, prob)); } diff --git a/mindspore/ccsrc/minddata/dataset/callback/py_ds_callback.cc b/mindspore/ccsrc/minddata/dataset/callback/py_ds_callback.cc index 6763dada42..85e1177906 100644 --- a/mindspore/ccsrc/minddata/dataset/callback/py_ds_callback.cc +++ b/mindspore/ccsrc/minddata/dataset/callback/py_ds_callback.cc @@ -51,12 +51,12 @@ Status PyDSCallback::ExecutePyfunc(py::function f, const CallbackParam &cb_param // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { f(cb_param); } catch (const py::error_already_set &e) { - return 
Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/core/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/core/CMakeLists.txt index a991323174..9a204f6deb 100644 --- a/mindspore/ccsrc/minddata/dataset/core/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/core/CMakeLists.txt @@ -5,6 +5,7 @@ set(DATASET_CORE_SRC_FILES config_manager.cc cv_tensor.cc data_type.cc + de_tensor.cc global_context.cc tensor.cc tensor_helpers.cc diff --git a/mindspore/ccsrc/minddata/dataset/core/de_tensor.cc b/mindspore/ccsrc/minddata/dataset/core/de_tensor.cc new file mode 100644 index 0000000000..041533fd91 --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/core/de_tensor.cc @@ -0,0 +1,67 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "minddata/dataset/core/de_tensor.h" +#include "minddata/dataset/core/constants.h" +#include "minddata/dataset/core/data_type.h" +#include "minddata/dataset/include/type_id.h" +#include "mindspore/core/ir/dtype/type_id.h" +#include "utils/hashing.h" +#ifndef ENABLE_ANDROID +#include "utils/log_adapter.h" +#define ASSERT_NULL(ptr) MS_EXCEPTION_IF_NULL(ptr) +#else +#include "mindspore/lite/src/common/log_adapter.h" +#define ASSERT_NULL(ptr) MS_ASSERT((ptr) != nullptr) +#endif + +namespace mindspore { +namespace dataset { + +DETensor::DETensor(std::shared_ptr tensor_impl) + : tensor_impl_(tensor_impl), + name_("MindDataTensor"), + type_(static_cast(DETypeToMSType(tensor_impl_->type()))), + shape_(tensor_impl_->shape().AsVector()) {} + +const std::string &DETensor::Name() const { return name_; } + +enum mindspore::DataType DETensor::DataType() const { + ASSERT_NULL(tensor_impl_); + return static_cast(DETypeToMSType(tensor_impl_->type())); +} + +size_t DETensor::DataSize() const { + ASSERT_NULL(tensor_impl_); + return tensor_impl_->SizeInBytes(); +} + +const std::vector &DETensor::Shape() const { return shape_; } + +std::shared_ptr DETensor::Data() const { + return std::shared_ptr(tensor_impl_->GetBuffer(), [](const void *) {}); +} + +void *DETensor::MutableData() { + ASSERT_NULL(tensor_impl_); + return tensor_impl_->GetMutableBuffer(); +} + +bool DETensor::IsDevice() const { return false; } + +std::shared_ptr DETensor::Clone() const { return std::make_shared(tensor_impl_); } +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/core/de_tensor.h b/mindspore/ccsrc/minddata/dataset/core/de_tensor.h new file mode 100644 index 0000000000..be3eb68e2f --- /dev/null +++ b/mindspore/ccsrc/minddata/dataset/core/de_tensor.h @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DETENSOR_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DETENSOR_H_ +#include +#include +#include +#include "include/api/types.h" +#include "mindspore/core/ir/api_tensor_impl.h" +#include "minddata/dataset/include/status.h" +#include "minddata/dataset/include/tensor.h" + +namespace mindspore { +namespace dataset { +class DETensor : public mindspore::MSTensor::Impl { + public: + DETensor() = default; + ~DETensor() override = default; + explicit DETensor(std::shared_ptr tensor_impl); + + const std::string &Name() const override; + + enum mindspore::DataType DataType() const override; + + size_t DataSize() const override; + + const std::vector &Shape() const override; + + std::shared_ptr Data() const override; + + void *MutableData() override; + + bool IsDevice() const override; + + std::shared_ptr Clone() const override; + + private: + std::shared_ptr tensor_impl_; + std::string name_; + enum mindspore::DataType type_; + std::vector shape_; +}; +} // namespace dataset +} // namespace mindspore +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DETENSOR_H_ diff --git a/mindspore/ccsrc/minddata/dataset/core/tensor.h b/mindspore/ccsrc/minddata/dataset/core/tensor.h index 12bebf8d8a..ac60324098 100644 --- a/mindspore/ccsrc/minddata/dataset/core/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/core/tensor.h @@ -41,23 +41,17 @@ #include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_helpers.h" #include "minddata/dataset/core/tensor_shape.h" +#include 
"minddata/dataset/core/de_tensor.h" #include "minddata/dataset/util/status.h" #include "utils/ms_utils.h" #ifndef ENABLE_ANDROID #include "proto/example.pb.h" -#else -#include "minddata/dataset/include/de_tensor.h" #endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { -#ifdef ENABLE_ANDROID -namespace tensor { -class DETensor; -} // namespace tensor -#endif namespace dataset { class Tensor; template @@ -85,7 +79,7 @@ class Tensor { /// \param other Tensor to be moved Tensor(Tensor &&other) noexcept; - /// Move assigment operator + /// Move assignment operator /// \param other Tensor to be moved Tensor &operator=(Tensor &&other) noexcept; @@ -134,7 +128,7 @@ class Tensor { #ifndef ENABLE_ANDROID /// Create a tensor of type DE_STRING from a BytesList. /// \param[in] bytes_list protobuf's Bytelist - /// \param[in] shape shape of the outout tensor + /// \param[in] shape shape of the output tensor /// \param[out] out created Tensor /// \return Status Code static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, TensorPtr *out); @@ -292,7 +286,7 @@ class Tensor { std::string err; err += (data_ == nullptr) ? "data_ is nullptr \t" : ""; err += type_.IsCompatible() ? "data type not compatible\t" : ""; - return Status(StatusCode::kUnexpectedError, err); + return Status(StatusCode::kMDUnexpectedError, err); } } @@ -343,7 +337,7 @@ class Tensor { void Invalidate(); /// Copy input tensor into self at the location index. - /// Index is a vector of axises which can be incomplete: + /// Index is a vector of axes which can be incomplete: /// Ex: shape <2,3>, inserting into index {0} will replace the first row. index {1,2} will replace the last cell. /// \param index /// \param input @@ -686,9 +680,7 @@ class Tensor { unsigned char *data_end_ = nullptr; private: -#ifdef ENABLE_ANDROID - friend class tensor::DETensor; -#endif + friend class DETensor; /// Slice numeric tensors. 
Status SliceNumeric(TensorPtr *out, const std::vector> &indices, const TensorShape &shape); diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt index 99b8a48149..7ee95fc5e8 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt @@ -73,6 +73,7 @@ if(ENABLE_CACHE) engine-cache-server _c_dataengine _c_mindrecord + mindspore mindspore::protobuf mindspore::grpc++ mindspore_gvar @@ -85,6 +86,7 @@ if(ENABLE_CACHE) engine-cache-server _c_dataengine _c_mindrecord + mindspore mindspore::protobuf mindspore::grpc++ mindspore_gvar @@ -103,6 +105,7 @@ if(ENABLE_CACHE) add_executable(cache_admin cache_admin.cc cache_admin_arg.cc) target_link_libraries(cache_admin _c_dataengine _c_mindrecord mindspore::protobuf ${PYTHON_LIBRARIES} pthread) + target_link_libraries(cache_admin mindspore mindspore_shared_lib) if(USE_GLOG) target_link_libraries(cache_admin mindspore::glog) diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin.cc index 0995c4ea48..79ec9c7119 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin.cc @@ -22,10 +22,11 @@ #include "minddata/dataset/engine/cache/cache_common.h" #include "minddata/dataset/util/path.h" +namespace ms = mindspore; namespace ds = mindspore::dataset; int main(int argc, char **argv) { - ds::Status rc; + ms::Status rc; ds::CacheAdminArgHandler args; std::stringstream arg_stream; diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin_arg.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin_arg.cc index ab30ceaa44..5774c80e41 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin_arg.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_admin_arg.cc @@ -89,7 +89,7 @@ Status 
CacheAdminArgHandler::AssignArg(std::string option, int32_t *out_arg, std ArgValue selected_arg = arg_map_[option]; if (used_args_[selected_arg]) { std::string err_msg = "The " + option + " argument was given more than once."; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Flag that this arg is used now @@ -101,7 +101,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, int32_t *out_arg, std if (command_id != CommandId::kCmdUnknown) { if (command_id_ != CommandId::kCmdUnknown) { std::string err_msg = "Only one command at a time is allowed. Invalid command: " + option; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } else { command_id_ = command_id; } @@ -113,7 +113,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, int32_t *out_arg, std *arg_stream >> value_as_string; if (value_as_string.empty()) { std::string err_msg = option + " option requires an argument field. 
Syntax: " + option + " "; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Now, attempt to convert the value into it's numeric format for output @@ -121,7 +121,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, int32_t *out_arg, std *out_arg = std::stoul(value_as_string); } catch (const std::exception &e) { std::string err_msg = "Invalid numeric value: " + value_as_string; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } return Status::OK(); @@ -133,7 +133,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, std::string *out_arg, ArgValue selected_arg = arg_map_[option]; if (used_args_[selected_arg]) { std::string err_msg = "The " + option + " argument was given more than once."; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Flag that this arg is used now @@ -145,7 +145,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, std::string *out_arg, if (command_id != CommandId::kCmdUnknown) { if (command_id_ != CommandId::kCmdUnknown) { std::string err_msg = "Only one command at a time is allowed. Invalid command: " + option; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } else { command_id_ = command_id; } @@ -158,12 +158,12 @@ Status CacheAdminArgHandler::AssignArg(std::string option, std::string *out_arg, *arg_stream >> *out_arg; } else { std::string err_msg = option + " option requires an argument field. Syntax: " + option + " "; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } if (out_arg->empty()) { std::string err_msg = option + " option requires an argument field. 
Syntax: " + option + " "; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } } @@ -176,7 +176,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, float *out_arg, std:: ArgValue selected_arg = arg_map_[option]; if (used_args_[selected_arg]) { std::string err_msg = "The " + option + " argument was given more than once."; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Flag that this arg is used now @@ -188,7 +188,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, float *out_arg, std:: if (command_id != CommandId::kCmdUnknown) { if (command_id_ != CommandId::kCmdUnknown) { std::string err_msg = "Only one command at a time is allowed. Invalid command: " + option; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } else { command_id_ = command_id; } @@ -200,7 +200,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, float *out_arg, std:: *arg_stream >> value_as_string; if (value_as_string.empty()) { std::string err_msg = option + " option requires an argument field. Syntax: " + option + " "; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Now, attempt to convert the value into it's string format for output @@ -208,7 +208,7 @@ Status CacheAdminArgHandler::AssignArg(std::string option, float *out_arg, std:: *out_arg = std::stof(value_as_string, nullptr); } catch (const std::exception &e) { std::string err_msg = "Invalid numeric value: " + value_as_string; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } return Status::OK(); @@ -224,7 +224,7 @@ Status CacheAdminArgHandler::ParseArgStream(std::stringstream *arg_stream) { if (hostname_ != std::string(kCfgDefaultCacheHost)) { std::string err_msg = "Invalid host interface: " + hostname_ + ". 
Current limitation, only 127.0.0.1 can be used."; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } break; } @@ -304,7 +304,7 @@ Status CacheAdminArgHandler::Validate() { if (!trailing_args_.empty()) { std::string err_msg = "Invalid arguments provided: " + trailing_args_; err_msg += "\nPlease try `cache_admin --help` for more information"; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // The user must pick at least one command. i.e. it's meaningless to just give a hostname or port but no command to @@ -312,18 +312,18 @@ Status CacheAdminArgHandler::Validate() { if (command_id_ == CommandId::kCmdUnknown) { std::string err_msg = "No command provided"; err_msg += "\nPlease try `cache_admin --help` for more information"; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } // Additional checks here auto max_num_workers = std::max(std::thread::hardware_concurrency(), 100); if (num_workers_ < 1 || num_workers_ > max_num_workers) - return Status(StatusCode::kSyntaxError, + return Status(StatusCode::kMDSyntaxError, "Number of workers must be in range of 1 and " + std::to_string(max_num_workers) + "."); - if (log_level_ < 0 || log_level_ > 3) return Status(StatusCode::kSyntaxError, "Log level must be in range (0..3)."); + if (log_level_ < 0 || log_level_ > 3) return Status(StatusCode::kMDSyntaxError, "Log level must be in range (0..3)."); if (memory_cap_ratio_ <= 0 || memory_cap_ratio_ > 1) - return Status(StatusCode::kSyntaxError, "Memory cap ratio should be positive and no greater than 1"); - if (port_ < 1025 || port_ > 65535) return Status(StatusCode::kSyntaxError, "Port must be in range (1025..65535)."); + return Status(StatusCode::kMDSyntaxError, "Memory cap ratio should be positive and no greater than 1"); + if (port_ < 1025 || port_ > 65535) return Status(StatusCode::kMDSyntaxError, "Port must 
be in range (1025..65535)."); return Status::OK(); } @@ -467,9 +467,9 @@ Status CacheAdminArgHandler::StopServer(CommandId command_id) { Status rc = rq->Wait(); if (rc.IsError()) { msg.RemoveResourcesOnExit(); - if (rc.IsNetWorkError()) { + if (rc == StatusCode::kMDNetWorkError) { std::string errMsg = "Server on port " + std::to_string(port_) + " is not up or has been shutdown already."; - return Status(StatusCode::kNetWorkError, errMsg); + return Status(StatusCode::kMDNetWorkError, errMsg); } return rc; } @@ -544,7 +544,7 @@ Status CacheAdminArgHandler::StartServer(CommandId command_id) { if (WIFEXITED(status)) { auto exit_status = WEXITSTATUS(status); if (exit_status) { - return Status(StatusCode::kUnexpectedError, msg); + return Status(StatusCode::kMDUnexpectedError, msg); } else { // Not an error, some info message goes to stdout std::cout << msg << std::endl; diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_arena.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_arena.cc index 5542f61189..27cb0de8d9 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_arena.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_arena.cc @@ -75,7 +75,7 @@ Status CachedSharedMemory::AllocateSharedMemory(int32_t client_id, size_t sz, vo do { std::unique_lock lock(mux_[slot]); rc = shm_pool_[slot]->Allocate(sz, p); - if (rc.IsOutofMemory()) { + if (rc == StatusCode::kMDOutOfMemory) { slot = (slot + 1) % shm_pool_.size(); } } while (rc.IsError() && slot != begin_slot); diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_client.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_client.cc index b54927b115..b2ae11a13e 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_client.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_client.cc @@ -137,7 +137,7 @@ Status CacheClient::WriteBuffer(std::unique_ptr &&in) const { Status CacheClient::AsyncWriteRow(const TensorRow &row) { if (async_buffer_stream_ == 
nullptr) { - return Status(StatusCode::kNotImplementedYet); + return Status(StatusCode::kMDNotImplementedYet); } RETURN_IF_NOT_OK(async_buffer_stream_->AsyncWrite(row)); return Status::OK(); @@ -145,7 +145,7 @@ Status CacheClient::AsyncWriteRow(const TensorRow &row) { Status CacheClient::AsyncWriteBuffer(std::unique_ptr &&in) { if (async_buffer_stream_ == nullptr) { - return Status(StatusCode::kNotImplementedYet); + return Status(StatusCode::kMDNotImplementedYet); } else { Status rc; std::unique_ptr tensor_table = std::make_unique(); @@ -155,7 +155,7 @@ Status CacheClient::AsyncWriteBuffer(std::unique_ptr &&in) { TensorRow row; RETURN_IF_NOT_OK(in->PopRow(&row)); rc = AsyncWriteRow(row); - if (rc.get_code() == StatusCode::kNotImplementedYet) { + if (rc.StatusCode() == StatusCode::kMDNotImplementedYet) { tensor_table->push_back(row); } else if (rc.IsError()) { return rc; @@ -165,7 +165,7 @@ Status CacheClient::AsyncWriteBuffer(std::unique_ptr &&in) { // If not all of them can be sent async, return what's left back to the caller. 
if (!tensor_table->empty()) { in->set_tensor_table(std::move(tensor_table)); - return Status(StatusCode::kNotImplementedYet); + return Status(StatusCode::kMDNotImplementedYet); } } return Status::OK(); @@ -225,7 +225,8 @@ Status CacheClient::CreateCache(uint32_t tree_crc, bool generate_id) { auto cache_state = static_cast(out); if (cache_state == CacheServiceState::kFetchPhase || (cache_state == CacheServiceState::kBuildPhase && cookie_.empty())) { - return Status(StatusCode::kDuplicateKey, __LINE__, __FILE__, "Not an error and we should bypass the build phase"); + return Status(StatusCode::kMDDuplicateKey, __LINE__, __FILE__, + "Not an error and we should bypass the build phase"); } } else { cinfo_.set_crc(tree_crc); // It's really a new cache we're creating so save our crc in the client @@ -243,10 +244,10 @@ Status CacheClient::CreateCache(uint32_t tree_crc, bool generate_id) { auto rq = std::make_shared(this, cinfo_, cache_mem_sz_, createFlag); RETURN_IF_NOT_OK(PushRequest(rq)); Status rc = rq->Wait(); - bool success = (rc.IsOk() || rc.get_code() == StatusCode::kDuplicateKey); + bool success = (rc.IsOk() || rc.StatusCode() == StatusCode::kMDDuplicateKey); // If we get kDuplicateKey, it just means we aren't the first one to create the cache, // and we will continue to parse the result. - if (rc.get_code() == StatusCode::kDuplicateKey) { + if (rc.StatusCode() == StatusCode::kMDDuplicateKey) { RETURN_IF_NOT_OK(rq->PostReply()); } if (success) { @@ -443,7 +444,7 @@ Status CacheClient::AsyncBufferStream::AsyncWrite(const TensorRow &row) { } // If the size is too big, tell the user to send it directly. 
if (sz > kAsyncBufferSize) { - return Status(StatusCode::kNotImplementedYet); + return Status(StatusCode::kMDNotImplementedYet); } std::unique_lock lock(mux_); // Check error from the server side while we have the lock; diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_common.h b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_common.h index 637bbe38c8..40922fea23 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_common.h +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_common.h @@ -66,7 +66,7 @@ enum class CacheServiceState : int8_t { /// \param rc[in] Status object /// \param reply[in/out] pointer to pre-allocated protobuf object inline void Status2CacheReply(const Status &rc, CacheReply *reply) { - reply->set_rc(static_cast(rc.get_code())); + reply->set_rc(static_cast(rc.StatusCode())); reply->set_msg(rc.ToString()); } /// \brief Generate the unix socket file we use on both client/server side given a tcp/ip port number diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_fbb.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_fbb.cc index 7a49dfc237..5b95068f12 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_fbb.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_fbb.cc @@ -98,7 +98,7 @@ Status SerializeTensorRowHeader(const TensorRow &row, std::shared_ptr rq) { std::unique_lock lck(mux_); auto r = req_.emplace(seqNo, std::move(tag)); if (!r.second) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__); } } // Last step is to tag the request. @@ -124,7 +124,7 @@ Status CacheClientGreeter::WorkerEntry() { } else { err_msg = rq->rc_.error_message() + ". 
GRPC Code " + std::to_string(error_code); } - Status remote_rc = Status(StatusCode::kNetWorkError, __LINE__, __FILE__, err_msg); + Status remote_rc = Status(StatusCode::kMDNetWorkError, __LINE__, __FILE__, err_msg); Status2CacheReply(remote_rc, &rq->base_rq_->reply_); } // Notify the waiting thread. diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_ipc.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_ipc.cc index 1b822e684b..ae75d064d1 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_ipc.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_ipc.cc @@ -25,7 +25,7 @@ Status PortToFtok(int port, SharedMemory::shm_key_t *out) { shmkey = ftok(unix_path.data(), 'a'); if (shmkey == (key_t)-1) { std::string errMsg = "Unable to create a ftok token. Errno = " + std::to_string(errno); - return Status(errno == ENOENT ? StatusCode::kFileNotExist : StatusCode::kUnexpectedError, errMsg); + return Status(errno == ENOENT ? StatusCode::kMDFileNotExist : StatusCode::kMDUnexpectedError, errMsg); } *out = shmkey; return Status::OK(); @@ -56,7 +56,7 @@ Status SharedMessage::SendStatus(const Status &rc) { CacheMsgBuf msg{ 1, }; - msg.body.status.err_code = static_cast(rc.get_code()); + msg.body.status.err_code = static_cast(rc.StatusCode()); auto err = memcpy_s(msg.body.status.err_msg, kSharedMessageSize, rc.ToString().data(), rc.ToString().size()); CHECK_FAIL_RETURN_UNEXPECTED(err == EOK, "memcpy_s failed. 
err = " + std::to_string(err)); msg.body.status.err_msg[rc.ToString().size()] = '\0'; diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_main.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_main.cc index cdb36cbdf8..a118a5e609 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_main.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_main.cc @@ -25,16 +25,17 @@ #include #include "minddata/dataset/engine/cache/cache_common.h" #include "minddata/dataset/engine/cache/cache_ipc.h" +namespace ms = mindspore; namespace ds = mindspore::dataset; /// Start the server /// \param argv /// \return Status object -ds::Status StartServer(int argc, char **argv) { - ds::Status rc; +ms::Status StartServer(int argc, char **argv) { + ms::Status rc; ds::CacheServer::Builder builder; if (argc != 8) { - return ds::Status(ds::StatusCode::kSyntaxError); + return ms::Status(ms::StatusCode::kMDSyntaxError); } int32_t port = strtol(argv[3], nullptr, 10); @@ -53,7 +54,7 @@ ds::Status StartServer(int argc, char **argv) { // is called. This is a standard procedure for daemonize a process on unix. if (chdir("/") == -1) { std::string errMsg = "Unable to change directory to /. Errno = " + std::to_string(errno); - return ds::Status(ds::StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return ms::Status(ms::StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } // A message queue for communication between parent and child (if we fork). @@ -80,13 +81,13 @@ ds::Status StartServer(int argc, char **argv) { // failed to fork if (pid < 0) { std::string errMsg = "Failed to fork process for cache server. Errno = " + std::to_string(errno); - return ds::Status(ds::StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return ms::Status(ms::StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else if (pid > 0) { // Parent and will be responsible for remove the queue on exit. 
msg.RemoveResourcesOnExit(); // Sleep one second and we attach to the msg que std::this_thread::sleep_for(std::chrono::seconds(1)); - ds::Status child_rc; + ms::Status child_rc; rc = msg.ReceiveStatus(&child_rc); if (rc.IsError()) { return rc; @@ -101,7 +102,7 @@ ds::Status StartServer(int argc, char **argv) { "logs (under " << ds::DefaultLogDir() << ") for any issues that may happen after startup\n"; signal(SIGCHLD, SIG_IGN); // ignore sig child signal. - return ds::Status::OK(); + return ms::Status::OK(); } else { // Child process will continue from here if daemonize and parent has already exited. // If we are running in the foreground, none of the code in block below will be run. @@ -110,7 +111,7 @@ ds::Status StartServer(int argc, char **argv) { sid = setsid(); if (sid < 0) { std::string errMsg = "Failed to setsid(). Errno = " + std::to_string(errno); - return ds::Status(ds::StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return ms::Status(ms::StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } close(0); close(1); @@ -137,10 +138,10 @@ ds::Status StartServer(int argc, char **argv) { int main(int argc, char **argv) { // This executable is not to be called directly, and should be invoked by cache_admin executable. 
- ds::Status rc = StartServer(argc, argv); + ms::Status rc = StartServer(argc, argv); // Check result if (rc.IsError()) { - auto errCode = rc.get_code(); + auto errCode = rc.StatusCode(); auto errMsg = rc.ToString(); std::cerr << errMsg << std::endl; return static_cast(errCode); diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_numa.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_numa.cc index 35ddc1df9d..7e75fb0b3e 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_numa.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_numa.cc @@ -136,7 +136,7 @@ Status NumaMemoryPool::Allocate(size_t n, void **p) { if (rc.IsOk()) { *p = ptr; break; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { inx = (inx + 1) % num_slots; } else { return rc; @@ -162,7 +162,7 @@ Status NumaMemoryPool::Allocate(size_t n, void **p) { if (rc.IsOk()) { *p = ptr; break; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { // Make the next arena and continue. slot = (slot + 1) % num_segments; } else { @@ -172,7 +172,7 @@ Status NumaMemoryPool::Allocate(size_t n, void **p) { } // Handle the case we have done one round robin search. 
if (ptr == nullptr) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } return rc; } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_pool.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_pool.cc index e677c58f06..47611836d3 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_pool.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_pool.cc @@ -108,7 +108,7 @@ Status CachePool::Insert(CachePool::key_type key, const std::vectorDoInsert(key, bl); } catch (const std::bad_alloc &e) { - rc = Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + rc = Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } // Duplicate key is treated as error and we will also free the memory. if (rc.IsError() && bl.ptr != nullptr) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc index df68f406a1..14803bcf28 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc @@ -223,7 +223,7 @@ Status CreateCacheRequest::Prepare() { rq_.add_buf_data(fbb.GetBufferPointer(), fbb.GetSize()); return Status::OK(); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } } @@ -277,7 +277,7 @@ Status CacheSchemaRequest::SerializeCacheSchemaRequest(const std::unordered_map< rq_.add_buf_data(fbb.GetBufferPointer(), fbb.GetSize()); return Status::OK(); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc index 4f40fe0a77..14c3830209 100644 --- 
a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc @@ -169,7 +169,7 @@ Status CacheServer::GlobalMemoryCheck(uint64_t cache_mem_sz) { int64_t mem_consumed = stat.stat_.num_mem_cached * stat.stat_.average_cache_sz; max_avail -= mem_consumed; if (max_avail <= 0) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); } ++it; } @@ -179,12 +179,12 @@ Status CacheServer::GlobalMemoryCheck(uint64_t cache_mem_sz) { if (max_avail < avail_mem) { int64_t req_mem = cache_mem_sz * 1048576L; // It is in MB unit. if (req_mem > max_avail) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); } else if (req_mem == 0) { // This cache request is specifying unlimited memory up to the memory cap. If we have consumed more than // 85% of our limit, fail this request. if (static_cast(max_avail) / static_cast(avail_mem) <= 0.15) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, "Please destroy some sessions"); } } } @@ -249,7 +249,7 @@ Status CacheServer::CreateService(CacheRequest *rq, CacheReply *reply) { client_id = cs->num_clients_.fetch_add(1); all_caches_.emplace(connection_id, std::move(cs)); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } } @@ -276,7 +276,7 @@ Status CacheServer::CreateService(CacheRequest *rq, CacheReply *reply) { reply->set_result(fbb.GetBufferPointer(), fbb.GetSize()); // We can return OK but we will return a duplicate key so user can act accordingly to either ignore it // treat it as OK. - return duplicate ? 
Status(StatusCode::kDuplicateKey) : Status::OK(); + return duplicate ? Status(StatusCode::kMDDuplicateKey) : Status::OK(); } Status CacheServer::DestroyCache(CacheRequest *rq) { @@ -306,7 +306,7 @@ Status CacheServer::CacheRow(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Cache id " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { auto sz = rq->buf_data_size(); std::vector buffers; @@ -326,7 +326,7 @@ Status CacheServer::CacheRow(CacheRequest *rq, CacheReply *reply) { RETURN_IF_NOT_OK(cs->CacheRow(buffers, &id)); reply->set_result(std::to_string(id)); } else { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Cookie mismatch"); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Cookie mismatch"); } } return Status::OK(); @@ -353,7 +353,7 @@ Status CacheServer::FastCacheRow(CacheRequest *rq, CacheReply *reply) { Status rc; if (cs == nullptr) { std::string errMsg = "Cache id " + std::to_string(connection_id) + " not found"; - rc = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + rc = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { // Only if the cookie matches, we can accept insert into this cache that has a build phase if (!cs->HasBuildPhase() || cookie == cs->cookie()) { @@ -365,11 +365,11 @@ Status CacheServer::FastCacheRow(CacheRequest *rq, CacheReply *reply) { } else { auto state = cs->GetState(); if (state != CacheServiceState::kFetchPhase) { - rc = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + rc = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Cache service is not in fetch phase. The current phase is " + std::to_string(static_cast(state)) + ". 
Client id: " + std::to_string(client_id)); } else { - rc = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + rc = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Cookie mismatch. Client id: " + std::to_string(client_id)); } } @@ -413,7 +413,7 @@ Status CacheServer::InternalFetchRow(CacheRequest *rq) { Status rc; if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } rc = cs->InternalFetchRow(flatbuffers::GetRoot(rq->buf_data(0).data())); // This is an internal request and is not tied to rpc. But need to post because there @@ -494,7 +494,7 @@ Status CacheServer::BatchFetchRows(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Cache id " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { CHECK_FAIL_RETURN_UNEXPECTED(!rq->buf_data().empty(), "Missing row id"); auto &row_id_buf = rq->buf_data(0); @@ -551,7 +551,7 @@ Status CacheServer::BatchFetchRows(CacheRequest *rq, CacheReply *reply) { mem.resize(mem_sz); CHECK_FAIL_RETURN_UNEXPECTED(mem.capacity() >= mem_sz, "Programming error"); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } WritableSlice dest(mem.data(), mem_sz); RETURN_IF_NOT_OK(BatchFetch(fbb, &dest)); @@ -568,7 +568,7 @@ Status CacheServer::GetStat(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return 
Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { CacheService::ServiceStat svc_stat; RETURN_IF_NOT_OK(cs->GetStat(&svc_stat)); @@ -595,7 +595,7 @@ Status CacheServer::CacheSchema(CacheRequest *rq) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { CHECK_FAIL_RETURN_UNEXPECTED(!rq->buf_data().empty(), "Missing schema information"); auto &create_schema_buf = rq->buf_data(0); @@ -611,7 +611,7 @@ Status CacheServer::FetchSchema(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { // We are going to use std::string to allocate and hold the result which will be eventually // 'moved' to the protobuf message (which underneath is also a std::string) for the purpose @@ -630,7 +630,7 @@ Status CacheServer::BuildPhaseDone(CacheRequest *rq) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { // First piece of data is the cookie CHECK_FAIL_RETURN_UNEXPECTED(!rq->buf_data().empty(), "Missing cookie"); @@ -639,7 +639,7 @@ Status CacheServer::BuildPhaseDone(CacheRequest *rq) { if (cookie == cs->cookie()) { RETURN_IF_NOT_OK(cs->BuildPhaseDone()); } else { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Cookie mismatch"); + return 
Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Cookie mismatch"); } } return Status::OK(); @@ -652,7 +652,7 @@ Status CacheServer::GetCacheMissKeys(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { std::vector gap; RETURN_IF_NOT_OK(cs->FindKeysMiss(&gap)); @@ -680,7 +680,7 @@ Status CacheServer::ToggleWriteMode(CacheRequest *rq) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { // First piece of data is the on/off flag CHECK_FAIL_RETURN_UNEXPECTED(!rq->buf_data().empty(), "Missing action flag"); @@ -747,7 +747,7 @@ Status CacheServer::ConnectReset(CacheRequest *rq) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { auto client_id = rq->client_id(); MS_LOG(WARNING) << "Client id " << client_id << " with connection id " << connection_id << " disconnects"; @@ -836,7 +836,7 @@ Status CacheServer::ProcessRowRequest(CacheServerRequest *cache_req, bool *inter default: std::string errMsg("Internal error, request type is not row request: "); errMsg += std::to_string(static_cast(cache_req->type_)); - cache_req->rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + cache_req->rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, 
__FILE__, errMsg); } return Status::OK(); } @@ -860,7 +860,7 @@ Status CacheServer::ProcessSessionRequest(CacheServerRequest *cache_req) { default: std::string errMsg("Internal error, request type is not session request: "); errMsg += std::to_string(static_cast(cache_req->type_)); - cache_req->rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + cache_req->rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } return Status::OK(); } @@ -931,7 +931,7 @@ Status CacheServer::ProcessAdminRequest(CacheServerRequest *cache_req) { default: std::string errMsg("Internal error, request type is not admin request: "); errMsg += std::to_string(static_cast(cache_req->type_)); - cache_req->rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + cache_req->rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } return Status::OK(); } @@ -949,7 +949,7 @@ Status CacheServer::ProcessRequest(CacheServerRequest *cache_req) { } else { std::string errMsg("Unknown request type : "); errMsg += std::to_string(static_cast(cache_req->type_)); - cache_req->rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + cache_req->rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } // Notify it is done, and move on to the next request. 
@@ -1045,7 +1045,7 @@ Status CacheServer::GetFreeRequestTag(CacheServerRequest **q) { RETURN_UNEXPECTED_IF_NULL(q); auto *p = new (std::nothrow) CacheServerRequest(); if (p == nullptr) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } *q = p; return Status::OK(); @@ -1091,7 +1091,7 @@ Status CacheServer::DestroySession(CacheRequest *rq) { } else { std::string errMsg = "Session id " + std::to_string(drop_session_id) + " not found in server on port " + std::to_string(port_) + "."; - return Status(StatusCode::kFileNotExist, errMsg); + return Status(StatusCode::kMDFileNotExist, errMsg); } } } @@ -1148,7 +1148,7 @@ Status CacheServer::GetCacheState(CacheRequest *rq, CacheReply *reply) { CacheService *cs = GetService(connection_id); if (cs == nullptr) { std::string errMsg = "Connection " + std::to_string(connection_id) + " not found"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, errMsg); } else { auto state = cs->GetState(); reply->set_result(std::to_string(static_cast(state))); @@ -1247,7 +1247,7 @@ Status CacheServer::Builder::IpcResourceCleanup() { std::string errMsg = "Cache server is already up and running"; // We return a duplicate error. 
The main() will intercept // and output a proper message - return Status(StatusCode::kDuplicateKey, errMsg); + return Status(StatusCode::kMDDuplicateKey, errMsg); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.h b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.h index 1297e1018a..c8f2b95a98 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.h +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.h @@ -419,7 +419,7 @@ class CacheServer : public Service { Status GetRc() { Status rc; for (auto &cache_rc : rc_lists_) { - if (cache_rc.IsError() && !cache_rc.IsInterrupted() && rc.IsOk()) { + if (cache_rc.IsError() && cache_rc != StatusCode::kMDInterrupted && rc.IsOk()) { rc = cache_rc; } } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_service.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_service.cc index 4679ac214e..790832e36f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_service.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_service.cc @@ -42,7 +42,7 @@ Status CacheService::DoServiceStart() { // Return an error if we use more than recommended memory. std::string errMsg = "Requesting cache size " + std::to_string(cache_mem_sz_) + " while available system memory " + std::to_string(avail_mem); - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, errMsg); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, errMsg); } memory_cap_ratio = static_cast(cache_mem_sz_) / avail_mem; } @@ -79,7 +79,7 @@ Status CacheService::CacheRow(const std::vector &buf, row_id_type if (st_ == CacheServiceState::kNoLocking) { // We ignore write this request once we turn off locking on the B+ tree. So we will just // return out of memory from now on. 
- return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } try { // The first buffer is a flatbuffer which describes the rest of the buffers follow @@ -119,16 +119,16 @@ Status CacheService::CacheRow(const std::vector &buf, row_id_type } // Now we cache the buffer. Status rc = cp_->Insert(*row_id_generated, all_data); - if (rc == Status(StatusCode::kDuplicateKey)) { + if (rc == Status(StatusCode::kMDDuplicateKey)) { MS_LOG(DEBUG) << "Ignoring duplicate key."; } else { if (HasBuildPhase()) { // For cache service that has a build phase, record the error in the state // so other clients can be aware of the new state. There is nothing one can // do to resume other than to drop the cache. - if (rc.IsNoSpace()) { + if (rc == StatusCode::kMDNoSpace) { st_ = CacheServiceState::kNoSpace; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { st_ = CacheServiceState::kOutOfMemory; } } @@ -152,7 +152,7 @@ Status CacheService::FastCacheRow(const ReadableSlice &src, row_id_type *row_id_ if (st_ == CacheServiceState::kNoLocking) { // We ignore write this request once we turn off locking on the B+ tree. So we will just // return out of memory from now on. - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } try { // If we don't need to generate id, we need to find it from the buffer. @@ -172,16 +172,16 @@ Status CacheService::FastCacheRow(const ReadableSlice &src, row_id_type *row_id_ } // Now we cache the buffer. Status rc = cp_->Insert(*row_id_generated, {src}); - if (rc == Status(StatusCode::kDuplicateKey)) { + if (rc == Status(StatusCode::kMDDuplicateKey)) { MS_LOG(DEBUG) << "Ignoring duplicate key."; } else { if (HasBuildPhase()) { // For cache service that has a build phase, record the error in the state // so other clients can be aware of the new state. There is nothing one can // do to resume other than to drop the cache. 
- if (rc.IsNoSpace()) { + if (rc == StatusCode::kMDNoSpace) { st_ = CacheServiceState::kNoSpace; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { st_ = CacheServiceState::kOutOfMemory; } } @@ -307,7 +307,7 @@ Status CacheService::FetchSchema(std::string *out) const { if (!mem.empty()) { *out = std::move(mem); } else { - return Status(StatusCode::kFileNotExist, __LINE__, __FILE__, "No schema has been cached"); + return Status(StatusCode::kMDFileNotExist, __LINE__, __FILE__, "No schema has been cached"); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_msg.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_msg.cc index 9a63788b9d..82782d7900 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_msg.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_msg.cc @@ -36,7 +36,7 @@ Status CachePerfMsg::Receive(int32_t qID) { auto err = msgrcv(qID, reinterpret_cast(&small_msg_), sizeof(small_msg_.body.msg), 0, MSG_NOERROR); if (err == -1) { if (errno == EIDRM) { - return Status(StatusCode::kInterrupted); + return Status(StatusCode::kMDInterrupted); } else { std::string errMsg = "Failed to call msgrcv. 
Errno = " + std::to_string(errno); RETURN_STATUS_UNEXPECTED(errMsg); diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf.cc index 990f1f518d..92a36a4865 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf.cc @@ -33,7 +33,7 @@ int main(int argc, char **argv) { if (rc.IsError()) { std::cerr << rc.ToString() << std::endl; } - return static_cast(rc.get_code()); + return static_cast(rc.StatusCode()); } return 0; } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf_run.h b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf_run.h index dac9c8012e..3324e460d2 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf_run.h +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_perf_run.h @@ -100,5 +100,7 @@ class CachePerfRun { }; } // namespace dataset } // namespace mindspore - +// todo: waiting for the master of the codes to refactor +#define get_code StatusCode +#define kDuplicateKey kMDDuplicateKey #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CACHE_PERF_RUN_H_ diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline.cc index 130bc102e6..bf03749fd9 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline.cc @@ -33,12 +33,12 @@ int main(int argc, char **argv) { // If we hit any error, send the rc back to the parent. 
if (rc.IsError()) { ds::ErrorMsg proto; - proto.set_rc(static_cast(rc.get_code())); + proto.set_rc(static_cast(rc.StatusCode())); proto.set_msg(rc.ToString()); ds::CachePerfMsg msg; (void)cachePipelineRun.SendMessage(&msg, ds::CachePerfMsg::MessageType::kError, &proto); } - return static_cast(rc.get_code()); + return static_cast(rc.StatusCode()); } return 0; } diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline_run.h b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline_run.h index d1d617133a..c0f8d8e738 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline_run.h +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/perf/cache_pipeline_run.h @@ -115,5 +115,9 @@ class CachePipelineRun { }; } // namespace dataset } // namespace mindspore - +// todo: waiting for the master of the codes to refactor +#define get_code StatusCode +#define kDuplicateKey kMDDuplicateKey +#define IsOutofMemory() StatusCode() == StatusCode::kMDOutOfMemory +#define IsNoSpace() StatusCode() == StatusCode::kMDNoSpace #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CACHE_PIPELINE_RUN_H_ diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/storage_container.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/storage_container.cc index ea6cb44f55..bc1ebce07c 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/storage_container.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/storage_container.cc @@ -104,7 +104,7 @@ Status StorageContainer::Write(const ReadableSlice &dest, off64_t offset) const if (r_sz != sz) { errno_t err = (r_sz == 0) ? 
EOF : errno; if (errno == ENOSPC) { - return Status(StatusCode::kNoSpace, __LINE__, __FILE__); + return Status(StatusCode::kMDNoSpace, __LINE__, __FILE__); } else { RETURN_STATUS_UNEXPECTED(strerror(err)); } @@ -157,7 +157,7 @@ Status StorageContainer::CreateStorageContainer(std::shared_ptrCreate(); if (rc.IsOk()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/storage_manager.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/storage_manager.cc index 2e16e843f5..26b12ea5f2 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/cache/storage_manager.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/cache/storage_manager.cc @@ -96,9 +96,9 @@ Status StorageManager::Write(key_type *key, const std::vector &bu cont = containers_.at(num_containers - 1); off64_t offset; Status rc = cont->Insert(buf, &offset); - if (rc.get_code() == StatusCode::kBuddySpaceFull) { + if (rc.StatusCode() == StatusCode::kMDBuddySpaceFull) { create_new_container = true; - // Remember how many containers we saw. In the next iteration we will do a comparision to see + // Remember how many containers we saw. In the next iteration we will do a comparison to see // if someone has already created it. last_num_container = num_containers; } else if (rc.IsOk()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/data_schema.cc b/mindspore/ccsrc/minddata/dataset/engine/data_schema.cc index 2273a5d1a2..6968566a88 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/data_schema.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/data_schema.cc @@ -140,7 +140,7 @@ Status ColDescriptor::MaterializeTensorShape(int32_t num_elements, TensorShape * // If we already had an unknown dimension, then we cannot have a second unknown dimension. // We only support the compute of a single unknown dim. 
if (requested_shape[i] == TensorShape::kDimUnknown && unknown_dim_position != TensorShape::kDimUnknown) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Requested shape has more than one unknown dimension!"); } @@ -312,12 +312,12 @@ Status DataSchema::ColumnLoad(nlohmann::json column_child_tree, const std::strin } // data type is mandatory field if (type_str.empty()) - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "json schema file for column " + col_name + " has invalid or missing column type."); // rank number is mandatory field if (rank_value <= -1) - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "json schema file for column " + col_name + " must define a positive rank value."); // Create the column descriptor for this column from the data we pulled from the json file @@ -425,7 +425,7 @@ Status DataSchema::AddColumn(const ColDescriptor &cd) { Status DataSchema::PreLoadExceptionCheck(const nlohmann::json &js) { // Check if columns node exists. It is required for building schema from file. if (js.find("columns") == js.end()) - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "\"columns\" node is required in the schema json file."); return Status::OK(); } @@ -434,12 +434,12 @@ Status DataSchema::PreLoadExceptionCheck(const nlohmann::json &js) { // name to column index number. 
Status DataSchema::GetColumnNameMap(std::unordered_map *out_column_name_map) { if (out_column_name_map == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "unexpected null output column name map."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "unexpected null output column name map."); } for (int32_t i = 0; i < col_descs_.size(); ++i) { if (col_descs_[i].name().empty()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Constructing column name map from schema, but found empty column name."); } (*out_column_name_map)[col_descs_[i].name()] = i; diff --git a/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc b/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc index 714b9e5647..3c654ea9c9 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc @@ -290,7 +290,7 @@ Status ChildIterator::Drain() { RETURN_IF_NOT_OK(current_op_->GetNextInput(&curr_buffer_, worker_id_, child_idx_)); } if (curr_buffer_->eof()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Child iterator picked up EOF in drain."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Child iterator picked up EOF in drain."); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/barrier_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/barrier_op.cc index f240c9bc4c..12f4f1595b 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/barrier_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/barrier_op.cc @@ -122,7 +122,8 @@ Status BarrierOp::prepare(TensorQTable *const table) { clean_up_ = false; buffer_id_ = 0; if (table == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "BarrierOp prepare phase requires a tensor table."); + return 
Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, + "BarrierOp prepare phase requires a tensor table."); } // fill initial row TensorRow new_row = {}; @@ -150,7 +151,7 @@ Status BarrierOp::prepare(TensorQTable *const table) { // fillBuffer always expects a new table to fill Status BarrierOp::fillBuffer(TensorQTable *const table) { if (table == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "BarrierOp fillBuffer null table pointer."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "BarrierOp fillBuffer null table pointer."); } TensorRow new_row = {}; while (table->size() < static_cast(rows_per_buffer_)) { @@ -172,7 +173,7 @@ Status BarrierOp::blockCond() { { py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } // we have condition name, however the flexibility is in python today try { @@ -180,11 +181,11 @@ Status BarrierOp::blockCond() { py::object ret_py_obj = condition_function_(); // Process the return value if (!py::isinstance(ret_py_obj)) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid parameter, condition wait function should return true/false."); } } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc index 4c64c47057..5590ae71d8 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc @@ -61,7 +61,7 @@ Status BatchOp::Builder::SanityCheck() { err += builder_num_workers_ <= 0 ? 
"Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err)); + return err.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err)); } #ifdef ENABLE_PYTHON @@ -261,7 +261,7 @@ Status BatchOp::MakeBatchedBuffer(std::pair, CBatc Status BatchOp::LaunchThreadsAndInitOp() { if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } RETURN_IF_NOT_OK(worker_queues_.Register(tree_->AllTasks())); RETURN_IF_NOT_OK( @@ -338,23 +338,23 @@ Status BatchOp::InvokeBatchSizeFunc(int32_t *batch_size, CBatchInfo info) { // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized."); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized."); } try { py::object size = batch_size_func_(info); *batch_size = size.cast(); if (*batch_size <= 0) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid parameter, batch size function should return an integer greater than 0."); } } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } catch (const py::cast_error &e) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid parameter, batch size function should return an integer greater than 0."); } } - return Status(StatusCode::kOK, "Batch size func call succeed."); + return Status(StatusCode::kSuccess, "Batch size func 
call succeed."); } Status BatchOp::InvokeBatchMapFunc(TensorTable *input, TensorTable *output, CBatchInfo info) { @@ -362,7 +362,7 @@ Status BatchOp::InvokeBatchMapFunc(TensorTable *input, TensorTable *output, CBat // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized."); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized."); } try { // Prepare batch map call back parameters @@ -407,9 +407,9 @@ Status BatchOp::InvokeBatchMapFunc(TensorTable *input, TensorTable *output, CBat output->push_back(std::move(output_batch)); } } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } catch (const py::cast_error &e) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid parameter, batch map function should return a tuple of list of numpy array."); } } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc index d3057353f3..03f9cf4fbf 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc @@ -191,7 +191,7 @@ Status BucketBatchByLengthOp::PadAndBatchBucket(int32_t bucket_index, int32_t ba if (bucket_index + 1 >= bucket_boundaries_.size()) { std::string error_message = "Invalid data, requested to pad to bucket boundary, element falls in last bucket."; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, error_message); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, error_message); } pad_shape[i] = bucket_boundaries_[bucket_index + 1] - 1; diff --git 
a/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc index a633784fc5..8053baa567 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc @@ -42,7 +42,7 @@ BuildSentencePieceVocabOp::BuildSentencePieceVocabOp(std::shared_ptrRegister(tree_->AllTasks())); RETURN_IF_NOT_OK(tree_->AllTasks()->CreateAsyncTask( @@ -84,10 +84,10 @@ Status BuildSentencePieceVocabOp::SentenceThread() { sentencepiece::util::Status s_status = sentencepiece::SentencePieceTrainer::Train(BuildParams(), sentence_iter.get(), &model_proto); if (!s_status.ok()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, s_status.message()); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, s_status.message()); } else { if (vocab_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, sentencepiece vocab not set."); } vocab_->set_model_proto(model_proto); @@ -145,7 +145,7 @@ void BuildSentencePieceVocabOp::Next(std::string *sentence) { if (new_row[col_id_]->type().IsNumeric() || new_row[col_id_]->Rank() > 1) { ret_status_ = - Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid data, build_sentence_piece_vocab only works on string data with rank equal to 1, got type: " + new_row[col_id_]->type().ToString() + "and rank: " + std::to_string(new_row[col_id_]->Rank())); read_done_ = true; diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_vocab_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_vocab_op.cc index c2145e0e42..6d915abe88 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_vocab_op.cc +++ 
b/mindspore/ccsrc/minddata/dataset/engine/datasetops/build_vocab_op.cc @@ -80,7 +80,7 @@ Status BuildVocabOp::WorkerEntry(int32_t worker_id) { Status BuildVocabOp::operator()() { // launch the collector thread if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } RETURN_IF_NOT_OK(distributor_queue_->Register(tree_->AllTasks())); RETURN_IF_NOT_OK(collector_queue_->Register(tree_->AllTasks())); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_base_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_base_op.cc index 8b914a780d..5accad7c76 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_base_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_base_op.cc @@ -233,7 +233,7 @@ Status CacheBase::UpdateColumnMapFromCache() { // Get the schema from the server. It may not be there yet. So tolerate the error. if (column_name_id_map_.empty()) { rc = cache_client_->FetchSchema(&column_name_id_map_); - if (rc == Status(StatusCode::kFileNotExist)) { + if (rc == Status(StatusCode::kMDFileNotExist)) { MS_LOG(DEBUG) << "Schema not in the server yet."; rc = Status::OK(); } @@ -304,14 +304,14 @@ Status CacheBase::Prefetcher(int32_t worker_id) { int32_t retry_count = 0; do { rc = PrefetchRows(prefetch_keys, &cache_miss); - if (rc.IsNetWorkError() && retry_count < max_retries) { + if (rc == StatusCode::kMDNetWorkError && retry_count < max_retries) { // If we get some network error, we will attempt some retries retry_count++; } else if (rc.IsError()) { MS_LOG(WARNING) << rc.ToString(); return rc; } - } while (rc.IsNetWorkError()); + } while (rc == StatusCode::kMDNetWorkError); // In case any thread is waiting for the rows to come back and blocked on a semaphore, // we will put an empty row in the local cache. 
if (rc.IsError() && AllowCacheMiss()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_lookup_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_lookup_op.cc index 8549caf82c..7beb6c15b0 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_lookup_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_lookup_op.cc @@ -39,12 +39,12 @@ CacheLookupOp::Builder::Builder() : build_cache_client_(nullptr), build_sampler_ // Check if the required parameters are set by the builder. Status CacheLookupOp::Builder::SanityCheck() const { if (build_cache_client_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheLookupOp requires a CacheClient, but got nullptr."); } // Make sure the cache client has a valid session if (!build_cache_client_->session_id()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, cache client for CacheLookupOp requires a session id which is not equal to 0."); } return Status::OK(); @@ -59,7 +59,7 @@ Status CacheLookupOp::Builder::Build(std::shared_ptr *ptr) { } Status CacheLookupOp::operator()() { if (!sampler_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheLookupOp requires a sampler before it can be executed, but got nullptr."); } RETURN_IF_NOT_OK(RegisterResources()); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_merge_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_merge_op.cc index 6037897fad..e02eecbd7c 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_merge_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_merge_op.cc @@ -129,7 +129,7 @@ Status 
CacheMergeOp::CacheMissWorkerEntry(int32_t workerId) { Status rc; if ((rc = cache_client_->FlushAsyncWriteBuffer()).IsError()) { cache_missing_rows_ = false; - if (rc.IsOutofMemory() || rc.IsNoSpace()) { + if (rc == StatusCode::kMDOutOfMemory || rc == kMDNoSpace) { cache_client_->ServerRunningOutOfResources(); } else { MS_LOG(INFO) << "Async row flushing not successful: " << rc.ToString(); @@ -156,7 +156,7 @@ Status CacheMergeOp::CacheMissWorkerEntry(int32_t workerId) { rc = rq->AsyncSendCacheRequest(cache_client_, row); if (rc.IsOk()) { RETURN_IF_NOT_OK(io_que_->EmplaceBack(row_id)); - } else if (rc.IsOutofMemory() || rc.IsNoSpace()) { + } else if (rc == StatusCode::kMDOutOfMemory || rc == kMDNoSpace) { cache_missing_rows_ = false; cache_client_->ServerRunningOutOfResources(); } @@ -188,9 +188,9 @@ Status CacheMergeOp::Cleaner() { Status rc = rq->CheckCacheResult(); if (rc.IsError()) { // If interrupt, time to quit. - if (rc.IsInterrupted()) { + if (rc == StatusCode::kMDInterrupted) { return Status::OK(); - } else if (rc.IsOutofMemory() || rc.IsNoSpace()) { + } else if (rc == StatusCode::kMDOutOfMemory || rc == kMDNoSpace) { // The server is hitting some limit and we will turn off caching from now on. cache_missing_rows_ = false; cache_client_->ServerRunningOutOfResources(); @@ -215,7 +215,7 @@ Status CacheMergeOp::PrepareNodePostAction() { // Run any common code from supe // Construct the cache const bool generate_ids = false; Status rc = cache_client_->CreateCache(cache_crc, generate_ids); - if (rc.get_code() == StatusCode::kDuplicateKey) { + if (rc.StatusCode() == StatusCode::kMDDuplicateKey) { // We are told the cache has been created already. MS_LOG(INFO) << "Cache created already"; rc = Status::OK(); @@ -244,12 +244,12 @@ CacheMergeOp::Builder::Builder() : build_cache_client_(nullptr), build_sampler_( // Check if the required parameters are set by the builder. 
Status CacheMergeOp::Builder::SanityCheck() const { if (build_cache_client_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheMergeOp requires a CacheClient, but got nullptr."); } // Make sure the cache client has a valid session if (!build_cache_client_->session_id()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, cache client for CacheMergeOp requires a session id which is not equal to 0."); } return Status::OK(); @@ -316,7 +316,7 @@ Status CacheMergeOp::TensorRowCacheRequest::AsyncSendCacheRequest(const std::sha // We will do a deep copy but write directly into CacheRequest protobuf or shared memory Status rc; rc = cc->AsyncWriteRow(row); - if (rc.get_code() == StatusCode::kNotImplementedYet) { + if (rc.StatusCode() == StatusCode::kMDNotImplementedYet) { cleaner_copy_ = std::make_shared(cc.get()); rc = cleaner_copy_->SerializeCacheRowRequest(cc.get(), row); if (rc.IsOk()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_op.cc index ae73cc1bdf..5c80a5523a 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/cache_op.cc @@ -41,12 +41,12 @@ CacheOp::Builder::Builder() : build_cache_client_(nullptr), build_sampler_(nullp // Check if the required parameters are set by the builder. 
Status CacheOp::Builder::SanityCheck() const { if (build_cache_client_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheOp requires a CacheClient, but got nullptr."); } // Make sure the cache client has a valid session if (!build_cache_client_->session_id()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, cache client for CacheOp requires a session id which is not equal to 0."); } return Status::OK(); @@ -78,7 +78,7 @@ Status CacheOp::InitCache() { return Status::OK(); } // This class functor will provide the master loop that drives the logic for performing the work Status CacheOp::operator()() { if (!sampler_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, CacheOp requires a sampler before it can be executed, but got nullptr."); } RETURN_IF_NOT_OK(RegisterResources()); @@ -113,7 +113,7 @@ Status CacheOp::CacheAllRows(int32_t worker_id) { Status rc; // Do the Async write if we attach to the shared memory. 
rc = cache_client_->AsyncWriteBuffer(std::move(db_ptr)); - if (rc.get_code() == StatusCode::kNotImplementedYet) { + if (rc.StatusCode() == StatusCode::kMDNotImplementedYet) { RETURN_IF_NOT_OK(cache_client_->WriteBuffer(std::move(db_ptr))); } else if (rc.IsError()) { return rc; @@ -169,9 +169,9 @@ Status CacheOp::WaitForCachingAllRows() { BuildPhaseDone = true; break; case CacheServiceState::kOutOfMemory: - return Status(StatusCode::kOutOfMemory, "Cache server is running out of memory"); + return Status(StatusCode::kMDOutOfMemory, "Cache server is running out of memory"); case CacheServiceState::kNoSpace: - return Status(StatusCode::kNoSpace, "Cache server is running of out spill storage"); + return Status(StatusCode::kMDNoSpace, "Cache server is running of out spill storage"); case CacheServiceState::kNone: case CacheServiceState::kError: default: @@ -246,7 +246,7 @@ Status CacheOp::PrepareNodePostAction() { // Construct the cache const bool generate_ids = true; Status rc = cache_client_->CreateCache(cache_crc, generate_ids); - if (rc.get_code() == StatusCode::kDuplicateKey) { + if (rc.StatusCode() == StatusCode::kMDDuplicateKey) { // We are told the cache has been created already. So we skip the build phase. 
phase_ = Phase::kFetchPhase; rc = Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.cc index e1876e232d..05714d8865 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.cc @@ -157,18 +157,14 @@ Status DeviceQueueOp::SendDataToAscend() { TensorRow currRow; for (int row_id = 0; row_id < current_buffer->NumRows(); row_id++) { RETURN_IF_NOT_OK(current_buffer->GetRow(row_id, &currRow)); - while (stop_send_ && ascend_keep_waiting_) { - MS_LOG(DEBUG) << "stop_send flag is set, waiting for continue signal..."; - std::this_thread::sleep_for(std::chrono::microseconds(100)); - } + WaitContinueSignal(); auto status = tdtInstancePtr->hostPush(currRow, true, channel_name_, isProfilingEnable, tdt_cost); if (status == TdtStatus::FAILED) { if (stop_send_) { MS_LOG(INFO) << "stop_send received"; return Status::OK(); - } else { - return Status(StatusCode::kTDTPushFailure, "TDT Push Failed"); } + return Status(StatusCode::kMDTDTPushFailure, "TDT Push Failed"); } if (create_data_info_queue_) { DATA_INFO data_info; @@ -200,9 +196,8 @@ Status DeviceQueueOp::SendDataToAscend() { if (stop_send_) { MS_LOG(INFO) << "stop_send received"; return Status::OK(); - } else { - return Status(StatusCode::kTDTPushFailure, "TDT Push Failed"); } + return Status(StatusCode::kMDTDTPushFailure, "TDT Push Failed"); } MS_LOG(INFO) << "an epoch has already sent, now stop send data."; stop_send_ = true; @@ -219,13 +214,19 @@ Status DeviceQueueOp::SendDataToAscend() { return Status::OK(); } +void DeviceQueueOp::WaitContinueSignal() const { + while (stop_send_ && ascend_keep_waiting_) { + MS_LOG(DEBUG) << "stop_send flag is set, waiting for continue signal..."; + std::this_thread::sleep_for(std::chrono::microseconds(100)); + } +} #endif #ifdef ENABLE_TDTQUE Status 
DeviceQueueOp::GetDataInfo(DATA_INFO *data_info) { if (!create_data_info_queue_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "DataInfo queue is not created."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "DataInfo queue is not created."); } // This place has a race condition with operator(), so the first one // arrive here will do the initialize work. @@ -241,7 +242,7 @@ Status DeviceQueueOp::GetDataInfo(DATA_INFO *data_info) { } #else Status DeviceQueueOp::GetDataInfo(DATA_INFO *data_info) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "GetDataInfo is not supported yet."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "GetDataInfo is not supported yet."); } #endif @@ -301,7 +302,7 @@ Status DeviceQueueOp::PushDataToGPU() { } handle = GpuBufferMgr::GetInstance().Open(0, channel_name_, data_size, release_function); if (handle == INVALID_HANDLE) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Failed to open channel for sending data."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Failed to open channel for sending data."); } is_open = true; } @@ -309,14 +310,14 @@ Status DeviceQueueOp::PushDataToGPU() { // Data prefetch only when PS mode enables cache. 
if (items.size() > 0) { if (!ps::PsDataPrefetch::GetInstance().PrefetchData(channel_name_, items[0].data_ptr_, items[0].data_len_)) { - return Status(StatusCode::kTimeOut, __LINE__, __FILE__, "Failed to prefetch data."); + return Status(StatusCode::kMDTimeOut, __LINE__, __FILE__, "Failed to prefetch data."); } } while (!GpuBufferMgr::GetInstance().IsClosed() && !TaskManager::FindMe()->Interrupted()) { BlockQueueStatus_T ret = GpuBufferMgr::GetInstance().Push(handle, items, WAIT_TIME); if (ret) { if (ret == BlockQueueStatus_T::ERROR_INPUT) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Invalid input data, please check it."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid input data, please check it."); } else { if (!stop_send_) { MS_LOG(DEBUG) << "Retry pushing data..."; @@ -438,13 +439,13 @@ Status DeviceQueueOp::MallocForGPUData(std::vector *items, for (auto &sub_item : *items) { RETURN_IF_NOT_OK(pool_[worker_id]->Allocate(sub_item.data_len_, &sub_item.data_ptr_)); if (sub_item.data_ptr_ == nullptr) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, "Memory malloc failed."); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, "Memory malloc failed."); } const unsigned char *column_data = curr_row[i]->GetBuffer(); if (memcpy_s(sub_item.data_ptr_, sub_item.data_len_, column_data, static_cast(curr_row[i++]->SizeInBytes())) != 0) { MS_LOG(ERROR) << "memcpy_s failed!"; - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "memcpy_s failed."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "memcpy_s failed."); } } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.h b/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.h index 16b45b5511..dcf77cd262 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.h +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/device_queue_op.h @@ -190,6 
+190,7 @@ class DeviceQueueOp : public PipelineOp { private: #ifdef ENABLE_TDTQUE + void WaitContinueSignal() const; Status SendDataToAscend(); bool ascend_keep_waiting_; #endif diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/filter_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/filter_op.cc index 487501ba73..f674582c26 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/filter_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/filter_op.cc @@ -43,7 +43,7 @@ Status FilterOp::Builder::SanityCheck() { err += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err)); + return err.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, common::SafeCStr(err)); } FilterOp::Builder::Builder() { @@ -66,7 +66,7 @@ FilterOp::FilterOp(const std::vector &in_col_names, int32_t num_wor Status FilterOp::operator()() { // The operator class just starts off threads by calling the tree_ function. 
if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } filter_queues_.Init(num_workers_, oc_queue_size_); RETURN_IF_NOT_OK(filter_queues_.Register(tree_->AllTasks())); @@ -244,7 +244,7 @@ Status FilterOp::InvokePredicateFunc(const TensorRow &input, bool *out_predicate RETURN_IF_NOT_OK(predicate_func_->Compute(input, &output)); RETURN_IF_NOT_OK(output.at(0)->GetItemAt(out_predicate, {})); - return Status(StatusCode::kOK, "FilterOp predicate func call succeed"); + return Status(StatusCode::kSuccess, "FilterOp predicate func call succeed"); } // Visitor accept method for NodePass diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/map_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/map_op.cc index 795381fbba..c951a61b80 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/map_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/map_op/map_op.cc @@ -43,7 +43,7 @@ MapOp::Builder::Builder() { // Check if the required parameters are set by the builder. 
Status MapOp::Builder::sanityCheck() const { if (build_tensor_funcs_.empty()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Building a MapOp without providing any function/operation to apply"); } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/shuffle_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/shuffle_op.cc index b4637faf5e..0eaa31d2a0 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/shuffle_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/shuffle_op.cc @@ -126,7 +126,7 @@ Status ShuffleOp::AddRowToShuffleBuffer(TensorRow new_shuffle_row) { shuffle_last_row_idx_ = (shuffle_buffer_->size()) - 1; } else { if (!(*shuffle_buffer_)[shuffle_last_row_idx_].empty()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Last row of shuffle buffer should not be occupied!"); } (*shuffle_buffer_)[shuffle_last_row_idx_] = std::move(new_shuffle_row); @@ -245,7 +245,7 @@ Status ShuffleOp::InitShuffleBuffer() { // shuffle buffer to it's max size, or the dataset below us is not providing any more // rows. if (shuffle_buffer_state_ != kShuffleStateInit) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid shuffle buffer state (SHUFFLE_STATE_INIT expected)"); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc index 97e7104a98..4e0482588f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/album_op.cc @@ -67,7 +67,7 @@ Status AlbumOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? 
"Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } AlbumOp::AlbumOp(int32_t num_wkrs, int32_t rows_per_buffer, std::string file_dir, int32_t queue_size, bool do_decode, @@ -577,7 +577,7 @@ Status AlbumOp::InitSampler() { Status AlbumOp::LaunchThreadsAndInitOp() { if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } // registers QueueList and individual Queues for interrupt services RETURN_IF_NOT_OK(io_block_queues_.Register(tree_->AllTasks())); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/celeba_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/celeba_op.cc index 3dae849e65..54c8cf6da9 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/celeba_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/celeba_op.cc @@ -59,7 +59,7 @@ Status CelebAOp::Builder::Build(std::shared_ptr *op) { builder_op_connector_size_, builder_decode_, builder_usage_, builder_extensions_, std::move(builder_schema_), std::move(builder_sampler_)); if (*op == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "CelebAOp init failed."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "CelebAOp init failed."); } return Status::OK(); @@ -74,7 +74,7 @@ Status CelebAOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? 
Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } CelebAOp::CelebAOp(int32_t num_workers, int32_t rows_per_buffer, const std::string &dir, int32_t queue_size, @@ -95,7 +95,7 @@ CelebAOp::CelebAOp(int32_t num_workers, int32_t rows_per_buffer, const std::stri Status CelebAOp::LaunchThreadsAndInitOp() { if (tree_ == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Pipeline init failed, Execution tree not set."); } RETURN_IF_NOT_OK(io_block_queues_.Register(tree_->AllTasks())); @@ -119,7 +119,7 @@ Status CelebAOp::ParseAttrFile() { std::ifstream attr_file((folder_path / "list_attr_celeba.txt").toString()); if (!attr_file.is_open()) { std::string attr_file_name = (folder_path / "list_attr_celeba.txt").toString(); - return Status(StatusCode::kFileNotExist, __LINE__, __FILE__, + return Status(StatusCode::kMDFileNotExist, __LINE__, __FILE__, "Invalid file, failed to open Celeba attr file: " + attr_file_name); } @@ -368,7 +368,7 @@ Status CelebAOp::WorkerEntry(int32_t worker_id) { } RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block)); } - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Unexpected nullptr received in worker."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Unexpected nullptr received in worker."); } Status CelebAOp::LoadBuffer(const std::vector &keys, std::unique_ptr *db) { @@ -396,7 +396,7 @@ Status CelebAOp::LoadTensorRow(row_id_type row_id, const std::pair *op) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/coco_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/coco_op.cc index 223b12499e..01a167f006 100644 --- 
a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/coco_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/coco_op.cc @@ -118,7 +118,7 @@ Status CocoOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } CocoOp::CocoOp(const TaskType &task_type, const std::string &image_folder_path, const std::string &annotation_path, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/csv_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/csv_op.cc index 32118e7e78..8575d06006 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/csv_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/csv_op.cc @@ -46,7 +46,7 @@ Status CsvOp::Builder::ValidateInputs() const { ? "Invalid parameter, num_shard must be greater than shard_id and greater than 0, got num_shard: " + std::to_string(builder_num_devices_) + ", shard_id: " + std::to_string(builder_device_id_) + ".\n" : ""; - return err.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err); + return err.empty() ? 
Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err); } Status CsvOp::Builder::Build(std::shared_ptr *op) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc index 90b3a5939e..4bbecff431 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/generator_op.cc @@ -89,7 +89,7 @@ Status GeneratorOp::CreateGeneratorObject() { // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { py::array sample_ids; @@ -103,7 +103,7 @@ Status GeneratorOp::CreateGeneratorObject() { generator_ = generator_function_(); } } catch (const py::error_already_set &e) { - ret = Status(StatusCode::kPyFuncException, e.what()); + ret = Status(StatusCode::kMDPyFuncException, e.what()); } } return ret; @@ -118,33 +118,33 @@ Status GeneratorOp::Init() { Status GeneratorOp::PyRowToTensorRow(py::object py_data, TensorRow *tensor_row) { if (!py::isinstance(py_data)) { - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, "Invalid parameter, Generator should return a tuple of numpy arrays."); } py::tuple py_row = py_data.cast(); // Check if returned number of columns matches with column names if (py_row.size() != column_names_.size()) { return Status( - StatusCode::kPyFuncException, __LINE__, __FILE__, + StatusCode::kMDPyFuncException, __LINE__, __FILE__, "Invalid parameter, Generator should return same number of numpy arrays as specified in column names."); } // Iterate over two containers simultaneously for memory copy for (int i = 0; i < py_row.size(); ++i) { py::object 
ret_py_ele = py_row[i]; if (!py::isinstance(ret_py_ele)) { - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, "Invalid parameter, Generator should return a tuple of numpy arrays."); } std::shared_ptr tensor; RETURN_IF_NOT_OK(Tensor::CreateFromNpArray(ret_py_ele.cast(), &tensor)); if ((!column_types_.empty()) && (column_types_[i] != DataType::DE_UNKNOWN) && (column_types_[i] != tensor->type())) { - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, "Invalid parameter, input column type is not same with output tensor type."); } tensor_row->push_back(tensor); } - return Status(StatusCode::kOK, ""); + return Status(StatusCode::kSuccess, ""); } Status GeneratorOp::FillBuffer(TensorQTable *tt) { @@ -207,7 +207,7 @@ Status GeneratorOp::operator()() { { py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { RETURN_IF_NOT_OK(FillBuffer(fetched_table.get())); @@ -217,14 +217,14 @@ Status GeneratorOp::operator()() { e.restore(); // Pop up non StopIteration Python Exception if (!eoe) { - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, e.what()); + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, e.what()); } if (num_rows_sampled != -1 && num_rows_sampled != generator_counter_) { std::stringstream ss; ss << "The actual amount of data read from generator " << generator_counter_ << " is different from generator.len " << num_rows_sampled << ", you should adjust generator.len to make them match."; - return Status(StatusCode::kPyFuncException, __LINE__, __FILE__, ss.str()); + return Status(StatusCode::kMDPyFuncException, __LINE__, __FILE__, ss.str()); } } } @@ -275,7 +275,7 @@ Status 
GeneratorOp::Reset() { wp_.Set(); } generator_counter_ = 0; - return Status(StatusCode::kOK, "GeneratorOp Reset Succeed"); + return Status(StatusCode::kSuccess, "GeneratorOp Reset Succeed"); } // Visitor accept method for NodePass diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/image_folder_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/image_folder_op.cc index dbece1371d..1e806fc45c 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/image_folder_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/image_folder_op.cc @@ -62,7 +62,7 @@ Status ImageFolderOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } ImageFolderOp::ImageFolderOp(int32_t num_wkrs, int32_t rows_per_buffer, std::string file_dir, int32_t queue_size, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/manifest_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/manifest_op.cc index a7e83d48f3..eff09d53c8 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/manifest_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/manifest_op.cc @@ -62,7 +62,7 @@ Status ManifestOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? 
Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } ManifestOp::ManifestOp(int32_t num_works, int32_t rows_per_buffer, std::string file, int32_t queue_size, bool decode, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mindrecord_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mindrecord_op.cc index d3d68b2521..80f2a09eee 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mindrecord_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mindrecord_op.cc @@ -63,7 +63,7 @@ Status MindRecordOp::Builder::Build(std::shared_ptr *ptr) { std::shared_ptr new_mind_record_op; if (build_dataset_file_.empty()) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid file, MindRecord path is invalid or not set."); } mindrecord::json sample_json; diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc index 363747b10c..bdf7006658 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/mnist_op.cc @@ -71,7 +71,7 @@ Status MnistOp::Builder::SanityCheck() { err_msg += valid.find(builder_usage_) == valid.end() ? "Invalid parameter, usage must be 'train','test' or 'all', but got " + builder_usage_ + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? 
Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } MnistOp::MnistOp(const std::string &usage, int32_t num_workers, int32_t rows_per_buffer, std::string folder_path, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/random_data_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/random_data_op.cc index 0beb69e1be..eccedcecd3 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/random_data_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/random_data_op.cc @@ -327,7 +327,7 @@ Status RandomDataOp::PackAndSend(int32_t worker_id, std::unique_ptr(size_in_bytes); int ret_code = memset_s(buf.get(), size_in_bytes, random_byte, size_in_bytes); if (ret_code != 0) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Failed to set random bytes for a tensor."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Failed to set random bytes for a tensor."); } RETURN_IF_NOT_OK(Tensor::CreateFromMemory(*new_shape, current_col.type(), buf.get(), &new_tensor)); @@ -377,7 +377,7 @@ Status RandomDataOp::Reset() { // Ensure all guys are in the waitpost if (guys_in_ != num_workers_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Issuing a reset, but some workers are missing from epochSync!"); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/python_sampler.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/python_sampler.cc index c4107e7bcd..4c283ca272 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/python_sampler.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/python_sampler.cc @@ -36,7 +36,7 @@ Status PythonSamplerRT::GetNextSample(std::unique_ptr *out_buffer) { py::gil_scoped_acquire gil_acquire; (*out_buffer) = std::make_unique(0, 
DataBuffer::kDeBFlagNone); if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { py::object py_ret = py_sampler_instance.attr("_get_indices")(); @@ -51,9 +51,9 @@ Status PythonSamplerRT::GetNextSample(std::unique_ptr *out_buffer) { } } } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } catch (const py::cast_error &e) { - return Status(StatusCode::kPyFuncException, + return Status(StatusCode::kMDPyFuncException, "Invalid data, python sampler iterator should return an integer index."); } } @@ -78,12 +78,12 @@ Status PythonSamplerRT::InitSampler() { { py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { py_sampler_instance.attr("_handshake")(num_rows_, num_samples_); } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } } @@ -96,12 +96,12 @@ Status PythonSamplerRT::ResetSampler() { need_to_reset_ = false; py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { py_sampler_instance.attr("reset")(); } catch (const py::error_already_set &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } if (HasChildSampler()) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/sampler.cc 
b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/sampler.cc index 9c1e15aa6a..485255e277 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/sampler.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/sampler.cc @@ -116,12 +116,12 @@ Status SamplerRT::GetAllIdsThenReset(py::array *data) { { py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + return Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); } try { RETURN_IF_NOT_OK(sample_ids->GetDataAsNumpy(data)); } catch (const std::runtime_error &e) { - return Status(StatusCode::kPyFuncException, e.what()); + return Status(StatusCode::kMDPyFuncException, e.what()); } } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc index 9ca5aefc6c..3168645fa0 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc @@ -54,7 +54,7 @@ Status WeightedRandomSamplerRT::InitSampler() { std::to_string(samples_per_buffer_) + ".\n"); if (weights_.size() > static_cast(num_rows_)) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, size of sample weights must be less than or equal to num of data, " "otherwise might cause generated id out of bound or other errors, but got weight size: " + std::to_string(weights_.size()) + ", num of data: " + std::to_string(num_rows_)); @@ -119,7 +119,7 @@ Status WeightedRandomSamplerRT::ResetSampler() { // Get the sample ids. 
Status WeightedRandomSamplerRT::GetNextSample(std::unique_ptr *out_buffer) { if (weights_.size() > static_cast(num_rows_)) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid parameter, size of sample weights must be less than or equal to num of data, " "otherwise might cause generated id out of bound or other errors, but got weight size: " + std::to_string(weights_.size()) + ", num of data: " + std::to_string(num_rows_)); diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/text_file_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/text_file_op.cc index 0680f9aca8..97eaf21604 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/text_file_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/text_file_op.cc @@ -51,7 +51,7 @@ Status TextFileOp::Builder::ValidateInputs() const { ? "Invalid parameter, num_shard must be greater than shard_id and greater than 0, got num_shard: " + std::to_string(builder_num_devices_) + ", shard_id: " + std::to_string(builder_device_id_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } Status TextFileOp::Builder::Build(std::shared_ptr *op) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/tf_reader_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/tf_reader_op.cc index 562545b313..0f85128ebf 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/tf_reader_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/tf_reader_op.cc @@ -103,7 +103,7 @@ Status TFReaderOp::Builder::ValidateInputs() const { err_msg += accumulated_filenames; } - return err_msg.empty() ? 
Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } Status TFReaderOp::Builder::Build(std::shared_ptr *out_tf_reader_op) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/voc_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/voc_op.cc index bb4946d910..5346dc6937 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/voc_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/source/voc_op.cc @@ -93,7 +93,7 @@ Status VOCOp::Builder::SanityCheck() { err_msg += builder_num_workers_ <= 0 ? "Invalid parameter, num_parallel_workers must be greater than 0, but got " + std::to_string(builder_num_workers_) + ".\n" : ""; - return err_msg.empty() ? Status::OK() : Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } VOCOp::VOCOp(const TaskType &task_type, const std::string &task_mode, const std::string &folder_path, diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/zip_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/zip_op.cc index 2177e3886c..2ce1d89545 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/zip_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/zip_op.cc @@ -123,7 +123,7 @@ Status ZipOp::prepare(TensorQTable *const table) { draining_ = false; buffer_id_ = 0; if (table == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid data, ZipOp prepare phase requires a tensor table, but got nullptr."); } // fill initial row @@ -148,7 +148,7 @@ Status ZipOp::prepare(TensorQTable *const table) { // fillBuffer always expects a new table to fill Status ZipOp::fillBuffer(TensorQTable *const table) { if 
(table == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Invalid data, ZipOp fillBuffer null table pointer."); } TensorRow new_row; @@ -199,7 +199,7 @@ Status ZipOp::getNextTensorRow(TensorRow *const new_zip_row) { Status ZipOp::drainPipeline() { // we don't need to drain if we reached eof if (eof_) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "ZipOp draining should not be done if already at eof!"); } for (int32_t con = 0; con < children_num_; ++con) { diff --git a/mindspore/ccsrc/minddata/dataset/engine/db_connector.h b/mindspore/ccsrc/minddata/dataset/engine/db_connector.h index 2d2cf6d226..c6647a798f 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/db_connector.h +++ b/mindspore/ccsrc/minddata/dataset/engine/db_connector.h @@ -58,7 +58,7 @@ class DbConnector : public Connector> { // @param retry_if_eoe A flag to allow the same thread invoke pop() again if the current pop returns eoe buffer. Status PopWithRetry(int32_t worker_id, std::unique_ptr *result, bool retry_if_eoe = false) noexcept { if (result == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "[ERROR] nullptr detected when getting data from db connector"); } else { std::unique_lock lk(m_); @@ -69,7 +69,7 @@ class DbConnector : public Connector> { } else { RETURN_IF_NOT_OK(queues_[pop_from_]->PopFront(result)); if (*result == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "[ERROR] nullptr detected when getting data from db connector"); } // Setting the internal flag once the first EOF is encountered. 
diff --git a/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc b/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc index bd1e3e6176..de9fdbfc0e 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc @@ -53,7 +53,7 @@ Status TFRecordNode::ValidateParams() { if (dataset_files_.empty()) { std::string err_msg = "TFRecordNode: dataset_files is not specified."; MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } for (const auto &f : dataset_files_) { @@ -62,7 +62,7 @@ Status TFRecordNode::ValidateParams() { std::string err_msg = "TFRecordNode: dataset file: [" + f + "] is invalid or does not exist."; MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } } @@ -70,14 +70,14 @@ Status TFRecordNode::ValidateParams() { std::string err_msg = "TFRecordNode: Invalid number of samples: " + std::to_string(num_samples_); MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } if (num_shards_ <= 0) { std::string err_msg = "TFRecordNode: Invalid num_shards: " + std::to_string(num_shards_); MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } if (shard_id_ < 0 || shard_id_ >= num_shards_) { @@ -85,7 +85,7 @@ Status TFRecordNode::ValidateParams() { ", num_shards: " + std::to_string(num_shards_); MS_LOG(ERROR) << err_msg; - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDSyntaxError, 
__LINE__, __FILE__, err_msg); } std::vector invalid_files(dataset_files_.size()); @@ -101,7 +101,7 @@ Status TFRecordNode::ValidateParams() { [](const std::string &accumulated, const std::string &next) { return accumulated + " " + next + "\n"; }); err_msg += accumulated_filenames; } - return err_msg.empty() ? Status::OK() : Status(StatusCode::kSyntaxError, __LINE__, __FILE__, err_msg); + return err_msg.empty() ? Status::OK() : Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, err_msg); } // Function to build TFRecordNode diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc index b5981e6442..e3e82b3baa 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pass.cc @@ -102,7 +102,7 @@ namespace dataset { // Driver method for TreePass Status IRTreePass::Run(std::shared_ptr root_ir, bool *const modified) { if (root_ir == nullptr || modified == nullptr) { - return Status(StatusCode::kUnexpectedError, "Null pointer passed to TreePass"); + return Status(StatusCode::kMDUnexpectedError, "Null pointer passed to TreePass"); } // Initialize modified flag *modified = false; @@ -112,7 +112,7 @@ Status IRTreePass::Run(std::shared_ptr root_ir, bool *const modifie // Driver method for NodePass Status IRNodePass::Run(std::shared_ptr root_ir, bool *const modified) { if (root_ir == nullptr || modified == nullptr) { - return Status(StatusCode::kUnexpectedError, "Null pointer passed to NodePass"); + return Status(StatusCode::kMDUnexpectedError, "Null pointer passed to NodePass"); } // Initialize modified flag *modified = false; @@ -337,7 +337,7 @@ Status IRNodePass::Visit(std::shared_ptr node, bool *cons // Driver method for TreePass Status TreePass::Run(ExecutionTree *tree, bool *const modified) { if (tree == nullptr || modified == nullptr) { - return Status(StatusCode::kUnexpectedError, "Null pointer passed to TreePass"); + return 
Status(StatusCode::kMDUnexpectedError, "Null pointer passed to TreePass"); } // Initialize modified flag *modified = false; @@ -347,7 +347,7 @@ Status TreePass::Run(ExecutionTree *tree, bool *const modified) { // Driver method for NodePass Status NodePass::Run(ExecutionTree *tree, bool *const modified) { if (tree == nullptr || modified == nullptr) { - return Status(StatusCode::kUnexpectedError, "Null pointer passed to NodePass"); + return Status(StatusCode::kMDUnexpectedError, "Null pointer passed to NodePass"); } // Initialize modified flag *modified = false; diff --git a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc index feaaec1514..7e4510ef96 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/opt/pre/cache_transform_pass.cc @@ -80,7 +80,7 @@ Status CacheTransformPass::CachePass::Visit(std::shared_ptr node, bo MS_LOG(DEBUG) << "Cache transform pass: Non mappable leaf in a cache descendant tree detected"; // If a leaf has already been assigned, then we have more than one leaf inside this cache descendant tree. if (leaf_node_) { - return Status(StatusCode::kNotImplementedYet, __LINE__, __FILE__, + return Status(StatusCode::kMDNotImplementedYet, __LINE__, __FILE__, "There is currently no support for multiple leaf nodes under cache."); } // Set up a sampler here to be used by cache if we are a non-mappable leaf in a caching tree. @@ -127,7 +127,7 @@ Status CacheTransformPass::CachePass::Visit(std::shared_ptr MS_LOG(DEBUG) << "Cache transform pass: Mappable leaf in a cache descendant tree detected"; // If a leaf has already been assigned, then we have more than one leaf inside this cache descendant tree. 
if (leaf_node_) { - return Status(StatusCode::kNotImplementedYet, __LINE__, __FILE__, + return Status(StatusCode::kMDNotImplementedYet, __LINE__, __FILE__, "There is currently no support for multiple leaf nodes under cache."); } // If we are a leaf in the caching path, then save this leaf @@ -140,7 +140,7 @@ Status CacheTransformPass::CachePass::Visit(std::shared_ptr // Perform leaf node cache transform identification Status CacheTransformPass::CachePass::Visit(std::shared_ptr node, bool *const modified) { if (node->IsCached() || is_caching_) { - return Status(StatusCode::kNotImplementedYet, __LINE__, __FILE__, + return Status(StatusCode::kMDNotImplementedYet, __LINE__, __FILE__, "There is currently no support for MindRecordOp under cache."); } return Status::OK(); @@ -151,7 +151,7 @@ Status CacheTransformPass::CachePass::Visit(std::shared_ptr node, // Perform leaf node cache transform identification Status CacheTransformPass::CachePass::Visit(std::shared_ptr node, bool *const modified) { if (node->IsCached() || is_caching_) { - return Status(StatusCode::kNotImplementedYet, __LINE__, __FILE__, + return Status(StatusCode::kMDNotImplementedYet, __LINE__, __FILE__, "There is currently no support for GeneratorOp under cache."); } return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/connector_throughput.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/connector_throughput.cc index b206f43ee0..5cc1575038 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/connector_throughput.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/connector_throughput.cc @@ -146,7 +146,7 @@ Status ConnectorThroughput::ChangeFileMode() { if (chmod(common::SafeCStr(file_path_), S_IRUSR | S_IWUSR) == -1) { std::string err_str = "Change file mode failed," + file_path_; - return Status(StatusCode::kUnexpectedError, err_str); + return Status(StatusCode::kMDUnexpectedError, err_str); } return Status::OK(); } diff --git 
a/mindspore/ccsrc/minddata/dataset/engine/perf/cpu_sampling.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/cpu_sampling.cc index 5b1ef29f2d..d2d788cd9e 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/cpu_sampling.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/cpu_sampling.cc @@ -53,7 +53,7 @@ Status DeviceCpu::ParseCpuInfo(const std::string &str) { uint64_t softirq = 0; if (std::sscanf(str.c_str(), "%*s %lu %lu %lu %lu %lu %lu %lu", &cpu_stat.user_stat_, &nice, &cpu_stat.sys_stat_, &cpu_stat.idle_stat_, &cpu_stat.io_stat_, &irq, &softirq) == EOF) { - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } cpu_stat.total_stat_ = @@ -87,7 +87,7 @@ Status DeviceCpu::ParseCpuInfo(const std::string &str) { Status DeviceCpu::ParseCtxt(const std::string &str) { uint64_t ctxt; if (std::sscanf(str.c_str(), "%*s %lu", &ctxt) == EOF) { - return Status(StatusCode::kUnexpectedError, "Get context switch count failed."); + return Status(StatusCode::kMDUnexpectedError, "Get context switch count failed."); } // Calculate the utilization from the second sampling if (!first_collect_) { @@ -100,7 +100,7 @@ Status DeviceCpu::ParseCtxt(const std::string &str) { Status DeviceCpu::ParseRunningProcess(const std::string &str) { uint32_t running_process; if (std::sscanf(str.c_str(), "%*s %ud", &running_process) == EOF) { - return Status(StatusCode::kUnexpectedError, "Get context switch count failed."); + return Status(StatusCode::kMDUnexpectedError, "Get context switch count failed."); } // Drop the first value in order to collect same amount of CPU utilization if (!first_collect_) { @@ -188,7 +188,7 @@ Status OperatorCpu::ParseCpuInfo(int32_t op_id, int64_t thread_id, if (!temp_path.Exists()) { (*op_stat)[op_id][thread_id].user_stat_ = 0; (*op_stat)[op_id][thread_id].sys_stat_ = 0; - return Status(StatusCode::kFileNotExist); + return Status(StatusCode::kMDFileNotExist); } 
std::ifstream file(stat_path); @@ -203,7 +203,7 @@ Status OperatorCpu::ParseCpuInfo(int32_t op_id, int64_t thread_id, if (std::sscanf(str.c_str(), "%*d %*s %*s %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %lu %lu", &utime, &stime) == EOF) { file.close(); - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } file.close(); (*op_stat)[op_id][thread_id].user_stat_ = utime; @@ -224,7 +224,7 @@ Status OperatorCpu::GetTotalCpuTime(uint64_t *total_stat) { if (std::sscanf(str.c_str(), "%*s %lu %lu %lu %lu %lu %lu %lu", &user, &nice, &sys, &idle, &iowait, &irq, &softirq) == EOF) { file.close(); - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } file.close(); *total_stat = user + nice + sys + idle + iowait + irq + softirq; @@ -398,7 +398,7 @@ Status ProcessCpu::ParseCpuInfo() { if (std::sscanf(str.c_str(), "%*d %*s %*s %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %*lu %lu %lu", &user, &sys) == EOF) { file.close(); - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } file.close(); @@ -434,7 +434,7 @@ Status ProcessCpu::GetTotalCpuTime(uint64_t *total_stat) { if (std::sscanf(str.c_str(), "%*s %lu %lu %lu %lu %lu %lu %lu", &user, &nice, &sys, &idle, &iowait, &irq, &softirq) == EOF) { file.close(); - return Status(StatusCode::kUnexpectedError, "Get device CPU failed."); + return Status(StatusCode::kMDUnexpectedError, "Get device CPU failed."); } file.close(); *total_stat = user + nice + sys + idle + iowait + irq + softirq; @@ -559,7 +559,7 @@ Status CpuSampling::Init(const std::string &dir_path, const std::string &device_ Status CpuSampling::ChangeFileMode() { if (chmod(common::SafeCStr(file_path_), S_IRUSR | S_IWUSR) == -1) { std::string err_str = "Change file mode failed," + file_path_; 
- return Status(StatusCode::kUnexpectedError, err_str); + return Status(StatusCode::kMDUnexpectedError, err_str); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/dataset_iterator_tracing.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/dataset_iterator_tracing.cc index b2188ed0cd..ee6e3dae88 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/dataset_iterator_tracing.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/dataset_iterator_tracing.cc @@ -70,7 +70,7 @@ Status DatasetIteratorTracing::ChangeFileMode() { if (chmod(common::SafeCStr(file_path_), S_IRUSR | S_IWUSR) == -1) { std::string err_str = "Change file mode failed," + file_path_; - return Status(StatusCode::kUnexpectedError, err_str); + return Status(StatusCode::kMDUnexpectedError, err_str); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/device_queue_tracing.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/device_queue_tracing.cc index 8255ed677b..eaf3607031 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/device_queue_tracing.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/device_queue_tracing.cc @@ -71,7 +71,7 @@ Status DeviceQueueTracing::ChangeFileMode() { if (chmod(common::SafeCStr(file_path_), S_IRUSR | S_IWUSR) == -1) { std::string err_str = "Change file mode failed," + file_path_; - return Status(StatusCode::kUnexpectedError, err_str); + return Status(StatusCode::kMDUnexpectedError, err_str); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/engine/perf/profiling.cc b/mindspore/ccsrc/minddata/dataset/engine/perf/profiling.cc index 49fc54188d..907f4612c1 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/perf/profiling.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/perf/profiling.cc @@ -97,7 +97,7 @@ Status ProfilingManager::RegisterTracingNode(std::shared_ptr node) { // Check if node with the same name has already been registered. 
auto exist = tracing_nodes_.find(node->Name()); if (exist != tracing_nodes_.end()) { - return Status(StatusCode::kProfilingError, "Profiling node already exist: " + node->Name()); + return Status(StatusCode::kMDProfilingError, "Profiling node already exist: " + node->Name()); } // Register the node with its name as key. RETURN_IF_NOT_OK(node->Init(dir_path_, device_id_)); @@ -110,7 +110,7 @@ Status ProfilingManager::GetTracingNode(const std::string &name, std::shared_ptr // Check if node with the same name has already been registered. auto exist = tracing_nodes_.find(name); if (exist == tracing_nodes_.end()) { - return Status(StatusCode::kProfilingError, "Profiling node does not exist: " + name); + return Status(StatusCode::kMDProfilingError, "Profiling node does not exist: " + name); } // Fetch node. *node = tracing_nodes_[name]; @@ -122,7 +122,7 @@ Status ProfilingManager::RegisterSamplingNode(std::shared_ptr node) { // Check if node with the same name has already been registered. auto exist = sampling_nodes_.find(node->Name()); if (exist != sampling_nodes_.end()) { - return Status(StatusCode::kProfilingError, "Profiling node already exist: " + node->Name()); + return Status(StatusCode::kMDProfilingError, "Profiling node already exist: " + node->Name()); } // Register the node with its name as key. RETURN_IF_NOT_OK(node->Init(dir_path_, device_id_)); @@ -135,7 +135,7 @@ Status ProfilingManager::GetSamplingNode(const std::string &name, std::shared_pt // Check if node with the same name has already been registered. auto exist = sampling_nodes_.find(name); if (exist == sampling_nodes_.end()) { - return Status(StatusCode::kProfilingError, "Profiling node does not exist: " + name); + return Status(StatusCode::kMDProfilingError, "Profiling node does not exist: " + name); } // Fetch node. 
*node = sampling_nodes_[name]; diff --git a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h b/mindspore/ccsrc/minddata/dataset/include/de_tensor.h deleted file mode 100644 index 2e20ca12b9..0000000000 --- a/mindspore/ccsrc/minddata/dataset/include/de_tensor.h +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DETENSOR_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DETENSOR_H_ -#include -#include -#include -#include "include/ms_tensor.h" -#include "minddata/dataset/include/status.h" -#include "minddata/dataset/include/tensor.h" -namespace mindspore { -namespace tensor { -class DETensor : public mindspore::tensor::MSTensor { - public: - /// \brief Create a MSTensor pointer. - /// \param[in] data_type DataTypeId of tensor to be created - /// \param[in] shape Shape of tensor to be created - /// \return MSTensor pointer - static MSTensor *CreateTensor(TypeId data_type, const std::vector &shape); - - /// \brief Create a MSTensor pointer. - /// \param[in] path Path to file to read - /// \return MSTensor pointer - static MSTensor *CreateTensor(const std::string &path); - - /// \brief Create a MSTensor pointer. 
- /// \param[in] data_type Data TypeId of tensor to be created - /// \param[in] shape Shape of tensor to be created - /// \param[in] data Data pointer - /// \return MSTensor pointer - static MSTensor *CreateFromMemory(TypeId data_type, const std::vector &shape, void *data); - - DETensor(TypeId data_type, const std::vector &shape); - - explicit DETensor(std::shared_ptr tensor_ptr); - - ~DETensor() = default; - - /// \brief Create a duplicate instance, convert the DETensor to the LiteTensor. - /// \return MSTensor pointer - MSTensor *ConvertToLiteTensor(); - - std::shared_ptr tensor() const; - - TypeId data_type() const override; - - TypeId set_data_type(const TypeId data_type); - - std::vector shape() const override; - - size_t set_shape(const std::vector &shape); - - int DimensionSize(size_t index) const override; - - int ElementsNum() const override; - - std::size_t hash() const; - - size_t Size() const override; - - void *MutableData() override; - - protected: - std::shared_ptr tensor_impl_; -}; -} // namespace tensor -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DETENSOR_H_ diff --git a/mindspore/ccsrc/minddata/dataset/include/execute.h b/mindspore/ccsrc/minddata/dataset/include/execute.h index d4320e43e3..c2c4ace8c7 100644 --- a/mindspore/ccsrc/minddata/dataset/include/execute.h +++ b/mindspore/ccsrc/minddata/dataset/include/execute.h @@ -19,49 +19,38 @@ #include #include - +#include "include/api/types.h" #include "minddata/dataset/include/constants.h" -#ifdef ENABLE_ANDROID -#include "minddata/dataset/include/de_tensor.h" -#endif -#include "minddata/dataset/include/tensor.h" #include "minddata/dataset/include/transforms.h" namespace mindspore { namespace dataset { -class TensorOp; - // class to run tensor operations in eager mode class Execute { public: /// \brief Constructor explicit Execute(std::shared_ptr op); - /// \brief Destructor - ~Execute(); + explicit Execute(std::vector> ops); -#ifdef ENABLE_ANDROID - /// \brief 
callable function to execute the TensorOperation in eager mode - /// \param[in] input - the tensor to be transformed - /// \return - the output tensor, nullptr if Compute fails - std::shared_ptr operator()(std::shared_ptr input); -#endif + /// \brief Destructor + ~Execute() = default; /// \brief callable function to execute the TensorOperation in eager mode - /// \param[in] input - the tensor to be transformed - /// \return - the output tensor, nullptr if Compute fails - std::shared_ptr operator()(std::shared_ptr input); + /// \param[in] input Tensor to be transformed + /// \param[out] output Transformed tensor + /// \return Status code + Status operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output); /// \brief callable function to execute the TensorOperation in eager mode - /// \param[in] input_tensor_list - the tensor to be transformed - /// \param[out] out - the result tensor after transform + /// \param[in] input_tensor_list List of Tensor to be transformed + /// \param[out] out Result tensor after transform /// \return - Status - Status operator()(const std::vector> &input_tensor_list, - std::vector> *out); + Status operator()(const std::vector &input_tensor_list, std::vector *out); private: - std::shared_ptr op_; + std::vector> ops_; }; } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/include/minddata_eager.h b/mindspore/ccsrc/minddata/dataset/include/minddata_eager.h deleted file mode 100644 index 563ef66e85..0000000000 --- a/mindspore/ccsrc/minddata/dataset/include/minddata_eager.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_MINDDATA_EAGER_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_MINDDATA_EAGER_H_ - -#include -#include -#include - -#include "include/api/status.h" -#include "include/api/types.h" -#include "minddata/dataset/include/transforms.h" -#include "minddata/dataset/include/vision.h" - -namespace mindspore { -namespace api { - -// class to run tensor operations in eager mode -class MindDataEager { - public: - /// \brief Constructor - MindDataEager() = default; - - /// \brief Constructor - /// \param[inout] ops Transforms to be applied - explicit MindDataEager(std::vector> ops); - - /// \brief Destructor - ~MindDataEager() = default; - - /// \brief Function to read images from local directory - /// \param[inout] image_dir Target directory which contains images - /// \param[output] images Vector of image Tensor - /// \return Status The status code returned - static Status LoadImageFromDir(const std::string &image_dir, std::vector> *images); - - /// \brief Callable function to execute the TensorOperation in eager mode - /// \param[inout] input Tensor to be transformed - /// \return Output tensor, nullptr if Compute fails - std::shared_ptr operator()(std::shared_ptr input); - - private: - std::vector> ops_; -}; - -} // namespace api -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_MINDDATA_EAGER_H_ diff --git a/mindspore/ccsrc/minddata/dataset/include/status.h b/mindspore/ccsrc/minddata/dataset/include/status.h index 7002b05f10..e5896687d2 100644 --- 
a/mindspore/ccsrc/minddata/dataset/include/status.h +++ b/mindspore/ccsrc/minddata/dataset/include/status.h @@ -29,6 +29,8 @@ #include #include +#include "include/api/status.h" + namespace mindspore { namespace dataset { #define RETURN_IF_NOT_OK(_s) \ @@ -39,23 +41,30 @@ namespace dataset { } \ } while (false) -#define RETURN_STATUS_UNEXPECTED(_e) \ - do { \ - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, _e); \ +#define RETURN_STATUS_UNEXPECTED(_e) \ + do { \ + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, _e); \ } while (false) -#define CHECK_FAIL_RETURN_UNEXPECTED(_condition, _e) \ - do { \ - if (!(_condition)) { \ - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, _e); \ - } \ +#define CHECK_FAIL_RETURN_UNEXPECTED(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, _e); \ + } \ } while (false) -#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ - do { \ - if (!(_condition)) { \ - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, _e); \ - } \ +#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ + } \ + } while (false) + +#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ + } \ } while (false) #define RETURN_UNEXPECTED_IF_NULL(_ptr) \ @@ -73,9 +82,9 @@ namespace dataset { } \ } while (false) -#define RETURN_STATUS_SYNTAX_ERROR(_e) \ - do { \ - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, _e); \ +#define RETURN_STATUS_SYNTAX_ERROR(_e) \ + do { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ } while (false) #define RETURN_SECOND_IF_ERROR(_s, _r) \ @@ -87,99 +96,8 @@ namespace dataset { } \ } while (false) -enum class StatusCode : char { - kOK = 0, - kOutOfMemory = 1, - 
kShapeMisMatch = 2, - kInterrupted = 3, - kNoSpace = 4, - kPyFuncException = 5, - kDuplicateKey = 6, - kPythonInterpreterFailure = 7, - kTDTPushFailure = 8, - kFileNotExist = 9, - kProfilingError = 10, - kBoundingBoxOutOfBounds = 11, - kBoundingBoxInvalidShape = 12, - kSyntaxError = 13, - kTimeOut = 14, - kBuddySpaceFull = 15, - kNetWorkError = 16, - kNotImplementedYet = 17, - // Make this error code the last one. Add new error code above it. - kUnexpectedError = 127 -}; - -std::string CodeAsString(const StatusCode c); - -class Status { - public: - Status() noexcept; - - explicit Status(StatusCode c) noexcept; - - ~Status() noexcept; - - // Copy constructor - Status(const Status &s); - - Status &operator=(const Status &s); - - // Move constructor - Status(Status &&) noexcept; - - Status &operator=(Status &&) noexcept; - - Status(const StatusCode code, const std::string &msg); - - Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = ""); - - // Return a success status - static Status OK() { return Status(StatusCode::kOK); } - - std::string ToString() const; - - StatusCode get_code() const; - - int GetLineOfCode() const { return line_of_code_; } - - std::string SetErrDescription(const std::string &err_description); - - std::string GetErrDescription() const { return err_description_; } - - friend std::ostream &operator<<(std::ostream &os, const Status &s); - - explicit operator bool() const { return (get_code() == StatusCode::kOK); } - - bool operator==(const Status &other) const { return (this->get_code() == other.get_code()); } - - bool operator!=(const Status &other) const { return !(*this == other); } - - bool IsOk() const { return (get_code() == StatusCode::kOK); } - - bool IsError() const { return !IsOk(); } - - bool IsOutofMemory() const { return (get_code() == StatusCode::kOutOfMemory); } - - bool IsInterrupted() const { return (get_code() == StatusCode::kInterrupted); } - - bool IsShapeIncorrect() const { return 
(get_code() == StatusCode::kShapeMisMatch); } - - bool IsNoSpace() const { return (get_code() == StatusCode::kNoSpace); } - - bool IsNetWorkError() const { return (get_code() == StatusCode::kNetWorkError); } - - private: - StatusCode code_; - int line_of_code_; - std::string file_name_; - std::string err_description_; - std::string err_msg_; -}; - #if !defined(_WIN32) && !defined(_WIN64) const float MAX_MEMORY_USAGE_THRESHOLD = 0.95; - float GetMemoryUsage(); #endif } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/include/tensor.h b/mindspore/ccsrc/minddata/dataset/include/tensor.h index e149f87202..2113036ae5 100644 --- a/mindspore/ccsrc/minddata/dataset/include/tensor.h +++ b/mindspore/ccsrc/minddata/dataset/include/tensor.h @@ -41,22 +41,16 @@ #include "minddata/dataset/core/constants.h" #include "minddata/dataset/core/data_type.h" #include "minddata/dataset/core/tensor_shape.h" +#include "minddata/dataset/core/de_tensor.h" #include "minddata/dataset/util/status.h" #ifndef ENABLE_ANDROID #include "proto/example.pb.h" -#else -#include "minddata/dataset/include/de_tensor.h" #endif #ifdef ENABLE_PYTHON namespace py = pybind11; #endif namespace mindspore { -#ifdef ENABLE_ANDROID -namespace tensor { -class DETensor; -} // namespace tensor -#endif namespace dataset { class Tensor; template @@ -84,7 +78,7 @@ class Tensor { /// \param other Tensor to be moved Tensor(Tensor &&other) noexcept; - /// Move assigment operator + /// Move assignment operator /// \param other Tensor to be moved Tensor &operator=(Tensor &&other) noexcept; @@ -133,7 +127,7 @@ class Tensor { #ifndef ENABLE_ANDROID /// Create a tensor of type DE_STRING from a BytesList. 
/// \param[in] bytes_list protobuf's Bytelist - /// \param[in] shape shape of the outout tensor + /// \param[in] shape shape of the output tensor /// \param[out] out created Tensor /// \return Status Code static Status CreateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, TensorPtr *out); @@ -279,7 +273,7 @@ class Tensor { std::string err; err += (data_ == nullptr) ? "data_ is nullptr \t" : ""; err += type_.IsCompatible() ? "data type not compatible\t" : ""; - return Status(StatusCode::kUnexpectedError, err); + return Status(StatusCode::kMDUnexpectedError, err); } } @@ -330,7 +324,7 @@ class Tensor { void Invalidate(); /// Copy input tensor into self at the location index. - /// Index is a vector of axises which can be incomplete: + /// Index is a vector of axes which can be incomplete: /// Ex: shape <2,3>, inserting into index {0} will replace the first row. index {1,2} will replace the last cell. /// \param index /// \param input @@ -375,7 +369,7 @@ class Tensor { /// Handle negative indices. static inline dsize_t HandleNeg(dsize_t index, dsize_t length) { return (index < 0) ? (index + length) : index; } - /// Slice tensor bases on the given indicies. Copy the sliced data into out tensor. Only rank1 tensors are supported. + /// Slice tensor bases on the given indices. Copy the sliced data into out tensor. Only rank1 tensors are supported. 
/// Based on the type of tensor, SliceNumeric or SliceString will be called /// \param[out] out Tensor /// \param[in] indices vector of indices @@ -663,9 +657,8 @@ class Tensor { unsigned char *data_end_ = nullptr; private: -#ifdef ENABLE_ANDROID - friend class tensor::DETensor; -#endif + friend class DETensor; + /// Copy raw data of a array based on shape and strides to the destination pointer /// \param dst [out] Pointer to the destination array where the content is to be copied /// \param[in] src Pointer to the source of strided array to be copied diff --git a/mindspore/ccsrc/minddata/dataset/include/type_id.h b/mindspore/ccsrc/minddata/dataset/include/type_id.h index d4c0560931..ef077a832d 100644 --- a/mindspore/ccsrc/minddata/dataset/include/type_id.h +++ b/mindspore/ccsrc/minddata/dataset/include/type_id.h @@ -17,6 +17,7 @@ #define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_TYPEID_H_ #include "mindspore/core/ir/dtype/type_id.h" +#include "minddata/dataset/core/data_type.h" namespace mindspore { namespace dataset { @@ -46,6 +47,8 @@ inline dataset::DataType MSTypeToDEType(TypeId data_type) { return dataset::DataType(dataset::DataType::DE_FLOAT32); case kNumberTypeFloat64: return dataset::DataType(dataset::DataType::DE_FLOAT64); + case kObjectTypeString: + return dataset::DataType(dataset::DataType::DE_STRING); default: return dataset::DataType(dataset::DataType::DE_UNKNOWN); } @@ -77,6 +80,8 @@ inline TypeId DETypeToMSType(dataset::DataType data_type) { return mindspore::TypeId::kNumberTypeFloat32; case dataset::DataType::DE_FLOAT64: return mindspore::TypeId::kNumberTypeFloat64; + case dataset::DataType::DE_STRING: + return mindspore::TypeId::kObjectTypeString; default: return kTypeUnknown; } diff --git a/mindspore/ccsrc/minddata/dataset/kernels/data/one_hot_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/data/one_hot_op.cc index b28a2930c7..983ab898e9 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/data/one_hot_op.cc +++ 
b/mindspore/ccsrc/minddata/dataset/kernels/data/one_hot_op.cc @@ -35,7 +35,7 @@ Status OneHotOp::OutputShape(const std::vector &inputs, std::vector if (inputs_copy[0].Rank() == 0) outputs.emplace_back(std::vector{num_classes_}); if (inputs_copy[0].Rank() == 1) outputs.emplace_back(std::vector{inputs_copy[0][0], num_classes_}); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "OneHot: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "OneHot: invalid input shape."); } Status OneHotOp::to_json(nlohmann::json *out_json) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/bounding_box.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/bounding_box.cc index 4a6a55df38..791f189c43 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/bounding_box.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/bounding_box.cc @@ -43,16 +43,16 @@ Status BoundingBox::ReadFromTensor(const TensorPtr &bbox_tensor, dsize_t index_o Status BoundingBox::ValidateBoundingBoxes(const TensorRow &image_and_bbox) { if (image_and_bbox.size() != 2) { - return Status(StatusCode::kBoundingBoxInvalidShape, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__, "BoundingBox: invalid input, likely missed bounding boxes."); } if (image_and_bbox[1]->shape().Size() < 2) { - return Status(StatusCode::kBoundingBoxInvalidShape, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__, "BoundingBox: bounding boxes should have to be two-dimensional matrix at least."); } uint32_t num_of_features = image_and_bbox[1]->shape()[1]; if (num_of_features < 4) { - return Status(StatusCode::kBoundingBoxInvalidShape, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxInvalidShape, __LINE__, __FILE__, "BoundingBox: bounding boxes should be have at least 4 features."); } std::vector> bbox_list; @@ -61,11 +61,11 @@ Status 
BoundingBox::ValidateBoundingBoxes(const TensorRow &image_and_bbox) { uint32_t img_w = image_and_bbox[0]->shape()[1]; for (auto &bbox : bbox_list) { if ((bbox->x() + bbox->width() > img_w) || (bbox->y() + bbox->height() > img_h)) { - return Status(StatusCode::kBoundingBoxOutOfBounds, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxOutOfBounds, __LINE__, __FILE__, "BoundingBox: bounding boxes is out of bounds of the image"); } if (static_cast(bbox->x()) < 0 || static_cast(bbox->y()) < 0) { - return Status(StatusCode::kBoundingBoxOutOfBounds, __LINE__, __FILE__, + return Status(StatusCode::kMDBoundingBoxOutOfBounds, __LINE__, __FILE__, "BoundingBox: the coordinates of the bounding boxes has negative value."); } } diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/center_crop_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/center_crop_op.cc index 674acb6e53..70529817da 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/center_crop_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/center_crop_op.cc @@ -72,7 +72,7 @@ Status CenterCropOp::OutputShape(const std::vector &inputs, std::ve if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "CenterCrop: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "CenterCrop: invalid input shape."); } } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/crop_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/crop_op.cc index 8d287c7b32..426052e191 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/crop_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/crop_op.cc @@ -44,7 +44,7 @@ Status CropOp::OutputShape(const std::vector &inputs, std::vector &inputs, std::vector TensorShape out({-1, -1, 3}); // we don't know what is output 
image size, but we know it should be 3 channels if (inputs[0].Rank() == 1) outputs.emplace_back(out); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "Decode: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "Decode: invalid input shape."); } Status DecodeOp::OutputType(const std::vector &inputs, std::vector &outputs) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc index cb1be1d76b..080c6fbd3d 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.cc @@ -1,106 +1,106 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include -#include "minddata/dataset/kernels/image/dvpp/utils/AclProcess.h" -#include "minddata/dataset/core/cv_tensor.h" -#include "minddata/dataset/kernels/image/image_utils.h" -#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" -#include "minddata/dataset/core/data_type.h" -#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h" -#include "include/api/context.h" - -namespace mindspore { -namespace dataset { -Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { - IO_CHECK(input, output); - if (!IsNonEmptyJPEG(input)) { - RETURN_STATUS_UNEXPECTED("SoftDvppDecodeReiszeJpegOp only support process jpeg image."); - } - try { - CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty."); - unsigned char *buffer = const_cast(input->GetBuffer()); - RawData imageInfo; - uint32_t filesize = input->SizeInBytes(); - imageInfo.lenOfByte = filesize; - imageInfo.data = std::make_shared(); - imageInfo.data.reset(new uint8_t[filesize], std::default_delete()); - memcpy_s(imageInfo.data.get(), filesize, buffer, filesize); - // First part end, whose function is to transform data from a Tensor to imageinfo data structure which can be - // applied on device - ResourceInfo resource; - resource.aclConfigPath = ""; - resource.deviceIds.insert(api::Context::Instance().GetDeviceID()); - std::shared_ptr instance = ResourceManager::GetInstance(); - APP_ERROR ret = instance->InitResource(resource); - if (ret != APP_ERR_OK) { - instance->Release(); - std::string error = "Error in Init D-chip:" + std::to_string(ret); - RETURN_STATUS_UNEXPECTED(error); - } - int deviceId = *(resource.deviceIds.begin()); - aclrtContext context = instance->GetContext(deviceId); - // Second part end where we initialize the resource of D chip and set up all configures - AclProcess process(resized_width_, resized_height_, crop_width_, crop_height_, context); - 
process.set_mode(true); - ret = process.InitResource(); - if (ret != APP_ERR_OK) { - instance->Release(); - std::string error = "Error in Init resource:" + std::to_string(ret); - RETURN_STATUS_UNEXPECTED(error); - } - ret = process.Process(imageInfo); - if (ret != APP_ERR_OK) { - instance->Release(); - std::string error = "Error in dvpp processing:" + std::to_string(ret); - RETURN_STATUS_UNEXPECTED(error); - } - // Third part end where we execute the core function of dvpp - auto data = std::static_pointer_cast(process.Get_Memory_Data()); - unsigned char *ret_ptr = data.get(); - std::shared_ptr CropOut = process.Get_Device_Memory_Data(); - dsize_t dvpp_length = CropOut->dataSize; - const TensorShape dvpp_shape({dvpp_length, 1, 1}); - const DataType dvpp_data_type(DataType::DE_UINT8); - mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output); - if (!((*output)->HasData())) { - std::string error = "[ERROR] Fail to get the Output result from memory!"; - RETURN_STATUS_UNEXPECTED(error); - } - process.device_memory_release(); - process.Release(); - // Last part end where we transform the processed data into a tensor which can be applied in later units. 
- } catch (const cv::Exception &e) { - std::string error = "[ERROR] Fail in DvppDecodeResizeCropJpegOp:" + std::string(e.what()); - RETURN_STATUS_UNEXPECTED(error); - } - return Status::OK(); -} - -Status DvppDecodeResizeCropJpegOp::OutputShape(const std::vector &inputs, - std::vector &outputs) { - RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs)); - outputs.clear(); - TensorShape out({-1, 1, 1}); // we don't know what is output image size, but we know it should be 3 channels - if (inputs[0].Rank() == 1) outputs.emplace_back(out); - if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "Input has a wrong shape"); -} - -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "minddata/dataset/kernels/image/dvpp/utils/AclProcess.h" +#include "minddata/dataset/core/cv_tensor.h" +#include "minddata/dataset/kernels/image/image_utils.h" +#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" +#include "minddata/dataset/core/data_type.h" +#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h" +#include "include/api/context.h" + +namespace mindspore { +namespace dataset { +Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { + IO_CHECK(input, output); + if (!IsNonEmptyJPEG(input)) { + RETURN_STATUS_UNEXPECTED("SoftDvppDecodeReiszeJpegOp only support process jpeg image."); + } + try { + CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty."); + unsigned char *buffer = const_cast(input->GetBuffer()); + RawData imageInfo; + uint32_t filesize = input->SizeInBytes(); + imageInfo.lenOfByte = filesize; + imageInfo.data = std::make_shared(); + imageInfo.data.reset(new uint8_t[filesize], std::default_delete()); + memcpy_s(imageInfo.data.get(), filesize, buffer, filesize); + // First part end, whose function is to transform data from a Tensor to imageinfo data structure which can be + // applied on device + ResourceInfo resource; + resource.aclConfigPath = ""; + resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID()); + std::shared_ptr instance = ResourceManager::GetInstance(); + APP_ERROR ret = instance->InitResource(resource); + if (ret != APP_ERR_OK) { + instance->Release(); + std::string error = "Error in Init D-chip:" + std::to_string(ret); + RETURN_STATUS_UNEXPECTED(error); + } + int deviceId = *(resource.deviceIds.begin()); + aclrtContext context = instance->GetContext(deviceId); + // Second part end where we initialize the resource of D chip and set up all configures + AclProcess process(resized_width_, resized_height_, crop_width_, crop_height_, context); 
+ process.set_mode(true); + ret = process.InitResource(); + if (ret != APP_ERR_OK) { + instance->Release(); + std::string error = "Error in Init resource:" + std::to_string(ret); + RETURN_STATUS_UNEXPECTED(error); + } + ret = process.Process(imageInfo); + if (ret != APP_ERR_OK) { + instance->Release(); + std::string error = "Error in dvpp processing:" + std::to_string(ret); + RETURN_STATUS_UNEXPECTED(error); + } + // Third part end where we execute the core function of dvpp + auto data = std::static_pointer_cast(process.Get_Memory_Data()); + unsigned char *ret_ptr = data.get(); + std::shared_ptr CropOut = process.Get_Device_Memory_Data(); + dsize_t dvpp_length = CropOut->dataSize; + const TensorShape dvpp_shape({dvpp_length, 1, 1}); + const DataType dvpp_data_type(DataType::DE_UINT8); + mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output); + if (!((*output)->HasData())) { + std::string error = "[ERROR] Fail to get the Output result from memory!"; + RETURN_STATUS_UNEXPECTED(error); + } + process.device_memory_release(); + process.Release(); + // Last part end where we transform the processed data into a tensor which can be applied in later units. 
+ } catch (const cv::Exception &e) { + std::string error = "[ERROR] Fail in DvppDecodeResizeCropJpegOp:" + std::string(e.what()); + RETURN_STATUS_UNEXPECTED(error); + } + return Status::OK(); +} + +Status DvppDecodeResizeCropJpegOp::OutputShape(const std::vector &inputs, + std::vector &outputs) { + RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs)); + outputs.clear(); + TensorShape out({-1, 1, 1}); // we don't know what is output image size, but we know it should be 3 channels + if (inputs[0].Rank() == 1) outputs.emplace_back(out); + if (!outputs.empty()) return Status::OK(); + return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape"); +} + +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h index aae9c77f6d..33df9bf499 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h @@ -1,60 +1,60 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H -#define MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H - -#include -#include -#include - -#include "minddata/dataset/core/tensor.h" -#include "minddata/dataset/kernels/tensor_op.h" -#include "minddata/dataset/util/status.h" -#include "minddata/dataset/core/data_type.h" -#include "mindspore/core/utils/log_adapter.h" -#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h" -#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h" -#include "acl/acl.h" - -namespace mindspore { -namespace dataset { -class DvppDecodeResizeCropJpegOp : public TensorOp { - public: - DvppDecodeResizeCropJpegOp(int32_t crop_height, int32_t crop_width, int32_t resized_height, int32_t resized_width) - : crop_height_(crop_height), - crop_width_(crop_width), - resized_height_(resized_height), - resized_width_(resized_width) {} - - /// \brief Destructor - ~DvppDecodeResizeCropJpegOp() = default; - - Status Compute(const std::shared_ptr &input, std::shared_ptr *output) override; - Status OutputShape(const std::vector &inputs, std::vector &outputs) override; - - std::string Name() const override { return kDvppDecodeResizeCropJpegOp; } - - private: - int32_t crop_height_; - int32_t crop_width_; - int32_t resized_height_; - int32_t resized_width_; -}; -} // namespace dataset -} // namespace mindspore - -#endif // MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H +#define MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H + +#include +#include +#include + +#include "minddata/dataset/core/tensor.h" +#include "minddata/dataset/kernels/tensor_op.h" +#include "minddata/dataset/util/status.h" +#include "minddata/dataset/core/data_type.h" +#include "mindspore/core/utils/log_adapter.h" +#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h" +#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h" +#include "acl/acl.h" + +namespace mindspore { +namespace dataset { +class DvppDecodeResizeCropJpegOp : public TensorOp { + public: + DvppDecodeResizeCropJpegOp(int32_t crop_height, int32_t crop_width, int32_t resized_height, int32_t resized_width) + : crop_height_(crop_height), + crop_width_(crop_width), + resized_height_(resized_height), + resized_width_(resized_width) {} + + /// \brief Destructor + ~DvppDecodeResizeCropJpegOp() = default; + + Status Compute(const std::shared_ptr &input, std::shared_ptr *output) override; + Status OutputShape(const std::vector &inputs, std::vector &outputs) override; + + std::string Name() const override { return kDvppDecodeResizeCropJpegOp; } + + private: + int32_t crop_height_; + int32_t crop_width_; + int32_t resized_height_; + int32_t resized_width_; +}; +} // namespace dataset +} // namespace mindspore + +#endif // MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.cc index e27d1d08b4..4ac49c4728 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.cc @@ -90,14 +90,14 @@ APP_ERROR ResourceManager::InitResource(ResourceInfo &resourceInfo) { 
APP_ERROR ret; if (aclConfigPath.length() == 0) { // Init acl without aclconfig - acl_env_ = mindspore::api::AclEnvGuard::GetAclEnv(""); + acl_env_ = mindspore::AclEnvGuard::GetAclEnv(""); } else { ret = ExistFile(aclConfigPath); if (ret != APP_ERR_OK) { MS_LOG(ERROR) << "Acl config file not exist, ret = " << ret << "."; return ret; } - acl_env_ = mindspore::api::AclEnvGuard::GetAclEnv(aclConfigPath); + acl_env_ = mindspore::AclEnvGuard::GetAclEnv(aclConfigPath); } if (acl_env_ == nullptr) { MS_LOG(ERROR) << "Failed to init acl."; diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h index 88b5eda4f4..d27b9611d2 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h @@ -86,7 +86,7 @@ class ResourceManager { std::vector deviceIds_; std::vector contexts_; std::unordered_map deviceIdMap_; // Map of device to index - std::shared_ptr acl_env_; + std::shared_ptr acl_env_; }; #endif diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/hwc_to_chw_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/hwc_to_chw_op.cc index fe1215d5b4..311b65020b 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/hwc_to_chw_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/hwc_to_chw_op.cc @@ -33,7 +33,7 @@ Status HwcToChwOp::OutputShape(const std::vector &inputs, std::vect TensorShape out = TensorShape{in[2], in[0], in[1]}; if (inputs[0].Rank() == 3) outputs.emplace_back(out); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "HWC2CHW: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "HWC2CHW: invalid input shape."); } } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc 
b/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc index c70fff9c8e..85f34f7134 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/image_utils.cc @@ -110,11 +110,11 @@ Status Resize(const std::shared_ptr &input, std::shared_ptr *out if (output_height > in_image.rows * 1000 || output_width > in_image.cols * 1000) { std::string err_msg = "Resize: the resizing width or height is too big, it's 1000 times bigger than the original image."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } if (output_height == 0 || output_width == 0) { std::string err_msg = "Resize: the resizing width or height is invalid, width or height is zero."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } try { TensorShape shape{output_height, output_width}; @@ -632,12 +632,12 @@ Status Normalize(const std::shared_ptr &input, std::shared_ptr * mean->Squeeze(); if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != 3) { std::string err_msg = "Normalize: mean should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } std->Squeeze(); if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != 3) { std::string err_msg = "Normalize: std tensor should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } try { // NOTE: We are assuming the input image is in RGB and the mean @@ -682,12 +682,12 @@ Status NormalizePad(const std::shared_ptr &input, std::shared_ptrSqueeze(); if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != 3) { std::string err_msg = "NormalizePad: mean tensor should be of size 3 and type float."; - return 
Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } std->Squeeze(); if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != 3) { std::string err_msg = "NormalizePad: std tensor should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } try { // NOTE: We are assuming the input image is in RGB and the mean diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc index ca1785b726..782b60cd93 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/lite_image_utils.cc @@ -320,12 +320,12 @@ Status Normalize(const std::shared_ptr &input, std::shared_ptr * mean->Squeeze(); if (mean->type() != DataType::DE_FLOAT32 || mean->Rank() != 1 || mean->shape()[0] != 3) { std::string err_msg = "Normalize: mean should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } std->Squeeze(); if (std->type() != DataType::DE_FLOAT32 || std->Rank() != 1 || std->shape()[0] != 3) { std::string err_msg = "Normalize: std should be of size 3 and type float."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } // convert mean, std back to vector std::vector vec_mean; @@ -385,7 +385,7 @@ Status Resize(const std::shared_ptr &input, std::shared_ptr *out std::string err_msg = "Resize: the resizing width or height 1) is too big, it's up to " "1000 times the original image; 2) can not be 0."; - return Status(StatusCode::kShapeMisMatch, err_msg); + return Status(StatusCode::kMDShapeMisMatch, err_msg); } try { LiteMat lite_mat_rgb; diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/pad_op.cc 
b/mindspore/ccsrc/minddata/dataset/kernels/image/pad_op.cc index 4d320362d9..0105a4cc7f 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/pad_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/pad_op.cc @@ -48,7 +48,7 @@ Status PadOp::OutputShape(const std::vector &inputs, std::vector &inputs if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "RandomCropAndResize: invalid input shape"); + return Status(StatusCode::kMDUnexpectedError, "RandomCropAndResize: invalid input shape"); } Status RandomCropAndResizeOp::GetCropBox(int h_in, int w_in, int *x, int *y, int *crop_height, int *crop_width) { *crop_width = w_in; diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/random_crop_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/random_crop_op.cc index 812f71309a..c0dea9d87b 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/random_crop_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/random_crop_op.cc @@ -94,11 +94,11 @@ Status RandomCropOp::ImagePadding(const std::shared_ptr &input, std::sha } if (crop_height_ == 0 || crop_width_ == 0) { - return Status(StatusCode::kShapeMisMatch, __LINE__, __FILE__, + return Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__, "RandomCrop: invalid crop size, crop dimension is not allowed to be zero."); } if (*padded_image_h < crop_height_ || *padded_image_w < crop_width_ || crop_height_ == 0 || crop_width_ == 0) { - return Status(StatusCode::kShapeMisMatch, __LINE__, __FILE__, + return Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__, "RandomCrop: invalid crop size, crop size is bigger than the image dimensions."); } return Status::OK(); @@ -144,7 +144,7 @@ Status RandomCropOp::OutputShape(const std::vector &inputs, std::ve if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) 
outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "RandomCrop: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "RandomCrop: invalid input shape."); } } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/random_rotation_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/random_rotation_op.cc index bf965dec1e..1908cabd20 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/random_rotation_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/random_rotation_op.cc @@ -77,7 +77,7 @@ Status RandomRotationOp::OutputShape(const std::vector &inputs, std if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "RandomRotation: invalid input shape."); + return Status(StatusCode::kMDUnexpectedError, "RandomRotation: invalid input shape."); } } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/minddata/dataset/kernels/image/resize_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/resize_op.cc index 4a7ede6d19..6d9ec7a127 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/resize_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/resize_op.cc @@ -64,7 +64,7 @@ Status ResizeOp::OutputShape(const std::vector &inputs, std::vector if (inputs[0].Rank() == 2) outputs.emplace_back(out); if (inputs[0].Rank() == 3) outputs.emplace_back(out.AppendDim(inputs[0][2])); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "Resize: invalid input wrong shape."); + return Status(StatusCode::kMDUnexpectedError, "Resize: invalid input wrong shape."); } } // namespace dataset } // namespace mindspore diff --git 
a/mindspore/ccsrc/minddata/dataset/kernels/image/soft_dvpp/soft_dvpp_decode_resize_jpeg_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/image/soft_dvpp/soft_dvpp_decode_resize_jpeg_op.cc index 3b6944e911..2c684b1080 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/image/soft_dvpp/soft_dvpp_decode_resize_jpeg_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/image/soft_dvpp/soft_dvpp_decode_resize_jpeg_op.cc @@ -82,7 +82,7 @@ Status SoftDvppDecodeResizeJpegOp::OutputShape(const std::vector &i TensorShape out({-1, -1, 3}); // we don't know what is output image size, but we know it should be 3 channels if (inputs[0].Rank() == 1) outputs.emplace_back(out); if (!outputs.empty()) return Status::OK(); - return Status(StatusCode::kUnexpectedError, "Input has a wrong shape"); + return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape"); } } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/kernels/py_func_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/py_func_op.cc index 30df753540..11fc31b58d 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/py_func_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/py_func_op.cc @@ -26,12 +26,12 @@ namespace mindspore { namespace dataset { Status PyFuncOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); - Status ret = Status(StatusCode::kOK, "PyFunc Call Succeed"); + Status ret = Status(StatusCode::kSuccess, "PyFunc Call Succeed"); { // Acquire Python GIL py::gil_scoped_acquire gil_acquire; if (Py_IsInitialized() == 0) { - ret = Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + ret = Status(StatusCode::kMDPythonInterpreterFailure, "Python Interpreter is finalized"); goto ComputeReturn; } try { @@ -81,7 +81,7 @@ Status PyFuncOp::Compute(const TensorRow &input, TensorRow *output) { } } } catch (const py::error_already_set &e) { - ret = Status(StatusCode::kPyFuncException, e.what()); + ret = 
Status(StatusCode::kMDPyFuncException, e.what()); } } @@ -89,12 +89,12 @@ ComputeReturn: return ret; ShapeMisMatch: - ret = - Status(StatusCode::kShapeMisMatch, __LINE__, __FILE__, "PyFunc should return a numpy array or a numpy array tuple"); + ret = Status(StatusCode::kMDShapeMisMatch, __LINE__, __FILE__, + "PyFunc should return a numpy array or a numpy array tuple"); goto ComputeReturn; TimeoutError: - ret = Status(StatusCode::kTimeOut, __LINE__, __FILE__, "PyFunc execute time out"); + ret = Status(StatusCode::kMDTimeOut, __LINE__, __FILE__, "PyFunc execute time out"); goto ComputeReturn; } @@ -114,7 +114,7 @@ Status PyFuncOp::CastOutput(const py::object &ret_py_obj, TensorRow *output) { } output->push_back(out); } catch (const std::exception &e) { - return Status(StatusCode::kUnexpectedError, e.what()); + return Status(StatusCode::kMDUnexpectedError, e.what()); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc b/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc index e394284679..974de7f3ab 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/tensor_op.cc @@ -27,9 +27,9 @@ namespace dataset { Status TensorOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); if (!OneToOne()) { - return Status(StatusCode::kUnexpectedError, "Wrong Compute() function is called. This is not 1-1 TensorOp."); + return Status(StatusCode::kMDUnexpectedError, "Wrong Compute() function is called. This is not 1-1 TensorOp."); } else { - return Status(StatusCode::kUnexpectedError, + return Status(StatusCode::kMDUnexpectedError, "Is this TensorOp 1-1? 
If yes, please implement this Compute() in the derived class."); } } @@ -44,13 +44,13 @@ Status TensorOp::Compute(const TensorRow &input, TensorRow *output) { return Compute(input[0], &(*output)[0]); } - return Status(StatusCode::kUnexpectedError, + return Status(StatusCode::kMDUnexpectedError, "Is this TensorOp oneToOne? If no, please implement this Compute() in the derived class."); } Status TensorOp::OutputShape(const std::vector &inputs, std::vector &outputs) { if (inputs.size() != NumInput()) - return Status(StatusCode::kUnexpectedError, + return Status(StatusCode::kMDUnexpectedError, "The size of the input argument vector does not match the number of inputs"); outputs = inputs; return Status::OK(); @@ -58,7 +58,7 @@ Status TensorOp::OutputShape(const std::vector &inputs, std::vector Status TensorOp::OutputType(const std::vector &inputs, std::vector &outputs) { if (inputs.size() != NumInput()) - return Status(StatusCode::kUnexpectedError, + return Status(StatusCode::kMDUnexpectedError, "The size of the input argument vector does not match the number of inputs"); outputs = inputs; return Status::OK(); diff --git a/mindspore/ccsrc/minddata/dataset/text/kernels/jieba_tokenizer_op.cc b/mindspore/ccsrc/minddata/dataset/text/kernels/jieba_tokenizer_op.cc index c1d3b6bc03..82f89f5d91 100644 --- a/mindspore/ccsrc/minddata/dataset/text/kernels/jieba_tokenizer_op.cc +++ b/mindspore/ccsrc/minddata/dataset/text/kernels/jieba_tokenizer_op.cc @@ -83,7 +83,7 @@ Status JiebaTokenizerOp::Compute(const TensorRow &input, TensorRow *output) { Status JiebaTokenizerOp::AddWord(const std::string &word, int freq) { RETURN_UNEXPECTED_IF_NULL(jieba_parser_); if (jieba_parser_->InsertUserWord(word, freq, "") == false) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "AddWord: add word failed."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "AddWord: add word failed."); } return Status::OK(); } diff --git 
a/mindspore/ccsrc/minddata/dataset/text/kernels/sentence_piece_tokenizer_op.cc b/mindspore/ccsrc/minddata/dataset/text/kernels/sentence_piece_tokenizer_op.cc index f2bbdc1596..aecdabb99b 100644 --- a/mindspore/ccsrc/minddata/dataset/text/kernels/sentence_piece_tokenizer_op.cc +++ b/mindspore/ccsrc/minddata/dataset/text/kernels/sentence_piece_tokenizer_op.cc @@ -31,7 +31,7 @@ SentencePieceTokenizerOp::SentencePieceTokenizerOp(const std::shared_ptrmodel_proto()); if (!status.ok()) { model_status_ = - Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "SentencePieceTokenizer: parser vocab model filed."); + Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "SentencePieceTokenizer: parser vocab model filed."); } else { model_status_ = Status::OK(); } @@ -46,7 +46,7 @@ SentencePieceTokenizerOp::SentencePieceTokenizerOp(const std::string &model_path if (!status.ok()) { std::string err_msg = "SentencePieceTokenizer: "; err_msg += "load vocab model file: " + file_path_ + " failed."; - model_status_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + model_status_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } else { model_status_ = Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/text/sentence_piece_vocab.cc b/mindspore/ccsrc/minddata/dataset/text/sentence_piece_vocab.cc index ac446c4b66..7b4406665b 100644 --- a/mindspore/ccsrc/minddata/dataset/text/sentence_piece_vocab.cc +++ b/mindspore/ccsrc/minddata/dataset/text/sentence_piece_vocab.cc @@ -74,7 +74,7 @@ Status SentencePieceVocab::BuildFromFile(const std::vector &path_li sentencepiece::util::Status s_status = sentencepiece::SentencePieceTrainer::Train(unorder_map, nullptr, &model_proto); if (!s_status.ok()) { std::string err_msg = "SentencePieceVocab: " + std::string(s_status.message()); - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, err_msg); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, err_msg); } 
vocab->get()->set_model_proto(model_proto); diff --git a/mindspore/ccsrc/minddata/dataset/util/allocator.h b/mindspore/ccsrc/minddata/dataset/util/allocator.h index f6e0ef846c..82cf9956fc 100644 --- a/mindspore/ccsrc/minddata/dataset/util/allocator.h +++ b/mindspore/ccsrc/minddata/dataset/util/allocator.h @@ -73,7 +73,7 @@ class Allocator { Status rc = pool_->Allocate(n * sizeof(T), &p); if (rc.IsOk()) { return reinterpret_cast(p); - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { throw std::bad_alloc(); } else { throw std::exception(); @@ -97,7 +97,7 @@ Status MakeUnique(std::unique_ptr> *out, C alloc, // Some of our implementation of allocator (e.g. NumaAllocator) don't throw std::bad_alloc. // So we have to catch for null ptr if (data == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } if (!std::is_arithmetic::value) { for (auto i = 0; i < n; i++) { @@ -114,7 +114,7 @@ Status MakeUnique(std::unique_ptr> *out, C alloc, }; *out = std::unique_ptr>(data, std::bind(deleter, std::placeholders::_1, alloc, n)); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } catch (const std::exception &e) { RETURN_STATUS_UNEXPECTED(e.what()); } diff --git a/mindspore/ccsrc/minddata/dataset/util/arena.cc b/mindspore/ccsrc/minddata/dataset/util/arena.cc index a4194b9632..7d77d3e000 100644 --- a/mindspore/ccsrc/minddata/dataset/util/arena.cc +++ b/mindspore/ccsrc/minddata/dataset/util/arena.cc @@ -50,7 +50,7 @@ Status ArenaImpl::Allocate(size_t n, void **p) { // Round up n to 1K block uint64_t req_size = static_cast(n) + ARENA_WALL_OVERHEAD_SZ; if (req_size > this->get_max_size()) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } uint64_t reqBlk = SizeToBlk(req_size); // Do a first fit search @@ -67,7 +67,7 @@ Status ArenaImpl::Allocate(size_t n, void **p) { MemHdr::setHdr(q, addr, 
reqBlk); *p = get_user_addr(q); } else { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } return Status::OK(); } @@ -240,7 +240,7 @@ Status Arena::Init() { auto ret = cudaHostAlloc(&ptr_, sz, cudaHostAllocDefault); if (ret != cudaSuccess) { MS_LOG(ERROR) << "cudaHostAlloc failed, ret[" << static_cast(ret) << "], " << cudaGetErrorString(ret); - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } impl_ = std::make_unique(ptr_, sz); } else { @@ -252,7 +252,7 @@ Status Arena::Init() { impl_ = std::make_unique(ptr_, sz); #endif } catch (std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } return Status::OK(); } @@ -265,7 +265,7 @@ Status Arena::CreateArena(std::shared_ptr *p_ba, size_t val_in_MB, bool i RETURN_UNEXPECTED_IF_NULL(p_ba); auto ba = new (std::nothrow) Arena(val_in_MB, is_cuda_malloc); if (ba == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } (*p_ba).reset(ba); RETURN_IF_NOT_OK(ba->Init()); @@ -278,7 +278,7 @@ Status Arena::CreateArena(std::shared_ptr *p_ba, size_t val_in_MB) { RETURN_UNEXPECTED_IF_NULL(p_ba); auto ba = new (std::nothrow) Arena(val_in_MB); if (ba == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } (*p_ba).reset(ba); RETURN_IF_NOT_OK(ba->Init()); diff --git a/mindspore/ccsrc/minddata/dataset/util/btree.h b/mindspore/ccsrc/minddata/dataset/util/btree.h index 920e79c985..9e99fbcd5a 100644 --- a/mindspore/ccsrc/minddata/dataset/util/btree.h +++ b/mindspore/ccsrc/minddata/dataset/util/btree.h @@ -72,11 +72,11 @@ class BPlusTree { Status IndexRc2Status(IndexRc rc) { if (rc == IndexRc::kOk) { - return Status(StatusCode::kOK); + return Status(StatusCode::kSuccess); } else if (rc == IndexRc::kOutOfMemory) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } else if (rc == 
IndexRc::kDuplicateKey) { - return Status(StatusCode::kDuplicateKey); + return Status(StatusCode::kMDDuplicateKey); } else { RETURN_STATUS_UNEXPECTED(std::to_string(static_cast(rc))); } diff --git a/mindspore/ccsrc/minddata/dataset/util/buddy.cc b/mindspore/ccsrc/minddata/dataset/util/buddy.cc index bbeeaff0db..64870b04a1 100644 --- a/mindspore/ccsrc/minddata/dataset/util/buddy.cc +++ b/mindspore/ccsrc/minddata/dataset/util/buddy.cc @@ -36,11 +36,11 @@ namespace mindspore { namespace dataset { Status BuddySpace::Init() { if (log_min_ < 0) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "log_min must be positive : " + std::to_string(log_min_)); } if (num_lvl_ < 3 || num_lvl_ > 18) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "num_lvl must be between 3 and 18 : " + std::to_string(num_lvl_)); } min_ = BitLeftShift(1, log_min_); @@ -51,7 +51,7 @@ Status BuddySpace::Init() { try { mem_ = std::make_unique(offset_3); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } (void)memset_s(mem_.get(), offset_3, 0, offset_3); auto ptr = mem_.get(); @@ -70,7 +70,7 @@ Status BuddySpace::Alloc(const uint64_t sz, BSpaceDescriptor *desc, addr_t *p) n *p = addr; return Status::OK(); } else { - return Status(StatusCode::kBuddySpaceFull, "BuddySpace full. Not an error. Please ignore."); + return Status(StatusCode::kMDBuddySpaceFull, "BuddySpace full. Not an error. Please ignore."); } } @@ -126,7 +126,7 @@ std::ostream &operator<<(std::ostream &os, const BuddySpace &s) { BuddySpace::STATE st; s.GetBuddySegState(addr, &sz, &st); os << "Address : " << std::left << std::setw(8) << addr << " Size : " << std::setw(8) << sz << " State : " - << ((st == BuddySpace::STATE::kAlloc) ? "ALLOC" : ((st == BuddySpace::STATE::kFree) ? 
"FREE" : "Unkonwn")) + << ((st == BuddySpace::STATE::kAlloc) ? "ALLOC" : ((st == BuddySpace::STATE::kFree) ? "FREE" : "Unknown")) << "\n"; addr += sz; } @@ -371,7 +371,7 @@ Status BuddySpace::CreateBuddySpace(std::unique_ptr *out_bs, int log Status rc; auto bs = new (std::nothrow) BuddySpace(log_min, num_lvl); if (bs == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } rc = bs->Init(); if (rc.IsOk()) { diff --git a/mindspore/ccsrc/minddata/dataset/util/circular_pool.cc b/mindspore/ccsrc/minddata/dataset/util/circular_pool.cc index cc9fc9a9b8..39dfd7bf76 100644 --- a/mindspore/ccsrc/minddata/dataset/util/circular_pool.cc +++ b/mindspore/ccsrc/minddata/dataset/util/circular_pool.cc @@ -93,7 +93,7 @@ Status CircularPool::Allocate(size_t n, void **p) { auto it = cirIt.Next(); Arena *ba = it->get(); if (ba->get_max_size() < n) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } // If we are asked to move forward the tail if (move_tail) { @@ -105,7 +105,7 @@ Status CircularPool::Allocate(size_t n, void **p) { if (rc.IsOk()) { *p = ptr; break; - } else if (rc.IsOutofMemory()) { + } else if (rc == StatusCode::kMDOutOfMemory) { // Make the next arena a new tail and continue. move_tail = true; } else { @@ -126,7 +126,7 @@ Status CircularPool::Allocate(size_t n, void **p) { // Re-acquire the shared lock and try again lock_s.Downgrade(); } else { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } } } while (ptr == nullptr); @@ -164,7 +164,7 @@ Status CircularPool::Reallocate(void **pp, size_t old_sz, size_t new_sz) { MS_ASSERT(it != mem_segments_.end()); Arena *ba = it->get(); Status rc = ba->Reallocate(pp, old_sz, new_sz); - if (rc.IsOutofMemory()) { + if (rc == StatusCode::kMDOutOfMemory) { // The current arena has no room for the bigger size. 
// Allocate free space from another arena and copy // the content over. @@ -222,7 +222,7 @@ Status CircularPool::CreateCircularPool(std::shared_ptr *out_pool, i } auto pool = new (std::nothrow) CircularPool(max_size_in_gb, arena_size, is_cuda_malloc); if (pool == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } if (createOneArena) { rc = pool->AddOneArena(); @@ -243,7 +243,7 @@ Status CircularPool::CreateCircularPool(std::shared_ptr *out_pool, i } auto pool = new (std::nothrow) CircularPool(max_size_in_gb, arena_size); if (pool == nullptr) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } if (createOneArena) { rc = pool->AddOneArena(); diff --git a/mindspore/ccsrc/minddata/dataset/util/intrp_resource.h b/mindspore/ccsrc/minddata/dataset/util/intrp_resource.h index 00ba0d84bb..b578b84de1 100644 --- a/mindspore/ccsrc/minddata/dataset/util/intrp_resource.h +++ b/mindspore/ccsrc/minddata/dataset/util/intrp_resource.h @@ -39,7 +39,7 @@ class IntrpResource { virtual Status GetInterruptStatus() const { if (Interrupted()) { - return Status(StatusCode::kInterrupted); + return Status(StatusCode::kMDInterrupted); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/util/intrp_service.cc b/mindspore/ccsrc/minddata/dataset/util/intrp_service.cc index 80417ac2a0..17b990b02f 100644 --- a/mindspore/ccsrc/minddata/dataset/util/intrp_service.cc +++ b/mindspore/ccsrc/minddata/dataset/util/intrp_service.cc @@ -39,7 +39,7 @@ Status IntrpService::Register(const std::string &name, IntrpResource *res) { SharedLock stateLck(&state_lock_); // Now double check the state if (ServiceState() != STATE::kRunning) { - return Status(StatusCode::kInterrupted, __LINE__, __FILE__, "Interrupt service is shutting down"); + return Status(StatusCode::kMDInterrupted, __LINE__, __FILE__, "Interrupt service is shutting down"); } else { std::lock_guard lck(mutex_); try { @@ -48,7 +48,7 @@ Status 
IntrpService::Register(const std::string &name, IntrpResource *res) { MS_LOG(DEBUG) << "Register resource with name " << name << ". Thread ID " << ss.str() << "."; auto it = all_intrp_resources_.emplace(name, res); if (it.second == false) { - return Status(StatusCode::kDuplicateKey, __LINE__, __FILE__, name); + return Status(StatusCode::kMDDuplicateKey, __LINE__, __FILE__, name); } high_water_mark_++; } catch (std::exception &e) { diff --git a/mindspore/ccsrc/minddata/dataset/util/memory_pool.cc b/mindspore/ccsrc/minddata/dataset/util/memory_pool.cc index 0e1be9d798..38351bcb76 100644 --- a/mindspore/ccsrc/minddata/dataset/util/memory_pool.cc +++ b/mindspore/ccsrc/minddata/dataset/util/memory_pool.cc @@ -24,7 +24,7 @@ Status DeMalloc(std::size_t s, void **p, bool init_to_zero = false) { } void *q = ::malloc(s); if (q == nullptr) { - return Status(StatusCode::kOutOfMemory, __LINE__, __FILE__); + return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); } else { *p = q; if (init_to_zero) { @@ -36,13 +36,13 @@ Status DeMalloc(std::size_t s, void **p, bool init_to_zero = false) { } // namespace dataset } // namespace mindspore -void *operator new(std::size_t s, mindspore::dataset::Status *rc, std::shared_ptr b) { +void *operator new(std::size_t s, mindspore::Status *rc, std::shared_ptr b) { void *ptr = nullptr; *rc = b->Allocate(s, &ptr); return ptr; } -void *operator new[](std::size_t s, mindspore::dataset::Status *rc, std::shared_ptr b) { +void *operator new[](std::size_t s, mindspore::Status *rc, std::shared_ptr b) { void *ptr = nullptr; *rc = b->Allocate(s, &ptr); return ptr; diff --git a/mindspore/ccsrc/minddata/dataset/util/memory_pool.h b/mindspore/ccsrc/minddata/dataset/util/memory_pool.h index 33e6012626..150bd9ddcb 100644 --- a/mindspore/ccsrc/minddata/dataset/util/memory_pool.h +++ b/mindspore/ccsrc/minddata/dataset/util/memory_pool.h @@ -48,9 +48,9 @@ Status DeMalloc(std::size_t s, void **p, bool); } // namespace dataset } // namespace mindspore -void 
*operator new(std::size_t, mindspore::dataset::Status *, std::shared_ptr); +void *operator new(std::size_t, mindspore::Status *, std::shared_ptr); -void *operator new[](std::size_t, mindspore::dataset::Status *, std::shared_ptr); +void *operator new[](std::size_t, mindspore::Status *, std::shared_ptr); void operator delete(void *, std::shared_ptr); diff --git a/mindspore/ccsrc/minddata/dataset/util/queue.h b/mindspore/ccsrc/minddata/dataset/util/queue.h index 2f2e0c7006..3a62c24626 100644 --- a/mindspore/ccsrc/minddata/dataset/util/queue.h +++ b/mindspore/ccsrc/minddata/dataset/util/queue.h @@ -186,7 +186,8 @@ class QueueList { Status Register(TaskGroup *vg) { if (vg == nullptr) { - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Null task group during QueueList registration."); + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, + "Null task group during QueueList registration."); } for (int i = 0; i < queue_list_.size(); ++i) { RETURN_IF_NOT_OK(queue_list_[i]->Register(vg)); diff --git a/mindspore/ccsrc/minddata/dataset/util/services.h b/mindspore/ccsrc/minddata/dataset/util/services.h index 92692add3e..9600ffd1a9 100644 --- a/mindspore/ccsrc/minddata/dataset/util/services.h +++ b/mindspore/ccsrc/minddata/dataset/util/services.h @@ -92,7 +92,7 @@ class Services { std::unique_ptr svc(*out); hook_.push_back(std::move(svc)); } catch (const std::bad_alloc &e) { - return Status(StatusCode::kOutOfMemory); + return Status(StatusCode::kMDOutOfMemory); } return Status::OK(); } diff --git a/mindspore/ccsrc/minddata/dataset/util/status.cc b/mindspore/ccsrc/minddata/dataset/util/status.cc index a18b3cb3ab..6d0f7bb746 100644 --- a/mindspore/ccsrc/minddata/dataset/util/status.cc +++ b/mindspore/ccsrc/minddata/dataset/util/status.cc @@ -30,155 +30,6 @@ namespace mindspore { namespace dataset { -std::string CodeAsString(const StatusCode c) { - const char *s = nullptr; - if (c == StatusCode::kOK) { - // Optimize the most frequent case - return 
std::string("OK"); - } else { - switch (c) { - case StatusCode::kOutOfMemory: - s = "Out of memory"; - break; - case StatusCode::kInterrupted: - s = "Interrupted system call"; - break; - case StatusCode::kShapeMisMatch: - s = "Shape is incorrect"; - break; - case StatusCode::kNoSpace: - s = "No space left on device"; - break; - case StatusCode::kPyFuncException: - s = "Exception thrown from PyFunc"; - break; - case StatusCode::kDuplicateKey: - s = "Duplicate key"; - break; - case StatusCode::kProfilingError: - s = "Error encountered while profiling"; - break; - case StatusCode::kSyntaxError: - s = "Syntax error"; - break; - case StatusCode::kBuddySpaceFull: - s = "BuddySpace full"; - break; - case StatusCode::kNetWorkError: - s = "Network error"; - break; - case StatusCode::kUnexpectedError: - default: - s = "Unexpected error"; - break; - } - } - return std::string(s); -} - -Status::Status(StatusCode c) noexcept - : code_(c), err_msg_(CodeAsString(c)), line_of_code_(-1), file_name_(""), err_description_("") {} - -Status::Status() noexcept - : code_(StatusCode::kOK), err_msg_(""), line_of_code_(-1), file_name_(""), err_description_("") {} - -Status::~Status() noexcept {} - -Status::Status(const Status &s) - : code_(s.code_), - err_msg_(s.err_msg_), - line_of_code_(s.line_of_code_), - file_name_(s.file_name_), - err_description_(s.err_description_) {} - -Status &Status::operator=(const Status &s) { - if (this == &s) { - return *this; - } - code_ = s.code_; - err_msg_ = s.err_msg_; - line_of_code_ = s.line_of_code_; - file_name_ = s.file_name_; - err_description_ = s.err_description_; - return *this; -} - -Status::Status(Status &&s) noexcept { - code_ = s.code_; - s.code_ = StatusCode::kOK; - line_of_code_ = s.line_of_code_; - s.line_of_code_ = -1; - file_name_ = std::move(s.file_name_); - err_description_ = std::move(s.err_description_); - err_msg_ = std::move(s.err_msg_); -} - -Status &Status::operator=(Status &&s) noexcept { - if (this == &s) { - return *this; - } 
- code_ = s.code_; - s.code_ = StatusCode::kOK; - line_of_code_ = s.line_of_code_; - s.line_of_code_ = -1; - file_name_ = std::move(s.file_name_); - err_description_ = std::move(s.err_description_); - err_msg_ = std::move(s.err_msg_); - return *this; -} - -Status::Status(const StatusCode code, const std::string &msg) - : code_(code), err_msg_(msg), line_of_code_(-1), file_name_(""), err_description_(msg) {} - -Status::Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra) { - code_ = code; - line_of_code_ = line_of_code; - file_name_ = std::string(file_name); - err_description_ = extra; - std::ostringstream ss; -#ifndef ENABLE_ANDROID - ss << "Thread ID " << this_thread::get_id() << " " << CodeAsString(code) << ". "; - if (!extra.empty()) { - ss << extra; - } - ss << "\n"; -#endif - - ss << "Line of code : " << line_of_code << "\n"; - if (file_name != nullptr) { - ss << "File : " << file_name << "\n"; - } - err_msg_ = ss.str(); -} - -std::ostream &operator<<(std::ostream &os, const Status &s) { - os << s.ToString(); - return os; -} - -std::string Status::SetErrDescription(const std::string &err_description) { - err_description_ = err_description; - std::ostringstream ss; -#ifndef ENABLE_ANDROID - ss << "Thread ID " << this_thread::get_id() << " " << CodeAsString(code_) << ". 
"; - if (!err_description_.empty()) { - ss << err_description_; - } - ss << "\n"; -#endif - - if (line_of_code_ > 0 && !file_name_.empty()) { - ss << "Line of code : " << line_of_code_ << "\n"; - ss << "File : " << file_name_ << "\n"; - } - err_msg_ = ss.str(); - return err_msg_; -} - -std::string Status::ToString() const { return err_msg_; } - -StatusCode Status::get_code() const { return code_; } - #if !defined(_WIN32) && !defined(_WIN64) float GetMemoryUsage() { char buf[128] = {0}; diff --git a/mindspore/ccsrc/minddata/dataset/util/status.h b/mindspore/ccsrc/minddata/dataset/util/status.h index 7002b05f10..f7f19b6511 100644 --- a/mindspore/ccsrc/minddata/dataset/util/status.h +++ b/mindspore/ccsrc/minddata/dataset/util/status.h @@ -29,6 +29,8 @@ #include #include +#include "include/api/status.h" + namespace mindspore { namespace dataset { #define RETURN_IF_NOT_OK(_s) \ @@ -39,23 +41,23 @@ namespace dataset { } \ } while (false) -#define RETURN_STATUS_UNEXPECTED(_e) \ - do { \ - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, _e); \ +#define RETURN_STATUS_UNEXPECTED(_e) \ + do { \ + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, _e); \ } while (false) -#define CHECK_FAIL_RETURN_UNEXPECTED(_condition, _e) \ - do { \ - if (!(_condition)) { \ - return Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, _e); \ - } \ +#define CHECK_FAIL_RETURN_UNEXPECTED(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, _e); \ + } \ } while (false) -#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ - do { \ - if (!(_condition)) { \ - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, _e); \ - } \ +#define CHECK_FAIL_RETURN_SYNTAX_ERROR(_condition, _e) \ + do { \ + if (!(_condition)) { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ + } \ } while (false) #define RETURN_UNEXPECTED_IF_NULL(_ptr) \ @@ -73,9 +75,9 @@ namespace dataset { } \ 
} while (false) -#define RETURN_STATUS_SYNTAX_ERROR(_e) \ - do { \ - return Status(StatusCode::kSyntaxError, __LINE__, __FILE__, _e); \ +#define RETURN_STATUS_SYNTAX_ERROR(_e) \ + do { \ + return Status(StatusCode::kMDSyntaxError, __LINE__, __FILE__, _e); \ } while (false) #define RETURN_SECOND_IF_ERROR(_s, _r) \ @@ -87,99 +89,8 @@ namespace dataset { } \ } while (false) -enum class StatusCode : char { - kOK = 0, - kOutOfMemory = 1, - kShapeMisMatch = 2, - kInterrupted = 3, - kNoSpace = 4, - kPyFuncException = 5, - kDuplicateKey = 6, - kPythonInterpreterFailure = 7, - kTDTPushFailure = 8, - kFileNotExist = 9, - kProfilingError = 10, - kBoundingBoxOutOfBounds = 11, - kBoundingBoxInvalidShape = 12, - kSyntaxError = 13, - kTimeOut = 14, - kBuddySpaceFull = 15, - kNetWorkError = 16, - kNotImplementedYet = 17, - // Make this error code the last one. Add new error code above it. - kUnexpectedError = 127 -}; - -std::string CodeAsString(const StatusCode c); - -class Status { - public: - Status() noexcept; - - explicit Status(StatusCode c) noexcept; - - ~Status() noexcept; - - // Copy constructor - Status(const Status &s); - - Status &operator=(const Status &s); - - // Move constructor - Status(Status &&) noexcept; - - Status &operator=(Status &&) noexcept; - - Status(const StatusCode code, const std::string &msg); - - Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = ""); - - // Return a success status - static Status OK() { return Status(StatusCode::kOK); } - - std::string ToString() const; - - StatusCode get_code() const; - - int GetLineOfCode() const { return line_of_code_; } - - std::string SetErrDescription(const std::string &err_description); - - std::string GetErrDescription() const { return err_description_; } - - friend std::ostream &operator<<(std::ostream &os, const Status &s); - - explicit operator bool() const { return (get_code() == StatusCode::kOK); } - - bool operator==(const Status &other) const { return 
(this->get_code() == other.get_code()); } - - bool operator!=(const Status &other) const { return !(*this == other); } - - bool IsOk() const { return (get_code() == StatusCode::kOK); } - - bool IsError() const { return !IsOk(); } - - bool IsOutofMemory() const { return (get_code() == StatusCode::kOutOfMemory); } - - bool IsInterrupted() const { return (get_code() == StatusCode::kInterrupted); } - - bool IsShapeIncorrect() const { return (get_code() == StatusCode::kShapeMisMatch); } - - bool IsNoSpace() const { return (get_code() == StatusCode::kNoSpace); } - - bool IsNetWorkError() const { return (get_code() == StatusCode::kNetWorkError); } - - private: - StatusCode code_; - int line_of_code_; - std::string file_name_; - std::string err_description_; - std::string err_msg_; -}; - #if !defined(_WIN32) && !defined(_WIN64) const float MAX_MEMORY_USAGE_THRESHOLD = 0.95; - float GetMemoryUsage(); #endif } // namespace dataset diff --git a/mindspore/ccsrc/minddata/dataset/util/task.cc b/mindspore/ccsrc/minddata/dataset/util/task.cc index 514b6bb991..6d2c0bcaa0 100644 --- a/mindspore/ccsrc/minddata/dataset/util/task.cc +++ b/mindspore/ccsrc/minddata/dataset/util/task.cc @@ -57,8 +57,8 @@ void Task::operator()() { rc_ = fnc_obj_(); } // Some error codes are ignored, e.g. interrupt. Others we just shutdown the group. 
- if (rc_.IsError() && !rc_.IsInterrupted()) { - if (rc_.get_code() == StatusCode::kNetWorkError) { + if (rc_.IsError() && rc_ != StatusCode::kMDInterrupted) { + if (rc_.StatusCode() == StatusCode::kMDNetWorkError) { MS_LOG(WARNING) << rc_; } else { MS_LOG(ERROR) << rc_; @@ -66,11 +66,11 @@ void Task::operator()() { ShutdownGroup(); } } catch (const std::bad_alloc &e) { - rc_ = Status(StatusCode::kOutOfMemory, __LINE__, __FILE__, e.what()); + rc_ = Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__, e.what()); MS_LOG(ERROR) << rc_; ShutdownGroup(); } catch (const std::exception &e) { - rc_ = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, e.what()); + rc_ = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, e.what()); MS_LOG(ERROR) << rc_; ShutdownGroup(); } @@ -128,7 +128,7 @@ Status Task::Run() { running_ = true; caught_severe_exception_ = false; } catch (const std::exception &e) { - rc = Status(StatusCode::kUnexpectedError, __LINE__, __FILE__, e.what()); + rc = Status(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, e.what()); } } return rc; @@ -200,7 +200,7 @@ void Task::set_task_group(TaskGroup *vg) { task_group_ = vg; } Task::~Task() { task_group_ = nullptr; } Status Task::OverrideInterruptRc(const Status &rc) { - if (rc.IsInterrupted() && this_thread::is_master_thread()) { + if (rc == StatusCode::kMDInterrupted && this_thread::is_master_thread()) { // If we are interrupted, override the return value if this is the master thread. // Master thread is being interrupted mostly because of some thread is reporting error. 
return TaskManager::GetMasterThreadRc(); diff --git a/mindspore/ccsrc/minddata/dataset/util/task_manager.cc b/mindspore/ccsrc/minddata/dataset/util/task_manager.cc index f1d172c84c..d02b3d770e 100644 --- a/mindspore/ccsrc/minddata/dataset/util/task_manager.cc +++ b/mindspore/ccsrc/minddata/dataset/util/task_manager.cc @@ -31,7 +31,7 @@ Status TaskManager::CreateAsyncTask(const std::string &my_name, const std::funct SharedLock stateLck(&state_lock_); // Now double check the state if (ServiceState() == STATE::kStopInProg || ServiceState() == STATE::kStopped) { - return Status(StatusCode::kInterrupted, __LINE__, __FILE__, "TaskManager is shutting down"); + return Status(StatusCode::kMDInterrupted, __LINE__, __FILE__, "TaskManager is shutting down"); } RETURN_IF_NOT_OK(GetFreeTask(my_name, f, task, operator_id)); if (vg == nullptr) { @@ -282,7 +282,7 @@ Status TaskGroup::CreateAsyncTask(const std::string &my_name, const std::functio SharedLock state_lck(&state_lock_); // Now double check the state if (ServiceState() != STATE::kRunning) { - return Status(StatusCode::kInterrupted, __LINE__, __FILE__, "Taskgroup is shutting down"); + return Status(StatusCode::kMDInterrupted, __LINE__, __FILE__, "Taskgroup is shutting down"); } TaskManager &dm = TaskManager::GetInstance(); Task *pTask = nullptr; @@ -292,7 +292,7 @@ Status TaskGroup::CreateAsyncTask(const std::string &my_name, const std::functio { std::unique_lock rcLock(rc_mux_); if (rc_.IsError()) { - return pMytask->IsMasterThread() ? rc_ : Status(StatusCode::kInterrupted); + return pMytask->IsMasterThread() ? 
rc_ : Status(StatusCode::kMDInterrupted); } } RETURN_IF_NOT_OK(dm.CreateAsyncTask(my_name, f, this, &pTask, operator_id)); diff --git a/mindspore/core/CMakeLists.txt b/mindspore/core/CMakeLists.txt index 02be915eea..8a95bc7d5b 100644 --- a/mindspore/core/CMakeLists.txt +++ b/mindspore/core/CMakeLists.txt @@ -7,8 +7,6 @@ if(NOT(CMAKE_SYSTEM_NAME MATCHES "Windows")) endif() message("************ build core ***************") - - file(GLOB_RECURSE CORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "abstract/*.cc" "base/*.cc" diff --git a/mindspore/core/ir/api_tensor_impl.h b/mindspore/core/ir/api_tensor_impl.h new file mode 100644 index 0000000000..f57b134c05 --- /dev/null +++ b/mindspore/core/ir/api_tensor_impl.h @@ -0,0 +1,47 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). + * + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_IR_API_TENSOR_IMPL_H_ +#define MINDSPORE_CORE_IR_API_TENSOR_IMPL_H_ + +#include +#include +#include +#include "include/api/types.h" + +namespace mindspore { +class MSTensor::Impl { + public: + Impl() = default; + virtual ~Impl() = default; + + virtual const std::string &Name() const = 0; + virtual enum DataType DataType() const = 0; + virtual const std::vector &Shape() const = 0; + + virtual std::shared_ptr Data() const = 0; + virtual void *MutableData() = 0; + virtual size_t DataSize() const = 0; + + virtual bool IsDevice() const = 0; + + virtual std::shared_ptr Clone() const = 0; +}; +} // namespace mindspore + +#endif // MINDSPORE_CORE_IR_API_TENSOR_IMPL_H_ diff --git a/mindspore/core/utils/status.cc b/mindspore/core/utils/status.cc new file mode 100644 index 0000000000..e90ce645d8 --- /dev/null +++ b/mindspore/core/utils/status.cc @@ -0,0 +1,127 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). + * + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "include/api/status.h" +#ifndef ENABLE_ANDROID +#include +#endif +#include +#include + +namespace mindspore { +Status::Status(enum StatusCode code, int line_of_code, const char *file_name, const std::string &extra) { + status_code_ = code; + line_of_code_ = line_of_code; + file_name_ = std::string(file_name); + err_description_ = extra; + std::ostringstream ss; +#ifndef ENABLE_ANDROID + ss << "Thread ID " << std::this_thread::get_id() << " " << CodeAsString(code) << ". "; + if (!extra.empty()) { + ss << extra; + } + ss << "\n"; +#endif + + ss << "Line of code : " << line_of_code << "\n"; + if (file_name != nullptr) { + ss << "File : " << file_name << "\n"; + } + status_msg_ = ss.str(); +} + +std::string Status::CodeAsString(enum StatusCode c) { + static std::map info_map = {{kSuccess, "No error occurs."}, + // Core + {kCoreFailed, "Common error code."}, + // MD + {kMDOutOfMemory, "Out of memory"}, + {kMDShapeMisMatch, "Shape is incorrect."}, + {kMDInterrupted, "Interrupted system call"}, + {kMDNoSpace, "No space left on device"}, + {kMDPyFuncException, "Exception thrown from PyFunc"}, + {kMDDuplicateKey, "Duplicate key"}, + {kMDPythonInterpreterFailure, ""}, + {kMDTDTPushFailure, "Unexpected error"}, + {kMDFileNotExist, "Unexpected error"}, + {kMDProfilingError, "Error encountered while profiling"}, + {kMDBoundingBoxOutOfBounds, "Unexpected error"}, + {kMDBoundingBoxInvalidShape, "Unexpected error"}, + {kMDSyntaxError, "Syntax error"}, + {kMDTimeOut, "Unexpected error"}, + {kMDBuddySpaceFull, "BuddySpace full"}, + {kMDNetWorkError, "Network error"}, + {kMDNotImplementedYet, "Unexpected error"}, + {kMDUnexpectedError, "Unexpected error"}, + // ME + {kMEFailed, "Common error code."}, + {kMEInvalidInput, "Invalid input."}, + // MC + {kMCFailed, "Common error code."}, + {kMCDeviceError, "Device error."}, + {kMCInvalidInput, "Invalid input."}, + {kMCInvalidArgs, "Invalid arguments."}, + // Lite + {kLiteError, "Common error code."}, + {kLiteNullptr, 
"NULL pointer returned."}, + {kLiteParamInvalid, "Invalid parameter."}, + {kLiteNoChange, "No change."}, + {kLiteSuccessExit, "No error but exit."}, + {kLiteMemoryFailed, "Fail to create memory."}, + {kLiteNotSupport, "Fail to support."}, + {kLiteThreadPoolError, "Thread pool error."}, + {kLiteOutOfTensorRange, "Failed to check range."}, + {kLiteInputTensorError, "Failed to check input tensor."}, + {kLiteReentrantError, "Exist executor running."}, + {kLiteGraphFileError, "Failed to verify graph file."}, + {kLiteNotFindOp, "Failed to find operator."}, + {kLiteInvalidOpName, "Invalid operator name."}, + {kLiteInvalidOpAttr, "Invalid operator attr."}, + {kLiteOpExecuteFailure, "Failed to execution operator."}, + {kLiteFormatError, "Failed to checking tensor format."}, + {kLiteInferError, "Failed to infer shape."}, + {kLiteInferInvalid, "Invalid infer shape before runtime."}, + {kLiteInputParamInvalid, "Invalid input param by user."}}; + auto iter = info_map.find(c); + return iter == info_map.end() ? "Unknown error" : iter->second; +} + +std::ostream &operator<<(std::ostream &os, const Status &s) { + os << s.ToString(); + return os; +} + +const std::string &Status::SetErrDescription(const std::string &err_description) { + err_description_ = err_description; + std::ostringstream ss; +#ifndef ENABLE_ANDROID + ss << "Thread ID " << std::this_thread::get_id() << " " << CodeAsString(status_code_) << ". 
"; + if (!err_description_.empty()) { + ss << err_description_; + } + ss << "\n"; +#endif + + if (line_of_code_ > 0 && !file_name_.empty()) { + ss << "Line of code : " << line_of_code_ << "\n"; + ss << "File : " << file_name_ << "\n"; + } + status_msg_ = ss.str(); + return status_msg_; +} +} // namespace mindspore diff --git a/mindspore/lite/include/context.h b/mindspore/lite/include/context.h index 70a3c1a3c9..7c673d0454 100644 --- a/mindspore/lite/include/context.h +++ b/mindspore/lite/include/context.h @@ -19,17 +19,11 @@ #include #include +#include "include/api/lite_context.h" #include "include/ms_tensor.h" #include "include/lite_utils.h" namespace mindspore::lite { -/// \brief CpuBindMode defined for holding bind cpu strategy argument. -typedef enum { - NO_BIND = 0, /**< no bind */ - HIGHER_CPU = 1, /**< bind higher cpu first */ - MID_CPU = 2 /**< bind middle cpu first */ -} CpuBindMode; - /// \brief DeviceType defined for holding user's preferred backend. typedef enum { DT_CPU, /**< CPU device type */ diff --git a/mindspore/lite/include/errorcode.h b/mindspore/lite/include/errorcode.h index ff74b02dac..f516e21f34 100644 --- a/mindspore/lite/include/errorcode.h +++ b/mindspore/lite/include/errorcode.h @@ -18,6 +18,8 @@ #define MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ #include +#include +#include "include/api/status.h" namespace mindspore { namespace lite { @@ -67,6 +69,7 @@ constexpr int RET_INPUT_PARAM_INVALID = -600; /**< Invalid input param by user. /// /// \return String of errorcode info. 
std::string GetErrorInfo(STATUS error_code); + } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/include/ms_tensor.h b/mindspore/lite/include/ms_tensor.h index 5cb14ab6fd..f3706af095 100644 --- a/mindspore/lite/include/ms_tensor.h +++ b/mindspore/lite/include/ms_tensor.h @@ -24,11 +24,13 @@ #include #include "ir/dtype/type_id.h" +#ifndef MS_API #ifdef _WIN32 #define MS_API __declspec(dllexport) #else #define MS_API __attribute__((visibility("default"))) #endif +#endif namespace mindspore { namespace tensor { @@ -45,7 +47,7 @@ class MS_API MSTensor { /// \brief Get data type of the MindSpore Lite MSTensor. /// - /// \note TypeId is defined in mindspore/mindspore/core/ir/dtype/type_id.h. Only number types in TypeId enum are + /// \note TypeId is defined in mindspore/mindspore/include/api/type_id.h. Only number types in TypeId enum are /// suitable for MSTensor. /// /// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor. @@ -79,6 +81,17 @@ class MS_API MSTensor { /// /// \return the pointer points to data in MSTensor. virtual void *MutableData() = 0; + + /// \brief Get the name of MSTensor. + /// + /// \return the name of MSTensor. + virtual std::string tensor_name() const = 0; + + /// \brief Set the name of MSTensor. + virtual void set_tensor_name(const std::string name) = 0; + + /// \brief Set the data of MSTensor. + virtual void set_data(void *data) = 0; }; } // namespace tensor /// \brief CallBackParam defined input arguments for callBack function. diff --git a/mindspore/lite/java/java/app/src/main/native/CMakeLists.txt b/mindspore/lite/java/java/app/src/main/native/CMakeLists.txt index e7231258a0..959dfe3e8f 100644 --- a/mindspore/lite/java/java/app/src/main/native/CMakeLists.txt +++ b/mindspore/lite/java/java/app/src/main/native/CMakeLists.txt @@ -33,7 +33,8 @@ set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../..) 
set(LITE_DIR ${TOP_DIR}/mindspore/lite) include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -include_directories(${LITE_DIR}) ## lite include +include_directories(${LITE_DIR}) ## lite include +include_directories(${TOP_DIR}) ## api include include_directories(${TOP_DIR}/mindspore/core/) ## core include include_directories(${LITE_DIR}/build) ## flatbuffers diff --git a/mindspore/lite/minddata/CMakeLists.txt b/mindspore/lite/minddata/CMakeLists.txt index d0d8fcfc94..418ee00089 100644 --- a/mindspore/lite/minddata/CMakeLists.txt +++ b/mindspore/lite/minddata/CMakeLists.txt @@ -9,22 +9,31 @@ include(${TOP_DIR}/cmake/external_libs/jpeg_turbo.cmake) set(MINDDATA_DIR ${CCSRC_DIR}/minddata/dataset) set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -D_LIBCPP_INLINE_VISIBILITY='' -D_LIBCPP_DISABLE_EXTERN_TEMPLATE=1 -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2 -Wno-cpp") -set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -Werror -Wno-return-std-move -Wno-unused-private-field -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override") - +set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer \ + -D_LIBCPP_INLINE_VISIBILITY='' -D_LIBCPP_DISABLE_EXTERN_TEMPLATE=1 -DHALF_ENABLE_CPP11_USER_LITERALS=0 \ + -D_FORTIFY_SOURCE=2 -Wno-cpp") +set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -Werror -Wno-return-std-move -Wno-unused-private-field \ + -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration \ + -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override") set(CMAKE_CXX_FLAGS "$ENV{CXXFLAGS} -I/usr/local/include -std=c++17 -Wall -fPIC") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPTION_CXX_FLAGS}") if(PLATFORM_ARM) - set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Werror 
-Wno-return-std-move -Wno-unused-private-field -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") + set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Werror -Wno-return-std-move -Wno-unused-private-field \ + -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration \ + -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override \ + -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") else() - set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") + set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 \ + -D_FORTIFY_SOURCE=2") endif() -set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") -set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") +set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-non-virtual-dtor") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-sign-compare") @@ -61,7 +70,8 @@ AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/consumers MINDDATA_ENGINE_CONSUMERS_ AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops 
MINDDATA_ENGINE_DATASETOPS_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops/map_op MINDDATA_ENGINE_DATASETOPS_MAPOP_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops/source MINDDATA_ENGINE_DATASETOPS_SOURCE_SRC_FILES) -AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops/source/sampler MINDDATA_ENGINE_DATASETOPS_SOURCE_SAMPLER_SRC_FILES) +AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/datasetops/source/sampler + MINDDATA_ENGINE_DATASETOPS_SOURCE_SAMPLER_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/ir/cache MINDDATA_ENGINE_IR_CACHE_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/ir/datasetops MINDDATA_ENGINE_IR_DATASETOPS_SRC_FILES) AUX_SOURCE_DIRECTORY(${MINDDATA_DIR}/engine/ir/datasetops/source MINDDATA_ENGINE_IR_DATASETOPS_SOURCE_SRC_FILES) @@ -328,10 +338,10 @@ elseif(BUILD_MINDDATA STREQUAL "wrapper") ${MINDDATA_DIR}/core/tensor_helpers.cc ${MINDDATA_DIR}/core/global_context.cc ${MINDDATA_DIR}/core/tensor_row.cc + ${MINDDATA_DIR}/core/de_tensor.cc ${MINDDATA_DIR}/api/vision.cc ${MINDDATA_DIR}/api/execute.cc ${MINDDATA_DIR}/api/transforms.cc - ${MINDDATA_DIR}/api/de_tensor.cc ${MINDDATA_DIR}/util/path.cc ${MINDDATA_DIR}/util/status.cc ${MINDDATA_DIR}/util/data_helper.cc @@ -356,11 +366,19 @@ elseif(BUILD_MINDDATA STREQUAL "wrapper") ${CMAKE_CURRENT_SOURCE_DIR}/wrapper/album_op_android.cc ) + set(MINDSPORE_LITE_CXXAPI_SRC + ${CORE_DIR}/utils/status.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../src/cxx_api/types.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../src/cxx_api/tensor/tensor_impl.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../src/tensor.cc + ) + add_library(minddata-lite SHARED ${MINDDATA_KERNELS_IMAGE_LITE_CV_FILES} ${CMAKE_CURRENT_SOURCE_DIR}/../src/common/log_adapter.cc ${CORE_DIR}/utils/ms_utils.cc ${MINDDATA_TODAPI_SRC} + ${MINDSPORE_LITE_CXXAPI_SRC} ) find_package(Threads REQUIRED) @@ -389,7 +407,7 @@ elseif(BUILD_MINDDATA STREQUAL "lite") list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES "${MINDDATA_DIR}/core/client.cc") list(REMOVE_ITEM 
MINDDATA_KERNELS_SRC_FILES "${MINDDATA_DIR}/kernels/py_func_op.cc") add_library(minddata_eager_mid OBJECT - ${MINDDATA_DIR}/api/de_tensor.cc + ${MINDDATA_DIR}/core/de_tensor.cc ${MINDDATA_DIR}/api/execute.cc ) list(REMOVE_ITEM MINDDATA_CORE_SRC_FILES diff --git a/mindspore/lite/minddata/wrapper/MDToDApi.cc b/mindspore/lite/minddata/wrapper/MDToDApi.cc index a8dcb180b6..cddb71c74c 100644 --- a/mindspore/lite/minddata/wrapper/MDToDApi.cc +++ b/mindspore/lite/minddata/wrapper/MDToDApi.cc @@ -26,9 +26,12 @@ #include "album_op_android.h" //NOLINT #include "minddata/dataset/include/execute.h" +#include "minddata/dataset/include/type_id.h" #include "minddata/dataset/util/path.h" #include "minddata/dataset/include/vision.h" #include "minddata/dataset/util/data_helper.h" +#include "minddata/dataset/core/de_tensor.h" +#include "include/api/types.h" #if defined(__ANDROID__) || defined(ANDROID) #include #include @@ -45,9 +48,9 @@ using mindspore::MsLogLevel::DEBUG; using mindspore::MsLogLevel::ERROR; using mindspore::MsLogLevel::INFO; +using mindspore::Status; using mindspore::dataset::BorderType; using mindspore::dataset::InterpolationMode; -using mindspore::dataset::Status; class MDToDApi { public: @@ -60,11 +63,11 @@ class MDToDApi { public: MDToDApi() : _iter(nullptr), _augs({}), _storage_folder(""), _file_id(-1), _hasBatch(false) { - MS_LOG(INFO) << "MDToDAPI Call constractor"; + MS_LOG(INFO) << "MDToDAPI Call constructor"; } ~MDToDApi() { MS_LOG(INFO) << "MDToDAPI Call destractor"; - // derefernce dataset and iterator + // dereference dataset and iterator _augs.clear(); } }; @@ -257,7 +260,7 @@ extern "C" int MDToDApi_GetNext(MDToDApi *pMDToDApi, MDToDResult_t *results) { return -1; } - // Set defualt + // Set default results->fileid = -1; results->embeddingBuff.DataSize = 0; results->imageBuff.DataSize = 0; @@ -287,12 +290,17 @@ extern "C" int MDToDApi_GetNext(MDToDApi *pMDToDApi, MDToDResult_t *results) { if (orientation > 1) { RotateOperation *p = 
static_cast(pMDToDApi->_augs[i].get()); p->setAngle(orientation); - orientation = 0; // clear oriation filed if allready preformed + orientation = 0; // clear oriation filed if already performed } else { continue; } } - row["image"] = mindspore::dataset::Execute((pMDToDApi->_augs)[i])(std::move(row["image"])); + mindspore::MSTensor image(std::make_shared(row["image"])); + (void)mindspore::dataset::Execute((pMDToDApi->_augs)[i])(image, &image); + mindspore::dataset::Tensor::CreateFromMemory( + mindspore::dataset::TensorShape(image.Shape()), + mindspore::dataset::MSTypeToDEType(static_cast(image.DataType())), + (const uint8_t *)(image.Data().get()), &(row["image"])); if (row["image"] == nullptr) { // nullptr means that the eager mode image processing failed, we fail in this case return -1; @@ -324,7 +332,7 @@ extern "C" int MDToDApi_GetNext(MDToDApi *pMDToDApi, MDToDResult_t *results) { extern "C" int MDToDApi_Stop(MDToDApi *pMDToDApi) { // Manually terminate the pipeline - MS_LOG(INFO) << "pipline stoped"; + MS_LOG(INFO) << "pipline stopped"; return 0; } @@ -338,7 +346,7 @@ extern "C" int MDToDApi_Destroy(MDToDApi *pMDToDApi) { int GetJsonFullFileName(const MDToDApi *pMDToDApi, std::string *filePath) { int64_t file_id = pMDToDApi->_file_id; if (file_id < 0) { - MS_LOG(ERROR) << "Illigal file ID to update: " << file_id << "."; + MS_LOG(ERROR) << "Illegal file ID to update: " << file_id << "."; return -1; } std::string converted = std::to_string(pMDToDApi->_file_id); @@ -407,7 +415,7 @@ extern "C" int MDToDApi_UpdateFloatArray(MDToDApi *pMDToDApi, const char *column auto columnName = std::string(column); std::string file_path; if (0 != GetJsonFullFileName(pMDToDApi, &file_path)) { - MS_LOG(ERROR) << "Faile to updaet " << columnName; + MS_LOG(ERROR) << "Failed to updaet " << columnName; return -1; } MS_LOG(INFO) << "Start Update float Array column: " << columnName << " in file " << file_path; diff --git a/mindspore/lite/src/CMakeLists.txt 
b/mindspore/lite/src/CMakeLists.txt index ec554a5bce..47a52f3fba 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -16,7 +16,20 @@ if(PLATFORM_ARM32 OR PLATFORM_ARM64) endif() endif() +set(API_SRC + ${CORE_DIR}/utils/status.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/cell.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/serialization.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/types.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/lite_context.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/model/model.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/model/model_impl.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/graph/graph.cc + ${CMAKE_CURRENT_SOURCE_DIR}/cxx_api/tensor/tensor_impl.cc +) + set(LITE_SRC + ${API_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/common/file_utils.cc ${CMAKE_CURRENT_SOURCE_DIR}/common/utils.cc ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc diff --git a/mindspore/lite/src/cxx_api/cell.cc b/mindspore/lite/src/cxx_api/cell.cc new file mode 100644 index 0000000000..ec3a3c3bcd --- /dev/null +++ b/mindspore/lite/src/cxx_api/cell.cc @@ -0,0 +1,95 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "include/api/cell.h" +#include "include/api/lite_context.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +class GraphImpl {}; + +std::vector CellBase::operator()(const std::vector &inputs) const { + std::vector empty; + MS_LOG(ERROR) << "Unsupported feature."; + return empty; +} + +ParameterCell::ParameterCell(const ParameterCell &cell) { MS_LOG(ERROR) << "Unsupported feature."; } +ParameterCell &ParameterCell::operator=(const ParameterCell &cell) { + MS_LOG(ERROR) << "Unsupported feature."; + return *this; +} + +ParameterCell::ParameterCell(ParameterCell &&cell) { MS_LOG(ERROR) << "Unsupported feature."; } + +ParameterCell &ParameterCell::operator=(ParameterCell &&cell) { + MS_LOG(ERROR) << "Unsupported feature."; + return *this; +} + +ParameterCell::ParameterCell(const MSTensor &tensor) { MS_LOG(ERROR) << "Unsupported feature."; } + +ParameterCell &ParameterCell::operator=(const MSTensor &tensor) { + MS_LOG(ERROR) << "Unsupported feature."; + return *this; +} + +ParameterCell::ParameterCell(MSTensor &&tensor) : tensor_(tensor) { MS_LOG(ERROR) << "Unsupported feature."; } + +ParameterCell &ParameterCell::operator=(MSTensor &&tensor) { + MS_LOG(ERROR) << "Unsupported feature."; + return *this; +} + +GraphCell::GraphCell(const Graph &graph) : graph_(std::shared_ptr(new (std::nothrow) Graph(graph))) { + if (graph_ == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + } +} + +GraphCell::GraphCell(const std::shared_ptr &graph) : graph_(graph) { + if (graph_ == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + } +} + +GraphCell::GraphCell(Graph &&graph) : graph_(std::shared_ptr(new (std::nothrow) Graph(graph))) { + if (graph_ == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + } +} + +Status GraphCell::Run(const std::vector &inputs, std::vector *outputs) { + MS_LOG(ERROR) << "Unsupported feature."; + return kLiteError; +} + +Status GraphCell::Load() { + MS_LOG(ERROR) << "Unsupported feature."; + return kLiteError; +} + 
+InputAndOutput::InputAndOutput() { MS_LOG(ERROR) << "Unsupported feature."; } + +InputAndOutput::InputAndOutput(const MSTensor &tensor) { MS_LOG(ERROR) << "Unsupported feature."; } +InputAndOutput::InputAndOutput(MSTensor &&tensor) { MS_LOG(ERROR) << "Unsupported feature."; } + +InputAndOutput::InputAndOutput(const std::shared_ptr &cell, const std::vector &prev, + int32_t index) { + MS_LOG(ERROR) << "Unsupported feature."; +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/graph/graph.cc b/mindspore/lite/src/cxx_api/graph/graph.cc new file mode 100644 index 0000000000..cdacd62df5 --- /dev/null +++ b/mindspore/lite/src/cxx_api/graph/graph.cc @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "include/api/graph.h" +#include "include/api/cell.h" +#include "src/cxx_api/graph/graph_data.h" + +namespace mindspore { + +Graph::Graph(const std::shared_ptr &graph_data) : graph_data_(graph_data) {} + +Graph::Graph(std::shared_ptr &&graph_data) : graph_data_(graph_data) {} + +Graph::~Graph() {} + +Graph::Graph(std::nullptr_t) : graph_data_(nullptr) {} + +bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; } + +ModelType Graph::ModelType() const { return graph_data_->ModelType(); } +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/graph/graph_data.h b/mindspore/lite/src/cxx_api/graph/graph_data.h new file mode 100644 index 0000000000..fdd2aec516 --- /dev/null +++ b/mindspore/lite/src/cxx_api/graph/graph_data.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H +#define MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H + +#include +#include +#include +#include +#include "include/api/graph.h" +#include "include/api/types.h" +#include "src/lite_model.h" + +namespace mindspore { +class Graph::GraphData { + public: + GraphData() : lite_model_(nullptr) {} + + explicit GraphData(std::shared_ptr model) : lite_model_(model) {} + + ~GraphData() = default; + + std::shared_ptr lite_model() { return lite_model_; } + + enum ModelType ModelType() const { return kMindIR; } + + private: + std::shared_ptr lite_model_; +}; +} // namespace mindspore +#endif // MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H diff --git a/mindspore/lite/src/cxx_api/lite_context.cc b/mindspore/lite/src/cxx_api/lite_context.cc new file mode 100644 index 0000000000..ac1aa80aa9 --- /dev/null +++ b/mindspore/lite/src/cxx_api/lite_context.cc @@ -0,0 +1,303 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "include/api/lite_context.h" +#include +#include +#include +#include "include/api/types.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +constexpr char kVendorName[] = "vendor_name"; +constexpr char kThreadNum[] = "thread_name"; +constexpr char kAllocator[] = "allocator"; +constexpr char kCPU[] = "cpu"; +constexpr char kCPUEanbleFp16[] = "cpu_enable_fp16"; +constexpr char kCPUBindMode[] = "cpu_bind_mode"; +constexpr char kGPU[] = "gpu"; +constexpr char kGPUEanbleFp16[] = "gpu_enable_fp16"; +constexpr char kNPU[] = "npu"; +constexpr char kNPUFrequency[] = "npu_frequency"; + +void Context::Clear(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + context->context_.clear(); +} + +void Context::SetAsDefault(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + context->context_.clear(); + context->context_.emplace(kCPU, true); +} + +void Context::SetVendorName(const std::shared_ptr &context, const std::string &name) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kVendorName); + if (iter != context->context_.end()) { + iter->second = name; + } else { + context->context_.emplace(kVendorName, name); + } +} + +std::string Context::GetVendorName(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return std::string(); + } + auto iter = context->context_.find(kVendorName); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return std::string(); +} + +void Context::SetThreadNum(const std::shared_ptr &context, int num) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kThreadNum); + if (iter != context->context_.end()) { + iter->second = num; + } else { + 
context->context_.emplace(kThreadNum, num); + } +} + +int Context::GetThreadNum(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return 0; + } + auto iter = context->context_.find(kThreadNum); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return 2; +} + +void Context::SetAllocator(const std::shared_ptr &context, std::shared_ptr alloc) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kAllocator); + if (iter != context->context_.end()) { + iter->second = alloc; + } else { + context->context_.emplace(kAllocator, alloc); + } +} + +std::shared_ptr Context::GetAllocator(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return nullptr; + } + auto iter = context->context_.find(kAllocator); + if (iter != context->context_.end()) { + return std::any_cast>(iter->second); + } + return nullptr; +} + +void Context::ConfigCPU(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kCPU); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kCPU, conf); + } +} + +bool Context::IfCPUEnabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kCPU); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::ConfigCPUFp16(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kCPUEanbleFp16); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kCPUEanbleFp16, conf); + } +} + +bool 
Context::IfCPUFp16Enabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kCPUEanbleFp16); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::SetCPUBindMode(const std::shared_ptr &context, lite::CpuBindMode mode) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kCPUBindMode); + if (iter != context->context_.end()) { + iter->second = mode; + } else { + context->context_.emplace(kCPUBindMode, mode); + } +} + +lite::CpuBindMode Context::GetCPUBindMode(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return lite::NO_BIND; + } + auto iter = context->context_.find(kCPUBindMode); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return lite::MID_CPU; +} + +void Context::ConfigGPU(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kGPU); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kGPU, conf); + } +} + +bool Context::IfGPUEnabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kGPU); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::ConfigGPUFp16(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kGPUEanbleFp16); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kGPUEanbleFp16, conf); + } +} + +bool 
Context::IfGPUFp16Enabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kGPUEanbleFp16); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::ConfigNPU(const std::shared_ptr &context, bool conf) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kNPU); + if (iter != context->context_.end()) { + iter->second = conf; + } else { + context->context_.emplace(kNPU, conf); + } +} + +bool Context::IfNPUEnabled(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return false; + } + auto iter = context->context_.find(kNPU); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return false; +} + +void Context::SetNPUFrequency(const std::shared_ptr &context, int freq) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return; + } + auto iter = context->context_.find(kNPUFrequency); + if (iter != context->context_.end()) { + iter->second = freq; + } else { + context->context_.emplace(kNPUFrequency, freq); + } +} + +int Context::GetNPUFrequency(const std::shared_ptr &context) { + if (context == nullptr) { + MS_LOG(ERROR) << "Context is nullptr."; + return 0; + } + auto iter = context->context_.find(kNPUFrequency); + if (iter != context->context_.end()) { + return std::any_cast(iter->second); + } + return 3; +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/model/model.cc b/mindspore/lite/src/cxx_api/model/model.cc new file mode 100644 index 0000000000..7d564c2410 --- /dev/null +++ b/mindspore/lite/src/cxx_api/model/model.cc @@ -0,0 +1,98 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "include/api/model.h" +#include "include/api/lite_context.h" +#include "src/cxx_api/model/model_impl.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +Status Model::Build() { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return kLiteNullptr; + } + return impl_->Build(); +} + +Status Model::Resize(const std::vector &inputs, const std::vector> &dims) { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return kLiteNullptr; + } + return impl_->Resize(inputs, dims); +} + +Status Model::Predict(const std::vector &inputs, std::vector *outputs) { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return kLiteNullptr; + } + return impl_->Predict(inputs, outputs); +} + +Model::Model(const GraphCell &graph, const std::shared_ptr &model_context) { + impl_ = std::shared_ptr(new (std::nothrow) ModelImpl()); + if (impl_ == nullptr || graph.GetGraph() == nullptr) { + MS_LOG(ERROR) << "Invalid graph."; + } else { + if (model_context == nullptr) { + MS_LOG(INFO) << "Invalid context, use default context."; + auto context = std::shared_ptr(new (std::nothrow) Context()); + Context::SetAsDefault(context); + impl_->SetContext(context); + } else { + impl_->SetContext(model_context); + } + auto new_graph_cell = std::shared_ptr(new (std::nothrow) GraphCell(graph)); + if (new_graph_cell != nullptr) { + impl_->SetGraphCell(new_graph_cell); + } else { + MS_LOG(ERROR) << "New graphcell failed."; + } + } +} + +Model::Model(const std::vector 
&network, const std::shared_ptr &model_context) { + MS_LOG(ERROR) << "Unsupported feature."; +} + +Model::~Model() {} + +bool Model::CheckModelSupport(const std::string &device_type, ModelType) { + MS_LOG(ERROR) << "Unsupported feature."; + return false; +} + +std::vector Model::GetInputs() { + std::vector empty; + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return empty; + } + return impl_->GetInputs(); +} + +std::vector Model::GetOutputs() { + std::vector empty; + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Model implement is null."; + return empty; + } + return impl_->GetOutputs(); +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/model/model_impl.cc b/mindspore/lite/src/cxx_api/model/model_impl.cc new file mode 100644 index 0000000000..989c8e3897 --- /dev/null +++ b/mindspore/lite/src/cxx_api/model/model_impl.cc @@ -0,0 +1,241 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/cxx_api/model/model_impl.h" +#include +#include +#include +#include "include/api/types.h" +#include "include/api/lite_context.h" +#include "include/lite_session.h" +#include "include/context.h" +#include "src/lite_model.h" +#include "src/runtime/allocator.h" +#include "src/cxx_api/utils.h" +#include "src/cxx_api/graph/graph_data.h" +#include "src/cxx_api/tensor/tensor_impl.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; + +Status ModelImpl::Build() { + MS_LOG(DEBUG) << "Start build model."; + if (graph_cell_ == nullptr || graph_cell_->GetGraph() == nullptr || graph_cell_->GetGraph()->graph_data_ == nullptr) { + MS_LOG(ERROR) << "Graph cell is invalid."; + return kLiteNullptr; + } + auto model = graph_cell_->GetGraph()->graph_data_->lite_model(); + if (model == nullptr) { + MS_LOG(ERROR) << "Lite model is nullptr."; + return kLiteNullptr; + } + if (model->buf == nullptr) { + MS_LOG(ERROR) << "Lite model has been freed."; + return kLiteError; + } + if (session_ != nullptr) { + MS_LOG(DEBUG) << "Model has been already built."; + return kSuccess; + } + if (context_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return kLiteNullptr; + } + lite::Context model_context; + model_context.allocator = Context::GetAllocator(context_); + if (model_context.allocator == nullptr) { + model_context.allocator = lite::Allocator::Create(); + if (model_context.allocator == nullptr) { + MS_LOG(ERROR) << "Create Allocator failed."; + return kLiteNullptr; + } + MS_LOG(DEBUG) << "Set new allocator."; + Context::SetAllocator(context_, model_context.allocator); + } + model_context.vendor_name_ = Context::GetVendorName(context_); + model_context.thread_num_ = Context::GetThreadNum(context_); + model_context.device_list_.clear(); + if (Context::IfCPUEnabled(context_) && Context::IfGPUEnabled(context_) && Context::IfNPUEnabled(context_)) { + MS_LOG(INFO) << "CPU/GPU/NPU cannot be 
enabled at the same time."; + } + if (!Context::IfCPUEnabled(context_)) { + MS_LOG(INFO) << "CPU is forced to be enabled."; + } + lite::DeviceInfo cpu_info = { + .cpu_device_info_ = {Context::IfCPUFp16Enabled(context_), Context::GetCPUBindMode(context_)}}; + model_context.device_list_.push_back({lite::DT_CPU, cpu_info}); + if (Context::IfGPUEnabled(context_)) { + lite::DeviceInfo gpu_info = {.gpu_device_info_ = {Context::IfGPUFp16Enabled(context_)}}; + model_context.device_list_.push_back({lite::DT_GPU, gpu_info}); + } + if (Context::IfNPUEnabled(context_)) { + lite::DeviceInfo npu_info = {.npu_device_info_ = {Context::GetNPUFrequency(context_)}}; + model_context.device_list_.push_back({lite::DT_NPU, npu_info}); + } + auto session = std::shared_ptr(session::LiteSession::CreateSession(&model_context)); + if (session == nullptr) { + MS_LOG(ERROR) << "Allocate session failed."; + return kLiteNullptr; + } + auto ret = session->CompileGraph(model.get()); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Build model failed."; + return static_cast(ret); + } + session_.swap(session); + model->Free(); + MS_LOG(DEBUG) << "Build model success."; + return kSuccess; +} + +Status ModelImpl::Predict(const std::vector &inputs, std::vector *outputs) { + if (session_ == nullptr) { + MS_LOG(ERROR) << "Run graph failed."; + return kLiteError; + } + auto input_tensors = session_->GetInputs(); + if (input_tensors.empty()) { + MS_LOG(ERROR) << "Failed to get input tensor."; + return kLiteError; + } + if (input_tensors.size() != inputs.size()) { + MS_LOG(ERROR) << "Wrong input size."; + return kLiteError; + } + std::vector old_data; + for (size_t i = 0; i < inputs.size(); i++) { + auto input = input_tensors.at(i); + auto user_input = inputs.at(i); + if (user_input.Name() != input->tensor_name()) { + MS_LOG(WARNING) << "Tensor " << user_input.Name() << " has a different name from input" << input->tensor_name() + << "."; + } + old_data.push_back(input->MutableData()); + if 
(user_input.MutableData() != input->MutableData()) { + if (input->Size() != user_input.DataSize()) { + for (size_t j = 0; j < old_data.size(); j++) { + input_tensors.at(j)->set_data(old_data.at(j)); + } + MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has wrong data size."; + return kLiteInputTensorError; + } + if (user_input.impl_->need_copy()) { + ::memcpy(input->MutableData(), user_input.MutableData(), input->Size()); + } else { + input->set_data(user_input.MutableData()); + } + } + } + auto ret = session_->RunGraph(); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Run graph failed."; + return static_cast(ret); + } + MS_LOG(DEBUG) << "Run graph success."; + for (size_t i = 0; i < old_data.size(); i++) { + input_tensors.at(i)->set_data(old_data.at(i)); + } + auto res = GetOutputs(); + if (res.empty()) { + MS_LOG(DEBUG) << "Empty outputs."; + return kLiteError; + } + outputs->insert(outputs->end(), res.begin(), res.end()); + return kSuccess; +} + +std::vector ModelImpl::GetInputs() { + std::vector empty; + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return empty; + } + std::vector res; + auto inputs = session_->GetInputs(); + for (auto input : inputs) { + auto impl = std::shared_ptr(new (std::nothrow) MSTensor::Impl(input)); + if (impl == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + auto tensor = MSTensor(impl); + if (tensor == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + res.push_back(tensor); + } + return res; +} + +std::vector ModelImpl::GetOutputs() { + std::vector empty; + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return empty; + } + std::vector res; + auto names = session_->GetOutputTensorNames(); + auto outputs = session_->GetOutputs(); + for (auto name : names) { + auto impl = std::shared_ptr(new (std::nothrow) MSTensor::Impl(outputs[name])); + if (impl == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + auto 
tensor = MSTensor(impl); + if (tensor == nullptr) { + MS_LOG(ERROR) << "Create tensor failed."; + return empty; + } + res.push_back(tensor); + } + return res; +} + +Status ModelImpl::Resize(const std::vector &inputs, const std::vector> &dims) { + if (session_ == nullptr) { + MS_LOG(ERROR) << "Session is null."; + return kLiteNullptr; + } + if (inputs.size() != dims.size()) { + MS_LOG(ERROR) << "The size of inputs is not equal to the size of dims."; + return kLiteParamInvalid; + } + std::vector inner_input; + for (auto input : inputs) { + if (input.impl_ == nullptr || input.impl_->lite_tensor() == nullptr) { + MS_LOG(ERROR) << "Input tensor " << input.Name() << " is null."; + return kLiteInputTensorError; + } + inner_input.push_back(input.impl_->lite_tensor()); + } + std::vector> truncated_shape; + for (size_t i = 0; i < inner_input.size(); i++) { + std::vector tmp = TruncateShape(dims.at(i), inner_input.at(i)->data_type(), inner_input.at(i)->Size()); + if (tmp.empty()) { + MS_LOG(ERROR) << "Input dims[" << i << "]is invalid."; + return kLiteParamInvalid; + } + truncated_shape.push_back(tmp); + } + auto ret = session_->Resize(inner_input, truncated_shape); + return static_cast(ret); +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/model/model_impl.h b/mindspore/lite/src/cxx_api/model/model_impl.h new file mode 100644 index 0000000000..0309f1e867 --- /dev/null +++ b/mindspore/lite/src/cxx_api/model/model_impl.h @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_IMPL_H +#define MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_IMPL_H +#include +#include +#include +#include +#include +#include +#include +#include "include/api/model.h" +#include "include/api/lite_context.h" +#include "include/api/cell.h" +#include "include/lite_session.h" + +namespace mindspore { +class ModelImpl { + public: + ModelImpl() : graph_cell_(nullptr), session_(nullptr), context_(nullptr) {} + ~ModelImpl() = default; + + Status Build(); + Status Resize(const std::vector &inputs, const std::vector> &dims); + + Status Predict(const std::vector &inputs, std::vector *outputs); + + std::vector GetInputs(); + std::vector GetOutputs(); + + static bool CheckModelSupport(const std::string &device_type, ModelType model_type); + + private: + friend class Model; + std::shared_ptr graph_cell_; + std::shared_ptr session_; + std::shared_ptr context_; + void SetGraphCell(const std::shared_ptr &graph_cell) { graph_cell_ = graph_cell; } + void SetContext(const std::shared_ptr &context) { context_ = context; } +}; +} // namespace mindspore + +#endif // MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_IMPL_H diff --git a/mindspore/lite/src/cxx_api/serialization.cc b/mindspore/lite/src/cxx_api/serialization.cc new file mode 100644 index 0000000000..660cf107ac --- /dev/null +++ b/mindspore/lite/src/cxx_api/serialization.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "include/api/serialization.h" +#include +#include +#include +#include "include/api/graph.h" +#include "include/api/lite_context.h" +#include "include/api/types.h" +#include "include/model.h" +#include "include/ms_tensor.h" +#include "src/cxx_api/graph/graph_data.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +Graph Serialization::LoadModel(const void *model_data, size_t data_size, ModelType model_type) { + if (model_type != kMindIR) { + MS_LOG(ERROR) << "Unsupported IR."; + return Graph(nullptr); + } + auto model = std::shared_ptr(lite::Model::Import(static_cast(model_data), data_size)); + if (model == nullptr) { + MS_LOG(ERROR) << "New model failed."; + return Graph(nullptr); + } + auto graph_data = std::shared_ptr(new (std::nothrow) Graph::GraphData(model)); + if (graph_data == nullptr) { + MS_LOG(ERROR) << "New graph data failed."; + return Graph(nullptr); + } + Graph graph = Graph(graph_data); + return graph; +} + +Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { + MS_LOG(ERROR) << "Unsupported Feature."; + return Graph(nullptr); +} + +Status Serialization::LoadCheckPoint(const std::string &ckpt_file, std::map *parameters) { + MS_LOG(ERROR) << "Unsupported feature."; + return kMEFailed; +} + +Status Serialization::SetParameters(const std::map ¶meters, Model *model) { + MS_LOG(ERROR) << "Unsupported feature."; + return kMEFailed; +} + +Status Serialization::ExportModel(const Model &model, ModelType model_type, Buffer *model_data) { + MS_LOG(ERROR) << "Unsupported feature."; + 
return kMEFailed; +} + +Status Serialization::ExportModel(const Model &model, ModelType model_type, const std::string &model_file) { + MS_LOG(ERROR) << "Unsupported feature."; + return kMEFailed; +} +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc b/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc new file mode 100644 index 0000000000..41a430bce5 --- /dev/null +++ b/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "src/cxx_api/tensor/tensor_impl.h" +#include +#include +#include +#include +#include +#include +#include "include/api/types.h" +#include "include/api/status.h" +#include "src/cxx_api/utils.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +MSTensor::Impl::Impl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) { + std::vector truncated_shape = TruncateShape(shape, static_cast(type), data_len); + if (!truncated_shape.empty()) { + lite_tensor_ = new (std::nothrow) lite::Tensor(name, static_cast(type), truncated_shape, data); + } else { + lite_tensor_ = nullptr; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/tensor/tensor_impl.h b/mindspore/lite/src/cxx_api/tensor/tensor_impl.h new file mode 100644 index 0000000000..ca248e9e85 --- /dev/null +++ b/mindspore/lite/src/cxx_api/tensor/tensor_impl.h @@ -0,0 +1,140 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "include/api/types.h" +#include "include/api/status.h" +#include "include/ms_tensor.h" +#include "src/tensor.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +class MSTensor::Impl { + public: + Impl() {} + ~Impl() = default; + explicit Impl(tensor::MSTensor *tensor) : lite_tensor_(tensor) { + if (tensor != nullptr) { + tensor_name_ = tensor->tensor_name(); + } + } + + bool operator==(std::nullptr_t) const { return lite_tensor_ == nullptr; } + + Impl(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len); + + const std::string &Name() const { + static std::string empty = ""; + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return empty; + } + return tensor_name_; + } + + enum DataType DataType() const { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return DataType::kTypeUnknown; + } + return static_cast(lite_tensor_->data_type()); + } + + int64_t ElementNum() const { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return -1; + } + return static_cast(lite_tensor_->ElementsNum()); + } + + const std::vector &Shape() { + static std::vector empty; + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return empty; + } + auto shape = lite_tensor_->shape(); + shape_.resize(shape.size()); + std::transform(shape.begin(), shape.end(), shape_.begin(), [](int c) { return static_cast(c); }); + return shape_; + } + + std::shared_ptr Data() const { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return nullptr; + } + + if (DataSize() == 0) { + MS_LOG(ERROR) << "Invalid data size."; + return nullptr; + } + + return std::shared_ptr(lite_tensor_->MutableData(), [](const void *) {}); + } + + void *MutableData() { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return nullptr; + } + return 
lite_tensor_->MutableData(); + } + size_t DataSize() const { + if (lite_tensor_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor."; + return 0; + } + return lite_tensor_->Size(); + } + + bool IsDevice() const { return false; } + + std::shared_ptr Clone() const { + MS_LOG(ERROR) << "Unsupported feature."; + return nullptr; + } + + tensor::MSTensor *lite_tensor() { return lite_tensor_; } + + Status set_lite_tensor(tensor::MSTensor *tensor) { + if (tensor == nullptr) { + MS_LOG(ERROR) << "Tensor to set is null."; + return kLiteNullptr; + } + lite_tensor_ = tensor; + return kSuccess; + } + + void set_need_copy(bool need_copy) { need_copy_ = need_copy; } + + bool need_copy() { return need_copy_; } + + private: + tensor::MSTensor *lite_tensor_; + std::string tensor_name_; + std::vector shape_; + bool need_copy_ = true; +}; + +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/types.cc b/mindspore/lite/src/cxx_api/types.cc new file mode 100644 index 0000000000..876780459b --- /dev/null +++ b/mindspore/lite/src/cxx_api/types.cc @@ -0,0 +1,199 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "include/api/types.h" +#include +#include +#include +#include "include/api/status.h" +#include "src/cxx_api/tensor/tensor_impl.h" +#include "src/tensor.h" +#include "src/common/log_adapter.h" + +namespace mindspore { + +class Buffer::Impl { + public: + Impl() : data_() { MS_LOG(ERROR) << "Unsupported feature."; } + ~Impl() = default; + Impl(const void *data, size_t data_len) { MS_LOG(ERROR) << "Unsupported feature."; } + + const void *Data() const { + MS_LOG(ERROR) << "Unsupported feature."; + return nullptr; + } + void *MutableData() { + MS_LOG(ERROR) << "Unsupported feature."; + return nullptr; + } + size_t DataSize() const { + MS_LOG(ERROR) << "Unsupported feature."; + return 0; + } + + bool ResizeData(size_t data_len) { + MS_LOG(ERROR) << "Unsupported feature."; + return false; + } + + bool SetData(const void *data, size_t data_len) { + MS_LOG(ERROR) << "Unsupported feature."; + return false; + } + + protected: + std::vector data_; +}; + +MSTensor::MSTensor() : impl_(std::make_shared()) {} +MSTensor::MSTensor(std::nullptr_t) : impl_(nullptr) {} +MSTensor::MSTensor(const std::shared_ptr &impl) : impl_(impl) {} +MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : impl_(std::make_shared(name, type, shape, data, data_len)) {} +MSTensor::~MSTensor() = default; + +bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; } + +MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + auto impl = std::make_shared(name, type, shape, data, data_len); + if (impl == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + return MSTensor(nullptr); + } + return MSTensor(impl); +} + +MSTensor MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + auto tensor = 
CreateTensor(name, type, shape, data, data_len); + if (tensor == nullptr) { + return MSTensor(nullptr); + } + tensor.impl_->set_need_copy(false); + return tensor; +} + +MSTensor MSTensor::Clone() const { + MSTensor ret; + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor implement."; + ret.impl_ = nullptr; + return ret; + } + ret.impl_ = impl_->Clone(); + return ret; +} + +const std::string &MSTensor::Name() const { + static std::string empty = ""; + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor implement."; + return empty; + } + return impl_->Name(); +} + +int64_t MSTensor::ElementNum() const { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor implement."; + return -1; + } + return impl_->ElementNum(); +} + +enum DataType MSTensor::DataType() const { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor implement."; + return DataType::kTypeUnknown; + } + return impl_->DataType(); +} + +const std::vector &MSTensor::Shape() const { + static std::vector empty; + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor implement."; + return empty; + } + return impl_->Shape(); +} + +std::shared_ptr MSTensor::Data() const { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor implement."; + return nullptr; + } + return impl_->Data(); +} + +void *MSTensor::MutableData() { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor implement."; + return nullptr; + } + return impl_->MutableData(); +} + +size_t MSTensor::DataSize() const { + if (impl_ == nullptr) { + MS_LOG(ERROR) << "Invalid tensor implement."; + return 0; + } + return impl_->DataSize(); +} + +bool MSTensor::IsDevice() const { + MS_LOG(ERROR) << "Unsupported feature."; + return false; +} + +Buffer::Buffer() : impl_(std::make_shared()) { MS_LOG(ERROR) << "Unsupported feature."; } +Buffer::Buffer(const void *data, size_t data_len) : impl_(std::make_shared(data, data_len)) { + MS_LOG(ERROR) << "Unsupported feature."; +} +Buffer::~Buffer() = default; + 
+Buffer Buffer::Clone() const { + MS_LOG(ERROR) << "Unsupported feature."; + return Buffer(); +} + +const void *Buffer::Data() const { + MS_LOG(ERROR) << "Unsupported feature."; + return nullptr; +} + +void *Buffer::MutableData() { + MS_LOG(ERROR) << "Unsupported feature."; + return nullptr; +} + +size_t Buffer::DataSize() const { + MS_LOG(ERROR) << "Unsupported feature."; + return 0; +} + +bool Buffer::ResizeData(size_t data_len) { + MS_LOG(ERROR) << "Unsupported feature."; + return false; +} + +bool Buffer::SetData(const void *data, size_t data_len) { + MS_LOG(ERROR) << "Unsupported feature."; + return false; +} +} // namespace mindspore diff --git a/mindspore/lite/src/cxx_api/utils.h b/mindspore/lite/src/cxx_api/utils.h new file mode 100644 index 0000000000..03a6c5a5c5 --- /dev/null +++ b/mindspore/lite/src/cxx_api/utils.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "src/tensor.h" + +namespace mindspore { +static std::vector TruncateShape(const std::vector &shape, enum TypeId type, size_t data_len) { + std::vector empty; + std::vector truncated_shape; + size_t element_size = lite::DataTypeSize(type); + for (auto i : shape) { + if (i < 0 || i > INT_MAX || element_size > INT_MAX / static_cast(i)) { + MS_LOG(ERROR) << "Invalid shape."; + return empty; + } else { + element_size *= static_cast(i); + truncated_shape.push_back(static_cast(i)); + } + } + if (element_size != data_len) { + MS_LOG(ERROR) << "Invalid data size."; + return empty; + } + return truncated_shape; +} + +} // namespace mindspore diff --git a/mindspore/lite/src/tensor.cc b/mindspore/lite/src/tensor.cc index 0fe2ae7b30..a4a23c503c 100644 --- a/mindspore/lite/src/tensor.cc +++ b/mindspore/lite/src/tensor.cc @@ -29,6 +29,11 @@ namespace lite { Tensor::Tensor(const TypeId data_type, std::vector shape, const schema::Format &format, Category category) : data_type_(data_type), shape_(std::move(shape)), format_(format), category_(category) {} +Tensor::Tensor(const std::string &name, enum TypeId type, const std::vector &shape, const void *data) + : tensor_name_(name), data_type_(type), shape_(std::move(shape)) { + data_ = const_cast(data); +} + int Tensor::CopyTensorData(const Tensor &src_tensor, Tensor *dst_tensor) { if (dst_tensor == nullptr) { MS_LOG(ERROR) << "dst_tensor is nullptr"; diff --git a/mindspore/lite/src/tensor.h b/mindspore/lite/src/tensor.h index 5f72854980..544ebf8e20 100644 --- a/mindspore/lite/src/tensor.h +++ b/mindspore/lite/src/tensor.h @@ -56,6 +56,8 @@ class Tensor : public mindspore::tensor::MSTensor { Tensor(TypeId data_type, std::vector shape, const schema::Format &format = schema::Format::Format_NHWC, Category category = VAR); + Tensor(const std::string &name, enum TypeId type, const std::vector &shape, const void *data); + Tensor(const Tensor &tensor) = delete; Tensor(Tensor &&other) = delete; @@ -72,9 +74,9 
@@ class Tensor : public mindspore::tensor::MSTensor { virtual bool operator==(const Tensor &tensor); - void set_tensor_name(std::string name) { tensor_name_ = name; } + void set_tensor_name(std::string name) override { tensor_name_ = name; } - std::string tensor_name() const { return tensor_name_; } + std::string tensor_name() const override { return tensor_name_; } TypeId data_type() const override { return data_type_; } @@ -117,7 +119,7 @@ class Tensor : public mindspore::tensor::MSTensor { return data_; } - virtual void set_data(void *data) { this->data_ = data; } + void set_data(void *data) override { this->data_ = data; } Category category() const { return this->category_; } diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc deleted file mode 100644 index a6cfa9a91a..0000000000 --- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include -#include -#include "./securec.h" -#include "common/common_test.h" -#include "gtest/gtest.h" - -#include "mindspore/ccsrc/minddata/dataset/core/data_type.h" -#include "mindspore/ccsrc/minddata/dataset/core/tensor.h" -#include "mindspore/ccsrc/minddata/dataset/core/tensor_shape.h" -#include "mindspore/ccsrc/minddata/dataset/include/de_tensor.h" -#include "mindspore/lite/src/common/log_adapter.h" -#include "mindspore/lite/src/tensor.h" - -using MSTensor = mindspore::tensor::MSTensor; -using DETensor = mindspore::tensor::DETensor; -using LiteTensor = mindspore::lite::Tensor; -using Tensor = mindspore::dataset::Tensor; -using DataType = mindspore::dataset::DataType; -using TensorShape = mindspore::dataset::TensorShape; - -class MindDataTestTensorDE : public mindspore::CommonTest { - public: - MindDataTestTensorDE() {} -}; - -TEST_F(MindDataTestTensorDE, MSTensorBasic) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - ASSERT_EQ(t == std::dynamic_pointer_cast(ms_tensor)->tensor(), true); -} - -TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - std::shared_ptr lite_ms_tensor = - std::shared_ptr(std::dynamic_pointer_cast(ms_tensor)->ConvertToLiteTensor()); - // check if the lite_ms_tensor is the derived LiteTensor - LiteTensor *lite_tensor = static_cast(lite_ms_tensor.get()); - ASSERT_EQ(lite_tensor != nullptr, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorShape) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - ASSERT_EQ(ms_tensor->DimensionSize(0) == 2, true); - ASSERT_EQ(ms_tensor->DimensionSize(1) == 3, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorSize) { - std::shared_ptr t = 
std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - ASSERT_EQ(ms_tensor->ElementsNum() == 6, true); - ASSERT_EQ(ms_tensor->Size() == 24, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorDataType) { - std::shared_ptr t = std::make_shared(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32)); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - ASSERT_EQ(ms_tensor->data_type() == mindspore::TypeId::kNumberTypeFloat32, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorMutableData) { - std::vector x = {2.5, 2.5, 2.5, 2.5}; - std::shared_ptr t; - Tensor::CreateFromVector(x, TensorShape({2, 2}), &t); - auto ms_tensor = std::shared_ptr(new DETensor(t)); - float *data = static_cast(ms_tensor->MutableData()); - std::vector tensor_vec(data, data + ms_tensor->ElementsNum()); - ASSERT_EQ(x == tensor_vec, true); -} - -TEST_F(MindDataTestTensorDE, MSTensorCreateFromMemory) { - std::vector x = {2.5, 2.5, 2.5, 2.5}; - auto mem_tensor = DETensor::CreateFromMemory(mindspore::TypeId::kNumberTypeFloat32, {2, 2}, &x[0]); - ASSERT_EQ(mem_tensor->data_type() == mindspore::TypeId::kNumberTypeFloat32, true); -} diff --git a/mindspore/lite/test/ut/src/dataset/eager_test.cc b/mindspore/lite/test/ut/src/dataset/eager_test.cc index 74cc8b177b..0d5eba5088 100644 --- a/mindspore/lite/test/ut/src/dataset/eager_test.cc +++ b/mindspore/lite/test/ut/src/dataset/eager_test.cc @@ -24,6 +24,7 @@ #include "minddata/dataset/include/execute.h" #include "minddata/dataset/util/path.h" #include "mindspore/lite/src/common/log_adapter.h" +#include "include/api/types.h" using MSTensor = mindspore::tensor::MSTensor; using DETensor = mindspore::tensor::DETensor; @@ -58,16 +59,18 @@ TEST_F(MindDataTestEager, Test1) { while (dir_it->hasNext()) { Path v = dir_it->next(); // MS_LOG(WARNING) << v.toString() << "."; - std::shared_ptr image = std::shared_ptr(DETensor::CreateTensor(v.toString())); + std::shared_ptr de_tensor; + 
mindspore::dataset::Tensor::CreateFromFile(v.toString(), &de_tensor); + auto image = mindspore::MSTensor(std::make_shared(de_tensor)); - image = Execute(Decode())(image); + (void)Execute(Decode())(image, &image); EXPECT_TRUE(image != nullptr); - image = Execute(Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image); + (void)Execute(Normalize({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}))(image, &image); EXPECT_TRUE(image != nullptr); - image = Execute(Resize({224, 224}))(image); + (void)Execute(Resize({224, 224}))(image, &image); EXPECT_TRUE(image != nullptr); - EXPECT_EQ(image->DimensionSize(0), 224); - EXPECT_EQ(image->DimensionSize(1), 224); + EXPECT_EQ(image.Shape()[0], 224); + EXPECT_EQ(image.Shape()[1], 224); } auto t_end = std::chrono::high_resolution_clock::now(); double elapsed_time_ms = std::chrono::duration(t_end - t_start).count(); diff --git a/mindspore/lite/tools/converter/quantizer/quant_cast.h b/mindspore/lite/tools/converter/quantizer/quant_cast.h index 9445f27b10..164ea28d2c 100644 --- a/mindspore/lite/tools/converter/quantizer/quant_cast.h +++ b/mindspore/lite/tools/converter/quantizer/quant_cast.h @@ -19,7 +19,7 @@ #include "mindspore/core/ir/anf.h" #include "mindspore/lite/include/errorcode.h" -#include "mindspore/core/ir/dtype/type_id.h" +#include "ir/dtype/type_id.h" #include "mindspore/core/ir/func_graph.h" namespace mindspore::lite::quant { diff --git a/tests/st/cpp/common/common_test.cc b/tests/st/cpp/common/common_test.cc index ad8de4d322..24a999a0ba 100644 --- a/tests/st/cpp/common/common_test.cc +++ b/tests/st/cpp/common/common_test.cc @@ -61,7 +61,7 @@ void Common::ReadFile(const char *file, size_t *size, char **buf) { void Common::ContextAutoSet() { auto device_target = GetEnv("DEVICE_TARGET"); if (device_target.empty()) { - device_target = mindspore::api::kDeviceTypeAscend310; // default is 310 + device_target = mindspore::kDeviceTypeAscend310; // default is 310 } auto device_id_str = GetEnv("DEVICE_ID"); @@ -70,7 +70,8 @@ void 
Common::ContextAutoSet() { } uint32_t device_id = std::strtoul(device_id_str.c_str(), nullptr, 10); - mindspore::api::Context::Instance().SetDeviceTarget(device_target).SetDeviceID(device_id); + mindspore::GlobalContext::SetGlobalDeviceTarget(device_target); + mindspore::GlobalContext::SetGlobalDeviceID(device_id); } } // namespace ST diff --git a/tests/st/cpp/data/dataset/apple.jpg b/tests/st/cpp/data/dataset/apple.jpg new file mode 100644 index 0000000000000000000000000000000000000000..023bc50316071e66cdece5234ab93123395153bd GIT binary patch literal 159109 zcmb5Wc|a4_7C(MwCX)?;009&zju9cI3NA>Y6&#RMP>@itu4u#!6)SbCyF-c=gVngB zD2%uTY-`+=+8Qk?S{K}r+7>M+D)m`Js;KxoH`w>y_x*m~KYj<8xpTL<%ekL(?m2gO zTlKaER0BVb{}?C=fWjYm`wR33l~X&Hc1o3tvx~c%%A>v3r@fa~`<|VuUmxR~&m zasR*RtsUIt@;k0}hSC7-Mlo*G+j8*1raB1zO%%;YSShCURbdw-l!PX~3_~*tplG*F z(LI^uv7ftVYb2|;?eplxc8)oz{^UyUr=I=OO?UqaNLe!@bR3p$|G)L(h+xf_qH!Zh zz`VPV7gKJud#9dEw1=h_*8R2`+A$P1l5vB6@MiYK(owEr(-=rx;T;b98sImOpIEr` zPYd0$?>^tzWUPWkAYH{#O8{i!DygQ)UN2SfNo+6fNXRrX_#1Pkn*qG}H1CI@%B9y8 zkwH4Gzul!7!0v~yR-DhJHf1Nu7FavI5hPg@-&yEfl{HF^Nww{AtyxM5h8Bd;^}JU- zU0?7)Dv3b#08J0_@w zN?8@&A(LDHoCI=xZ&gMyC@~ z8JjIJKp5v|66BO937`z55Fv_haTPHY)Q(4hqrR(*i#QNLn!{pWC8?O!MwL_pfCn$G zt=JFq5}{ZN!mhl0i&K$Nw$8x6SzhBA44Gki;dVW&ju>7@vHa*x!14f9nHE>uE5<$w z60K5j`a;hqJ8Gn+<$x;>GbEc;Uho#|0D0qa6egYYqw2#Wg{)CfmDRQC*mQ2$oTM-P zKq>%4z`ox>)=X0%6Zy2w@eiED@Pa#)!XlusczX%^b-P{7B zX4*QQ{Lm(hKUKa<&^-Mjo-Jn&aQ=hl_Yih{mbE~3EM-dNvahqCWCow-8y+SzIfLR5 zI05Lu-wu5X0HV&>X{E`3#oxpEkCx>)=a~IXc1o!@r2mnRi3bZ?F{*Og0e2`kUjxmY zb{KY!i4%dERreVuGo>;g3r!$BRbxd)5+HyB0}rWTV3j$Y@_WIqOJ%QWz>O!N0OTG%IouE|S0WqHxwNQ6rbE&~Hvat=`D56EjM{|5 zsnuYT2xd-c0xbttt5Vo@fESp z(gjf?U>L~jYLhTV!}-H|#nOWyPipfR)Jk5^4q;a8w0ySOA`(M|e04 zw=GZ<{UZ-<$B(4RT12AB&yW3UN$@n=f;eI&*E5>CLvLxs^ViA+MyKWCRe$W1;7Q4%0;9lgIWZ62ElPMQ*!U}{# zk>dbD@o(-&rIglbyr(23zMdIS37L0!Xv0^8wiaXr3CRgAc39vH;HMG0aU2e+>o#N) z7O;I1Ky+~;U}j+iN!bRRKDh+B2=R&(qzsKi2)jRH+Dr zsXV3u(*VIhiGUMq7NnWqOT6PNylqzHSBE6w0=)VXxrd_0tq3Rur5q$1Ns*|vI_bg= 
zAo;*ffj4-ztHQGJ&6EY?wKg9;)o&XgHj3(Tf>(6nfWEwLe}SYfGp`Eyo92-n51hPc zk4PFQW#q~X2zUV8dlpY{xtsNo0MO+oA6k&sWnb2qgXe|K`_2ZbMjq{WrMbiyUgt?I zENI-^-7?I5?s;?m+NE#S#0CWg{M{LRj}?JOPMtw9+N10;{Jra^4NJR#lTt1fY~UNI z?X)-xp+q&R0fzAgaITTGsuSo}fo?p*)3jLZ%d2Zge=_Y(f1OZjrs&~(lgBAu3*&=f z(x>fa$ZQpWBX6%{pQ2dl`0-B`c!ks8B($eK!{&NJNKMpomS?Z3MAL))I6@^CtQ_Kn zyu_|tBbrfM`2fuWSt6{VkT9K%Hwi02K55bckq7}$ z8AnotlID`y=vdm0nM4wSq@|3QTk)=wl$!E{{Q@$c)|X&7Z%{?^Zpp}(QT8xcBoLHy z#;ci@Rv-gLovy|;TTD&w6cGWIXRv6Q-7Uoub+cOSy-vn2!$OT8c1_-{P z;$}}^u#4a)c$1Zq-Jm+zB(g%Z!FBsX15hbzBJ46TjQVwkpaSWnwF{{%DEfql`9LSL zR~jHeX2WUYp{nCKTMe|@L6WvqP{831li^&$n~RkHnsH9w`-8A@us*)&Kz;t8VKMzo zOO_t#kXU2rJ=-nRH%zfFg>Ru}o;xfIAM}D+6o{}7PqL^WQBfP{On0qsJ*i$5#LWpt z?s2Ya355mx=#;rBM$IKBS}PR85q%RVUYqWKD?*Y?KuF;ras$=@`)m*~crFW!5f&BN zU&$}{5Cs8HaijRb<87lb>;l5*Pj1$L0R_q68ZE|TkjeNDWk9$8JOB+=5g&7K7ICFp zSUu`kI&jA)TA*yTDj<3t$>AW1!Vs@?)jJ}7vw z(*^x2om88tdHp{=igj&-cAOTdCTeRA%+nXz-8V)MB{Bjld-JnZ?$toSXYf?ki~!z9 z&r8L@Ol9iTc*C~VIvOk#dvK#z2vo)0do-SYm>aquK6`3R4NHIBF~+O+D-R2^DZIz~ zca&EDMkmyj)3FB3VPlFm@3#-L1N6vB#MIo zp5VHVPvwy>7>f!!O(FECAQhmUK(c8icq5yJHbTBc%$8d<0%sMKCaK*yxDq{AE((>d_ z7HKoxA92f8O_EjWJerkLyS&MH246JzaZBI$jko&en#f*;s@#42O*BK#K zLw%gRnAnO8*Wwtyu+H=fyMAtFgCcT^&~f5rF(T^RT8nD~6&hUWy=JT@vrc#*q=pwL zyFb^Y@#_p8w-*%JJ=Gr7El#lUtWqe>h)$F%IaI!PyJt4bcJwHvq}ib&t)-e~&NWdy_8RP;90F=-J7jjYuspDpTZCP)R1B_v17-ug1#`3!9gdOvUpW8sjCTtB zv#D5vJ5H3!5N#kkhkZ3|3Ngt=WCu1aC=UyrKTC zmQ;!wQL=`zR6@q#SiQ>2PN1noXV!oWv(X~S{10c5YLT<(Vj*yygaS3H?_ls$x$q>D zgBkI)U2ay;+ZIx@d8VO(vOGahroBEwWIdWcj8t(1xbS^WHbcgZm%zgZ zoZsFYu*E@i4b6jwuT;rJI&Ccc_GxP@BA^uxpHbnY^qdy}Ob#vpwR3K1honQ*oKgrG zU#VPX2bQNFL^%syxaRilA7q4k1~o*~Li`WwlHf!P{gn|;K8mNjA@C{ty=h0@1yl+TqMtie|Ejx1jx55=1DYo@ts-g> z!okT3diPTvkbK}umZ!6p1KOlR4iLecQj_?9h$nmgwxOqT+Gb5J!Ht@riSAfw+WDQx>2j^ps930ZmnJ8QG*rgp zK6B|ho3Fmwp~_Wihe@LOFq5e*7sZh~YDL{d2X~0%k&pmgPV;T7$wb#9Nm?wP9 zaQN&=P^m>g@CuC+2VBBD1-vl<@;>A5!KsaVK89kQPf`VRb&du z5e5~E9PVN-#sdC!x`w(yHS{^3p;p}cJGx79bPo}Qa)1EtItq+V*m~#CEo1={bq!0U27BZ)Y{k-Aj{#HtUHQMo$va3dgQ{@cRt~Dc 
zl{nU@zg{~iL#>Cq-h9C@eWjn~v^{K%)<@6NE)c4dR=CC7ZBA3wgvM>-tbW$6wnMe| zqPgMxXda5jKP}{6;jV}=Qmb*~YMiNaC}wauAo)N2AUCJ+y{wuoT)rKf5fY|GCF*9;|?l&IxvX@EEaNTd-C)FeG3+m#%% zoMb>d2@)$H19&4d>Eqd8>`B+2_5deNp~KQ>#6oLDEkCAf(;_&`Ll1Fdg*UwYY*^vHI3Ea`atp%ss))k=Mj6O;R92&C60$3(<9oHz zm!sz;N%;VTQ>dhlZ+0@va5$CuLUIq$|6C*=@Kkz~wo_TPPbEbISHqV4{iot|pX)5y z0bbo)XXO~UqmUtQc-psb&2UbXp5#uE03aN$|QUchE|oFI1CWk z!Y(HPhZMmLzaHli?O#-E8W~?l%0CyYl&_EoJ`SFRt`%*XFDe`VO9+k>@_0AE8uHbI zrS!3=QNif*Fq~kD0IVPfaqjJc#3d@jMQT7rB zlxaC{NM@-NK_T!Y1*uK`R|s0JYnBA)MHY}j2KJKM`IOed=N!a`qv2cdUqKc3H*Ox@ zOW57K)b5Fcc$h!xR}caEBWo+E_93-cmD?AqvE9LzgD|WTc&VKy5MEm;T^${yGs};v zbX%_%&jH7T;H`yj#OO z>^sGYgDsR{l@0_grr=7FUFh<=e7U;7KJLSe2)OXu8g&BxlHK;0!7QPpmyzP9(+`45(PNNgb3SZRYwmaA?z8 zWE{yx4-|Deu2^T%nmP_d1v#h*lnNu33y-opG#8Z-t>X`Qg+5H4iTQ)AxD!-Sd^FGF z+sIyykX+;(RyAZmsesRFge2(!ExZ;p@gY=W4TP({n6B|#Qj{_KwJ!Ce~HIB?{qEr6lRd=Jq zk=L1M3&w@4c>#Kjotn;J3t9Nv0yHpL(|UR>W#Xr9hI=8#W~1xQm}tYfS|&Zy>*3Dk znOo^4lX{JMkRkC2-u|m4?5d9f+eQ!Q5b87|5sX!9E3_SXI@2W}svMS6nUf)Pv|IGT z0J$Gk$1kPWt6b)Kwcf+HDCfIuX1%1TYhM~n+xqHqZN%ibqhnP^9+vQIV?1z2!0j{Y zy;VjpS6;>1k78>V2?*TEY{3N>Be8{N+|x^dEd$+f9=Xeaqa-1VT>o$6XFiTzQn8iFz>0Z>SQxur1~fa{;TKmEO`~#K`{;8a6^S0g+>aK zfYph#)Q&lMOxIJa-XX40f22GmY8Lkn$Y_{?H^%7TZc@}FV1&Q{wJ#bGh&)IIG+L?O z*xjN-i}7h2mU9*91qGa|HE6SuOeDx+E2uAODUR0eZM?p~O&-qXWvIN`0aP&xV+aYK)2J!cD-qO zt$Tme8BDpyiw0!DQO(0npL0dghpITOyCBdek+MUA(1C)=QpIb(jZd74&s^o#_n>uJ zce&C-wGpR26kvFmjnaxBoES>18x~8r3(yMExYSCbGof)8hmdcCY0>H=juq6eKtKSV zL2{^2uP|eeaQy);PkN3T?G(YV%R+b~H4R_Hq1Q|!Dg|U3tI}LRq1-@oU;<9M5;YK< zMbV74;R4m6l^L~q;)7BOQai@6JYN1y$SbJ5;EIPXLb6(Az$sUu7DO3{X#{ygKn(sP50IXW>^WsyRfT<5x zVJZW%AY&yFo;C;4Dgt;$p7AXR7R1THEtiIvXIK*{vjO0(yxm|MCo`w>t@e>*fokP;U8=h>xpkA4Gz zNP%|H9?l6DT7-F%yv;#_)nji8fsE#k`htAj7~ABYKBKNeZvoNXEcnpJCUf ziHHiw2B@39Q(tvxe?*U(fO;_g0B{gospdU{wGP>WOr{iwl8dfvuEd5;MbwBP87Q)% zk<+?Y5=8~s0X1d9vnd}7_~;;n7WbL6)zQi1=_qMGQadsMY@@7lBuG#47(wCkHK^38 zO6to>9DR2{76XZ6vq(}&7Bu4E1asIFWK27WxapnFVWY5R0!SGi=UKpLBmMzmoFo}i 
zRk-*tS|nErGCc+&d+F$%w{VLgNCi}dtH1_H9OZ>F-7VQb%}WxIY3E9qN;GFDO`*Gq z-F68$x*SODmB=l~8q2O%Df*VuRsDIN56*FManR97R1^wP$)y*qC3`u&!NsG3Zb%vc2UfZ*5W9*vE(V3m9Gqc4;jkO;lAwIhr_XtOh{Zdv)EntlpwR zW@0He5mZwiUJHX}MB6~c;l%wxh-;UP6G zu&!JiFxLeZ2pP;I3gf z?^44)9Y*2A_1pFdQ4$2Uixd4uE@nCwdM_MBjj;Mu0cmaM7P`=mSmPLsGST7mJ601}~ zRw1vJRAJXBW4ns+?d}8#aK#9QDznh31Eku4%IJ%=`yK@>0l=de3ykZ;^JpoZWmYs` zXL*cPQu|Oi3rxmgAz}3G6kZmzrUnm;6t3g@Zm6KXDS&o@ugeQ6;^gEpGF#B!NjnP- z{{P}^Q2LVkhH@GJ?BcRENAX0KVU({28MeLzOk`~PA7OkP*MgFEZl;Nk&v!FQe)N9N zf_;rG4Y}r8uC0KEsvZxBgKxjtn z-+^;6VC|pY77Z@kW~m^+38TyS+8XzlBL#FiV#o0C0PuJsYMI@}J7uG<)WCbq)uUR1 zIvDO^DKFG}DIO&aj~WP>dFg2hi{=A7e1%KhB$&aYf(#3;Y;%xka44bRW<(Liy4uhV zA06#wC&{Q3Yq4j@6U1slP9dO_!z;cK&_#pn8i1K_@Ey?TDA9I_#-^aw)aDa~N}nFs z<~M+cYzhSo@f3U+vmC7poDL0!7(;4lgGo}HB^Q7gyD-D*2${W+P4O@u*;I(Ru#H32 z3j2hANIdB?LL2fHKFJTG+f*`K2S`B{s@4?x`V>^wbs-(DM3nd0UIWGF3 zt1WKqZ9kJ~PHLecvZQ%4(=hypH9RP8%UYc)h$t`ZBh5!u1BDpZ-=c%D^38b<(yLsn5J$<;x~GZ;IHglilTM2q0wg>bg%8sw|p zoZRYkFx7%87Al91M91drpKL&pQhNM#Y_4(5SCPFD0tTHihS#+4gj=HQ!89VP;D=bW z49_o-bx)(DeMmSH)kO!4O+&jE5-TCM1{gI67hhHubb4hXJ6K8#UJxpWQsf7M(PqJF z1-TC^!Ig&D86H`*C6z_1J$YjlR%0i6^c;^XwlQ&8q8SR8V>S-A{TcxSW`V_9wNL=e z#7oeU8Y-&-Y+Q!K)rgGi)OG&@8|Dg|fzr669VBAV(!{6YPJyBfufR#}gPy{v#6`$Dowmir!{GZ*^2qZoV0)$R^irH}rJyZIV zF{x8MNZ=JUM^Fl4$+;RlbTm&-MR?g|93-uYfW84Q+5S@y$H12{;?+TZsB;n2S>Y!D zo;E`-J^g#vL#VhQ9|4c^ig40tEU@cv-;G`+B$mfXaPSb}AIWdT77jSjM81)9Bpl=a zkfRWJr0~z{T|C)1<3S}Lr*_p6If+fhIYG;iLxDn#LP~7I`e;vuMU8n0kusG~$UB>h zj6hA7o_4}%k)z?o0JZCyNe*)OnvcswYlwawR!g{#G${Z@Dsltiolb^!0^R@yg49kT zC<6r+%)}c&&m$Z`o1&-yCsii2GsQ0_j$al9n=u1(QtB|d7Ch?Z4FTGqLkUnK!s#!K zXdAGj=SiR^@GkXE2hm%uN`)~8acx03Ae6FUIRbV}=T1B<7=iG@yG5(j{R>^f4gEEy zkyAJri@a(BG&$hek$2r?Dk3adONrxn>YH)ypGbm3g zrd}BnQt3OWyB%O(6Dp%m@$edIJlIq~p2h;kB^BXl`5B~-wzxbedjj3XKTPa;*cu~nM-=({9TQZ**JMN)%2i4(^=7Ch=z z_*z0}WI`sE7vwYV87bisCR(4V|s&8u)<*A22{+6j6DSl@f%m%LyN;Wn4;4uW9Drl^=&;k1dzlfuWq zb^m=dl7_LDC*3wWEwcx8TEb0E0jfZtP3UigO8O}|qB^n~4a*$Rcdm0vj;e1)pNi%V z1lcu${5a?1EZEF2`dJo)4TcB{6za*_*~DTXJZ{}-WOL&URf 
zVD=%xgfI@m4$#8CGjbTxpa+FSNXg|@n2OAUgQoSjRB_U1v=^xUw#WH0TEG+SQESJW z%$nkD*=np0QA-_woI!-MQZ8>RM#K`j7FEt|k4jYd$xG0XAqP}h&}k=p633^y8qxn> zMg<3_R8d(>Jxi1|`XjmA+P3$DIO$K5s$@1em!**Jv-_N@p#JoX5)6_wFFu&3w#^#^ zzGIBpKPC!ouaP@tQQ@9=w4UOCnqen~I8+|H2xxraNEW6FX5cu_P)Y3IriGG2Oq`Y8 z)}$2`6ZDn2#dPtD!E-^rit9Mh&RF0BBYhv`F_l{xSu%EBhFg-66TfQ81Pkp+De4Rr zc#_|nUV5$JCTv+IIPaB%H#2spBy4V|(?c}np&?;Eu-{20eQbwIc3jKj)jZqZhUa|Q zBwM9Y@E4dWZO4qGsKh$eR!Ls!h3=Dyg{2avuf6FE6V#IZTfEQ~6TclBuH~|ZA#z)Y z2h6c3G0s6$oMZVR6rds!jf)LZ(HKs@5fON>6uJ!Y$1(W-lyKz#_k#n){&{jtIkU!2 zWDymL?6&K=3eWxmdR!a~;NURHzp)CHb2OI=W&z8irPm($73!UgKn3jIfzfi%W++sB zDokctQ_%*2XjVeW60MGAj3So7RgXl6Q+SOONSLTI2$l%1_$ZPHPQ;_3gq2{t961az z!Qvof=TRA=nEvY0A-S^?JB)|rv#tyizi6hiCPEh zAC6rXVv9tTswSSQHgdwo367vZFRkH|)zRP*<9IfJB*lUuR7=rdoy;-}$SfLG^MYB?qYs$8{O+c6o({{oE#$_T9H|=dM%d1LKETETL(}?R2&jO{1u%KP0 zXU{BV>>b0(!O=`ZZ8l%Cufi{Tu|1#wjB!EO%g03Nb{V!58s~t}+Jh`E$ zLLq^lj)-VQ6-f+|RB+tL`g}*--rZGFJ82amsva+l5ic;zMy`b9#C5RFgC{m>By1=+ z0+>Q%2R=fZ+JxmTkhlTR)yd;9D}V=8I06!_VAln$;VIm>gWDjo*S2a2a1g#;Wm*b7uja zyavguARO~h9^5k=<`y8c){LVaA_C%(yen8l#ZsP{0HJ<}cRrC644zBhkM6(bk>C|1 zFy3iF^1Ydxr555i#AcNkTX7E}Z^Ui&FlfDO1gDHAf|O|}Xx|hrEkxfPf!)3WMmJ8N@|L z)gn(kuvq9CA5bS6sX5E+ZHLNfytk2vosDJ_Z})4dtq%j2jPjPQ{Feu6J@D{>m@$ml>=loP4;!Gq}W<+9-CK5YLp=MF7W92H1Ds{u!-yuYq9*b2B2f2XP^IEB$Y)gSYnFUaTV~b>}A=m#a zIjRZxce2-2`PU04EcU4BFd%-{mCf4%=YM{2R^E!dK21l``W<`rFzHRx;`{lg-*>-- zAB%on;yfhj$>bI9k7-{R*l+d=X^%cn3(ua4&fdGCdCXszjmGJBBUj~o$;|v~<&;&w zoBEK3d*4%?X&5o;{kLGLPdVM^#v{K6t%Encg~2&*;Z@4G5nFEU{i91^#9LUi z=Pf*pU38#d!Qe$LwS9qX<&ESwM( zvcBW9l(Oy3Q~KR6{=K>N?7DtG@4oip++X>1&r06HrsC$!lUt_0h0_mL&i70_;xY4l zm)9+;rtO&5^)2MTi5yja_Q;4OwMz!2zJ)^@)92nwKOMPv>5|9Smu>%JsOPn)Wi$5o z3Ef@Z!7F`in~lIR%rxRJ23Y8aOzX{~@!D^T0Lb~hgi$DJ=|^&v@*m{JQ*1b!BCAaH`0OuI6pd4?M9mR^m;)%ORWQ{ER`lG86!YfR&1E57+!T)4*4+(e} znIKEsHiLjRk<{=#B6!IJ9u4EA@sKt0W9T*?*CwELfw&hbXm(~Vej-4EfEF!0-z`mf z3!0pbT(!i~k)PA<4!)VR0NF>)yi%c`9CZ=JQit7z_Y9?rcY_v>e4jF9Sxc=mg;+W` zQvpK{pQ-56UDNe0zNd=b;iv8NFs0rJWczDiF0N~kRf69)J5Yx$EGOpe1x&s3GNJPt 
zVEAnfRSOs+$UwG0U~_5xI{MnUd3X$k4W5RF;dopn0U&ajtlMlm<$sCCx?EfX%RQDl z<2enPNFMt)N~Ie8595Q1%3FG<4@$0b5Lwqazz|8GP}|A+QD zsek7yel5=bAw^X;=Fy3yYxhsAYtFf`cI4r>oe%c5_TSm@aniqn`z$#(&SU!aaWhx$ zsb3g);l`N3tD3*df3H@!^y^J@@>0 zTi?xZ!TZN4MX&Z8f4N~v*DVQsJ<{$!AN=WuxFSk*yQcO0Am`?Za}AsCZ8|h$^e6M1 zKK{++$fKY=qi*$SO}V(`$Irtz9si>5=AyL^ueF$`+&R>W%VNlw@9WME{iF5Uvm++< zj_A9pagp}%FEgFbJ}O=Lere}l>RN{+-ur$~^3^x7XWzo7YlpuDZOf9D(&sn!{ynAN z&>OgZqxQ^Sc=@lPZXKfz#+;s&oLg{t!FRh~4!F7M=JO@3-@b)6>)*nyp*>gbe)@9O z^Seu)_pE-fwqI>azn0&=Is5AJHIJK*hFnaEs;eowxiI~T*Ni>Sx5gLz=GvubZT*ew z`Ky{S!fkNl%R{ZjC0lOR4IS~(uYI>hydE=n?~Ue-x)VeH4%yl|JU@MY+4oU{4>Yb? z*jjq*Y5DUhTV59?HTLnGmpJuQq{pWrbC#UT_|s*`kb(j4ou7ZWadOSWp(AIe{&H>o z80+!$wC&H`CY!grEQ#8Z*y=WPd18{+xNnDiJN@8W2)Iyp{&Q37(4nV~?de@`pfP{Y zw@;^}zlFk^Kin-_l9GQY<<+sXXVPz^EP1~*c-`sy6I+@0-ooX^EsaaoEL_z0Ep%KN zyzl*QUi=XHd{6G#PposcKi@iVVo2%@*LFiPPI_LvHT?L%SMP>wevfpk+(IS| zYE60mYxtqmv!C?cyLRjAZx)?cv9r)+t!-|9||4R+U zM{XFa@6V&f0K=$qAn7H^RIG~r?X)M>gJoY}1>vX%RDsT$x&69(C0Lm2k~!dl zF@6P+h)cFsJ7bfJUV~A+akYjHlccU3LV2R8>kc4p$N++t}zIb|TBYr_$4(cje zdV$`uCNQUOH!p;l@tG83Bqs@GjBb^~A5#PsM4CJP5;HHX`$_WAagA$_85wZ@?qqsi zU+@v2U17H2SQF|mmC#<*=X}~6vLW|RGH#n$DQ2J3`Wao+j7J!>6Gk;=R{H<+C3=SX zDg_BSX23S;=$yaSu>nwx$Ebv${U6cUIvFS6nQpBaTYG@VgKWnC>_5Ok4jm0gzD<>L zM0w(Ym{jy1p&Ck#&-@=m*@*0UM-GH^gr)X(QHYy07<8Jv-aLsvUp=C?Ex z9)0BX#)mB_Q$nuqIji_~@U$0Ar@i0bv-9ezmi6UPLx;8|?Y(|>uqO9uW8%!-jo%&H zH0avieqWy$GX^z+PFITi$#HSLI`H##)Da#JHFte+_Ny=J<{eK-Z}@HNyw^MC>@B&D zFgWnUhAE}_L9PO%a{5LO$q;LecHeS6Q^wb%Vo&yz+20v4?h0%_lVLZ z>k4=F{AtDWj}G)na%=bU!%Oc?c{FLmq&%VUl#us(F@XMw?eLLTdocgGCcF=*-9v_rn zh^hWIam=yZISYdJ^jO=HcltonGyNZLc6~DOm(ZuHl3q?+^5*fgc_pRO3%#bC-2LV8 z3%4(oyqdB3evgfPCIzn@({SO&yc6w4%+mzc-~4dl@gp7|&b(T_?1MfR_s9O;ZS>q@ zwF%SjFZ?4ny*bCj=|RAyPp?hgReq*8>&QenbF&~Rd|B}5#7Vw|^9Eg>Gx62=vrZ*> zepw~ng+43x^wGSz*=cWHL_@DzzJVbRHR)+xQ@=?`Oig_FXo$bh_HS<%fB*V^r|&~v z+_QE#`saX0kKaPsjR6}+%-`zsvMA?x(I5LFKl-b;`^{s&O;{9FH1yl6c}-={N0$Ha zUcZ;0ok)5w&iu!vj_#}0dn`Xb=d*svQ_KITe%W(L_?wOcT1RfXe)f}1+pquHu>ISs 
zk4E0TzU#BsU-v10*>t1Nb8W$uPlFQY^+ep-wI%D~&X<=~z25lf!(rz8eIws=e0hB2 zx7U_GbE@iyjjc&Q zxB+)FO_bvR=$nh5s9;ZT zX%6aWG~++Kh|49Lw_4y3-cWCAig%~Z{Rr@-kvwr`Gd)lwC0M%)u3-Tqaog0 zNv*-p&8tlLWgC)WbTdT%wVmqQd53+h4iX~P(f!?Br>H`8X^YpuYJ(>l@tXXE;7WP- zDhLbl%P!x=3Q2}J&S(^=hq`Eh7KF0w?Q83yy1|BDB)`JX;%Y(dK6vn%kEn6HOv=2V zW(8##?t*uma)A7Bg&(@;WR2$4u&pnr)R*JgPc<`P?Lh6weS4wfZ5wV8&*ikc!x>O} zMTbllD!4T&?34|)-|X-3eeZ~l;opUb1V9~4VrW-vQjUKY*)|gYEqTD`FD3g z$*o7vL;Bq6eXqk0AI^MP-4Z$GW^2**yo*ET?DCH~^1A5L^j|+r&uty;Q?}3d^F1Yt zgD*b(z39vbHy=O$@X+Y8U3+_7K7S|T;umW_+R*2>&pVHa*mTm(_rksf-5WOF%G-Ur z&$xE?#;x1)Cg8@I_aDXf>~lGP#JJusE`ERD`pS)izc0U$dTIBv?w`e$mh723bI*$P zLwiq6Q9Ve#P#xmEVfM`#r&D^CALuh~{ifV?d1G>BS&sjj{%pv$X&w>F9`3%|@$uB` zp3{zAO!?r5%hvb{313XgKk_6XC&@Z-`r+c>#M3kK3%*VmTXcJfBG0SnEzC#{@l1IY z7qHXw#4y|%}`-yWBYII;faz_ab+ zcTFqsIn!s}zCOdZjtE{mWP4!#jVEjK4<2Y87&5hb#PU^6B`X%r%*|i-G9mrh7x4%G z4D4)7PJ4Z5-j9B*Jw29vS2pg8?*f}HH#E&UQTySqidA0^iS4^_%Qr(l8#?^>`B!PK zFK=&|t({ts*Q3+Bni5Wg^ohOHsr3ELGu^L0{CV&BF{`SsJ$;liU$;Dd`HDe} zZ$fU|x>j&)RQZoZ4^P~m*!9Kp^!aOlU$%R2(-2|(=S#M{wM$gYoCp&W?5#vha=Hpb}SWi$vQItdu&&}{84T^fuQ zOALx+W2oiO+gU7vn5SKYaX`cODpV#GIg}_A*i=(~t*6@4*&7&|=mL?20-$Hm2eYC| zlXq^M(H&6>#YB11($4dYbyBhV7^sxXDs5yXu@z8#YH-4jm zpO_ekTF@SP3?nvoN>sbWLJM90JLwcqmOpxB?QCAt0 z0pHj3=e2y0t%@$Lr8=Qq9@r-Zy<(K+`I3|vVNNU_c)8jQ;_#CTY@3K3Cy2}fvDF$V z{9FlbT1+;NPh`)H7rG{n=ye#;b~arUHG<66=3FL2)pq8@5%|yLN34brs+4MU;-O)c zu5P@OqDD)>r^3Xpn?{sAY|7YNwqD=!*V;EpuXFVEZ$cNA{!wS08yI+ObJ>lHv*+C{ zNSpWi&z9%2Pt@u5K7G@@U+WFSUx^;67t`k7FFXEi+Msc{S$p@?itV=uig7AYE62TG-BoO(!cAD-8KJ^*P(jQr|~;J)tPQh zF}}EPM)O&_gpq54`hDFyW!9`mr6H+0SCYJuCR=^{}G#y|eqA zef{}+Pfm3BOf&D>qAf`+cRrn1D9^iiZ2jSx0XP2oGp=Xqh|CT5{u~jsyiB|F@V#~H;tZqDxs?XiiBSaPW={F z)V`^G`Nf`5@~?kAmRE0{cA=UIxtl(y|M2VjoP@NwoeO=;v2ho=-Sc_zd(XgcHN9u& zlwR@c<2K+_P+h;MoA&i%(^IA{7@U}X=3g7Wdk|Xw&A771UVS=j*uB&9+J)Gc69bNY zv!iu$`gF8TT>v@E2@k>y)YZ%p2^sQ1g* z`xj?=%so3X{hBx>ujV(&ek6o{K#qU(BDFpa1gXg+GmWfJ#yi6GBL?2sDRF*$Eep52&u1 zv+Hq_LoKfHIN-;PiBug`yI2m8`v+Sr#C`c={gjXbV$-iwnFFXdc4Gqvfem03>gq 
zuT|kH4dp^?QGyc`U|S?DsZ8Z)yT+si6P}bsGY?H~=IE#X=my?a+!-{_PTCO^LB>f0 zkp9Fg%B3e$wy<`mwCI3+4F;fj|6j!=x~>Nya;d!~>_C1Uz9*j4&e~z*dDx#b5#;CI z2!3+*MzAPVkpmZW5_Jf7qbeE4<3ENv{SKsfJ8Ae}g9hJgg}8Hvgwjy*QV z;3T3u$ZAz8$jPMtCBz^?EDr`e?)NU0xOC8;lvmMt)|GjIlOJB2|0wsXub%hYTWWnV zYf1Yj8;)H5VDGA-BcG@ByY?jTyULeitlupe(o)oTYoWe4_`#4d8zxRE9r@y7&zBd! z@9({Pd(owszx3gE&1EZQmu)B?d#Y?#^Y>mq_&xn$@UFdI6_)NQDHt>);YRAm-4(qP z7EDUp9lqK(aNMy?TTXAA{ntU~zR!mYP1*C;u1%$BEsK0gR<*26m^)>;IlTPuaV6I; zP4N8wT0-JCb!o#c+`Ux#xafJ_**O#U{62YV%DTNJ3HPD~&(^;>bmqImt6r_XnlN?y zfKySQ+_`Y^=#ek_b-Qq}Vfo4{*Ea1bXqi+}-JCwzoW5!KrG^XoXA4fXPZ)J#Lvc&a z@n>T{(Y%~f{Pn&o`R4nxKkM>a*F*OMZ(a5Le9GRQ8voB{r&zL`Y|9XDzoJSXTuZXy|>CpP4v5Uht=nA8HZY@g) z7~Oqohy25n*6nWG>zNQ6az>Xwd)Mal!d^>f?J`f@?wR!SrRsxoCYmP=%01h>azb+X z?Mvw)%kGD?FWu6ms{cP zY~%m&n<$;l9O9ugwU9$=&gD?gsMts|hviUHlR4#>v(;1DK}sc?IW-w3BqTx}ITRZ) zqZEY@rO*k<^Sjpb{Jy{M|MmJ`uiU#&yWRJ+&-J<9pZEKH)q7bqm@VQe_S|&CeADma zA@_a!kd(T68JXbeX2uOi$v>#6rm?T$@be{E9VytK`V*F>v-|=6^LE%Z= z4|d&v&sEj-zypFe5-4Feg8wddOU|#{VIQDW)Z8+ZTYag2chtwqp!*peM8>7375QjE z%pV-|o`Gw*uh5Az9!RzMgI@|7#tg#YLxy|$z-W`rEoM+?oj2Xm>j{f?J0mxhPt`S; zFGf(Aj>QI!kl33jypai>9TJQ8l@QRtk#Cq)MF5UX?x{T6jPW5gRi-;vL9_>ip zcIc~5SxLbeUTuwk=eX^tA-TBPW0aVlTARL;RD2-?lOzK;Yev5c~{Q0y1KhL0iNv>uJQ!~{TzK`hL z&09xhfKpl0#x4QC{(N5 z1g8LaivErYLA$O=_=fAjKudbcFdZ^}3Q2(eGk_K#2d(YZ1)c~BK__Og#H1MH93dGLwTg*sKBY zKLpikT$7$~*_;Rc>VLZ<49GBaK^iP|bp86Z_gu2pw@|Y>nq)Nd5O+YDtWTBkwvkzq z{eA+{d>Id(4BBb~7U0>azhC~RJWhNo`S?H}g#Z8vt!}abhXET+(zrlR0d&>C83&Sl zSpR!E~nSwy%rsFBTq?@9>lLNnOHMUe?2tHrGtmJ=7Lm5R> zI3+YDxfKT^=Ub;0CURE+-nlpI$55ZDc)qvpT<2F|Hc(-?BblSsdQ7@hK z59A-3Q%VrWgqIaPjv@Ltnh48ou+rN*LiS7&ic`X7JeU1rDE#+R0>n8p)8vq$+?&g# z*vzTXshFyc_}fhJLkA)p8@%WXM@N+WK_ZtV-EB7B?t^#cS>Y{yQu+oy(rN3k%mUUc#pD!R()Wlo@&mo?aW>4&>|8O~0JN1K) zBZKrs<)-mu$|CYt#+WIyU4I!#VZ>dCDDGB`nsk_3Xwk#8@l@{)3Z3Cw=$CkN%FO6T zUMue5d{)ZvcZ@xsvl6O8@>7ZA>y)f4gwa|CgG*HTA^v_jCzygRWf3}9=}z@Tk}B?H zaqd-iFrChEL>?uvc9{#gqKA;P!tRh-02QJKIQB(3cl;C2l`R5RfD;P1WQBJ 
z&<}PbBjiI7i`M1MqA|#szB8SLDy*74pViMQbr*{o43zLDEtZxPQl>vs;N!Ia4W61} z`^Z@F70N}qnTWKd{HQ0;$(otr%}>{+U)5c!dW&21SeEH`UXAHEc;9YMpKVX32bsna zm@1AGT_pAU(|G#ZDqy!J6i82>p_pjpu0ZizGV;}Vg?}K!4dQ^Yje=S?R46ZB;l}>A ztg9D~z+%Saiyj^KR(-RvT~2t|2P4;N--!2TtlUDxRNPH7dC}3~{nFzI)#tj;qaDIN zitfy_D7o}LiaUQ#4eJP>Ju%ytpVcfvtEb~pu1I9g2UK$FBe>NjWwgl)SYI{o><8O( zDP~=J($EU`d6sU5{*Tg)tG^@{_-Ra)E!--3kBgZiz$_ot_jncbrr)`U z+E(m)lQs@)>+ zjhE1VL7{b3DqVV~SLUcRbllo|O4U{Q?i|1yW)%&SiZKQ2h*)>^Rp-NIf93=a(2wMMEeis-a|8_6km)!JI za|x6#wsxP8B#mpRE{%}leQoJ2vh!=7%F9xL#L`99_OQ*T;_Ba4ZMD~+=gM*qp=fYd z^WX=h_HQK*rzj29Wj2G5DMO{k%1Vxa2GrG{EBW1y;ut^#$p`~yNwy_@4+OoHEF|on zf18pdQUazoa4-Q(T*-#46bX5XK|Y;R*7kO=c&|Ej_5jFo*HIf1|l2E!)l2?_XZDJz!Qx;!!pj}o!I zymi|`pU@9@{eV3GS_J}8i2v! zv&mLQN!2R_#jHlds1Vbt-L^3_8s`9R+m)#f0#=C9j{tSR=JHlDt)7VZl%|rbTQb)M z_vp(+`fVU6r;HC;q$zqd$KJ%b@=FqQ?{)18=wDbK&B=Ca!~Z&~>vWUG#Gdo`N*t|# zDZVcpR~mj1%)0bO^Rv<*i`9A6Ropt`|M3Gh(S$_$QGeoe>P@1TPQAYl@3qD794zn$ z&cUKWFz2V^AjLumkPFUITf zmFpdBI}MZio4g_p)A_w@ex|me?FJx0>S1=~Uz5g2f zmU9;6LzGDcf;0qZR>zV~W5@|cf@+wh9wFXguW&~HKymJidN0oFM?_=ea|pwBJ)gDl z^Z5~7fjInz`10-@PfeBd2sxf)r<=`Jy_5t&gQ>hALzzhZJyRMUuZK>P+k1FtN|Zw1 zRIFIMXSRsfKYN@{Kk@{K`?I6aEx@Jz5WW|Pz!MAj9zb<`&!{Pp)et^hp zU$<+lZ~68YWNr%2_b-59{xWp>reRfTN{^^sNDRqeV$?+v$v%JHUK-@SNq?1X_HA(t z)JY7*5g%Id{_`_1IBTRKyiy*Clv-u8`kH|@7{5yzV06g>GtvKIsAP})^B`GZ*r6y7 zf`c5$qQID6qE`J^1O#jl%mEmf2X<>RRAuF5Hm+{E0TR9@z=@Nx%mfLS{Ki}Nq;tTw zt+~|3-&#Ob@CzV-JVHrp_Jh|1Dp-K%dN9-R?U|#nlaSfbr&3cOu*e)WfmWy5K+yA5 zd-boaJ?sKf;~ay69za~J_zs-~1sN%cjtEqKpMrt=3vdl>Wbh$rJ-`5j)ZT3}hh#UO zkjj(#cs}X=v|Q0@$5kJtqXH#SOu9)b9kQJ~1HvKbDpo;itD2N+?_JyRQzF#tF1hrG z3IIUmr%ATdO)|2*VTUe(OM}AAo`cXmkgsdUI!L`x`7UEp^YlUx<|~(*duFwW#6a2r zo#gjCaH{%+^PtiJ9vw9K=M0dSN;PY5zxThwtR&WgrZNQ}Siq?U=BY3npeL5>0}W)c zp#3ML11sQ4F5ueEAoNNdFj#x8uQY^D5pezvHLZQ>Q4Ma1%%$vR@+#H;te4o&+O zZd-@^7VKkK+RrDGa^focrUjqA=sCkdHnw+sz$s*MF}f6G)YAP)FyKE@R^XcI>1z9~7D14BlnWpyZ~9n_0PW@EhFXzEc=MF1VS!j4AjWvs2j5 z<`-w=m$-T(UmZ`f!-}1b?W%J3idj0x zv;NgJhsBZ??R7fist&pa1?$iR!pxFiT+t;SBFZB>d|=D_ucO6$5;?e%fZtEtx9`vy 
zD@)=2tSXXuXTsqIcjlic?9fJ#0pYE6t+y~M5498aiLsOwR;E_Yj{b3S4*?&DA{c{^ zlpTcPHEwy_w$ZpFYcZL}qCI%U;}CEiTuRxJQGYT0A84z2&Znd?I$s3eGZSF9r5yIU z-?zJuc7>?EdrD-^T={HEz-3LGJAI{Ho5njku$!KB(7x)%%V_4#{eljH3G;mPr>UO8 z<#Y_jr!z$~-xxThad^u4%)y#CY_nV6av~Z<)}2l?O+A*XHU5_-Nkt6i#<5u_M}x*$qjLMbZB^JPo8f=?@>@JD!N zVIAv(V5GeSr6lm1D(OB|=Fd5&qRyvHY+(fl1%Jf8bSY6g-0}>b)de4CujZ0T^REdo zwQxqo>Cqm++T$Fo_f!;lu6;wRGdh6ld)K9Yc07K8vWt%5sMS3|qn<_hsu^S;5@u3824{rsT zGCeZOolvJWNcD9+;;NV9ix}Zs#x0~(x(3#3F!gTB{9;5TdHegRZ=>JYUZnXYXHNqC zBCAq}^D5lfer&-twAW9SNG=khqd&HY69q}v{aIDXmP3YpKJd<=-d|oWwxb~$1@pz3 z7LOMChK$`XcgqsOehzK!k63z~vSPeXXfiU3D9`5Y8gsGSR`@H)E#Ix}VSQA_5z&>A zZ#;OX#x`+8k~4!+^*zIbp3D-Q@&J0Tf14!a{?>y3w=D@DGK!KY3RL5gB|siQ?EbyB z>ICyPSb@MO78p+=sYhKT$971WABE-_tEp1TZy~A5!+AT?fs9sBN_N#5cVHB?jtNp% zkbPd$lv@RI%XRQ3DD$ZNPjKynQaE+$j(11(05x6O00e&sx{=Vd>OhlR<;QEMJaC$~ zI;e;y&?`&e>6vCm>Hfh-hO1sra0UUeyGs^x$_TLDWD3{A2FA1ZicLhx*r~oTDI{P)&CkIRK9; z9Wcq0l7<+0anK&gZGo02t@3Y>wgnq&-$4@0i;FC9fH@#hT+4%Kj&#f3(NrMT->o8bOj%QIobUfhAja+QirfFD_?J&J z6W@KRMs1RlXu4D`#`p64d0~r|-gm|57{|h6eBKxH(1|b*rO%BoI(lhwI>dzA2z`|d z9sZ}NM^CZC!~#Yr+@%^jNk3gJGNnX!&1M~3OB0Ou_XK<=vlg+w8ArHeq(WaA9PKmi zpR7SM%c_l8(ZD`uzCY%dWxFlYQgF=#)K9@bQB+9Mlw`C>7|wKG7&k>;)aY2zNi{J zP#U&y?h+3blgv-4fhD{!&IG__Z)&Pr0gl5jPF1b$5@+h;?oOI3`>GQ@Ht)`A!TbP;u3$7_Xn;;SXPI7nEH-INZpYk6=HuSy z)k&pek2ervSZzC8OI2vM#>O7_-nxeDZ)vG8X_!pr>euYUb|F`!Q7(A&nenb7TARUV z)GU`Ab2*blVW(ed^Bac;D%Tq;;Yv|!vBPunb-j4(#P>?{q0VF;-xYa=kK9Go$!~U= zO4_~5^{eVEMb9(%?CMFF#`PX_4BRs|+bsWADcmA?DZy+h*ne>VZZxR1Nbx)BPZ8PX z%MY~g7G-$kk732ehiI=1qH1kb1xn8Tqesg8nS=AW=4_Zhy_)GQ$F4dVb1i(jeQZ;> zO5_J@t3fDQ@ihg7+LNndA5?pUw~M|+7}yVZ+kplZ0j>Re`p#lb;SeRprd1x@h1TZZ zI`Q!N^U5q<5vR*@nrVz$#Qu$FL%OrYgXOUuo7GBz^Zu!Pxc(al=w7 z&y3EK6;6ycyAOJ3y>heDb9ZSxuy%xfDMw*Cg`j+E*QV>^R@;jJe zqVga+s^(>-peg*pZ%lqi)r+X-0Qjzb~tPu+#SVebXJMhDGwg9 zDjjgIZ$Q%<&?w8{H@yIra1-TT^_eexG%4~Sm(PFiD?69*-Xg&S2 z@HTvTfaDP*+yD*|&`zZ2xP&V}ZnmPy+V;PT0U6DPUqI-Lx)T z`_8N!7(&~Chzxd0l3y1W5Vt`d+8h8(o_5^>=A@Y*db_yD1B)|ha2{w*_Ed=z5eT_g 
zfJYluLkxs1F3_qIpr{4_PU`;*)t!M?Vx5Elpz;y8$sCpaF1P;gU%NntC<`nqrKxLr z-swpnProU1@6H9;>rb^09;fO7dG)&6;pd?yP|DMk@Sr97NE4Z#;D+6=LKni4Fo2MF zZha9*f)9L@x*e4XI5>Z5u3LWxcElERIh)@qNyk7N&xiea&D_k$bMyNPHXEVUoBaL) zRyXQ}_W-FHbh*&uK~JCvjWz~Q^}4kmH@HJ)vOfm&5PrxIPDdi^c*Ozb;+)cOkQ)9l zcXNpkYRy_I4;fz^Q*v!EHLvOl8DQ*yri54>ga<5eWaq>!N zmT-^}H%~lS)b2~f4fQh&HCQ}D!O$=zwKrw3l9EvSElInPo-;pAz?Z>x*z-gm?N zCO^c~?a10kCX^nNg*U1!`%<(LF=U3AGU$62clUu-+OdHYeblXr{EK#@AttG=+I-TK zGMn}wsPm4nRBqBEbMKJR3mQA;IL$7I94KfJqLo2+qh4wB=?5mgS(|m`&XelUg^@{K zU4C)v=<>!^ny^5Z2RH(ec4wWG&wJ_?n)EtqF>B0^(9RdT8eh8QpY2~tyRkt`(V9!C z)+4CAb07DbW-}+;S=BS_$k9+govgXghvV3mNC07AZ9OjAli19`-dt5S`vxv-I)apQ z=jB75zMp&HxT(mb0e_;8n;C@KKc&A9$>5PhB$2d??~YbJog>_Le{ffe^r2%6i?n(A zLbpMKJD*O=5?OyhJ&ajO!?`k-8}>zv)=e<1&-LutSXrv@D@A{9l*wEPd z8=l^A=T?)SDO75V?;5}z5-@adk{2_iVQ}6Y|y@KvnN+yEYzC-3mMowp+EbvJQSKu6Q=9BdU zTdL6pp19f9gh*xb^Nu^${CbVQ3#ydOjGx672?Te?iwi4J%)hpM&~J6qP4-RygGXl9 zTWVO9Z{FK7cou1O)$w7;pxB>7-Z{oCZrnMYTHd8K4vS8;3v@3zHe@{LS^ns*J<`%8 zGHS-@*v=7pl}8&#c5L^>5h{}W#Levw)B#`RK(T;Dx?!CXFT7@u#UMj)UGEK)T1vq zkk)SmmNfkBSM;dv?V(aOPggfdzXI?7>tc#D6+DyCx&$hi%SQ&VT=S$M1QkfAN3T%T0BJXUb3Q!pXQ=AwG zmiQzJS|0`c+uqScxjy9@)@{uA(x2;*<;P1kwp5#!C0$NM1eA;u#=9p8UQSoGrQ=t` zj@hGDsG!P#s!3&_39Fx-ODrrgCcLS+?EToaZ+;FVjKmR>_0KIYtlYJo2j;cK>ewex)HAJevVw|F7Z{?DfbEpKky1u~tGuG9rrF5Fi zi(QjRy~YaPgV9?!4)B-S_3K`@wCiD8CxpXM!WqKo9vI^vNd35hOZ&U2r1VJa@POHf z5n!vH&P?*GOc}gyt6T_k^wg=joS>*F8Xv7NG&VLz zdv{0|LHh9t=e*){@mdXSR}dQKhMJyfPmq^FAo9?A?GR{lJ~AY1Ai$Hr>TW z`37GX47sAM+_Q}Jy3czwj3s7PwRnFdF9hq>d!KM=xSx(2yEL#V(tG)ZLdf2+{o})k z<%dEO_9N$GqT)(p+lT3Vgf5-GSDd8EY`!Q&V-LaQ1&78>I_B0LZjPtju*)) z9*aVuT>M^`5=Q)J_nmZ9eqB65e{4d@`qLOUZBI#4D?<*{VX9Hl;S8?sJyV4mA-i5> z?v!0QuWVAGZ-dxpW^GL<<3=akG`T!#CkbuZsQP&$A$3bd9I9me%9 zPZ644L8BgaM^1f9YWG^aa^5yHE@(LO(Z}+9(JxzTpiwez%X-h?k~0_2XK;50PdfQm zx>dEG($L{lD|vLx_^701xM$c0j7-w!`&@QjC0Jrdy>qU5lVakR{5G~mdAQd%K6WUk za2vCh13-S9$j_-KmbVwHKlzei(2&3PyR(?RAe;d(o&DH=VZ)wWC+jI|x0S=GxvoL3 zg^$E2KU`>~sDG7AW5=G}b2_!4MxAFN`(3R*=fv+quN|8E@E7H90cpj4d 
zdY*X!pvo?wC(*ieUo-fv1eY*^ASvsIQhUxwNMwqDk{<}1vZZWnMllAeiZF-s+dn?H zOzv~u1iua)YJ$jmAwm8uc$7SbrcUJ)=s|1%?C=)k4$BvqEy&I3#E;pLsb z2OM*MuS-&9NmnT8=_FM?Fa`k|pl%07EdR^KLbCC@N#V1Lii+}LG+(MqhZe{yGB3}{ zd|x#M$x4UGttV;eMK1MVh&w!tb83!{z;Wmt@c)(Ixvc&3W=XN7eWTIb&_IE0{ubIe zGxGi9Ri!DBlO6Mr^(zg>f@1c~=}1GD@0V5Cbq^T)*LjF>I-1>EOTi}Ad;XXYJWry5*q5I0_> z*L3u^X8RS){R5qS@!XU%&UYGt&u*3X3va< zOS+ELQL;VMLF#pc&3cec;UQF6GjdbAZ?@u_KRq9Eur;M~HU=oOE|%55tW7Z|z>K$V zDa$0^C3${PKi$h({HEU$w42K+>m$VarbndH0PV||cqc2;%1wB8$_tfr(>7}YL3!9$ zfSS)APXNS;C)qw12T%9!&R=$=^&zQwmvxxN11ulw{s@oNIW;e#%L|Rz#1;Ky}N?ZuE=;&=(eyAM#8c&!suQ-a!LHoRGs8f|L@hL zw1<=E8%AxX5np1i7{|?DQTHGlZWw&s-16qU>Ou^uEUV=WEt0=bsU#)Z6{qY2HZ0 zOF>=FMM75lbyk*Os#buCSrquF$x4Iz1t>TgF+@=uG|*z{xy9=55zcS-_EoPR za>_=>4ZWu`JKV5@M|y5Klm_!27)rgg6b%mvnB$8;W$}3n=1nrCS@@Z8*GXgC8A;~& z?d2d1kF5$fch7p0yeq}g-VmLIF!m$|rfm!pF3Syc2ylvm;ocIcwb88O&84!FOq$4f zzxfc~uPOauI!^tkYDH!N!1r(Q=SRegIM>H*S^-V?{jbj`v9};?E^w&e3~Y7Q^X};%EPIbI&#K+3nzrmbPfc zZ?tuE-oxEz>ouHnm1tRy!WSqVzm(40MocXoMMSUU;=lToGD>?tI7Ygio2>J4=Y>D% zQEEji;n7A;sF%HNTOTIo6p_0(zqj^MYKf?;%>7CEw4`L`wR7MP)D7tk7pb?YAi@9; zF%&Bm$;c!U7?Ip`QRLDpbGNY9rCz{C~rY}SGh z58CSIihq7o+9X*89mYe+>%VC_;Et^=cp+CfFCA7C4us-}yfwXfAO#0;4-^XXsE2{y zHAsAAd(LjY18lQ2!3OXJzaUNji@Nb-WQ!&wb>}{9M^UT{=rKS+j}5LXyMAqj;$g{T z1r>SF(Wd};kh&MA2Q=ut7i8~4;X6~_Z?btVOI6z7qbap+jm~n0)X_cb&1}9(X#$tD zIXNdPMBQ^@)_ns=)urMQpi5H&n0=GzQRJsJ&W0t$zoGZi^aEq%<1k;p^Hu+(@wHNm8Dx| zl=m@`rbMsyR$8w!tc{_t%SwFE`@f&=#RcbfgtqA8DaR5~D4c5QlC+asdH&w9 zB}O?R|FZwpODZwQ#AB0Qix*FpX{|-gu~NKAQ-pR5KgeouN@RUj=ptyHR6RF^pDTPp ztlW>ovnaGh@frImde)#yJ__fS^8IvIgN9XZb1~!L`DWT z0ecHq7N>9;$*PLy>V6a_xN{AES{?HC`v-EiJT4?SB6iu;e$g-Ya=oheMjeg8is1Wa zoCii74_{3g_1k~fBlD9{M%DayA4inpbfNsFn0_3O%G&sJL?h%98XHn?XrF~Yll5nT zr8b>2*@?#xt{ROyX_vrK=5N3g&*D+$Da~C<3M)H9QBkCj{fRSqRf8wlEqCpL z(x+|FX0VRC6}P6x>FJbRoieSd@HB2%7jmz-fWqrvqxPs0r7W-@JW*z|{5|S1C_+@W zh1l*jze*eD)vHS)hX6INrkgo!@!S*e%jp$nCD+5AV+9jOT2Zj>Fg<~I(Y7h`Zl?Xr z{1epaLx;1@>ce18Iu&ZHC#dbP2xaS=^v073Y0C0dp;1V{3L;N)Y6mRE@BnzumU`@~ 
z-Ee6OTUQE*xSh_XS+fYS3TM|brZZglEJq+lo24-XwsiU@o#VwB zUW_F5bf!-*c$u>|2T-P$?I)M3{C?$O=$z_v7NIbeKs91(Vc>}NklD-ELz{iYr}w;DSY2fJ;mP0TEE z3IX{)9;kwM=XTRZ=0N*flkW4SQ+2 zho~C^LB}NhR=!d09q1?IA*r8g(Dx)8sk0lYAW;9JLVKm;w>AA?0C|GxlcJ=Ten)7y z#O_RDIdDPtDX^*6sQ~>?o&4%cKz!@3t*s1%CFd27{`ub4_V4$S4vpan(s+={OT6hB zpmU`H$7wUdzolW&OZ@mR`v*e+m4lk#-isg=S!m-0fc-!=U{lg$sbC>xd-(140|x#i zg=pD#P9GnD!9#`bVFXY7XBI@=rOVZZ)}s~iF}C*n)NIQMZ-W}Q@xFyO^C90fJZSaZ z<8E8G#_%kh=d;9mkpK#?vaa6E8L88$2kHQVd&CDf4GVqsne21nAHE^TJ$KDTxcp8~ zwW>Bt){nTMHo5_aKGtH(C7&G;PkVoFi6V<{xPOSWa<;gv7WB7gC3Yqw2F0&fG!|v@ zr$x$DM_j!1xpkwgD&nu!C?H8s4Pa;s@PZ#Vc|oe7+;4=S1{cvBZ%;aNs)N3`!u8Aa z-8!5ZYhHi0m4JsUH~N`erXbAf3&N-C+MUMa#YQuh)jne)PRItc!7(qlk}NafHr>w# z6<7>$Jx1?JH+F&Mo^tj9bGGkbX50PNM6X6PDtDaCT|O?g`_hcM_j>%`$?~~YbWGOw zM+J(4)(OoJPYRt?b?8$(Qs;|-7%R%WO<(w;LSO40Twn~odAgaE%CY=K+>C)+$vCBQ zbi0OmiBWsUk0(k4iXuvozh5!IWz@7)gl5wk(s)xf$E?sMCC6^WnlsUGlv%_wKOe8@ zyiEr=Cp$$;8SmBM@k7m3i$y&07x-m&+N)frLd5|>l0m4Sd#a$q;T4VY1r~3D3^l@Ho*0QN9bWqn%&{ley0qABM=;4hLZ=5@u!q_|?$GVt@MZ@g z+7am&W=fxDAp7fZXmr$P^q^L+&zAGY`VoQg)q2B;9#`*Kch0fssprW2s)$j_>$gm7 z>qt)dxLcXj)$F_G5oCtpOizxR^*F23;iNr_2s*K1iRVZ!O^?|((w7O+PUB|n2-FrTJiiTl##GDFOR z5r(()&M(Vg+KybqlGLi^Gh2+62K5oH6uu6fgY7;OnqN!^MHAqd?-_B}!RM{c`=~gHRBIv@*i=_w!FO%F%59o}8dF`a{v= zAL9p?K7nGkK6(*vg)c_A9V7SGsqfCFZuN0OgcZUWRz>f8pd_K zxiK{`m(lynaGF20OyNa~!XBRM{M@0jwbZ(3$~NM2I;%1t1UyLti2T1R003F{JMGPr zEPv09D#5KG=&W4?RSMGh3qbHX;K)HKSzs;+{9yrb!T{ECGA^@Faa+@GVg~@U56gga z)~s%VBo)t9nh>IhaY16tDeWi8t~DX^>;2O0+$QO~^?495^@G}|zg?Xnz?}pk_eB)w zTefVnkqD&MXu?iN?O%66T`y1S1mHn}Nf+Jm7iF;*&*v#>?sx+LBct>27n-QrDr+h= z`McsS-sqN$g`^K`t$Fq;=Siym(`jrS@l^y6>*1KyU{6!`81HPBX3SxcLTFf`EcE|^$KRhuTv za}EJ@b3i@zPm{Wp_<-ycFQ;tJLz^N61B7>rKJx89BXxLMuQ&r*qTXfg%!2U`V94NA z*WhrdIU|)!E(yC;-EBQ^ILl%=RK>T@k)*~31wdWFNd_pYM%TbLo8tG*cpGj~+B+o( zFN?t2x!rnSc)q8O^s>Y(lz*->f!Jt5yShQ-6sqN=Qyp)K^7&;}KRYQZyv)tX=+Hk^ zmf&WUZLM2gSW;kHYag4W?gPH=VVzgK0?sj$=34=e?vRXdVvYtAa`cYbQJ&nvUpnsD zXeGxq-E-rQdjV*+@MTW=B%D0`xE(l6SzMx^`*=D4t7TP2F($-+!lKLKHrh>)UovaO 
z-Xzo$g4iUSkH`#vdVeP#wk2kQvPfQjBm!jiPC_v+(K&k1)W9t1dO#VgWY45emqog> zBFXI5hbaspF+DVEJd<0Ln?9eK#%joZ?6d;3GIR99JimEXWdTf@I7zV160~V(=g-mU z;}l*FZ^E+1)|FPMeDN-EQbcD=jz0-k;k4A*8oksfvzdKk|wQX%?c#ZudTQ)nFmC52|KAP%^MPa=u zbarPUkz_sKXe0t14SJrfYz%wY@y3+yFnISwBh0O2TNa_T629;zc{HoCuj!%~79duc z7oOvB-fSYc!A(mZg}M-R_=f5yJdbcN$2ax32u(OjWaJTke?k%S6=tq-^iQ}OS4IoaXH$HQy5@u>Y1 z&I+V2L8jYM>}^Yx$-b1k6OLNz&0ZpEznCiRBp$i2P<^l1zfr2gH^cl9E&h9K$s`>}c%if1oiD=`PjD5jG_m&o)Kx|ZSEenXg*#vi1>P<{imAMZ(k;w5r z7Osn?JMMXXtm6uSMX&M-cL~@)0)?=HI|f5Z>;>^&ELnfo60L6_W|Z^+{v(PkayrE? zv3ce)_|bbgcXvyt%F#t~uUB?y%kg(~@?5mmub}Ebg#9~dB?f6*Fox+_XET_0xMxiZ?YQIhxJR-!t@7`9|azEwO6r%mx!Rw0!xd!3`Y3*TLu{6z1E3L zUM9D92xuvKUsATDysC7)x3dx*#f7KWH|d*ko;aH~W__v>`#(*s>o~M?w4eTWlC-J4 zScQZXpPh0UMZR8@BEXvtiU@c3`}S&85sbPo6DhBRO5XbVB9}RY`!^A4U#x}Y5nfT_ z$x&|c>wV7Nt|;G`mA}5>pYzFRjoe_})KVB>zr*~{y4j`N+rK=&mMpkgU;JEIXgv8j zqff00>#Z6jki;?>7s;eM|28QJOb$V_(o|`{Z`b@C`GB9}H4wysYxz@1I=RWlXdQsB zzRQ9B1+t}rY~1s?+~d>djn+y3c1Q--lOXX@@1gw)zwvpAUdaZ;#E)PLdSQ3^0GKu9 zIj8}ENuJk1(Kc^^do$z#ZMFeSbI=KZlR&Q{iL{!lMV99y7}VlO8<%U+LBMra)_NXH zWC*P~A+t`o3CcRi+Og`rftJdSzr6@}^cJx9 z|I^k$nvis!E*Rc`(+sHpb4<~v>YcQ?N!XcT=C2)h3yzw zT-n9-^U&5V$T_vF} z&5c|4(~?VR;nPuruAbAJhXm7l3-OiOml`odoz@|9{v1pe#`!ZEXxk%pmU+ecj>m{# zsCim&@ge~g81}}F;m_bdAe<8w7}*uPpCX{KJvir}XT3b+o)4JQnIms(6~nLlR+bva zhbSLTo$b6;I~b=fybTDT+enHJ}1c-q&f2ozRD!ui12`1=IR{%=lG~+BD8N4&$xP zzxr)4;bq1VjXFyY-khy&OFU766+QpU^Q>6J8`aKE;q;)XfWq*bH2syW zpJ4WN>4s9`(HSLg1AX5T<$U$}e;}P2zu;=0OAC)u{()-D(#jm(D3eH{Cj~B8a@G^L zxuC=Whcd=`74!Tj^#J`^{_F8^Sh2#x$x4@&>cGi#9)c8)pkOQDErV`j1i~=#dEX$y zV1panI%8HKRuo0^v>p}Wi_g$#lflL~g7BVR@YewmqbRq-{DKZ$P@u6dbWA}xIs+Xf;{W4Dl>k*IhST!mn8a-=R!&iPM z1rZW+^MS(@GX8$ElbJ33Z&)byJNng{LBX&dt<|%D_u62-h9xt}vMaoqb9O`bg~<1O z{jI%Z5alb~)~T)ocgu4qzs^SPFt(o0`mF6=15j(6H#&^%E#%Fcnc62IK z>UBDM*#50NthV_9Ij{zBmKdQ0GY{Bcy@>I>N*^NZ4i|mdz^P4&G+PAvY0P4i`RXzoaP186+&pCPZTk;q@ zf^V=sI+OI`)&%)S##%lizD8;I^=3E2wtI_eM@x}9~-8L%J(c;Zn}%{DE6$oTKf;AvDEo0zb~s@VF^CG 
zjZ-gz5zzjHhSf`(`ww~3c;(;wegJ?gzgMrVhbJ+UQ?&UX0T0xB7R)rVx@nUQz*A&k z7(;N-e{eW_$%vMp&fx0yC)1mxpnmqy)o6RID znESk79CBFw96_RHMy@+Y4{V6)ZdT{sAR0ZcSd^t_(O0DJW!l& z_Og)!T??epe1R$l$!V{>ZKDUtHT-P@rpf`MCX8|uLoz?_ABr-1+Mol;0}t@O8sO;z zratiD|G%S`-^Y?pT{zEu8?%1vTTtyMcY?{}dqXwXQ5z4KNkzU?gxNqBsbF>l8E29K zy{}{pm(SL*to=p-Wrcqr_poV!1KxT|&iqJUhCzv_-5?Too8AyEUrip!Qfyap*o^u5 zy1SVZYM|r*I-AavXT*s;Ga{Y{KGc;NLKqrF^uN+4I(00Jbkz5Kh>Ms349V~_7tI~- zc=wZ8BmBp&ZgQ>78RW6xG#CsGWQ9sjZO?G;c_Ez%a@`#6Ncm*kTVyPOzp)hwp%>)0FTD3xb` z1Mc6%R3&^ADUT^oAD22~}H~`i5;BYz!+2aD42F?%hfI;jB+sJZ3xn?EfM?@QJZH8pC6sIDeY=@cr=4F zG<1ef>WQ*IDYtso&6x~D5VPGEBSz!Qf(99l=XiU@3FgO)v7-Wl)3_;RJ851=ajN(A z0j#mM@b0T8*{~Ih$$y}}4=9WDp5nXMb4O_WT_)MasAzIg%f*;!4vDznSnJMXa|G3> z@#7TT1v|1qU!pVGWl;S+*O2wdGUVop(Og{qZWkh`C!lw@xhZ#gG`w>&oFjgL-Jy!_VA-35vq?9LHI11=Vv@#>lyEB#@15E zK0Zb%3pXWEB7n(H;W+K`x@kJQWJ_C)1R})$b&>`~wwZ1-K^m0w9LL++2$oF%-Y-#c-h#&P8|&O)O&+aUAso zr3v;WLeENFrgr~b!PxUHm$-EB=Vgr=*qT~++1C-4i+|E#;qW1%e8QF>txWnO)qJf|Ydx*y}7g?oN*Ztz%1_r~M#cW{N5 z6b7n(*vC0415fS!#=XpMR$jFa73a>7-{=#)Yu|sRa4oVom7?8NUOil~)*IAat&=rB zATM-hHK5@;ryI$$>s51z^sH7aW@4tr+gz6ZgqIbaVNb~xq}17+&Mb$gKUTP5>f)aL z1n<;Q@*|AVi1r`#s*60-M#>}AA$1n*+zJUJqFxOP*YsH-E(3#!TwkNn!LMd zEvuG$TlK$wj?2SF^Yof5lL;16;Mkdpzy7oOL!VM{{*Bd-N28`ppZ+B)L9NSB(N z6v#@OPRW56NjSzx9CSi#xYW2phOjG`zt{aem{Fo}pCAc|UoeL&4^$xehBJ^3W!s&K zP}}2P>~Eqs+6uPsmj^hGaHTwa8wBNDEAY~I1L8_w}+yA>3kNXtqB+V zEa|lDa0ny*$%3*0y~7(+V^k!d-ws>eW>cU|fHw=l%_tD?rGFrB=|wzH;1eqlzYW3s zf!vMV6+yylvD;gr@nh4QE<3;skt|@+EGo>}ed6GTdmm=Qd%{pfQwuPpC`FGNgn@XM zCI;9f7bI_}C;xvJ9H1lxz#;56NJ(%Ecn)Wl9Bi+u2p@z51VdqRXi(R|lrvu_K!%~4 zUl*^z7TMRgZm9eS{N4w5v!%dQ3Pc|OAc5modERQ7AJpDO7*d?By%7J!pfBd>)hPD` z)4<|7Ud=#9O&BOq(D>D?ykjV_W}_RAkrO@5{Uv?-Z&AsQ5sRFx@@Pi(TiptrBLVj< z7peZ{Y%(dP-b89Z_a0h-5%JjX!aYaAlRm?~6txIOJ4%kTIMU;*J@k2~+vxeLA!-$V zGD(!Il{eGugl@a%u2qF|p|?As=cGhwkm>qy`@&^}Bi z{lH^mBJLFPr+r2U#Nd~>yq&?-5xeMovlA0v`0F-4az|FY(_?c?r&gxPY#gWG0jOjftzylqg;w0T#LNxMmY z_43|?VouxlfB;AIqrkd$@0a72N~7=Jv9GrG5qvUDi|L_73oYLVHLBmArUxo>uDIT~ 
zc9(+)@jc0@agmm4s^YqJo_NxGv{7$g$LO>H-un!9IE$@Tlr78aBUsb8UU}iYU1KK| z6>CDVZ>(}PZz)em=}8prF=UJso1~>HzO_0fp&!y1rh*5umYuict_6ozWZe+o-+S&M zYoU#(K)aruzdECF+}l_H-uO!&-Drlvz>Mq0Ik2u{91b7Xgm!d0 z$oD{^lq8r6IJq5|h=~d-Yie8vZL<9vEgs5-TR0M_iA2J~!6toU!5&b+Q3TT74#Nl~ zG6JCt_Yaq^V<4Q&!SgJDdu-G>;DY~I!lJuB%V6Mb%x{o?&&I&ohDPnS6C0+#QWCJ^ zLzMFr*Go1RDXJ3lMVn^rVFxfvat^qjkj<31n;%(ghysa?*)e(bT7l4bM6oM9g3^4+jUD%V=O9i36NRdj)A_ampw;MxGlz?Of zB%1UZUH~tE++-ac9kD_((De~Pk4)G>87WYYfq{^4)+IO-Cn(diQB?uI0`Ps|lq^ue zIJ^Nl26p^!KM;liUjd}ljRSldt_^-6KxVsw!$H!=A1X^;7dQuf`W^~Y&SJ$O_h99d zK$2U+@cgAhBvU9A?vy4pFIBMV^?rEb&GSW+injA=#P|TFTwFxi1tM=L<_l-xbZwu7 z5*Uc=m>_qWyOgI|KegOqw*yHf6|J6sFr!c(^ggyy5`S2~d79bd63eM)ZK_DM4yXhF z9s{Y&66uP^!D*J_eUkBLnTclChDlcr;wQ;Zd|=-sVd=;nsm=`Ye8=e0^F1dI-;;c_ z=T{VK;Q5>Mn$>{s+DAz5`#I&t=~MibEzisqmYd%fj-$}pj&{3yivK{56v{1JhPu}u z1-j7>4UdUC8nwH(OR9GCDFcCp*Qw7?wnV(sNc-(yfflyXJ4|vG&avYwG<)yO?0oly z%H+D$-lJvHMY2g!Q<9M@?q~Gvjh>=cb-vv)YSa#6=BK@T6s?gysr@M3cGhFghmLkR zaZE4HUK~HMo5UAyM#WtzmW-i=6sK4jPVCN_?>IqHKcQ#dMm`a9S%GsP{FbD>#JdX5 zAyqfez=+4==d7-^Z|RfP{Gyb%k83~}Vm#J%kGx12iuP!2)zTl9Dq>_Gy^ljb{IGL7 ztDMW7SHBz4E;KWjinGv~;BqZR;!n_r!^KHql9{F~e#QjJID+xalde3`e>BI9 zGsQ?G>4*+-01ra5gN~1cuebEtRN_6G3w{(b-gG?fse6}MZJ?F65W4SGZWM?yEW%B^_ zn!4Vn>|um4fqZnnx5>bkOFo6L(qgP!@Z4*7^N4d1-S!^hjCzI@-9V(mRlBzdd3hE$ zL1g4C3H>$Ih zVu4_Yu&ZsUGRmcw_Piyhxh&n*+@p^$RKA*f;GW4;6f2;fbnhfPYix00juS}B;nrfC zcYyKE;vL3U?ZOwGESna8xXiD-Mafl8uChYbW63|r2TV5y-|Zj{Fsc^D^KwxJ?n@lR zTr?>;fCI)xe8(8>q*`S1blvKc>*V6)A1JweWmUlE_ya9#PlT+>GjcHK(*|}@M&2-<|P*j61p4LRJ%|EnfVh6D_WK z*3YH+#zA82=-EVyEuqp;RU;q!pg6{{x^vSo`E+Y3IKlb*ZO(${C_WnHvPSo z0cw>DAxJULYWx(CWZVkyk(d8L9osi{{E%u9MA175!)lA>HVeK6lhemjVVFH%F)*wT zCP^d;<9*C{gY8?J*Z@!%ycgyW2CcCKNvk)&1$VxY1*}@J#I8^9o|<=R4~1SLS}3p2 zQ-I0ormFx$hx94%Bnlc*{3Z|mzTN=ATDDLi71$L4nxNQc0M*}qCiDt=a#Day5yA*z z4C723FwpKu3b4`wUlKx!76Cg%P;$V55=N*pD6=#(9RNQWAUJR$7AAyK!wPNabN~c= zBf$SXfR};wH_(GY6p#kM)QD3+1<|~$9cBhpqJawhZviqD8mzn-1Kn!a)Mx?>wjp5w z+J40+u$NF!8!AO%_hA2M3exUj_E2WUBp>g*@4^{MaxWaicjm?9wwt^7bjJCX 
zc(L9_uyrKGdKoLtcw4U=o!FJU48N!5J}oJSx}^!ct}Ri8M|(-vS}uk0JNpsc5(CG& z@q2^Kjhmj^jLp|IrJPH$%QTXE3TEAYCYfaO-r7}KC|uJus^i_s@hfJMiaJJWMmkGc z-%Ke_gm*FBhPrV??vNh6VNk!TaZWz5I2+Z&E?TX33f7*ny?jrfYri$E`CgWn)}@+( z;9Mtqt>tfT&yh!-u_2!-oZYCjw&n->hEt{FdQxu5CngM`?(r-ish5Pcm6A(Gm{qum zGyQro5#{KSj8S4Al^gb6y@>nMPg&ui(N&ytqMxd72KgX!+?Up4?WGjRpQ}U342AdF zME334eXKum)OV|sVWxL38l6Ti9X3awsWwWk*9v*HSJUg{L7fcKTM~WIDdFRuK$OT# z9LsnWnD7%lUx(8CT#a?=Jg;-#M(*i3I-&w?)P!!2a^KzglbD*uyN$EDqLAd6j2g&gdA6Aao{fBV5qOz!?pa> zbn3A{zv5m_ZTaPT*`k5=HutX|G};@qJKdP-F@B`#Y8u&3)lPToExEiKoGLk; zXlnx4+D~Po0;Y);iqCR>t!-NvkabzF!p#i3uIZrhL_%|UbIOUS>^A#*^;V3WmzJh? z&PH(l85s&HG#w?!YoGQ!MN6r**R~1soG_~l{&tgFtwe4uLe8cQ6Wp?=^kOR-`?xXW zFnSSw+++!KAr-Bxu2?w-+`EQfD2P@I)xq->W z?B`w2b^G``V%O1zG2cIE?XU}AjyU$mrgsA`L(^MxS6H0bG(a{i;Z1wiM*s<>+@p2A z(qEOPabDXkQF_E3-uDTPz3c=9dc2Rt2+@((QT1Nd7H!|+r~GfV(;w&lQrH-OUp(FmzA!P7}l&djhhLxvbHnF zZE#E0ZarJ8n0kB1vpE4sAAMij=Cf+Sp)yxbbC=_b%U8YJ zLyYL=J~P)%zpytOJ*Vp%2r?Y@qHX5y^spdh^KBUr3t=07(JyH5<;LybwW$F# zRyJ@5NJd5mI(Yaqs6%Y^sstMTJ`3Ij?3LlLMNRlYQPArziov`Avwe_?pcLf#8u;`0 zgGLyb-dltNG(|%5#uB^ZVnjon;KSPm!iEKj6ai+ z@bFy^;04>oK7sc}S3}(&w-rD-?=)Tlm;uabh=Cpa84Mg!U2hPh?ELpuAQ<>F6uPto znF~?8*c7pI`;YEg5#NbPPjduXO9z?(l}b<>D$t-v5_dbu*os0yLI)2*Z{!_ieEbV` z-y(@#6ctUrq?xQq*^HzjMFC?M4#pJ!-}wRpzl+dEscBF#BxmZvsmKgnN+_Em;4cb( zPeot_3~f`{4*Yo_r<0eYLMl}JRfXQ1FceLIAA-M7*}U((#sde>BXOeR{W;AN`j95c)1601 zF4PDnjeUwU{md~~v5q_z))Mf{Q7QJ9fm08QoS;?B+8IbX;Mm^BXD=rCD3jC&@&Y3?NHPOMQceNc-c7P^$MH^*csW`XHUPJMa1oIoCuyAI+q_3U*`bm_3}Hl8)AOGOk2l!A{%WZUvfq z(r9Y^_G#~<`P@L6gzAwY@>~GDZcQt@WA@9ut@de&$E}%qX%SlM)`+lOFBxA9NDFGC zMf0tw=*!Rk-67UWMntqV0Ex?%cVC!nzOqdzq19y#B}Yt&e{dsy5FLm23Me#UHk1-g zc+zr#99B^ZR$AeCo7$!mV7yK6rRv>|dqsbsBj^)eh^KsVh?$RwvfAWs8o{m2Ke=YS zw(x*Lp0h#+fs3ug$Ft_RXG)gc!=xpt=>gf9RpB>pB1b1&KkS{~7kHA^T-p@2eN&|b z_P!hSltM|Qs-n_U5fh#JBPjOau!!O>T94ansN@4 zf^r@&&hh@~izU3wDQ?^81V`3D*#9p)< zOr45%D<0RmC}&nERhvO@_(Z?7{>nH9X|KYY-0MGcU81(t7=KXI04;6YWJrHs@*ypM z{Trgixv1U^*+oHe)P1ED9x3L9;(i*bA6J?h(C=9KzC84^y?yEZ#K6TJqe19Sn?KO~ 
zH4{fq?$9@8_1p=$p!vSC(^_4qQ;ven>SRBY-$L0Q*UM7*krcNU586|?Y#k}JSks^` z3p;|{h1@+}U-x}EQu-q@aJer!^CqJI96PjhfK$Y-4MMdoo_=n}kK+!V$jey;gX)nB z=$N{a-17;`WsjF@f%VAmSjO-3w6T4wt%qu;^way+^el^a1Ya>|87Ug?4S=yRAgqB% z0=zUpN9U6mr2x8E1&b6Gq=3f14G_ct?y{Rzkl{c)9SOe#J0<;(fZ%0#0VoQ95A$Xf z2of$({#XmAEy4lD40<`>{Qr$!X&`t*l3?`Nc%x$^&H?5ezg|-GF3nSWRpO?!3aS?`?Q?jL%Ln z%o9WLQPcLVg?jq<+I2R04sU`LYcIF!Z_h0!w1=%GB)xx1IVH+^ zl%(Cf{|mtzZ5Z)@r$J8}=CBZ=HS{p@uZY>a;X~<8c$c|ng#rG_8G3!Itp4?rRB9RL z0ylE1&(^@ZbSY#~mbV}mkw4c>Td{R|oA~^dr2U#{#HWY~Ai)`(Z9r&!D@*LH!rHlV z0y+H?5-khdc7s(vJkiV1B3&KFih^dtrWNOaEA1lsbnTwS}n9 z!}50QEOmEFDMS3V8b%vj55vk55&dDoV-sfpJD(Knov?;lVsktXFVdzWH)$h)0onW?Ik29}>L5rI6E6|0E% z6l>WS7|toqizbGHx`(tdSX+mt7iH9;jFYS@n$V|3$M@sL$$C0CL!$8&TJERIZEC?k zt!zsD1hhg&j2hay5JP7&9&YKFaLcqbNm;l84qk0BXNy}(4K}ICxn;0&G*oZA$5E@i zvhJI|Wi}Z;QT0|y&)4p%mLf-Xcn;pLG^>`(@RLluIY2lNHH09~Rv6OJ&Kk<{lI*>8 z#8b3a&VD8i&&Ev2OE*LAatMLypLqBa)I9eN%4KHbEN`@5(6ts@qO{i3v(quBwD_sG*?i?}=qU<3TjIz?zPAHcW(CC#C1 z!YsxKQ&6A8ffEyma1?M7JV5~gZX@nrhk*Kogbd8A2+}+wPAP(Cifn9|!HhIm0Bt>V zbc_Ox$jonlP$UJm?LzI~v?x6M5e9d{PS|>?nwI1i{B2%5G9^pJh)F(7zqmj-K98a@B_39H>-T{l|}*N1>Z!0 zUL)Q?D%CFmLcFQ#GbEn>Qo`MKUPba%1$e^|NJ!w|@hzJu9j_obC=x)cOXwkn#k%(b z64F<`B&CVe6IKBYp4&jm6HvF{jmauSfIS7u5E~UyI1*5gK#M-@e{`{dI`zh9|I4BR z?Hzy%XaWF4tO|(q34PnFx^QV*f$$U9oo<#n43>%!z$hmbYyp#hka8l$G{lOOeAe)3m^k=vt*9w)kuQQp03i$XWg^#223*=|nB=7^K&XcC>}!Y14FK~DXCNoyb<{9Gs%9IG!!|n` z^SSHTP8u^ugl~>NUkO}H*7chUraMXhsCG;}lrrXddlgOWYhCM1>kKaOLNcnW_fki) z?xQ5~_u2vl(U2p%8I8^NcWG4*a&)*nYE|oaMr=B_tHrp+h@&@OeJ3%Xf4{B~v1m0m z;+l51tZf(`Z$NPBc=%p{&d0||_Gq$!AfDsUH#Bj&!%;qpy_eC9-Xp7Cj*1*j80^ox z+_yd_DTcB|Ew{v3FIQHykr(IUf|A4VeW%a4O*cDnxmLWl!o6WD_)cHxCIfF$oDmVV z;vF3!{zUum?ug*zW>=e!<>-?PfbkY((kqE~P@94~TweFm`5eFE zZ3Li^FKv>#(>n=`+Gc5TBsR5|*JTp2#){IF>Ga4NWz3g3s}AUMKV-x@$^0nd1U?(j zD#s#9Y~VDXdI*KDo=;om9TI(ze#-aqrrOBi zSN-fEmuKrr2pnuTAo`CbmP;yEy4Z18zXWM3)_ro9vdH-&KI1Qot$AgL?Dkw@sMr$T zyUdNs%zg|nB`({VzvGN*zjrMWL4_&be^okI!$>lHuI3EJ;`@xqoG@BrR~56d1huQu 
zD!@TkLfGS!37rmC$hf2GmwKG0=vIYeWDk~|LM~Cc9k<)Z-`6fjECW$QWHDi+jXhm= zIQ>{JmP1-Tc;Kzn8~31-qr!~3*3iJOy^kK^hs`eG-!sNT&Zu_EsL^}!||B~p_!NKDAhGc1Yqxl6UO5L{*{_6+i!|B%Ab@TT1g^Bq@yze zbE;!#P0}<;qZga22!V6^9<%@i+xFt!>>DO-JUEtPXL2A-qw&mDRa>r)m9oxEoP>xZ zD%XI?_6wM2(C8-un$eO7y%hukv1hNXo9`Ifh|Nr6k(Wjf4yy(8NV_a%d!}{iV1IV}gxLw#vKw-h_tDXr$UUjw((t$le2GC& z@O`whO*g+p&*GRm5biCZNq%Exe|UP{qOs)3dOpyVd|P3G6aIh zCoBXR7QmsC;8maAC<+{p!GLYNhnuYaralOM2Bd}kc`nE4F%L4KSz%biw$M<`0Fr~l zZ}`toa6n%&yNC`ozAl7;;1f+>Wep+e7b;57)=#1G0`daKH)(@U0v-LE{V=XqVRRU* zMG$<hGvH*?*SBa_s+**~QKyWbJ`IfA9Q1x1iiPWFl~-%^wzgz5prbU|s-Nt=qmrTk&Vam2Zjde4+a6-}BZ zgWFTPn+)v7Q@6929G$H$!UHxtJf=R=`-nfb8SMJbJXhxwk~TyREnOV%`vb*Q&gXTW z4`7s|tLwXK%#Ea_-i1zLO-0;Su()we_UVqEYfJ6VBxApo`dA)AWRW;`xajqx9S;Y* zne>EXWAe`mtY%Epd3ZeEAB>x!@z{jNWYcsa5S^}g|9PrU-*KUOA}s2bO}KHZeaXV| zYJ{J6t~6j!0v>=!NOY2H%1YMVk@fb;F>+NWEqZJ=8%z4Vk>GSjADhwHfe&3w(6_o$cYiXly zSvpVQeI38=wuIwH2TZQpqEOjJ+Tq%+uK88eRt+Vg<#nT!cgwuir3wIJ+8VvTI9ZaX z$VO+{m2z!a;oM=y#24AlC+|>O({b-+P7x`O_EqGQRKxK)mFd;olbsp;u0rg6w-hzN zK>LY|@4?Zbe;_lLKENU?Zw;;t_@JAR;@y2iDw=dEFuR6Vwx!Q0XYx|bV|Li-_ArrC zA@B7@4w@%jG3Y_oq0uWEl0lT4&EE}!%akV-T|4wpCi;()*^b9lD~VOTHNE%6A}7;F zWe{gN0@Fi&@Q>es;lo2dtjwpv+W_Br8MSrn5hmf(nt zV94!?A8*2n^r(!>HN05C$&6DqiDW?%=@olM9(Uwy{jq@h{_gDrM4UlP^_!H???lO6 z_g44;w4CM5CRWzI;vO$0QB@3)JQ(8VtgnrN4J__sa{c~}iT!;BzQ}8SQyg|)6@E_t zEx*uX@%@4FW8|#+k~>C5Sw;2gor_VLQ+$H`xWSRhi#G$7act6T^TIycFhWNWmAv*z zDRsu{%9EX`Zr-pbRrr6Li~zV#zb`#k##bJ3EB690Z?IbNREN1WJGu^U;zaxvHmo+h zw+@?P)GEeAQ{Nb*L3P|Fy>gumeoqmSKmKeNU*DFC` z@uP_73ztfVd_y`e+@yW}vG)!d-!bY_ux5~8!S6QPXe&j+*y6ATxaK8DlLC1`jA0gN-uzP;TVvWAoxcJ zf;R6C-LOT0L4W01gtUf`AkB16ikR}{HpG6dW z*roz)!Z^U-4FxLx+p00pZ(|`qZrY~aD+>S+U4ID~8QejT41q<%#%}<+beDc|;X%bxjb+pxqnV;4?^w=yX~>*+Jc=$J35WXYI_ z5`#{~Y5P}!%%_@Pc7SH_QGSPKLf_o}FZq|BtR`sFcYL3{7CJ`99*$VA80k8=!_-mI z>z}OL`xctn!=2_@$)}mKU!?t+v&G&QN^xG+Us92L$z2(dy`)KWWU$WuFUlu6jf0t0 z+e|1k` zz;~_wHCv+Hp0S<2+D;3c@|SKFKZf%cSMX#-PF4vu+s=!1K%YzGO%2?B-MTlcdHP7K zOni|4(`V?pvLa^nGC6?F-(}He^w_k7zFJbE|1+`P#AgM~&z~DW9U$*!PIavyIkYMp 
zC#>Z$&S-=wl0jpp_8%EbvMcfCkPu&S`h6CPw7}wJlHwG1!0S78yra9At;Ozb-CEVv zUMMrYyF4Wdv3PfJlrc(jQ5?vNC_GRWz%`dW*6PeivDAL^u6&D=^96_I3%jQ{2ES|Y zwq8oa@%&!3)cUSEnRVt&Wxw5q9SUGP@-!u^M<`>}jrKa?!9WPkINN-O zX#ktXDgLnueY(rsm^Q=qwV*ghLs zVknaLu4K!Hy`GcsyPZ-dHgB;JN|fN&Ino-a>sFlCeMTcX+12ykp|O6ty&(fZ=bJoB zEeqxnQT9-WlQPOM@X3lhDM)*+-P%$rt%L3U{3{+ehcFBrU77_8vTlWS~hvoGV7g_kJEVi6m*6rhy&VGp+s8eO7mC}z!)fyA} zXFI+Y_1h572BCAcmmQyUeeLMcQ0+UTg0FB{cTdZIf5Mg4#!ZhM(Iej=2PwA8kCPpb zUoTwrd8t6JX^&`eOP9)Sb|k*vTgLeV4Fxxd$qvnlDWA%2?qAq1?s10L?qtr$JE&Tr z0afhnd`@=WC-(DqMq0p;qpA=Mv2C9i+Q9B|m+!J*T6Samfj%kq6UR!z(y9y7PlXt{ z=C}BI-PC*WRn(nHZi8+ay1v~0{4(>3^+mmS50M@8Qz7>bD>uG1dNzXKUr7}V&h`+K z&IriS4i^`cK8d65(UMMNow*Yk{>&}}oM(IsM|-EHeVFN2Yr126Etua{>7AM$k8OQx zesp;N#g27avGlX6ihr*8$?|~u_mHWQD!!gzpY9)sU6L<#?|ec!ieP^Z7&KhvkoU6u z+Q%bLbINjB+Yp0s31hVxYdG`=sfyO3Uk~ZW^e3R*p|wXjDk{aHAlf|vT`D+;j4%~Q za2Bvf8$cA?^dgc1X1FuAyuFCi1LasQgI|mPatz` zT%^r<2W`VB*<2SDq^Oor1oMv<-?^~WM4(IPW$LOX3!?9On4n<-{K8Ex+CQkdxK(9~ zH#7+Qyj9>gr1x$Jmc4!9M8ai>ZTkz(ZBsb|iH!YwdZ7|7>><*vPjRRU!J4+}F*Ui%m54ZLYR|AmTH)^`kr(7%>FHAqC*G z30>K0OW9@F-O^6MAUN_L@VOPx3geb)o z2hD&j1-SD6zciIF0@leJpnO4Ix(u`kZC2SX4n9i(6e=pLM36ju+#kH?;~S8bq4bX0 z2)$j-A2gH7i@9b|w!d!ao*%d~e|2%eC^E^IksM(*JJoCyURK&?n54)vbuoIIE6rSL z4{NClZz^>E1AUrp85^p#PwjOyz_Il73|#&|+N}Yxlf~w`X}pNSL~2$EbGRvYAkMz= zSH#)!Syq=>$K5~!`!-IwLXx`{Gpm^S%y7Ijt6DsAn!%(WP45hfq0i=PqtQ&}yX=Ps z+u3I-ozG?aa2} z>#zRmmTR7`NG;*pw`7qb46J%NiwgmyVkWf&=#vha9ge`db0&==V}^sT9U+GhI-JI* zS%DK-zG-U)XnUaHot0Y~*@yMJiTwg#aV)-hI@5k`BsR>jJ@j3wVGUrN^mee!pIl&B zbo>fwS}TqXcr_vUvhFggl@2Bc+uZ0IWyWj#QYwyIUk zb2-pYt^R23_-qzyP8YC~;vN&Fl}$R(*_FPIEf=DGkZQoJAxnw6$|BDXaH$UszsSbl z*;OG&%?f-g$?bgG>*W@6x^T>kW5r%CTa04urFZQcnad`9J8QHTm?IJ#Xa7J|C5mBF zeK~VyW8VYi-g?2V(>}-4gQTk5Er#^Y8DDD)X*%27&1T6BjPDtG>z~HRn+dq7C&{ec zZ5Md@a&SZE_yI$8jq&KUELzDVqj780;hnjGk6fvBSC6Pm1~Q&KqV~L%U(flTX^V2X zqR3Ha&Z!<(BcLa=GdrfZK+NaL*AAgvCF0aqixc zqjA%szu#FqWITPm{Zrp+B9LI5e{8+Hi5wHg3GeNEH=S{pz-SA_mD=F%4)<_7NjiyB z&!svX11Ds6XmSo1T8%S25+yH4JsGRLJF8Xlu%AIt_p98imd4FLT7TovnVUxiXX%b(&y&_MZ8 
z5J5(6M*(jYlD@zlDa%EB&D>UQVH=YPHu%{aVi#}+%{jtY_@&^<#fS#hUB*Gds&e&) z#OiT|V4%61Z$1lIhsXQY;@yil>a)r3(KK)0r}9mX4as6Vv93cs9f~RQl_=@fq15;P zJVVD&7w01<^@!53d;W1ocGH@w?DmiCYQO7EYSMO5@J?AJ2NOqtnf5eE*2iga&F5;E zL6~uzg}uej>c|g9bq7vK8#qb)1e$P>U+OiaEOqZJqEPyLmuNsTx3$nsF?@TK@CNe4 zH~^gu$N%I#Aeb6v0}X!y!CF<|;bP%n%(h_jo8T6l33O;AQV4@1KqiEZT{$ELw*1up z*s73FHvz&3ew2Bwc^+~1QW8K%0L}}8grGgb5ajs;w6(y6tT+cj7}g7yJ5t5FW-WC<0=t>~VCKK+7EGwsfbLajTWJ28SzpW&Y_ zLz`z|>RVt1PlPLe=di@O!naio(TzbW+e87gdX$}8LvI5xh4R{b$fv*&KrbI95>9|O zlM}!|yBY-ZDB@slVgwQgmcQqdUJDm&`U>sBk`3j-6rCodX}1kP-H_(doekm&0xI@U z!cX59i$G0L^);jir^1!Q;T$Q@S_5^&6-QY}Ru}{BZus;7Et8G2LXb3x0>L$fMZp8G z3jQ7eK=j{I*|;%Q6_S7poK_K{;0yt7QfRalv*d4>EGVkiqk99G7SD=qmV!0FUWM!i zNf=xYgC++!-8*-WvBS>Y`QgMnZ`MtHdM(6f+}z0Ce$vapeyuX@9~VQm;*{-__kCw> zQ+C1ta&rFhP~;?9t2aMq$vsH&WKcT@y}kKoxEDgF`e+*#4eTu70nVVem5pUi`^_e8 zO>r;I`<7qKTqU`E_BbF}Ggn`@GcSO{&%AKAxlqp_I#0ID>sN&IO2XC~hEshPJfGX6 zJckj(JM-?LWGrvw)z>|=Yi*<^llO&QjUUQSx^-{a zo2f>6PJ_0rq|EGko)+28&^du#eT z0$p@>#5%^br%iapMrXK}{wS`RT^Nu{sw|Obe>ZYIu%9^bgnJ0Lzqk1zk2mzX{f?s~ z%Ftq_^W7A?B%XC<24Gi39IdLO>22pClkbeoJy(m^CBg{1mQKVyAy>B&&xD z{(-g-Pcg?C=?ZyP)`ey#5Jl-CdNCRqe)OVBe_jQ8f3@w1zPf6m7`LHFEdpg-;b|G# zg5(ZnJ*@oa$j3(Q<{J(lb3RomYtnZL-*VmJRw->*r|ZfIQmL6yeP5O{9ML2@jLaiS zszu#AE`2MZyH${*A;M8; zj!n)vLS&&-HEwj4XQ$L7c0_OA;^K7u&RCLHZ4la1;Q6CLqQHo&WbPEMX%T}kz z1H`>}Ka929rc8PCApbaY>9>?PfZ2B)yb4);(m2rY6IKu0lV*h|&d|M$9 z#$N+u9DwI=P&*WW1DvvPA^1rDcY(gt#_J%5*BWAx6uEtQy`Z@!n#`m?lwT?kWgC13 zW-BbJOi4Hnav2;W3`Rt>-9Oi1Uz+(8aeml8Hg5Y-zP|&-c$#E?PMY7%=blIR<+-OE zPLDd3T=^w^VkKDJFVQ5uP}bV8nwY=Oah(;u}Us)S&}ooa5KA)8nx!-N?q{N&FQk^2a`LWSqCub z)4}M00TUJpX!bPXP)e}@cUE()x=g^t*R?dhlBd0fW{PW$hi(P7%~sZ_;~pxi&3`4# zI5SYoFK70>;>d~9cP%I7k{%aYT;`2N&PVPU?|W9EcPatt|1@k81Wo1pE8GwJ`vbDe zj~#H7dd#y#BiqlLdjxme-b+nKM@O#V!g^S9A!(!AQ_X!&6T|RiJzWCUdk*ik6QGRD z33n>BQKO~sKu-OxG@ebJ`<|v;$Ca-1MwiTz?yT=~fU*+9rUNxKRN+C!nKpysIp#C+b@Iz*Jv{1mK+Kx|4KttNGHsT&(XIKAqc?cPjF+FW<#kV`R;2 zEubgR3K-I+BJnC$EDrC7Et0-eD2%QhmQ 
z&6hd?QFwR1c1J~7*`R=}R-><}x0)<8X%G9%4q^QewU)-gh_77l(8r9N{T*%-uLx&o zZ3_sK)k;PN&`ByxxbklAA1GcN`{-EF)zVI>x%d)1f!iL{TsVY7nU&zfAJ|`EcLN&j zN?;Lvc(+bySlQ{qHH5}nN28Q^VyxfTVx@g!&isjDQ}?|cZ<1L>Hbe0WrbOcOm`E>` z?vbdM=TuC*+mOuG$WV{i-_<^TKm_G{is==~8b`W+*>o%Plv}ZIFnziUh@7;3Tfk=MO-lTCy_7oxHlQZ5y*G*+^b(*IpRE^ zlPf75CER*<{Pd)V)D~W4FAK?@{h*{r50l!m2pqN+i2FvLF10}YUy}9TDgsT z(fm5&Th%C0ddT+v6-LVRu>rvuEu*JSgUp=p(u^ARudsR3XBJmHCquV7SP!aURrNx# zV-IIxW2dN#eO_RYe5e~C->7PH=(gY`9p3e$+i_i52sxY;y+~!R+I48~KqE$7_eSGy z_`b>vXEiq+isTj!gkW)IYT6~+32lfLRIzSsI-^BaIHQc4U~73jl6Ao&DRAe7Ykr(Y z=c)WOtWB6<7#k2~8O|QGb|Th3Q)_+-=e&OTbfICsy`NqAFPE=#cBYjRJD>Nowiou% zI-V$&m;6$y-^F1^5xjm(GAl>cvq};!l^4FP-}f%8B+L-0d8Gh{kp)WOf-qGX^7v}O z>yG}9H3xde8+0L-=Mz}my%)B(LuDb|qaa}jU*G5&8E#lJP=E|AMJfoBa<32KQN=Vs zw@_J1^vZ9FU!9cdcRw_?*De`7X-$c*fZqPlRJ%T|8(8cqLt*v%3rfnLzog95}B4cD%a)%m7}7q>gUa zc&G`B`X~*BZ))m=TzlrTKvQiOMsTz02!wMu+DP$u@vs4c9#ViV?#*bx_>~fcH7J2D z+C^VO0&v?CFIGux5I7EcZYmNGSi6ox+yBzozz+eWp(GrDbO`|vjyImu-8lSLKY%g| zXi069U;!9$IGm=U2iH@4qMS%al)a{i3w?wP;{;x7YE*B5)J8CW<>&RmD?~hpXwIfJ z8_kq~)Ve;2^wFZkFNMY=^4ihRsc(~GRqk5#c^B@kasUIAR9CUB2@ zV1I|}x7dzn%v^ddQBppZqdZfi1x4vJ zO^V4C^FXZj)bEvzQCCp$hCxwr`6F5dVsax7Ng&>{)fiazGeXJ`eLXicwN9w-72ZjH zqEY^tMw5y2>jki5QLJ|j6Tyt0xTJ|g73!u4tpE6VEvxI0S( zi$OU8lU`n97=Z739wSTHv(IuJICHF8C}F6^)O}JTh`#X+jgZ$pZ%v6JHrgN0$Y&ZU z6pMt7_!pK8(Z!LdXdUApg`2}n+o>DAtHX36>Xk^Lnanm*lYzdcs#W)smbNhYvV(!s zHuFHr_1{zbX?H`FN-C~4OYrEj6x)!y>`0LP7qXvoMeq&_P{p&z=Zsx9kYOnit6Eb+9a2PNy)We0}YxFQN4TwrIBM_T$gg!d+M=_ZWaH+ zUb(US&*s;Io+MEzpOQW))Q-5+sQnU(V4&6SlX(85-Aeb!+;+P$Z=~+`SW--i_4J+` z=?78&l{9ojr!;1E#9ZZHsQ00k+wvX>J-;bNm(82Ay_=d3U6Q#0pc zBAy7G$^l!T4DTnZ)=|bL&o`eXcdSre#5rzvZv0)Q_pNe96I5oXRJSli4rBqgcJ97> z=&e^hUTu8aj`Wg#p1`XDlEiKfmHA829fdOfb*G2+KLi=ho7N@%3*9$5E#_OT#y$D& zDRI5$T3ZAoB&1sYS1d|4c8D62R~=TAfy;0{rXbeMf%bJ~&w&orN^^iP@oxmXlN>c~ zL?6kGtTCXfWW7r90%7$jk|mcdpGtLX?tS|``N|YH(K9C7?Y=B~Q|=#$F`hlA8z^6} zC$B}hfEbd*`E!lD5ch#xZl}}EPc8~YHJxo0b^DM)jgF!9`d@0iS}oAdL^++fdvbMV 
zX~6kNyiW4uR#nX9@>RnY^pvojhF{-`*Z|MV#0bKSd~m*zq=c@4`;Lu$;-J;~H^#D$ zqOyocLR_fn+0*xAb&h0e%iinHbWliEwh>z|e6#rc&A0tWt=UV`*|fYwqfyI1g;Lpq z-ung9Eo8e~Xjr1h0h0gz6nYvj5&jj&#?4k2`Cefl-vkEJ8(@b=M*OX2kc9-K32_8~ zo(K$E2M9{37#uh6SRllWaE1%tA2&mZ9(hw|H*+p{bErRfY--YiH9a~34SqSC`u5gQ+`kYJefl?aotI-pKOf@C z@xE0F@xZ)ay@$xJxTzbQVe^OYf6|PC--qYi^OkQSu|A1b7Wwg>Tl$FQ);mC7GJA=vfb?@buyF&Nt;cqMp{){J3%kvJ zup|cTN)F!m2Ek-=^`Ij5lNjjkTkx&sK$|XHWZ)?El@ALCatt!qIsP?&0=^|=fP(xT z|JT_1-<`jciVL(02idLU`)%xsP_R1JHcsjIm#MZR5VQ&3#ZR*gMZw7ulZNE~yFz+# zxX<)Nrg%oZ)&5YXa`&jlOuN(%>TUX#5ACyzcZGvOrv9rdtBcftZ=wEuxjwxQ%}I9Z zXXwOe8_DeTy$S#-c55Dcd0<^OyO;ahP}c1^%|`#Q55?hL-P$$AJyMN!=dUekWN-D4 z{|ni(?(@GIR>qH5+~8&a&04|9_)Xxh!~d`vZLwE&BpHhn@hq()xRjFw(|$Z$HY{Jn z8-DcEHE{M|p6nG?8( zwtsnl1^;rSKUH)|)|FHVE=`H`VeLH^nLzO2)uGX4C!TyQ=naZ7=~g4rmqU{1z-wyR z{PO|y*RFp}+gCR7IeOX$sAxmgqkFUOFe^|o3zEf@*puHxK2{)!vO%l2JLNtbY5S<9 zUr-uXAi5{DGY1}0GaUYW@<(HiY3WI>QBe^70(MbGury<7h>qyDT4Gsx1y#gbvzajlo;2~ z#5u{NU$B@ucZ6`+J06cxOXh!fJ6AC&8M}c1_7?m}>0i@7N)pw_qeGl1eWu0&=}LXM zzbwiE!i*gZvXd6hzeya)Y{;~s7tWnqe0Xsy!J67IBG5OMWyzuS)S1=B`LyMCx|32$ zi~0OX>6p4Pxm~B#Wq%DVg*ZYG-8rtwoZ6l{LTx`ebQKqsURKtFoqO0k&Ck>Xt)Wq5KjWDxNzruP$49SnnGJkl{IyviXOuV@T({q(E z*`pXdS(5WqQAQRL6SqpaU9I-w(c}9Vy~k{^lg+5Zx{uY`FO$x_Y#O6|R{SE;k`XG; zR3Syx$c^*$zbaKew$Sh2e=Yt{!$spWTKyQB|4)BswIDGM!poojgfmO#I{!k7R&gzn zdpI8*TAEkmn?-*eSyJ|&!LzIXLR`aX9{2^tv*m;>iPL<2X43$%J_R-?0N_x7d_@&= z0CNdcBK8~(NWP;^Hrks{0g`75J%&afAG#0!lZpdtX0V|k!xvs~X0`jhJveq>1|H0& zH81s^>+84xTZ9^<;&X6|Q;fwvr$x=lYl7uH4E7fx;ECZ3lZHQ$l7_EB0yX(l-H#pe zT;T8g-*ag`>*l6@hexpkCAL7f7(Dy(V}Tv69b3aDI4nnEEe7IeQ*F1M<{fOxk?$}u zy4{FjiE&O-Bue4|-`DKH#HeN)I6)6E&WXJ#eeJUU3 z9~Kz%l|DW_Mutw_B-^Tlw!tbTl*#bdqA(H6d&qVJime>uhkrcSLePrm3Xyuu@)djy zZP2|`9dEUdTi~|_%0N-Sya&;cCisHFe!<$n^N=?T+<~`o1N4Ln-#!fP2!RXF7CV|t zhC?dSoKav80>S=(Gogc0V+Y!xv(Fv5;T8z&7dFh1OJoR|GXOWaVCsAHm>0u~1Jllb z!{mQI0A1}bXalASX;DZPQZU7Q&fjW4=xgdh$3$7kW3c2giTmMfjE+p({{haIIoiwYBR^bZ4eZw^2Rt(u<;OHp{jsfASTW-3x3a zVDTNdjq(R}!~%N)BtygQzxb3-aPv5FXNP1{h*s&4GswK!dSX)NQ{ipdKFcUK>b9xp 
zW!p1-R}im^@^O~HD=$GI-hhGazwS9DCV!j=yj*odlR~N#oi781Pt{SKrM!6^nKw;8 zLyvu*2>ShNX|&A2$fb66kfG+ED@MGY=1Qd&`f2u-@s4}Q-y`DVt?;T+NmMpdT76oQOK(gJ2(N_t<$%cNU>d$#k2id!v4k4 z{<)>K!VNZnK7Hh#ej3raI=}9xTdcggPetsn`32A|T0e@jigCV^_;J2AH@N9fqwgYG zM?G5pR_(2EYq})eLsQL4w_;Q0k87T};b_@OK}v$ckI>X&Es=sUbfkTunR|L~g_g0v zdBtZLg*?E+?P~gb>^0BrD;1Ky(rJY$ zyGfxZg<3y$p$Qe4SH6bkD(-5KiV`!z4El|SRIe=ET8CG*yv!`zboOVz7$U>rZaDg9|8h@=VaF@*(f)+T)L!JYV0Cm z2Y6wVqzBG5WXX*nv*Y?GiRV_0q|-O%Z*3D_9ZM^dH*`2gC6*H8M%3Ds<2z=EXXp8X zJ=FwG6eTE3T<=M1JZ~?DvPAb-C76)_zi9@MxyE2%%NErAsFu$Oh1{KG@*#~M6?w`8 zc{Uht%RrxCJo8yy?Z6yFrnS0TBrQ;6KSy*m{m&cFmw`4NU4|n|Z{VNVngg}ZI=|&Ij_kB6P z+U*g3Dvc;Hd&4aE+8K1Y0qI9@vVk~H-#R~d^{7OHX!Y;6PMhHLxf38Z!-e@(l?N-_ z`>}WAii|FGw!yGh*(c{4Rz*NrRlZjXxBn)J6 zmsX$z0sQV&Y9&uf`#(=XGe{5q#sW)F+96791T2k9y-)s@U|xJu@a0EZ#Tm;Wt8-g4 z(oR6asrJ^*^1Y9vc{&Q2=Z<`=%~g9OR;G%b?;+kOwtP`EY_zhu=@zuAGfRy>@9*KS z%lxBdNf*g}&s_UsXo`sKG+Em$Z2Y`?H&>%(;Ct0T;fE|=y8{^`pqM}Ga-;D zr^5>ezR=Qb-_0;`Eax&>SG;Vq0A z=8_v(;nq`(?8F*50&tFi?mEX8R`QhmO8hM(^*u+o5~`o)<;c9S=F-R^9mVPcrZ%Q|~r7XCO8051Ps$DRE%Rj_lC-pTW$-?Q83bA}H+OiHBYQ;Bi;36kxs zSUuhOPn}PN!x@5ubXNuuy;@&57exh_NKwB+d{+Z<{TqJZPuYICOgY^-I`#bCx01QZ zGfg8NbmwLuO-lyhL?f}uQ}3AReXU7e+e@Q``xSGl(FP>+kxA>sbu5lnE-a>Vyr)WA zgNa-#K_$`9PtyS2UjUjnz(}YkDynR#JmXTPDKE&Ew0}!UlXssfS{wI~xP(X?BqZFD{eMOQ0zmI=8PHJ6Dim6u$~ z=oqoM{b)gvKT9xIavzQ!N$es`i3l$BsnSmxH2BX9)82Q7dl0ZA?60#5a63F^{M@L4m{$WS#badkG{M>LZR5C0W zN=ThUpBD=?PjBdwsr=TGb$8e24ri`(Mqi^=fKQjLTWmk^EakJZPWSed^N|35F}k8o zBf*d`A{6E6ycXjT-W;@8xG#tQ(NSvS8tqq4hX{s~>!`Rle57EG zGra_v^|e)kDv9{M*SD@yD^=BP)CM<{=G^y*Ms0cPnPGImZNTUTr8?7M1n(2+m23`_ ziFhUFLs19K(T6z7LeO#6#^91QhQ zNL5R-UhXP>{e-sa&ed>|&rKd#==nT`B#sPNy0(~EjV|NgD;F=cNh1+me>;WRkcghF zpSSD~2g*54u6#q*&GhSh64tB}7)GB} z^B6!_Lfb$wS39j}lI%cb8=7>(pxl71tuAsUH{yw?a1NJ`xMcWEyud%uE2Ik4($)eq z%zyXO+f)IbdgXN>8}~HtkO13myN3dc&6Up)u3bY!UiN?J;54FWPM_-jd<` z7Zf2|2qg-NW_1tSYZIvq7@3MO0XPk->6I^rqB#Jk5#@}O1q5Ko7pNOK0Y?BrgpwiX zJoz{*2-E%8o^=Yc<5&%CI}8_KspN*hg8kgVoK`Y$Hdi>Vz%{P`xy)TAv;8$dfsoNP 
z`9&vUw+?f{o*#y!W742HELnI1p8s0{{^!9*Z&1XK!?z(mHIBy?AC8E!f$xwTs(nH7 z2W9x^nRrpI2uL+=g%$cGmU5(rBP3nL9?)^c-)obBcIDWSHs4!X;d$@AK>WAZd0=lZ zq*Zvyt0_QF; zEI>XW8(>DW{%@win4H4=eHfg>^54OMHV}C@cG5+}lptWO1l^owp=igGMI_iZBGTHR ztNej_T93IjZziq5b8^Yx!v6*us8yGm*4489(X|X?4_cZHuHXD;XV14>o#@KI`@P%+ z+Gv7HRMy6q&GsAVzB9Yg+k>iiGV21wG_vDJL(1<4E8?y-63jQ2vF7S2V${}mEy+vY zm=?1KN|c!dWt;0UL2sD5{k13byRuw5%U)!Ba(=l)_nE1y>9AS3EcrcYR{SH+E5lsp z>Q-G;u$s9*Icd@HERjKUnC$cM!bsb<7RKXs1B7k-x&||BvO~K ztNks4w9OxFlmRo8%X~k8PJ0WiQ`|wzh*Hz8%9BPbirpVDHvS_SSlxGwE}xn!ju4nH z%GCNyOJ^b#75B1jD3ql>w2oA0W*Dd9o4yg6M)O_wjB&Zi?H}*{h2(WE0R*2v5_qjI zW0&7Cb7fwZj`n=n4!He|&&cY}^ZuWNVUQSisaEOJDYMVVY)IcM9dpwUm6>ik($X&* z$i~nqFNLzZIS0T8OP>l>573eIdgVf;Fx2*GI!kkhKF8}u7Bf2Puc6Si-DhZZBSoRW z-QCU0xsLQkb#pZ0R)JMX)+uG5ahYbc-47w>KY0g@wC_ZvTQ|5;k}iju|Kae*$bkp_AvYAak zyL;cXntce`eHN#;)YHA&ZfG~-TC$(r^^VujyzLWmh2)jPkui zC>`tF_p1}w*ps}I!(OSsFH!!?^DK*BN5N`Vj7Z7anCD(1Jdb8feJ#(FosCNGTr@u6 zcT%H_zp@0?Gq!W36kcX{DtG z(YH$1B%;!}wt8+`cuMLs@{M{YxIV_vtIbiuRRkK}8PiT?ieDJp23n9V*S8ARUa|#3 zW?srR{4E(w_a1B?I&gldrSzxx#us6Z@6{rLGX?r!9(uyQ@FO{T;Lu#pzmQ{Nt*P8To&cVySI$>jFs`Jv}}YOE;T$B5Bke_(kiiYb8j2Qt8?m ziBBt%!BHuLZDd*ZGapUGAD-XR3O`K1cHpdQUR0yis8sUR>Ax#d3`YhFS|<3Jn#@Bd z7FSt>YVx>rxQ_(y$4$tfTCMFg(_LMzXc%-8%5GM-qiC+b!Ad1Bv0gfQH{~@Lf|}#%M^7gQ8qrL3Jp)C>+=wORP}1`n1rN$yW*Ml z%=0Ei{|r(hTQ`|I-79ORF40*)4`Qg3+@*1^KjL3#=+V~vu665WP5c{ry-9sr(Lsn6 z&~c9(VF6TC7?33Z3#fjGMNcgI@(cK9nTfa3wyZ3!nqcV@DGK%&1@B=d!2Fe)ZCs@7 zZxG;Lbl-yQ;su1o7v7`J?_u8DAWQ?A#ekC_yBrtzVkPiEg!$V^Sg1h24i;xf&hcg? 
z19$+^lz;z4z{S2z$O5Xbe5+8eb2Ax=fEk&vjTuSl`|uhGIQqRk>B!MeK6sA3c#u2(blih%D(8(>q_@sGj2W?{yB zzCdU3u$*UO$Kh=U#h|O>WJmZ`7EsJ%psLdFE$HC=Hns=xkVs#S$ZO1RNa@5|n7Kr_ zD5v0|U2Y|KiV8ch3vfJ|CnY(|^{5TT;)sdNQK1X4avkTAKKPtPSCrKkg5?}705O8l zQ49qA)qfU&UKkkyJ0akc0;pi%=>Ho<5K@dYb``q%<$?un43@-JcZ{s0>j+IoRYCf% zy^p@wI$H@MegC`uzpKdz#S(!|6mk+RE)ah)bh$h*Wn z2ABO^;N3O=NG8CG;soFZkC7-f{f{!m)D-g@%+o5xneSr<1DQ<=GFcOKo6d9CPmO~( z;G!StVCI&PZ6-?(6C_6c3%SO1nxFgZP>5tU|C7HIR-h4khMPV}lJr?^DGe#}Q2!*H zXG)!yjWevbj!QN=0DAs;siB!xHJ3>uE+r!N+01S}{MAT@6AS6%vVT$wao?y};*Ua2 ztt@3?a_^|zp0GJP;ax`P`1Hb@lJJ2fomSGs`1Gc9JTuTzI9H4$xoWN)LX&#rgV*q1 zSobdBnE>*&pp`}UD<H~EISA)vS9-)~mByVGod)*upZ z{R}9MPQRvrHWIPlxs%*4-F#A^({IDNbM{!qoM!-S_gJ}?a5Jw>5tZ^%SOXYE)kPXC z1NJ~imtLPQUEZPxSNMsb>%TOJ){&V>=Oaq0>BnBS1AQPq+~Tu?>hz(ceKV_LNlTmm z2&A<0Ihdj?>8GfdMhHbdkQC{@x8 z)g=Gq^iG5ePzWxZa9f~L}LF}k}-H)L>X_67?R z-W?+zIm|?}8()S4#H1AbbY!yLzCc?{Qlc6Ofkdy6BTw!uF$>W^w&tnM(}>4a2Eb5zJ=E2h_fA$_y9I5DLmO{cu(Z*Rw!g zhFv&y`eVMyP~pS4s37%eaZv;X#?N;O!@ogh&$+CWZID$iuB?o65F(wgQl)mtS2;Op z@f6E)2&cY0p>4W<{+bx7N{p9<+BS0X82zUnnzF!iZ5eNPwVC=pZspwrA-32K`eh-S zB6Mxt4b6~{KMqW;T`5c-tFG9~L!%VyxR2Jwb#57}S%8haix@0opn;(3uG?elmZ(nh z@xp3mbZw(%&263v^$Oi%9>wr8;^$@k8^epp2>G**#$ngHIQ%$$jGQR12ZTq#1+AEy^_^h zwe8@08w?8-`U0{1g2Sgp$e{o90GAK7*^dLA`oVaz05DA}e4oR4fQ*4*rhy^kVNsY9 zMo#Qe8&n4g(^=ut2g+FmAeJqt^6!@k9e8~KOsoG5Ho%$&0{}sA4EXtX{D<9u=~#js z!KOzxHO0L5k{J_7=nxAByFQLsh{r*)4P?rKs1Z)iziP$D|AF?bzKFN#p@os0VM5if zJZq+$=F2@4?6h>UJO3D+?L@lC&$kZaRy9(j2VQCXqb+q!OD3~Rc=t}n)b~u_6J6-A z_G5FAC1fuENVlndlNuHs!}I74jZRZzqu&&>Ix7Insk;+ZMtpXHoC^_JHihYz3eBjU zw%FXtW=Xe>kL5|Rp;~s*QS)Dy2G?VCkE$AoxmS;VH%x7rfuw_AT*>Ym^6Ib!Oe0aWgOvi_^Q zOhfz0hOjsk7Te{!WO0pY`i4HaKpZ?f@F!!E5;w)O)m#veA<4ABzA2xpj=)d-IOZQy zA;5J4u$UjYWR-*j5@FRp1`n9S}EJ)o&i)*Qo z6-ZpD!WLmTd!zH*;z%KY>|6g-J1Z%3_R8cQk)Cc=x0j)czy$VK4o*1JN9~p{EZoqE@dP} zJoUi?sKs9$0Sqb}=!KZQF`%!d?0LNj2$8L|SeqGHHstH@J5*|rJ7Qf&lc_a5Pm%;u zf8d<+j6ZMZa>njYd+NqKGVsye$irqlChe=&(KaXdIezL88g!R_-15U1) z$R1f$WGK=}t^RE+Hy-@OwxK=2p2ZoV;Aa($kSwj*aPwuDJ4I#GZdCCy7RF?09__T$ 
zrXLZ>gQD$tMON9FzbWVZGbvykr~Wg3}9K+54oA z))8#Yc@5r1CKm&UOD%T)LY>&nSz?%ciE_JhyE4l4jVAlkR5q?uBvF3IE9_L8#0y3@mAGaRb;B0)KMLk>Q_c*Pv3P3Zq-j)Su@$%Pw^NI?{CT8 zvvb&7ZoN}G^DmSbMWcUhnfj1HkIIF+ux*KQHATU#GhkO7_J->0UvN0>wv`KC-*bovtnjv6xw(G@a<6*5d1~v}re3~} z>RH?0EVw2?*k**fD_6SGEl_<~qCr^?hP1dnEa@!7+-Ecg`k55BquzF>|ru?E6GHA1S|h+@FoWr*_MmUN`_#LT;%7Q z2Su+|Ld9fI@8P0K;C77d5(a8yWn+o3U_)HsgRJ2g2vECt|8o}uNBfLW<#t?xW@ zWM|L47_DTs$m}!qTnX4+H#SSiV2~8PF>6n#XEG|ktq+u?Nqy==xXgbpv6@NvO-iKHk4Lao?!Ss$@>+QG=4bvI^cmU2xKf)@8`^q=Ym7^?QOyW{ZM8qm!l(1O)F*mh;Z@qQMD|MZXNGGo_9@?O*cvscL1?JAN~Sb{RvOF>H}Dw_5oy*@yzR$G-&tr4V=t+5nCQu*1N@rGadOt$%)A+NbmWLjx+ zKqp!t^=W-gA7zP|BDGOMta?7=@@3(L6IHIhl&f*_rV)J~ByT)P`!+<+ zesm+g%3>lqDk7T)9j*4r`Hi^%shp4IEF~+Ae)+NHUN~C{hbfh0L)DM6Aer@D=JDk~ z$*pV6G=5r*Zhx*h7eO8B82=TK@iyXWx^>{3jA%*|Fmk9N3Q7`WYAyy^=+B=D0xLr9 zWPR3b*PeB;T*01?DqK=$V?;Sw5pP+12*uSZ9B&^!+=Zo=^@>Kk$jw8))hmjT(32iE zMdKNyQ<-E4*=3#4f6Dm_+Jg zCQ5qE6s7l6+tI-~h0i|gu2iHA?T>n4i*RvA2m4Y!CAM_U(M+7nz@cGkD#BCE_M>>T z*ah&COZe6CxQ_#cs!I*#!oikyGJR(8y)_`+7VEauoL`pr}a5~ zGU!L-V7kL$bpp?H%qeeEHvZX@dcf7nloiLlYlM@WKXF6B-RuLc=Z9@9h7)Tb@2C_W!0t4X~W$ia}{ z6v43Eyf1xV0J2NA!$~z`_L&LkUJrr1Kk8_bQa?Czx-A;@TMw`FvVoRq`mc) z6&TTOrQ3FqkyrkOB3K@_l391pA0VqKLB(5Mbq5~M(?S3mwoTbAp$I?m-9I{=+GFF>KIb z0O`P`i=Z6v#Q!&LK_A;*u5kPdJ-Gpwm4&PW_3YdHyfJ2yuf4fazeB8Ka6Ttl7Ah(N zZZF{azrM%{Pi`L}7&$G{<(n% z4TCf&k=iLdJ4nIzq^GH<(YIhhIS5S}3D z^T!UQ`%O|TY}K{LdOnWX0R-TQ8%7hk&eN;!V+rVd`xdQ}N8_z%XTRb1o@eDV{MUP! 
zoupAK;QordVkn*i$1~E?Psl`vJzf;fs zF+g&mjW{&=bAwD867=qLv4QHcB23p_u7`Yeg@CiUih=0D2Yu_Cr2uJ|g#b`rQ!J2I3@=uw))?kzxB~PBF}eh7Pl&BsS58tLjpBnVo6}!`JakN1>_E1 z_l`(reJl>mi?i_u(+Eh4=yc$Sy>^9QGea5D{eAwLu#K>5VwcYC<#M-@xxW}}0TJYs zA1ep_n)17T!Pk2dN=F4=Smqn$+fcsGq1PFWKXqhtKNRMe>|6)baT#kF(lUK5Fb%18&K`30q|`6^M|{ zeug|OabJFD{>7QgLui#pdA|_yN!0Hj#NVKw2&VTU(7>cPI9o==f@CgSHW#s0ltGE^ zGJnxwZPF-9el)@>s+jx|bH7NXx=nq0I!g~UI;^wjg=)H`ix}Ia>m&oBd&%q}M!|Tw zHu`Lr=a8I{7b{~x!gnOk$LE;7+r8@&f*fC(Bfy?TZ{GA)EXq@lHCpavo4DARh4Z7L z!g-LY^wb=U)V}O$VM7wX{GvLeppu*XkXHiZlJS_``#@WUSdZ+XUOlmEFXT3)$;4SX ze22}~U?mvZ@qphLqx0PxbFzUX(Mz~2Af$f5RB_T&Zc^=$P+|II6Mve{?Of)n%p1$} z8Ebl9^IuOM;gjm5AoNMb-3%Ex@lOU z8{diRU%&RzgO)OyPiS65J6ziIJGq;7ebYI$=a15-pZ89p6ieiFWiKvqMdb>x)%(Nr zzBI60QiXX+-IowZ{*7U4ka%GAHWlXejQtj_K7`f}!>++68lc>}3TO0ULxI2|hwm&W z#Q7+k_aboFU8`P&>};=WX}2lde#`asmH*r`z+HX!?f$gFHa;d+q+i{5r9q~t)LT@s zO8vWZ4!KF_qKTb%IvegmmibjNoY}j1=Wt|C!RX478;|P+F)<=NWjQJ62QmAgyU~_>o~%Z zHRGLtHd$ao>b*#^P7Zv&4TAo0)grT|1ELk6-RePhFvEut>SJ(_%nF4+ofhViT;b}E z6Z-=RsBo3$c*|5jmA_H>@OkKS0rEY?{OQTK8m`SpZ93kNk}(-<3nE~Z+T^NtC#xzU zGTIw-e#W4yq7d3!@iCMr3MBe4=m3i<%;fFC2bcTEh)Sr2e3O;sokz?;3CKrK(HmL? 
zau8ON$B$J2@C^{{DihWFv(kc(S$-d;i|BFXhHuG2x)B5ncn;93{x|GQU>L+r;0nXW z`7~sZBh4ZqOJ~Sq2M5X=I)bTltA1q z*G~n*024+r3YVqnXJQ&>2J-kz&>hWMUcH5$L+YOiJyClk^?*NG3tMrQvW+BvcxZum z)pb^*k}|6_xyjr#FEo+TD)+Mpu*8jAnmQa(Cgf$Z2NF(*tNSf=nDWQ=<#|qA5G)V4 zo|dQXR@xbphgc=VlH|Twb!BC)rZ^-0qtc3~kIQ{_aM7{EFP#-P`^tP?7Oi{(v|BV< zAj*g4v0%3wFv82Q?-l%}7rk@IR3-h|!G2~Cqcni%jBvF&Ulv+Xj15h2i`N7OZn4-t zbL#h{QJQmsJ9dw@(TNW3&^L0X&5EncTE{eGtEC&A3Po!g^qCu*$;8=1xo2X4n4>lF z>cRvcz7tjFDJD5;OZwniv7gUGzsX++Bn|RSb?nhfI%cd%ihLb5zdo7UGR8l<8~Tsf zGKF8Gf)?5qPgobu11EJ=gW?_2Ea#NG_41Ha$r2(7Ve4gxIH6I9RBa5SFB>WiS{W&x zoBz;?Vkk2~d+AxB(!kmN@-W*obd5~CSEyc)qY$&;$}mc&N9`E;sK$Y!EiOnz^ z^`WS}3w34yoY)e~|B-cM!N89XzlDf-S(PqPra8?Icoa@~B?z1kLL+hFz}zuUHDanB zusmUwDOhN9nVxXj*tD04%u02uKnRTWfo(eLz%(O7>w zTlkJ~rNE9tBs9`@%G)fspv~OHmJK7&;P{0NX~16W_g)sHUEO}yakBl#TQMqxGlci6 zgs-sap9SymSywtIeBMGa+JZ67TlHEi_9AD*nsimqaZ}b`-WLK-pXUT`Bz1GGnM~O@ zDC&3w`l0|^xepvg4p#C)xO2WD$q?xFb1goA?uC;rAiH&rDhw;zMU3|SdrPynC!S~O< z0>-x9-znk%h71j!?u9w>s>9$%F-1k}Ibqi)16%ZDA^;{!SDEs1X3g!HxrhNBunjgG;zwUb8cNq2$kQ*4qTd zcNW)*9bO1)?Edsy_i#%!m--i?ZdwWC`jH458!Is@oeXT*KZumOPj;}S zHXr!t3sT%2jCJlMEY5;qF6h@xV@A*RU}MfD;|kgQcJ7j#RfXc#GaF3_v!@5RKN}FG zW=lfPcXTk(DsRwA1U$p&YGM+;18;~{f7F7iixOV1&u&*#Q2M+`NU5Xd62;|A?RB_5 zH^quGv0==eYcgv)guC<}+?{#XsGW|73IP@M^Ll3A$o9`rJ30HT-vQrJMZ8|g zN`09$-)5+9e%)rgArKIO6u)jO&P4@3*H~i>&-$jx{^Gu|@{GEll0%*6n)O@rV>}G! 
zo{Y<_M>OEfH6#@tSPe|chz^Ka8F-^^+JEEbz@r7Z#g3SoR|JQ6O1-*ZRSeB*g7RW~ zqB?lm?OaXg58`#N?GDcRq?ZcV->4@@w)>bAvj)mePj2V;S6dwf*XBtQ6X8iXR!r$~ zswb7VO9i8Yd+iIN(+y&(Qi7G9%LL0nHGTDl8>LgXxHQqXYdNk9mPDS{7&zh=dYyt60kGs$y-Qy?Vc0`Pw@xWgd?SaN#v%VUg_CmZ?7~3$|o2+>^AV=5j z#@TNl(i!;MzIiFjf)!R@NzN60F9GHhuV;EN4c>Fm z!S}XDSH>XyufKtryacaSqz2S+(rFL6^#yoKo%jNAghQ(DicLlx)z~l}Fchta0NjB6 z4Vb+v2ML3C5&sw$<+cNfPQ7#?2!0RHg#r7uJtr8F_udlAc7$z0B3E!^2$X+EaGf_e zfGP#x5Xf5mJ4;-^eXRWZ5Wq@!{_ag67RL(*CCM;8lW8_?s9_$mUwn#T|37sG3mJj| zXW{=F@`1Ih_u49t3J+>26ae#BORrSDzGpvA{<+(nTBG~wNLG2M2HxvUrk@+>1ZZ?$ ztxmL&#|ArQ+uKYOXYamB=>LhEX}>XNp|`m)YV+e5GcyGHieT>Ai$<;uR23zyZ`_ql zA4E(k%Kf0S}VOWt=yZhwq zB2q_q-Pu5L{ZWBW)zm0$tqt0HQSS%i1+6(0mpr$P~m&xq~R)>94&HjJb`Vx33+rQs?24zVZgGs4mCri^< z3r#ilof-SqsIg^VvQ?v0!w7}UAUspHku8+1lC`WMSxVW)o@^D7&gFUk?>X=JoOAiO z-DWrVUDx&feV1XwnU&7YX*(vp5?KE_+WWEu2dafUk%f1PR~B@tX+%GayD1Rec43Nd z8x~SBx~#qA;&ET18h;cf#yI0;}n=@W7#+IEyXYbJBwo3e5VP?PY1C0Tb&GERI$%DbnrJ$7Hd(JCQs-93KzI@SNg~sUDS=;Q)v_c+u49aoRqxD~bX$e%CbJk^@gn++ zHvw3BmZ;dtTf#z-Njzp;K(e9`w%loQ$G$=kXV_)@@dQ#eQ8u*ZfoJ7qJq=qb$&4?J z;OraAfP@A+C1NZr=^sKLS<7; z;Fqmu6^$Q1&m>B(oG&cbzuh>Pn&BeCof;6#3cAGREjcbl;1B3{dwHEK*V9V~NS$KgkrfUzYsg%k0la(cZs_;rMe{U3%9@G`n34duHd85`J)r zT2Nn3EiWaleQ0WE-?tL~?q)ARaaUM)ro@-|hXwZLLRt@;U}`az1U?wMIllg2mTNf* zZ=_SM(EB0XlXkC3oBwy2VsY?&j3#HE~|wPT#TQR)KmjrEZka+cB0}C?; z>)cxC^^hkB!efVFAxMdcg@;2BC|LigR~&o@Eb!f+DkfDci8&@YLH|e(2LhcFdxl2x zGwMAjE%uott;3pR81%%SS_anwBJlNkSn|7dXivk`)>-m?(VF!@*whX$+4oHr^b?Sn zMj|*G1H_u)mz>DQqXO$M!K9P1K2}3}qB0dqsKDO6YyJ3e+K9G|n{WcOZ zm4F5K&aLN*UIFdlF<>XSsKgJubw3Jw(?OG5n#ZQ^frr-ha$s2feZWB%%c%+RIWCcJ zkf;TRp?yclyOo!1@u&XYQ&-+Y$1FhszXuaZ^1C%80<3m-SMOl!!liK*P=L>m?JyDTo-r?^4FC}N^*G(h*XFewWb{Zur;WU0I378&~9!ww1P#-+(D_1n5O)pxl zT20hb46%w$=(y3EPE!8f57K6g2UYJL^4zpp0pSA0-l<6C5wY+H#@V@!9t6Uv&Zr7Q zDSbwGU60*l_}luT<@WEOs}D$S$ad?^p9_M{q|iCgeqj>3mD^gCtdGcM821Et1L9)cju2(( zyVlvYbrOx~IWW%pquCW=gNT25L^fR^y#8CCdgz*&Zg!u`>ZZf1&kucn&-L{9`Jj=){&CtW#%))xXFA8^|6$TS?|H==RjEC9oah@> 
zyYu*nnh>#mz{9ZUne+N~^-u3p8iQ&Xx_~xm-F!iye`TJ4sR_xGBB^kWZghn)rGgYO zCWIY^lyrg!Ka&W2V-O~%3!DkP$>^f@O z+GXeMiKH$6KYAE}mHjWM#seCZfEV4`#x}O&15EMptA{G^bs>fI+o3&r`RLWnk@qa8 znZ~&53Il}4pFrYb^TnQGCnoJc;fMJsB9SymN@u1QP{!k?PsBG`U`&Pv%}_2MY&>b< zJ9c$W9)BT;=BCtum1ov9u})X(?oCV3>z|BlUi_#FbdsIT)An&{JZd^YHE4`Tm3mCe zZT+kTkIZkBY3uZ8lKWIDVYPYAy?ZFzwae;cH|cPlb!`6I5(Ir|0B^5UMXSsunA{G` z(gnsKLxmwI=m9ktOQ}0Gs)^RTg*O2v%bgk>=KCtVb1m+g+7)5$+r_!n+Ni|`npmgM zEz%VPuUiC{PDb_%-5hYCX1WT$GNB^wJH%E7Je0DhQ|;(w@$JT=h2!naXN-Z9tXKX6 zHZ(D%E2Fxhm0uRnL(xKjVCyVwcmH5)`}Cy=4t}5GW6OKq-B;vTOp~&(AlcE&Pjxjb zW!`g}6z-6VFC>6T<7kw{OLD#qcJuxJ7VS?L?f!CTZi{oexHp`7?nyoPqTu>|Yp!2P{fj0C>4 zl1^2wMc;WD`%JC}uIzBtLE^J;mVPYV?pATPUg%H;*U{mQ1h`8^itKmS6lf1wwa&Rg|oS7-^3Kbf9Dmo7Je0E=7f>7sS0PD7KAr%G;BLs# zle5LazYM43K(HLZTjevy1IOE4l^7(45CuoYB+#J+^$Xap3jjMlGK{>td{4I_I7OT{|n*=<@^G4FBKQkevw`g1o?M<0k(L|8R@u6-0EKXTOYH7hn=LB+T_I8OnsHR{1sihufaXaL{b^xE{sTr=t94Db5k)55j{jo;&>V3|{0{Wrib!K|R zPfF&?WM_Y-n#SK`AFxA`t6wRxzdpb}?*_Mi--QS~jomg+STc|J(v&Y*o!b4)o( zcKl{Q90)HWUbn)~Z`r8K(lR?8K%Z1_yC6^MVrxWo{nz$hppG?beODY-*Xs!U?wGar zDJ-#?I5DP2rPrcP(7=3@S7q=wn&}JY^Z-4++6R&T3n5Zc%=vM#(rmNsl$~ynfY`P0 zz3Hiq7fDRy$EQ;mV_;n$TXbartC?5Ln&JQ1(jEhK361k?JLV8@f}4~ zCr!|v^ats$lsl{K3p2jp?2BSEB(0E*CTa!gQk0k9*$j+3JKKAYr7XQ4oXew?{BW3f zdHso*C5b|bU6iUBXc?y0D_9|q+UY(N`t#tI!`KDWYx-3C!zoOqA=^oBqPp^cW5v3% zL6j(WTqn>{2WC=uaaS)_3m5vnJ2`wZJ$+KS;!Fsw35l}K+#=ce7OW;89M`qx_d9_z zDHY3GO7jYBeye73L&SbZBx0b2@P$UXo&JiJ$s$Tc@lkWRYW4xg4Jd<{e+a|Z@OY4o_`^`yY}kY0`Y?Y+D%AoWY%?M z)z!aVDgO(tCar%CV!m)s#aqsu;J+DLS*^?gXrDL%0(RGqh))r!y=`UXrP{g8^3UsyK_@2LJaD$IqdI|pd6jUgJTH@j8f6wi@1NH+vR0y5}YrH0d zQWRAf;Jr!NE#C4G=32M^b)(QCd+gRQT#YLpRC@3Mz5|GSb>H3`r*H%of#X#5hxPcg zZK);qF0$PMH_b(Enmgt_9P?NfYj1p@$&Aa5H-|;MPP8uQ z5WM)Q6jYD2(-p=idVAVVyDMJ1VyY3GcjJu450$82M~(x31@sw%G~4kRTUcl5`8TDty{r*KhA!{`Q=%~7$dF2gQ&#e8#W)}PFvuDPWx@F zjSeQ~nk>|>RQ-is9}J$g+W123OYQe661oL#(D;^3KQ!X{<&Yl!2P@&yEAgvcD%a1ZJ{GG&ZVcFXhA^~kDyKyz z26QSvOx89RH+LF#nyIFpO>43V-n@>ozG9b&UZXX2?Rd@48(SPqAerU{gW0RAUaCqX 
z;!H;>0Wh{2<>Cr9O;p^e6~+U~BZV7{-d1Ve&43Xit|-u@+`{mk>)$RN8fdJElikjp z*))k#8_e=kTW0msHY&WMTEA^oosE~r>8P=K3JLFxb*61t8^hgMPkS*ZU+&NRA@`#F zlbd~0q49vuczamonGm@uE7O18rI&lsH>SfJzVItFx1I3FLx&O`p1!|P8}0@e${TX|nq3=~Jwo9ZU9D5Y2toCEPA6pQO zo&_qZXpGZi6ZGjTw;4p{U+AR1L)S=lys4yuzhp+Ik%4-&@CMF#)qXpW#4I*8?z!Xr z66x7YeQGRWDzs^I$s;2--C#<%tj=KSfI`1oNBsELfBK6Gaa|Mq<$dl(kDfRSA!wZ~ znR$OkPmFil`$}WD2zkA{xXW@>{XSG>ap^&$O9z=xHF`)z)1AhK4G&2^jnCfegG^uoY%}cQO z*LaSl067@up?!&%+?y+oxM;T&+FQhFNkI##qJJpoP!A^LRATtJYKNsyz@_<}4LUO^ zTo2~?J4;B&^Ok&qT%s^6p56SURla0NOw>i|(ksNo4?TuQOPc9;f(0G9&~oFX0; zQs-1hHOF)^%dJ9m9?Ro)Y{xAM#xEW(SQqPjqTqN@RL5X?0TJr{d(T{UifC;pUv{;)(7aO%X2`kryY^@H0luh!!+NFOUD9dv${!Wf;^+Or z4mo1QOYL78>aXbAR&!^+XV7)N${d0KTHgm0#bW1tCEt$gCT&Tb0|nedXoeHsiUU|N z2b~LR)f<-bdryoaw^V}@?u6z!(cE$IRF1Pxs*eXksIHz$3!^lw(Awu|(=!k(fWzps zAa26wW^5VCz48^cK_+d{QD@xF;bttbEK1@?!B@+~!T()j^21)Sy=jnxWZ}EZc}Q{{ z@R7&>idg0jsARVqfu0;cD2Q{Qc)Z+Ql=pJxKsiUr4!KQ>AT+PM(PNAw;i7u7)HB$W z=FPkY6jc;xe<`rGLwL{-)=t23v;)J9-NHsLPz1#rCcp`?J#vLzeV}@iT4MhGK+%&j z(l+mkJPg=ub2j zg5K_F022z^CXkwOuvLR}JP2aXg!uVEaXzvr1N#OoM+ia)129=YxZQNS-C^6Xd;ISj z4Ne8YED3)7u-%uodlke+J^-uSJ>lR~2P7kS0i3-1-TyP#wETAB>nnX0OSNS0KA>$+ zD|Mv#{)HZ`f5%N&&*A0@0(_hBdawn{j9V{7TVaVJjqIl8}e?sxXlC%R1c z!n0Egy8h#WEq@`DZ;YNu)rm(lS}*YyVIY1iJJBvK5ogKBeqfeuh@HQCQ_?O=0yN7&CLg`mus#c|DWins4iyIR>vg~P; zd0gn7>u3?G5dHJ%ey0hkqhZY>4jaw=YT3_(PIor9I=%`pDU8joeBDZ`{K8q{7nU!l zpK1dNtcG9y`%U}w=>gLyje%p@PlAkVEq>D(bddAc+_jukm-T4=kCt*cBNJYoKa(Zo+wPgD8 zP+=ZD_Rp}x$_c8`G1*raNu^ny)V839mI7w&4xrbktB;E!t6A4w+x^DLWH8_J>74I z0|DKJawoqINk2qIrA{^LT1!{Ldozuzn$U;6k!&Y~QkZ?`3C1Ho5$UH>gBLcfgx!iT zPC*36&QU+AkECF1YQD@1>sZpIEOVg^i56OVo*(pVRFH5qGymaiM*PsiY4ph(xK0<< z%Xd!~hbn~AgK0of9cw*dJi}|_T_McWl;5!!Ca90cF@E|=`*%4k?NitLh7j*hGfwos3G`EUh^0*T}F zqLpS&6I;^Cs2p%knm(jk(%H4_1aNtO+^{6O7il4?j3NK2R-`!PoYi^B`v&(pE6pT> zQ1lr2r}xzr4P7}a97xhUWvZs?{8tLDwwX7SU7GtM2Bud7KO_xyt{W(E~skg*rl(x<4s2934)!*I%VQ~b4DU6NKeaB z`+|C0Xa`?1ivCE2Pux-7^+ga7IYwC_x>XSDKSm=o%GEx8Y`~UN3^GjK)Jm&Hoc2>e 
zV6?K*ak(yk?5lV$Aa@2m^t?1eAJgt0&2lQF{_u=W-%@U=4Q}%gdTReCDTwI=Y;=VF z0LDGch{eS*+$i4-ih4L72Yz@~K%1=OyP$UA?XhR02XZVo8&N~bqTHf9fjn`*n1X>7 zo7I8&j6zPMJY^!8Dgah3IAX($Rc~=x9 zm7YNjy8;9R)Ls%6y5M*-2V$6lDw@V^>xNZWRX|0{A?ZlRs$h5QKuWT_OYB8X4GC{9ExsJRbBv2m5qv7_)iw8*8#e z!|jFNyHELOG|DTzutds}xpMffV#*H6FJGWHj@UciSZjT{@(X=^d0KM)^!>23o}!f^ z@fXvB_8IHi7C=%AtmiUZ6ijSp2Xv*R6`l5((T_+?#s`I6We^Lhj0wk5@gpDY-4fYu z5D7x{y7>pp&I5Yb^YNKimp`o!mEn_J%YxFE*`OsXsL4c%uvKBa%@MT#`GdJl)c zOn4IIIdkffMtbq-;7TK3wSn+yde)=7z!v1$6KA>}_jjlz)c2n?aLD@dC9@7U-7R1$ z_Wqm5xMM`{T&FY9c3xNTM0T4vav{Mdlu_69I3|BJ;p7Lam3ZFlL)X>Pj7F69xd=T_ z;uBYCnNKe*(lGuc9Ln^IsvcmohzQuRC%Mr=7*vacfU|Gn6ySs(P`hWW6@*x!*N+47Iz-=Tk@ zaB6p!Z+5qsSKgHc-{PA3`z4PFBkAP_R0sqYYtUa7Q~hpZSVN;TrAC5Ekr-uTNeyBR zg&{^yOipVj`B=vlfhpv9XXj*-Q0-fSn!UTKdltXk24NG&sO>)Za=OQ6!)tzhHr+Gh zTSR4V@0`Qrrs?3p)r>@u7kw=j*M(oG$m2SAPlxYuQt7R={@@cWZ(*pO~d5BIe znzT_~-LH{38uv)9(xoZ;)IHtHhvs|HO(-h&3$No{o>}Mmy@26Q!qR=zxG?FP%f7CO zAR(h6;S2|nij&4ut8tC99Hek%Qytu^VWzb^7vhT!X+=}sq*|ZS9iEJ67q3!ry4Dmk zpxiQED@V^;l~N1!x>x*euW*L$ab}RPec>YsqM&832nN48hlmXP;+okGCN5~0!JgcJ*21Vou;RjYE~$SKB!P9 zUBc0gnA>6=wG8ZiUhK^_j*{nPn80gKZc2WaY$PDP`N{3T-kG}UEc%MQFp^e&Md!UB zPqtyqr#g-Hcb+hFKK#0z94D|c@RY20tI%g7BHed)z3^wbsKHM-Y+a`z>m(#yeocgu zS*Z3wly`Fc+Oy8J`-fhBR9IQP&%T_~k4Ex;z~0WSp1E`7JryHNB7V~Y^k!Gf$y4GM zUE^V-#PL3&lF)mJPF#hCJ)=dnTCI;7_fSUZ&m& zF}S=8)&r}mLuQ-(4IXt1fd%m}XIjM5*6@?&C#S*d4T&#VIfZ2Eh>o0Q< z`cHj_U5{~~aOV8JlXxlI2TCTZ-8uurz7V49y)X?uyL1f_TJ+#ObVelF?E#h-MC%-d zGRP>H0NrhG*8&dS^!u(la#g z#!<%;FYRYm!XoZ_eHocP+me=CD)jW;&J{hRBL2oJ4PWZ5}^1 zYb7b&wJ+vl^%tQ+=5=OYq90E7T(22ydPe{+5slF*K49`aEG?}#A|frNB=hC9)0OLO zzgx17-m~o(*7cwwda6GgSbLHRt2EM-R-1JSGt!&W2dun&I0Q|H;~3#H9Tvw@2cCKr ze&a#C;??-NQ~dpsp5e0@f?8&6(7wSPrXFCu7;pH#%AFXjypZ{I5)&31?)^uvwa&EP zNm|>?#OZqGAlkV8k$qlCeJEurol#qQq=s0yP4r8t>dy)QHV)c|@TQ;->2tApZkhD~ z&Po~`#j2UQyh@6X3>WfFc7^D;e8+dT<}j~YA-W!>lYY_K?xVYDuWU=!@XY}Q!OYV5 zZnfO1=`Hs|0Zb`MUeQ)Wgx3CimqMga-Xq*<%DUG)*wZM{BWsn-+I^bk6R!Qb|82He z&ukJOaA5DkWWI6~BP`96S&&CWA4vNR9BF{z^xVMu_~u$v75$zkJ^S9o;k?@Gm`JBq 
z5~(na`$uD;zFp^K^(K|P$9I%6t2iw9DVgOT80*GMhx61QPI#v_th4Qu-ay>+cE2UV(3CTudF4z-4$@19_Sj&UOrg9uWo>|&wFQ#c~m8Ao~2$PXfj^2 z*xaDrc{$CH=B3>HIMg6lPvdg!KRq4ize*ULDigP`{8Pch&+C99*^`#{JaFfcMI+%h zvX_}J%TM+3Q-3}*66Y{}5yQ#hSfOfPBbNA}!gAKwo&QHv!vh6L25gn40x>wticB;B8 zwMW&Ihw{>pNN>rTZ)RZoID@3;TQ>diB0GI zZw~$)tV4IqI2M`uZy~Z!I6?+|?`pApQH}-<1YYw2B!Bz~YA723^`mcGKlXc<2ncGT zs5!PZ+ZphmsYb=CdlFo=qQ}OnA!HI>T~r9_i#=7SdCWBj7I3x-(r01WT)`s)jgoH` z)&f#2YzG)O8IX1KL>Uq0ir}jMTBMQ@TOb@k4hr491OkA$1jPRfh8u}~9AmdC%Et|A zr}k4@RXv5AYlkE_dC5Xt0Px1QNl0Vt+^m)^q2ki>7Sc>_lSnI{}P@@_h) zL>A~+!Z;`bTw0v$&R8w>4QT8SnHT1IKr}i>K=bZU9F)WH9y}>8d6(9>3W02$J!G&8 z_&0-X_si~(A+vMC8Qkz#ZlQm_-w&P=%S#57%s}YMDcLe}NC$LX!DawVum2ff5(CO% zEW~!X5m087SFgWVp10Cg{Bfb{FC>2Kwn}J|R#fozUucnq5z6brO>|qJwGO^e3oZiQ zJ=Z`kf^AjjU+7NJTxiXWTqJ};zOA0?i?3SzS zj?^H;O*r`o2C_cn5f$Zq-O&oVQjtE?FL&m+KihA7D%I7tSoy5E)@f9Qh$szl%Xiq8 zqh;3iSQ2SejB<1H)!?c^FAba1q&1A9)AdJ$-nD$pA)qEFe(GzcGySZHsv zhrhK9fEqbG{Pz#NO);q}!E$k4^@iU0iTM>XXw`aL4=N;G4Ac25ARn`~F`@32PzA(P ztKRn#>M<@HPWqH)w2P%Ss&-D&f5Aj8``zTGg_Bh{{q`S|g_59+d0`5LdO`K2W{>H* zo~c-`YMNrMGN1=|Y6p0arZeMaGfexf^|XtFa3hrZ+Vy#+T(k6r_1=&3&0RN+#cR1t z(VQAO21ep_65wQ1!e19mR&Mr%8Ve#j_7Q1rbGHdW=g;K-{ z^)6b@WVEGM*Q>AGwJ&c@=Hz)g7As_?ewfmVdrhjKA zJN4C}yztmNQl*}PrninLKDe*Z-r3ARDbsrY5mZ8%l!{jBU^MKfv$q_lml;YM-%(9S zL85&eLlX$KyLI%NQMZ{_9!B$%7Cdjt-ev|gV6=znb}>mr$3JRT$kVm0u@BuFKWP{& zJ;t!ah?{n48eU?9-V?&@@3@6%wj?6S8jbY#7BW~(Q-nKrg!NtMp9;Ok`#VVoI?WU}~f(YF_LzL=vL*m8D@&)I_uA1{3xt<5& z_(M{B=k!;vT1c$k(2cQ02S2wSnoKhOquA?~Nxq+a%L_>(ttgmX;=3(ct@!a&&%^v7 zNsCJlUtOi166NOn$Dg0L@b)-#h0}KFp)V}R?a}SWwa@AiM-9djuvv{=PiQS~6t9V7 z!bD!g+tKdi2p!le9rBVF$N#dUj^*Epk)>)KQJQPba`IVuJd~PV;jpj+WXox7vpY@> z;n!aGwbW`)5$lMo<=eft9U5h1;&QmCLjn-Ym$M*ew{9HIff+~WjO}|9MBaZuCpJm@ zCIr_9c!n})yRn;F%|TW%TQFZI$QG}-S5xi=6eoRFPqdsBw67$x;8lX<6_*D2NNc~0 z`%{O`gFmVL9|1|PTA}GZp(-$)K|cM-GRd=fJz@Egg@_zH}LOVKD)V z7cY|&fnJEqu9vH^fiTS9N<_#ZUzArAp4&MrU=@>0c9F{vU*x@{2>*6=lQ+o!z~Z}7 zY|ahrxjL@qA=CqZN2r?o;!ktxI;<=UssaA?$Zz2g&lHD|TN2P1fi^C?q!k&cPj>+t 
z_1&KqNUk6=Y(Y+5wGI}JRg5rb3mKA$6BUblMn3+{3aef9KDtuLKMJZ@KQspShxWpuYxi&OJF+M$1MM;R zLrzkaJx<2{Jg5WahxLLhykz7oI303?V|V+#d&r>8$;TUAg*|Yt0s7CV#R4N$VCEx$ zFb8R!JU8V)lmSp9{Kn$71JM%BSkr_VdKp;8`1${+|7Wt_d-#^?v0L z(lFIuGAnCdu8QFtM_LjXK*rvzoyqNI_Gery7$?>I`28W6ncaT8Dff5JYDjoJPT^So zT$ja%KR95Nf2<_=vWbB#LBO9+XY~CbG`G3m{h>nYEto1^``$lmF+iVd z)_FSPqU*C`NF1FOxnTl8q&RIe!Vw3(M)ee8=SJHmgNY_=m182L#2-eyl*y~99Fgo! zpUJ2Gp^D!N>1ppvwedDe4I(%h79X3>*1Vj?Ff)Cr4p5+xoyBAc~_5K56N&VrKwSHJyk`vfifFBrzWm#slNIrw;9$t$8Bz zv6W2(Q&QiIigD+?w*61n_*ju$qbFW{zCVIgO`*^;oTU5^DVz5F!JZ!}w9HcLUKC)j zrrNzSefqVpBJ1;sl0_Gh;!ry6@{von%mw`Tt(K|C)h`iqZM*BZt8R zo|Ko7d>C$)L^1qLDx{?fxuat0F!(v8Fr>BnKBR^q@_f$WH+wj(y@Qu)Nm?Z_QY8LB z=g8zO&SYWI#kyali>T|SeB@cG>5{H}BL&Z?RKGS~PV4q}yMc!X_98Wpa^ zy25#DNA1US{|O_x{}jW8JU;d9V+PJ@I3Y!+`{|dGK#a|L`DW+lwUU^q2*lyEcYidm zo0S_oN)Gm`P5d zQg~riDQ~%>Me8JXSB#J-c0cyz01T8$7|)bCbaP^M-_>&2D)Zr=N6npia=TA4J{x7d zDf$P%uYj>KD**AbcSF@4Y}{noN@r2toA4Mu?u%#3boe-?eq#^CyiPH<+~NDlohCuH zT7+4)khO1)?YWhN9t}>TlKEuBw#sFAemW^eS`aT`Q4M$BE36TRYy7#AYJS3ku2IKO zo)0$4*yZ%u138}T4~$g*3D)jA-V&BFf~ zoQLJ8z4$PpGd#ym2M}`GtFx2y=iMehtoB%0PC74L$6$g}b)Ce|kP0iCLaa8vJ{zHd zkngU=d*1h=+;1l~W(%FJY;jJ=W|`z)&}%>2wEXfj(Z~5Pedd_2iV3rlNBp%V(CU0e z?+cz!_s;Vh@6Sb}GmkhwD%>Wq0ru1=ut7>(&VJXQtClDB`e#K)|2#n`-fjOzcDo=I zRpjX0uV?|%AD=KwJJ3NWE^a9wjgG1nT1Q*&JVDF8^w{BztD6KQ!rb=Tkuxpd-Hn>9 zJZN=yyL}$I7e;3vc3POfj`65#a=O7{l4NPIOW)UC8y0-(a_rK+m_CR(nvh}~Ja?O3 z72ug+mz4<&^IybHDSh-RVrjUoObq%)*f71QJ$Dbe&x9vLo_eMck|$oc!J_0vq_&{F zdqQg~?d#_4fE?ID)7bxX50yYz2l6jL{I;)!ORM@?ojk`^R>(WQ2kDjS+Y>*dKhaWK{Z6t7 z->%v->uy`Ij)^> zx(lZ&i7Z0Yd_56?qjlh)tc5a8a^YR>kTOm?><zdFvDvWE$SHKW<*xZKG{?iC#t>)ij7)lMUc%wWHf* zN>I{tqy%{+l24N^TZASzaAp`}mWutOjO!13NK`1QLOl!68hGfJ`9_UZSk>en_|*V$MYxH9*b1Dg8sbMa3D6g~0 z9&a0;8ybJURhU-4DjtAFJaogfBAd)%IYZQzzR2R0%T>(E5e{*laGKqVR?!4@zkK8= zLfq3zy;h}e{c{o;i~*rf3jx~MXV!yn z4CISw-`?Iw;9n?ZI}yYcI99}elh@BpP4)447ao7K>wBJ`Vq}02{QCj%!*}s)fpxNC zEFZ^7Q1u%jTd?FEfs*s8QJpdgTLCVCnw+U)whlA3p3lt|-^2Da@Cd!hfy&sE4g*-nh~0%vNhD0Wb?4Dvn)Ir1_dK+*M&TG$p5 
z*!KlT-vG;M_5)Z>efZr9um$*c{HxX41;fCad;Q-p1ps(q$lZiwM^W$LGMqASIoMW= z{=Yj5r4hL>-BkG;F$Nj@o_2@KzeiC1&y#o0b`Ry=%k2%N#};O*Y{I{V- zW6?iB9oc-litzgG!gsANhs1+TBt75 zhHCU^NBOH-&~{;laYadT+Kb!|v9!B(8Jb`78S7EsX(5}AZiU}QlvbEZ`L|g3D8^rC zLGz{Zj}x0Ms@tURBBoxgEfXpqfD5T?n@B3b*hMHHy~5Mc?nuD=T(6#G20=S>nm%W| zKK~*td++(W?tuNHzW1DrLS$(fI%Vine$6x*l;P z5^?#f&Fb7l;M#ie5&d+=$_qwG?lC~BeiAqz8Soh7DH6gv)js6?Fl&pMoBYH0)0!za z-)Y9BWtXY7c7*3TzM0_ba+=ux+1a<&YL!Xs&v)n~hWjTJqY(%b+os@wA%`P{uY(dU zcd$+aUZ>khwA)jM$;kX}FG_A4GfzCerp3xTHc`jQBaUd3YdWp&mW$|IwfUHr{_2cw zK+_K-TJ>(0^rZ0T+8;&frA_gjCV6|WyFV!@l^;&emklfX9Mf%^UP+Mek@+b6aw+wq zVyeHK5mLdS-Sng!@wnKaDpc6|mcGbabn*qCb!bjueepZp5Muv3&3{hug!!xI4T3ER;<#W5_8qvK`925CurHODku5OgjQ@Gzs zaloGF9FY-y>LE3E?2Et^_f(Dq>z|}Uw6=>VR;c@F2Yb=?XwBTMr!(?Q41RiXlkVl; zWq5m>Bqp1%n1)b%z|HqU?3~ca7nfo`P#Cx=W}sBE{5u{YyeR!-x^Usihc1^I8Mft- zNHM9!+ohkYQZ(iNVUv~c4}9$<{T{Bc>>?Wl)gostY-%OC8U)yH^Z(-DA(xLrLOku) zWGMd}J$D(b%Y-z91Z*u&?n|5k(SrC2 z+{j9yFRh+ZZ>6rb{w#yNL~1B_?n~#IyP>O2mIE+EJ-bvCvt3TB-$le9c^DNpJH$Wr=keEI;|V4RX=vZS$hP5-!+nLCW`4U2lJ5Hq{@uYk zTp2b7PGB4uqeO}vP3{r zZ_0$;ect(?d57#E{95+&Ky(;SsPT}HRz{3%183QzHy1hBezC=JC_Pxtb`7k>3ZBjV z47nv~!WAi#DMPYj&jR2)Z?qmvJUFDt@%nNClFTbFU9|+=r`c5oiZ6t6_#iJnZUCtJ zdte6ukc@C1GDicHbS53VGZ5SiV%G($ZP-{68-|zC;WC(3fnJb zJM(H0{XJwWtmxr=WMBm<%Roc0Agwo?q8w~wtUUJ~tU7!QM~0O_JY)#5xC_~U{n)?9 ze-AL==#1522TS+;a7Ym40l^hxINl>g_mCk9hfzrhrS~7Jq*FJz<7C%40rIumF;xbU zl;E`ig3G^ETomk+9&y0jcj-X9tw0!$z9trR_8N`7O{#G%7;cSct;FIotrIFOMjl8M_U{ohBCE!+n(e zd_9-RP3N=AEFG2Lg>=90x*G*R9V8}KMbIS*p|92dk=Yml7AAHdv#L}G9?YNNlgs(z znwaoB-}EouvnBy6l`Z4bC9*}29h*&-7W$b|wDdZSb&wGB8Y4+E4vVNjnoKC%A>9}Z zQV3yEJMBla65j{QF7)CDv}spArw`8USoBvm<*F844hGgM{N$cSAg<38hVNyn{pmJRCzvqb5%K^yM?;xFX4I3sx7YD~P;@Ves}f)LfG z;~xUrs@MXf9-Z)j$&ejym!lOaR`{)SP4s9E{Du7W?5eJA8r{PaSCGVr%HMa~3o*@O zUb@;r2dhS6Gg$4OOz%xIzPX*}Sr+;F8LgLV-Bn}5CyEFf^=g8|Wz4-FC#WM*Uz{QY zoD0(P52#scTZ9EaF2Y-(bg!(hhvdo7H`#@)kZf@PU~=SBYEW z7wL2_Gyx3Jv+?%Zp7ZloW*gJ#7blC}F1*9HT>@cGU37ZcY;wzZmef?oDY?#8y8)Jz 
z>#MK^iLMQj(+MV>NlGV)S7fe{#Yct(;8^~t52*&*x6I4sRf0)XK`FIw-v$0M80WBK zxt%sjd8^iJk}u!PvJ5~`%F@Q9Wz|%^JSH;I@wt{ zQ_kwA$$hO>dHA8!}$r?#)7b_B7 z4{PwL!lS|B+G82UP7c)sjxif-lmlf{LEMyB*q%4H1^4o#vU3qgY3(UY#}n1B5RRt_^M`%B>PTN z22(_cq>{!y6q78K>{}@$(e`~mzQ6x_z1RQgnlqoHGiS^h=Q+>0KhJ&N@yLMH1*d)Y z)+&Y~gVs{+1dD%7<>+PjNjxz!ctB_$Vc!YX{M&P!s#n-Zh^=h(-AAhI`1Sb7hR+<1 z^-TtdVAlB%em{si#_1?h&W*}@#Kow<%*#9z{cAaZzV7Bi$TY>wbpvX`O>RS z(_b95hYzNDM8;}c5jji!;&|qK7%k1{e7~daljn;G_WRLy<|x#uc9&KTar4Gl{Xxs` z`(3YOt}`x*>)}{yhK7UgV{dQvmKGbnJ91t$HQ1oopkl3&Q*}`JspiVuQbGgU#&`Ku zXT0*`G|^vK9lh1MUpg>aY+JeD?}K8o7io>eg$l8WpqRTO``tYAoGaGyo^y$P%tbFz zmS6RF4-(=X4-;6d{%r|^4R7@II~4@8UmmXn#bcFSs=5VTqA8Ks?V`RH)o3#1Jx$wQ2`<2C-{Kj!+Uf%Bu>s>(CKodh0 zw-)OFW=P9>YqZT?7T14kFgX1nS6!Thzv+80*LIE`N;CYUUQICT^xI#IuKC@L@oQ4A zCMFc*l_-4teM^jWGTVMVkBK^a{7ZN9igl<|G?>n~u%hX|`i*!zy)`G_+xv`4Zs1Qa zk(#2MR;>G_`g{jc|2g-#L>0w5Tv9p-ei+l60SAA^xo+Y(e`{~Y^D*tX%RJ7fZVCuf z=%+#@!ZQEq=J{NAr2irLlSoV0lgnM95{dzNc|~=EkB2XX%jtQnYhnT}ErRQEpU$@P z-kW{rDU&teyCwZe4%}7@dgL>QDU|;`14P&u4w913fIT;6s4lE?gps^*RDci~=STRkc6JTE=nqdJpVeL1?k=H@rD=A3Di39rLR0y4xcVF zgGbqE@gyqX?5#oGOCy|-W}{ZQuYNU3n?8^&lHe7<*ZBfl0uvys)PJ58{@!!osE7WK{?YD|@}G z3{XG)U+%A$r@GiWcXqQWL5sV z1>4yH7Cgux#eV@Rxq`hNJMz-w%h4O7B-_8cj{n6UGZ4NBoFC z?bEOQ8~uFGcM{4SxPC-b%)L(W`#Yk0ySKS%m+A8xIwk8LLbqCfTWA|K;XP-VN|@OF z&W!UkZ@x!Q?@oAQZn;l;A8s({aH13f*NRwI#*kXfl^jsi+8+6aXa2rpQ;benBHF%7 z?tf5XDt|?ViCzLCy0vqng(i6WwZ?6qDp6~DO;q)*&Txfn3;r^QL%v)r~h@out74WkwL@`~wHY znub?^1FvnZJB|CR%JNtn8`2QYBwR9Yoi`069z38h5SY(0b!C!*?qv;Yny~^kgFg|L zm_9|CZwj#(w)Ok-OmyJ~lHU_92cD~x_p65+WwryE^BB>8fa-5d*Z8YG3PUkYPQ0Z2 ztEf@Y9sSG8ETZ*RZjtk+K}Ny+Ma=`F%fz*NGcjYOIW9%k8)t9Wx{}l~3Lfl<3|Y|r zO&XbXk0IULV5rZmQB!Rk3aSdiRoR|rQ5{aR+xYG9dTkriI&@L5wJYgch|`1Qg8Th_ z?;S42G>S%@qpVXHNn`K@ncJ}l_5xK&{#LvR0d=u0(b&d(jSP?easeLDAolHOy>4N5 z>iF4vQ$_NKhu))>KP)$Ii3F(^8!+;w23GK=k<17BIXb@nOCxXcw^;p;kx|zI3$+WP zS#|nF-SPu2%wVGXdDV$elF}6CLX7CuW`*eRy!vv->jw3P_eBjd{tP*+(m(3dv8^-t zFkmk7TpG+p(XO?f6n#s=kB>X$>aSi4DdD^?;k)#lX$agROh6%vf)ua{dn-4l-~S?u 
zX;GU|<#v2rPL;j0q-5;(E$DPXIT30DlCq&tZP>|G^+#;w1U`$5D&*SAcZ(3+=zv&uizCO%AscdMv!ldja0Qj7OTIUZ^ zcbC@g#m(}x)Nhp0l~p2!$Z?cArw{2Frw1ITD|+!D6C%Z?4q+id0DFLcWf1VE#qyI6 zWa&VVS^8^O?d$ymZM$BdXhTtVAlQ9EN@bw)L(&^8o_m3!9Q0Kb?IdkzM;cLP)|kBU zl!Fym634EDJ820_Db;OaRi!fo660+@s|&0M)qj>e=pEWg5c$!*VkkW;_B?s0d*2|i z@ukT~LK>qy>zk70l>ZAbuZjKN=)a{Agyu&;&=bJb=N1RrEs1x5KIR!Rx9B3$#!?u9%D;KNhTon$ znNJu^0^}e>*$#yA{{jwALSIV+>-Gk3ItW$+rKsmGt5-IIqUz`4YiL$Kt{L34of=tt zdCz*<@wPveru+BJV~#{s&!?;s(t#^$j*ZS!C5@Uyu}GD1U3L8zm?P=$*Vc!0=Jb1k z5bYV8(kXF{?v2OzufaS&zYh}n#og+Qoj%8xkiLnI=J!{(z3Oc)DSDR&_@P@sP{a{~ zV=ZH;^~LsA>NnowPibDc7!j!dn@I&^Pt%bqiH3n8zcr@Ln^%1mHj10l7FR;wERGPu zZHmwgX86(X8+WE;5*mCLmWlZ^RNo3EhoLDdquDv}oHO`05S2H5RO9pk|AtrBmMIMO zX;Cu=pU}gnPyen%in;pa3tnPiKr=Lr)nB+cp~ye4an8q`zDAr6RnHm$&T{vxPD)Xq zpY=#X#cWs>W6)$p6yWDzoG5>~q$J;Q<&>Bmg_~Bw|owld2ptZAi_#FY+ zxtQ=$l|G`YRMWZ&+eA|B-KM9k8-MPT3Y$FUgBvSxbSJ@M-&+n&Ge@?gL-YHS-v)(+ z_MF~p>wP!3a8j=M#ub!<2Id+)AE|m?&_=8OS7ME*Xah4y;YB5IGB=-Iuz8Sbn?oYW z#qmoL5tlSOU0ch@$|uR$~BzWEJPZ}xDk%l(tt*vwwU5FcjjTx2Eaq{;2QNIgurnSRUa4f*E5fHw8hpeEVfA(fE@6(C z1t=2qhQc0}utffhw2z}S(3nn6aQ1jWKBZ5K6|3l1(vE2~cROPTiELgK1v6Q%-`KA+ zK3lnlm$^;%<{QD}6O_Wq9$trHK%3XFHs08U8yI9awVEYE5Yd!QL@ba^!~`j|G7tch zkJM#lGXOtF(2A|#T#-Dl$6FPQ`fUH|x`onFIjxSL85_2H&ghk9y~;f`X@*Aqaj75F z^iLZ&W9<7j;qA9YvxGtW_`Abn8wV~9ItJ&&Je5f$I%grpAde6v*#rv{6KhdMzdTY# zhh7)mw}O``-yLvN8r7DZy@_~)oqRl*?)ljo)Vqf_21b7HZ2m`w0lXb>awqfNhuOTDGy;|ra%X2yqw->kLYuLOstWa7fh77^a$#M;D#N7h0&aa|kGQb~T zWzbJ6jj`wjFNyARe6=N6LS;H|jj;`0Adv!_f;RTr>0p)PuC8^Hq05jXo_rEU1`sya z3YHtvffIQJpPJqPz{{ocyJl08MGy)BNRwfIsEEl+)_Zse>!rPuG z6CpFG-LTu{Fd)(J)9!yr229Kj_P>{Ni*XZ1mBAQJ6VhE?eARTjr13b~u-*;88L1p1 zHT47Mfz8;>-n_5}vCoP=(Owo=ajXWbkDa!DATo1&x$ka`ql7a%{Hl3!L!kX?U$h@k=8sp#lHpo1;!)xF17&BwW5uRmsHe~XExXGKHjmL0`jwN~p)i&Pd=(K-*Qtm*GR zo;K_VtWI62AvKkar_=m|h(?wh^XTiPRaO&fdlU7ay_c(hexi!*9Z>Z0=0T01h_xeW zx$h(Y_}0Ev5)T zy%$BV(_0esQ!e`PFEt%C=+vJpxLu?6=x4EXWkGznBvV6Jy}5dYyWQ&DE!PTdoKdJv zjA2Co%4*-3UgO%4)QE4uU z=5XIjfdi}{y?pP@T=UX~69Pqd!vrpojz<>$Q5_bwzU$lT4CI{VsLoZFNT+YeuiTbi 
zV#hvLmolG8ccIy6_#%Y!a}=JO$)AlJ?(DjuQ5-GaWE&=P$5FVISFBG=3)1HN)f_lFwFk8Y` zw>6_~NpG{yDw4KK97SLsd{)G<6eMq%+*ph6&=~#cOuI|2M5`4fU**E0 zcBWHO!r7?dhYVnSna zvi$DjTqd{inqwN}Tn{(RS!A?FjK*u>)*#?MCv)J&tzWPM!+LhyZsf~45IP{JUhYWFfnFDrD$gUysPG+gA6*l6n1(zVGDiV8C9b648m7#+m@4yzhOxv`;b9nTQ zGAtx1(M%@8i&vYTA;Vz5Wa+uHEl`O2v`Ngt!Jb-=So!5H&7S-41(W{Q65P~3Bhm0f z3C(_%WWn_m_)5`-V0gRd?Af6DU#@3P6Bp35Y@9i3^)x0x*mJ1vqMuGkC!4M!CawI7J}iF2a!}Vmk14 zc)I{o)+^I5ovRIf_Ztid2@3^6aQL-0V4VxL1Mzxi0f(4$1ew1KjK&28(^?RH%+u_Sb&vV`OF*WvJkOh7*ms9mdcAt(vpS7M7XrD`X zTJg@_iYQSOO==;bR_#4|XEn2^>$49*5!-@!|N1Gf4x=k=|^O&_6cmI^k7+p;?u6e}9cig{!DZeSJrZknrTOL&% zf0rYA@2R-$w#utt0R#Sx;lXD}C}YjKmKjZ*^{4zx6%TB;1E`G9oO#S;bj5M68F$;I zpWpEZH;1o3EPXRL+%RI*;WRHNq#a!oSmMhh4U+3!m)YzI%FRbrH@j3>O~|NTIn2K6 z=&vzPrKd{Dg+*GwX$p6B1@%ABl*$RxZSBY&QVh+SR`jhR-FBuHo_ogU>LtC@k@v|l zpf%)uSYtflpMFCmOPfGq(;xInX#k#4Z=1GgawA7LGb+^V{UYo{*1yE^#_N9KAv|dlGL)AZ=wn`K=2v&4{M;P|H zzzSFRlAi_CJ@S%M!w+M&+R`E|B?1|)gdz*#)qGp@_1DUWkO`Z7g`L>v!6H5a$mAmI zQTCRLuLGs^?mcC58yr@rGD@NcC3CkkN)lM%{R_3T@*e&~9@W@PQVx*EFC@uQTFX~Z zqP@{he*E&RZttGPb4}+~AG+8pNM^wlHI8PZ0aO|(z*3nOkxtJ+R>U`m*1={{9e=2TW#c$c~Q>O69$92 z)RI@8i^^LZqN7Y--%!O`)8)Y>$2Iy(?b=CGal3A9N#hK8-ZU3~sF@#V#X6z6O0 z#dy*TZyUWZSC4~Nf6bzb*){VFLymdP*c8upk`hg39nEy6cB|u_^-YRVC7{h+qq_#Z z0RO}e+!E{T)N{^ykYNFjbxwvJ4<2hL3rK~dyI$`D`2a>E(;+xHDTyCEs}Bb6@*3r7 z2+%2$HiE(cPf&0ch1~-mg*1LayluzHf3I@2$wIILY3uQA5WQ;`0{*e5-^G@n`$bF! z`At_cR$Axki%ar;Nn?{jiv9B)Z66!+StUJ1Nvb_F{~*&bDATup<@pLNDj>=1YI=BZ*++>=XCviR=N-u)@A>0{gb+3XjdMo*`@SG+=C?dPh&FZYraF3-j!#^2`LY7 zgT_+>2~yiGbnh+c$VJD4hdbXJlz#P}8>w&~y!*#mGwrSo_up#K;3yy`M4cKacxE3J ztoUBGYhY-kSK|_zJt5oO{DJ<1nr9L8_tc$^ZWJai@hu%alXrd3jiqg~%S@6I#XFR^ zvZ4CGkQMtVx8$4pQ}ajOYI_Xtgffrmt`&MHl#~M4O+(|fL+M)A>*CY;H8l@RON#B0 zTB{4Iji5{7+SkY}(3iL9H?!aWsV3p1kv{t}C8Egj{8VpMK_JZx_0;}vo#>v(iM&?) 
zIOF18j<(gBXtLi=+n*CkqCk1OfW-M*Q*7`rbTE}8>ODx*pFCYSb@14Sw>3qaZI9K3 zi>Wh5JiOJotcG=~61s4ML}EgNs$*e&06+$&j~;%7X-1K}2Sq-fuJ~)POpi_*2-O7b z7`j9EW<*8vFH@tdSWg^viuleb`YNJp@|o{3m}5Eq`P4#BEH?Yr3__|Gj$@GVO#*7_azbZVjL^Bbb>fa&34JJz%tle9&OYPl!+7@tFMyHAq3 z$uA&l1>8E9sP9gg&X916Va$GJSii^#Xjc_?yi?9bAgZjA-XVg?j?(`!W=zPbtWYI9e(WXZ7|oG<$93l{3eAO1`OFM4_l~?vn)8 zDR-QWf~k|GG`H*qE;Ny6vZPzUSjfD^9wA71TcM0x6iYj`j=fqtB%IPzvSk^iQ!Cpr z9d=z4av(s}v46P78D)i|lBSkfCS5yK-aaE?QiS>(mSAcAM-4pvR=;F9u=1 zhd_TJNiP6alHqhv;FbY42{$Z6iHx5G92rrOG9L$CvY;?*8GZ!DGX=w`(gCP^u?>R! zg=*D3Cdtn-gwSg9?`5GXS;qch=|zGjHMM$!)+oy(R<#cA@j3U5UOX7M8P_YdRTR z<^_QYR2?b{iz4vpT*fP9K%jRJN+gUT5__JtC)N(Bgy#t$gyEV0%eo*FgCh!Jr`-L& zAT4YAQBpX#IkXbHNmN6NvzZxpbOMUTN8J)1|%EB?pvXu~> zUpa|Cg&bsWvQud91UCdT*};oIzzp8t`{UA!(O`?& z&7L1Ky=|*L`1UE%+HTqmKKjYpejWW_{n7iGQbFsg#3IE9y+iB{!f?QpUj^n$Bj0iK zv||2Wq#GkhC%CfoD(3t=I~L8nnd^VCJifvr7~@<~K{Ps56PQmO@y(~u+$jZqNBdoP zg90pLGplqcjP0jy6<=#Tt{3A3&i@V|xiCY!0lZP#n{c_H1vPPIrdq3m!8zdmGn+&7 zOmz;_V~vedpQ~ef2hUuqTckN2TzM5)F^6GW77=3%`VUTT_0O!+#(B}e#k9y}j)_tK zPF$aG51i>9QDwcm_8_^zEQ4bpU2bLR*c(18)p{~ck7X%tMq2t8TIx(JV5kkaGs0gE z?$^s_{R;ugBZ*ovB=VU3QAQe_JZbN3hzXm_{aWX%DD3K^>q5zIF)8#CNr z{Yj3XK35x~ovECXx<8=kzMOk1Jdc=~%zh_<@g>-t<95zzNT-Xj{~+o*SftiI!QtK3 zWm3T6XVjI^#YldRPJVS(`r4gi-YG!2HRfz(hkeX8?oENW$Q;oxi%If%cu$;Uo%sN9 zG0)Oa+5&uUx)Uo9L9;7k#s^un9aj`KxS@=}iiy}&j z1zLqK=04oQ<=#=0=ZoIeyf2kfgBMJqk@>62mGZOAs16|aGBi4eiNx=*BmNU*RfMqU z63#iJ9Hfp&kw59!Ddv0j=%cojKy#y{b?ck;h%D+yY<>b3MX=K+3_{NX$w1na z&V*zGuo41*8vm(xU$b&LQ#sGh^Ue}v_l%n-xqlt4UK2*Egm(#i$`Z#tzY0IbRL6-! 
z_Bv&t?-;6lNG^lbdncixGT7`6dkqw4>5vfx3sqK?i9?dnS$38()7u2liHieN5}02x zqHI^0U=cmBn9rBnaT@~D@(}(D+_RltdG#vycp1+_`V|x083@6D&{Q2tDMKVBDT5nq z&lCIf$zq)&r>(t^mK7UK5VZkRrbB2I;G<*re!Xs-UHuu=-yb5w&-aYL)fkJ565&a<(Oa(Th( zGe!y_e=P_B`wVsSL%eBlaVT?1Y*I4>t7Vbs9c_FSK1sg7wWx(6$3Q$-c|15!po#_o z^Qx0I2UC>)je+3De}kO`xDEKfA-4Ovgdsf-g8qeGlq~bc!g-$TgAB>M|D5|fO}i~7 zVFxDggvEpfD_PR43GbDL-&IeWW1kO4%*;j2AN0_RN;o}~ zm;YC4`PknxzEM8wj1f-JJSr-W)ncFWOo!A=xwkLu!A0!?AUfe!b%p-x;*6G+b!|4z zYC_KxIP|?=Z^X^W)Vas{8Co`3C{+zIsfFx>`*%*^a2zdSVJio4G`0qcRa$LA)#B-j zOcXx5H^06-)-lc9J8Z-LpfdXov9rj!=DmWw)|*F#?sz6328T1wFR{m?BPq|$2Hu$q z;%(;G#@44W3y=L)EJ{!px9zN=&YyE}@aD*qh~6}|g;xLkp_ZUC!i}6GEK)SGYT0#7 zCTJj~UW?&P`j(uA?+PyV6w-R7)sO05Z zY6p5No+G_ssAuZDBz#Bt{JD_nZ|Y0>OguoOF3|3oTdn8NHkC&2_r$8!qjjwszp96x znIFh)^^-tKpcaQ$_9t7^Nrf@JgByv_;rZ<^^81}!qx=+B_-U9R`MdWKxbcg7vcEH# zc`0;7nH)bU;ez_Rt+(=f*rR;2r2?Ag>@%-E3^}Bq(RL_e4-ws7A;6`a8Q&k;cT}8* z+Yf_yQ-P9L&$&7sf<{pU!YWg(Y*l1aKTGsdY^4a#U>W+lhxeDeV2gSYbX%21vE-?E z))~c1RW>w{iBI^oaGjKSYNKAF z%yPd&?XCPk*%OA^qD5i11c89X@7p5uL&)eriM)nc0avj!vAwg;LKd9w- zU{W;)tA*+9k}ZSk_~WX$b()$2nj`lu5kV08iyGskr8EK?hv<-lC+r4J{{hE zX(U!(n6_(hSqvK51&aQixU#e08ihb(a5Kc)238<0*`4KoJ;Ia!9hIFmahMYpt1o0B zebjrNY&?MI=KV*RKRHEaakjT?mxwYX-G+dtAP}JSp%T^xRaWu?RYL&UC_^w=@M;J$ zmHIUJZHe7=WT-0upL=j$wORY~Ol=lXGVQ>};fQ#Q&eUuwaV-xxj2`%6FuH*Ih}5@q zB`F>U+y8CU=2Xl{ zlqmS;F>j_W6=jx~PSh^Na{bGCzZzQvbTX^m(NcOl+C*&o=4 zq+Z&>4JdTBkP4EB)Vv}aiM5ys-;@U=V+VC3t?v)m9x^{SSZt!HFQw4qeUs?Fkc9@d zmL$LN6+b@~Sm2FX)BNZ7KAa|IttiQJK{B=7QR-27z`gZHxm8ls3iJC`u4|f`Ec|?o z_L+H%>YA=JRhLS=9&m3CXS=`GSbJLkFe9Kel{INGx1KuTaH^^ILR}#J*XNW@{m*q6 z!}((enKfxQmw>*V2Ycf%qXpICCn_n9p7 zz@pFhWBgoxSM~MdopP8peu;%LKO24rdi%XBkBq(DHz%SiP01hn==j2_gW9d6W^WZs z|L&^vQuj;jXSpWF3NajLObet^o>3o&mberwAaBd7e-QOE`oO6WC5+3RmUz-}DZf>| z|2;E5?V`^dI3#`QQN2o_do-_# zM9oQV+OC|?e{$DPJ^Ed>=oa6pY4-2N$i_`Ji^FCmlkU~&JPCg(Lbs91KO?`6Izv*_ zDzYRJ*VA6A7JAs@XD8E+$-kG_?vZ3}wcB2;s3K|_o;wk)+9*OMBT8#4tIX#ZJ_4cH zF=p2!vieK^ZdD~IfZE1DwHUn zY-L!B^%(YQT8~WQ-gRI#m0#H9XY5;5bjh4`)^5sg(~>wa*6?g!d7x*Wj2|T|%JL 
zQ~J=be!BBbmPN(iQD?IA9Ga=IR^g_g2Osf>sf+`CV0=gFyf)J zI~V|@g=#Cb6Nfw$>|U$<^V6^!UBAeqb|KHXos5OA9IKRs`ywxnrdjsWqT89bIx^&W zb|b0uJw!F)z#!8Wi-KCU0|$tXx@=clhj!E1Zkxbe+GvXHX#?lM|~ot$r* z9@m zW{9^9RKdUk{*VrxaW!eI4SC>1+b&^+L>sOfWnPfr^)Ec^EpVkP+kqxv@FWOeG%4iD zGC;kCAST$|PQX4Jx7CH=q(0eyTbjvbFAr{8g=$pGw3gmqocQkW%KqIq0NU<1-*$`! zxBLrz`vt6QZK~J&{0IDsnp~5s?`<;V=_eES^zkBSJT+Y}qBY=6aukQ_|kX0Tr#i<~!sDB8I zC*U@t4I*PhHvg7XE(3CbyutUC*++Zpn_O>Rckl)l81Ab#EURJ+B9>dn-N6(car(ox zVwBnz`|)?hfEsVmSo%XoV_9W8K4Me!=Zy6w=CNZSi;dLxYg>g2c0|$Wqn~u9qeE## zoroGmf7W=+W49U2m|uwDYRT)K->f)+)d{Lr)>^E0A`1i$4Vmk}UCZw{C(Gma{XWSK zx_#CDmHif^0=i=Un|fLcI2s{W}V&Yn2uj!EZ*Ue{>9FC!7HW|2Wu+E-O8*9)Hl1KC!89uHe}dZ9R?1eWQ$ znu01;r!)Qe&88ZIKgQJuztg>gsAcKNwNw35j3}bR+rL$RSnA5*!cdl8q(%blaA_wg+9KyiVU#k1Isc|( z)&X6@$GVTNyHKw`Kap(d8BjhsOH}&fEm5MH+n-oao|pgF$xjeBBkPlc4tB}o6=5`= z^7p=At7m(@1l#xl&3J=ne0!u^IPF@(ZJy!B7s0Hom0!AZ#y@LqEkb4 zlLHej#Zel@R~!4ah-=r-v{m*AdL<5llL(Z1tZPpbd1MRc{s_|@m7nrS{^iD$XfNr9 z={e8ljUm(BT=F9sBMp7U?riK;&6rodW3hP6c9ye5uLnwGqzECcg~<8xM&4AKEZd?+ zHWa~^+gBE%!^?{;xI5l4RN1C(&(yp~a)%#k@|EBKO~uz-{H!;ky^SXDKA+NXf|K8^ zjSP%?dXTX+F`y~Ql(S7R1*FrMRM+vl=%Y>PFf7vYuJUa-<;(@LdJ^Ozd83T~{(2c* z)f{>V!BwS?L0DyOVVEzTYw@4QwM26m)^4Y-K^5^xxH^sCdG!UzE<1}o`z#2j`O zc1aR@=Dls>LK_sge87fl9KK$tmn|_ZZ4xX?r&J~vb%$m<_O=UC7_0#?w@F(|7M%6o z+)iNTNTGXTPxlr0(Wd935nb7nqP+`szYECfymGIVC3xNd6B(7w5TEq8>Q$`4Q7L1} zPUZr?op)JB;=s7CKdl6B8R~YazmbRqqn)D%!Gh)_+V2*+7s5}roDkqf&=WTj4|xgfQ@0>X;DHSHEi5-Q z%0u4S6ex;7SONj!Ar65xK#-P(xSBy;w-@JV9^*dU|7oM5!hL3sBWOA(IcWQRzg`XP zSsWpjboN!{_rGzU`thDp_bQI#?MIirFUp6^Z?LGXu959TiNY7Gi9V@B*MjFgoL9fh zgtQ|DrWPwi-uxy}sj~`QukspNA^@|iZkYZa!zp_BY18O-C3 zNi!Q~^+ToaXamlQZ8g56Ka)dR)p>6Xrf!H) za{uYnzVZhWVZS1%S%<5qa~8g9pSJC+oOyXuGN1HKQ@0g6plj~>~H7p#t{~sBW8bRAuW_0;P1mjzP(|BHi;u<3`pZU!%ih*Xl4P&Z>WVdt1NFkYo ziJs2^qzE%u{A8h`jxAqIJ73t`H-B3wh*R;VVcfl3UE!wtcNFkjH_vBmhBGQG>TL#J zX|@+RmYiN79?!-cIc@!gTj;B+z+9O;iQXzuQ<@TSt=J6 zfHL_ELxwEMtu~bgU!bY6&W!s%6yyp&`#vtR*-I_#Wjz!Knk+$J%~Q44N=OnKJ!c$| z{_Ux3t5e*zku-#L;F?t{36Rz5anI 
z#*Q;y_V}-uzjf@#ku9aif5si_(0sP?eYs%XKh<$BDUZU!(vbd5K1ngEA-i}f>t#hM z!sgs~N`M{BRrQTo1SN2=mpinbtc1yMav7z)62y5?rmBkhudZ;H*Pz1na|2cSj0`TS z=RS3!Am80F>#dB5(~nTu8jR~>YGUnqs3&FbH8=}j#g9s^0_!DyZme>YAx!x8q}W(| z8-!JXWGl-cfm+C&POgN1-2al47y$=8P>x>L!Q6gwGkPgF$Ne4TJzJ82J>#Wwh@U16 z+nIHNh4F>h%_AUL9uwe72+i6Z^%u-I=*>F`PIY0Potz7^Bkz=MAb>gGPW*R2tqP{* z;XIUEKTC=5+*PkixbpK;sAaQ6Zh z+1G&uUqct|PYfUZdagtj{&_Qo%q2sE@;n@tca>#E?Hd2JXUO2FGbA*r8v=2U|3LDH z{v-;}$rs7&^j-Dmz26Hc zN`FS?wlmtb3h~~;8o}<|GNRo5sih2C5*K^DYjCy%3$-ENkH(Qv>pX3c0JKYX=YZ@S zm0df@7lhpeg#B+I2AzcXAy~RF1fNt1;Z_@c4b=?@i-$!krHlH(KMSE$7ok$bE`grW z(S7(;YjQ_lj+{&)gert#U(13$?Awu`O9KO#y&WWu1?99*8CZj$NkKBCYT5I%rkL3p zszKyb;UfJD96Gljd&=Fjn(Hk6{`bH@lZ}rk+wf6C^Y;UP4MsC1Q#J8Uc?{o?qLziNqlb22Ui`3>`5@uBNW6UgV;y~f9_zN<9g7IUVX3*xm+>m~y_3!Z4N*K2n$ z)Y>=ZmGZ3;kB zW47LK3f=c@{;Pkg4jO2Pje7wh9rk!sB(=eMJDmBWwO6OO_)7G3Rm(W^GjWe_5%IUr zJBylsGvbUQJGAX_9Ni~9Bg=Rr7y3()C1MqALZl#%M!?-@v96E^1A=!?I0w~Nm)b-1 z!6|me&?+h~^8tqeRPd*Ro-~hOg5~d2(aiJa-q46I_YA)*wyW;#QO-o&f90L;7E6yk zi6KWkxU)uKdXKR6UuA8x-7b;N13hX@sQw7+L)D)=-k;&okp*@B(a)UTRcqrq=A`NZ zb5$akoIP%$0+%22MUp$Ku1yCoi0g`$m^}7w36ey!Gj->Xh0`29dbl#3qubc?ircGk zR`i^8`Gqk}j&k)tHyyl@l>7k_)EhvBeetk@D zr#!2Gp!7IwwkD*Zy?Bm!QH?0U3hOG%9I*{}%sM zhHDDxPj{!UUsWYH%~sIU>Lp<)Y0OVYyWJKpg5&27QJvQKrTB2y)oVWW4{QntOj>+q za04HXTbm6$=@XA9;*V@b_-{D?dZWY*?1?)th+hCyP|8>|Qy4|&H-q>Uv+;()XJ8I{ zOt3tM#CY(+kQgHU5RQlNLsos5hlf&kpbeaRI0zP{PJaAZCW+S15WMS;|0$<*2OAz~ z%MXjlo(8f#{5wfN88mYCgxEbNi`kY#!eUwoaGoZt?{KI0avJ9Vo37VuC7vce#c zSV-_L{tkd`B^2Z^QXXylAd#Og>7u6kc0vsg%MP5HrA}_}Uq|z?I@`Fe+U$1c%J@r$ zw;|g+_2j&q38TEh=I}qVuq83cQ*BsuNTtANGZdhJC5-NRV$n4uL9vhEX?ymn3?3pe zEBwp3vD?xOYTg|Z-1h874%APPgVY9si(W+Y7%H@Rob@R@IwG;b9TGq5_KyK5TxGcK z$U?!hMdx`KCWa}{Ijn<5g!Am~9B#n^9wDqhOs5PEJP1flEa|x+xRoCI6{5+ZNtPczUlty8{ zl4Q%!WI#!I9V5o&y|bxgH@plSTUdxVc4u`bxV*qoKPr5<)U!gotM6j|TY5m%rd-3+ z^qCfCO<;Wghh!la5-=8ZgGNgF05S({6Z5Kf}nhM0#(qH^Y}5FZSwR zTV!sqXpZF`LB}7zU1EpIl!pe^?TuU-`9=>&9~l6w{qV7xTnBH(lC#g6Y>kWjY4|5{ z4NoL0Np3kL4f8rG&F6xLO3{Z(z&)!3#)H+Eby|~EN41c)Hp~0$1ch=l%X-q{`OlP= 
z$kSFVYi-|oW{@tW{=qUzlX!H-Pyn&)3M6UHSKK?kudDA@v{KOv#y4DHYvZx+kk+sJUU19z?v=@eSsT`T5ZPN zC2>u8+pMpb<352+D?Vd|vypyoTOn%2S*KoCdEOhCGQam8Ci<4tZ*MipcF*Jg*zR{oI{R=hNV@NNz`q;sTW<5Y09w15S{zJZ6X#n=metyQKT1F2h`WWU zo+8P{;VoBk#@qv0oK@2e$*nFvDc-D`mJ*3lMHVcX;iGj>HhvYa$w-ut5HV_`{vWE| z1Rkn3{2Mw>6X)Gk2FT^J8Ct)#7M)#C|z)F+2ic#%C?ekAJ=l-mut*Xm%58w zMuD@e8ax>%Oq4u;^trWEP+FiIs0Bp18kvhs!0H}mSmdB>$_ZzL;r>bHM!Amjb2sG! zk3LXXr9e<=0VKpzB`#eR9}f%f5SLL=6{`pGw8`TLiCE~Kt`G|aYID&d`tHaaA=2sp zua988=S!HtGTpUihWn9y_6`3kr}iG9btt18eaqfawDIGpufm zEJR?K=P?eXG&9B-yVF(1Mbo|>gEn)KjVBX(LDvoKsh;$Mj{2k$c7*v(F7A{f;A!c# z!yZ2V1>5lL><60-C}PEzPVp_(cWUEE&rw(x>#1E4=Llzb@nTheRtG7qQ3^WptCVtN ztMOVID}!3x{q`#6%{b)PmSM^d_}Gf0+qg`1@_G0HB;keG^klGUI87U!;~W~N|JZPq zC3PyuEm?R<(4z@Tf{8OvsJ|=a{RP2#7&|6ZW!mM1Bm>HJqt+MzoJ!KTc$~2qOfjOI zP=_S_TAY7=o;N)3o?0?PiEBIi5vfSm)M4>_oNzJ>R1~Bls~+LyAR-iJ4ay5&5GJbG z5P)IBIMpC9ju_!U9}X~y?I9@V2obykT6X`2;Q!zShx>p*gpRSq0RVzjKt>nN-iEvT z*AZ&ACz8VgE+x#&OLvKkgO2_SYUm+)4*vHMuoVYC|8p2vp&S$kS!v%8U-NCf8ZW5y ze7N?VC6;4Xuykp!a31w@{dO8~B^e3TkonUU+tO$G7|8aT+Ljl%dY4jGqk(cVwcgSy5oQ0Kl-Vr)~X zXorSbt-O|K-<DCF-WpzRbH6Bg-xYmpez>$Yr|TNSvoGEW zTGYw&D(1lJk7wrB#+acQ*Sj+A48cF|Xrl!#WwF_SVO-L4v7r3*Mq0gB8ZRaLO^#*G z%j;5Bv;tvK(e<+hfl5J3miFNo?9~A^1v@t5i!{euRaUHZwoUOWMgq@A_Iya0w0Fv^ z-Y|F1fuPJ*+Yfw+NoKbr<{D;?Yj%ZU&yMuzL|eKy6V_u0SG=+f=TVb1#e0dCawl5^I)-dH=7Ik!b03AO_k+6$mfizS}PfCLU|I8l&KD{ui%hDId zPmw<@mQEF6nI!{j?(w<+AQPPQz<6HuUEZbm{`sY( z8T#{=xlWpb&0owpSNEQY98g%>XGHCUIZQ@I<#= zWx9*LTEvhSns~*$;dWRRESQ+@6A8htcfgt8(Sbs~oOj6}Y}BI2D#xg%_!FhAv({;M!f0i`eJL+t^6(aL-XvJo5S z*UBA$7^eeZ^tD6);YVrGK}Y5wWhrY=ETm^gzSH|L4m))l-d#mhkpdW|oX9CfU%DuW z?IMxw_$j)}!i-QRt07ETzM(B`zuf`@wFv005F6gs4(&(M|8|CX=q=$?(nyp0;-PH9 zCskc8xIgc`&l9aOhx{Xi2x#*RMsD1k`^yr5_3?~tOunyq+M`Pp)FSe`p$r*{VD~_s6v+%7iH_uH+VBNP3~(|a zK=g&C4j);Fd_ZLE-vNIx4lF!G)Bbz)-r@LLj&ss4 zKwNYLSdp+*9hO>{;UGH(A7KKIOrSM(IAB6RV8Rz-$%dw2A#b{ODjzV4;{|^P6+m=$O?*2_jWfV)(?~mO1hML_|h<(hYF|!nis(n z7>C%@1-pN^>{GIcpY9O$${>q=P3SK3UpAE=#iFBqvZ&RnpFc=-TFjUZTb(P-+l#4 
zmB0m&oC@RXN2%kNw~OCbK6&C2s&K2E>*!vHX!hx1AV;6&MV2JR z0%bHWz9%Y5Tz|Zlx{btEfAnXR*wZ>&M=D0&WY=37nrECVW9S>-Bmljcpaua_W;LzO z5+zfwt@|#iyQY(8I^3M&J3h!8c7RebzJ%96XUXmnhQ;F^lA%M&XjURL&&OV>$JkQJc)m?l|@ z1{k@kKblcM2I_RIYh<{!a+J=(FNZkhnSog%^RcPvwUBIk(!@2PNOwJIq+iA9GbZ@y*7kRvrmuE4W?uUgBkQx}^1eew{M8uPloC0}rux5b0LiSD9D>g!f6zAQgpmnDn7wQ6ppzRd9FXp#F% zWP%@WS|Y=8SYS1G=?k$GQl$@Ntr}`(m@kdwz2}fh9lv?@{{WZ|NL0AYgMCo@Kr%&T+4fflnQf=IKK{U`>1RzzV;?NZxOvk{$uIG3DY zZ?O}Rr27TTPU#3Q5oUjUwL82!s^1(gefB#ZizRp^Y~Q|Z@lpJ710G{w^X!yXH$%tD zt0+!L6W7BZV%9EOiDQqKfXa)3g#}m~dQzC7$e-auIFWlz{zCE$`Ql8(r;*6PEEA>A zzS$z9?;Lnzp9G^(&u{*l5u$G1&*5qq)g&DQ85wD1$O|Vs6|i@ z2tvSMq3b;$&O*n~^+%fl)Jn><^8Ps4CI(DA87;%VtK=&3ojZ(~t7ICTZt+W*K{DD2>_`r)WIQ|P-$CSg1}6F{VSQk5BnC<&JQoLanQ$@8M3X8nAGpwAr2o7hc8{`;RDE94+rR3{r?6F zq^%;LYNbk6Yq$w&Da7)O>4 zQ?u|qnbw>Qpo$xM{p`+I;J&fexw^iOYxjL`?Co~Hqhwz?XZU-yeE(@|yvkVRWyz9b z0hzQR@|oht^C4tX;aWo1baUBu5V<*!H7(4$#lSt2d^-?x0QnZ)UcV^%{li{a91ty^ zIh!8wtR%9@F$A=}Sx2qZuBPo3$QpRXBubEC)(i$NHMKY#_ou81kX{v2$lC-zhELWl zo$HLs32FURKMhg#5p~)+{*|WNUZmLj3(|^zFZ-QuRB?1BwPDjU>uNFKNU^gQT|VmR z=Z~7+%3iCxK@ge6KDe4Rk5Oz@zP`Lsl8H7edx*u{YBKzp>76Tj>RA8zpn`H*8YM$z z&kb;3&ja#Se?{%(kFn&&c*BxiRM*)hbdv8V_oG~z9J7OEKZa+|)hLkXV?~qItrSqh zB9wa#ODnzP($==OQf`^{ZuzLs+4-9#OmkpnQNDj|gX+q0XeC(j$ssU)Z|^*QIq`?t zCj+X14xNJNIN_zi8~YAz96s@^?X>{O*v&Rv- z$c)POQTq&R``(A>W*;5RwUIr~g>WbWjufeq@rF*kE%;%uZX^BT8GuVaqHP#_!J+jz z@HR)>l}OE_7&)C?caum8nUsLf%w*rx%0{`!jG3vUy}pcZ8$FW6hVzZ^J51V_VA0pXgg&ak<`g$vwq$yLN+BH&oraWYsq2A`~uEpqfohA)GzRD8Zr6t^;@ zKB_6#v~p832vvmyjU`AeDyCErhZB$g!6wu=jvUyNjI3MBhu}ch1lHrkjoiV_!;Jr~ zc`V@)!7Vxo)YXUK6iDh+N{@IvkOyoRaG@KdA1)Ha`T2p{HlP&d4>Rw5udIpuo2m`J zUjzC31B7gT&hcb66c+Y_6HPRMoeq9HAPHqv%Q5FCqT9s*QvYc{9*&7vcKy0P$_xjT zWSW>#L!DWT35n|MZ25Fix=@S1KpouPt99 zmmg&Ey9%oQ#(cjWVWJ5jys&$8o~4oLr#T_y4s0slAFLVzR300e#hizeSvJv{>OK@CIEqWsFp{a3ixZ*#L_9NZj2iID> z(U54X*YfZCJQ)`r$k|z$9<-1}7DHF_6Fj2UUn*dp2{I;hN}kg0Gw5sJ9K~!MKn6yl z??PhgEESR2z_%9@w2`ys-JYE=J=|0<#M!@^p@PTftQgq!6p#oV$k}}hPx(dv 
zk*_6X`$tIyj0`DL=VQIkjoQuTe|YA$ZM|(jXLv6CqIB*LovvGNDuT#k3&$wZlJDf+ znDv;9OG@gF7TzMd_+jQVy`Mi^N#UJxcX1IrYh^KtDb+)8bIfV?pe2ek_iB|fm?}ef z$5Uo@l>Jb+sGD|<7uuU4zcj=!kI-JN?hevUs#-$dhz20D)W9Jx7{VGQ?kLpDeX-=k8{?4aWQu8F6R~4 zg|)go;9qAmd!lK+SWEj(-rS|E+E9n#I2R$_rJx4YU$&KPyDogBu4ClPBxgJduSBug z4tvgR)AvO;=2d&lNh?m;+LK6XV0sLJ1TOS-$j%29&~H1GsBQg|H>2b!t5OS}&E+v$=4@g3ZFi_XlsZ{hpl9VyDjwM@c(20&f_ViS^p z+cPev;xlmQcrhksxeU2Gutp1*E-Oys?l}F#4I-m7a*$J=m=Bc+0HdAzuZ`FyL~;>k zhD0N0qxl*pc9`qKf_6#!*rsDdWo30Dk6smBrUk?n016Uu!~%g}<|o42`jNe{Lj1W# zxK)@Bhyekx0wYK&{I$QxrutnRVM{hS5l75r4yMyt(YxBtFTe^@Y{{BG@5{L>4=cR8 zq)}zAnGY>O#}y$yAxMlU(&z?F6cWv2b${gh04&Hch)b2PN_H|()ot3DPS4#$WLw5@ zhHo}X&3Y{T7Zg9vMo7HF59Bd!L_hjSw9nHSWNWJWtM{@{ycv=glSIV83rJO>_quCw z3_uq~5|ZkLuSIR*a3I5EcQoN-c{a$xBovo*FP(i5R;>%~xox7m*^0uYMo+FVh!Y>w z!#S39m|%=m7A6duIK(bY6X6g=Ol2bS;Y?thNGO^0!eQK_bEZ$;rNjNVppTgP1ZW%x zA_Quo|4&U7f;2xdz}o?_7W{6I{$#C?K|3FEu8|22kNn@T48G;RLlW!)1|sBe%L3)Y z0apQ%w&)BpRKWj=HoOiZR`!30@WY?Ij?8B1zhr)PEe*HI=Se}9g^}?a; z%~hI%gG$@gE3L(t(WSj_yZ4q&feA(5Msq{Xkm{RknoJW-rLMse&F9!={C7wBoSN>k z`AYeIVUo!0d6eg+ve<+MODz6!dF+fX*`++Z<9nwA+VC>%VbB++3uUzaXBnww+B7Vh z+u+7x33IPOgM~Y3#&k5B+MSk}LfhDvU9-ekLG%0#9Q_e|x9d%L;`Qr;g4()opK0mw zc@j*??mjzUTxMX{oO0WyPQc1ccO)T(l1}~|3r09JY{wDfms~rb(9(RYLqZ7c$8T#Y+uU+Qt(t4*Ed+!dP_kD zvPAqpfeRs~IRmPvn$1zicRb3B-dLwKxaM9~lf-}`bnH8G2XhjM^!X!s)ROZ(>vR2S zds-VbpZ$I^u|DXcFfT=2!gD+pyEd1A{=()h=~s3D+0}-v2t{(o)$rMtV=eGMW!6`- zu7x#D*nIGn3}6&JFCEE{$Bdm<5#4Nz|9h!0px?4@E^4qe)p8xZXT0HTZN*-=>U{j>lp&#K(pn*ExW>lZe2B|XB~gzE&|8$2s7Tc5XF?26ltfQ7 z{$m^_#6$Jes1-`lR7}NM_HB@^JZnJn=_0_ZbR7I5XbvCMw798H+7%Ol@$sJ;zA` z#>E3nM=|C+uQFzRZhDK%rpEzZUJsVvgN5pJFOw`fwrf;!S!A_98je={$V&;s0;qPz zC71Rwqxf$v7llZfD>J)z2g=T5WLp>LGkDM9F zP$FPkT@2?dAZ1miklxUS3x?2V6DB{)?_H^6_T}YiI>n$>ioC1eCuF^AzNBsbGyJtY z8E483?V16|D%%f}qrvGvh%n|~+ORMoSzzPNGYH8HXdAW*RWbQ-!PWS5yU+AUvgb=m zMqQmDLXb1(usG2U1F_qwuqs1?zTChqEdmEQAL>rQ#>e3qjC9GH#D+dtvI7qec{du# zyp6L?o`ipb&revcujlFh>(vP6iCUng`@Jj`e{OH#Z4>Mtk@1Fi6NUz#x zDwuuH9|r%s*noCrMkI^Nwx^3FC@UR5K 
z<$U*vvYUKR^k!`!32KB11w%Oxgzh{DaA^-c$`2hO_9q+%(ajm=x&<>2D-~0G&k41A zt{ow+=EF3FOki@9W2%VYG?sfkRO<`+1(tGfCIF@wAI3b`VqU!63R66XY~o6&Fin@mLD%S~O&r4>jQjMbpxdS_tGc*M%H1 zcMt4`9r0Q_(cVcHpaNu{svwgm>MU}(xVbp2bX&I&$Z@{&fO>pI%C;SFHMnt3)hA6@ ziYiACCM?Jl+#r$&>a@oI5Mvw?=s2v|LZ0J5ryK;+e`^asyZtb*{y#$=lpo+?LPvT4 zhQst^Fc7O5d_XkAF(iZd>-oPG3a$%-|9uGN53V66f(Li97(wR@9a-sKK8G146Er4| z3;QVp{+MEEwxL2Gn_7^gsG!v!HMTYLwUYh9?XaCfRgCNC_CL=xjLHfKaSIy*JhVK! zV)@anfwJJ`jlVXDMo%J?x2!xV%2&Rni!bZg#ul_~22u!l9gSUa-zcP5Z;-{x^5Owl z!BUw*o3!5ZNqS%&X6)ff(HO4iDt`BD_zBCUwHvo@m|AsRdGECwx^u(yL&@|=(oV{o zcOyXRu}gd^2h6iFXJk9vGu?dsDu3Dsp&ecbR zk9tQ6uGL)W4Qt-m8Hq@|ePhVG-Xx*uoh6=>VoMHv7PRoG$-$-R6i!|RkbV<(QVoo!Eoa_izay-5^uLm{V~3-5F?BFf0{L`P{2RgJ#*w^~Q!``J zmBM}RHQ&1k6UOQ{6VuIKaB-~jpZX(uJJsTAO|PW)0aW)zui$g*qtMd}t|YXXOV1~r za5I;|#;iW0<7C%cE@Q8+DEPb})vRdWv>17D+5fxorPa_5E5lc@mFA4T6}flK-af2j ztmvG%Mhe;sm+A}FjU}P=@jdaqeKv~ZXD9vAqUP;i5p%lslii0QVarP2&zmU!oHsZobJdM$s?rf{Q% z%UbIaA}6Vs)Miv`~Ok$zDsgw2GHGzUpS zb9C^xq1%-koU~Q)Nz=GvQL5qsktRZQ?F~S+_65(|)aGdE8c5q3qZ-f(S(rdo%M)&h#tB zkmq}d^=Rh^8l8JTWIfkE;C%CSArpg6h@Bha?aTQ1HTt#3PB{J_ zXJPphFCkZt0GhLqdWLiEz?CZVLCND)p-5Ke@sn!P*U~!EScNu&p&k2=;=wRi(OvHB zyT1^tB&BzSqF|C3s?rQk+KXbsp2N}a{?~17m)_=U{O4!+iJ-J*bWVbwv<|K^6;eTI^h$2xSWZN(NWre+R3% zbICZsTpYgk{e>`W6d}$*PVgc)Q8W!b4gm#AXUG#`XF-%g#6xhwzsEn^8D!St!ud>* zRUocIP&rywV>!ZjJ?V+y&&He9Y2`BN+F!(y{1-ngIjRS(>_kxZrL52n$0+hE<`#z~ zzfg=Tt^4+Rx7o&?&;HvIP`v#-_IzmDe62ukR>#a`fBa^Knb$V{Q5-pTYb^6dW^4>4 zuhF9nKeO5Xq3OnsRZL~tuKn?Z~2p_Rgk@%LNpVwfh$?i!m={&b#0yFzDKHp~7fBR{!WgCtQ-5Mt}cQ}|=%-LoeyuCsK4vhcJx z;47>%av8Q_l+l;3$T!4BlQNT_JlV7Lwl=WX&l%MxVtb}p7kz`aiVJ`>ud zJC}Y@>V-EZJt?6}GJX?nT~!lV&YpU#>r_*S#c)}r>ssbM$^MaOQHRn1f~xI$vNE(j z;b$qCV7GM>ZEq_oea3mDnoz*Z&>xdf7P9(-Eh+exUEjJyF-x9IXAIHEJ=@{K$>-5{ zOhU}GTkW&j7|}dv9;22NUqS`{ykutgVKWI5i_5swCz_|e8*c)aL;2}ww@jb&b{_2* zUj@{QR;-HzBF6h{HE)h|3&Z0T8(&EU1MQ1ul!XdY?!?8et*e>jnD=fM)fJ_x)2mL$ zE4a9}C*-_w;$AA4$ib;}%Gv%()sWJ*WyL`K&&uCnNdwXY3AL*2+%cQgxjeJjl1rXj zopahTbsmrXrh#Hkbj)H)l%QXqox$Ru=-Yua$MjnIH<;C)D))BSZpOpS4gN@mHajR6 
z099kb=rFR}kA%rG_ait92?GvK0l^V4(?jcFdLhrlTWoR?$H9$beBCbPq^LE_z}x}PfL#z0@=2z{;R;@3qT$~+ zvJtYMJ&x%~Cp!q@i5PXg*ZchN4i_r)5~zyan`6@x{;e7rC3*nq!hfhicS<&DYn{P? z0kuGvZd^x6c?1f9)N+or4|3B1$S6zXdqap{Q~e>te;5)7^F0t9Q6q2{IR&DG1wdzO8^YUo`2svw*Ild?Ti=` zJf;LQ1u3(`w6;X5+Hkg_QufsljGeylo1+_D*mx>#H*-8fY0>E4)TkMLOi-S zrcj(ULep5ht%s5R=>_53b}bbX2K7NsgWf3=aLW|nxTR^V4Qc`c?GPLX0%Qn8H~4Qk zUJhDUHgCJ2*%-}s*mIN^M!9v!|=Cio zatMET07LvOidk{M*Qn#M&4$<>Z`Z%yykp`P#*z%2c=eKI ztIqdh4JOoOOh?MEK2a}nTFW_FT(Rh&!9VtUXRg+vqEt?fqE$TaUA<2rT(I5hPn}*knU@}>T{5pqQBbyyrTF)0#nf_l*aX1C2zO4= zFfUpygQIAer^hmdNtr&gXS_^f6TIZ|QXXvlEznH7T05ZMhMV zDA{MpYH5C}59`&iYkiTdj_s#q+F2tuZU3qH(W)t$%l*Ml7NU-x6ZJ0l3)M@ub#Y0f z>^YsX!f+c5oE1uaee2gJ{Ok8#!hWibCq%OYGEuRw%;`IPzxGND8wO6c4|IgblKaD! z>z@()u<5+@k3ZxzaDF;~PS`thP-ccuyu#Uw_X}c6axV`GYs+@n-pe9)sProe%YAb-?vVrK`Yr8ZLb= z=r6?(KlNMA>%x*DiEbM@>JXCZvmg1bjtF2 zqYas0Ke5AS;*}KKsJg%otR3QG0ZEj@$uCeV4q6<}faHlqP&9lWen+6l4kw(Uu?F3q z%7&rt;v^EdyF7p@3VVW#N#|?V@#K6Sd?HdkMh+K8r2k@NgJb?|j)PAt5AqDJSR$k-Jnh#t3mWdkcY|0LPz?eBvgwSUZlt_-Ju-#7gZ#Y8?fIc1I0^3R>vY6wCS-+s5M%F(4n4FRV$oI89OO26=Q#ilpN*(0ix$O2`LVC@{cszug16`p@3F4ea$k*J_UA0e z0(YMN==!f^!w;3UC(Dgbd@fryG~Lz?B{U1)nliL)wn=(;{cNX2-<7=RuCaM_{D;y? 
z%FX)=qXd#_((4Ry(TXf93PAN8BUq0`P% zWtDj|ErF~zHhyUL1u9<^-soGCoA(LG3d(#b)E_U-=jeA@&1P$oLRfi}kt7Km>iZm- zwYo7i5|oIa$;J=2jN=waeO#6b#&Ef!*Zlq`ozqN9GYMi?xB207K{Uf4S!z$Z+$ko*2w^Gc^U?JmBuI$Ny5g)XVUb6)e8LyA zE3|2B>g>3VMnbB=o9h7~M_l~+@L5@}iq;Mm`1IGgvbcolfW zumr~BzC(M`ifC7_1q2NOWQZh6&#KcDcCR zYZB}&j~G&?NW$1T-n!rHpUiK>5!@TenwjMF2jLkPg=H=0EAxy*{ONJ___GYJ75$@sxw(U1-S1!y<~h+Z}O1yN)L?iAb$t*`pfO%~^>!}~j5I7&#T z;QB-e=a;0S;dS0*p&jOKf1Yuer(Dibh>68KZd?%O%R)qMBOx5b4D_a;9VADke~AitQ5?dg@BNfXnA6kDz^O^odR<^I1O4B{>I#suF>QI3&Od zv35%mfmYGy_)@`U=1qo(llh8n4U!CHo1WZs7m0>tRY!@UzTwMoEuvWZd0-f0jPIr&Z3HncLoRbsImt=}m!kGpJ#aPfqnsAS6 z;kbK3VRwXJ?fFE1h(S>6Cnuc{h>T285T6O~QGl-yk26fsUA*Aj=H}RAPnYT}y(=bG zJcW)lAB6IGHDTeeacS&anEHE+7ku*Z`V?TN3^ZxL2SM_x2l3&FKyc@DIC$ewkCvb4 zi9FPqVMnq9r8$Tb4*+36w&4F35NgDEK}rmkGf>E>8BGIFncpNHpM!;-tYTGRbC%=) zu0Wg+NZdhn0h)s7w1^NSTm&?~n7|VVmJc?7gLddA&n?&hQ;$ z(7E6z6(;Pln%#g_Y%!2lyP9L%)z%d{?qNR@e~R|`+w&3yop&V|i(CF#WKvyc=h`Lj zN=!)RI=O6lV1Fn;8l#w2<#Tyy>E zya7h(A>$zrO4@X!e#Z45u@=UPqR%cagy9i$a5n@=R0bjpCdVFA;+@IaJ<*@Y*Bpuf z4z3FbX_ve?U!T2wGkq(pfIw>W$+1$;%QLM{vED_@rPnXn=7w_nPwu z_0vx<=+CYt3d`DKYzeYmYOhQObNgvKp@A6Kb1Cz3rIo!v?gzF7!hYNyvue>}=Y6;T zsB0Xn(|KS`79lxxn<}BYvIr9kwz)Zha!Z^BD;dczqkwo6TE zRj-&{B{PxZTKu_AW)wTC1x>En@VQKzq+fQ^=tzkbB4^8mpuldBFTHRp{&Y>bP~X;l zLa6&oDO-gQRAwPht}Xd-fh;MgAikUEl0_)HSy;lkHY!h*#Cm;$aW=~YLB+)C{mQFP z+U*N=8Xx?6p+Q9HLiY@`@<9zzp2A+Xyt3@q@-*IyeE zn=Fy}Cg^+7s1W5jh=u1Dv+qqK9G%V&SjSI{k2wA0Ef@~2_T-_UbH+;kk+)R6+88?a zQrOEUZ<2XD;k^M>#In2G$t)E|+GW4}lWGsqGtoC8SU6gU5Ascp_xhc|v^6Iy?WZW+qA&a3!omy-5Une?YdI?d;4Cm~o`d4H#f2d6_ z-);hlXe^O~g>SP$4L^9G>WyY)x^cUv?8qN?J(BNG&2#PCArl#yEd-le01%QX0$DKk z+cT=LZ!b)6tVkpda%9JMB?BP?(A8r*1$jd1@l$Qi5E1;Q+aKY2bODW&aM8i9=MGuPUdhANIZ%d_wVEbKb#`|9b8~HaCIT(`;lh~aw}|I)xHQc zEG-uhKB;|Iq)1=Bf+_o;u3&0lJdggFwPRO6V=ElnKRvTSIIn&u$k?cys;_uH@JnNu zIyQ7tuO;GzgIL*G();*rVc`mG>hAmrg>qI+= z^_4$$GDZvf0&B>P{nlroGlylh4A0G~)$X2nX!P~Wtk-l_i~cvO9ov`6H#7`I6&BVU zKXus6E0P^N3N5kG)If$-qX=Gd0DkTJtw%1>Q>rBL;HROr;>^0;tFmz0MZ7}L;wtIE 
zt?!+tSBo*~HrwVFi$`yW*G-t)eGe*)bE(YvvbXp^FoF6hjv(q6-%P`xwyFC^ngaP1 zP&b{DF1DV@a?gu4aB)^8ZHaApQ~Y$)KbL*$zub3zXy?3b#TSju%>JcWO5$^4E^{pF z4Fhc#(t~k|i>P|g`f);KhE{36;zr{@_MENgadKh~FuW|O{W!9(biuOkR%T$*aJbV$ z3%&Tc)rk06EG8sYG=A$2+sX}LtRZ%Zw68iKZz!(wH$@Av$lXxg`G1v0URk?1?_@>7RL9tgW0=PQ zG-D{Yp#$a)Q^CI$`)`8&WBl*Be7MHeMV+x65MI%{6Ae(KHUAY5+Lija>YR zLj1*U8|XAdBrPrF{4FdI^!n>GNc;QQDQAn=yFO}fhmJ6zjseWMpRv>5k_}YE!xMUc zSOa(3hQMEwF_u(%0L{_PJUL<4hqvf6SPhtN&_SAIBv_lW> ztD@cSsCSdL3Y5LYw-!!FWBE}u(t?yo&Ztz_wfByBD=EWC`y+P;R#w`Mzw9$d$F>wZ zT=F`AYRcAOi5MDbbTaWt-4YkIOY|LD?adh!KK_Cg9q(4h-j(o2T+GZ%>d74MGCy#5 zlSQz*uvK*>TF#%ckzxm57fssqJSir&?@huVO75{&H*RrFwm##Wp<$=HZr58X$3$-# zg`16-i3p~xpW&<*;1-_hGkY<>g2zA=ABW<(EIIp|a^D#$SUpc*NHLOCJ!h`tc{Mlh zrfy<~kNnW*#&~23WQx-I0!dW|3{}Cj@eu4rCiFol8deJfw$m>5OZw^OnB48WdB{V&dbPo9Ghk) z#08d6s#1s1FP~lzp$w9V`49)p1r8~X-+^A@I2YJq z=HB!xK4OV_a1G*X;jHc_XN4O+I%;prPllS^#Tk_+JQnd8_!rRsRjwjJ4<5!Ns z^^q-*!OeR$zI69sY{e71QIgl|5n}h!elUMqyn9~Np5;NnI6RCWNB8RvA3o!vsVQzA z;x>T+=O<*^;_K=Q`bC2f1M)Q<*LXNj0A+kQ4ranRP-l&}I}Y0y)XWFPx}mp_WT+w} z06NXa`Jf{s$DLjInw8sx$E~ z9NEv#q{{;?k>EwpLIo=^Yv5Z|b3gKsijO?}5FTX$o`Uu(JCp)SRCLbZ_yw^<=&%6x zz8#O_iJoeVM?b#(MDpAS=#W~S(rw=UTDmW`wHQ3oa>p?(I5YS6tI$r$yCkcPx*HlF z{M1O*wA=&e&uaLcOBhRxgL)RZGQ)hI;8k8r8{JoMP|2p!_TCIQU6S55_xzRl{oF9c zCsAEN#ef>|3T;sT_gn#O`1!m*!2HNQ-okj_G2LRjM7;UC0p97mh2;`ysn2kx>Anl) zTw>wW7)Sr@!qLx92){QX~ia+quy;lNe9rLP3#i`S4&*; zcKzQ&`=q-$8_VPfzQUh>yup4AI=}dRmNp1uv3Ggit}mQfa3O7El;lfivWaCcqT&i%CymX7BlxO+PKImjE)4|S=3GY!UKB7gwKt}JFVT9M z(q@dsc?Z;@0BqHAC7? 
zY)sx}%&HV^$17p4Vyq+1Wv$FXqklbS+aUdhi%163PIag$tD58UE{$^qB30+dcH*z| zY_8wXFdupKlXEN9KeuA^Z}sN9WpJ0~hqezly!tFPUIM^P&)-Uov1eAlZ}P@c2<~E4 z&$T0Z|JFKK=LLb=Y^&|NYh2lWuKopO9)URUKI}%H{2q2-v*ONZ#9@4mW@AFgxCJDk61oK+MK18%yP%PdnW%83kR-K%jedO z7eAE$6Oh|xr?@;O_;=XKsy7UqZ=7cqvTT_r6R6Ndpp^U^1{Ci8K<+w1N6%tbA=a{` z7mq?wr`xnU?^nN;QLwwWwJ_%W`5jCp0c(g+t*4@VnDUNG;Tx@E z)5$}Zuh3=2v&mh)&JC7moC65}U#tlu;nIGaeIEh0W^yfl|U zqm|Lz=U!b>u5A1x^GdzpvRBKW=9B~Ir;35yX{^_KAcEW7<8% z$E{eVuJbgDn+yR7nJvCU@>g3Q1|#3iqf7bp1){_e z4xqHp!UW;5YiOlMUTxWQ8tN-PjFy=ByoZhGrqlhiQwe6QBri7~hmAaX8FQ0qv>+g6yA zmjTSzKQ#3pr#WZlEIKBJW&TzyqWv=0DgmW8y zHAX%^els~gE&whLdsR8CDyvQf;S3Kk3he@UOS2xK|BtC}frl!8|NopbXYOP$Mq)W8 zmtlu4BsO$L6j4UB%hEO^sYXd{n_6{dq+Gg;E)vBgm2TUhbh9;D3e`r}(N?QcX;*!% z^;Jvk{~5dA-|PRG7jw>>&$&OJ=kk7@&-1wFdeqag$d5r;K9`#qz>3qUx|q|h9cUIy zl&C|&HPeBoU|&vmz!u|5$sEz$cQe^J=()xTY_w zZ;+{bo%n=pnSO2Vblo;=*AsXA7_RpGIXsi$P<>d(sfJ6)%E#mNSGd5U#lP!tN;WEYrZ9xoFtl*B3BGWYC=DU0z7|4w=-cubaBHB>xak$NEA z4*Uil0nPO7_w1f5X~V6*Q77MJXXgm|!0#HQ#(Xv4%{hiX+bNrRUNOODviU(ec3Ua& ztH)}${+TOK^(RC#-klh;?F5fgPqHzcaBeWtAIdI{G1$UWx|tNOc9imeDqPXg~)+s)0v)4v(%v?yyS3N31qZKO(^wZnVG1 zOQ`d?wZg(ES_vA2mze>1DvTBUs8BY00J&$;k%}6quLwvZ&EYw&n$MAkASldeFnZ+$ zV91GSTW}E}KUYJkU|`|r$!*pJC1T_k$+y4P%+HJr=^FRriw7Yu&(;qwYzK9+?7%1Y zgG)PBCN93w{QH_O4ss!(JPhu6Z*Ke4N9JYS+56&NxyAE`!`6|Y z;;p~3F3k_%z3KZaI?$%Oi;qTs+WX-4q(Ig2V@HCYZAy*%J$mJhV=FV7q8o2}Uwn}8-O<2?tL4i# z-g=Pmq4vO|S;G$+XXR#;9SibW{~LSurNT%r(W*12Eq&`NA8nN6t}gx6D>3@`g87Sv z7p*>TbM(&*5r3Un@z&?TtNdfl$2Y8!zW8(ULX!vNZScONHhN8b(%AUT!`1uWyJy_G z(^LD`%t?ay(7P#J*I%7{>|xUQjg;k@8FA~^zC|aBx22Ux%8q;qUMqTA@WOd}7d*Qh{jt5Q@0GXG?)9qFSNV%BAGD-Il>~_m?F3t|>e4bJB5feuc*r*2Q1PZT>Lz+mCnP zDn5I;cTUFsMp53qGau_4MkMcDF5VNa5G{Lq&*686v>P*xYSO|NhtK_#aQLMCkJcp@ zwtdkgykXA&!{(9XSpB))`gLnQ7FovTd+<|+kJPywDUhxYZ1im(cH1mCw+BVjGah%F z2lRcnTn;YWUkITpsHt9(w-^*x{)9I2!BI;xf1MP?r5R+Q^gOag?R+TQLnXv~?{22> zUy!)_e222jQao%vz)!wA7>}<=k6SmzV7a4F!mM~zbOW_k;c;IYSm`JBG0J?U>lY@u zrVcl?-+&|;F8BM9JI7sutAl-_+2@OT1oGhdBW=-tBi2u(^S&Mw 
zOYucMq!LAW8BBQ!L5|*?sw3un5JE&}85U8#n2u;XL6;V8(6S#N({f_WXZSFEtS>kc zo10lVg!BUP9*5fP=t{9#f@UQ>@>ez6gQ?ViY?og%zUpl-+7W^bs%~ZgVq12;n2{tB zeN+WUKg>gO$`w^vp86XWtNoE>b8MSSZ#;jvuz8&I9=xL&B@g;P*{?CH(K}pJ(qp4g z-ItC~`6lF7*BuA5FvQ^soyW3%Hfvh*G5xlWw2>$PnD zFeDpKOQJ$vcl|-n{c?ph`*F;IOOx}}Shx2foiFv2h8+FV4^hDD+Yq}K;b7CiyeIGV zOhz_^mg3=R+5))dO4<}d8R@p?RBJV0YtW1!Ex&(@UPGc{L@(6MxY!}L40<=ijqL=0 znY(n*$lW~=j+IuSG$BKQHGoM(*tL4lguzWGoT}Jn^q!Mo^hLp}31H4l@PtvL-W%94 zgowg|EP#$*Ap)N zdbIn$f1}4sU%;*`?)kz2Em&$^c4Ua%oz&B57! z1UE&G6b2q`Y@9W`+hN7ixj9y=mVZn?c{pZOdFtKAX&;{jK6wq;&KvuebzWghsdb@_>PxhX&F^@}|_Yp9Xs^lZtcki%nhPDoyVQ7D}=eP|hd{pg~M zH($+r66}>Q=TB+VQRYvtY>Qm({S|c%e?#3nnXvR`u-SwvHdfXPbWW}=(})d_jd2^kN($r;kG&6KRxjNP&+9mdir*U z36qnS?+iaQW#jx8yN}0wzwxu_7uTHl;$Zxr0KN3c0>j`)>1egMoS|78AeM+3gN@#(AZk>5@XHsp;I z{`K}~)r-=LO;&fjNABFa_cyW`jDNlV#P>H(Pd_{yF#Y#?*_Oi#iw_@4_@%ep^Zv%7 z;|HPx%PRp^GIw}8*ZO6|Dsz_W;8*&ccfZXnJMarwI+Z`9v`5^y_uf!HJhuGpLh!YP zJp^{{nOuJNTbJpB^M-aWO-Y(?VItJq`B*;a36yg0A26q&q&S>an{+!)>RG$8;x=|FzG21J)yY!|EFRR0OjDlmrzGo^ zZSWWe_7balC9~vwQp2eZr8aka?0H(IB-I@$q>b*uWpsP5%;xOrB2wohTYNq<9*@=4 zvA(>Sq|d)P4_^|JL+Fk+NGqVccD#sy2(WnP8g}~;sd#<7gu7TrVMlZ#smF+FZ-Twn z;!%9owPAEHH=B=&e3vz;q;CDhYs+OYFtjVvI|s8V^6n>*vwPJ>T}JpL|6ai3$A@`{oHpE zG_KyT+bILp-M(E!w0|o_o4Sw`BR7#Op=E1@+LJ~Ey26x>C8jVRIEM7+Lj}P#1uGAC zo1Hy-rkuH#^S+=T<6hslWHPgQ!`5?c#~00u!0l$vp+#L{J#Sr&f>EAQ@Y8BXXpdb+;9(ydpf0*aOtKDa)DlbiCjMH}$om)Gj^Mve;kxtVQeM zKf$gr&zE?Rvoma1wW060cFb3e$e~T*y5d!(#z9#&k6e>nCR@V+_qGowV0eFk6F!xBs~4kIZ_wXd*fe zb-H*^Q5Nya8sq?vf#{%O5MRL@vm{Y$PwQOI`9*w(8+z7{Ut+;-TPOw}5ZD$7kse-4 z7oBW-`|aV8Bj#Qk7ZuU#zPS3w+S2~lBhD{=8!0$`ue`P7@STZ62hOx^aG5gg%&l+T zN;ZAEy6<@Ai5&<3oU`WE$DjWDD{0G9PP8u|9liHOLl`W@x-RtbNK8QFI-=Cy~}j}C`_iyZ7%%l(&6PuIS^Cn}hAW#w~b zbq|_Pf4eEgwe-)w5oiCcZx*BuPl!M6^S1xklj{!Jqxo)4-qDd&%a%3$kd%4F!=ZP0 z+0o(IZ;wu!VPxL9Z)Xk7|L*F=ZyWo55N_OhC^)HLXvG%`B!8p6=WR9*lf=G9$9G>d z)NH%+IC9G_#T2i%uI{l@_AHfVPu^{lw*Q=J&r)pv=GvtbP2ya}6;lsWY>L-J9!ltm zJHA<#m;K^#?AThLHN`24v2_+5mltamy|`WXd&)-g&7?K8mDkgYSkm3rD>_`5tIrOE 
zuYGm5u`&DLuAg@^Rkv!_O2@rPl|J3o?3E15Cb745PwSoZlc%h7R+XB@>bz=h$Lc07 zy)0sdPf60Zq?}e*C~{NJ(~H8**A_4)F-rnfrJ`jwywbx(y$=e=LYs!gxtO$QnLaP+ z*|>8zV{LMCYUtQlHEGdQ(q0;gT2I-j6}3^rMa`2>PEza3XM0Y=V{5J#;O04x`?$$< zkE@lOB9B*80DWj+>DqWNI-E!D_trY^>lS+KB4aE+xK6%^G22lQFjt+p*iYUc@8Yp^ zZ|?QB8HtIe{FTm^PaLW{mFxI}m9k1@hZl>QPH}IDf4QKJ#~XR#U3J9WDgH$=zQ{`# zVihbbLjy%}PI#u=)OBLzA1d>3J9YYok%!87oy&vF2vuS-abJQ*hz&ZXE-nRQK56EB zw#PiU8P9bIkz$G{-va3eQ>|c;#q3-yv5Uh}$)cCh%+msn+xwWQPYB41hdkSUJe@Nm z<)Lq2A17#VvU_5vMQ2|tH5E!O-N7M#@!AoLXV zS_CnFO33%T09b#P3MA86=u%oNW%f{bhwhbVy_D!LV*Gj|CmEhIz$ z@QoIw=uv{1whNO3=I@Rwa5enpYSV$+@(?K@fhm)KiHNuSNpdoHbD>->Gh~2bWe1gv zk>P_F6`%@KBapWQeiZcU{#b^FVMx`WPNT1Yp+Mk*LAX*b7y|{x7$hxL*_{AEvRPYV z4RKm94NI(r?Vk6~=+RZXHk=`nJ}n3`iLG6Ghq))IsiZpDraI+x zPBBXwchW;uAiDYGot$&ein2p~WPC2D z^CN_vf_Zv1UG$DwWC*}khPEp?>R}PMf!?$^U(VQl$78(*JtL&%LLc|s4zmuKl*dgz zSAtE1**&e8q6<7eZAglfT9*eDxF2I zwz1w*7sKq@NyWR?^L3$(TUi6__aWC5QIj8@3eCITJdn1r!QZoIcZ-41IDx5unNt@% zIFb3MAvfd&)!v!F+icgO^Nj4sc`ZBGS9c|b*b&mk{gkhs;HZlc(!OLzCj#D3`toyK zkbiNpQx`+?MuDAyNtRMJIKVp#=%DyZTAPls4dvlG(z#btiq?6v2X=p&V2e2}+QIl9 zyTa*3Lt=<)?rt6jHB5|A+&GC)Gg1N?_+=w@8YKMS)@85DL6r&;%wfe#P!Wg@d#I^u3Fo`TUj`tp<*QE zFGa>@39S?Hp$aMt2;)H28dV?~*Wwjg9xWjy`JChDh)exCOJ?PHV>4rZkT@BjQP^0AAROV7*MmDZgBL_7BCA478r%Z<9v<}U zktCl+Onsr**}a$#u9k>Ht03_ffrz`uIIM75{Q-Q#LV`jlN=Sr7&-O}QmKdm^FsXa| z1(#i4htVmSGf%rsSKQ9QN}Wr?LQ?MTHjN`vnjh$jtoHA8OL>=E%wiq2NMtFCZc>vb z7OiMeq%=Rd7Hd-zcT&Wf_w;dfVyy0QtW8}@?y_qpzqH1kTqia8Wpgn|ve=o*@MsP< zspD0WNZ(Xf&8uT6Jqj2;fhBcy@zm|gv*BM#xr>(JSw($EF5__?&rgbo+&^|FcodLD z9$Ao>w)J!Yo+&&VYd${>-O|_f;HDzh{0v-KQ$&aD6w^zslqMZ(YwZQ5b{?0DdRV(+ z28rs_Z^E(ej>W{;u*F~z%!PX503TZpO0~|F@kmXJX?SAn-noP~I@EtmWtjv4Klx*O zkU6rI8eIbcxtBibIO$qv8j3;&qj;1LFO=J|Sjz3L`NKdweN;B*v<_bHcLE=LEBy%Z z_fTe?azx}{OyIn!-WiC)8e_H$JmVc!t_s1z&sTN^HESKay=mmuIxA;>fIXrHY+ydY zrO3nEimsvwZf_|_IlG(M5PPfGfT^eVEgNKg9H}s3if`5hysQ=L5#<-Av5n%UQqwDaGMWHch z5b?h=HG=Ule|qQ6$w`3BG9e|jXQ5m;bq})*36w%9nUBE6{+{TKHy)=&srn&YNpibPRWP}w zJh7i3KAs 
zob&uGIBU%$SW=8kPC;r>3V6gGdCW*y&OI+ZSy2%ulv%`BNlTUr7LN-ncsn!dvhf zR4ct1A`cG{OBr{P)h_k~Lq6$1`iHk=HwA zoz&#*#)mhwvNbq=dAH8FUexy>uhZuIUZF+P9vxZdp$HA!12*-o>+|J&9!Z}dE3a`R zoVajER7}hyn3z<;@z~*H;Yb0S0EvRod`rU-4ID1Z9`?V4IvPd(xElcQ$cJs+C*df) zQEZ(j8Yghc6uaCU;@NS?Ds7SIiH+5mFAcWbZKIkivFJqANO6FaqE&OErDy7@4ro)F5>6#-RreC&(6iE~KqM_JV<=xiZ)ivun z(H|Su`O|ZSoiPs{edRa4&@%Yi8*Xz2ja?+icq{A7ba=p%eq@)-pa`7Kr}FI1EH6P< z?zkeCmAo>c0FVphAC}eCWf4#nPp%7(2>Kf>nMfQ!cIJ18%xY5E$kIP5_Aj(SKQ@Kx z>?`MC(oG*0RimLVm|JJck<(fB9`rgy7m(dp6y2;A*x!$J3Z6d{GC%{&9nV7x z&|TU=L2H%#ik4>c<^r7nTU#m=bQ&WS`g(z|$UvD~I6 z!iTPGHZA$j=Ln7X z$r?RJiz%zbe>8IrbSJ!H#*6pABg1OHk>QOQv%t)oZ)MRM8yx=~78O1YxBUD%!MEv+ zcgm<*PUl`=i~AtK4Vez^;Z>g>vXW*Q=ZiGRJTh)JmYVx-d7p){3Wo4d50&>)%6s?T zH*^dNx^b$?-QN1Gm1#-5UBxlinx_ri?3ta-t~>!g!F4}l^TMbhb;W=bYpfXQq*1V7 zEOPm{H;=%dQD;w|D!(QD%gQI~)sk!R^{SqnH@j71uJ6=ZE*neK9FPSn*=+OXQ1*Sh z)_hU@hq=hLwM0s0v2t*X&#pn|?!1ju(3o8xq0W0HV+N>`LBJ(AN$g4iOp+UHDF(vt zm`%ES&{CFuvm7CduN0tG!*E;q zgi#E(W-My2SDlS_Qq?9tVNhBGCki285(_k3K$$7vTZ>~P3E$S{P}x(6L2OoVZK!)@ zfP_;}N#U*HVVTmX*K%O+YEjx`txX8Fzt4Sk#rRM)kJsXkhx@z-S>rbF=xnk1erd($ z;x3(6a^2T85x8?n4FCiNRth&qY7&dh4U$Syq$DZN|&JAI9U`$4f!$W`bAGfahulQ z4NBbB8+|v$ot6#Mp^!mWmHb!UDwJi%{1I!Fuq$hZ3vbior}=g^v+y*I-DjtGf~>M& ze&gfk%g^*zgk1^QY|&QGHE?KAOFyP8Y(1ixR=#}Lc~9WZt`|^qU`mF8k*$6~ZVK+L zSKGYWNCz>d3V+4>6c&CSPtK;VyCAS7{Km{hx86;x<+;b6U(_Cm43X?fbOftNU&wE$ zr*}m@?u)f7KNLpnzIF_^a|rqq*Vg&t$sgR4Etj`?(j%7dl!ZuY+oJGP4&O@0u1U7G zuQYU^gxFUepXqbX)i|N6Ewv(I@J4n5&#f7)oEW@yGKr39X==O=+B>7oq59cP!FP4o z^m98b|5~U3W^J+-@n^p5>2TTOc|pdkS=IX1wRKB@Qa)pOmkigtuX+|U6y>pMo*-@; zj-_?SM(ScZ+$+rYwgjriq-(|3W;yrqcgu99TdRnnc_W)06w;KRx6wdV$RUL?evQ@} zfMH=AR!sN;lPCeQ*?{!#LDOhfAE-M0AfNnqr-S*Uy}TI(qSK z+7fYj`M9SB&y1(@b7-?}c{A)x8V?YQd@=7T6$o4fn1UVWh?vdRsh2>_V{nYyVNlZ8 zE9>vedKi=MhB_V+$b}TlDKqW~1u@aw@hwLp5@Px%R2CA?0=eYehJ@Uwah>#vXh<#YJWSm?^%~1?RPbC&Qrl|oRpdkv^%;m4~jjCZp z<2f|`bVnvJ=HZlcXT^D09^36Ak;AMJ3Za*ewU*3+Ty~bIH`0Cc(V}ZC^JJZ4o{%#T zu{I=^XpxG1K|SCmze3GbEI7Ij z1@5t!-g`8(%i{OpQo@CnkqQZyD0DbC! 
z`1IxuJyKdZ0T<`^&=n}-JPK#(qpR02BX!SYa@tH0$}6Lpui!}kq@d&Kw(LOBiq*)L zGde|ay>UYu{B7!Fd}V@Iv~hj3c|*_?e*oE1rZ5Ew+2VR7m}aD>ehBFivYAXuQqiC( zFF&7Iqtc)ep>N<#Ivp`D1c20nq_K81veO0uyN(_`S$IEm}f`&O_3CEq6<|2&R zWmssC?ccooptFwS&MA^=Pu&7cffaR_%GKtAmRGeul?8T=kC52?xkMdXo1i7HRx84j zZv{1*7;1)`1mHK+9jS-eNP4nfJ=yJ$yr!r?&BOT$K@E+-Y>{vivx+*21t_1yX^n&v zM3?x=E%aJ-u5*FgVnLjMlA9+JQZ1P#V^U825Obn+3^+o`HtN0eQ7Tw?L-R0Z7&hY; zLR+2d6&O<%WcoZAO~S==MMe#sE<4u+@zg{JM)i~mGz)OM0{qCn1Ufe(otHZ4Y4Dtb zL#OHRMdbOWrpR~F2kp4Q!{>!E6!)CWS&J;!UVOd|6S-c?2v*rFDBGS))D#iY3F$rs z85!&W&8Fuv;rX<3MY>(HBn7I%%2r*G*U=psY~FPp4j|1NiPq*(I%B`d!OKP)+Pr}z zC{O)eUWNT0A^=Q*$i$5|)zYMNhfB@KNYWX-TYisB=8o;?`KjKswR;+P$xz-aNO-DU5 zKw-=vFl=CY5~?8@HUj|%AdG~i|GxoF4L#8~3@RQWHpwgSr|ASn8{lQ0cqdBg1G>IP z5q-v81fxCkfe1iTLgFn!6f$2M2hap?u}c#qm;w|ubJ_n07$Wu2OM0%!TG#d!`mGxZ zmgC}Fm|Sc;5a7<>+C_L)6$eapS55`Taz6@-!F(!C#MV^moePtuiOkJUl4OO6p*TKS zcd7{7)8IN{Rx6U_!9t)eul_ihyoTNqqjOx06`wT~a}VDwA&hR4%32Ec_tdtmxM`;b z#3{L%)H)YrRQ7V>zTd^!{`D{ZYJNS5|4BM+i0}a4dDuc{l~kgRn=kZh=5Q`|*ewp0 zE(%`*cOkdjE0t`SFDpM3Y`iBX27;T3>*uKBA?TA3W9s6mwL&*L_VZ)8)hzl1^(%f0=v#h6c} z+6{>=E@#q@-IREyPu&eMH)|jAFF1=ETnkRwRcVxU{HF}O@-NhJ>zhu3wip*n1lZq1 zS+Owl5O19gW&9jn+t}xJJ`JdHyvoDfH0BX{guXT{(9QTQe52l;mNVXAa$}KFE(%}5 z!=4HUaeO?uf=5vwRK?QcCOGRD!t~2kGdj9ai&&LR0-P3ffE;}NlkJ-1F-AJYnn5oK zpy5IM+t@(p#tp^>?CS!arex_+xubRxh6L%GNQCvXX;Yf50s66{*&T}pkt<|VvB1=N zyIpqpQO{Ktk*9cwhT#O9@k$$duLPoFHDr+p*;9HTOnMx1%xe|b8!gmlskb5)Xp^j; zBM(KMA`YyAPRP#qoS{*p|5^0tK;jw4B)GUCFc#*Qmw=mkflAM6msy7zrzNr)7XbZT zj$EQO?js!Cu@9+X4L9m0`FJe6ZB!S7C{Rm`lQp0dghoN|Ft<5Ww5RSq(KUzy3&V&# zQJ59NV6$;#Ku?B3Ev=-bs{pjAa2JCCn_H1r@pO;XLH_0gV(0pzy_7k)4S@IRjwhdc zsL(kVo}87WZ?2Pal?6&%>;Z8eI2=dZ1EE;paf*>@^ZMNsL15t;KXSgyGztFA3a#C4 z!h)NWNQJ4CxG3Hn74=BC+17H)XOTzdsFDm&2H~>jZFQXW;>+sTqEm1g>fId2BSP86 zf-F3Co_LVJpuiwM+32a#@L5R^pDkM(YZ*>ryR+>Zn7)j6lRBsn61a|7D@h*0#6kcA z$L|mlXV(h}rVb-;x29J}{7`N_2jyFEw795_4#=mMDAGX|k8wnq>$oL@QQEuzubF+MdE6dfUUyQ?t(% zLG!0-tbCAlUFO1cB2m;q#rLkXOvDDpbpAquT548H!}En^nvO8V7%uXop)4h=vh*7G 
zwF`6#4Fk{v1u8?Hz<}-bgA8!IO#l}K)}RAZ^uqs?W#|etiGm5WianW*_0ifBTyO=9 zyMSiX=(Fu`uyoR{3+Ze-k-wg`q0Bsl^L}@;*Ob>u;+3sU1Jx11w?4?{S_v)uzKP&q z3jDRNkK4~CEWIfx<4&-*Cj#Bj*x<%7nb|~7+E5}7p)k4iV}Ne897r&SaCcl!x+71$ zfviI4@{BfP|8WQ?6&etX3=RcxkkC;r!KJO3oh`|*4xv!+=w?bTlfwePgh|A7p<<71 zxW$!=6r=q&nRLP$MkWU82W$rF2Frmkq18oYFC0JLbXFq+hgvX#?43a}nb0b*z+>}F z<#w%e;VEYo6Ua1pDaMy@`}1RCgsc#qI2PEV445J`y)Z%$Q%!>c^#3&oJi#TX$XVuL zK!NHpltDsf(c{B4CQLmma|h@1cs;a#29sR=hKD=n!oA-R2d7aBBswVI?sLo}WiOcUfy^a0hn>134O zD7Sz(722tas~AFMGfInE+3hYjSM?wR)cg2TOeZ~W=tr{%wdeNIX4d7xMGC|g*mx{9 zW0}=7^R^_TgpT072op>?*PkUq*_R}jy-DkZ1_ZRh*7YJw<4I|J>*Sdf7nlY#>&bOzlft zY<)`j*f+uk42c3r9tlBG?l7#r#R41@%-#Y`a9@VDs(iU3n9IaL0vmHT8R?FYTVAL_ zso3OXL6@1rdC!PS%2YAyf@9zW0=Xqe>wIdp8i0^8c~EnuS|qwwlTrJUbjmLRiWXW) z^EpMrVk;nDJvpIPp&p`)muB2V2xZI}Fs%>#A5H?f(VGiCTjNGxYuK3i2-G9otsf7w z7b%Ec)*3=?!CyR2ADJ3>Ut_+8R_~*yjSm%z;yK4xe(rC62Z>7w%N%An6ngEg!froo z#VJvgs0Lm}& zb#i&6b2L{_1MVobj>2Sq*K=R)qFg^(`O2NP>Df-PY5|_bH~{_OwNE(^k5o^ffpmdo+%X1q$7^6vY3&ThwIl9(mEj6Da~rW zKUo2AwpJ8w1i^)}#pWm=A5tGxN%DSe)kj1A%dZS zLH18GAX1zU+#bQn0QEq?!FZfB3S>8MaVFrHOu|`?dSkggB41+AnTj;b)0f1w()p>D zHBGy(sZ&KjXuCKbd3sSpg`TO_r*zk&f{rP*5_E#Q_;DJ#E_$fpyZA%?v$N`0ujTuM zZ!RJxEjYirJFtlvfhBI1gI@WZrPV0(z(FU!N|K> zyBi|V8T8y8^HEF!z3SRrA$PwHvD+0+&4}5BV8~+|^_Xr5f_%rw=F8)|I5+$v&s|wZ zL_VMKj<}i=m}AO9=5y2@KU4mdRmg%}n@A$bs&3yxGv~Z6^!S*6K6VJ6;o$2XiZl)4 z1OTTqL2O5~4(~)nGQ!j040>XV9>H{_e5Aa9MjO_m1+48!y%F&OHe#*E(#=6=9t2k@ zj?7>iw-Y((F)bcG5J)p9#ADZz%u1lGY8nYh8rW)RR#gW0se!j2P7#c!k*^ug{-@c?E878JZZcjNj$O-ui~ zF#0920M-m3Gih9t*zr`I*p!?-3o!0w#c}E_ly}faukSg#*}awds%}AFL6Sc9;g^;uKS*0Ft#TJF_{-0ni8?A@t>N%N zxWC4+;OHIzPS>hwc=w;+4MCLwnwY0Y5Kfc*0lDgdLuG);JzWQvM8SK24esbDhQi6q zT-407!3B)PG%e)vX+k@$OXwq-q6*?4+#CS43XZj$Did=qfKc^TG8F9gD|$(Mat3Gc zIeSaHaNE8pqchy zu4RXxa2{ul!j#`nR|M(JRYFEj>&DL7-(IAxMfoh1k9v@pAD!!s2(g2L2crTG0CX_= z(ZVr#w7WY*sj0Qb-~Vkmyg`X^nZ{{g=s3yCcLz3(AcZ61YLSKr@y0eiML3{q6#Q&1 zV;a8zvl2iuaT&KWutDm%aYwK$%SC@)gYGV!qb6twk%3#NVr%~edIjh9lFZ1%ngDw~ 
zPS)FKWH1|15Cy1@)SU;OP&zzWZuucxI*wpaMIJ-P69rDlJA<=$-ZdJe1MPAxk}ALS z_?H;sI_J_+F{)m=@QKc`altC?518rFOc{q+OvD5>;Z2*(*FpT8(Rf1gNSRUfBrz=r zCroI(Vz7K5vn0^A9PBu-@vK{6T*7lkOxg-bwT?&T3K%qU4r=J6>Skz&c`*p>9u!2J zsR^GA_1sA#Y5lLpqLIk9pFBRw<@m61)zZnDQIv z;_JrVRHC4_zA*Cu<9GBJfG2o%Bv5FuNWY0po#xOC7CST@TGq)XjquQPt#Q3`bbui4 zIRmF&0SOIWIZ~lQaFu-%eT~jEp^a3E-D3A5ml#Iy22AKQI%gLESyD)&egd|pAro5U z2>k91GOCP@jevS`&&Bx6ZC^)>-@A>Ds62r#O;=}dAVv~DG9*FE#Y95H&wHRZVwxJT zsAvs9y8mlX*a?Bip+^6#5R6@8OOGT$m;T4r0g}Q{SSKW=i($^CkPoJtj|6RaE=q0a zWYC)hK%%1^&8ZLy$tmORz%XafjJ(N8!n(UGcn)t)13;2cOUr0)0bVSy0&!% ztbh}$*@gxL0b#Jbz7SB=hfszqnsG|9#su`T;5YIu)-5~yeYTQFPM=o=U zL|LxOY2d7mi(QyOdW6ai_iw+#CLCx{p3#At`c9-bN2wNJ3typ^vL0|yjHm?hzcs2c zJ$A>L)`1x)lgS}6QEQ|}fbdQ-1*8d?!aA^J+9wMJkq;H;MmrsQ;5 zG-XpP2y3GGx3u>6na2uvfPXr~K2QT1R?#Ijw6OnyG7@~xNco)4tB2I}4aAqIY zHIRYa*E7AHUw zD_+y+1>_zXHO}F9z&rl0fpR_SnD&IHuZLf?;JpCr!nj?4VyKwWB}g1HP_Nz6a%Uo5 z>W2ZT3I63rU{ggwPoW?N#^CN{Byvd9vvd|L&G`&0xXm#+j*1cK#afuBoeQ_A6~o{m zQ7B^Tg7j3yYTbyHsKq8nh1&Zy+MrgHCKt@p({R^XavT*hnd1g~f~gsn%8EK$Xd}AO zM*et5D~ncyhO(}aR>s`M00#iMB?)z|tKAts9-mIra|b-0rgaUPPb*l_;%mj!iER={ z93Z9gAT7q@Y6mA8Jpvq_JC8;*!m8mA6hnbV8`ccILs;UFmFnF3pbW=v=TyOLa|L)d z=4^Si-yDVbA_my6)+0N4G_E?B3&PJGm*j@xhXUd0d0z@3)i18ooq_ih7b1#KSE*f@ zej>8QC<6(@0mCKscPct7<8>B}19Rvx6cf^@$gI2Ps9biA^eiVm=Y20>_m#+9Cgtws=;^(#-!f9Q5Ste2FWun7ErdeeP=ERMwMrwd3L1iu zf{fdYHvw8}u9gPPDYie2BKR&3tV|IrlVXO@me(@njEg>E6cWtEDHwF1euB^g4ICbd z4Tgk~=MWh=oAq9NyCFa{jXs`=%oXN%Q_f@niH(u%gaF(~6~IU*^k;}8lXEW(<*?GG z2X#z_;!z{^|%VeY`^CeZ?2BmSW2EMA#{M*$kTL*Nkw9k}ued|BaF}r|n~eYS{|L zwZ-49M#*hhZDK$7d5}M?3;>F-08do29ricUkMpSbv>sJ|c4UK7td&y$a2Q)X-q{D^ zf$@9rpR3VhBUkKI-qyQNz(5qhdZ2lwTeN0jT_Fh49l~83UpmBBTI+F?x|fpsfO%Y? 
zgHV?+jEQFS0KoVmWB^_V>IIEmOv6Qj(hBboVcIt;isq{Zu-ax=MK(Ywj3l592$jNn zurc~eX3q8bB{Aq?n(1JKcLUYEVYh*{lJW_$k((B5Fd1osqcQ)dEnC<( zF(^sWAoV%eS?vJ`!dj;Gh zAu{yE#)#lL+Hl0nbVcG>g>o~tuKtOI&ynre_KLC#MDFnRQ|LfJa^YOQhzJ|k==ud= zT_16T=7JK&Mn`5^GY(3ud_dj26?=!qH}F$0am*W9!E8eBaSQE|$LocHy?>+U$W=3hZf#x12sUzn zE>JsJdeUEA5(57DfHUF@e&_T(gUo=flaYNdOR+&%~@Ej z%XY+8LRfRH-eLGjUu-F5x&ML))zAr+ilcgq7iB8bru&SZr_BM3-wv=n4vj_VKp7oy zfI={vWt5UA3*-=N8wfYnp9R^m^l*dF@J?eaSh2rn-Uu)zFe*9+8Du$i49DMrR;YGI z5Uwmic!m}RenB(9#uId~mzfq1qCy1B(h^8c3}x>@f+-+&3z>n32TXt5=&0l`7pvnx zB#85&J~i!DMKhAzImZmz^@toXEo`=@abOLMCA7eaMs04NFfBrcRXCd-LeugOm1`-6 z0zrf0@R+ozdZq#f5M@>|yay2ns%Wk8Q%*_fsw!M-?1z{%GWSkDX5ds|M;=466d)ru zY{L52k7{oaRt@jdm@?U14%j?|fI9vcRXJ60mXlx}v1kDt!k-pb0F6#Wcqt0DVj8WJ zDUL%Ya@sl$p>42zqdvDDhD(*!5#CDY__!LlwtO6HHb-}ymy3X&c`&o!8WnD975sj7 z1$)okZcV7@8MS0bHAh`h zr4qyZRVN^55PSgpiGf1%Nr?lca;DoXuh9;VQE1fxw_56JM%cwX+1d=seYyI2&J0S) zjS&RUTDupf+T!ztYeOBC?)Z&8uPAC8k`=fwq48D@kInrw4oe)Pyi6>xp%#qmywiqd zJE^c5e;SD(*Fm?Fsd?F)amX{B#R#E&1LzEyo;{F?@O6x})AseQA~V*xguWr;+1N1c zjzn_HmUlAOfJYiA>&-r5K54MyiV(AH{39wXSGYF7NdhHDW);ocU zt>-2SyNvw9=wE>zm~|?@J94UVY7qxwd7%`q(X_VUsZ}H$dR#x@uF(L_2N_xl#C8GG zBMxAbnBErmMOb7fr;yv#(Sxu#G~0=$@rrlC;3(i?(W&^+3n*@ne}3ix&!04$NHEFL zD0~`WEzi{V!-zM9t#phwUSpKj?!4MnCdi#*q-+*# zw_JoT&`_79whXbm;0p(4m2 zLclqoG=v~Lk(xtPjf-4HZ|u7MNa1pC$CrOSp|=P}8X&W|cf#O8gJlnS-e9RQsQEt^ zz&ItVv>k-NTk}&-!9qIUe z2|9pd+j~Q`>j~4Yk**=21kP2(uJkH=2u3tDArCF=p?zt}%36VSGCpYN0F6E6BRfFb zPWeP5tOj*D0l_GJA<~B$^Ls18dn^rAtfsIoktO7CL&rMa=;iPt`rf3Qd!sH0(>%X=xwRp)f>1Zj*!1lgk`2qVGy1}o6aDa(v1$O~O$E;GJ@5cr9ac$h+{fJCH9 z7lT~0%PA}!yiHaXdMv!Vr*c+kN!7aCSJl-|1R7{) zMWSVk&~^ZgG6+|}YNR7RkTx(AT}8`?5e+J$@exBj-9jXgm{!!NFjz7eF_X=eW@}eCK=ozsJ#cvf;&JXkS`h zAf8-T2Q=4Q68SE3OhloCY#}e^Ak|=2Kz$7n;ONK7m)KRp3bjGZRR&59kb5jEbmkDhfR!e2UgYfnA0g2kLKz$I9{OnMIx?o z=-8t7ZX$(sMs~N59(=!y+wqsM@>1iro0oRp=V#}HADMh&c08PRy1-53hYPotim@S= z)|~K-|I6+C=RI1VU@;T9?}4k6h>$DeiFhBxg(N|8V57}zPw+U^pbEdXD#94Z^Ba5S zq%}p(b&ii(lklbanMvYS#}3>El|7xJyCjh9`^0k|>l}DkT}&f$*Qn2_WPCN^L9C|E 
zskkPxIEq7fXC>Q!WH@nfVdt2&gV_2pW!k)YZO|NcvzI9p+p8`@qcQ6fV=|cWi_bgL zwCpa7^W3`?OwfF@Ra1gH{@JcjF;Jr)U4j-K9QbG_=GZLS8;~ghE;5ZWu(G6UXTMUz z9Qt<0aygY(__yDZz#_QT!Bx``pojbml+yej-HK=B6EF0_u~S2X|ZigP)K6|as7wU!0gaExLlbt(`fDnIVMH5qR9Lg^vUV&jM>hUvj7)zyG6t^=( zst|sudI$+~>FRU_H9%DT3hE3s;T?oVCj5l6j-J{X$y8uhR#2rS*&{ivY2%Gc-ha>Tt%frR z2SMFcyFI?M$?nZXnulSuJZn~=5UvOXvbW2UR8Dh~6k;tE?-+~cEx+}tFB<_Iog#5B zaCQY&d9HfDhzYZGLjR=Z<`$d&pl)r+{RLwUn_mbRS8qtJ=g#%)K6hntLaKge^_&^y zG^SRptDD-cJ>9hXIPFH-jf3-Zi+5TBiIJFx0d$9#Z@%FUzt;D`PEF@ghU;?&3VPg< z%N4cUJ0x|e`(B9;d?~E~sS_Jhy`#^IzilhnBc!(Im}b$#{cW)$>*Qhi$_GBp^MNhC z_MsV*e)M!#-%wgh-h2E3hOyT`8l=F}bBD{%Rdnsp`_^p|#DO7T{RHCq%(g9Hf zav8#Xo2KZz!HOa39gsiaOH0(LeJYtF#NJ2|hWCrUSJ?@J(%C!QUosxM z<5oQ!10kYdvETvy$LhL)NwyPL>QOfsxbFcS*_Ww6ts8t6z*!nL$MGqDf;h6C{~beB z+#G`oC>ZTgGc!V)I{S3n5fjz2dqJ@|dy!jnd3@P3DRbguO1Jt$k>tQ&DUvIbF+H6O zh{+xeWR#a7FS+BtNIMl9*r0O&Wk4PsH%F+LCxZcOI=CYDRw9S-rFLK`>&wEoI> zD!8D}l9ZoNnXLD76L)Y!=1*f$%a^5@>M!gt`sZH6gV@j~oYH^VYbHmTz4UsOQ&Xim zQ@YJ!j^Fl8$Xa)H-hOG?!T9h|9>Lwg;xD?PGg;H1DDe4iTB(jxIq8+GOF>(3<7C#a z-?E&QqOPoR-{U#1>4q9nPNg9Xj7P(8vtSf6(a*4>Y@hn^E2v7?4t$w8AD!Jv4*SsD z7-S|5#4>hrPcYyJPw<3n?WXQE`-G}Wfaw>tTUtj4P5gx=?` zfLO`!2RgBRW{)`)9JhvStahzCSIg(#4mwial+6{|@7lQQsow8KM7(Z_i73V`WOy{t zp|A!6C7!gCXE24|3bn@y=$Wj#Z}Ypd{~X%J;%eeAoU!xCts=ZgE`dH>R3LL=4D6+h zaTGbcom23%L5K*u=lrc`x^FJ*heZW_uo!P)gsFtv#zPFW$JzmC!?K;rp(Fz&z9;~2 zJD%_f6TAAkUDAXzNoIb9+nS5mS`p{i7<-8i5+xp_yrD(nX#269<}Vwik@7NWB6jEE z7>Ss|r~HDHLFKO`H7EsvFPz03wH$tmq_^B5wgy6pP90PN6IU0vqNmV~O4{In%A zMD7TXgoUC?QqWLBy%oF{i+uLx(reE6mb#sT4*Y?T*YFF2!C*uJ!yB8^Vv_k?V;E_t8a$J9Sa^p!JCD;>z*M0Ib~m z183cdcnr?%GH1r@xWXmx65UD5CKOvMcU=Ygw9MbGf3btic70Va^uXD@+75^jtyE-F z%o`B`Tj_R1wb^5x(V{!;kZ4>YAv7e6GU}cWguDe6KnEO=v=Ylmquf}22Bp;H*t3+- z1AbpvJnklyqN?$swtMR0nwapa(1|KjRJdcAh0?!9^nymN=)8jEBibPV)%H=H3>W3V z@S<|}2_Zg-f1s+V(288~nvIEOup(0p;@~O_%3vg83tXR0>c{U=B*(pS6V{iZL^Hj{ za9|RmTbAYUg$QnzBkPMy>oY=F?s^h5Z8B)dC@(mBtB(>L2l{d=JZt+FNjHu7xBbiK z@XvgoF^^xe#1e&!GHVgaZQRVc#f*ji$f*vNJ54;S+@)cGua@@E{GhZjaokIpp8qwe 
zG2m*4=2HaUGaRPXd&qj!oQtQ?2GnO7Xq=!rpj7s#{do&M+<@Z*e@UT6CeiSYeyq7> zV`$B5+X6)5v0H+rU2OSTvywm~L3B$-m?Tl5rbp1`F*-0}5`$cW`xK!|SNWzL@Ce0% zg}cM1;o6vdPF2WoNr&!wO?!AChPQ+x2Pg7z4o=3jac-1S%6W6CHl=paY|W_cPPY+d z%sVh&^uz^d9l$Kqh}wyP986*BxbA8SOQj*b6u+Zbo4@2kdgWYD)BR_ezusJT_beax zO>_!({-?%vK641%stbFFNd?c%iH^<+gpi!Y_)G?7szreS?Troj3~!7baTVuVj9YGT^Ec4Q2wM@>iMdy9%@ zL1XQ*pqo0F+1o=YDN1v%zU!f$(OGfDsDjDUN1rLev>oH1>91U<@Z8C&-WD9wW6&r3 zQEIA4_Rs_EjF?RCeOH^|!x=4cq9jn<(84$6g~xTDfgoOv)JY(qT<&G#*DLvufB_=w z-vf_W9&SBOje|sg;b!#X@J2iw&gv%`UrB60;NxT{!8QdFsHT+6V|*6jRGYS^f_#oy zlv;u%b~IuImv$KfU6(wS2od%!}65rWd>Ym8YJ|OV2s&38PjxC`P0?R2h6-~ z*!tt>aKBw!Nt4TP;NvC~YL3XXeXYS{7%ny0VU#fGi2!ij43{Ksf%DxNBLhO`)Awcs z%^hPfHEP-3(0YsOcmp^3!nb7Y*u*pwU^$cWDu_PvGr!P2{G@h^4{1j>yYXSoOM%2| z`?I}r5p6f^`-t%yTWNk+n(n#Ti@VOejn#eu_*Ck~Z;#R+wiSO@`7!FjyL-#SKsr#fFQVh>JjQXg@cx>d%I zP{XZu?)CL#z9G5r|4=DV^OHMcGBkmZUn+D; z#1yYW<@zH90kGE(9(uro`KGrSVuLK(awxMUv6CWvzJhNDl~`s0mq6M&Ly@MX2L2jq zx$x$I$)MYL6UoC2h{3Qz!GaYha)X|nzQgokCJ~!=;a;rJH@(5reB_|8?QbWfxz3Hk zj%UYh5;AUw*0t#`Q&j7s6=MBUx(Bp*L^oFe%6U14ERjT0I>+e#*extPy3!bE2{~iZ zZQAFeJk~LZ}FKmNdK*mhf!VpAUf#QQJbc*OSU7$2tg4H%>4H-MfXY4n%J znsjU|*@AGJI)UIp%0Idxs@N(Lzf#G24Ixtp8ZeILidxz#HUX6Yc2$HFEBG`_e5A8Y z{VN0wX&64uX-fP%FzA?D!YYF1BmU6(r?iZhfaaPDd%1B_1rJLg>Ar@mZ>XNWVjcIe zfkr9){0@_?^gm?FKh_>)qT-uFe!DFR6I9XUE|f^wgDbm|1F#t9M0Q0di<~cXK9^Xx ze3Nj54~#f*IcRQ-Nx#PrIUQrp@)}R-pIbQ-AdKyv;R&3R*g!g;g`AQ^(7d71&m&Bc zuEMBY+nhP?qc330YOrQo`q`nshg45j|5JmuRwN8HlLT}B)7P0!pzp{g;XKSA7s z>#4G+J*bIOf@K>w%xL6f%Ds`tA|u+Of~iRIIdFRWr-}Ej3VSgmxc;& zC^%!LSI{Y7%XkzX5GX(F*L3+Ng3F;lNG&b;NLB7bQ+ou>8lOgxbw5R2cyz}XYF&t) z=U%SXV@dB4-1CeLeo`uH+fXwL&JZSIH0;blhr3e%TCL}Du43ku(9LJNL}bOi+Nx$W z13>`jnsS!QmyLdPF~~))+&na{Li+>q0>&zp+dH`#vd{Tq2B+B~37x>pLidK;B6<`i zl8uZ{t=UU$8xi(;9mx3XB8?;~~!w`wxPy4Ce(|NjOa58C;;nbc3-PE}{| zU~csXX9-tGGS8JLUF|2>mAOr?IjE+w^_gd7K;_WR)aVWQK?+7LuSKtLq3kF;K2 z`J1DnAqU2;#NCZtbEZw-96)W6Ktk2gSzLjYQV%PDZ_<5HPnHgr;?FcuF<)+-%%VCg zxOb(~FfL$v&O1x`GS{s}DKF$B5F{W5?wB9l!h3=Uf+%!5T{&*a=?rtZU0j;oo6?$% 
zzaz`Ekp7eWZnG;BvB-Q%#|l&Ob}4InevEWs^VhjwOX-u)y@rs!xg#B@@KgH)BP2^y zNWa?MZp)*=ZTa}0A$L{!p_8_Y8=~6nvwik2xX zC1yYjmQvw;4XW83ZbOF}uQ7%&Bb*4=Hm9WOsZ`Xuq;&Ry%ugami7y1uDTW@#bku;W z=?_iN=Ekp#kUD`rEpS>FmO!}M4SI&f-mhp#UnN-J5tX2kk+z|xrbcgg<$_`Gf_Nb- zFy?_UERa$60QcM}G+vt8$0Y>D&F8EYTj;r)$t)unAO0xM5MrYb&G&(ac~ZDDZ}&oh z#;n3AAU>6@{zp@beEhS)*6Nnr(eCI7u24eJ7>zuK*U0)l^@y5%X04Dm)Iw8az}t8t zOj=m^dg%0q7XA==mqqf)KPEBz{HX50Nx?RV&7rY+7OpE*yA}LV4PmWTb{D2`z;|#( z5uGW=>3+gjf<1+>6WT19YUW59He)`djS=OYXvXw=^V-Ch5LlH2HsP(>z0pZoo)}KL zJJ-Cx9q*p(qj+^RD(I<6J_UH^h;Q9|QY4>pNt(A;GeL95x`!jp zosAOR;zN+c>{R~B%vbLj%1^&-4tKs##%*p2l3`eAuc>yG$fwXWBPAzHjPsgG1_Ep( zZ545a~ru|li~|~gSxk+kTyG> zP80W-olpgc6E`IYH^=I}U@(VNmrXPoAo{>+xpjv5>ZqU|#EcdUs6xZ#igyU4ex;Z! zOrq2l82m}IoS;0^`L;F3YQwu9xMy$ZJ$5=;QxYk`c&X~(4isZzYxV+T`0ve#o3jXlO8Eb5_+H3~)ObNqpar9W-y!pg z6Wp_-g;cd96_HGrWJ+dcY?jo$K!r3sf#zzqjNXS#)hV=Dcl{aTs@}t(D9hI)4>xv^U8TlU8D>cpt*PzeNT+0XDs4P zq3LLMO>h{dv;8MArycg#n5Avvnq5NgQm21jGos`vB*mxMH4RutT$`X~QJ5uqT8cLR zYH?tl`>AVmm*RfAEe}t;Nwer|g#6T+Jru@i-@lzy)%5*|x0Yu$i>VA0;aBzT`0%Ay zS4drFUws#TOi*OWRE|W**gZK(EZl{QP zhHnC+0Xq&{3-VH7CiLz=!-%7Ad1kORU^^|Ff&;N*EMck9qY{CrK}CGtHEavSF&NO` zP^86m(LhmX^0)F7Ym!p+-rMi$2XGHKdag5?(hXe;=@%-VY8MBN{N+vGmy55QkEf_7 zjRys52`3X*V5=6hz0U`g-TrcZ&ifYs)~8rjm6Tas6Jz0X@}Fy{@pcOQ&7r*%o1qn| zz6*FuLH4mPO4s)l=U=PsY8n$Pveu#@Eyy zlPdpGKC5X)l&$WKo-(N>qGbo3AT>1i$r>UW?NzHaq zb0HtV@HETRs(>PBKDW6!>}5~l<-XMcC|KR8Tz8$kiV?5sYjbE4uW+o0AGSq)UVQ2r z4yFtG)rz`|k~lyGcI>8x3qml95VYIRO2&13rRZ4tA-Tmd|hq zU#V$O|1hde-Q&Gy(zR!|_OvUJx&Rp9I+Y~LNf+QIzvj-y_pmU!EK??k*$;~pQ9;=D z&(=wik?OCqms9}<*P*I#L&@NqTdI-D+T$a-ySu<&?Z_W&VSjDc)h^28%dFfW6V^jg zQ=R#4@la=cY|NixgupB*qxpVv)-y=`o`PPOt ztD9~4iAS&WQoaa0*bN=;Zq$Zd{oG=M$=b80)4u(Vl*vZzq5>O0unMpfo-zLIExzdV zTVJ8hjv3Yw3~Q}$w}n3tARwCXJwl8Rm$}m_UjjEnpYRg63gm0?E@6wri8RnmjgLQ5 z;m{*)EqKnryO-hzedCDdCY;gOY=(x=Tl;3>P=P&q2#xJW~dO-5?U=rx|}M6p7%tV7rhZp!EQ# zScN1WH#Ht1qWH2X(ommf4~)EysmgIB%zK4jKUti2VHbolfhUs|N;i8e&oY|ggE zYo!xAb3hx?`e5>{_u)*_pSa|mbYGaVGdERtZqj@+tk^(EkY=dyLTwe8 
z_`KxmG#+t>2ImcH&3r52dVqf_Tw@Q+a@-?>DP2_Kz2jS zAM{<~3+%e0|I>*rzkNeot@o>3r0aIRKO)EPSbl+KR@32teIJ(>+cRHq51Pu)v;*_@ zde>W+i^kcQk;N>v?xjfw(y9L-%R1}FQ{Pn*0yH70sV<~v)-<;}R5}A&3RQD2{Xx1v zR^N*a66g-)6htJMr=b>h`+SHqYKa5lrs`fKFcbk{V^)unI?C~LsP_I8Y3P9HghYl` z3Um_>sWx6xiIccuH8EX6AGk^5Gxn1X`_NSP)GtkfLhA<5fvCl3A(W|`C4u3cQ;@q6 zk6q-7H@hQdS9sMaM2P|Qokzk^$)uqG5&3`@Kmg=17=r)>_OsQVmV>hBogPF-EB+G z$y|iwA=61y={9%tOQ8&AbgRSIrX*C|pw@pAhRe47KM12sIXC|xjhJ>@6i!vL z_#4j57V@^)9vbP8@#S}mBR@m-X%6pL=%6y*w5o^N4R0%*30ovgX{DxCmfZQ-&E4Gd z;|=@gayo*E5|<_|tOx3eFiHZHPy}9@BG88&H5RlxAcj98D^S^Hw;fw~41_1ra8;8*#?k9)8mV%tIxS z6y7o2D|}}%_6d}mZG6x&S4y4E54D6JHQvy6t*w(w@2yz8%RKRc(`q;Z(@=^|Sg~0C z(ACRZL%m&@?=Ia5+)L&4rO*OIy1c3zQ`xByFU;eo?h+~LSzr@sYER{N`;jaVDg74`Lm z2MZs(^5w*;^IvPekNkTnzvjm5wT6##ZkFcc+?K+EBffnx{u5`kb#;$Iq|>4 zf;4*uLePuU0&9ZpMg-%SZP9uFW=eLP42r z^ivV7Ln$%jaCAz!Q{*O9Z=9C!>^e*Do}-^H{XVyz&8))2NPl0pt%)x)?RQ~?+#>Jl b2Rn{f-+pHN5X~2tzLi|IvHASu|LppI1__*? literal 0 HcmV?d00001 diff --git a/tests/st/cpp/dataset/test_de.cc b/tests/st/cpp/dataset/test_de.cc index 57b4f0f75b..3077b98fe0 100644 --- a/tests/st/cpp/dataset/test_de.cc +++ b/tests/st/cpp/dataset/test_de.cc @@ -17,14 +17,14 @@ #include #include "common/common_test.h" #include "include/api/types.h" -#include "minddata/dataset/include/minddata_eager.h" +#include "minddata/dataset/include/execute.h" #include "minddata/dataset/include/vision.h" #include "minddata/dataset/kernels/tensor_op.h" #include "include/api/model.h" #include "include/api/serialization.h" #include "include/api/context.h" -using namespace mindspore::api; +using namespace mindspore; using namespace mindspore::dataset::vision; class TestDE : public ST::Common { @@ -33,59 +33,58 @@ class TestDE : public ST::Common { }; TEST_F(TestDE, TestResNetPreprocess) { - // Read images from target directory - std::vector> images; - MindDataEager::LoadImageFromDir("/home/workspace/mindspore_dataset/imagenet/imagenet_original/val/n01440764", - &images); + // Read images + std::shared_ptr de_tensor; + 
mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor); + auto image = mindspore::MSTensor(std::make_shared(de_tensor)); // Define transform operations - MindDataEager Transform({Decode(), Resize({224, 224}), - Normalize({0.485 * 255, 0.456 * 255, 0.406 * 255}, {0.229 * 255, 0.224 * 255, 0.225 * 255}), - HWC2CHW()}); + mindspore::dataset::Execute Transform({ + Decode(), Resize({224, 224}), + Normalize({0.485 * 255, 0.456 * 255, 0.406 * 255}, {0.229 * 255, 0.224 * 255, 0.225 * 255}), + HWC2CHW()}); // Apply transform on images - for (auto &img : images) { - img = Transform(img); - } + Status rc = Transform(image, &image); - // Check shape of result - ASSERT_NE(images.size(), 0); - ASSERT_EQ(images[0]->Shape().size(), 3); - ASSERT_EQ(images[0]->Shape()[0], 3); - ASSERT_EQ(images[0]->Shape()[1], 224); - ASSERT_EQ(images[0]->Shape()[2], 224); + // Check image info + ASSERT_TRUE(rc.IsOk()); + ASSERT_EQ(image.Shape().size(), 3); + ASSERT_EQ(image.Shape()[0], 3); + ASSERT_EQ(image.Shape()[1], 224); + ASSERT_EQ(image.Shape()[2], 224); } TEST_F(TestDE, TestDvpp) { - ContextAutoSet(); - +#ifdef ENABLE_ACL // Read images from target directory - std::vector> images; - MindDataEager::LoadImageFromDir("/home/workspace/mindspore_dataset/imagenet/imagenet_original/val/n01440764", - &images); + std::shared_ptr de_tensor; + mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor); + auto image = MSTensor(std::make_shared(de_tensor)); // Define dvpp transform std::vector crop_size = {224, 224}; std::vector resize_size = {256, 256}; - MindDataEager Transform({DvppDecodeResizeCropJpeg(crop_size, resize_size)}); + mindspore::dataset::Execute Transform(DvppDecodeResizeCropJpeg(crop_size, resize_size)); // Apply transform on images - for (auto &img : images) { - img = Transform(img); - ASSERT_NE(img, nullptr); - ASSERT_EQ(img->Shape().size(), 3); - int32_t real_h = 0; - int32_t real_w = 0; - int32_t remainder = 
crop_size[crop_size.size() - 1] % 16; - if (crop_size.size() == 1) { - real_h = (crop_size[0] % 2 == 0) ? crop_size[0] : crop_size[0] + 1; - real_w = (remainder == 0) ? crop_size[0] : crop_size[0] + 16 - remainder; - } else { - real_h = (crop_size[0] % 2 == 0) ? crop_size[0] : crop_size[0] + 1; - real_w = (remainder == 0) ? crop_size[1] : crop_size[1] + 16 - remainder; - } - ASSERT_EQ(img->Shape()[0], real_h * real_w * 1.5); // For image in YUV format, each pixel takes 1.5 byte - ASSERT_EQ(img->Shape()[1], 1); - ASSERT_EQ(img->Shape()[2], 1); + Status rc = Transform(image, &image); + + // Check image info + ASSERT_TRUE(rc.IsOk()); + ASSERT_EQ(image.Shape().size(), 3); + int32_t real_h = 0; + int32_t real_w = 0; + int32_t remainder = crop_size[crop_size.size() - 1] % 16; + if (crop_size.size() == 1) { + real_h = (crop_size[0] % 2 == 0) ? crop_size[0] : crop_size[0] + 1; + real_w = (remainder == 0) ? crop_size[0] : crop_size[0] + 16 - remainder; + } else { + real_h = (crop_size[0] % 2 == 0) ? crop_size[0] : crop_size[0] + 1; + real_w = (remainder == 0) ? 
crop_size[1] : crop_size[1] + 16 - remainder; } + ASSERT_EQ(image.Shape()[0], real_h * real_w * 1.5); // For image in YUV format, each pixel takes 1.5 byte + ASSERT_EQ(image.Shape()[1], 1); + ASSERT_EQ(image.Shape()[2], 1); +#endif } diff --git a/tests/st/cpp/model/test_tensor_add.cc b/tests/st/cpp/model/test_tensor_add.cc index fda2dba0e1..7afc3b4ed9 100644 --- a/tests/st/cpp/model/test_tensor_add.cc +++ b/tests/st/cpp/model/test_tensor_add.cc @@ -20,7 +20,7 @@ #include "include/api/serialization.h" #include "include/api/context.h" -using namespace mindspore::api; +using namespace mindspore; static const char tensor_add_file[] = "/home/workspace/mindspore_dataset/mindir/tensor_add/tensor_add.mindir"; static const std::vector input_data_1 = {1, 2, 3, 4}; @@ -36,23 +36,42 @@ TEST_F(TestTensorAdd, InferMindIR) { auto graph = Serialization::LoadModel(tensor_add_file, ModelType::kMindIR); Model tensor_add((GraphCell(graph))); - Status ret = tensor_add.Build({}); - ASSERT_TRUE(ret == SUCCESS); + ASSERT_TRUE(tensor_add.Build() == kSuccess); + + // get model inputs + std::vector origin_inputs = tensor_add.GetInputs(); + ASSERT_EQ(origin_inputs.size(), 2); // prepare input - std::vector outputs; - std::vector inputs; - inputs.emplace_back(Buffer(input_data_1.data(), sizeof(float) * input_data_1.size())); - inputs.emplace_back(Buffer(input_data_2.data(), sizeof(float) * input_data_2.size())); + std::vector outputs; + std::vector inputs; + inputs.emplace_back(origin_inputs[0].Name(), origin_inputs[0].DataType(), origin_inputs[0].Shape(), + input_data_1.data(), sizeof(float) * input_data_1.size()); + inputs.emplace_back(origin_inputs[1].Name(), origin_inputs[1].DataType(), origin_inputs[1].Shape(), + input_data_2.data(), sizeof(float) * input_data_2.size()); // infer - ret = tensor_add.Predict(inputs, &outputs); - ASSERT_TRUE(ret == SUCCESS); + ASSERT_TRUE(tensor_add.Predict(inputs, &outputs) == kSuccess); + + // assert input + inputs = tensor_add.GetInputs(); + 
ASSERT_EQ(inputs.size(), 2); + auto after_input_data_1 = inputs[0].Data(); + auto after_input_data_2 = inputs[1].Data(); + const float *p = reinterpret_cast(after_input_data_1.get()); + for (size_t i = 0; i < inputs[0].DataSize() / sizeof(float); ++i) { + ASSERT_LE(std::abs(p[i] - input_data_1[i]), 1e-4); + } + p = reinterpret_cast(after_input_data_2.get()); + for (size_t i = 0; i < inputs[0].DataSize() / sizeof(float); ++i) { + ASSERT_LE(std::abs(p[i] - input_data_2[i]), 1e-4); + } - // print + // assert output for (auto &buffer : outputs) { - const float *p = reinterpret_cast(buffer.Data()); - for (size_t i = 0; i < buffer.DataSize() / sizeof(float); ++i) { + auto buffer_data = buffer.Data(); + p = reinterpret_cast(buffer_data.get()); + for (size_t i = 0; i < buffer.DataSize() / sizeof(float); ++i) { ASSERT_LE(std::abs(p[i] - (input_data_1[i] + input_data_2[i])), 1e-4); } } diff --git a/tests/ut/cpp/cxx_api/context_test.cc b/tests/ut/cpp/cxx_api/context_test.cc new file mode 100644 index 0000000000..8509f0457e --- /dev/null +++ b/tests/ut/cpp/cxx_api/context_test.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include "common/common_test.h" +#include "include/api/context.h" + +namespace mindspore { +class TestCxxApiContext : public UT::Common { + public: + TestCxxApiContext() = default; +}; + +TEST_F(TestCxxApiContext, test_context_global_context_SUCCESS) { + std::string device_target = "2333"; + uint32_t device_id = 2333; + GlobalContext::SetGlobalDeviceTarget(device_target); + ASSERT_EQ(GlobalContext::GetGlobalDeviceTarget(), device_target); + GlobalContext::SetGlobalDeviceID(device_id); + ASSERT_EQ(GlobalContext::GetGlobalDeviceID(), device_id); +} + +TEST_F(TestCxxApiContext, test_context_ascend310_context_SUCCESS) { + std::string option_1 = "aaa"; + std::string option_2 = "vvv"; + std::string option_3 = "www"; + auto option_4 = DataType::kNumberTypeEnd; + std::string option_5 = "rrr"; + std::string option_6 = "ppp"; + auto ctx = std::make_shared(); + ModelContext::SetInsertOpConfigPath(ctx, option_1); + ModelContext::SetInputFormat(ctx, option_2); + ModelContext::SetInputShape(ctx, option_3); + ModelContext::SetOutputType(ctx, option_4); + ModelContext::SetPrecisionMode(ctx, option_5); + ModelContext::SetOpSelectImplMode(ctx, option_6); + + ASSERT_EQ(ModelContext::GetInsertOpConfigPath(ctx), option_1); + ASSERT_EQ(ModelContext::GetInputFormat(ctx), option_2); + ASSERT_EQ(ModelContext::GetInputShape(ctx), option_3); + ASSERT_EQ(ModelContext::GetOutputType(ctx), option_4); + ASSERT_EQ(ModelContext::GetPrecisionMode(ctx), option_5); + ASSERT_EQ(ModelContext::GetOpSelectImplMode(ctx), option_6); +} + +TEST_F(TestCxxApiContext, test_context_ascend310_context_nullptr_FAILED) { + auto ctx = std::make_shared(); + EXPECT_ANY_THROW(ModelContext::GetInsertOpConfigPath(nullptr)); +} + +TEST_F(TestCxxApiContext, test_context_ascend310_context_wrong_type_SUCCESS) { + auto ctx = std::make_shared(); + ctx->params["mindspore.option.op_select_impl_mode"] = 5; + ASSERT_EQ(ModelContext::GetOpSelectImplMode(ctx), ""); +} + +TEST_F(TestCxxApiContext, 
test_context_ascend310_context_default_value_SUCCESS) { + auto ctx = std::make_shared(); + ASSERT_EQ(ModelContext::GetOpSelectImplMode(ctx), ""); +} +} // namespace mindspore diff --git a/tests/ut/cpp/cxx_api/status_test.cc b/tests/ut/cpp/cxx_api/status_test.cc new file mode 100644 index 0000000000..aabd00fe5b --- /dev/null +++ b/tests/ut/cpp/cxx_api/status_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include "common/common_test.h" +#define private public +#include "include/api/status.h" +#undef private + +namespace mindspore { +class TestCxxApiStatus : public UT::Common { + public: + TestCxxApiStatus() = default; +}; + +TEST_F(TestCxxApiStatus, test_status_base_SUCCESS) { + Status status_1; + ASSERT_TRUE(status_1 == kSuccess); + ASSERT_TRUE(status_1 == Status(kSuccess)); + ASSERT_EQ(status_1.operator bool(), true); + ASSERT_EQ(status_1.operator int(), kSuccess); + ASSERT_EQ(status_1.StatusCode(), kSuccess); + ASSERT_EQ(status_1.IsOk(), true); + ASSERT_EQ(status_1.IsError(), false); +} + +TEST_F(TestCxxApiStatus, test_status_msg_SUCCESS) { + std::string message = "2333"; + Status status_1(kMDSyntaxError, message); + ASSERT_EQ(status_1.IsError(), true); + ASSERT_EQ(status_1.ToString(), message); +} + +TEST_F(TestCxxApiStatus, test_status_ctor_SUCCESS) { + Status status_1; + Status status_2(kSuccess); + Status status_3(kSuccess, "2333"); + Status status_4(kSuccess, 1, "file", "2333"); + Status status_5 = Status::OK(); + ASSERT_TRUE(status_1 == status_2); + ASSERT_TRUE(status_1 == status_3); + ASSERT_TRUE(status_1 == status_4); + ASSERT_TRUE(status_1 == status_5); +} + +TEST_F(TestCxxApiStatus, test_status_string_SUCCESS) { + Status status_1(kMDSyntaxError); + ASSERT_EQ(Status::CodeAsString(status_1.StatusCode()), "Syntax error"); +} +} // namespace mindspore diff --git a/tests/ut/cpp/cxx_api/types_test.cc b/tests/ut/cpp/cxx_api/types_test.cc index c222bd5b30..d6c8a7d911 100644 --- a/tests/ut/cpp/cxx_api/types_test.cc +++ b/tests/ut/cpp/cxx_api/types_test.cc @@ -15,7 +15,9 @@ */ #include #include "common/common_test.h" +#define private public #include "include/api/types.h" +#undef private namespace mindspore { class TestCxxApiTypes : public UT::Common { @@ -23,116 +25,120 @@ class TestCxxApiTypes : public UT::Common { TestCxxApiTypes() = default; }; -TEST_F(TestCxxApiTypes, test_tensor_set_name_SUCCESS) { - std::string tensor_name_before = "TEST1"; 
- std::string tensor_name_after = "TEST2"; - api::Tensor tensor1(tensor_name_before, api::DataType::kMsFloat32, {}, nullptr, 0); - api::Tensor tensor2 = tensor1; - api::Tensor tensor3 = tensor1.Clone(); - - // name - ASSERT_EQ(tensor1.Name(), tensor_name_before); - ASSERT_EQ(tensor2.Name(), tensor_name_before); - ASSERT_EQ(tensor3.Name(), tensor_name_before); - - tensor1.SetName(tensor_name_after); - ASSERT_EQ(tensor1.Name(), tensor_name_after); - ASSERT_EQ(tensor2.Name(), tensor_name_after); - ASSERT_EQ(tensor3.Name(), tensor_name_before); +TEST_F(TestCxxApiTypes, test_tensor_default_attr_SUCCESS) { + MSTensor tensor; + ASSERT_EQ(tensor.Name(), ""); + ASSERT_EQ(tensor.DataType(), DataType::kTypeUnknown); + ASSERT_EQ(tensor.Shape().size(), 0); + ASSERT_EQ(tensor.MutableData(), nullptr); + ASSERT_EQ(tensor.DataSize(), 0); + ASSERT_EQ(tensor.IsDevice(), false); } -TEST_F(TestCxxApiTypes, test_tensor_set_dtype_SUCCESS) { - api::Tensor tensor1("", api::DataType::kMsFloat32, {}, nullptr, 0); - api::Tensor tensor2 = tensor1; - api::Tensor tensor3 = tensor1.Clone(); - - // dtype - ASSERT_EQ(tensor1.DataType(), api::DataType::kMsFloat32); - ASSERT_EQ(tensor2.DataType(), api::DataType::kMsFloat32); - ASSERT_EQ(tensor3.DataType(), api::DataType::kMsFloat32); - - tensor1.SetDataType(api::DataType::kMsUint32); - ASSERT_EQ(tensor1.DataType(), api::DataType::kMsUint32); - ASSERT_EQ(tensor2.DataType(), api::DataType::kMsUint32); - ASSERT_EQ(tensor3.DataType(), api::DataType::kMsFloat32); +TEST_F(TestCxxApiTypes, test_tensor_attr_SUCCESS) { + std::string tensor_name = "Name1"; + auto data_type = DataType::kNumberTypeFloat16; + MSTensor tensor(tensor_name, data_type, {}, nullptr, 0); + ASSERT_EQ(tensor.Name(), tensor_name); + ASSERT_EQ(tensor.DataType(), data_type); + ASSERT_EQ(tensor.Shape().size(), 0); + ASSERT_EQ(tensor.MutableData(), nullptr); + ASSERT_EQ(tensor.DataSize(), 0); + ASSERT_EQ(tensor.IsDevice(), false); } -TEST_F(TestCxxApiTypes, test_tensor_set_shape_SUCCESS) { - 
std::vector shape = {3, 4, 5, 6}; - api::Tensor tensor1("", api::DataType::kMsFloat32, {}, nullptr, 0); - api::Tensor tensor2 = tensor1; - api::Tensor tensor3 = tensor1.Clone(); - - // shape - ASSERT_EQ(tensor1.Shape(), std::vector()); - ASSERT_EQ(tensor2.Shape(), std::vector()); - ASSERT_EQ(tensor3.Shape(), std::vector()); - - tensor1.SetShape(shape); - ASSERT_EQ(tensor1.Shape(), shape); - ASSERT_EQ(tensor2.Shape(), shape); - ASSERT_EQ(tensor3.Shape(), std::vector()); +TEST_F(TestCxxApiTypes, test_tensor_create_FAILED) { + MSTensor tensor(nullptr); + ASSERT_EQ(tensor, nullptr); } - -TEST_F(TestCxxApiTypes, test_tensor_util_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); - - // data - ASSERT_EQ(api::Tensor::GetTypeSize(api::DataType::kMsUint32), sizeof(uint32_t)); - ASSERT_EQ(tensor1.ElementNum(), 3 * 4 * 5 * 6); +TEST_F(TestCxxApiTypes, test_tensor_data_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + auto value = tensor.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } } -TEST_F(TestCxxApiTypes, test_tensor_data_ref_and_copy_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); - api::Tensor tensor2 = tensor1; - api::Tensor tensor3 = tensor1.Clone(); - - // data - ASSERT_EQ(tensor1.DataSize(), tensor2.DataSize()); - ASSERT_EQ(tensor1.DataSize(), tensor3.DataSize()); - ASSERT_EQ(tensor1.Data(), tensor2.MutableData()); - ASSERT_NE(tensor1.Data(), tensor3.Data()); +TEST_F(TestCxxApiTypes, test_tensor_ref_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), 
data.size() * sizeof(int32_t)); + MSTensor tensor2 = tensor; + auto value = tensor2.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } } -TEST_F(TestCxxApiTypes, test_tensor_resize_data_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); - - // data - ASSERT_EQ(tensor1.ResizeData(0), true); +TEST_F(TestCxxApiTypes, test_tensor_clone_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + MSTensor tensor2 = tensor.Clone(); + auto value = tensor2.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } } -TEST_F(TestCxxApiTypes, test_tensor_set_data_wrong_data_size_FAILED) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); +TEST_F(TestCxxApiTypes, test_tensor_ref_modified_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + std::vector data_modified = {2, 3, 4, 5}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + MSTensor tensor2 = tensor; + (void)memcpy(tensor.MutableData(), data_modified.data(), data_modified.size() * sizeof(int32_t)); + auto value = tensor2.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data_modified[i]); + } +} - // data - ASSERT_EQ(tensor1.SetData(nullptr, 1), false); - ASSERT_EQ(tensor1.SetData(data.data(), 0), false); +TEST_F(TestCxxApiTypes, test_tensor_clone_modified_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + std::vector data_modified = {2, 3, 4, 5}; + MSTensor tensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * 
sizeof(int32_t)); + MSTensor tensor2 = tensor.Clone(); + (void)memcpy(tensor.MutableData(), data_modified.data(), data_modified.size() * sizeof(int32_t)); + auto value = tensor2.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } } -TEST_F(TestCxxApiTypes, test_tensor_set_data_SUCCESS) { - std::vector shape = {3, 4, 5, 6}; - std::vector data(3 * 4 * 5 * 6, 123); - api::Tensor tensor1("", api::DataType::kMsFloat32, shape, data.data(), data.size() * sizeof(uint32_t)); +TEST_F(TestCxxApiTypes, test_tensor_ref_creator_function_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor = + MSTensor::CreateRefTensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + data = {3, 4, 5, 6}; + auto value = tensor.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_EQ(p[i], data[i]); + } +} - // data - ASSERT_EQ(tensor1.SetData(nullptr, 0), true); - ASSERT_EQ(tensor1.SetData(data.data(), data.size() * sizeof(uint32_t)), true); +TEST_F(TestCxxApiTypes, test_tensor_creator_function_SUCCESS) { + std::vector data = {1, 2, 3, 4}; + MSTensor tensor = + MSTensor::CreateTensor("", DataType::kNumberTypeInt32, {4}, data.data(), data.size() * sizeof(int32_t)); + data = {3, 4, 5, 6}; + auto value = tensor.Data(); + int32_t *p = (int32_t *)value.get(); + for (size_t i = 0; i < data.size(); ++i) { + ASSERT_NE(p[i], data[i]); + } } TEST_F(TestCxxApiTypes, test_buffer_data_ref_and_copy_SUCCESS) { std::vector data(3 * 4 * 5 * 6, 123); - api::Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); - api::Buffer buffer2 = buffer1; - api::Buffer buffer3 = buffer1.Clone(); + Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); + Buffer buffer2 = buffer1; + Buffer buffer3 = buffer1.Clone(); // data ASSERT_EQ(buffer1.DataSize(), buffer2.DataSize()); @@ -143,7 +149,7 @@ TEST_F(TestCxxApiTypes, test_buffer_data_ref_and_copy_SUCCESS) 
{ TEST_F(TestCxxApiTypes, test_buffer_resize_data_SUCCESS) { std::vector data(3 * 4 * 5 * 6, 123); - api::Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); + Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); // data ASSERT_EQ(buffer1.ResizeData(0), true); @@ -151,7 +157,7 @@ TEST_F(TestCxxApiTypes, test_buffer_resize_data_SUCCESS) { TEST_F(TestCxxApiTypes, test_buffer_set_data_wrong_data_size_FAILED) { std::vector data(3 * 4 * 5 * 6, 123); - api::Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); + Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); // data ASSERT_EQ(buffer1.SetData(nullptr, 1), false); @@ -160,7 +166,7 @@ TEST_F(TestCxxApiTypes, test_buffer_set_data_wrong_data_size_FAILED) { TEST_F(TestCxxApiTypes, test_buffer_set_data_SUCCESS) { std::vector data(3 * 4 * 5 * 6, 123); - api::Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); + Buffer buffer1(data.data(), data.size() * sizeof(uint32_t)); // data ASSERT_EQ(buffer1.SetData(nullptr, 0), true); diff --git a/tests/ut/cpp/dataset/btree_test.cc b/tests/ut/cpp/dataset/btree_test.cc index 5e309354cf..9a2271dcfa 100644 --- a/tests/ut/cpp/dataset/btree_test.cc +++ b/tests/ut/cpp/dataset/btree_test.cc @@ -101,7 +101,7 @@ TEST_F(MindDataTestBPlusTree, Test1) { // Test duplicate key { rc = btree.DoInsert(100, "Expect error"); - EXPECT_EQ(rc, Status(StatusCode::kDuplicateKey)); + EXPECT_EQ(rc, Status(StatusCode::kMDDuplicateKey)); } } diff --git a/tests/ut/cpp/dataset/build_vocab_test.cc b/tests/ut/cpp/dataset/build_vocab_test.cc index a0d42e6f89..bcb4d43649 100644 --- a/tests/ut/cpp/dataset/build_vocab_test.cc +++ b/tests/ut/cpp/dataset/build_vocab_test.cc @@ -25,7 +25,7 @@ #include "minddata/dataset/text/vocab.h" using mindspore::dataset::Tensor; -using mindspore::dataset::Status; +using mindspore::Status; using mindspore::dataset::Vocab; class MindDataTestVocab : public UT::DatasetOpTesting { diff --git a/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc 
b/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc index 5ff421fb18..e9328529d9 100644 --- a/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc +++ b/tests/ut/cpp/dataset/c_api_dataset_randomdata_test.cc @@ -17,7 +17,7 @@ #include "minddata/dataset/include/datasets.h" #include "minddata/dataset/core/global_context.h" -#include "mindspore/core/ir/dtype/type_id.h" +#include "ir/dtype/type_id.h" using namespace mindspore::dataset; diff --git a/tests/ut/cpp/dataset/c_api_text_test.cc b/tests/ut/cpp/dataset/c_api_text_test.cc index c368bcee21..e50419e23b 100644 --- a/tests/ut/cpp/dataset/c_api_text_test.cc +++ b/tests/ut/cpp/dataset/c_api_text_test.cc @@ -27,7 +27,7 @@ using namespace mindspore::dataset; using mindspore::dataset::ShuffleMode; -using mindspore::dataset::Status; +using mindspore::Status; using mindspore::dataset::Tensor; using mindspore::dataset::Vocab; diff --git a/tests/ut/cpp/dataset/c_api_text_vocab_test.cc b/tests/ut/cpp/dataset/c_api_text_vocab_test.cc index a01d697153..f54ed381f6 100644 --- a/tests/ut/cpp/dataset/c_api_text_vocab_test.cc +++ b/tests/ut/cpp/dataset/c_api_text_vocab_test.cc @@ -27,7 +27,7 @@ using namespace mindspore::dataset; using mindspore::dataset::DataType; using mindspore::dataset::ShuffleMode; -using mindspore::dataset::Status; +using mindspore::Status; using mindspore::dataset::Tensor; using mindspore::dataset::Vocab; diff --git a/tests/ut/cpp/dataset/cache_op_test.cc b/tests/ut/cpp/dataset/cache_op_test.cc index a85e6a6c33..1e1b66af78 100644 --- a/tests/ut/cpp/dataset/cache_op_test.cc +++ b/tests/ut/cpp/dataset/cache_op_test.cc @@ -43,7 +43,7 @@ Status GetSessionFromEnv(session_id_type *session_id) { *session_id = std::stoul(session_id_str); } catch (const std::exception &e) { std::string err_msg = "Invalid numeric value for session id in env var: " + session_id_str; - return Status(StatusCode::kSyntaxError, err_msg); + return Status(StatusCode::kMDSyntaxError, err_msg); } } else { RETURN_STATUS_UNEXPECTED("Test case 
requires a session id to be provided via SESSION_ID environment variable."); diff --git a/tests/ut/cpp/dataset/center_crop_op_test.cc b/tests/ut/cpp/dataset/center_crop_op_test.cc index 92f069d475..59432eee39 100644 --- a/tests/ut/cpp/dataset/center_crop_op_test.cc +++ b/tests/ut/cpp/dataset/center_crop_op_test.cc @@ -53,7 +53,7 @@ TEST_F(MindDataTestCenterCropOp, TestOp2) { std::unique_ptr op(new CenterCropOp(het, wid)); Status s = op->Compute(input_tensor_, &output_tensor); EXPECT_TRUE(s.IsError()); - ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError); + ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError); } TEST_F(MindDataTestCenterCropOp, TestOp3) { diff --git a/tests/ut/cpp/dataset/common/common.h b/tests/ut/cpp/dataset/common/common.h index dc865cab05..db8c8130c0 100644 --- a/tests/ut/cpp/dataset/common/common.h +++ b/tests/ut/cpp/dataset/common/common.h @@ -20,6 +20,9 @@ #include "minddata/dataset/util/status.h" #include "utils/log_adapter.h" +using mindspore::Status; +using mindspore::StatusCode; + #define ASSERT_OK(_s) \ do { \ Status __rc = (_s); \ diff --git a/tests/ut/cpp/dataset/connector_test.cc b/tests/ut/cpp/dataset/connector_test.cc index 0fc5b100d7..00c971daef 100644 --- a/tests/ut/cpp/dataset/connector_test.cc +++ b/tests/ut/cpp/dataset/connector_test.cc @@ -67,7 +67,7 @@ private: // This worker loop read from input_ vector that have complete list of tasks/elements. // The assignment from the elements in input_ to each worker is ensured in RoundRobin, - // i.e., tid-0 will pick input_[0], tid-1 will pick input_[1], so-on circularly. + // i.e., tid-0 will pick input_[0], tid-1 will pick input_[1], so-on circular. 
Status FirstWorkerPush( int tid, std::shared_ptr > my_conn, @@ -227,7 +227,7 @@ Status MindDataTestConnector::Run_test_1() { std::bind(&MindDataTestConnector::SerialWorkerPull, this, 0, // thread id = 0, since it's the only one - conn2, // poping the data from conn2 + conn2, // popping the data from conn2 &output)); RETURN_IF_NOT_OK(rc); // Wait for the threads to finish. @@ -316,7 +316,7 @@ Status MindDataTestConnector::ValidateOutput(const std::vector &output int prev = 0; for (auto el : output) { if (prev >= el) { - return Status(StatusCode::kUnexpectedError, "Output vector are not in-order."); + return Status(StatusCode::kMDUnexpectedError, "Output vector are not in-order."); } prev = el; } diff --git a/tests/ut/cpp/dataset/execute_test.cc b/tests/ut/cpp/dataset/execute_test.cc index 83029d2400..08f99415ca 100644 --- a/tests/ut/cpp/dataset/execute_test.cc +++ b/tests/ut/cpp/dataset/execute_test.cc @@ -15,6 +15,7 @@ */ #include "common/common.h" #include "common/cvop_common.h" +#include "minddata/dataset/core/de_tensor.h" #include "minddata/dataset/include/execute.h" #include "minddata/dataset/include/transforms.h" #include "minddata/dataset/include/vision.h" @@ -32,12 +33,22 @@ class MindDataTestExecute : public UT::CVOP::CVOpCommon { std::shared_ptr output_tensor_; }; -TEST_F(MindDataTestExecute, TestOp1) { - MS_LOG(INFO) << "Doing testCrop."; - // Crop params +TEST_F(MindDataTestExecute, TestComposeTransforms) { + MS_LOG(INFO) << "Doing TestComposeTransforms."; + + std::shared_ptr de_tensor; + mindspore::dataset::Tensor::CreateFromFile("data/dataset/apple.jpg", &de_tensor); + auto image = mindspore::MSTensor(std::make_shared(de_tensor)); + + // Transform params + std::shared_ptr decode = vision::Decode(); std::shared_ptr center_crop = vision::CenterCrop({30}); - std::shared_ptr out_image = Execute(std::move(center_crop))(input_tensor_); - EXPECT_NE(out_image, nullptr); - EXPECT_EQ(30, out_image->shape()[0]); - EXPECT_EQ(30, out_image->shape()[1]); + 
std::shared_ptr rescale = vision::Rescale(1./3, 0.5); + + auto transform = Execute({decode, center_crop, rescale}); + Status rc = transform(image, &image); + + EXPECT_EQ(rc, Status::OK()); + EXPECT_EQ(30, image.Shape()[0]); + EXPECT_EQ(30, image.Shape()[1]); } diff --git a/tests/ut/cpp/dataset/fill_op_test.cc b/tests/ut/cpp/dataset/fill_op_test.cc index 795db705af..08d1ef072f 100644 --- a/tests/ut/cpp/dataset/fill_op_test.cc +++ b/tests/ut/cpp/dataset/fill_op_test.cc @@ -98,7 +98,7 @@ TEST_F(MindDataTestFillOp, ScalarFill) { Status s = op->Compute(input, &output); EXPECT_TRUE(s.IsError()); - ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError); + ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError); MS_LOG(INFO) << "MindDataTestFillOp-ScalarFill end."; } @@ -147,7 +147,7 @@ TEST_F(MindDataTestFillOp, NumericToString) { Status s = op->Compute(input, &output); EXPECT_TRUE(s.IsError()); - ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError); + ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError); MS_LOG(INFO) << "MindDataTestFillOp-NumericToString end."; } @@ -167,7 +167,7 @@ TEST_F(MindDataTestFillOp, StringToNumeric) { Status s = op->Compute(input, &output); EXPECT_TRUE(s.IsError()); - ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError); + ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError); MS_LOG(INFO) << "MindDataTestFillOp-StringToNumeric end."; } \ No newline at end of file diff --git a/tests/ut/cpp/dataset/interrupt_test.cc b/tests/ut/cpp/dataset/interrupt_test.cc index 8a06413175..7282c1f805 100644 --- a/tests/ut/cpp/dataset/interrupt_test.cc +++ b/tests/ut/cpp/dataset/interrupt_test.cc @@ -43,7 +43,7 @@ TEST_F(MindDataTestIntrpService, Test1) { int v; Status rc; rc = q.PopFront(&v); - EXPECT_TRUE(rc.IsInterrupted()); + EXPECT_TRUE(rc == StatusCode::kMDInterrupted); return rc; }); vg_.GetIntrpService()->InterruptAll(); @@ -59,7 +59,7 @@ TEST_F(MindDataTestIntrpService, Test2) { vg_.CreateAsyncTask("Test1", [&]() -> 
Status { TaskManager::FindMe()->Post(); Status rc = wp.Wait(); - EXPECT_TRUE(rc.IsInterrupted()); + EXPECT_TRUE(rc == StatusCode::kMDInterrupted); return rc; }); vg_.GetIntrpService()->InterruptAll(); diff --git a/tests/ut/cpp/dataset/memory_pool_test.cc b/tests/ut/cpp/dataset/memory_pool_test.cc index 2981a63708..8c9713285d 100644 --- a/tests/ut/cpp/dataset/memory_pool_test.cc +++ b/tests/ut/cpp/dataset/memory_pool_test.cc @@ -79,7 +79,7 @@ TEST_F(MindDataTestMemoryPool, TestMemGuard) { // Try some large value. int64_t sz = 5LL * 1024LL * 1024LL * 1024LL; Status rc = mem.allocate(sz); - ASSERT_TRUE(rc.IsOk() || rc.IsOutofMemory()); + ASSERT_TRUE(rc.IsOk() || rc == StatusCode::kMDOutOfMemory); if (rc.IsOk()) { // Try write a character half way. auto *p = mem.GetMutablePointer(); diff --git a/tests/ut/cpp/dataset/queue_test.cc b/tests/ut/cpp/dataset/queue_test.cc index fcc4e1a54d..3f2e1ad4af 100644 --- a/tests/ut/cpp/dataset/queue_test.cc +++ b/tests/ut/cpp/dataset/queue_test.cc @@ -101,19 +101,19 @@ TEST_F(MindDataTestQueue, Test1) { TEST_F(MindDataTestQueue, Test2) { // Passing status object Queue que(3); - Status rc_send(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Oops"); + Status rc_send(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Oops"); Status rc = que.Add(rc_send); ASSERT_TRUE(rc.IsOk()); Status rc_recv; rc = que.PopFront(&rc_recv); ASSERT_TRUE(rc.IsOk()); ASSERT_EQ(rc_recv, rc_send); - rc = que.EmplaceBack(StatusCode::kOutOfMemory, "Test emplace"); + rc = que.EmplaceBack(StatusCode::kMDOutOfMemory, "Test emplace"); ASSERT_TRUE(rc.IsOk()); Status rc_recv2; rc = que.PopFront(&rc_recv2); ASSERT_TRUE(rc.IsOk()); - ASSERT_TRUE(rc_recv2.IsOutofMemory()); + ASSERT_TRUE(rc_recv2 == StatusCode::kMDOutOfMemory); } TEST_F(MindDataTestQueue, Test3) { diff --git a/tests/ut/cpp/dataset/random_crop_with_bbox_op_test.cc b/tests/ut/cpp/dataset/random_crop_with_bbox_op_test.cc index 50212ac76d..201de384c9 100644 --- 
a/tests/ut/cpp/dataset/random_crop_with_bbox_op_test.cc +++ b/tests/ut/cpp/dataset/random_crop_with_bbox_op_test.cc @@ -103,7 +103,7 @@ TEST_F(MindDataTestRandomCropWithBBoxOp, TestOp3) { for (auto tensor_row_ : images_and_annotations_) { Status s = op->Compute(tensor_row_, &output_tensor_row_); EXPECT_TRUE(s.IsError()); - ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError); + ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError); } MS_LOG(INFO) << "testRandomCropWithBBoxOp3 end."; } \ No newline at end of file diff --git a/tests/ut/cpp/dataset/solarize_op_test.cc b/tests/ut/cpp/dataset/solarize_op_test.cc index 6e7bd0e2fd..e1e5a15b5c 100644 --- a/tests/ut/cpp/dataset/solarize_op_test.cc +++ b/tests/ut/cpp/dataset/solarize_op_test.cc @@ -163,5 +163,5 @@ TEST_F(MindDataTestSolarizeOp, TestOp6) { EXPECT_TRUE(s.IsError()); EXPECT_NE(s.ToString().find("Solarize: threshold_min must be smaller or equal to threshold_max."), std::string::npos); - ASSERT_TRUE(s.get_code() == StatusCode::kUnexpectedError); + ASSERT_TRUE(s.StatusCode() == StatusCode::kMDUnexpectedError); } \ No newline at end of file diff --git a/tests/ut/cpp/dataset/status_test.cc b/tests/ut/cpp/dataset/status_test.cc index 195da1c119..50072ce238 100644 --- a/tests/ut/cpp/dataset/status_test.cc +++ b/tests/ut/cpp/dataset/status_test.cc @@ -27,7 +27,7 @@ class MindDataTestStatus : public UT::Common { // This function returns Status Status f1() { - Status rc(StatusCode::kUnexpectedError, "Testing macro"); + Status rc(StatusCode::kMDUnexpectedError, "Testing macro"); RETURN_IF_NOT_OK(rc); // We shouldn't get here return Status::OK(); @@ -41,11 +41,11 @@ TEST_F(MindDataTestStatus, Test1) { // Test default constructor which should be OK Status rc; ASSERT_TRUE(rc.IsOk()); - Status err1(StatusCode::kOutOfMemory, __LINE__, __FILE__); + Status err1(StatusCode::kMDOutOfMemory, __LINE__, __FILE__); MS_LOG(DEBUG) << err1; - ASSERT_TRUE(err1.IsOutofMemory()); + ASSERT_TRUE(err1 == 
StatusCode::kMDOutOfMemory); ASSERT_TRUE(err1.IsError()); - Status err2(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Oops"); + Status err2(StatusCode::kMDUnexpectedError, __LINE__, __FILE__, "Oops"); MS_LOG(DEBUG) << err2; } diff --git a/tests/ut/cpp/dataset/task_manager_test.cc b/tests/ut/cpp/dataset/task_manager_test.cc index 7b8101fa56..fc61468f5a 100644 --- a/tests/ut/cpp/dataset/task_manager_test.cc +++ b/tests/ut/cpp/dataset/task_manager_test.cc @@ -35,9 +35,9 @@ TEST_F(MindDataTestTaskManager, Test1) { TaskManager::FindMe()->Post(); throw std::bad_alloc(); }); - ASSERT_TRUE(vg_rc.IsOk() || vg_rc.IsOutofMemory()); + ASSERT_TRUE(vg_rc.IsOk() || vg_rc == StatusCode::kMDOutOfMemory); ASSERT_TRUE(vg.join_all().IsOk()); - ASSERT_TRUE(vg.GetTaskErrorIfAny().IsOutofMemory()); + ASSERT_TRUE(vg.GetTaskErrorIfAny() == StatusCode::kMDOutOfMemory); // Test the error is passed back to the master thread if vg_rc above is OK. // If vg_rc is kOutOfMemory, the group error is already passed back. // Some compiler may choose to run the next line in parallel with the above 3 lines @@ -46,7 +46,7 @@ TEST_F(MindDataTestTaskManager, Test1) { // depends on previous lines. 
if (vg.GetTaskErrorIfAny().IsError() && vg_rc.IsOk()) { Status rc = TaskManager::GetMasterThreadRc(); - ASSERT_TRUE(rc.IsOutofMemory()); + ASSERT_TRUE(rc == StatusCode::kMDOutOfMemory); } } diff --git a/tests/ut/cpp/dataset/tensor_test.cc b/tests/ut/cpp/dataset/tensor_test.cc index f789533b80..ed10f8376a 100644 --- a/tests/ut/cpp/dataset/tensor_test.cc +++ b/tests/ut/cpp/dataset/tensor_test.cc @@ -156,9 +156,9 @@ TEST_F(MindDataTestTensorDE, InsertTensor) { Tensor::CreateFromVector(z, TensorShape({2, 3}), &t6); ASSERT_EQ(*t == *t6, true); - ASSERT_EQ(t->InsertTensor({2}, t5).get_code(), StatusCode::kUnexpectedError); - ASSERT_EQ(t->InsertTensor({1}, t5).get_code(), StatusCode::kUnexpectedError); - ASSERT_EQ(t->InsertTensor({1, 2}, t6).get_code(), StatusCode::kUnexpectedError); + ASSERT_EQ(t->InsertTensor({2}, t5).StatusCode(), StatusCode::kMDUnexpectedError); + ASSERT_EQ(t->InsertTensor({1}, t5).StatusCode(), StatusCode::kMDUnexpectedError); + ASSERT_EQ(t->InsertTensor({1, 2}, t6).StatusCode(), StatusCode::kMDUnexpectedError); t6->Fill(-1); ASSERT_TRUE(t->InsertTensor({}, t6).OK()); ASSERT_EQ(*t == *t6, true);