Dvpp refactor and 6 Dvpp operators

pull/12183/head
Zhenglong Li 4 years ago
parent 5f72693b4b
commit e2d3495925

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -1,17 +1,18 @@
file(GLOB_RECURSE _CURRENT_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc")
set_property(SOURCE ${_CURRENT_SRC_FILES} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_MD)
set(DATASET_CORE_SRC_FILES
client.cc
config_manager.cc
cv_tensor.cc
data_type.cc
de_tensor.cc
global_context.cc
tensor.cc
tensor_helpers.cc
tensor_row.cc
tensor_shape.cc
)
client.cc
config_manager.cc
cv_tensor.cc
data_type.cc
device_tensor.cc
de_tensor.cc
global_context.cc
tensor.cc
tensor_helpers.cc
tensor_row.cc
tensor_shape.cc
)
ms_protobuf_generate(EXAMPLE_SRCS EXAMPLE_HDRS example.proto)
ms_protobuf_generate(FEATURE_SRCS FEATURE_HDRS feature.proto)

@ -15,6 +15,7 @@
*/
#include "minddata/dataset/core/de_tensor.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/constants.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/include/type_id.h"
@ -35,7 +36,27 @@ DETensor::DETensor(std::shared_ptr<dataset::Tensor> tensor_impl)
: tensor_impl_(tensor_impl),
name_("MindDataTensor"),
type_(static_cast<mindspore::DataType>(DETypeToMSType(tensor_impl_->type()))),
shape_(tensor_impl_->shape().AsVector()) {}
shape_(tensor_impl_->shape().AsVector()),
is_device_(false) {}
#ifndef ENABLE_ANDROID
DETensor::DETensor(std::shared_ptr<dataset::DeviceTensor> device_tensor_impl, bool is_device)
: device_tensor_impl_(device_tensor_impl), name_("MindDataDeviceTensor"), is_device_(is_device) {
// The sequence of shape_ is (width, widthStride, height, heightStride) in Dvpp module
// We need to add [1]widthStride and [3]heightStride, which are actual YUV image shape, into shape_ attribute
uint8_t flag = 0;
for (auto &i : device_tensor_impl->GetYuvStrideShape()) {
if (flag % 2 == 1) {
int64_t j = static_cast<int64_t>(i);
shape_.emplace_back(j);
}
++flag;
}
std::reverse(shape_.begin(), shape_.end());
MS_LOG(INFO) << "This is a YUV420 format image, one pixel takes 1.5 bytes. Therefore, the shape of"
<< " image is in (H, W) format. You can search for more information about YUV420 format";
}
#endif
const std::string &DETensor::Name() const { return name_; }
@ -45,6 +66,12 @@ enum mindspore::DataType DETensor::DataType() const {
}
size_t DETensor::DataSize() const {
#ifndef ENABLE_ANDROID
if (is_device_) {
ASSERT_NULL(device_tensor_impl_);
return device_tensor_impl_->DeviceDataSize();
}
#endif
ASSERT_NULL(tensor_impl_);
return tensor_impl_->SizeInBytes();
}
@ -52,6 +79,11 @@ size_t DETensor::DataSize() const {
const std::vector<int64_t> &DETensor::Shape() const { return shape_; }
std::shared_ptr<const void> DETensor::Data() const {
#ifndef ENABLE_ANDROID
if (is_device_) {
return std::shared_ptr<const void>(device_tensor_impl_->GetDeviceBuffer(), [](const void *) {});
}
#endif
return std::shared_ptr<const void>(tensor_impl_->GetBuffer(), [](const void *) {});
}
@ -60,7 +92,7 @@ void *DETensor::MutableData() {
return tensor_impl_->GetMutableBuffer();
}
bool DETensor::IsDevice() const { return false; }
bool DETensor::IsDevice() const { return is_device_; }
std::shared_ptr<mindspore::MSTensor::Impl> DETensor::Clone() const { return std::make_shared<DETensor>(tensor_impl_); }
} // namespace dataset

@ -31,7 +31,9 @@ class DETensor : public mindspore::MSTensor::Impl {
DETensor() = default;
~DETensor() override = default;
explicit DETensor(std::shared_ptr<dataset::Tensor> tensor_impl);
#ifndef ENABLE_ANDROID
explicit DETensor(std::shared_ptr<dataset::DeviceTensor> device_tensor_impl, bool is_device);
#endif
const std::string &Name() const override;
enum mindspore::DataType DataType() const override;
@ -50,6 +52,10 @@ class DETensor : public mindspore::MSTensor::Impl {
private:
std::shared_ptr<dataset::Tensor> tensor_impl_;
#ifndef ENABLE_ANDROID
std::shared_ptr<dataset::DeviceTensor> device_tensor_impl_;
#endif
bool is_device_;
std::string name_;
enum mindspore::DataType type_;
std::vector<int64_t> shape_;

@ -0,0 +1,77 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "minddata/dataset/core/global_context.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/core/device_tensor.h"
namespace mindspore {
namespace dataset {
Status DeviceTensor::SetYuvStrideShape_(const uint32_t &width, const uint32_t &widthStride, const uint32_t &height,
                                        const uint32_t &heightStride) {
  // Record the YUV geometry in the fixed order consumed by GetYuvStrideShape().
  YUV_shape_.clear();
  YUV_shape_.reserve(4);
  YUV_shape_.push_back(width);
  YUV_shape_.push_back(widthStride);
  YUV_shape_.push_back(height);
  YUV_shape_.push_back(heightStride);
  return Status::OK();
}
std::vector<uint32_t> DeviceTensor::GetYuvStrideShape() { return YUV_shape_; }
#ifdef ENABLE_ACL
// Bind a Dvpp output descriptor to this tensor: adopt the device buffer pointer and
// record its byte size and YUV stride geometry.
// \param[in] data_ptr Descriptor produced by the Dvpp module (must be non-null with valid data).
// \return Status error if the descriptor or its device buffer is missing.
Status DeviceTensor::SetAttributes(const std::shared_ptr<DvppDataInfo> &data_ptr) {
  CHECK_FAIL_RETURN_UNEXPECTED(data_ptr != nullptr, "Fail to get the device data: descriptor is null.");
  device_data_ = data_ptr->data;
  CHECK_FAIL_RETURN_UNEXPECTED(device_data_ != nullptr, "Fail to get the device data.");
  // Previously the Status results of these two setters were silently dropped.
  RETURN_IF_NOT_OK(SetSize_(data_ptr->dataSize));
  RETURN_IF_NOT_OK(SetYuvStrideShape_(data_ptr->width, data_ptr->widthStride, data_ptr->height,
                                      data_ptr->heightStride));
  return Status::OK();
}
#endif
// Construct an empty DeviceTensor: delegates shape/type bookkeeping to the Tensor base
// class and wires the char-data allocator to the global memory pool.
DeviceTensor::DeviceTensor(const TensorShape &shape, const DataType &type) : Tensor(shape, type) {
// grab the mem pool from global context and create the allocator for char data area
std::shared_ptr<MemoryPool> global_pool = GlobalContext::Instance()->mem_pool();
data_allocator_ = std::make_unique<Allocator<unsigned char>>(global_pool);
}
// Factory: allocate an empty DeviceTensor through the global device-tensor allocator.
// \param[in] shape Known shape of the tensor to create.
// \param[in] type Element type; must be numeric unless the tensor is empty.
// \param[out] out The created tensor (must be a valid pointer).
Status DeviceTensor::CreateEmpty(const TensorShape &shape, const DataType &type, std::shared_ptr<DeviceTensor> *out) {
  CHECK_FAIL_RETURN_UNEXPECTED(out != nullptr, "Output tensor pointer is null.");
  CHECK_FAIL_RETURN_UNEXPECTED(shape.known(), "Invalid shape.");
  CHECK_FAIL_RETURN_UNEXPECTED(type != DataType::DE_UNKNOWN, "Invalid data type.");
  const DeviceTensorAlloc *alloc = GlobalContext::Instance()->device_tensor_allocator();
  *out = std::allocate_shared<DeviceTensor>(*alloc, shape, type);
  // if it's a string tensor and it has no elements, Just initialize the shape and type.
  if (!type.IsNumeric() && shape.NumOfElements() == 0) {
    return Status::OK();
  }
  CHECK_FAIL_RETURN_UNEXPECTED(type.IsNumeric(), "Number of elements is not 0. The type should be numeric.");
  int64_t byte_size = (*out)->SizeInBytes();
  // Don't allocate if we have a tensor with no elements.
  if (byte_size != 0) {
    RETURN_IF_NOT_OK((*out)->AllocateBuffer(byte_size));
  }
  return Status::OK();
}
// Raw pointer to the device-memory buffer; nullptr until SetAttributes has run.
uint8_t *DeviceTensor::GetDeviceBuffer() { return device_data_; }
// Byte size of the device buffer as reported by the Dvpp descriptor.
uint32_t DeviceTensor::DeviceDataSize() { return size_; }
// Record the byte size of the device buffer; always succeeds.
Status DeviceTensor::SetSize_(const uint32_t &new_size) {
size_ = new_size;
return Status::OK();
}
} // namespace dataset
} // namespace mindspore

@ -0,0 +1,65 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_DEVICE_TENSOR_H
#define MINDSPORE_DEVICE_TENSOR_H
#include <memory>
#include <utility>
#include <vector>
#include "include/api/status.h"
#include "minddata/dataset/core/tensor.h"
#ifdef ENABLE_ACL
#include "minddata/dataset/kernels/image/dvpp/utils/DvppCommon.h"
#endif
#include "minddata/dataset/core/constants.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
class Tensor;
/// \brief Tensor subclass whose payload lives in Ascend device memory (filled by the Dvpp module).
class DeviceTensor : public Tensor {
 public:
  /// \brief Construct an (empty) device tensor with the given shape and type.
  DeviceTensor(const TensorShape &shape, const DataType &type);

  ~DeviceTensor() {}

#ifdef ENABLE_ACL
  /// \brief Adopt the device buffer described by a Dvpp output descriptor
  /// (pointer, byte size and YUV stride geometry).
  Status SetAttributes(const std::shared_ptr<DvppDataInfo> &data);
#endif

  /// \brief Factory: allocate an empty DeviceTensor through the global device-tensor allocator.
  static Status CreateEmpty(const TensorShape &shape, const DataType &type, std::shared_ptr<DeviceTensor> *out);

  /// \brief Raw pointer to the device buffer; nullptr until SetAttributes is called.
  uint8_t *GetDeviceBuffer();

  /// \brief Recorded (width, widthStride, height, heightStride); empty until set.
  std::vector<uint32_t> GetYuvStrideShape();

  /// \brief Byte size of the device buffer; 0 until SetAttributes is called.
  uint32_t DeviceDataSize();

  bool HasDeviceData() { return device_data_ != nullptr; }

 private:
  Status SetSize_(const uint32_t &new_size);

  Status SetYuvStrideShape_(const uint32_t &width, const uint32_t &widthStride, const uint32_t &height,
                            const uint32_t &heightStride);

  std::vector<uint32_t> YUV_shape_;  // (width, widthStride, height, heightStride)

  // In-class initializers ensure HasDeviceData()/DeviceDataSize() are well-defined even
  // when SetAttributes is never called (these members were previously uninitialized).
  uint8_t *device_data_{nullptr};

  uint32_t size_{0};
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_DEVICE_TENSOR_H

@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -22,6 +22,7 @@
#ifndef ENABLE_ANDROID
#include "minddata/dataset/core/cv_tensor.h"
#endif
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/util/allocator.h"
#include "minddata/dataset/util/circular_pool.h"
@ -61,6 +62,7 @@ Status GlobalContext::Init() {
#ifndef ENABLE_ANDROID
cv_tensor_allocator_ = std::make_unique<Allocator<CVTensor>>(mem_pool_);
#endif
device_tensor_allocator_ = std::make_unique<Allocator<DeviceTensor>>(mem_pool_);
int_allocator_ = std::make_unique<IntAlloc>(mem_pool_);
return Status::OK();
}

@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,10 +19,10 @@
#include <memory>
#include <mutex>
#include "include/api/status.h"
#include "minddata/dataset/core/config_manager.h"
#include "minddata/dataset/core/constants.h"
#include "minddata/dataset/util/allocator.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
@ -30,9 +30,11 @@ namespace dataset {
class MemoryPool;
class Tensor;
class CVTensor;
class DeviceTensor;
using TensorAlloc = Allocator<Tensor>; // An allocator for Tensors
using CVTensorAlloc = Allocator<CVTensor>; // An allocator CVTensors
using TensorAlloc = Allocator<Tensor>; // An allocator for Tensors
using CVTensorAlloc = Allocator<CVTensor>; // An allocator CVTensors
using DeviceTensorAlloc = Allocator<DeviceTensor>; // An allocator for Device_Tensors
using IntAlloc = Allocator<dsize_t>;
class GlobalContext {
@ -82,6 +84,10 @@ class GlobalContext {
// @return the CVTensor allocator as raw pointer
const CVTensorAlloc *cv_tensor_allocator() const { return cv_tensor_allocator_.get(); }
// Getter method
// @return the DeviceTensor allocator as raw pointer
const DeviceTensorAlloc *device_tensor_allocator() const { return device_tensor_allocator_.get(); }
// Getter method
// @return the integer allocator as raw pointer
const IntAlloc *int_allocator() const { return int_allocator_.get(); }
@ -95,12 +101,13 @@ class GlobalContext {
Status Init();
static std::once_flag init_instance_flag_;
static std::unique_ptr<GlobalContext> global_context_; // The instance of the singleton (global)
std::shared_ptr<MemoryPool> mem_pool_; // A global memory pool
std::shared_ptr<ConfigManager> config_manager_; // The configs
std::unique_ptr<TensorAlloc> tensor_allocator_; // An allocator for Tensors
std::unique_ptr<CVTensorAlloc> cv_tensor_allocator_; // An allocator for CV Tensors
std::unique_ptr<IntAlloc> int_allocator_; // An allocator for ints
static std::unique_ptr<GlobalContext> global_context_; // The instance of the singleton (global)
std::shared_ptr<MemoryPool> mem_pool_; // A global memory pool
std::shared_ptr<ConfigManager> config_manager_; // The configs
std::unique_ptr<TensorAlloc> tensor_allocator_; // An allocator for Tensors
std::unique_ptr<CVTensorAlloc> cv_tensor_allocator_; // An allocator for CV Tensors
std::unique_ptr<DeviceTensorAlloc> device_tensor_allocator_; // An allocator for Device Tensors
std::unique_ptr<IntAlloc> int_allocator_; // An allocator for ints
};
} // namespace dataset
} // namespace mindspore

@ -423,6 +423,17 @@ class Tensor {
static Status GetBufferInfo(Tensor *t, py::buffer_info *out);
#endif
#ifdef ENABLE_ACL
/// \brief Record the YUV geometry (width, widthStride, height, heightStride) of the image
/// held by this tensor so Dvpp operators can interpret the raw buffer.
Status SetYuvShape(const uint32_t &width, const uint32_t &widthStride, const uint32_t &height,
const uint32_t &heightStride) {
std::vector<uint32_t> tmp{width, widthStride, height, heightStride};
yuv_shape_ = tmp;
return Status::OK();
}
/// \brief Returns a copy of the recorded YUV geometry; empty if SetYuvShape was never called.
std::vector<uint32_t> GetYuvShape() { return yuv_shape_; }
#endif
/// TensorIterator is a linear iterator that can be used to iterate over the elements of the Tensor
/// The order elements is as the memory layout (i.e., row-major) [[1,2,3],[4,5,6] --> 1,2,3,4,5,6
/// \tparam T type of values in the Tensor Iterator
@ -686,6 +697,11 @@ class Tensor {
/// pointer to the end of the physical data
unsigned char *data_end_ = nullptr;
#ifdef ENABLE_ACL
/// shape for interpretation of YUV image
std::vector<uint32_t> yuv_shape_;
#endif
private:
friend class DETensor;

@ -17,25 +17,28 @@
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_EXECUTE_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_EXECUTE_H_
#include <string>
#include <vector>
#include <memory>
#include "include/api/context.h"
#include "include/api/types.h"
#include "minddata/dataset/include/constants.h"
#include "minddata/dataset/include/transforms.h"
namespace mindspore {
namespace dataset {
class AscendResource; // Class to manage the resource of Ascend310
// class to run tensor operations in eager mode
class Execute {
public:
/// \brief Constructor
explicit Execute(std::shared_ptr<TensorOperation> op);
explicit Execute(std::shared_ptr<TensorOperation> op, std::string deviceType = "CPU");
explicit Execute(std::vector<std::shared_ptr<TensorOperation>> ops);
explicit Execute(std::vector<std::shared_ptr<TensorOperation>> ops, std::string deviceType = "CPU");
/// \brief Destructor
~Execute() = default;
~Execute();
/// \brief callable function to execute the TensorOperation in eager mode
/// \param[in] input Tensor to be transformed
@ -48,9 +51,19 @@ class Execute {
/// \param[out] out Result tensor after transform
/// \return - Status
Status operator()(const std::vector<mindspore::MSTensor> &input_tensor_list, std::vector<mindspore::MSTensor> *out);
#ifdef ENABLE_ACL
Status DeviceMemoryRelease();
#endif
private:
Status validate_device_();
std::vector<std::shared_ptr<TensorOperation>> ops_;
std::string device_type_;
#ifdef ENABLE_ACL
std::shared_ptr<AscendResource> D_resource_;
#endif
};
} // namespace dataset

@ -39,7 +39,6 @@ class AutoContrastOperation;
class BoundingBoxAugmentOperation;
class CutMixBatchOperation;
class CutOutOperation;
class DvppDecodeResizeCropOperation;
class EqualizeOperation;
class HwcToChwOperation;
class InvertOperation;
@ -106,22 +105,6 @@ std::shared_ptr<CutMixBatchOperation> CutMixBatch(ImageBatchFormat image_batch_f
/// \return Shared pointer to the current TensorOp
std::shared_ptr<CutOutOperation> CutOut(int32_t length, int32_t num_patches = 1);
/// \brief Function to create a DvppDecodeResizeCropJpeg TensorOperation.
/// \notes Tensor operation to decode and resize JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [16*16, 4096*4096].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] crop vector representing the output size of the final crop image.
/// \param[in] size A vector representing the output size of the intermediate resized image.
/// If size is a single value, smaller edge of the image will be resized to this value with
/// the same image aspect ratio. If size has 2 values, it should be (height, width).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodeResizeCropOperation> DvppDecodeResizeCropJpeg(std::vector<uint32_t> crop = {224, 224},
std::vector<uint32_t> resize = {256, 256});
/// \brief Function to create a Equalize TensorOperation.
/// \notes Apply histogram equalization on input image.
/// \return Shared pointer to the current TensorOperation.

@ -0,0 +1,219 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_VISION_ASCEND_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_VISION_ASCEND_H_
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "include/api/status.h"
#include "minddata/dataset/include/constants.h"
#include "minddata/dataset/include/transforms.h"
namespace mindspore {
namespace dataset {
// Transform operations for performing computer vision.
namespace vision {
// Char arrays storing name of corresponding classes (in alphabetical order)
constexpr char kDvppCropJpegOperation[] = "DvppCropJpeg";
constexpr char kDvppDecodeResizeOperation[] = "DvppDecodeResize";
constexpr char kDvppDecodeResizeCropOperation[] = "DvppDecodeResizeCrop";
constexpr char kDvppDecodeJpegOperation[] = "DvppDecodeJpeg";
constexpr char kDvppDecodePngOperation[] = "DvppDecodePng";
constexpr char kDvppResizeJpegOperation[] = "DvppResizeJpeg";
class DvppCropJpegOperation;
class DvppDecodeResizeOperation;
class DvppDecodeResizeCropOperation;
class DvppDecodeJpegOperation;
class DvppDecodePngOperation;
class DvppResizeJpegOperation;
/// \brief Function to create a DvppCropJpeg TensorOperation.
/// \notes Tensor operation to crop JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] crop A vector representing the output size of the final cropped image.
/// If crop is a single value, the shape will be a square. If crop has 2 values, it should be (height, width).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppCropJpegOperation> DvppCropJpeg(std::vector<uint32_t> crop = {256, 256});
/// \brief Function to create a DvppDecodeResizeJpeg TensorOperation.
/// \notes Tensor operation to decode and resize JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] resize A vector representing the output size of the resized image.
/// If resize is a single value, smaller edge of the image will be resized to this value with
/// the same image aspect ratio. If resize has 2 values, it should be (height, width).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodeResizeOperation> DvppDecodeResizeJpeg(std::vector<uint32_t> resize = {256, 256});
/// \brief Function to create a DvppDecodeResizeCropJpeg TensorOperation.
/// \notes Tensor operation to decode and resize JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] crop A vector representing the output size of the final cropped image.
/// \param[in] resize A vector representing the output size of the intermediate resized image.
/// If resize is a single value, smaller edge of the image will be resized to this value with
/// the same image aspect ratio. If resize has 2 values, it should be (height, width).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodeResizeCropOperation> DvppDecodeResizeCropJpeg(std::vector<uint32_t> crop = {224, 224},
std::vector<uint32_t> resize = {256, 256});
/// \brief Function to create a DvppDecodeJpeg TensorOperation.
/// \notes Tensor operation to decode JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodeJpegOperation> DvppDecodeJpeg();
/// \brief Function to create a DvppDecodePng TensorOperation.
/// \notes Tensor operation to decode PNG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodePngOperation> DvppDecodePng();
/// \brief Function to create a DvppResizeJpeg TensorOperation.
/// \notes Tensor operation to resize JPEG image using Ascend series chip DVPP module.
/// It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] resize vector represents the shape of image after resize.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppResizeJpegOperation> DvppResizeJpeg(std::vector<uint32_t> resize = {256, 256});
/// \brief Operation wrapper for DvppCropJpeg: validates the crop parameters and builds
/// the runtime TensorOp that performs the crop on the Ascend Dvpp module.
class DvppCropJpegOperation : public TensorOperation {
 public:
  /// \param[in] crop Output size of the final cropped image (the declared parameter was
  ///     previously misnamed `resize`; it initializes crop_).
  explicit DvppCropJpegOperation(const std::vector<uint32_t> &crop);

  ~DvppCropJpegOperation() = default;

  std::shared_ptr<TensorOp> Build() override;

  Status ValidateParams() override;

  std::string Name() const override { return kDvppCropJpegOperation; }

 private:
  std::vector<uint32_t> crop_;
};
/// \brief Operation wrapper for DvppDecodeResizeJpeg: stores the target resize shape,
/// validates it and builds the corresponding runtime TensorOp.
class DvppDecodeResizeOperation : public TensorOperation {
public:
explicit DvppDecodeResizeOperation(const std::vector<uint32_t> &resize);
~DvppDecodeResizeOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodeResizeOperation; }
private:
std::vector<uint32_t> resize_;  // output size of the resized image
};
/// \brief Operation wrapper for DvppDecodeResizeCropJpeg: stores the crop and resize
/// parameters, validates them and builds the corresponding runtime TensorOp.
class DvppDecodeResizeCropOperation : public TensorOperation {
public:
explicit DvppDecodeResizeCropOperation(const std::vector<uint32_t> &crop, const std::vector<uint32_t> &resize);
~DvppDecodeResizeCropOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodeResizeCropOperation; }
private:
std::vector<uint32_t> crop_;    // output size of the final cropped image
std::vector<uint32_t> resize_;  // output size of the intermediate resized image
};
/// \brief Operation wrapper for DvppDecodeJpeg (parameterless decode); validates and
/// builds the corresponding runtime TensorOp.
class DvppDecodeJpegOperation : public TensorOperation {
public:
~DvppDecodeJpegOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodeJpegOperation; }
};
/// \brief Operation wrapper for DvppDecodePng (parameterless decode); validates and
/// builds the corresponding runtime TensorOp.
class DvppDecodePngOperation : public TensorOperation {
public:
~DvppDecodePngOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodePngOperation; }
};
/// \brief Operation wrapper for DvppResizeJpeg: stores the target resize shape,
/// validates it and builds the corresponding runtime TensorOp.
class DvppResizeJpegOperation : public TensorOperation {
public:
explicit DvppResizeJpegOperation(const std::vector<uint32_t> &resize);
~DvppResizeJpegOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppResizeJpegOperation; }
private:
std::vector<uint32_t> resize_;  // output size of the resized image
};
} // namespace vision
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_VISION_ASCEND_H_

@ -2,5 +2,10 @@ file(GLOB_RECURSE _CURRENT_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc"
set_property(SOURCE ${_CURRENT_SRC_FILES} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_MD)
add_subdirectory(utils)
add_library(kernels-dvpp-image OBJECT
dvpp_decode_resize_crop_jpeg_op.cc)
dvpp_crop_jpeg_op.cc
dvpp_decode_resize_crop_jpeg_op.cc
dvpp_decode_resize_jpeg_op.cc
dvpp_decode_jpeg_op.cc
dvpp_resize_jpeg_op.cc
dvpp_decode_png_op.cc)
add_dependencies(kernels-dvpp-image dvpp-utils)

@ -0,0 +1,145 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <vector>
#include <iostream>
#include "include/api/context.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_crop_jpeg_op.h"
#include "minddata/dataset/kernels/image/image_utils.h"
namespace mindspore {
namespace dataset {
// Crop an image that already resides in device memory, using the shared Ascend
// processor installed via SetAscendResource.
// \param[in] input Device tensor holding the image to crop (buffer must be non-null).
// \param[out] output Device tensor handle wrapping the cropped result (data stays on device).
// \return Status error if the device buffer is missing or the Dvpp call fails.
Status DvppCropJpegOp::Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) {
  IO_CHECK(input, output);
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetDeviceBuffer() != nullptr, "The input image buffer is empty.");
    // Crop whatever the previous pipeline stage left on the device: prefer the Resize
    // output; fall back to the Decode output when no resized data exists.
    std::string last_step = "Resize";
    std::shared_ptr<DvppDataInfo> imageinfo(processor_->Get_Resized_DeviceData());
    if (!imageinfo->data) {
      last_step = "Decode";
    }
    APP_ERROR ret = processor_->JPEG_C(last_step);
    if (ret != APP_ERR_OK) {
      processor_->Release();
      std::string error = "Error in dvpp crop processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    std::shared_ptr<DvppDataInfo> CropOut(processor_->Get_Croped_DeviceData());
    // The real payload stays on the device; the host tensor is a {1,1,1} placeholder
    // whose attributes point at the device buffer.
    const TensorShape dvpp_shape({1, 1, 1});
    const DataType dvpp_data_type(DataType::DE_UINT8);
    // Previously the Status results of the next two calls were silently dropped.
    RETURN_IF_NOT_OK(mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output));
    RETURN_IF_NOT_OK((*output)->SetAttributes(CropOut));
    if (!((*output)->HasDeviceData())) {
      std::string error = "[ERROR] Fail to get the Output result from device memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
  } catch (const cv::Exception &e) {
    std::string error = "[ERROR] Fail in DvppCropJpegOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
// Crop a host-resident YUV420 image: upload it to the Ascend chip, run the Dvpp crop
// (JPEG_C), then copy the result back into a host Tensor.
// \param[in] input Host tensor whose buffer is a YUV420 image; SetYuvShape must have
//     recorded its (width, widthStride, height, heightStride) geometry.
// \param[out] output Host tensor holding the cropped image plus its YUV geometry.
// \return Status error on missing buffer/geometry or any ACL/Dvpp failure.
Status DvppCropJpegOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  IO_CHECK(input, output);
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty.");
    // First part: wrap the host buffer into a Dvpp image descriptor.
    unsigned char *buffer = const_cast<unsigned char *>(input->GetBuffer());
    DvppDataInfo imageinfo;
    imageinfo.dataSize = input->SizeInBytes();
    imageinfo.data = static_cast<uint8_t *>(buffer);
    std::vector<uint32_t> yuv_shape = input->GetYuvShape();
    // Guard against tensors that never had SetYuvShape called: indexing [0..3] on an
    // empty vector below would be undefined behaviour.
    CHECK_FAIL_RETURN_UNEXPECTED(yuv_shape.size() >= 4, "Invalid YUV shape attached to the input tensor.");
    imageinfo.width = yuv_shape[0];
    imageinfo.widthStride = yuv_shape[1];
    imageinfo.height = yuv_shape[2];
    imageinfo.heightStride = yuv_shape[3];
    imageinfo.format = PIXEL_FORMAT_YUV_SEMIPLANAR_420;
    ResourceInfo resource;
    resource.aclConfigPath = "";
    resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID());
    std::shared_ptr<ResourceManager> instance = ResourceManager::GetInstance();
    APP_ERROR ret = instance->InitResource(resource);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init D-chip:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    int deviceId = *(resource.deviceIds.begin());
    aclrtContext context = instance->GetContext(deviceId);
    // Second part end where we initialize the resource of D-chip and set up all configures
    MDAclProcess process(crop_width_, crop_height_, context, true);
    ret = process.InitResource();
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init resource:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    ret = process.JPEG_C(imageinfo);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in dvpp crop processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    // Third part end where we execute the core function of dvpp
    auto data = std::static_pointer_cast<unsigned char>(process.Get_Memory_Data());
    unsigned char *ret_ptr = data.get();
    std::shared_ptr<DvppDataInfo> CropOut(process.Get_Croped_DeviceData());
    dsize_t dvpp_length = CropOut->dataSize;
    const TensorShape dvpp_shape({dvpp_length, 1, 1});
    uint32_t crop_height = CropOut->height;
    uint32_t crop_heightStride = CropOut->heightStride;
    uint32_t crop_width = CropOut->width;
    uint32_t crop_widthStride = CropOut->widthStride;
    const DataType dvpp_data_type(DataType::DE_UINT8);
    // Previously the Status results of the next two calls were silently dropped.
    RETURN_IF_NOT_OK(mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output));
    RETURN_IF_NOT_OK((*output)->SetYuvShape(crop_width, crop_widthStride, crop_height, crop_heightStride));
    if (!((*output)->HasData())) {
      std::string error = "[ERROR] Fail to get the Output result from memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
    process.device_memory_release();
    process.Release();
    // Last part end where we transform the processed data into a tensor which can be applied in later units.
  } catch (const cv::Exception &e) {
    std::string error = "[ERROR] Fail in DvppCropJpegOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
// Infer the output shape of DvppCropJpegOp: a rank-1 raw byte buffer maps to a
// {-1, 1, 1} shape, since the cropped data length is only known after the Dvpp run.
Status DvppCropJpegOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) {
  RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs));
  outputs.clear();
  if (inputs[0].Rank() != 1) {
    return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
  }
  // -1 length: the cropped buffer size is determined on device at runtime.
  outputs.emplace_back(TensorShape({-1, 1, 1}));
  return Status::OK();
}
// Adopt a shared, already-initialized Ascend processor and push this op's
// crop window into it.
Status DvppCropJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
  // Configure the crop window first, then keep a reference to the processor;
  // both calls target the same underlying object, so the order is equivalent.
  processor->SetCropParas(crop_width_, crop_height_);
  processor_ = processor;
  return Status::OK();
}
} // namespace dataset
} // namespace mindspore

@ -0,0 +1,62 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_CROP_JPEG_OP_H
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_CROP_JPEG_OP_H
#include <memory>
#include <string>
#include <vector>
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
/// \brief TensorOp that crops an image using the Ascend 310 Dvpp hardware.
class DvppCropJpegOp : public TensorOp {
 public:
  /// \brief Constructor.
  /// \param crop_height Height of the crop window in pixels.
  /// \param crop_width Width of the crop window in pixels.
  // Members are stored as int32_t to match the constructor parameters and the
  // sibling DvppDecodeResizeCropJpegOp; the previous uint32_t members silently
  // wrapped negative inputs via signed-to-unsigned conversion.
  DvppCropJpegOp(int32_t crop_height, int32_t crop_width) : crop_height_(crop_height), crop_width_(crop_width) {}

  /// \brief Destructor
  ~DvppCropJpegOp() = default;

  /// \brief Host path: set up Dvpp resources, crop, and copy the result back to a host Tensor.
  Status Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) override;

  /// \brief Device path: crop data already resident on the Ascend chip.
  Status Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) override;

  Status OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) override;

  std::string Name() const override { return kDvppCropJpegOp; }

  /// \brief Share an already-initialized Ascend processor and apply the crop parameters to it.
  Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;

 private:
  int32_t crop_height_;
  int32_t crop_width_;
  std::shared_ptr<MDAclProcess> processor_;
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_CROP_JPEG_OP_H

@ -0,0 +1,139 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <vector>
#include <iostream>
#include "include/api/context.h"
#include "minddata/dataset/core/cv_tensor.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_jpeg_op.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/image/image_utils.h"
namespace mindspore {
namespace dataset {
// Device-side path, used when context=="Ascend310": decode a JPEG whose raw
// bytes already live on the Ascend chip and wrap the result in a DeviceTensor.
Status DvppDecodeJpegOp::Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) {
  IO_CHECK(input, output);
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetDeviceBuffer() != nullptr, "The input image buffer on device is empty");
    APP_ERROR rc = processor_->JPEG_D();
    if (rc != APP_ERR_OK) {
      processor_->Release();
      RETURN_STATUS_UNEXPECTED("Error in dvpp processing:" + std::to_string(rc));
    }
    std::shared_ptr<DvppDataInfo> decode_output(processor_->Get_Decode_DeviceData());
    // The {1,1,1}/UINT8 pair is a placeholder; the actual geometry is carried
    // by the attributes copied from the decode output below.
    const TensorShape placeholder_shape({1, 1, 1});
    const DataType placeholder_type(DataType::DE_UINT8);
    mindspore::dataset::DeviceTensor::CreateEmpty(placeholder_shape, placeholder_type, output);
    (*output)->SetAttributes(decode_output);
    if (!(*output)->HasDeviceData()) {
      RETURN_STATUS_UNEXPECTED("[ERROR] Fail to get the Output result from memory!");
    }
  } catch (const cv::Exception &e) {
    RETURN_STATUS_UNEXPECTED("[ERROR] Fail in DvppDecodeJpegOp:" + std::string(e.what()));
  }
  return Status::OK();
}
// Compute() will be called when context=="CPU": take raw JPEG bytes from a host
// Tensor, initialize the Ascend Dvpp resources, decode on chip, then copy the
// decoded YUV data back into a host Tensor.
Status DvppDecodeJpegOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  IO_CHECK(input, output);
  if (!IsNonEmptyJPEG(input)) {
    RETURN_STATUS_UNEXPECTED("DvppDecodeJpegOp only support process JPEG image.");
  }
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty.");
    // First part: wrap the Tensor buffer into the RawData structure consumed by Dvpp.
    unsigned char *buffer = const_cast<unsigned char *>(input->GetBuffer());
    RawData imageInfo;
    uint32_t filesize = input->SizeInBytes();
    imageInfo.lenOfByte = filesize;
    imageInfo.data = static_cast<void *>(buffer);
    ResourceInfo resource;
    resource.aclConfigPath = "";
    resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID());
    std::shared_ptr<ResourceManager> instance = ResourceManager::GetInstance();
    APP_ERROR ret = instance->InitResource(resource);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init D-chip:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    int deviceId = *(resource.deviceIds.begin());
    aclrtContext context = instance->GetContext(deviceId);
    // Second part end where we initialize the resource of D-chip and set up all configures
    MDAclProcess process(context, false);
    ret = process.InitResource();
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init resource:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    ret = process.JPEG_D(imageInfo);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in dvpp processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    // Third part end where we execute the core function of dvpp
    auto data = std::static_pointer_cast<unsigned char>(process.Get_Memory_Data());
    unsigned char *ret_ptr = data.get();
    std::shared_ptr<DvppDataInfo> DecodeOut(process.Get_Decode_DeviceData());
    dsize_t dvpp_length = DecodeOut->dataSize;
    uint32_t decoded_height = DecodeOut->height;
    uint32_t decoded_heightStride = DecodeOut->heightStride;
    uint32_t decoded_width = DecodeOut->width;
    uint32_t decoded_widthStride = DecodeOut->widthStride;
    const TensorShape dvpp_shape({dvpp_length, 1, 1});
    const DataType dvpp_data_type(DataType::DE_UINT8);
    // Propagate allocation failures instead of silently discarding the Status.
    RETURN_IF_NOT_OK(mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output));
    // Record the actual YUV geometry (width, widthStride, height, heightStride) on the Tensor.
    (*output)->SetYuvShape(decoded_width, decoded_widthStride, decoded_height, decoded_heightStride);
    if (!((*output)->HasData())) {
      std::string error = "[ERROR] Fail to get the Output result from device memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
    process.device_memory_release();
    process.Release();
    // Last part end where we transform the processed data into a tensor which can be applied in later units.
  } catch (const cv::Exception &e) {
    std::string error = "[ERROR] Fail in DvppDecodeJpegOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
// Share an already-initialized Ascend processor with this op; used on the
// device-side execution path where resources are set up once by the caller.
Status DvppDecodeJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
  processor_ = processor;
  return Status::OK();
}
// Infer the output shape of DvppDecodeJpegOp: a rank-1 raw byte buffer maps to
// a {-1, 1, 1} shape, since the decoded data length is only known after the Dvpp run.
Status DvppDecodeJpegOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) {
  RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs));
  outputs.clear();
  if (inputs[0].Rank() != 1) {
    return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
  }
  // -1 length: the decoded buffer size is determined on device at runtime.
  outputs.emplace_back(TensorShape({-1, 1, 1}));
  return Status::OK();
}
} // namespace dataset
} // namespace mindspore

@ -0,0 +1,59 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_JPEG_OP_H
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_JPEG_OP_H
#include <memory>
#include <string>
#include <vector>
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
/// \brief TensorOp that decodes JPEG images with the Ascend 310 Dvpp hardware.
class DvppDecodeJpegOp : public TensorOp {
 public:
  // processor_ is injected later via SetAscendResource() on the device path;
  // the host path creates a fresh MDAclProcess per call instead.
  DvppDecodeJpegOp() { processor_ = nullptr; }
  /// \brief Destructor
  ~DvppDecodeJpegOp() = default;
  /// \brief Host path: decode a JPEG held in a host Tensor.
  Status Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) override;
  /// \brief Device path: decode a JPEG already resident on the Ascend chip.
  Status Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) override;
  Status OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) override;
  std::string Name() const override { return kDvppDecodeJpegOp; }
  /// \brief Share an already-initialized Ascend processor with this op.
  Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
 private:
  std::shared_ptr<MDAclProcess> processor_;
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_JPEG_OP_H

@ -0,0 +1,138 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <vector>
#include <iostream>
#include "include/api/context.h"
#include "minddata/dataset/core/cv_tensor.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_png_op.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/image_utils.h"
namespace mindspore {
namespace dataset {
// Device-side path: decode a PNG whose raw bytes already live on the Ascend
// chip and wrap the decoded buffer in a DeviceTensor.
Status DvppDecodePngOp::Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) {
  IO_CHECK(input, output);
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetDeviceBuffer() != nullptr, "The input image buffer on device is empty");
    APP_ERROR ret = processor_->PNG_D();
    if (ret != APP_ERR_OK) {
      processor_->Release();
      std::string error = "Error in dvpp processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    std::shared_ptr<DvppDataInfo> DecodeOut(processor_->Get_Decode_DeviceData());
    // The {1,1,1}/UINT8 pair is a placeholder; the real geometry travels in the
    // attributes copied from the decode output below.
    const TensorShape dvpp_shape({1, 1, 1});
    const DataType dvpp_data_type(DataType::DE_UINT8);
    mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
    (*output)->SetAttributes(DecodeOut);
    if (!((*output)->HasDeviceData())) {
      std::string error = "[ERROR] Fail to get the Output result from memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
  } catch (const cv::Exception &e) {
    // Fix: report the correct operator name (the message previously said "DvppDecodeJpegOp").
    std::string error = "[ERROR] Fail in DvppDecodePngOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
// Host-side path: take raw PNG bytes from a host Tensor, initialize the Ascend
// Dvpp resources, decode on chip, then copy the decoded data back into a host Tensor.
Status DvppDecodePngOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  IO_CHECK(input, output);
  if (!IsNonEmptyPNG(input)) {
    RETURN_STATUS_UNEXPECTED("DvppDecodePngOp only support process PNG image.");
  }
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty.");
    // First part: wrap the Tensor buffer into the RawData structure consumed by Dvpp.
    unsigned char *buffer = const_cast<unsigned char *>(input->GetBuffer());
    RawData imageInfo;
    uint32_t filesize = input->SizeInBytes();
    imageInfo.lenOfByte = filesize;
    imageInfo.data = static_cast<void *>(buffer);
    ResourceInfo resource;
    resource.aclConfigPath = "";
    resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID());
    std::shared_ptr<ResourceManager> instance = ResourceManager::GetInstance();
    APP_ERROR ret = instance->InitResource(resource);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init D-chip:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    int deviceId = *(resource.deviceIds.begin());
    aclrtContext context = instance->GetContext(deviceId);
    // Second part end where we initialize the resource of D-chip and set up all configures
    MDAclProcess process(context, false);
    ret = process.InitResource();
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init resource:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    ret = process.PNG_D(imageInfo);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in dvpp processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    // Third part end where we execute the core function of dvpp
    auto data = std::static_pointer_cast<unsigned char>(process.Get_Memory_Data());
    unsigned char *ret_ptr = data.get();
    std::shared_ptr<DvppDataInfo> DecodeOut(process.Get_Decode_DeviceData());
    dsize_t dvpp_length = DecodeOut->dataSize;
    const TensorShape dvpp_shape({dvpp_length, 1, 1});
    const DataType dvpp_data_type(DataType::DE_UINT8);
    // Propagate allocation failures instead of silently discarding the Status.
    RETURN_IF_NOT_OK(mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output));
    if (!((*output)->HasData())) {
      std::string error = "[ERROR] Fail to get the Output result from memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
    process.device_memory_release();
    process.Release();
    // Last part end where we transform the processed data into a tensor which can be applied in later units.
  } catch (const cv::Exception &e) {
    std::string error = "[ERROR] Fail in DvppDecodePngOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
// Infer the output shape of DvppDecodePngOp: a rank-1 raw byte buffer maps to
// a {-1, 1, 1} shape, since the decoded data length is only known after the Dvpp run.
Status DvppDecodePngOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) {
  RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs));
  outputs.clear();
  if (inputs[0].Rank() != 1) {
    return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
  }
  // -1 length: the decoded buffer size is determined on device at runtime.
  outputs.emplace_back(TensorShape({-1, 1, 1}));
  return Status::OK();
}
// Share an already-initialized Ascend processor with this op; used on the
// device-side execution path where resources are set up once by the caller.
Status DvppDecodePngOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
  processor_ = processor;
  return Status::OK();
}
} // namespace dataset
} // namespace mindspore

@ -0,0 +1,58 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_PNG_OP_H
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_PNG_OP_H
#include <memory>
#include <string>
#include <vector>
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
class DvppDecodePngOp : public TensorOp {
public:
DvppDecodePngOp() {}
/// \brief Destructor
~DvppDecodePngOp() = default;
Status Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) override;
Status Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) override;
Status OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) override;
std::string Name() const override { return kDvppDecodePngOp; }
Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
private:
std::shared_ptr<MDAclProcess> processor_;
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_PNG_OP_H

@ -17,20 +17,48 @@
#include <string>
#include <vector>
#include <iostream>
#include "minddata/dataset/kernels/image/dvpp/utils/AclProcess.h"
#include "include/api/context.h"
#include "minddata/dataset/core/cv_tensor.h"
#include "minddata/dataset/kernels/image/image_utils.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_crop_jpeg_op.h"
#include "include/api/context.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/image_utils.h"
namespace mindspore {
namespace dataset {
// Device-side path: run the fused decode+resize+crop pipeline (JPEG_DRC) on
// data already resident on the Ascend chip, wrapping the result in a DeviceTensor.
Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr<DeviceTensor> &input,
                                           std::shared_ptr<DeviceTensor> *output) {
  IO_CHECK(input, output);
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetDeviceBuffer() != nullptr, "The input image buffer on device is empty");
    APP_ERROR ret = processor_->JPEG_DRC();
    if (ret != APP_ERR_OK) {
      processor_->Release();
      std::string error = "Error in dvpp processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    std::shared_ptr<DvppDataInfo> CropOut(processor_->Get_Croped_DeviceData());
    // The {1,1,1}/UINT8 pair is a placeholder; the real geometry travels in the
    // attributes copied from the crop output below.
    const TensorShape dvpp_shape({1, 1, 1});
    const DataType dvpp_data_type(DataType::DE_UINT8);
    mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
    (*output)->SetAttributes(CropOut);
    if (!((*output)->HasDeviceData())) {
      std::string error = "[ERROR] Fail to get the Output result from memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
  } catch (const cv::Exception &e) {
    std::string error = "[ERROR] Fail in DvppDecodeResizeCropJpegOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
IO_CHECK(input, output);
if (!IsNonEmptyJPEG(input)) {
RETURN_STATUS_UNEXPECTED("SoftDvppDecodeReiszeJpegOp only support process jpeg image.");
RETURN_STATUS_UNEXPECTED("DvppDecodeReiszeJpegOp only support process jpeg image.");
}
try {
CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty.");
@ -38,11 +66,7 @@ Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr<Tensor> &input,
RawData imageInfo;
uint32_t filesize = input->SizeInBytes();
imageInfo.lenOfByte = filesize;
imageInfo.data = std::make_shared<uint8_t>();
imageInfo.data.reset(new uint8_t[filesize], std::default_delete<uint8_t[]>());
memcpy_s(imageInfo.data.get(), filesize, buffer, filesize);
// First part end, whose function is to transform data from a Tensor to imageinfo data structure which can be
// applied on device
imageInfo.data = static_cast<void *>(buffer);
ResourceInfo resource;
resource.aclConfigPath = "";
resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID());
@ -56,25 +80,26 @@ Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr<Tensor> &input,
int deviceId = *(resource.deviceIds.begin());
aclrtContext context = instance->GetContext(deviceId);
// Second part end where we initialize the resource of D chip and set up all configures
AclProcess process(resized_width_, resized_height_, crop_width_, crop_height_, context);
process.set_mode(true);
ret = process.InitResource();
MDAclProcess processor(resized_width_, resized_height_, crop_width_, crop_height_, context, true);
ret = processor.InitResource();
if (ret != APP_ERR_OK) {
instance->Release();
std::string error = "Error in Init resource:" + std::to_string(ret);
RETURN_STATUS_UNEXPECTED(error);
}
ret = process.Process(imageInfo);
ret = processor.JPEG_DRC(imageInfo);
if (ret != APP_ERR_OK) {
instance->Release();
std::string error = "Error in dvpp processing:" + std::to_string(ret);
RETURN_STATUS_UNEXPECTED(error);
}
// Third part end where we execute the core function of dvpp
auto data = std::static_pointer_cast<unsigned char>(process.Get_Memory_Data());
auto data = std::static_pointer_cast<unsigned char>(processor.Get_Memory_Data());
unsigned char *ret_ptr = data.get();
std::shared_ptr<DvppDataInfo> CropOut = process.Get_Device_Memory_Data();
dsize_t dvpp_length = CropOut->dataSize;
std::shared_ptr<DvppDataInfo> CropOut(processor.Get_Croped_DeviceData());
uint32_t dvpp_length = CropOut->dataSize;
const TensorShape dvpp_shape({dvpp_length, 1, 1});
const DataType dvpp_data_type(DataType::DE_UINT8);
mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output);
@ -82,8 +107,8 @@ Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr<Tensor> &input,
std::string error = "[ERROR] Fail to get the Output result from memory!";
RETURN_STATUS_UNEXPECTED(error);
}
process.device_memory_release();
process.Release();
processor.device_memory_release();
processor.Release();
// Last part end where we transform the processed data into a tensor which can be applied in later units.
} catch (const cv::Exception &e) {
std::string error = "[ERROR] Fail in DvppDecodeResizeCropJpegOp:" + std::string(e.what());
@ -102,5 +127,12 @@ Status DvppDecodeResizeCropJpegOp::OutputShape(const std::vector<TensorShape> &i
return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
}
// Adopt a shared, already-initialized Ascend processor and push this op's
// resize and crop parameters into it.
Status DvppDecodeResizeCropJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
  // Configure the processor first, then keep a reference; both calls target the
  // same underlying object, so the order is equivalent to assign-then-configure.
  processor->SetResizeParas(resized_width_, resized_height_);
  processor->SetCropParas(crop_width_, crop_height_);
  processor_ = processor;
  return Status::OK();
}
} // namespace dataset
} // namespace mindspore

@ -14,21 +14,21 @@
* limitations under the License.
*/
#ifndef MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H
#define MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H
#include <memory>
#include <string>
#include <vector>
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
#include "minddata/dataset/core/data_type.h"
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "acl/acl.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
@ -44,17 +44,23 @@ class DvppDecodeResizeCropJpegOp : public TensorOp {
~DvppDecodeResizeCropJpegOp() = default;
Status Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) override;
Status Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) override;
Status OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) override;
std::string Name() const override { return kDvppDecodeResizeCropJpegOp; }
Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
private:
int32_t crop_height_;
int32_t crop_width_;
int32_t resized_height_;
int32_t resized_width_;
std::shared_ptr<MDAclProcess> processor_;
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H

@ -0,0 +1,134 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <vector>
#include <iostream>
#include "include/api/context.h"
#include "minddata/dataset/core/cv_tensor.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_decode_resize_jpeg_op.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/image_utils.h"
namespace mindspore {
namespace dataset {
// Device-side path: run the fused decode+resize pipeline (JPEG_DR) on data
// already resident on the Ascend chip, wrapping the result in a DeviceTensor.
Status DvppDecodeResizeJpegOp::Compute(const std::shared_ptr<DeviceTensor> &input,
                                       std::shared_ptr<DeviceTensor> *output) {
  IO_CHECK(input, output);
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetDeviceBuffer() != nullptr, "The input image buffer on device is empty");
    APP_ERROR rc = processor_->JPEG_DR();
    if (rc != APP_ERR_OK) {
      processor_->Release();
      RETURN_STATUS_UNEXPECTED("Error in dvpp processing:" + std::to_string(rc));
    }
    std::shared_ptr<DvppDataInfo> resize_output(processor_->Get_Resized_DeviceData());
    // The {1,1,1}/UINT8 pair is a placeholder; the actual geometry is carried
    // by the attributes copied from the resize output below.
    const TensorShape placeholder_shape({1, 1, 1});
    const DataType placeholder_type(DataType::DE_UINT8);
    mindspore::dataset::DeviceTensor::CreateEmpty(placeholder_shape, placeholder_type, output);
    (*output)->SetAttributes(resize_output);
    if (!(*output)->HasDeviceData()) {
      RETURN_STATUS_UNEXPECTED("[ERROR] Fail to get the Output result from memory!");
    }
  } catch (const cv::Exception &e) {
    RETURN_STATUS_UNEXPECTED("[ERROR] Fail in DvppDecodeResizeJpegOp:" + std::string(e.what()));
  }
  return Status::OK();
}
// Host-side path: take raw JPEG bytes from a host Tensor, initialize the Ascend
// Dvpp resources, decode and resize on chip, then copy the result back to a host Tensor.
Status DvppDecodeResizeJpegOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  IO_CHECK(input, output);
  if (!IsNonEmptyJPEG(input)) {
    // Fix: the operator name in this message was misspelled ("DvppDecodeReiszeJpegOp").
    RETURN_STATUS_UNEXPECTED("DvppDecodeResizeJpegOp only support process jpeg image.");
  }
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty.");
    // First part: wrap the Tensor buffer into the RawData structure consumed by Dvpp.
    unsigned char *buffer = const_cast<unsigned char *>(input->GetBuffer());
    RawData imageInfo;
    uint32_t filesize = input->SizeInBytes();
    imageInfo.lenOfByte = filesize;
    imageInfo.data = static_cast<void *>(buffer);
    ResourceInfo resource;
    resource.aclConfigPath = "";
    resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID());
    std::shared_ptr<ResourceManager> instance = ResourceManager::GetInstance();
    APP_ERROR ret = instance->InitResource(resource);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init D-chip:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    int deviceId = *(resource.deviceIds.begin());
    aclrtContext context = instance->GetContext(deviceId);
    // Second part end where we initialize the resource of D-chip and set up all configures
    MDAclProcess process(resized_width_, resized_height_, context, false);
    ret = process.InitResource();
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init resource:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    ret = process.JPEG_DR(imageInfo);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in dvpp processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    // Third part end where we execute the core function of dvpp
    auto data = std::static_pointer_cast<unsigned char>(process.Get_Memory_Data());
    unsigned char *ret_ptr = data.get();
    std::shared_ptr<DvppDataInfo> ResizeOut(process.Get_Resized_DeviceData());
    dsize_t dvpp_length = ResizeOut->dataSize;
    const TensorShape dvpp_shape({dvpp_length, 1, 1});
    const DataType dvpp_data_type(DataType::DE_UINT8);
    // Propagate allocation failures instead of silently discarding the Status.
    RETURN_IF_NOT_OK(mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output));
    if (!((*output)->HasData())) {
      std::string error = "[ERROR] Fail to get the Output result from memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
    process.device_memory_release();
    process.Release();
    // Last part end where we transform the processed data into a tensor which can be applied in later units.
  } catch (const cv::Exception &e) {
    std::string error = "[ERROR] Fail in DvppDecodeResizeJpegOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
// Infer the output shape of DvppDecodeResizeJpegOp: a rank-1 raw byte buffer
// maps to a {-1, 1, 1} shape, since the resized data length is only known after the Dvpp run.
Status DvppDecodeResizeJpegOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) {
  RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs));
  outputs.clear();
  if (inputs[0].Rank() != 1) {
    return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
  }
  // -1 length: the resized buffer size is determined on device at runtime.
  outputs.emplace_back(TensorShape({-1, 1, 1}));
  return Status::OK();
}
// Adopt a shared, already-initialized Ascend processor and push this op's
// resize parameters into it.
Status DvppDecodeResizeJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
  // Configure the resize window first, then keep a reference; both calls target
  // the same underlying object, so the order is equivalent.
  processor->SetResizeParas(resized_width_, resized_height_);
  processor_ = processor;
  return Status::OK();
}
} // namespace dataset
} // namespace mindspore

@ -0,0 +1,62 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_RESIZE_JPEG_OP_H
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_RESIZE_JPEG_OP_H
#include <memory>
#include <string>
#include <vector>
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
// TensorOp that decodes a JPEG image and resizes it using the Ascend Dvpp hardware module.
class DvppDecodeResizeJpegOp : public TensorOp {
 public:
  /// \brief Constructor.
  /// \param[in] resized_height Target output height in pixels, forwarded to the Dvpp resize stage.
  /// \param[in] resized_width Target output width in pixels, forwarded to the Dvpp resize stage.
  DvppDecodeResizeJpegOp(int32_t resized_height, int32_t resized_width)
      : resized_height_(resized_height), resized_width_(resized_width) {}

  /// \brief Destructor
  ~DvppDecodeResizeJpegOp() = default;

  /// \brief Decode + resize a host-side Tensor holding raw JPEG bytes; output is a flat uint8 buffer.
  Status Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) override;

  /// \brief Decode + resize variant operating on a DeviceTensor already resident on the Ascend device.
  Status Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) override;

  /// \brief Report output shape; the actual image size is only known at runtime, so it is (-1, 1, 1).
  Status OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) override;

  std::string Name() const override { return kDvppDecodeResizeJpegOp; }

  /// \brief Attach a shared Ascend processor and push the resize parameters into it.
  Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;

 private:
  int32_t resized_height_;  // target output height in pixels
  int32_t resized_width_;   // target output width in pixels

  std::shared_ptr<MDAclProcess> processor_;  // shared Dvpp/ACL processing context, set via SetAscendResource
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_RESIZE_JPEG_OP_H

@ -0,0 +1,147 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <vector>
#include <iostream>
#include "include/api/context.h"
#include "minddata/dataset/core/cv_tensor.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/kernels/image/dvpp/dvpp_resize_jpeg_op.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/image_utils.h"
namespace mindspore {
namespace dataset {
// Resize an image that already lives in Ascend device memory.
// \param[in] input DeviceTensor whose device buffer holds the upstream (decode or crop) output.
// \param[out] output DeviceTensor wrapping the resized device buffer; shape/stride info is
//             carried via SetAttributes, so the tensor shape itself is a (1,1,1) placeholder.
// \return Status - error on a null/empty input buffer or any Dvpp processing failure.
Status DvppResizeJpegOp::Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) {
  IO_CHECK(input, output);
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetDeviceBuffer() != nullptr, "The input image buffer is empty.");
    // Determine which upstream stage produced the device data: if the decode stage
    // holds no data, the input must have come from a crop stage.
    std::string last_step = "Decode";
    std::shared_ptr<DvppDataInfo> imageinfo(processor_->Get_Decode_DeviceData());
    CHECK_FAIL_RETURN_UNEXPECTED(imageinfo != nullptr, "Failed to get decode device data.");
    if (!imageinfo->data) {
      last_step = "Crop";
    }
    APP_ERROR ret = processor_->JPEG_R(last_step);
    if (ret != APP_ERR_OK) {
      processor_->Release();
      std::string error = "Error in dvpp processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    std::shared_ptr<DvppDataInfo> ResizeOut(processor_->Get_Resized_DeviceData());
    CHECK_FAIL_RETURN_UNEXPECTED(ResizeOut != nullptr, "Failed to get resized device data.");
    const TensorShape dvpp_shape({1, 1, 1});
    const DataType dvpp_data_type(DataType::DE_UINT8);
    // Do not silently discard the creation Status: a failed allocation must abort the op.
    RETURN_IF_NOT_OK(mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output));
    (*output)->SetAttributes(ResizeOut);  // Set attributes for output DeviceTensor
    if (!((*output)->HasDeviceData())) {
      std::string error = "[ERROR] Fail to get the Output result from device memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
  } catch (const cv::Exception &e) {
    std::string error = "[ERROR] Fail in DvppResizeJpegOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
// Resize a host-side YUV image via the Dvpp module (standalone path: acquires and
// releases its own ACL resources per call).
// \param[in] input Host Tensor holding YUV 4:2:0 semi-planar data; its YUV shape metadata
//            must be (width, widthStride, height, heightStride).
// \param[out] output Host Tensor wrapping the resized data copied back from the device.
// \return Status - error on invalid input, resource-init failure, or Dvpp processing failure.
Status DvppResizeJpegOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  IO_CHECK(input, output);
  try {
    CHECK_FAIL_RETURN_UNEXPECTED(input->GetBuffer() != nullptr, "The input image buffer is empty.");
    // First part: describe the host YUV buffer for the Dvpp module.
    unsigned char *buffer = const_cast<unsigned char *>(input->GetBuffer());
    DvppDataInfo imageinfo;
    imageinfo.dataSize = input->SizeInBytes();
    imageinfo.data = static_cast<uint8_t *>(buffer);
    std::vector<uint32_t> yuv_shape = input->GetYuvShape();
    // Validate before indexing: four entries (width, widthStride, height, heightStride)
    // are required; a short vector would be an out-of-bounds read.
    CHECK_FAIL_RETURN_UNEXPECTED(yuv_shape.size() >= 4, "Invalid YUV shape of the input tensor.");
    imageinfo.width = yuv_shape[0];
    imageinfo.widthStride = yuv_shape[1];
    imageinfo.height = yuv_shape[2];
    imageinfo.heightStride = yuv_shape[3];
    imageinfo.format = PIXEL_FORMAT_YUV_SEMIPLANAR_420;
    ResourceInfo resource;
    resource.aclConfigPath = "";
    resource.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID());
    std::shared_ptr<ResourceManager> instance = ResourceManager::GetInstance();
    APP_ERROR ret = instance->InitResource(resource);
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init D-chip:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    int deviceId = *(resource.deviceIds.begin());
    aclrtContext context = instance->GetContext(deviceId);
    // Second part end where we initialize the resource of D-chip and set up all configures
    MDAclProcess process(resized_width_, resized_height_, context, false);
    ret = process.InitResource();
    if (ret != APP_ERR_OK) {
      instance->Release();
      std::string error = "Error in Init resource:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    ret = process.JPEG_R(imageinfo);
    if (ret != APP_ERR_OK) {
      // Release the process resources acquired by InitResource before bailing out,
      // otherwise they leak on a processing failure.
      process.Release();
      instance->Release();
      std::string error = "Error in dvpp processing:" + std::to_string(ret);
      RETURN_STATUS_UNEXPECTED(error);
    }
    // Third part end where we execute the core function of dvpp
    auto data = std::static_pointer_cast<unsigned char>(process.Get_Memory_Data());
    unsigned char *ret_ptr = data.get();
    std::shared_ptr<DvppDataInfo> ResizeOut(process.Get_Resized_DeviceData());
    CHECK_FAIL_RETURN_UNEXPECTED(ResizeOut != nullptr, "Failed to get resized device data.");
    dsize_t dvpp_length = ResizeOut->dataSize;
    const TensorShape dvpp_shape({dvpp_length, 1, 1});
    uint32_t resized_height = ResizeOut->height;
    uint32_t resized_heightStride = ResizeOut->heightStride;
    uint32_t resized_width = ResizeOut->width;
    uint32_t resized_widthStride = ResizeOut->widthStride;
    const DataType dvpp_data_type(DataType::DE_UINT8);
    // Do not silently discard the creation Status: a failed copy must abort the op.
    RETURN_IF_NOT_OK(mindspore::dataset::Tensor::CreateFromMemory(dvpp_shape, dvpp_data_type, ret_ptr, output));
    (*output)->SetYuvShape(resized_width, resized_widthStride, resized_height, resized_heightStride);
    if (!((*output)->HasData())) {
      std::string error = "[ERROR] Fail to get the Output result from memory!";
      RETURN_STATUS_UNEXPECTED(error);
    }
    process.device_memory_release();
    process.Release();
    // Last part end where we transform the processed data into a tensor which can be applied in later units.
  } catch (const cv::Exception &e) {
    std::string error = "[ERROR] Fail in DvppResizeJpegOp:" + std::string(e.what());
    RETURN_STATUS_UNEXPECTED(error);
  }
  return Status::OK();
}
// Attach the shared Ascend processing context and hand it this op's target size.
// \param[in] processor Shared MDAclProcess instance; must not be null.
// \return Status - error if the processor handle is null.
Status DvppResizeJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
  // Guard against a null handle: the dereference below would otherwise crash the pipeline.
  CHECK_FAIL_RETURN_UNEXPECTED(processor != nullptr, "Ascend processor is nullptr.");
  processor_ = processor;
  processor_->SetResizeParas(resized_width_, resized_height_);
  return Status::OK();
}
Status DvppResizeJpegOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) {
  RETURN_IF_NOT_OK(TensorOp::OutputShape(inputs, outputs));
  outputs.clear();
  // Only a rank-1 (raw byte buffer) input is acceptable here.
  if (inputs[0].Rank() != 1) {
    return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
  }
  // The resized image size is unknown until runtime, but the output is always a
  // flat single-channel buffer, hence (-1, 1, 1).
  outputs.emplace_back(TensorShape({-1, 1, 1}));
  return Status::OK();
}
} // namespace dataset
} // namespace mindspore

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save