!12537 Dvpp API merge

From: @lizhenglong1992
Reviewed-by: 
Signed-off-by:
pull/12537/MERGE
Committed by mindspore-ci-bot via Gitee
commit a38c996c9c

@@ -16,8 +16,8 @@
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/core/de_tensor.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/tensor_row.h"
#include "minddata/dataset/include/tensor.h"
#include "minddata/dataset/include/type_id.h"
@@ -35,12 +35,11 @@ namespace mindspore {
namespace dataset {
// FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform
Execute::Execute(std::shared_ptr<TensorOperation> op, std::string deviceType) {
Execute::Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice deviceType) {
ops_.emplace_back(std::move(op));
device_type_ = deviceType;
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
@@ -51,14 +50,18 @@ Execute::Execute(std::shared_ptr<TensorOperation> op, std::string deviceType) {
#endif
}
Execute::Execute(std::shared_ptr<TensorTransform> op, std::string deviceType) {
Execute::Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice deviceType) {
// Convert op from TensorTransform to TensorOperation
std::shared_ptr<TensorOperation> operation = op->Parse();
std::shared_ptr<TensorOperation> operation;
if (deviceType == MapTargetDevice::kCpu) {
operation = op->Parse();
} else {
operation = op->Parse(deviceType);
}
ops_.emplace_back(std::move(operation));
device_type_ = deviceType;
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
@@ -70,14 +73,13 @@ Execute::Execute(std::shared_ptr<TensorTransform> op, std::string deviceType) {
}
/*
Execute::Execute(TensorTransform op, std::string deviceType) {
Execute::Execute(TensorTransform op, MapTargetDevice deviceType) {
// Convert op from TensorTransform to TensorOperation
std::shared_ptr<TensorOperation> operation = op.Parse();
ops_.emplace_back(std::move(operation));
device_type_ = deviceType;
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
@@ -90,14 +92,18 @@ Execute::Execute(TensorTransform op, std::string deviceType) {
*/
// Execute function for the example case: auto decode(new vision::Decode());
Execute::Execute(TensorTransform *op, std::string deviceType) {
Execute::Execute(TensorTransform *op, MapTargetDevice deviceType) {
// Convert op from TensorTransform to TensorOperation
std::shared_ptr<TensorOperation> operation = op->Parse();
std::shared_ptr<TensorOperation> operation;
if (deviceType == MapTargetDevice::kCpu) {
operation = op->Parse();
} else {
operation = op->Parse(deviceType);
}
ops_.emplace_back(std::move(operation));
device_type_ = deviceType;
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
@@ -108,11 +114,10 @@ Execute::Execute(TensorTransform *op, std::string deviceType) {
#endif
}
Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, std::string deviceType)
Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, MapTargetDevice deviceType)
: ops_(std::move(ops)), device_type_(deviceType) {
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
@@ -123,15 +128,21 @@ Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, std::string
#endif
}
Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, std::string deviceType) {
Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
[](std::shared_ptr<TensorTransform> operation) -> std::shared_ptr<TensorOperation> { return operation->Parse(); });
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(ops.begin(), ops.end(), std::back_inserter(ops_),
[](std::shared_ptr<TensorTransform> operation) -> std::shared_ptr<TensorOperation> {
return operation->Parse();
});
} else {
for (auto &op : ops) {
ops_.emplace_back(op->Parse(deviceType));
}
}
device_type_ = deviceType;
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
@@ -142,15 +153,20 @@ Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, std::string
#endif
}
Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops, std::string deviceType) {
Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
[](TensorTransform &operation) -> std::shared_ptr<TensorOperation> { return operation.Parse(); });
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
[](TensorTransform &operation) -> std::shared_ptr<TensorOperation> { return operation.Parse(); });
} else {
for (auto &op : ops) {
ops_.emplace_back(op.get().Parse(deviceType));
}
}
device_type_ = deviceType;
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
@@ -162,15 +178,20 @@ Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops,
}
// Execute function for the example vector case: auto decode(new vision::Decode());
Execute::Execute(std::vector<TensorTransform *> ops, std::string deviceType) {
Execute::Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType) {
// Convert ops from TensorTransform to TensorOperation
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
[](TensorTransform *operation) -> std::shared_ptr<TensorOperation> { return operation->Parse(); });
if (deviceType == MapTargetDevice::kCpu) {
(void)std::transform(
ops.begin(), ops.end(), std::back_inserter(ops_),
[](TensorTransform *operation) -> std::shared_ptr<TensorOperation> { return operation->Parse(); });
} else {
for (auto &op : ops) {
ops_.emplace_back(op->Parse(deviceType));
}
}
device_type_ = deviceType;
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
@@ -183,7 +204,7 @@ Execute::Execute(std::vector<TensorTransform *> ops, std::string deviceType) {
Execute::~Execute() {
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
if (device_type_ == MapTargetDevice::kAscend310) {
if (device_resource_) {
device_resource_->FinalizeResource();
} else {
@@ -205,7 +226,7 @@ Status Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor
RETURN_IF_NOT_OK(ops_[i]->ValidateParams());
transforms.emplace_back(ops_[i]->Build());
}
if (device_type_ == "CPU") {
if (device_type_ == MapTargetDevice::kCpu) {
// Convert mindspore::Tensor to dataset::Tensor
std::shared_ptr<dataset::Tensor> de_tensor;
Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(input.Shape()),
@@ -268,7 +289,7 @@ Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::
RETURN_IF_NOT_OK(ops_[i]->ValidateParams());
transforms.emplace_back(ops_[i]->Build());
}
if (device_type_ == "CPU") { // Case CPU
if (device_type_ == MapTargetDevice::kCpu) { // Case CPU
TensorRow de_tensor_list;
for (auto &tensor : input_tensor_list) {
std::shared_ptr<dataset::Tensor> de_tensor;
@@ -325,8 +346,8 @@ Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::
}
Status Execute::validate_device_() {
if (device_type_ != "CPU" && device_type_ != "Ascend310") {
std::string err_msg = device_type_ + " is not supported. (Option: CPU or Ascend310)";
if (device_type_ != MapTargetDevice::kCpu && device_type_ != MapTargetDevice::kAscend310) {
std::string err_msg = "Your input device is not supported. (Option: CPU or Ascend310)";
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_UNEXPECTED(err_msg);
}
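Taken together, the execute.cc changes swap the free-form device string for the MapTargetDevice enum and route non-CPU targets through the device-aware Parse overload. A minimal usage sketch of the reworked eager API, assuming the header paths below and mirroring the unit tests further down in this diff:

#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/vision.h"

using namespace mindspore::dataset;  // for brevity, as in the unit tests

// `image` is an MSTensor wrapping the input buffer (see the DETensor setup in the tests).
std::shared_ptr<TensorTransform> decode(new vision::Decode());
std::shared_ptr<TensorTransform> resize(new vision::Resize({256}));
std::shared_ptr<TensorTransform> centercrop(new vision::CenterCrop({224, 224}));
std::vector<std::shared_ptr<TensorTransform>> transforms = {decode, resize, centercrop};
Execute transform(transforms, MapTargetDevice::kCpu);  // kAscend310 would initialize AscendResource instead
Status rc = transform(image, &image);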

File diff suppressed because it is too large

@@ -27,7 +27,7 @@ using uchar = unsigned char;
using dsize_t = int64_t;
// Target devices to perform map operation
enum class MapTargetDevice { kCpu, kGpu, kDvpp };
enum class MapTargetDevice { kCpu, kGpu, kAscend310 };
// Possible dataset types for holding the data and client type
enum class DatasetType { kUnknown, kArrow, kTf };
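With kDvpp renamed to kAscend310, call sites select the backend through this enum rather than a string, for example (decode standing in for any transform or op list):

Execute transform(decode, MapTargetDevice::kAscend310);  // previously: Execute transform(decode, "Ascend310")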

@@ -33,15 +33,18 @@ class Execute {
public:
/// \brief Constructor
// FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform
explicit Execute(std::shared_ptr<TensorOperation> op, std::string deviceType = "CPU");
explicit Execute(std::shared_ptr<TensorTransform> op, std::string deviceType = "CPU");
// explicit Execute(TensorTransform op, std::string deviceType = "CPU");
explicit Execute(TensorTransform *op, std::string deviceType = "CPU");
explicit Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice deviceType = MapTargetDevice::kCpu);
// explicit Execute(TensorTransform op, MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(TensorTransform *op, MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(std::vector<std::shared_ptr<TensorOperation>> ops, std::string deviceType = "CPU");
explicit Execute(std::vector<std::shared_ptr<TensorTransform>> ops, std::string deviceType = "CPU");
explicit Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops, std::string deviceType = "CPU");
explicit Execute(std::vector<TensorTransform *> ops, std::string deviceType = "CPU");
explicit Execute(std::vector<std::shared_ptr<TensorOperation>> ops,
MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(std::vector<std::shared_ptr<TensorTransform>> ops,
MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops,
MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType = MapTargetDevice::kCpu);
/// \brief Destructor
~Execute();
@@ -65,7 +68,7 @@ class Execute {
std::vector<std::shared_ptr<TensorOperation>> ops_;
std::string device_type_;
MapTargetDevice device_type_;
std::shared_ptr<DeviceResource> device_resource_;
};

@@ -44,6 +44,11 @@ class TensorTransform : public std::enable_shared_from_this<TensorTransform> {
/// \brief Pure virtual function to convert a TensorTransform class into a IR TensorOperation object.
/// \return shared pointer to the newly created TensorOperation.
virtual std::shared_ptr<TensorOperation> Parse() = 0;
/// \brief Virtual function to convert a TensorTransform class into a IR TensorOperation object.
/// \param[in] env A MapTargetDevice value that determines the running environment
/// \return shared pointer to the newly created TensorOperation.
virtual std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) { return nullptr; }
};
// Transform operations for performing data transformation.

@@ -32,186 +32,67 @@ namespace dataset {
// Transform operations for performing computer vision.
namespace vision {
// Char arrays storing name of corresponding classes (in alphabetical order)
constexpr char kDvppCropJpegOperation[] = "DvppCropJpeg";
constexpr char kDvppDecodeResizeOperation[] = "DvppDecodeResize";
constexpr char kDvppDecodeResizeCropOperation[] = "DvppDecodeResizeCrop";
constexpr char kDvppDecodeJpegOperation[] = "DvppDecodeJpeg";
constexpr char kDvppDecodePngOperation[] = "DvppDecodePng";
constexpr char kDvppResizeJpegOperation[] = "DvppResizeJpeg";
class DvppCropJpegOperation;
class DvppDecodeResizeOperation;
class DvppDecodeResizeCropOperation;
class DvppDecodeJpegOperation;
class DvppDecodePngOperation;
class DvppResizeJpegOperation;
/// \brief Function to create a DvppCropJpeg TensorOperation.
/// \notes Tensor operation to crop JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] crop vector representing the output size of the final crop image.
/// \param[in] size A vector representing the output size of the intermediate resized image.
/// If size is a single value, the shape will be a square. If size has 2 values, it should be (height, width).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppCropJpegOperation> DvppCropJpeg(std::vector<uint32_t> crop = {256, 256});
/// \brief Function to create a DvppDecodeResizeJpeg TensorOperation.
/// \notes Tensor operation to decode and resize JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] crop vector representing the output size of the final crop image.
/// \param[in] size A vector representing the output size of the intermediate resized image.
/// If size is a single value, smaller edge of the image will be resized to this value with
/// the same image aspect ratio. If size has 2 values, it should be (height, width).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodeResizeOperation> DvppDecodeResizeJpeg(std::vector<uint32_t> resize = {256, 256});
/// \brief Function to create a DvppDecodeResizeCropJpeg TensorOperation.
/// \notes Tensor operation to decode and resize JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] crop vector representing the output size of the final crop image.
/// \param[in] Resize vector representing the output size of the intermediate resized image.
/// If size is a single value, smaller edge of the image will be resized to the value with
/// the same image aspect ratio. If size has 2 values, it should be (height, width).
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodeResizeCropOperation> DvppDecodeResizeCropJpeg(std::vector<uint32_t> crop = {224, 224},
std::vector<uint32_t> resize = {256, 256});
/// \brief Function to create a DvppDecodeJpeg TensorOperation.
/// \notes Tensor operation to decode JPEG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodeJpegOperation> DvppDecodeJpeg();
/// \brief Function to create a DvppDecodePng TensorOperation.
/// \notes Tensor operation to decode PNG image using the simulation algorithm of Ascend series
/// chip DVPP module. It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppDecodePngOperation> DvppDecodePng();
/// \brief Function to create a DvppResizeJpeg TensorOperation.
/// \notes Tensor operation to resize JPEG image using Ascend series chip DVPP module.
/// It is recommended to use this algorithm in the following scenarios:
/// When training, the DVPP of the Ascend chip is not used,
/// and the DVPP of the Ascend chip is used during inference,
/// and the accuracy of inference is lower than the accuracy of training;
/// and the input image size should be in range [32*32, 2048*2048].
/// Only images with an even resolution can be output. The output of odd resolution is not supported.
/// \param[in] resize vector represents the shape of image after resize.
/// \return Shared pointer to the current TensorOperation.
std::shared_ptr<DvppResizeJpegOperation> DvppResizeJpeg(std::vector<uint32_t> resize = {256, 256});
class DvppCropJpegOperation : public TensorOperation {
public:
explicit DvppCropJpegOperation(const std::vector<uint32_t> &resize);
~DvppCropJpegOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppCropJpegOperation; }
/* ##################################### API class ###########################################*/
private:
std::vector<uint32_t> crop_;
};
class DvppDecodeResizeOperation : public TensorOperation {
class DvppDecodeResizeJpeg : public TensorTransform {
public:
explicit DvppDecodeResizeOperation(const std::vector<uint32_t> &resize);
~DvppDecodeResizeOperation() = default;
/// \brief Constructor.
/// \param[in] resize A vector of int value for each dimension, w.r.t H,W order.
explicit DvppDecodeResizeJpeg(std::vector<uint32_t> resize);
std::shared_ptr<TensorOp> Build() override;
/// \brief Destructor.
~DvppDecodeResizeJpeg() = default;
Status ValidateParams() override;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return Shared pointer to TensorOperation object.
std::shared_ptr<TensorOperation> Parse() override;
std::string Name() const override { return kDvppDecodeResizeOperation; }
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<uint32_t> resize_;
};
class DvppDecodeResizeCropOperation : public TensorOperation {
class DvppDecodeResizeCropJpeg : public TensorTransform {
public:
explicit DvppDecodeResizeCropOperation(const std::vector<uint32_t> &crop, const std::vector<uint32_t> &resize);
~DvppDecodeResizeCropOperation() = default;
/// \brief Constructor.
/// \param[in] crop A vector of int value for each dimension after final crop, w.r.t H,W order.
/// \param[in] resize A vector of int value for each dimension after resize, w.r.t H,W order.
explicit DvppDecodeResizeCropJpeg(std::vector<uint32_t> crop, std::vector<uint32_t> resize);
std::shared_ptr<TensorOp> Build() override;
/// \brief Destructor.
~DvppDecodeResizeCropJpeg() = default;
Status ValidateParams() override;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return Shared pointer to TensorOperation object.
std::shared_ptr<TensorOperation> Parse() override;
std::string Name() const override { return kDvppDecodeResizeCropOperation; }
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<uint32_t> crop_;
std::vector<uint32_t> resize_;
};
class DvppDecodeJpegOperation : public TensorOperation {
class DvppDecodePng : public TensorTransform {
public:
~DvppDecodeJpegOperation() = default;
/// \brief Constructor.
DvppDecodePng();
std::shared_ptr<TensorOp> Build() override;
/// \brief Destructor.
~DvppDecodePng() = default;
Status ValidateParams() override;
/// \brief Function to convert TensorTransform object into a TensorOperation object.
/// \return Shared pointer to TensorOperation object.
std::shared_ptr<TensorOperation> Parse() override;
std::string Name() const override { return kDvppDecodeJpegOperation; }
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
};
class DvppDecodePngOperation : public TensorOperation {
public:
~DvppDecodePngOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodePngOperation; }
};
class DvppResizeJpegOperation : public TensorOperation {
public:
explicit DvppResizeJpegOperation(const std::vector<uint32_t> &resize);
~DvppResizeJpegOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppResizeJpegOperation; }
private:
std::vector<uint32_t> resize_;
};
} // namespace vision
} // namespace dataset
} // namespace mindspore
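The Dvpp entry points are now TensorTransform classes instead of free functions returning IR nodes. The TestDvpp case later in this diff drives them roughly as follows (input setup assumed):

std::vector<uint32_t> crop_paras = {224, 224};
std::vector<uint32_t> resize_paras = {256, 256};
auto decode_resize_crop(new vision::DvppDecodeResizeCropJpeg(crop_paras, resize_paras));
mindspore::dataset::Execute transform(decode_resize_crop, MapTargetDevice::kAscend310);
Status rc = transform(image, &image);  // output stays in YUV layout; see the test expectations below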

@@ -52,6 +52,8 @@ class CenterCrop : public TensorTransform {
/// \return Shared pointer to TensorOperation object.
std::shared_ptr<TensorOperation> Parse() override;
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<int32_t> size_;
};
@@ -94,6 +96,8 @@ class Decode : public TensorTransform {
/// \return Shared pointer to TensorOperation object.
std::shared_ptr<TensorOperation> Parse() override;
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
bool rgb_;
};
@@ -139,6 +143,8 @@ class Resize : public TensorTransform {
/// \return Shared pointer to TensorOperation object.
std::shared_ptr<TensorOperation> Parse() override;
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<int32_t> size_;
InterpolationMode interpolation_;
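The bodies of these new overloads live in the vision.cc diff that is suppressed above, so only the declarations are visible here. A plausible shape for Decode, assuming the Ascend 310 path maps to the DVPP JPEG decoder IR node introduced later in this patch:

// Sketch of the vision.cc side (assumed, not part of the visible diff)
std::shared_ptr<TensorOperation> Decode::Parse(const MapTargetDevice &env) {
  if (env == MapTargetDevice::kAscend310) {
    return std::make_shared<DvppDecodeJpegOperation>();  // IR node from ascend_vision_ir.h below
  }
  return std::make_shared<DecodeOperation>(rgb_);  // same node the CPU-only Parse() returns
}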

@@ -5,4 +5,10 @@ set(DATASET_KERNELS_IR_VISION_SRC_FILES
vision_ir.cc
)
if(ENABLE_ACL)
set(DATASET_KERNELS_IR_VISION_SRC_FILES
${DATASET_KERNELS_IR_VISION_SRC_FILES}
ascend_vision_ir.cc)
endif()
add_library(kernels-ir-vision OBJECT ${DATASET_KERNELS_IR_VISION_SRC_FILES})

@@ -0,0 +1,146 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_ASCEND_VISION_IR_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_ASCEND_VISION_IR_H_
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "include/api/status.h"
#include "minddata/dataset/include/constants.h"
#include "minddata/dataset/include/transforms.h"
#include "minddata/dataset/kernels/ir/tensor_operation.h"
namespace mindspore {
namespace dataset {
// Transform operations for computer vision
namespace vision {
// Char arrays storing name of corresponding classes (in alphabetical order)
constexpr char kDvppCropJpegOperation[] = "DvppCropJpeg";
constexpr char kDvppDecodeResizeOperation[] = "DvppDecodeResize";
constexpr char kDvppDecodeResizeCropOperation[] = "DvppDecodeResizeCrop";
constexpr char kDvppDecodeJpegOperation[] = "DvppDecodeJpeg";
constexpr char kDvppDecodePngOperation[] = "DvppDecodePng";
constexpr char kDvppResizeJpegOperation[] = "DvppResizeJpeg";
/* ####################################### Derived TensorOperation classes ################################# */
class DvppCropJpegOperation : public TensorOperation {
public:
explicit DvppCropJpegOperation(const std::vector<uint32_t> &resize);
~DvppCropJpegOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppCropJpegOperation; }
Status to_json(nlohmann::json *out_json) override;
private:
std::vector<uint32_t> crop_;
};
class DvppDecodeResizeOperation : public TensorOperation {
public:
explicit DvppDecodeResizeOperation(const std::vector<uint32_t> &resize);
~DvppDecodeResizeOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodeResizeOperation; }
Status to_json(nlohmann::json *out_json) override;
private:
std::vector<uint32_t> resize_;
};
class DvppDecodeResizeCropOperation : public TensorOperation {
public:
explicit DvppDecodeResizeCropOperation(const std::vector<uint32_t> &crop, const std::vector<uint32_t> &resize);
~DvppDecodeResizeCropOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodeResizeCropOperation; }
Status to_json(nlohmann::json *out_json) override;
private:
std::vector<uint32_t> crop_;
std::vector<uint32_t> resize_;
};
class DvppDecodeJpegOperation : public TensorOperation {
public:
~DvppDecodeJpegOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodeJpegOperation; }
};
class DvppDecodePngOperation : public TensorOperation {
public:
~DvppDecodePngOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppDecodePngOperation; }
};
class DvppResizeJpegOperation : public TensorOperation {
public:
explicit DvppResizeJpegOperation(const std::vector<uint32_t> &resize);
~DvppResizeJpegOperation() = default;
std::shared_ptr<TensorOp> Build() override;
Status ValidateParams() override;
std::string Name() const override { return kDvppResizeJpegOperation; }
Status to_json(nlohmann::json *out_json) override;
private:
std::vector<uint32_t> resize_;
};
} // namespace vision
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_ASCEND_VISION_IR_H_

@@ -73,14 +73,15 @@ TEST_F(TestDE, TestDvpp) {
// Define dvpp transform
std::vector<uint32_t> crop_paras = {224, 224};
std::vector<uint32_t> resize_paras = {256, 256};
mindspore::dataset::Execute Transform(DvppDecodeResizeCropJpeg(crop_paras, resize_paras));
auto decode_resize_crop(new vision::DvppDecodeResizeCropJpeg(crop_paras, resize_paras));
mindspore::dataset::Execute Transform(decode_resize_crop, MapTargetDevice::kAscend310);
// Apply transform on images
Status rc = Transform(image, &image);
// Check image info
ASSERT_TRUE(rc.IsOk());
ASSERT_EQ(image.Shape().size(), 3);
ASSERT_EQ(image.Shape().size(), 2);
int32_t real_h = 0;
int32_t real_w = 0;
int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
@@ -91,9 +92,9 @@ TEST_F(TestDE, TestDvpp) {
real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
}
ASSERT_EQ(image.Shape()[0], real_h * real_w * 1.5); // For image in YUV format, each pixel takes 1.5 byte
ASSERT_EQ(image.Shape()[1], 1);
ASSERT_EQ(image.Shape()[2], 1);
ASSERT_EQ(image.Shape()[0], real_h); // For image in YUV format, each pixel takes 1.5 byte
ASSERT_EQ(image.Shape()[1], real_w);
ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
#endif
}
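As a concrete check of the assertions above: with crop_paras = {224, 224}, 224 is even so real_h stays 224, 224 % 16 == 0 so real_w stays 224, and the YUV output at 1.5 bytes per pixel occupies 224 * 224 * 1.5 = 75264 bytes, which is what DataSize() must report.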
@@ -105,10 +106,13 @@ TEST_F(TestDE, TestDvppSinkMode) {
auto image = MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));
// Define dvpp transform
std::vector<uint32_t> crop_paras = {224, 224};
std::vector<uint32_t> resize_paras = {256};
mindspore::dataset::Execute Transform({DvppDecodeJpeg(), DvppResizeJpeg(resize_paras), DvppCropJpeg(crop_paras)},
"Ascend310");
std::vector<int32_t> crop_paras = {224, 224};
std::vector<int32_t> resize_paras = {256};
std::shared_ptr<TensorTransform> decode(new vision::Decode());
std::shared_ptr<TensorTransform> resize(new vision::Resize(resize_paras));
std::shared_ptr<TensorTransform> centercrop(new vision::CenterCrop(crop_paras));
std::vector<std::shared_ptr<TensorTransform>> transforms = {decode, resize, centercrop};
mindspore::dataset::Execute Transform(transforms, MapTargetDevice::kAscend310);
// Apply transform on images
Status rc = Transform(image, &image);
@@ -140,9 +144,13 @@ TEST_F(TestDE, TestDvppDecodeResizeCrop) {
auto image = MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));
// Define dvpp transform
std::vector<uint32_t> crop_paras = {416};
std::vector<uint32_t> resize_paras = {512};
mindspore::dataset::Execute Transform(DvppDecodeResizeCropJpeg(crop_paras, resize_paras), "Ascend310");
std::vector<int32_t> crop_paras = {416};
std::vector<int32_t> resize_paras = {512};
auto decode(new vision::Decode());
auto resize(new vision::Resize(resize_paras));
auto centercrop(new vision::CenterCrop(crop_paras));
std::vector<TensorTransform *> transforms = {decode, resize, centercrop};
mindspore::dataset::Execute Transform(transforms, MapTargetDevice::kAscend310);
// Apply transform on images
Status rc = Transform(image, &image);

@@ -200,7 +200,7 @@ TEST_F(MindDataTestExecute, TestTransformDecodeResizeCenterCrop1) {
auto hwc2chw(new vision::HWC2CHW());
std::vector<TensorTransform *> op_list = {decode, resize, centercrop, hwc2chw};
mindspore::dataset::Execute Transform(op_list, "CPU");
mindspore::dataset::Execute Transform(op_list, MapTargetDevice::kCpu);
// Apply transform on image
Status rc = Transform(image, &image);
