Make the resource manager independent: add a new base class DeviceResource and a new derived class AscendResource

pull/12381/head
Zhenglong Li 4 years ago
parent 3d2195bea3
commit f2dce335f2

@ -27,108 +27,11 @@
#include "mindspore/lite/src/common/log_adapter.h"
#endif
#ifdef ENABLE_ACL
#include "acl/acl.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/image/dvpp/utils/DvppCommon.h"
#include "minddata/dataset/core/ascend_resource.h"
#endif
namespace mindspore {
namespace dataset {
#ifdef ENABLE_ACL
// Legacy in-file manager for Ascend310 (D-chip) device resources used by the
// eager Execute path; compiled only under ENABLE_ACL.
// NOTE(review): this declaration is superseded in this commit by the
// DeviceResource base / AscendResource derived classes under
// minddata/dataset/core.
class AscendResource {
public:
// Constructor eagerly calls InitChipResource(); its returned Status is discarded.
AscendResource();
~AscendResource() = default;
// Acquire the ResourceManager singleton, init the device and the ACL processor.
Status InitChipResource();
// Release the ACL processor's resources.
Status FinalizeChipResource();
// Copy a host MSTensor into device memory as a DeviceTensor.
Status Sink(const mindspore::MSTensor &host_input, std::shared_ptr<DeviceTensor> *device_input);
// Copy a DeviceTensor from device memory back into a host Tensor.
Status Pop(std::shared_ptr<DeviceTensor> device_output, std::shared_ptr<Tensor> *host_output);
// Free device-side buffers held by the processor.
Status DeviceDataRelease();
// Processor handle; also passed to each TensorOperation via SetAscendResource.
std::shared_ptr<MDAclProcess> processor_;
// Shared handle to the process-wide ResourceManager singleton.
std::shared_ptr<ResourceManager> ascend_resource_;
};
// Eagerly initializes the chip resource. A constructor cannot surface a Status
// to its caller, so the return value is explicitly discarded here; failures are
// already reported via MS_LOG inside InitChipResource().
AscendResource::AscendResource() { (void)InitChipResource(); }
// Bring up the Ascend310 (D-chip) resources: acquire the global
// ResourceManager, initialize it for the configured device id, then create and
// initialize the ACL processor bound to that device's context.
// On any failure the manager is released and an error Status is returned.
Status AscendResource::InitChipResource() {
  ResourceInfo resource_info;
  resource_info.aclConfigPath = "";
  resource_info.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID());
  ascend_resource_ = ResourceManager::GetInstance();
  APP_ERROR err_code = ascend_resource_->InitResource(resource_info);
  if (err_code != APP_ERR_OK) {
    ascend_resource_->Release();
    const std::string err_msg = "Error in Init D-chip:" + std::to_string(err_code);
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  const int device_id = *(resource_info.deviceIds.begin());
  aclrtContext ctx = ascend_resource_->GetContext(device_id);
  // false: the processor does not own the context; ResourceManager does.
  processor_ = std::make_shared<MDAclProcess>(ctx, false);
  err_code = processor_->InitResource();
  if (err_code != APP_ERR_OK) {
    ascend_resource_->Release();
    const std::string err_msg = "Error in Init resource:" + std::to_string(err_code);
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  MS_LOG(INFO) << "Ascend resource all initialized!";
  return Status::OK();
}
// Release the ACL processor's resources.
// Guards against a null processor_: the constructor discards the Status of
// InitChipResource(), so a failed init leaves processor_ unset and the
// original unconditional dereference would crash.
Status AscendResource::FinalizeChipResource() {
  if (processor_ != nullptr) {
    processor_->Release();
  }
  return Status::OK();
}
// Copy a host MSTensor into device memory as a DeviceTensor.
// \param host_input Host tensor to sink; its data pointer must be non-null.
// \param device_input [out] Device tensor handle receiving the data.
// \return error Status if the host data is null or the H2D transfer fails.
Status AscendResource::Sink(const mindspore::MSTensor &host_input, std::shared_ptr<DeviceTensor> *device_input) {
  // Fix: validate the host buffer before handing it to CreateFromMemory.
  CHECK_FAIL_RETURN_UNEXPECTED(host_input.Data() != nullptr, "Data of input tensor is null");
  std::shared_ptr<mindspore::dataset::Tensor> de_input;
  Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(host_input.Shape()),
                                                MSTypeToDEType(static_cast<TypeId>(host_input.DataType())),
                                                reinterpret_cast<const uchar *>(host_input.Data().get()), &de_input);
  RETURN_IF_NOT_OK(rc);
  APP_ERROR ret = processor_->H2D_Sink(de_input, *device_input);
  if (ret != APP_ERR_OK) {
    ascend_resource_->Release();
    std::string err_msg = "Error in data sink process:" + std::to_string(ret);
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  MS_LOG(INFO) << "Process data sink successfully";
  return Status::OK();
}
// Copy a DeviceTensor from device memory back into a host Tensor.
// Releases the resource manager and returns an error Status if the D2H
// transfer fails.
Status AscendResource::Pop(std::shared_ptr<DeviceTensor> device_output, std::shared_ptr<Tensor> *host_output) {
  APP_ERROR err_code = processor_->D2H_Pop(device_output, *host_output);
  if (err_code == APP_ERR_OK) {
    return Status::OK();
  }
  ascend_resource_->Release();
  const std::string err_msg = "Error in data pop processing:" + std::to_string(err_code);
  MS_LOG(ERROR) << err_msg;
  RETURN_STATUS_UNEXPECTED(err_msg);
}
// Free the device-side buffers currently held by the ACL processor.
// Releases the resource manager and returns an error Status on failure.
Status AscendResource::DeviceDataRelease() {
  APP_ERROR err_code = processor_->device_memory_release();
  if (err_code == APP_ERR_OK) {
    return Status::OK();
  }
  ascend_resource_->Release();
  const std::string err_msg = "Error in device data release:" + std::to_string(err_code);
  MS_LOG(ERROR) << err_msg;
  RETURN_STATUS_UNEXPECTED(err_msg);
}
#endif
Execute::Execute(std::shared_ptr<TensorOperation> op, std::string deviceType) {
ops_.emplace_back(std::move(op));
@ -136,7 +39,12 @@ Execute::Execute(std::shared_ptr<TensorOperation> op, std::string deviceType) {
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
D_resource_ = std::make_shared<AscendResource>();
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
device_resource_ = nullptr;
MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
}
}
#endif
}
@ -146,7 +54,12 @@ Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, std::string
MS_LOG(INFO) << "Running Device: " << device_type_;
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
D_resource_ = std::make_shared<AscendResource>();
device_resource_ = std::make_shared<AscendResource>();
Status rc = device_resource_->InitResource();
if (!rc.IsOk()) {
device_resource_ = nullptr;
MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
}
}
#endif
}
@ -154,7 +67,11 @@ Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, std::string
Execute::~Execute() {
#ifdef ENABLE_ACL
if (device_type_ == "Ascend310") {
D_resource_->FinalizeChipResource();
if (device_resource_) {
device_resource_->FinalizeResource();
} else {
MS_LOG(ERROR) << "Device resource is nullptr which is illegal under case Ascend310";
}
}
#endif
}
@ -200,11 +117,12 @@ Status Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor
*output = mindspore::MSTensor(std::make_shared<DETensor>(de_tensor));
} else { // Ascend310 case, where we must set Ascend resource on each operators
#ifdef ENABLE_ACL
CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
std::shared_ptr<mindspore::dataset::DeviceTensor> device_input;
RETURN_IF_NOT_OK(D_resource_->Sink(input, &device_input));
RETURN_IF_NOT_OK(device_resource_->Sink(input, &device_input));
for (auto &t : transforms) {
std::shared_ptr<DeviceTensor> device_output;
RETURN_IF_NOT_OK(t->SetAscendResource(D_resource_->processor_));
RETURN_IF_NOT_OK(t->SetAscendResource(device_resource_));
RETURN_IF_NOT_OK(t->Compute(device_input, &device_output));
// For next transform
@ -262,12 +180,13 @@ Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::
CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor is not valid");
} else { // Case Ascend310
#ifdef ENABLE_ACL
CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
for (auto &input_tensor : input_tensor_list) {
std::shared_ptr<dataset::DeviceTensor> device_input;
RETURN_IF_NOT_OK(D_resource_->Sink(input_tensor, &device_input));
RETURN_IF_NOT_OK(device_resource_->Sink(input_tensor, &device_input));
for (auto &t : transforms) {
std::shared_ptr<DeviceTensor> device_output;
RETURN_IF_NOT_OK(t->SetAscendResource(D_resource_->processor_));
RETURN_IF_NOT_OK(t->SetAscendResource(device_resource_));
RETURN_IF_NOT_OK(t->Compute(device_input, &device_output));
// For next transform
@ -275,12 +194,12 @@ Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::
}
CHECK_FAIL_RETURN_UNEXPECTED(device_input->HasDeviceData(), "Apply transform failed, output tensor has no data");
// Due to the limitation of Ascend310 memory, we have to pop every data onto host memory
// So the speed of this method is slower than solo mode
// So the speed of this batch method is slower than solo mode
std::shared_ptr<mindspore::dataset::Tensor> host_output;
RETURN_IF_NOT_OK(D_resource_->Pop(device_input, &host_output));
RETURN_IF_NOT_OK(device_resource_->Pop(device_input, &host_output));
auto ms_tensor = mindspore::MSTensor(std::make_shared<DETensor>(host_output));
output_tensor_list->emplace_back(ms_tensor);
RETURN_IF_NOT_OK(D_resource_->DeviceDataRelease());
RETURN_IF_NOT_OK(device_resource_->DeviceDataRelease());
}
CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor vector is empty");
#endif
@ -297,17 +216,16 @@ Status Execute::validate_device_() {
return Status::OK();
}
#ifdef ENABLE_ACL
Status Execute::DeviceMemoryRelease() {
Status rc = D_resource_->DeviceDataRelease();
CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
Status rc = device_resource_->DeviceDataRelease();
if (rc.IsError()) {
D_resource_->ascend_resource_->Release();
std::string err_msg = "Error in device data release";
MS_LOG(ERROR) << err_msg;
RETURN_STATUS_UNEXPECTED(err_msg);
}
return Status::OK();
}
#endif
} // namespace dataset
} // namespace mindspore

@ -5,6 +5,7 @@ set(DATASET_CORE_SRC_FILES
config_manager.cc
cv_tensor.cc
data_type.cc
device_resource.cc
device_tensor.cc
de_tensor.cc
global_context.cc
@ -14,6 +15,12 @@ set(DATASET_CORE_SRC_FILES
tensor_shape.cc
)
if(ENABLE_ACL)
set(DATASET_CORE_SRC_FILES
${DATASET_CORE_SRC_FILES}
ascend_resource.cc)
endif()
ms_protobuf_generate(EXAMPLE_SRCS EXAMPLE_HDRS example.proto)
ms_protobuf_generate(FEATURE_SRCS FEATURE_HDRS feature.proto)
add_library(core OBJECT ${DATASET_CORE_SRC_FILES} ${EXAMPLE_SRCS} ${FEATURE_SRCS})

@ -0,0 +1,98 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/api/context.h"
#include "include/api/types.h"
#include "minddata/dataset/include/type_id.h"
#include "minddata/dataset/core/ascend_resource.h"
namespace mindspore {
namespace dataset {
// Initialize the Ascend310 device resources (DeviceResource override):
// acquire the global ResourceManager, initialize it for the configured device
// id, then build and initialize the ACL processor bound to that device's
// context. On any failure the manager is released and an error is returned.
Status AscendResource::InitResource() {
  ResourceInfo resource_info;
  resource_info.aclConfigPath = "";
  resource_info.deviceIds.insert(mindspore::GlobalContext::GetGlobalDeviceID());
  ascend_resource_ = ResourceManager::GetInstance();
  APP_ERROR err_code = ascend_resource_->InitResource(resource_info);
  if (err_code != APP_ERR_OK) {
    ascend_resource_->Release();
    const std::string err_msg = "Error in Init D-chip:" + std::to_string(err_code);
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  const int device_id = *(resource_info.deviceIds.begin());
  aclrtContext ctx = ascend_resource_->GetContext(device_id);
  // false: the processor does not own the context; ResourceManager does.
  processor_ = std::make_shared<MDAclProcess>(ctx, false);
  err_code = processor_->InitResource();
  if (err_code != APP_ERR_OK) {
    ascend_resource_->Release();
    const std::string err_msg = "Error in Init resource:" + std::to_string(err_code);
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  MS_LOG(INFO) << "Ascend resource all initialized!";
  return Status::OK();
}
// Release the ACL processor's resources (DeviceResource override).
// Guards against a null processor_ — if InitResource() failed (or was never
// called) the original unconditional dereference would crash.
Status AscendResource::FinalizeResource() {
  if (processor_ != nullptr) {
    processor_->Release();
  }
  return Status::OK();
}
// Copy a host MSTensor into device memory as a DeviceTensor (override).
// \param host_input Host tensor to sink; its data pointer must be non-null.
// \param device_input [out] Device tensor handle receiving the data.
// \return error Status if the host data is null or the H2D transfer fails.
Status AscendResource::Sink(const mindspore::MSTensor &host_input, std::shared_ptr<DeviceTensor> *device_input) {
  // Fix: validate the host buffer before handing it to CreateFromMemory.
  CHECK_FAIL_RETURN_UNEXPECTED(host_input.Data() != nullptr, "Data of input tensor is null");
  std::shared_ptr<mindspore::dataset::Tensor> de_input;
  Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(host_input.Shape()),
                                                MSTypeToDEType(static_cast<TypeId>(host_input.DataType())),
                                                reinterpret_cast<const uchar *>(host_input.Data().get()), &de_input);
  RETURN_IF_NOT_OK(rc);
  APP_ERROR ret = processor_->H2D_Sink(de_input, *device_input);
  if (ret != APP_ERR_OK) {
    ascend_resource_->Release();
    std::string err_msg = "Error in data sink process:" + std::to_string(ret);
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  MS_LOG(INFO) << "Process data sink successfully";
  return Status::OK();
}
// Copy a DeviceTensor from device memory back into a host Tensor (override).
// Releases the resource manager and returns an error Status if the D2H
// transfer fails.
Status AscendResource::Pop(const std::shared_ptr<DeviceTensor> &device_output, std::shared_ptr<Tensor> *host_output) {
  APP_ERROR err_code = processor_->D2H_Pop(device_output, *host_output);
  if (err_code == APP_ERR_OK) {
    return Status::OK();
  }
  ascend_resource_->Release();
  const std::string err_msg = "Error in data pop processing:" + std::to_string(err_code);
  MS_LOG(ERROR) << err_msg;
  RETURN_STATUS_UNEXPECTED(err_msg);
}
// Free the device-side buffers currently held by the ACL processor (override).
// Releases the resource manager and returns an error Status on failure.
Status AscendResource::DeviceDataRelease() {
  APP_ERROR err_code = processor_->device_memory_release();
  if (err_code == APP_ERR_OK) {
    return Status::OK();
  }
  ascend_resource_->Release();
  const std::string err_msg = "Error in device data release:" + std::to_string(err_code);
  MS_LOG(ERROR) << err_msg;
  RETURN_STATUS_UNEXPECTED(err_msg);
}
// Expose the underlying MDAclProcess as an opaque handle; Dvpp tensor ops
// static_pointer_cast it back to MDAclProcess in their SetAscendResource().
std::shared_ptr<void> AscendResource::GetInstance() { return processor_; }
} // namespace dataset
} // namespace mindspore

@ -0,0 +1,59 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_ASCEND_RESOURCE_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_ASCEND_RESOURCE_H_
#include <memory>
#include <string>
#include "acl/acl.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
namespace mindspore {
namespace dataset {
// Ascend310 implementation of DeviceResource: manages the ResourceManager
// singleton handle and the ACL processor used to sink/pop tensors to and from
// device memory.
class AscendResource : public DeviceResource {
public:
AscendResource() = default;
// NOTE(review): dtor does not call FinalizeResource(); callers (Execute's
// destructor) are expected to finalize explicitly — confirm this is intended.
~AscendResource() = default;
// Acquire the ResourceManager, init the device and the ACL processor.
Status InitResource() override;
// Release the ACL processor's resources.
Status FinalizeResource() override;
// Copy a host MSTensor into device memory as a DeviceTensor.
Status Sink(const mindspore::MSTensor &host_input, std::shared_ptr<DeviceTensor> *device_input) override;
// Copy a DeviceTensor from device memory back into a host Tensor.
Status Pop(const std::shared_ptr<DeviceTensor> &device_output, std::shared_ptr<Tensor> *host_output) override;
// Returns the MDAclProcess as an opaque handle for the Dvpp ops to cast back.
std::shared_ptr<void> GetInstance() override;
// Free device-side buffers held by the processor.
Status DeviceDataRelease() override;
private:
// ACL processor bound to the device context created during InitResource().
std::shared_ptr<MDAclProcess> processor_;
// Shared handle to the process-wide ResourceManager singleton.
std::shared_ptr<ResourceManager> ascend_resource_;
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_ASCEND_RESOURCE_H_

@ -0,0 +1,58 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "minddata/dataset/core/device_resource.h"
namespace mindspore {
namespace dataset {
// Base-class stub: a concrete device resource must override InitResource().
Status DeviceResource::InitResource() {
  const std::string err_msg =
    "Is this a valid device? If yes, please implement this InitResource() in the derived class.";
  return Status(StatusCode::kMDUnexpectedError, err_msg);
}
// Base-class stub: a concrete device resource must override FinalizeResource().
Status DeviceResource::FinalizeResource() {
  const std::string err_msg =
    "Is this a valid device? If yes, please implement this FinalizeResource() in the derived class.";
  return Status(StatusCode::kMDUnexpectedError, err_msg);
}
// Base-class stub: a device with accessible device memory must override Sink().
Status DeviceResource::Sink(const mindspore::MSTensor &host_input, std::shared_ptr<DeviceTensor> *device_input) {
  const std::string err_msg =
    "Is this a valid device whose device memory is available? If yes, please implement this Sink() in the "
    "derived class.";
  return Status(StatusCode::kMDUnexpectedError, err_msg);
}
// Base-class stub: a device with accessible device memory must override Pop().
Status DeviceResource::Pop(const std::shared_ptr<DeviceTensor> &device_output, std::shared_ptr<Tensor> *host_output) {
  const std::string err_msg =
    "Is this a valid device whose device memory is available? If yes, please implement this Pop() in the "
    "derived class.";
  return Status(StatusCode::kMDUnexpectedError, err_msg);
}
// Base-class stub: a device with accessible device memory must override
// DeviceDataRelease().
Status DeviceResource::DeviceDataRelease() {
  const std::string err_msg =
    "Is this a valid device whose device memory is available? If yes, please implement this DeviceDataRelease() in the "
    "derived class.";
  return Status(StatusCode::kMDUnexpectedError, err_msg);
}
// Base-class stub: a device that holds a processor object must override
// GetInstance(). Logs an error and returns a null handle.
std::shared_ptr<void> DeviceResource::GetInstance() {
  MS_LOG(ERROR) << "Is this a device which contains a processor object? If yes, please implement this GetInstance() in "
                   "the derived class";
  return nullptr;
}
} // namespace dataset
} // namespace mindspore

@ -0,0 +1,51 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DEVICE_RESOURCE_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DEVICE_RESOURCE_H_
#include <memory>
#include "include/api/context.h"
#include "include/api/status.h"
#include "include/api/types.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/tensor.h"
namespace mindspore {
namespace dataset {
// Abstract-ish base class for per-device resource managers used by the eager
// Execute path. Every virtual returns an "implement me" error (or null) by
// default, so derived classes (e.g. AscendResource) override only what the
// device supports.
class DeviceResource {
public:
DeviceResource() = default;
// Virtual dtor: instances are deleted through DeviceResource pointers.
virtual ~DeviceResource() = default;
// Initialize the device; default returns an unexpected-error Status.
virtual Status InitResource();
// Tear down the device; default returns an unexpected-error Status.
virtual Status FinalizeResource();
// Copy a host MSTensor into device memory; default returns an error.
virtual Status Sink(const mindspore::MSTensor &host_input, std::shared_ptr<DeviceTensor> *device_input);
// Copy a DeviceTensor back into a host Tensor; default returns an error.
virtual Status Pop(const std::shared_ptr<DeviceTensor> &device_output, std::shared_ptr<Tensor> *host_output);
// Opaque handle to the device's processor object; default logs and returns null.
virtual std::shared_ptr<void> GetInstance();
// Free device-side buffers; default returns an unexpected-error Status.
virtual Status DeviceDataRelease();
};
} // namespace dataset
} // namespace mindspore
#endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DEVICE_RESOURCE_H_

@ -15,8 +15,8 @@
*/
#include "minddata/dataset/core/global_context.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
@ -28,15 +28,14 @@ Status DeviceTensor::SetYuvStrideShape_(const uint32_t &width, const uint32_t &w
std::vector<uint32_t> DeviceTensor::GetYuvStrideShape() { return YUV_shape_; }
#ifdef ENABLE_ACL
Status DeviceTensor::SetAttributes(const std::shared_ptr<DvppDataInfo> &data_ptr) {
device_data_ = data_ptr->data;
Status DeviceTensor::SetAttributes(uint8_t *data_ptr, const uint32_t &dataSize, const uint32_t &width,
const uint32_t &widthStride, const uint32_t &height, const uint32_t &heightStride) {
device_data_ = data_ptr;
CHECK_FAIL_RETURN_UNEXPECTED(device_data_ != nullptr, "Fail to get the device data.");
SetSize_(data_ptr->dataSize);
SetYuvStrideShape_(data_ptr->width, data_ptr->widthStride, data_ptr->height, data_ptr->heightStride);
SetSize_(dataSize);
SetYuvStrideShape_(width, widthStride, height, heightStride);
return Status::OK();
}
#endif
DeviceTensor::DeviceTensor(const TensorShape &shape, const DataType &type) : Tensor(shape, type) {
// grab the mem pool from global context and create the allocator for char data area

@ -14,18 +14,15 @@
* limitations under the License.
*/
#ifndef MINDSPORE_DEVICE_TENSOR_H
#define MINDSPORE_DEVICE_TENSOR_H
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DEVICE_TENSOR_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DEVICE_TENSOR_H_
#include <memory>
#include <utility>
#include <vector>
#include "include/api/status.h"
#include "minddata/dataset/core/tensor.h"
#ifdef ENABLE_ACL
#include "minddata/dataset/kernels/image/dvpp/utils/DvppCommon.h"
#endif
#include "minddata/dataset/core/constants.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
@ -36,9 +33,10 @@ class DeviceTensor : public Tensor {
DeviceTensor(const TensorShape &shape, const DataType &type);
~DeviceTensor() {}
#ifdef ENABLE_ACL
Status SetAttributes(const std::shared_ptr<DvppDataInfo> &data);
#endif
Status SetAttributes(uint8_t *data_ptr, const uint32_t &dataSize, const uint32_t &width, const uint32_t &widthStride,
const uint32_t &height, const uint32_t &heightStride);
static Status CreateEmpty(const TensorShape &shape, const DataType &type, std::shared_ptr<DeviceTensor> *out);
uint8_t *GetDeviceBuffer();
@ -62,4 +60,4 @@ class DeviceTensor : public Tensor {
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_DEVICE_TENSOR_H
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DEVICE_TENSOR_H_

@ -423,7 +423,6 @@ class Tensor {
static Status GetBufferInfo(Tensor *t, py::buffer_info *out);
#endif
#ifdef ENABLE_ACL
Status SetYuvShape(const uint32_t &width, const uint32_t &widthStride, const uint32_t &height,
const uint32_t &heightStride) {
std::vector<uint32_t> tmp{width, widthStride, height, heightStride};
@ -432,7 +431,6 @@ class Tensor {
}
std::vector<uint32_t> GetYuvShape() { return yuv_shape_; }
#endif
/// TensorIterator is a linear iterator that can be used to iterate over the elements of the Tensor
/// The order elements is as the memory layout (i.e., row-major) [[1,2,3],[4,5,6] --> 1,2,3,4,5,6
@ -697,10 +695,8 @@ class Tensor {
/// pointer to the end of the physical data
unsigned char *data_end_ = nullptr;
#ifdef ENABLE_ACL
/// shape for interpretation of YUV image
std::vector<uint32_t> yuv_shape_;
#endif
private:
friend class DETensor;

@ -22,12 +22,12 @@
#include <memory>
#include "include/api/context.h"
#include "include/api/types.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/include/constants.h"
#include "minddata/dataset/include/transforms.h"
namespace mindspore {
namespace dataset {
class AscendResource; // Class to manage the resource of Ascend310
// class to run tensor operations in eager mode
class Execute {
@ -51,9 +51,8 @@ class Execute {
/// \param[out] out Result tensor after transform
/// \return - Status
Status operator()(const std::vector<mindspore::MSTensor> &input_tensor_list, std::vector<mindspore::MSTensor> *out);
#ifdef ENABLE_ACL
Status DeviceMemoryRelease();
#endif
private:
Status validate_device_();
@ -61,9 +60,8 @@ class Execute {
std::vector<std::shared_ptr<TensorOperation>> ops_;
std::string device_type_;
#ifdef ENABLE_ACL
std::shared_ptr<AscendResource> D_resource_;
#endif
std::shared_ptr<DeviceResource> device_resource_;
};
} // namespace dataset

@ -46,7 +46,8 @@ Status DvppCropJpegOp::Compute(const std::shared_ptr<DeviceTensor> &input, std::
const TensorShape dvpp_shape({1, 1, 1});
const DataType dvpp_data_type(DataType::DE_UINT8);
mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
(*output)->SetAttributes(CropOut);
(*output)->SetAttributes(CropOut->data, CropOut->dataSize, CropOut->width, CropOut->widthStride, CropOut->height,
CropOut->heightStride);
if (!((*output)->HasDeviceData())) {
std::string error = "[ERROR] Fail to get the Output result from device memory!";
RETURN_STATUS_UNEXPECTED(error);
@ -136,8 +137,11 @@ Status DvppCropJpegOp::OutputShape(const std::vector<TensorShape> &inputs, std::
return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
}
Status DvppCropJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
processor_ = processor;
Status DvppCropJpegOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) {
processor_ = std::static_pointer_cast<MDAclProcess>(resource->GetInstance());
if (!processor_) {
RETURN_STATUS_UNEXPECTED("Resource initialize fail, please check your env");
}
processor_->SetCropParas(crop_width_, crop_height_);
return Status::OK();
}

@ -24,9 +24,11 @@
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
@ -48,7 +50,7 @@ class DvppCropJpegOp : public TensorOp {
std::string Name() const override { return kDvppCropJpegOp; }
Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
Status SetAscendResource(const std::shared_ptr<DeviceResource> &resource) override;
private:
uint32_t crop_height_;

@ -43,7 +43,8 @@ Status DvppDecodeJpegOp::Compute(const std::shared_ptr<DeviceTensor> &input, std
const TensorShape dvpp_shape({1, 1, 1});
const DataType dvpp_data_type(DataType::DE_UINT8);
mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
(*output)->SetAttributes(DecodeOut);
(*output)->SetAttributes(DecodeOut->data, DecodeOut->dataSize, DecodeOut->width, DecodeOut->widthStride,
DecodeOut->height, DecodeOut->heightStride);
if (!((*output)->HasDeviceData())) {
std::string error = "[ERROR] Fail to get the Output result from memory!";
RETURN_STATUS_UNEXPECTED(error);
@ -121,8 +122,11 @@ Status DvppDecodeJpegOp::Compute(const std::shared_ptr<Tensor> &input, std::shar
return Status::OK();
}
Status DvppDecodeJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
processor_ = processor;
Status DvppDecodeJpegOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) {
processor_ = std::static_pointer_cast<MDAclProcess>(resource->GetInstance());
if (!processor_) {
RETURN_STATUS_UNEXPECTED("Resource initialize fail, please check your env");
}
return Status::OK();
}

@ -24,6 +24,7 @@
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
@ -48,7 +49,7 @@ class DvppDecodeJpegOp : public TensorOp {
std::string Name() const override { return kDvppDecodeJpegOp; }
Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
Status SetAscendResource(const std::shared_ptr<DeviceResource> &resource) override;
private:
std::shared_ptr<MDAclProcess> processor_;

@ -42,7 +42,8 @@ Status DvppDecodePngOp::Compute(const std::shared_ptr<DeviceTensor> &input, std:
const TensorShape dvpp_shape({1, 1, 1});
const DataType dvpp_data_type(DataType::DE_UINT8);
mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
(*output)->SetAttributes(DecodeOut);
(*output)->SetAttributes(DecodeOut->data, DecodeOut->dataSize, DecodeOut->width, DecodeOut->widthStride,
DecodeOut->height, DecodeOut->heightStride);
if (!((*output)->HasDeviceData())) {
std::string error = "[ERROR] Fail to get the Output result from memory!";
RETURN_STATUS_UNEXPECTED(error);
@ -129,8 +130,11 @@ Status DvppDecodePngOp::OutputShape(const std::vector<TensorShape> &inputs, std:
return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
}
Status DvppDecodePngOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
processor_ = processor;
Status DvppDecodePngOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) {
processor_ = std::static_pointer_cast<MDAclProcess>(resource->GetInstance());
if (!processor_) {
RETURN_STATUS_UNEXPECTED("Resource initialize fail, please check your env");
}
return Status::OK();
}

@ -24,8 +24,10 @@
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
@ -47,7 +49,7 @@ class DvppDecodePngOp : public TensorOp {
std::string Name() const override { return kDvppDecodePngOp; }
Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
Status SetAscendResource(const std::shared_ptr<DeviceResource> &resource) override;
private:
std::shared_ptr<MDAclProcess> processor_;

@ -43,7 +43,8 @@ Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr<DeviceTensor> &
const TensorShape dvpp_shape({1, 1, 1});
const DataType dvpp_data_type(DataType::DE_UINT8);
mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
(*output)->SetAttributes(CropOut);
(*output)->SetAttributes(CropOut->data, CropOut->dataSize, CropOut->width, CropOut->widthStride, CropOut->height,
CropOut->heightStride);
if (!((*output)->HasDeviceData())) {
std::string error = "[ERROR] Fail to get the Output result from memory!";
RETURN_STATUS_UNEXPECTED(error);
@ -127,8 +128,11 @@ Status DvppDecodeResizeCropJpegOp::OutputShape(const std::vector<TensorShape> &i
return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
}
Status DvppDecodeResizeCropJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
processor_ = processor;
Status DvppDecodeResizeCropJpegOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) {
processor_ = std::static_pointer_cast<MDAclProcess>(resource->GetInstance());
if (!processor_) {
RETURN_STATUS_UNEXPECTED("Resource initialize fail, please check your env");
}
processor_->SetResizeParas(resized_width_, resized_height_);
processor_->SetCropParas(crop_width_, crop_height_);
return Status::OK();

@ -24,9 +24,11 @@
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
@ -51,7 +53,7 @@ class DvppDecodeResizeCropJpegOp : public TensorOp {
std::string Name() const override { return kDvppDecodeResizeCropJpegOp; }
Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
Status SetAscendResource(const std::shared_ptr<DeviceResource> &resource) override;
private:
int32_t crop_height_;

@ -42,7 +42,8 @@ Status DvppDecodeResizeJpegOp::Compute(const std::shared_ptr<DeviceTensor> &inpu
const TensorShape dvpp_shape({1, 1, 1});
const DataType dvpp_data_type(DataType::DE_UINT8);
mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
(*output)->SetAttributes(ResizeOut);
(*output)->SetAttributes(ResizeOut->data, ResizeOut->dataSize, ResizeOut->width, ResizeOut->widthStride,
ResizeOut->height, ResizeOut->heightStride);
if (!((*output)->HasDeviceData())) {
std::string error = "[ERROR] Fail to get the Output result from memory!";
RETURN_STATUS_UNEXPECTED(error);
@ -125,8 +126,11 @@ Status DvppDecodeResizeJpegOp::OutputShape(const std::vector<TensorShape> &input
return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
}
Status DvppDecodeResizeJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
processor_ = processor;
Status DvppDecodeResizeJpegOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) {
processor_ = std::static_pointer_cast<MDAclProcess>(resource->GetInstance());
if (!processor_) {
RETURN_STATUS_UNEXPECTED("Resource initialize fail, please check your env");
}
processor_->SetResizeParas(resized_width_, resized_height_);
return Status::OK();
}

@ -24,8 +24,10 @@
#include "acl/acl.h"
#include "mindspore/core/utils/log_adapter.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
@ -48,7 +50,7 @@ class DvppDecodeResizeJpegOp : public TensorOp {
std::string Name() const override { return kDvppDecodeResizeJpegOp; }
Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
Status SetAscendResource(const std::shared_ptr<DeviceResource> &resource) override;
private:
int32_t resized_height_;

@ -47,7 +47,8 @@ Status DvppResizeJpegOp::Compute(const std::shared_ptr<DeviceTensor> &input, std
const TensorShape dvpp_shape({1, 1, 1});
const DataType dvpp_data_type(DataType::DE_UINT8);
mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, output);
(*output)->SetAttributes(ResizeOut); // Set attributes for output DeviceTensor
(*output)->SetAttributes(ResizeOut->data, ResizeOut->dataSize, ResizeOut->width, ResizeOut->widthStride,
ResizeOut->height, ResizeOut->heightStride);
if (!((*output)->HasDeviceData())) {
std::string error = "[ERROR] Fail to get the Output result from device memory!";
RETURN_STATUS_UNEXPECTED(error);
@ -128,8 +129,11 @@ Status DvppResizeJpegOp::Compute(const std::shared_ptr<Tensor> &input, std::shar
return Status::OK();
}
Status DvppResizeJpegOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
processor_ = processor;
Status DvppResizeJpegOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) {
processor_ = std::static_pointer_cast<MDAclProcess>(resource->GetInstance());
if (!processor_) {
RETURN_STATUS_UNEXPECTED("Resource initialize fail, please check your env");
}
processor_->SetResizeParas(resized_width_, resized_height_);
return Status::OK();
}

@ -24,8 +24,10 @@
#include "acl/acl.h"
#include "minddata/dataset/core/data_type.h"
#include "minddata/dataset/core/device_tensor.h"
#include "minddata/dataset/core/device_resource.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h"
#include "minddata/dataset/kernels/image/dvpp/utils/MDAclProcess.h"
#include "minddata/dataset/kernels/image/dvpp/utils/ResourceManager.h"
#include "minddata/dataset/kernels/tensor_op.h"
#include "minddata/dataset/util/status.h"
@ -49,7 +51,7 @@ class DvppResizeJpegOp : public TensorOp {
std::string Name() const override { return kDvppDecodeResizeJpegOp; }
Status SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) override;
Status SetAscendResource(const std::shared_ptr<DeviceResource> &resource) override;
private:
int32_t resized_height_;

@ -157,7 +157,7 @@ std::shared_ptr<DvppCommon> MDAclProcess::GetDeviceModule() { return dvppCommon_
* Sink data from Tensor(On host) to DeviceTensor(On device)
* Two cases are different, jpeg and png
*/
APP_ERROR MDAclProcess::H2D_Sink(std::shared_ptr<mindspore::dataset::Tensor> &input,
APP_ERROR MDAclProcess::H2D_Sink(const std::shared_ptr<mindspore::dataset::Tensor> &input,
std::shared_ptr<mindspore::dataset::DeviceTensor> &device_input) {
RawData imageinfo;
uint32_t filesize = input->SizeInBytes();
@ -181,11 +181,12 @@ APP_ERROR MDAclProcess::H2D_Sink(std::shared_ptr<mindspore::dataset::Tensor> &in
const mindspore::dataset::DataType dvpp_data_type(mindspore::dataset::DataType::DE_UINT8);
const mindspore::dataset::TensorShape dvpp_shape({1, 1, 1});
mindspore::dataset::DeviceTensor::CreateEmpty(dvpp_shape, dvpp_data_type, &device_input);
device_input->SetAttributes(deviceInputData);
device_input->SetAttributes(deviceInputData->data, deviceInputData->dataSize, deviceInputData->width,
deviceInputData->widthStride, deviceInputData->height, deviceInputData->heightStride);
return APP_ERR_OK;
}
APP_ERROR MDAclProcess::D2H_Pop(std::shared_ptr<mindspore::dataset::DeviceTensor> &device_output,
APP_ERROR MDAclProcess::D2H_Pop(const std::shared_ptr<mindspore::dataset::DeviceTensor> &device_output,
std::shared_ptr<mindspore::dataset::Tensor> &output) {
void *resHostBuf = nullptr;
APP_ERROR ret = aclrtMallocHost(&resHostBuf, device_output->DeviceDataSize());

@ -84,10 +84,10 @@ class MDAclProcess {
// API for access device memory of decode data
std::shared_ptr<DvppDataInfo> Get_Decode_DeviceData();
APP_ERROR H2D_Sink(std::shared_ptr<mindspore::dataset::Tensor> &input,
APP_ERROR H2D_Sink(const std::shared_ptr<mindspore::dataset::Tensor> &input,
std::shared_ptr<mindspore::dataset::DeviceTensor> &device_input);
APP_ERROR D2H_Pop(std::shared_ptr<mindspore::dataset::DeviceTensor> &device_output,
APP_ERROR D2H_Pop(const std::shared_ptr<mindspore::dataset::DeviceTensor> &device_output,
std::shared_ptr<mindspore::dataset::Tensor> &output);
// D-chip memory release

@ -70,11 +70,10 @@ Status TensorOp::OutputType(const std::vector<DataType> &inputs, std::vector<Dat
outputs = inputs;
return Status::OK();
}
#ifdef ENABLE_ACL
Status TensorOp::SetAscendResource(const std::shared_ptr<MDAclProcess> &processor) {
Status TensorOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) {
return Status(StatusCode::kMDUnexpectedError,
"This is a CPU operator which doesn't have Ascend Resource. Please verify your context");
}
#endif
} // namespace dataset
} // namespace mindspore

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save