!12484 MD C++ api decouple ABI compile macro

From: @luoyang42
Reviewed-by: 
Signed-off-by:
pull/12484/MERGE
mindspore-ci-bot committed 4 years ago (via Gitee)
commit 3f3621e429

@ -16,11 +16,147 @@
#ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
#include <algorithm>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
namespace mindspore {
inline std::vector<char> StringToChar(const std::string &s) { return std::vector<char>(s.begin(), s.end()); }
inline std::string CharToString(const std::vector<char> &c) { return std::string(c.begin(), c.end()); }
inline std::optional<std::vector<char>> OptionalStringToChar(const std::optional<std::string> &s) {
// Check for nullopt before dereferencing to avoid undefined behaviour.
if (s == std::nullopt) return std::nullopt;
return std::vector<char>(s->begin(), s->end());
}
inline std::optional<std::string> OptionalCharToString(const std::optional<std::vector<char>> &c) {
// Check for nullopt before dereferencing to avoid undefined behaviour.
if (c == std::nullopt) return std::nullopt;
return std::string(c->begin(), c->end());
}
inline std::pair<std::vector<char>, int32_t> PairStringToChar(const std::pair<std::string, int32_t> &s) {
return std::pair<std::vector<char>, int32_t>(std::vector<char>(s.first.begin(), s.first.end()), s.second);
}
inline std::pair<std::string, int32_t> PairCharToString(const std::pair<std::vector<char>, int32_t> &c) {
return std::pair<std::string, int32_t>(std::string(c.first.begin(), c.first.end()), c.second);
}
inline std::vector<std::vector<char>> VectorStringToChar(const std::vector<std::string> &s) {
std::vector<std::vector<char>> ret;
std::transform(s.begin(), s.end(), std::back_inserter(ret),
[](auto str) { return std::vector<char>(str.begin(), str.end()); });
return ret;
}
inline std::vector<std::string> VectorCharToString(const std::vector<std::vector<char>> &c) {
std::vector<std::string> ret;
std::transform(c.begin(), c.end(), std::back_inserter(ret),
[](auto ch) { return std::string(ch.begin(), ch.end()); });
return ret;
}
inline std::set<std::vector<char>> SetStringToChar(const std::set<std::string> &s) {
std::set<std::vector<char>> ret;
std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()),
[](auto str) { return std::vector<char>(str.begin(), str.end()); });
return ret;
}
inline std::set<std::string> SetCharToString(const std::set<std::vector<char>> &c) {
std::set<std::string> ret;
std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()),
[](auto ch) { return std::string(ch.begin(), ch.end()); });
return ret;
}
inline std::map<std::vector<char>, int32_t> MapStringToChar(const std::map<std::string, int32_t> &s) {
std::map<std::vector<char>, int32_t> ret;
std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {
return std::pair<std::vector<char>, int32_t>(std::vector<char>(str.first.begin(), str.first.end()), str.second);
});
return ret;
}
inline std::map<std::string, int32_t> MapCharToString(const std::map<std::vector<char>, int32_t> &c) {
std::map<std::string, int32_t> ret;
std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {
return std::pair<std::string, int32_t>(std::string(ch.first.begin(), ch.first.end()), ch.second);
});
return ret;
}
inline std::map<std::vector<char>, std::vector<char>> UnorderedMapStringToChar(
const std::unordered_map<std::string, std::string> &s) {
std::map<std::vector<char>, std::vector<char>> ret;
std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {
return std::pair<std::vector<char>, std::vector<char>>(std::vector<char>(str.first.begin(), str.first.end()),
std::vector<char>(str.second.begin(), str.second.end()));
});
return ret;
}
inline std::unordered_map<std::string, std::string> UnorderedMapCharToString(
const std::map<std::vector<char>, std::vector<char>> &c) {
std::unordered_map<std::string, std::string> ret;
std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {
return std::pair<std::string, std::string>(std::string(ch.first.begin(), ch.first.end()),
std::string(ch.second.begin(), ch.second.end()));
});
return ret;
}
inline std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ClassIndexStringToChar(
const std::vector<std::pair<std::string, std::vector<int32_t>>> &s) {
std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ret;
std::transform(s.begin(), s.end(), std::back_inserter(ret), [](auto str) {
return std::pair<std::vector<char>, std::vector<int32_t>>(std::vector<char>(str.first.begin(), str.first.end()),
str.second);
});
return ret;
}
inline std::vector<std::pair<std::string, std::vector<int32_t>>> ClassIndexCharToString(
const std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> &c) {
std::vector<std::pair<std::string, std::vector<int32_t>>> ret;
std::transform(c.begin(), c.end(), std::back_inserter(ret), [](auto ch) {
return std::pair<std::string, std::vector<int32_t>>(std::string(ch.first.begin(), ch.first.end()), ch.second);
});
return ret;
}
template <class T>
inline std::map<std::vector<char>, T> PadInfoStringToChar(const std::map<std::string, T> &s_pad_info) {
std::map<std::vector<char>, T> ret;
std::transform(s_pad_info.begin(), s_pad_info.end(), std::inserter(ret, ret.begin()), [](auto str) {
return std::pair<std::vector<char>, T>(std::vector<char>(str.first.begin(), str.first.end()), str.second);
});
return ret;
}
template <class T>
inline std::map<std::string, T> PadInfoCharToString(const std::map<std::vector<char>, T> &c_pad_info) {
std::map<std::string, T> ret;
std::transform(c_pad_info.begin(), c_pad_info.end(), std::inserter(ret, ret.begin()), [](auto ch) {
return std::pair<std::string, T>(std::string(ch.first.begin(), ch.first.end()), ch.second);
});
return ret;
}
template <class T>
inline void TensorMapCharToString(const std::map<std::vector<char>, T> *c, std::unordered_map<std::string, T> *s) {
for (auto ch : *c) {
auto key = std::string(ch.first.begin(), ch.first.end());
auto val = ch.second;
s->insert(std::pair<std::string, T>(key, val));
}
}
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
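
The helpers above are the core of the change: every exported API that used to take or return std::string (or containers of it) gets a char-based twin, and the std::string version becomes a header-only inline wrapper that converts at the call site. Because std::string's layout depends on the ABI compile macro (e.g. _GLIBCXX_USE_CXX11_ABI in libstdc++) while char does not, only ABI-stable types cross the library boundary. A minimal sketch of the pattern, with hypothetical names (SetDatasetDir / SetDatasetDirCharIF are not part of the real API):

#include <string>
#include <vector>
#include "include/api/dual_abi_helper.h"

namespace mindspore {
// Normally implemented inside the shared library (.cc); only char-based
// types cross the ABI boundary. A trivial body is given here so the sketch
// is self-contained.
bool SetDatasetDirCharIF(const std::vector<char> &dir) { return !dir.empty(); }

// Header-only overload: compiled with the caller's own ABI settings, so the
// std::string object never crosses into the library.
inline bool SetDatasetDir(const std::string &dir) { return SetDatasetDirCharIF(StringToChar(dir)); }
}  // namespace mindspore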

@ -94,8 +94,8 @@ bool set_callback_timeback(int32_t timeout) {
int32_t get_callback_timeout() { return _config->callback_timeout(); }
// Function to load configurations from a file
bool load(std::string file) {
Status rc = _config->LoadFile(file);
bool load(const std::vector<char> &file) {
Status rc = _config->LoadFile(CharToString(file));
if (rc.IsError()) {
MS_LOG(ERROR) << rc << file;
return false;

File diff suppressed because it is too large.

@ -26,7 +26,7 @@ Iterator::Iterator() : consumer_(nullptr) {}
Iterator::~Iterator() { Stop(); }
// Get the next row from the data pipeline.
Status Iterator::GetNextRow(MSTensorMap *row) {
Status Iterator::GetNextRowCharIF(MSTensorMapChar *row) {
// Clean data buffer
row->clear();
std::unordered_map<std::string, std::shared_ptr<dataset::Tensor>> md_map;
@ -38,7 +38,8 @@ Status Iterator::GetNextRow(MSTensorMap *row) {
}
for (auto de_tensor : md_map) {
CHECK_FAIL_RETURN_UNEXPECTED(de_tensor.second->HasData(), "Apply transform failed, output tensor has no data");
row->insert(std::make_pair(de_tensor.first, mindspore::MSTensor(std::make_shared<DETensor>(de_tensor.second))));
std::vector<char> col_name(de_tensor.first.begin(), de_tensor.first.end());
row->insert(std::make_pair(col_name, mindspore::MSTensor(std::make_shared<DETensor>(de_tensor.second))));
}
return Status::OK();

File diff suppressed because it is too large.

@ -30,25 +30,30 @@ namespace transforms {
// (In alphabetical order)
// Constructor to Compose.
Compose::Compose(const std::vector<TensorTransform *> &transforms) {
struct Compose::Data {
std::vector<std::shared_ptr<TensorOperation>> transforms_;
};
Compose::Compose(const std::vector<TensorTransform *> &transforms) : data_(std::make_shared<Data>()) {
(void)std::transform(
transforms.begin(), transforms.end(), std::back_inserter(transforms_),
transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](TensorTransform *op) -> std::shared_ptr<TensorOperation> { return op != nullptr ? op->Parse() : nullptr; });
}
Compose::Compose(const std::vector<std::shared_ptr<TensorTransform>> &transforms) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(transforms_),
Compose::Compose(const std::vector<std::shared_ptr<TensorTransform>> &transforms) : data_(std::make_shared<Data>()) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](std::shared_ptr<TensorTransform> op) -> std::shared_ptr<TensorOperation> {
return op != nullptr ? op->Parse() : nullptr;
});
}
Compose::Compose(const std::vector<std::reference_wrapper<TensorTransform>> &transforms) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(transforms_),
Compose::Compose(const std::vector<std::reference_wrapper<TensorTransform>> &transforms)
: data_(std::make_shared<Data>()) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](TensorTransform &op) -> std::shared_ptr<TensorOperation> { return op.Parse(); });
}
std::shared_ptr<TensorOperation> Compose::Parse() { return std::make_shared<ComposeOperation>(transforms_); }
std::shared_ptr<TensorOperation> Compose::Parse() { return std::make_shared<ComposeOperation>(data_->transforms_); }
// Constructor to Duplicate
Duplicate::Duplicate() {}
@ -56,59 +61,87 @@ Duplicate::Duplicate() {}
std::shared_ptr<TensorOperation> Duplicate::Parse() { return std::make_shared<DuplicateOperation>(); }
// Constructor to OneHot
OneHot::OneHot(int32_t num_classes) : num_classes_(num_classes) {}
struct OneHot::Data {
explicit Data(int32_t num_classes) : num_classes_(num_classes) {}
float num_classes_;
};
OneHot::OneHot(int32_t num_classes) : data_(std::make_shared<Data>(num_classes)) {}
std::shared_ptr<TensorOperation> OneHot::Parse() { return std::make_shared<OneHotOperation>(num_classes_); }
std::shared_ptr<TensorOperation> OneHot::Parse() { return std::make_shared<OneHotOperation>(data_->num_classes_); }
// Constructor to RandomApply.
RandomApply::RandomApply(const std::vector<TensorTransform *> &transforms, double prob) : prob_(prob) {
struct RandomApply::Data {
std::vector<std::shared_ptr<TensorOperation>> transforms_;
double prob_;
};
RandomApply::RandomApply(const std::vector<TensorTransform *> &transforms, double prob)
: data_(std::make_shared<Data>()) {
(void)std::transform(
transforms.begin(), transforms.end(), std::back_inserter(transforms_),
transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](TensorTransform *op) -> std::shared_ptr<TensorOperation> { return op != nullptr ? op->Parse() : nullptr; });
data_->prob_ = prob;
}
RandomApply::RandomApply(const std::vector<std::shared_ptr<TensorTransform>> &transforms, double prob) : prob_(prob) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(transforms_),
RandomApply::RandomApply(const std::vector<std::shared_ptr<TensorTransform>> &transforms, double prob)
: data_(std::make_shared<Data>()) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](std::shared_ptr<TensorTransform> op) -> std::shared_ptr<TensorOperation> {
return op != nullptr ? op->Parse() : nullptr;
});
data_->prob_ = prob;
}
RandomApply::RandomApply(const std::vector<std::reference_wrapper<TensorTransform>> &transforms, double prob)
: prob_(prob) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(transforms_),
: data_(std::make_shared<Data>()) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](TensorTransform &op) -> std::shared_ptr<TensorOperation> { return op.Parse(); });
data_->prob_ = prob;
}
std::shared_ptr<TensorOperation> RandomApply::Parse() {
return std::make_shared<RandomApplyOperation>(transforms_, prob_);
return std::make_shared<RandomApplyOperation>(data_->transforms_, data_->prob_);
}
// Constructor to RandomChoice.
RandomChoice::RandomChoice(const std::vector<TensorTransform *> &transforms) {
struct RandomChoice::Data {
std::vector<std::shared_ptr<TensorOperation>> transforms_;
};
RandomChoice::RandomChoice(const std::vector<TensorTransform *> &transforms) : data_(std::make_shared<Data>()) {
(void)std::transform(
transforms.begin(), transforms.end(), std::back_inserter(transforms_),
transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](TensorTransform *op) -> std::shared_ptr<TensorOperation> { return op != nullptr ? op->Parse() : nullptr; });
}
RandomChoice::RandomChoice(const std::vector<std::shared_ptr<TensorTransform>> &transforms) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(transforms_),
RandomChoice::RandomChoice(const std::vector<std::shared_ptr<TensorTransform>> &transforms)
: data_(std::make_shared<Data>()) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](std::shared_ptr<TensorTransform> op) -> std::shared_ptr<TensorOperation> {
return op != nullptr ? op->Parse() : nullptr;
});
}
RandomChoice::RandomChoice(const std::vector<std::reference_wrapper<TensorTransform>> &transforms) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(transforms_),
RandomChoice::RandomChoice(const std::vector<std::reference_wrapper<TensorTransform>> &transforms)
: data_(std::make_shared<Data>()) {
(void)std::transform(transforms.begin(), transforms.end(), std::back_inserter(data_->transforms_),
[](TensorTransform &op) -> std::shared_ptr<TensorOperation> { return op.Parse(); });
}
std::shared_ptr<TensorOperation> RandomChoice::Parse() { return std::make_shared<RandomChoiceOperation>(transforms_); }
std::shared_ptr<TensorOperation> RandomChoice::Parse() {
return std::make_shared<RandomChoiceOperation>(data_->transforms_);
}
// Constructor to TypeCast
TypeCast::TypeCast(std::string data_type) : data_type_(data_type) {}
struct TypeCast::Data {
explicit Data(const std::vector<char> &data_type) : data_type_(CharToString(data_type)) {}
std::string data_type_;
};
TypeCast::TypeCast(const std::vector<char> &data_type) : data_(std::make_shared<Data>(data_type)) {}
std::shared_ptr<TensorOperation> TypeCast::Parse() { return std::make_shared<TypeCastOperation>(data_type_); }
std::shared_ptr<TensorOperation> TypeCast::Parse() { return std::make_shared<TypeCastOperation>(data_->data_type_); }
// Constructor to Unique
Unique::Unique() {}
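
Note the companion idiom in this .cc hunk (and in the matching header hunks further below): data members that involve std::string or other std containers are moved out of the exported class into an opaque Data struct held through a std::shared_ptr, so the class layout in the public header no longer depends on ABI-sensitive types. A trimmed, illustrative sketch of that idiom; TypeCastSketch is a made-up name modelled on the real TypeCast:

#include <memory>
#include <string>
#include <vector>
#include "include/api/dual_abi_helper.h"

namespace mindspore {
// Public header side: no std::string member, only an opaque handle.
class TypeCastSketch {
 public:
  explicit TypeCastSketch(const std::vector<char> &data_type);
  // Header-only convenience constructor for std::string callers.
  explicit TypeCastSketch(const std::string &data_type) : TypeCastSketch(StringToChar(data_type)) {}

 private:
  struct Data;                  // defined only in the library's .cc file
  std::shared_ptr<Data> data_;  // keeps the exported class layout ABI-stable
};

// Library (.cc) side: the std::string lives only behind the ABI boundary.
struct TypeCastSketch::Data {
  explicit Data(const std::vector<char> &data_type) : data_type_(CharToString(data_type)) {}
  std::string data_type_;
};

TypeCastSketch::TypeCastSketch(const std::vector<char> &data_type) : data_(std::make_shared<Data>(data_type)) {}
}  // namespace mindspore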

File diff suppressed because it is too large.

@ -20,6 +20,8 @@
#include <string>
#include <optional>
#include <utility>
#include <vector>
#include "include/api/dual_abi_helper.h"
#include "minddata/dataset/engine/cache/cache_client.h"
#include "minddata/dataset/engine/datasetops/cache_op.h"
#include "minddata/dataset/engine/ir/cache/dataset_cache.h"
@ -39,13 +41,13 @@ class DatasetCacheImpl : public DatasetCache {
/// \param port optional port (default=50052).
/// \param num_connections optional number of connections (default=12).
/// \param prefetch_sz optional prefetch size (default=20).
DatasetCacheImpl(session_id_type id, uint64_t mem_sz, bool spill, std::optional<std::string> hostname,
DatasetCacheImpl(session_id_type id, uint64_t mem_sz, bool spill, std::optional<std::vector<char>> hostname,
std::optional<int32_t> port, std::optional<int32_t> num_connections,
std::optional<int32_t> prefetch_sz)
: session_id_(id),
cache_mem_sz_(mem_sz),
spill_(spill),
hostname_(std::move(hostname)),
hostname_(OptionalCharToString(hostname)),
port_(std::move(port)),
num_connections_(std::move(num_connections)),
prefetch_sz_(std::move(prefetch_sz)) {}

@ -19,6 +19,8 @@
#include <cstdint>
#include <string>
#include <vector>
#include "include/api/dual_abi_helper.h"
namespace mindspore {
namespace dataset {
@ -70,7 +72,12 @@ int32_t get_callback_timeout();
/// \brief Function to load configuration from a file.
/// \param[in] file path of the configuration file to be loaded.
bool load(std::string file);
/// \note This API exists because std::string is constrained by the ABI compile macro, while char is not.
bool load(const std::vector<char> &file);
/// \brief Function to load configuration from a file.
/// \param[in] file path of the configuration file to be loaded.
inline bool load(std::string file) { return load(StringToChar(file)); }
} // namespace config
} // namespace dataset
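
From the caller's point of view nothing changes at the source level: the std::string overload still exists, it just forwards through the char interface. A hypothetical usage sketch (the header path and config file name are illustrative):

#include <iostream>
#include "minddata/dataset/include/config.h"  // header path is illustrative

int main() {
  // Resolves to the inline std::string overload above, which converts the
  // path with StringToChar() before crossing the library boundary.
  bool ok = mindspore::dataset::config::load("/path/to/dataset_config.json");
  std::cout << (ok ? "config loaded" : "config load failed") << std::endl;
  return ok ? 0 : 1;
}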

File diff suppressed because it is too large.

@ -17,10 +17,12 @@
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_ITERATOR_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_ITERATOR_H_
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "include/api/dual_abi_helper.h"
#include "include/api/status.h"
#include "include/api/types.h"
@ -39,6 +41,7 @@ class IteratorConsumer;
class Dataset;
using MSTensorMap = std::unordered_map<std::string, mindspore::MSTensor>;
using MSTensorMapChar = std::map<std::vector<char>, mindspore::MSTensor>;
using MSTensorVec = std::vector<mindspore::MSTensor>;
// Abstract class for iterating over the dataset.
@ -60,7 +63,18 @@ class Iterator {
/// \note Type of return data is a map (with column name).
/// \param[out] row - the output tensor row.
/// \return - a Status error code, returns OK if no error encountered.
Status GetNextRow(MSTensorMap *row);
Status GetNextRow(MSTensorMap *row) {
MSTensorMapChar row_;
row_.clear();
row->clear();
Status s = GetNextRowCharIF(&row_);
TensorMapCharToString(&row_, row);
return s;
}
// Char interface (CharIF) of GetNextRow
// This API exists because std::string is constrained by the ABI compile macro, while char is not.
Status GetNextRowCharIF(MSTensorMapChar *row);
/// \brief Function to get the next row from the data pipeline.
/// \note Type of return data is a vector (without column name).

File diff suppressed because it is too large.

@ -22,6 +22,7 @@
#include <string>
#include <vector>
#include "include/api/dual_abi_helper.h"
#include "include/api/status.h"
#include "minddata/dataset/include/constants.h"
@ -72,7 +73,8 @@ class Compose : public TensorTransform {
std::shared_ptr<TensorOperation> Parse() override;
private:
std::vector<std::shared_ptr<TensorOperation>> transforms_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief Duplicate Op.
@ -107,7 +109,8 @@ class OneHot : public TensorTransform {
std::shared_ptr<TensorOperation> Parse() override;
private:
float num_classes_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief RandomApply Op.
@ -129,8 +132,8 @@ class RandomApply : public TensorTransform {
std::shared_ptr<TensorOperation> Parse() override;
private:
std::vector<std::shared_ptr<TensorOperation>> transforms_;
double prob_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief RandomChoice Op.
@ -151,7 +154,8 @@ class RandomChoice : public TensorTransform {
std::shared_ptr<TensorOperation> Parse() override;
private:
std::vector<std::shared_ptr<TensorOperation>> transforms_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief TypeCast Op.
@ -160,7 +164,9 @@ class TypeCast : public TensorTransform {
public:
/// \brief Constructor.
/// \param[in] data_type mindspore.dtype to be cast to.
explicit TypeCast(std::string data_type);
explicit TypeCast(std::string data_type) : TypeCast(StringToChar(data_type)) {}
explicit TypeCast(const std::vector<char> &data_type);
/// \brief Destructor
~TypeCast() = default;
@ -170,7 +176,8 @@ class TypeCast : public TensorTransform {
std::shared_ptr<TensorOperation> Parse() override;
private:
std::string data_type_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief Unique Op.

File diff suppressed because it is too large.

@ -50,7 +50,8 @@ class DvppDecodeResizeJpeg : public TensorTransform {
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<uint32_t> resize_;
struct Data;
std::shared_ptr<Data> data_;
};
class DvppDecodeResizeCropJpeg : public TensorTransform {
@ -70,8 +71,8 @@ class DvppDecodeResizeCropJpeg : public TensorTransform {
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<uint32_t> crop_;
std::vector<uint32_t> resize_;
struct Data;
std::shared_ptr<Data> data_;
};
class DvppDecodePng : public TensorTransform {

@ -62,12 +62,8 @@ class Affine : public TensorTransform {
std::shared_ptr<TensorOperation> Parse() override;
private:
float degrees_;
std::vector<float> translation_;
float scale_;
std::vector<float> shear_;
InterpolationMode interpolation_;
std::vector<uint8_t> fill_value_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief CenterCrop TensorTransform.
@ -90,7 +86,8 @@ class CenterCrop : public TensorTransform {
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<int32_t> size_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief Crop TensorTransform.
@ -112,8 +109,8 @@ class Crop : public TensorTransform {
std::shared_ptr<TensorOperation> Parse() override;
private:
std::vector<int32_t> coordinates_;
std::vector<int32_t> size_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief Decode TensorTransform.
@ -134,7 +131,8 @@ class Decode : public TensorTransform {
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
bool rgb_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief Normalize TensorTransform.
@ -158,8 +156,8 @@ class Normalize : public TensorTransform {
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<float> mean_;
std::vector<float> std_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief RandomAffine TensorTransform.
@ -196,12 +194,8 @@ class RandomAffine : public TensorTransform {
std::shared_ptr<TensorOperation> Parse() override;
private:
std::vector<float_t> degrees_; // min_degree, max_degree
std::vector<float_t> translate_range_; // maximum x translation percentage, maximum y translation percentage
std::vector<float_t> scale_range_; // min_scale, max_scale
std::vector<float_t> shear_ranges_; // min_x_shear, max_x_shear, min_y_shear, max_y_shear
InterpolationMode interpolation_;
std::vector<uint8_t> fill_value_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief Resize TensorTransform.
@ -225,8 +219,8 @@ class Resize : public TensorTransform {
std::shared_ptr<TensorOperation> Parse(const MapTargetDevice &env) override;
private:
std::vector<int32_t> size_;
InterpolationMode interpolation_;
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief Rotate TensorTransform.

File diff suppressed because it is too large.

@ -26,16 +26,27 @@
namespace mindspore {
namespace dataset {
class DeviceResource;
// class to run tensor operations in eager mode
class Execute {
public:
/// \brief Constructor
explicit Execute(std::shared_ptr<TensorOperation> op);
// FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform
explicit Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice deviceType = MapTargetDevice::kCpu);
// explicit Execute(TensorTransform op, MapTargetDevice deviceType = MapTargetDevice::KCpu);
explicit Execute(TensorTransform *op, MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(std::vector<std::shared_ptr<TensorOperation>> ops);
explicit Execute(std::vector<std::shared_ptr<TensorOperation>> ops,
MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(std::vector<std::shared_ptr<TensorTransform>> ops,
MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops,
MapTargetDevice deviceType = MapTargetDevice::kCpu);
explicit Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType = MapTargetDevice::kCpu);
/// \brief Destructor
~Execute() = default;
~Execute();
/// \brief callable function to execute the TensorOperation in eager mode
/// \param[in] input Tensor to be transformed
@ -49,8 +60,16 @@ class Execute {
/// \return - Status
Status operator()(const std::vector<mindspore::MSTensor> &input_tensor_list, std::vector<mindspore::MSTensor> *out);
Status DeviceMemoryRelease();
private:
Status validate_device_();
std::vector<std::shared_ptr<TensorOperation>> ops_;
MapTargetDevice device_type_;
std::shared_ptr<DeviceResource> device_resource_;
};
} // namespace dataset

@ -17,10 +17,12 @@
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_ITERATOR_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_ITERATOR_H_
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "include/api/dual_abi_helper.h"
#include "include/api/status.h"
#include "include/api/types.h"
@ -39,6 +41,7 @@ class IteratorConsumer;
class Dataset;
using MSTensorMap = std::unordered_map<std::string, mindspore::MSTensor>;
using MSTensorMapChar = std::map<std::vector<char>, mindspore::MSTensor>;
using MSTensorVec = std::vector<mindspore::MSTensor>;
// Abstract class for iterating over the dataset.
@ -60,7 +63,18 @@ class Iterator {
/// \note Type of return data is a map (with column name).
/// \param[out] row - the output tensor row.
/// \return - a Status error code, returns OK if no error encountered.
Status GetNextRow(MSTensorMap *row);
Status GetNextRow(MSTensorMap *row) {
MSTensorMapChar row_;
row_.clear();
row->clear();
Status s = GetNextRowCharIF(&row_);
TensorMapCharToString(&row_, row);
return s;
}
// Char interface (CharIF) of GetNextRow
// This API exists because std::string is constrained by the ABI compile macro, while char is not.
Status GetNextRowCharIF(MSTensorMapChar *row);
/// \brief Function to get the next row from the data pipeline.
/// \note Type of return data is a vector (without column name).

File diff suppressed because it is too large.