!1157 dataset: add concat operation for dataset
Merge pull request !1157 from ms_yan/concat_datasetpull/1157/MERGE
commit
c680cfbf27
@ -0,0 +1,145 @@
|
||||
/**
|
||||
* Copyright 2020 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include <iomanip>
|
||||
#include <utility>
|
||||
|
||||
#include "common/utils.h"
|
||||
#include "dataset/core/config_manager.h"
|
||||
#include "dataset/engine/data_buffer.h"
|
||||
#include "dataset/engine/datasetops/concat_op.h"
|
||||
#include "dataset/engine/db_connector.h"
|
||||
#include "dataset/engine/execution_tree.h"
|
||||
|
||||
namespace mindspore {
|
||||
namespace dataset {
|
||||
// Builder constructor. Creates the builder object.
|
||||
ConcatOp::Builder::Builder() {
|
||||
std::shared_ptr<ConfigManager> cfg = GlobalContext::config_manager();
|
||||
builder_op_connector_size_ = cfg->op_connector_size();
|
||||
}
|
||||
|
||||
// The builder "build" method creates the final object.
|
||||
Status ConcatOp::Builder::Build(std::shared_ptr<ConcatOp> *ptr) {
|
||||
*ptr = std::make_shared<ConcatOp>(builder_op_connector_size_);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
// Constructor of the ConcatOp.
// children_num_ starts at 0 and is recomputed from child_.size() inside operator(),
// because children are attached to the op after construction during tree assembly.
ConcatOp::ConcatOp(int32_t op_connector_size) : PipelineOp(op_connector_size), children_num_(0) {}
|
||||
|
||||
// A function that prints info about the Operator
|
||||
void ConcatOp::Print(std::ostream &out, bool show_all) const {
|
||||
// Always show the id and name as first line regardless if this is summary or detailed print
|
||||
out << "(" << std::setw(2) << operator_id_ << ") <ConcatOp>:";
|
||||
if (!show_all) {
|
||||
// Call the super class for displaying any common 1-liner info
|
||||
PipelineOp::Print(out, show_all);
|
||||
// Then show any custom derived-internal 1-liner info for this op
|
||||
out << "\n";
|
||||
} else {
|
||||
// Call the super class for displaying any common detailed info
|
||||
PipelineOp::Print(out, show_all);
|
||||
// Then show any custom derived-internal stuff
|
||||
out << "\nDatasets: " << children_num_ << "\n\n";
|
||||
}
|
||||
}
|
||||
|
||||
// Main entry point for Concat: drains each child in order, forwarding its data
// buffers downstream, emits one merged EOE per epoch, and one final EOF at the end.
Status ConcatOp::operator()() {
  // The children_num_ parameter needs to be put here, not in the constructor,
  // because children are attached to this op after construction.
  children_num_ = static_cast<int32_t>(child_.size());

  TaskManager::FindMe()->Post();
  std::unique_ptr<DataBuffer> buf;
  RETURN_IF_NOT_OK(child_[0]->GetNextBuffer(&buf));

  // Obtain columns_name_id_map from child_[0]; all children are expected to produce
  // the same map (enforced per child by Verify() below).
  column_name_id_map_ = child_[0]->column_name_id_map();
  if (column_name_id_map_.empty()) {
    RETURN_STATUS_UNEXPECTED("Child column name map cannot be empty!");
  }

  // Keep cycling through epochs until an EOF has been observed from every child.
  int eof_count = 0;
  while (eof_count != children_num_) {
    for (int i = 0; i < children_num_; i++) {
      // 1. Throw the eof/eoe control buffer away when we meet it, so the loop
      //    below starts on a real data buffer from child i.
      if (buf->eof() || buf->eoe()) {
        RETURN_IF_NOT_OK(child_[i]->GetNextBuffer(&buf));
      }
      // 2. Do verification of column name, column data type and rank of column
      //    data against the reference captured from child[0].
      RETURN_IF_NOT_OK(Verify(i, buf));

      // 3. Forward this child's data buffers to the output connector until a
      //    control (eoe/eof) buffer arrives.
      while (!buf->eoe() && !buf->eof()) {
        RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(buf)));
        RETURN_IF_NOT_OK(child_[i]->GetNextBuffer(&buf));
      }

      // 4. Throw the eoe buffer away when we meet it — except while this op is
      //    inside a repeat that still has iterations left (kDeOpRepeated set and
      //    not kDeOpLastRepeat). NOTE(review): presumably the repeat machinery
      //    consumes that eoe itself on non-final iterations — confirm vs RepeatOp.
      if (buf->eoe() && (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat))) {
        RETURN_IF_NOT_OK(child_[i]->GetNextBuffer(&buf));
      }
      // 5. After the last child of the pass, emit a single merged eoe downstream:
      //    the per-child eoes were swallowed above.
      if (i == (children_num_ - 1)) {
        auto eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
        RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer)));
      }
      if (buf->eof()) {
        eof_count++;
      }
    }
  }
  // 6. Add eof buffer in the end manually, since each child's eof was consumed
  //    by the counting logic above rather than forwarded.
  MS_LOG(DEBUG) << "Add the eof buffer manualy in the end.";
  auto eof_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
  RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eof_buffer)));

  return Status::OK();
}
|
||||
|
||||
// Verifies that the rows produced by child `id` are compatible with child[0]:
// same column-name map, and per-column data type and rank.
// For id == 0 this records the reference; for id > 0 it compares against it.
// @param id - index of the child that produced `buf`
// @param buf - the first data buffer received from that child
// @return Status - OK when compatible, an unexpected-status error otherwise
Status ConcatOp::Verify(int32_t id, const std::unique_ptr<DataBuffer> &buf) {
  TensorRow new_row;
  // GetRow returns a Status; propagate failures instead of silently ignoring them.
  RETURN_IF_NOT_OK(buf->GetRow(0, &new_row));

  if (id == 0) {
    // Obtain the column name map, data type and data rank from child[0].
    // Clear first: Verify(0, ...) runs once per epoch, and the original
    // append-only code grew these vectors without bound across epochs.
    column_name_id_ = child_[id]->column_name_id_map();
    data_type_.clear();
    data_rank_.clear();
    for (const auto &item : new_row) {  // const ref: avoid copying each shared_ptr
      data_type_.push_back(item->type());
      data_rank_.push_back(item->Rank());
    }
  } else {
    // Compare the column name, data type and data rank with those of child[0].
    if (child_[id]->column_name_id_map() != column_name_id_) {
      RETURN_STATUS_UNEXPECTED("The column name or column order is not the same with previous dataset.");
    }
    // Defensive: map equality should imply equal column counts, but guard against
    // a row that disagrees with its map to avoid out-of-range vector access.
    if (new_row.size() != data_type_.size()) {
      RETURN_STATUS_UNEXPECTED("The data type or data rank is not the same with previous dataset.");
    }
    size_t index = 0;
    for (const auto &item : new_row) {
      if (item->type() != data_type_[index] || item->Rank() != data_rank_[index]) {
        RETURN_STATUS_UNEXPECTED("The data type or data rank is not the same with previous dataset.");
      }
      ++index;  // increment moved out of the condition; the original buried it in a short-circuit
    }
  }
  return Status::OK();
}
|
||||
|
||||
// Tree-prepare post-action: run the base-class post actions first (as required for
// all derived implementations), then register this op on the tree's repeat stack
// so the repeat/epoch control logic can find it.
Status ConcatOp::PrepareNodePostAction() {
  RETURN_IF_NOT_OK(PipelineOp::PrepareNodePostAction());
  // NOTE(review): assumes AddToRepeatStack is harmless when no RepeatOp exists in
  // the tree — confirm against ExecutionTree.
  tree_->AddToRepeatStack(shared_from_this());
  return Status::OK();
}
|
||||
} // namespace dataset
|
||||
} // namespace mindspore
|
@ -0,0 +1,95 @@
|
||||
/**
|
||||
* Copyright 2020 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef DATASET_ENGINE_DATASETOPS_CONCAT_OP_H_
|
||||
#define DATASET_ENGINE_DATASETOPS_CONCAT_OP_H_
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
#include "dataset/engine/datasetops/pipeline_op.h"
|
||||
|
||||
namespace mindspore {
|
||||
namespace dataset {
|
||||
class ConcatOp : public PipelineOp {
 public:
  // The nested builder class inside of the ConcatOp is used to help manage all of the arguments
  // for constructing it. This Concat op is very simple though, so this builder is really just
  // provided for a consistent look and feel for creators of Dataset operators overall.
  class Builder {
   public:
    // Builder constructor. Creates the builder object.
    // @note No default args
    // @return This is a constructor.
    Builder();

    // Default destructor
    ~Builder() = default;

    // The builder "build" method creates the final object.
    // @return shared_ptr to the new ConcatOp object
    Status Build(std::shared_ptr<ConcatOp> *);

   private:
    int32_t builder_op_connector_size_;  // Output connector size; defaults to the global config value
  };

  // Constructor of the ConcatOp.
  // @note The builder class should be used to call it
  // @param op_connector_size - connector size
  explicit ConcatOp(int32_t op_connector_size);

  // Destructor
  ~ConcatOp() = default;

  // A print method typically used for debugging
  // @param out - The output stream to write output to
  // @param show_all - A bool to control if you want to show all info or just a summary
  void Print(std::ostream &out, bool show_all) const override;

  // << Stream output operator overload
  // @notes This allows you to write the debug print info using stream operators
  // @param out - reference to the output stream being overloaded
  // @param ro - reference to the ConcatOp to display
  // @return - the output stream must be returned
  friend std::ostream &operator<<(std::ostream &out, const ConcatOp &ro) {
    ro.Print(out, false);
    return out;
  }

  // All dataset ops operate by launching a thread (see ExecutionTree). This class functor will
  // provide the master loop that drives the logic for performing the work
  // @return Status - The error code return
  Status operator()() override;

  // During tree prepare phase, operators may have specific post-operations to perform depending on
  // their role.
  // @notes Derived versions of this function should always call it's superclass version first
  // before providing their own implementations.
  Status PrepareNodePostAction() override;

 private:
  // Verifies child `id`'s first row matches child[0] in column names, data types and ranks.
  // @param id - index of the child that produced `buf`
  // @param buf - the first data buffer received from that child
  Status Verify(int32_t id, const std::unique_ptr<DataBuffer> &buf);

  int32_t children_num_;  // The number of children this node has (set in operator())
  // Mapping from column name to column index, captured from child[0] as the
  // reference all other children are verified against.
  std::unordered_map<std::string, int32_t> column_name_id_;
  std::vector<DataType> data_type_;  // Per-column data types observed from child[0]'s first row
  std::vector<dsize_t> data_rank_;   // Per-column tensor ranks observed from child[0]'s first row
};
|
||||
} // namespace dataset
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // DATASET_ENGINE_DATASETOPS_CONCAT_OP_H_
|
@ -0,0 +1,125 @@
|
||||
/**
|
||||
* Copyright 2020 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "common/common.h"
|
||||
#include "common/utils.h"
|
||||
#include "dataset/core/client.h"
|
||||
#include "gtest/gtest.h"
|
||||
#include "utils/log_adapter.h"
|
||||
|
||||
namespace common = mindspore::common;
|
||||
|
||||
using namespace mindspore::dataset;
|
||||
using mindspore::MsLogLevel::INFO;
|
||||
using mindspore::ExceptionType::NoExceptionType;
|
||||
using mindspore::LogStream;
|
||||
|
||||
class MindDataTestConcatOp : public UT::DatasetOpTesting {};
|
||||
|
||||
|
||||
// End-to-end test for ConcatOp: two identical TFReaderOp children feed a ConcatOp
// root, and the pipeline is expected to produce 24 rows total — presumably 12 per
// child, determined by the test.data fixture (TODO confirm against the fixture).
TEST_F(MindDataTestConcatOp, TestConcatProject) {
  /* Tree:
   *
   *                  OpId(2) ConcatOp
   *                    /        \
   *     OpId(0) TFReaderOp    OpId(1) TFReaderOp
   *
   * Start with an empty execution tree
   */
  MS_LOG(INFO) << "UT test TestConcatProject.";
  auto my_tree = std::make_shared<ExecutionTree>();

  std::string dataset_path;
  dataset_path = datasets_root_path_ + "/testTFTestAllTypes/test.data";

  // TFReaderOp1: first child; loads its own copy of the schema because
  // SetDataSchema takes ownership via std::move.
  std::shared_ptr<TFReaderOp> my_tfreader_op1;
  TFReaderOp::Builder builder1;
  builder1.SetDatasetFilesList({dataset_path})
    .SetRowsPerBuffer(16)
    .SetWorkerConnectorSize(16)
    .SetNumWorkers(16);
  std::unique_ptr<DataSchema> schema1 = std::make_unique<DataSchema>();
  schema1->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder1.SetDataSchema(std::move(schema1));
  Status rc = builder1.Build(&my_tfreader_op1);
  ASSERT_TRUE(rc.IsOk());
  rc = my_tree->AssociateNode(my_tfreader_op1);
  ASSERT_TRUE(rc.IsOk());

  // TFReaderOp2: second child, configured identically to the first so the
  // column names/types/ranks pass ConcatOp's Verify check.
  std::shared_ptr<TFReaderOp> my_tfreader_op2;
  TFReaderOp::Builder builder2;
  builder2.SetDatasetFilesList({dataset_path})
    .SetRowsPerBuffer(16)
    .SetWorkerConnectorSize(16)
    .SetNumWorkers(16);
  std::unique_ptr<DataSchema> schema2 = std::make_unique<DataSchema>();
  schema2->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  builder2.SetDataSchema(std::move(schema2));
  rc = builder2.Build(&my_tfreader_op2);
  ASSERT_TRUE(rc.IsOk());
  rc = my_tree->AssociateNode(my_tfreader_op2);
  ASSERT_TRUE(rc.IsOk());

  // Creating ConcatOp: built with default builder settings, then wired as the
  // root with both readers as children (child order defines concat order).
  std::shared_ptr<ConcatOp> concat_op;
  rc = ConcatOp::Builder().Build(&concat_op);
  EXPECT_TRUE(rc.IsOk());

  rc = my_tree->AssociateNode(concat_op);
  EXPECT_TRUE(rc.IsOk());
  rc = concat_op->AddChild(std::move(my_tfreader_op1));
  EXPECT_TRUE(rc.IsOk());
  rc = concat_op->AddChild(std::move(my_tfreader_op2));
  EXPECT_TRUE(rc.IsOk());
  rc = my_tree->AssignRoot(concat_op);
  EXPECT_TRUE(rc.IsOk());
  rc = my_tree->Prepare();
  EXPECT_TRUE(rc.IsOk());

  // Launch the tree execution to kick off threads and start running the pipeline
  MS_LOG(INFO) << "Launching my tree.";
  rc = my_tree->Launch();
  EXPECT_TRUE(rc.IsOk());

  // Simulate a parse of data from our pipeline.
  std::shared_ptr<DatasetOp> rootNode = my_tree->root();

  DatasetIterator di(my_tree);
  TensorRow tensor_list;
  rc = di.FetchNextTensorRow(&tensor_list);
  EXPECT_TRUE(rc.IsOk());

  // Drain the pipeline, counting rows; an empty TensorRow signals end of data.
  int row_count = 0;
  while (!tensor_list.empty()) {
    MS_LOG(INFO) << "Row display for row #: " << row_count << ".";

    // Display the tensor by calling the printer on it
    for (int i = 0; i < tensor_list.size(); i++) {
      std::ostringstream ss;
      ss << "(" << tensor_list[i] << "): " << *tensor_list[i] << std::endl;
      MS_LOG(INFO) << "Tensor print: " << common::SafeCStr(ss.str()) << ".";
    }
    rc = di.FetchNextTensorRow(&tensor_list);
    EXPECT_TRUE(rc.IsOk());
    row_count++;
  }
  ASSERT_EQ(row_count, 24);  // Should be 24 rows fetched
}
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in new issue