dataset CPP UT: Updates for ExecTree to IRTree Support

pull/10142/head
Nat Sutyanyong 4 years ago committed by Cathy Wong
parent 7697492fb2
commit 7c5b2de1ec

@ -28,12 +28,13 @@ SET(DE_UT_SRCS
c_api_dataset_tfrecord_test.cc
c_api_dataset_voc_test.cc
c_api_datasets_test.cc
c_api_epoch_ctrl_test.cc
c_api_repeat_test.cc
c_api_samplers_test.cc
c_api_text_sentence_piece_vocab_test.cc
c_api_text_vocab_test.cc
c_api_transforms_test.cc
c_api_vision_test.cc
callback_test.cc
celeba_op_test.cc
center_crop_op_test.cc
channel_swap_test.cc
@ -56,7 +57,6 @@ SET(DE_UT_SRCS
datatype_test.cc
decode_op_test.cc
distributed_sampler_test.cc
epoch_ctrl_op_test.cc
equalize_op_test.cc
execution_tree_test.cc
fill_op_test.cc
@ -65,6 +65,9 @@ SET(DE_UT_SRCS
image_folder_op_test.cc
image_process_test.cc
interrupt_test.cc
ir_callback_test.cc
ir_tensor_op_fusion_pass_test.cc
ir_tree_adapter_test.cc
jieba_tokenizer_op_test.cc
main_test.cc
map_op_test.cc
@ -100,7 +103,6 @@ SET(DE_UT_SRCS
random_vertical_flip_op_test.cc
random_vertical_flip_with_bbox_op_test.cc
rename_op_test.cc
repeat_op_test.cc
rescale_op_test.cc
resize_op_test.cc
resize_with_bbox_op_test.cc
@ -120,7 +122,6 @@ SET(DE_UT_SRCS
swap_red_blue_test.cc
take_op_test.cc
task_manager_test.cc
tensor_op_fusion_pass_test.cc
tensor_row_test.cc
tensor_string_test.cc
tensor_test.cc
@ -130,7 +131,6 @@ SET(DE_UT_SRCS
to_float16_op_test.cc
tokenizer_op_test.cc
treap_test.cc
tree_adapter_test.cc
trucate_pair_test.cc
type_cast_op_test.cc
weighted_random_sampler_test.cc

@ -0,0 +1,210 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/common.h"
#include "minddata/dataset/include/datasets.h"
using namespace mindspore::dataset;
using mindspore::dataset::Tensor;
// Test fixture for epoch-control pipeline tests; inherits dataset test helpers
// (e.g. datasets_root_path_) from UT::DatasetOpTesting.
class MindDataTestEpochCtrl : public UT::DatasetOpTesting {
protected:
};
// Verifies that an epoch-control node is auto-injected: iterating the same
// iterator for multiple epochs yields sampler_size rows per epoch, with labels
// matching the sequential class layout of the testPK dataset.
TEST_F(MindDataTestEpochCtrl, TestAutoInjectEpoch) {
MS_LOG(INFO) << "Doing MindDataTestEpochCtrl-TestAutoInjectEpoch.";
// Expected label for each consecutive bucket of class_size rows.
int32_t img_class[4] = {0, 1, 2, 3};
// Randomize epoch count (2..4) to exercise different epoch boundaries.
int32_t num_epochs = 2 + std::rand() % 3;
int32_t sampler_size = 44;
int32_t class_size = 11;
MS_LOG(INFO) << "num_epochs: " << num_epochs;
// Create an ImageFolder Dataset
std::string folder_path = datasets_root_path_ + "/testPK/data/";
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true, SequentialSampler(0, sampler_size));
ds = ds->SetNumWorkers(2);
// Create an iterator over the result of the above dataset
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect a valid iterator
ASSERT_NE(iter, nullptr);
uint64_t i = 0;
std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
for (int epoch = 0; epoch < num_epochs; epoch++) {
// Iterate the dataset and get each row
iter->GetNextRow(&row);
while (row.size() != 0) {
auto label = row["label"];
int32_t label_value;
label->GetItemAt(&label_value, {0});
// Row (i % sampler_size) must belong to class (row / class_size).
EXPECT_TRUE(img_class[(i % sampler_size) / class_size] == label_value);
iter->GetNextRow(&row);
i++;
}
}
// Total rows across all epochs.
EXPECT_EQ(i, sampler_size * num_epochs);
// Try to fetch data beyond the specified number of epochs.
iter->GetNextRow(&row);
// NOTE(review): a non-empty 2-column row (image, label) is still expected here,
// i.e. data keeps flowing past num_epochs with auto-injected epoch ctrl — confirm intended.
EXPECT_EQ(row.size(), 2);
// Manually terminate the pipeline
iter->Stop();
}
// Verifies multi-epoch iteration with a RandomSampler: each epoch yields
// exactly sampler_size rows and every label is a valid testPK class (0..3).
TEST_F(MindDataTestEpochCtrl, TestEpoch) {
MS_LOG(INFO) << "Doing MindDataTestEpochCtrl-TestEpoch.";
// Randomize epoch count (1..4).
int32_t num_epochs = 1 + std::rand() % 4;
int32_t sampler_size = 7;
MS_LOG(INFO) << "num_epochs: " << num_epochs;
// Create an ImageFolder Dataset
std::string folder_path = datasets_root_path_ + "/testPK/data/";
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true, RandomSampler(0, sampler_size));
ds = ds->SetNumWorkers(3);
// Create an iterator over the result of the above dataset
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect a valid iterator
ASSERT_NE(iter, nullptr);
// Iterate the dataset and get each row
uint64_t i = 0;
std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
for (int epoch = 0; epoch < num_epochs; epoch++) {
iter->GetNextRow(&row);
while (row.size() != 0) {
auto label = row["label"];
int32_t label_value;
label->GetItemAt(&label_value, {0});
// Random order, so only check the label is in the valid class range.
EXPECT_TRUE(label_value >= 0 && label_value <= 3);
iter->GetNextRow(&row);
i++;
}
}
// Verify correct number of rows fetched
EXPECT_EQ(i, sampler_size * num_epochs);
// Try to fetch data beyond the specified number of epochs.
iter->GetNextRow(&row);
// NOTE(review): expects a non-empty 2-column row past num_epochs — confirm intended.
EXPECT_EQ(row.size(), 2);
// Manually terminate the pipeline
iter->Stop();
}
// Verifies epoch control combined with a Repeat op: each epoch yields
// sampler_size * num_repeats rows.
TEST_F(MindDataTestEpochCtrl, TestRepeatEpoch) {
MS_LOG(INFO) << "Doing MindDataTestEpochCtrl-TestRepeatEpoch.";
// Randomize epoch count (2..6).
int32_t num_epochs = 2 + std::rand() % 5;
int32_t num_repeats = 3;
int32_t sampler_size = 7;
MS_LOG(INFO) << "num_epochs: " << num_epochs;
// Create an ImageFolder Dataset
std::string folder_path = datasets_root_path_ + "/testPK/data/";
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true, RandomSampler(0, sampler_size));
ds = ds->SetNumWorkers(3);
ds = ds->Repeat(num_repeats);
// Create an iterator over the result of the above dataset
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect a valid iterator
ASSERT_NE(iter, nullptr);
// Iterate the dataset and get each row
uint64_t i = 0;
std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
for (int epoch = 0; epoch < num_epochs; epoch++) {
iter->GetNextRow(&row);
while (row.size() != 0) {
auto label = row["label"];
int32_t label_value;
label->GetItemAt(&label_value, {0});
// Random order, so only check the label is in the valid class range.
EXPECT_TRUE(label_value >= 0 && label_value <= 3);
iter->GetNextRow(&row);
i++;
}
}
// Verify correct number of rows fetched
EXPECT_EQ(i, sampler_size * num_repeats * num_epochs);
// Try to fetch data beyond the specified number of epochs.
iter->GetNextRow(&row);
// NOTE(review): expects a non-empty 2-column row past num_epochs — confirm intended.
EXPECT_EQ(row.size(), 2);
// Manually terminate the pipeline
iter->Stop();
}
// Verifies epoch control with two stacked Repeat ops: each epoch yields
// sampler_size * num_repeats[0] * num_repeats[1] rows.
TEST_F(MindDataTestEpochCtrl, TestRepeatRepeatEpoch) {
MS_LOG(INFO) << "Doing MindDataTestEpochCtrl-TestRepeatRepeatEpoch.";
// Randomize epoch count (1..5).
int32_t num_epochs = 1 + std::rand() % 5;
int32_t num_repeats[2] = {2, 3};
int32_t sampler_size = 11;
MS_LOG(INFO) << "num_epochs: " << num_epochs;
// Create an ImageFolder Dataset
std::string folder_path = datasets_root_path_ + "/testPK/data/";
// Sequential sample starting at row 5.
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true, SequentialSampler(5, sampler_size));
ds = ds->Repeat(num_repeats[0]);
ds = ds->Repeat(num_repeats[1]);
// Create an iterator over the result of the above dataset
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect a valid iterator
ASSERT_NE(iter, nullptr);
// Iterate the dataset and get each row
uint64_t i = 0;
std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
for (int epoch = 0; epoch < num_epochs; epoch++) {
iter->GetNextRow(&row);
while (row.size() != 0) {
auto label = row["label"];
int32_t label_value;
label->GetItemAt(&label_value, {0});
// Only check the label is in the valid class range.
EXPECT_TRUE(label_value >= 0 && label_value <= 3);
iter->GetNextRow(&row);
i++;
}
}
// Verify correct number of rows fetched
EXPECT_EQ(i, sampler_size * num_repeats[0] * num_repeats[1] * num_epochs);
// Try to fetch data beyond the specified number of epochs.
iter->GetNextRow(&row);
// NOTE(review): expects a non-empty 2-column row past num_epochs — confirm intended.
EXPECT_EQ(row.size(), 2);
// Manually terminate the pipeline
iter->Stop();
}

@ -0,0 +1,55 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/common.h"
#include "minddata/dataset/include/datasets.h"
using namespace mindspore::dataset;
using mindspore::dataset::Tensor;
// Test fixture for C-API pipeline tests; inherits dataset test helpers
// (e.g. datasets_root_path_) from UT::DatasetOpTesting.
class MindDataTestPipeline : public UT::DatasetOpTesting {
protected:
};
// Verifies that SetNumWorkers on the leaf followed by Repeat(32) produces the
// expected total row count from the TFRecord test file.
TEST_F(MindDataTestPipeline, TestRepeatSetNumWorkers) {
MS_LOG(INFO) << "Doing MindDataTestRepeat-TestRepeatSetNumWorkers.";
std::string file_path = datasets_root_path_ + "/testTFTestAllTypes/test.data";
std::shared_ptr<Dataset> ds = TFRecord({file_path});
ds = ds->SetNumWorkers(16);
ds = ds->Repeat(32);
// Create an iterator over the result of the above dataset
std::shared_ptr<Iterator> iter = ds->CreateIterator();
// Expect a valid iterator
ASSERT_NE(iter, nullptr);
// Iterate the dataset and get each row
std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
iter->GetNextRow(&row);
uint64_t i = 0;
while (row.size() != 0) {
i++;
iter->GetNextRow(&row);
}
// Verify correct number of rows fetched
// (test.data presumably contains 12 rows, repeated 32 times — verify against fixture)
EXPECT_EQ(i, 12 * 32);
// Manually terminate the pipeline
iter->Stop();
}

File diff suppressed because it is too large Load Diff

@ -143,6 +143,7 @@ class MindDataTestCallback : public UT::DatasetOpTesting {
};
TEST_F(MindDataTestCallback, TestBasicCallback) {
MS_LOG(INFO) << "Doing: MindDataTestCallback-TestBasicCallback";
// config callback
Status rc;
std::shared_ptr<test::TestCallback> tst_cb = std::make_shared<test::TestCallback>(64);
@ -189,7 +190,8 @@ TEST_F(MindDataTestCallback, TestBasicCallback) {
EXPECT_EQ(tst_cb->all_ep_nums(len), all_epochs);
}
TEST_F(MindDataTestCallback, TestMutiEpochCallback) {
TEST_F(MindDataTestCallback, TestMultiEpochCallback) {
MS_LOG(INFO) << "Doing: MindDataTestCallback-TestMultiEpochCallback";
// config callback
Status rc;
std::shared_ptr<test::TestCallback> tst_cb = std::make_shared<test::TestCallback>(4);
@ -200,7 +202,7 @@ TEST_F(MindDataTestCallback, TestMutiEpochCallback) {
ColDescriptor col("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &shape);
ASSERT_OK(schema->AddColumn(col));
std::shared_ptr<RandomDataOp> leaf;
rc = RandomDataOp::Builder().SetRowsPerBuffer(1).SetDataSchema(std::move(schema)).SetTotalRows(4).Build(&leaf);
rc = RandomDataOp::Builder().SetRowsPerBuffer(1).SetDataSchema(std::move(schema)).SetTotalRows(4).SetNumWorkers(4).Build(&leaf);
EXPECT_TRUE(rc.IsOk());
// config mapOp
std::shared_ptr<MapOp> map_op;
@ -243,6 +245,7 @@ TEST_F(MindDataTestCallback, TestMutiEpochCallback) {
}
TEST_F(MindDataTestCallback, TestSelectedCallback) {
MS_LOG(INFO) << "Doing: MindDataTestCallback-TestSelectedCallback";
// config callback
Status rc;
std::shared_ptr<test::TestCallback> tst_cb = std::make_shared<test::TestCallback>(4);
@ -257,7 +260,7 @@ TEST_F(MindDataTestCallback, TestSelectedCallback) {
ColDescriptor col("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &shape);
ASSERT_OK(schema->AddColumn(col));
std::shared_ptr<RandomDataOp> leaf;
rc = RandomDataOp::Builder().SetRowsPerBuffer(1).SetDataSchema(std::move(schema)).SetTotalRows(4).Build(&leaf);
rc = RandomDataOp::Builder().SetRowsPerBuffer(1).SetDataSchema(std::move(schema)).SetTotalRows(4).SetNumWorkers(4).Build(&leaf);
EXPECT_TRUE(rc.IsOk());
// config mapOp
std::shared_ptr<MapOp> map_op;
@ -304,12 +307,15 @@ TEST_F(MindDataTestCallback, TestCAPICallback) {
// config callback
std::shared_ptr<test::TestCallback> tst_cb = std::make_shared<test::TestCallback>(64);
std::shared_ptr<DSCallback> cb1 = tst_cb;
// config leaf_op, use random_data to avoid I/O
std::shared_ptr<SchemaObj> schema = std::make_shared<SchemaObj>();
ASSERT_TRUE(schema->add_column("label", "uint32", {}));
// Create a RandomDataset. Use random_data to avoid I/O
std::shared_ptr<SchemaObj> schema = Schema();
ASSERT_OK(schema->add_column("label", mindspore::TypeId::kNumberTypeUInt32, {}));
std::shared_ptr<Dataset> ds = RandomData(44, schema);
ASSERT_NE(ds, nullptr);
ds = ds->Map({transforms::TypeCast("uint64")}, {"label"}, {}, {}, nullptr, {cb1});
ASSERT_NE(ds, nullptr);
ds = ds->Repeat(2);
ASSERT_NE(ds, nullptr);
TreeAdapter tree_adapter;
// using tree_adapter to set num_epoch = 1

@ -0,0 +1,101 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <string>
#include "common/common.h"
#include "minddata/dataset/engine/execution_tree.h"
#include "minddata/dataset/engine/ir/datasetops/dataset_node.h"
#include "minddata/dataset/engine/tree_adapter.h"
#include "minddata/dataset/include/datasets.h"
#include "minddata/dataset/include/transforms.h"
#include "minddata/dataset/include/vision.h"
#include "minddata/dataset/kernels/tensor_op.h"
using namespace mindspore::dataset;
// Test fixture for the tensor-op fusion optimization pass (IR-based variant).
class MindDataTestTensorOpFusionPass : public UT::DatasetOpTesting {
public:
MindDataTestTensorOpFusionPass() = default;
};
// With IR optimization disabled, Decode + RandomResizedCrop must remain two
// separate tensor ops inside the MapOp (no fusion into RandomCropDecodeResize).
TEST_F(MindDataTestTensorOpFusionPass, RandomCropDecodeResizeDisabled) {
MS_LOG(INFO) << "Doing MindDataTestTensorOpFusionPass-RandomCropDecodeResizeDisabled";
std::string folder_path = datasets_root_path_ + "/testPK/data/";
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, false, SequentialSampler(0, 11));
ds = ds->SetNumWorkers(16);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> decode = vision::Decode();
std::shared_ptr<TensorOperation> random_resized_crop = vision::RandomResizedCrop({5});
ds = ds->Map({decode, random_resized_crop}, {"image"});
// Compile the IR tree into an execution tree via TreeAdapter.
std::shared_ptr<DatasetNode> node = ds->IRNode();
auto ir_tree = std::make_shared<TreeAdapter>();
// Disable IR optimization pass
ir_tree->SetOptimize(false);
Status rc;
rc = ir_tree->Compile(node);
// NOTE(review): relies on Status converting to bool; rc.IsOk() is the form used
// elsewhere in the suite — confirm the intended polarity.
EXPECT_TRUE(rc);
auto root_op = ir_tree->GetRoot();
auto tree = std::make_shared<ExecutionTree>();
// Walk from the compiled root; the second node is expected to be the MapOp.
auto it = tree->begin(static_cast<std::shared_ptr<DatasetOp>>(root_op));
++it;
auto *map_op = &(*it);
auto tfuncs = static_cast<MapOp *>(map_op)->TFuncs();
auto func_it = tfuncs.begin();
// Both original ops should still be present, in order.
EXPECT_EQ((*func_it)->Name(), kDecodeOp);
++func_it;
EXPECT_EQ((*func_it)->Name(), kRandomCropAndResizeOp);
}
// With IR optimization enabled, Decode + RandomResizedCrop should be fused into
// a single RandomCropDecodeResize op — but see the FIXME below: the fusion still
// lives in ExecutionTree, so the real assertions are disabled for now.
TEST_F(MindDataTestTensorOpFusionPass, RandomCropDecodeResizeEnabled) {
MS_LOG(INFO) << "Doing MindDataTestTensorOpFusionPass-RandomCropDecodeResizeEnabled";
std::string folder_path = datasets_root_path_ + "/testPK/data/";
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, false, SequentialSampler(0, 11));
ds = ds->SetNumWorkers(16);
// Create objects for the tensor ops
std::shared_ptr<TensorOperation> decode = vision::Decode();
std::shared_ptr<TensorOperation> random_resized_crop = vision::RandomResizedCrop({5});
ds = ds->Map({decode, random_resized_crop}, {"image"});
// Compile the IR tree into an execution tree via TreeAdapter.
std::shared_ptr<DatasetNode> node = ds->IRNode();
auto ir_tree = std::make_shared<TreeAdapter>();
// Enable IR optimization pass
ir_tree->SetOptimize(true);
Status rc;
rc = ir_tree->Compile(node);
// NOTE(review): relies on Status converting to bool; rc.IsOk() is the form used
// elsewhere in the suite — confirm the intended polarity.
EXPECT_TRUE(rc);
auto root_op = ir_tree->GetRoot();
auto tree = std::make_shared<ExecutionTree>();
// Walk from the compiled root; the second node is expected to be the MapOp.
auto it = tree->begin(static_cast<std::shared_ptr<DatasetOp>>(root_op));
++it;
auto *map_op = &(*it);
auto tfuncs = static_cast<MapOp *>(map_op)->TFuncs();
auto func_it = tfuncs.begin();
// FIXME: Currently the following 2 commented out verifications for this test will fail because this
// optimization is still in ExecutionTree code, and not yet in IR optimization pass
// However, use a bogus check for func_it, to avoid compile error for unused variable.
EXPECT_EQ(func_it, func_it);
// EXPECT_EQ((*func_it)->Name(), kRandomCropDecodeResizeOp);
// EXPECT_EQ(++func_it, tfuncs.end());
}

@ -29,114 +29,8 @@ using namespace mindspore::dataset;
using mindspore::LogStream;
using mindspore::MsLogLevel::INFO;
class MindDataTestOptimizationPass : public UT::DatasetOpTesting {
public:
MindDataTestOptimizationPass() = default;
void SetUp() override { GlobalInit(); }
class MindDataTestOptimizationPass : public UT::DatasetOpTesting {};
// this recursive function helps build a ExecutionTree from a IR node, it is copied from TreeAdapter
// Builds the ops for `ir`, associates them with `tree`, chains them parent->child,
// returns the first op through `op`, then recurses into the IR node's children,
// attaching each child subtree under the last op built for this node.
Status DFSBuild(std::shared_ptr<DatasetNode> ir, std::shared_ptr<DatasetOp> *op, ExecutionTree *tree) {
std::vector<std::shared_ptr<DatasetOp>> ops;
RETURN_IF_NOT_OK(ir->Build(&ops));
CHECK_FAIL_RETURN_UNEXPECTED(!ops.empty() && tree != nullptr && op != nullptr, "Fail To Build Tree.");
(*op) = ops.front();
RETURN_IF_NOT_OK(tree->AssociateNode(*op));
// One IR node may build into a chain of ops; link them in order.
for (size_t i = 1; i < ops.size(); i++) {
RETURN_IF_NOT_OK(tree->AssociateNode(ops[i]));
RETURN_IF_NOT_OK(ops[i - 1]->AddChild(ops[i]));
}
for (std::shared_ptr<DatasetNode> child_ir : ir->Children()) {
std::shared_ptr<DatasetOp> child_op;
RETURN_IF_NOT_OK(DFSBuild(child_ir, &child_op, tree));
RETURN_IF_NOT_OK(ops.back()->AddChild(child_op));  // append children to the last of ops
}
return Status::OK();
}
// this function will build an execution_tree from a root ir node. nullptr will be returned if error occurs
std::unique_ptr<ExecutionTree> BuildTree(std::shared_ptr<DatasetNode> ir) {
std::unique_ptr<ExecutionTree> tree = std::make_unique<ExecutionTree>();
std::shared_ptr<DatasetOp> root;
// Recursively build and associate all ops, then assign the root.
if (DFSBuild(ir, &root, tree.get()).IsError()) return nullptr;
if (tree->AssignRoot(root).IsError()) return nullptr;
return tree;
}
};
// Verifies that overriding the pre-pass with only GetterPass(kOutputShapeAndType)
// leaves the tree intact: Shuffle, Repeat, Project and Batch all survive PreAction.
TEST_F(MindDataTestOptimizationPass, MindDataTestOutputShapeAndTypePass) {
MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestOutputShapeAndTypePass.";
// config leaf_op, use random_data to avoid I/O
std::shared_ptr<SchemaObj> schema = std::make_shared<SchemaObj>();
ASSERT_TRUE(schema->add_column("label", "uint32", {}));
std::shared_ptr<Dataset> ds = RandomData(44, schema)->Repeat(2)->Project({"label"})->Shuffle(10)->Batch(2);
std::unique_ptr<ExecutionTree> exe_tree = BuildTree(ds->IRNode());
ASSERT_NE(exe_tree, nullptr);
// test the optimization pass
// OptPass is supposed to remove concat, filter repeat, shuffle skip, take and set the callback of map to empty
std::function<OptPass(OptPass)> pass = [](OptPass pre) {
// return a new pass, this will override all the existing pre-pass es
pre.clear();
pre.push_back(std::make_unique<GetterPass>(GetterPass::kOutputShapeAndType));
return pre;
};
exe_tree->SetPrePassOverride(pass);
ASSERT_OK(exe_tree->PreAction());
std::stringstream ss;
// print the tree in std::string as a way to verify that nodes are indeed removed
exe_tree->Print(ss);
std::string ss_str = ss.str();
// ss_str would look like this
// +- ( 0) <BatchOp>: [workers: 4] [batch size: 2]
// +- ( 2) <ProjectOp>: [workers: 0 (inlined)]
// +- ( 4) <RandomDataOp>: [workers: 4] [total rows: 44]
//
// verify that no ops were removed: ShuffleOp and RepeatOp are still present, as are ProjectOp and BatchOp
EXPECT_NE(ss_str.find("ShuffleOp"), ss_str.npos);
EXPECT_NE(ss_str.find("RepeatOp"), ss_str.npos);
EXPECT_NE(ss_str.find("ProjectOp"), ss_str.npos);
EXPECT_NE(ss_str.find("BatchOp"), ss_str.npos);
}
// Same structure as the test above, but with GetterPass(kDatasetSize) as the
// sole pre-pass: the tree must still keep Shuffle, Repeat, Project and Batch.
TEST_F(MindDataTestOptimizationPass, MindDataTestDatasetSizePass) {
MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestDatasetSizePass.";
// config leaf_op, use random_data to avoid I/O
std::shared_ptr<SchemaObj> schema = std::make_shared<SchemaObj>();
ASSERT_TRUE(schema->add_column("label", "uint32", {}));
std::shared_ptr<Dataset> ds = RandomData(44, schema)->Repeat(2)->Project({"label"})->Shuffle(10)->Batch(2);
std::unique_ptr<ExecutionTree> exe_tree = BuildTree(ds->IRNode());
ASSERT_NE(exe_tree, nullptr);
// test the optimization pass
// OptPass is supposed to remove concat, filter repeat, shuffle skip, take and set the callback of map to empty
std::function<OptPass(OptPass)> pass = [](OptPass pre) {
// return a new pass, this will override all the existing pre-pass es
pre.clear();  // remove all existing pre pass
pre.push_back(std::make_unique<GetterPass>(GetterPass::kDatasetSize));
return pre;
};
exe_tree->SetPrePassOverride(pass);
ASSERT_OK(exe_tree->PreAction());
std::stringstream ss;
// print the tree in std::string as a way to verify that nodes are indeed removed
exe_tree->Print(ss);
std::string ss_str = ss.str();
// verify that no ops were removed: ShuffleOp and RepeatOp are still present, as are ProjectOp and BatchOp
EXPECT_NE(ss_str.find("ShuffleOp"), ss_str.npos);
EXPECT_NE(ss_str.find("RepeatOp"), ss_str.npos);
EXPECT_NE(ss_str.find("ProjectOp"), ss_str.npos);
EXPECT_NE(ss_str.find("BatchOp"), ss_str.npos);
}
TEST_F(MindDataTestOptimizationPass, MindDataTestAutoWorkerPass) {
MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestAutoWorkerPass.";

@ -1,63 +0,0 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "minddata/dataset/util/circular_pool.h"
#include "minddata/dataset/core/client.h"
#include "common/common.h"
#include "gtest/gtest.h"
#include "utils/log_adapter.h"
using namespace mindspore::dataset;
using mindspore::MsLogLevel::INFO;
using mindspore::ExceptionType::NoExceptionType;
using mindspore::LogStream;
// (Removed in this commit) Fixture for the legacy ExecutionTree-based RepeatOp test.
class MindDataTestrepeat_op : public UT::DatasetOpTesting {
};
// (Removed in this commit) Legacy test: builds RepeatOp(32) over a TFReaderOp
// directly on an ExecutionTree, then builds a second RepeatOp via Builder.
TEST_F(MindDataTestrepeat_op, Testrepeat_opFuntions) {
MS_LOG(INFO) << "Doing MindDataTestrepeat_op.";
auto my_tree = std::make_shared<ExecutionTree>();
std::shared_ptr<DatasetOp> parent_op = std::make_shared<RepeatOp>(32);
std::string dataset_path;
dataset_path = datasets_root_path_ + "/testTFTestAllTypes/test.data";
// TFReaderOp
std::shared_ptr<TFReaderOp> my_tfreader_op;
TFReaderOp::Builder builder;
builder.SetDatasetFilesList({dataset_path})
.SetRowsPerBuffer(16)
.SetWorkerConnectorSize(16)
.SetNumWorkers(16);
Status rc= builder.Build(&my_tfreader_op);
ASSERT_TRUE(rc.IsOk());
rc = my_tree->AssociateNode(my_tfreader_op);
ASSERT_TRUE(rc.IsOk());
rc = my_tree->AssociateNode(parent_op);
ASSERT_TRUE(rc.IsOk());
ASSERT_NE(parent_op, nullptr);
ASSERT_NE(my_tfreader_op, nullptr);
parent_op->AddChild(std::move(my_tfreader_op));
MS_LOG(INFO) << parent_op;
my_tree->AssignRoot(parent_op);
// NOTE(review): return Statuses of AssignRoot/Prepare are not checked here.
my_tree->Prepare();
// NOTE(review): most-vexing-parse — this declares a function named RepeatOpOp,
// it does not construct a RepeatOp; dead code.
RepeatOp RepeatOpOp();
std::shared_ptr<RepeatOp> repeat_op;
// NOTE(review): rc from this Build is never checked; only the pointer is.
rc = RepeatOp::Builder(3).Build(&repeat_op);
ASSERT_NE(repeat_op, nullptr);
}

@ -1,105 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <string>
#include "minddata/dataset/core/client.h"
#include "common/common.h"
#include "gtest/gtest.h"
#include "minddata/dataset/kernels/image/random_crop_and_resize_op.h"
#include "minddata/dataset/kernels/image/decode_op.h"
#include "minddata/dataset/engine/datasetops/source/image_folder_op.h"
#include "minddata/dataset/engine/execution_tree.h"
using namespace mindspore::dataset;
using mindspore::LogStream;
using mindspore::MsLogLevel::INFO;
// (Removed in this commit) Fixture for the legacy ExecutionTree-based fusion tests.
class MindDataTestTensorOpFusionPass : public UT::DatasetOpTesting {
public:
MindDataTestTensorOpFusionPass() = default;
void SetUp() override { GlobalInit(); }
};
// (Removed in this commit) Legacy fusion-disabled test on ExecutionTree:
// Decode + RandomCropAndResize must remain two separate ops in the MapOp.
TEST_F(MindDataTestTensorOpFusionPass, RandomCropDecodeResize_fusion_disabled) {
MS_LOG(INFO) << "Doing RandomCropDecodeResize_fusion";
// Function declarations for helpers defined elsewhere in the test utilities.
std::shared_ptr<ImageFolderOp> ImageFolder(int64_t num_works, int64_t rows, int64_t conns, std::string path,
bool shuf = false, std::shared_ptr<SamplerRT> sampler = nullptr,
std::map<std::string, int32_t> map = {}, bool decode = false);
std::shared_ptr<ExecutionTree> Build(std::vector<std::shared_ptr<DatasetOp>> ops);
auto rcar_op = std::make_shared<RandomCropAndResizeOp>();
auto decode_op = std::make_shared<DecodeOp>();
Status rc;
std::vector<std::shared_ptr<TensorOp>> func_list;
func_list.push_back(decode_op);
func_list.push_back(rcar_op);
std::shared_ptr<MapOp> map_op;
MapOp::Builder map_decode_builder;
map_decode_builder.SetInColNames({}).SetOutColNames({}).SetTensorFuncs(func_list).SetNumWorkers(4);
rc = map_decode_builder.Build(&map_op);
EXPECT_TRUE(rc.IsOk());
auto tree = std::make_shared<ExecutionTree>();
tree = Build({ImageFolder(16, 2, 32, "./", false), map_op});
rc = tree->SetOptimize(false);
// NOTE(review): relies on Status converting to bool — confirm polarity.
EXPECT_TRUE(rc);
rc = tree->Prepare();
EXPECT_TRUE(rc.IsOk());
// SetOptimize after Prepare is expected to fail.
rc = tree->SetOptimize(false);
EXPECT_TRUE(rc.IsError());
auto it = tree->begin();
++it;
auto *m_op = &(*it);
auto tfuncs = static_cast<MapOp *>(m_op)->TFuncs();
auto func_it = tfuncs.begin();
// Both original ops should still be present, in order.
EXPECT_EQ((*func_it)->Name(), kDecodeOp);
++func_it;
EXPECT_EQ((*func_it)->Name(), kRandomCropAndResizeOp);
}
// (Removed in this commit) Legacy fusion-enabled test on ExecutionTree:
// Decode + RandomCropAndResize must be fused into one RandomCropDecodeResize op.
TEST_F(MindDataTestTensorOpFusionPass, RandomCropDecodeResize_fusion_enabled) {
MS_LOG(INFO) << "Doing RandomCropDecodeResize_fusion";
// Function declarations for helpers defined elsewhere in the test utilities.
std::shared_ptr<ImageFolderOp> ImageFolder(int64_t num_works, int64_t rows, int64_t conns, std::string path,
bool shuf = false, std::shared_ptr<SamplerRT> sampler = nullptr,
std::map<std::string, int32_t> map = {}, bool decode = false);
std::shared_ptr<ExecutionTree> Build(std::vector<std::shared_ptr<DatasetOp>> ops);
auto rcar_op = std::make_shared<RandomCropAndResizeOp>();
auto decode_op = std::make_shared<DecodeOp>();
Status rc;
std::vector<std::shared_ptr<TensorOp>> func_list;
func_list.push_back(decode_op);
func_list.push_back(rcar_op);
std::shared_ptr<MapOp> map_op;
MapOp::Builder map_decode_builder;
map_decode_builder.SetInColNames({}).SetOutColNames({}).SetTensorFuncs(func_list).SetNumWorkers(4);
rc = map_decode_builder.Build(&map_op);
EXPECT_TRUE(rc.IsOk());
auto tree = std::make_shared<ExecutionTree>();
tree = Build({ImageFolder(16, 2, 32, "./", false), map_op});
rc = tree->SetOptimize(true);
// NOTE(review): relies on Status converting to bool — confirm polarity.
EXPECT_TRUE(rc);
rc = tree->Prepare();
EXPECT_TRUE(rc.IsOk());
// SetOptimize after Prepare is expected to fail.
rc = tree->SetOptimize(false);
EXPECT_TRUE(rc.IsError());
auto it = tree->begin();
++it;
auto *m_op = &(*it);
auto tfuncs = static_cast<MapOp *>(m_op)->TFuncs();
auto func_it = tfuncs.begin();
// The fused op should be the only tensor func left.
EXPECT_EQ((*func_it)->Name(), kRandomCropDecodeResizeOp);
EXPECT_EQ(++func_it, tfuncs.end());
}
Loading…
Cancel
Save