added test case to cifar_op

update cifar10 dataset
fixing missing error handling code in validator
pull/3039/head
tinazhang 5 years ago
parent 089623ad19
commit 340d98a4d1

@@ -271,6 +271,8 @@ def check_sampler_shuffle_shard_options(param_dict):
     if sampler is not None:
         if shuffle is not None:
             raise RuntimeError("sampler and shuffle cannot be specified at the same time.")
+        if num_shards is not None:
+            raise RuntimeError("sampler and sharding cannot be specified at the same time.")
     if num_shards is not None:
         check_pos_int32(num_shards)
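
A minimal sketch (not part of this diff) of what the added check rejects, assuming the Cifar10Dataset constructor runs this validator and the RandomSampler class from mindspore.dataset:

    import mindspore.dataset as ds

    sampler = ds.RandomSampler()
    try:
        # combining an explicit sampler with shuffle (or num_shards) is rejected up front
        data = ds.Cifar10Dataset("../data/dataset/testCifar10Data",
                                 sampler=sampler, shuffle=True)
    except RuntimeError as e:
        print(e)  # "sampler and shuffle cannot be specified at the same time."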

@@ -1,21 +0,0 @@
{
  "datasetType": "CIFAR100",
  "numRows": 100,
  "columns": {
    "image": {
      "type": "uint8",
      "rank": 1,
      "t_impl": "cvmat"
    },
    "coarse_label" : {
      "type": "uint32",
      "rank": 1,
      "t_impl": "flex"
    },
    "fine_label" : {
      "type": "uint32",
      "rank": 1,
      "t_impl": "flex"
    }
  }
}

@@ -1,21 +0,0 @@
{
  "datasetType": "CIFAR100",
  "numRows": 33,
  "columns": {
    "image": {
      "type": "uint8",
      "rank": 1,
      "t_impl": "cvmat"
    },
    "coarse_label" : {
      "type": "uint32",
      "rank": 1,
      "t_impl": "flex"
    },
    "fine_label" : {
      "type": "uint32",
      "rank": 1,
      "t_impl": "flex"
    }
  }
}

@@ -1,9 +0,0 @@
{
  "deviceNum" : 3,
  "deviceId" : 1,
  "shardConfig" : "ALL",
  "shuffle" : "ON",
  "seed" : 0,
  "epoch" : 2
}

@@ -1,9 +0,0 @@
{
  "deviceNum" : 3,
  "deviceId" : 1,
  "shardConfig" : "RANDOM",
  "shuffle" : "ON",
  "seed" : 0,
  "epoch" : 1
}

@@ -1,9 +0,0 @@
{
  "deviceNum" : 3,
  "deviceId" : 1,
  "shardConfig" : "UNIQUE",
  "shuffle" : "ON",
  "seed" : 0,
  "epoch" : 3
}
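
The three removed distribution configs above (shardConfig ALL / RANDOM / UNIQUE) describe a 3-device split; a hedged sketch of expressing the same split through the constructor arguments this commit validates, assuming Cifar10Dataset accepts num_shards and shard_id:

    import mindspore.dataset as ds

    # roughly the deviceNum=3 / deviceId=1 case from the removed JSON,
    # now passed directly to the dataset constructor
    data = ds.Cifar10Dataset("../data/dataset/testCifar10Data",
                             num_shards=3, shard_id=1, shuffle=True)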

@@ -1,16 +0,0 @@
{
  "datasetType": "CIFAR10",
  "numRows": 60000,
  "columns": {
    "image": {
      "type": "uint8",
      "rank": 1,
      "t_impl": "cvmat"
    },
    "label" : {
      "type": "uint32",
      "rank": 1,
      "t_impl": "flex"
    }
  }
}

@@ -1,16 +0,0 @@
{
  "datasetType": "CIFAR10",
  "numRows": 33,
  "columns": {
    "image": {
      "type": "uint8",
      "rank": 1,
      "t_impl": "cvmat"
    },
    "label" : {
      "type": "uint32",
      "rank": 1,
      "t_impl": "flex"
    }
  }
}

@@ -1,91 +0,0 @@
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np

import mindspore.dataset as ds
from mindspore import log as logger

# Data for CIFAR and MNIST are not part of build tree
# They need to be downloaded directly
# prep_data.py can be executed or code below
# import sys
# sys.path.insert(0,"../../data")
# import prep_data
# prep_data.download_all_for_test("../../data")
DATA_DIR_10 = "../data/dataset/testCifar10Data"
DATA_DIR_100 = "../data/dataset/testCifar100Data"


def load_cifar(path):
    raw = np.empty(0, dtype=np.uint8)
    for file_name in os.listdir(path):
        if file_name.endswith(".bin"):
            with open(os.path.join(path, file_name), mode='rb') as file:
                raw = np.append(raw, np.fromfile(file, dtype=np.uint8), axis=0)
    # each CIFAR-10 record is 3073 bytes: 1 label byte + 3 x 32 x 32 image bytes
    raw = raw.reshape(-1, 3073)
    labels = raw[:, 0]
    images = raw[:, 1:]
    images = images.reshape(-1, 3, 32, 32)
    images = images.transpose(0, 2, 3, 1)
    return images, labels


def test_case_dataset_cifar10():
    """
    dataset parameter
    """
    logger.info("Test dataset parameter")
    # apply dataset operations
    data1 = ds.Cifar10Dataset(DATA_DIR_10, 100)

    num_iter = 0
    for _ in data1.create_dict_iterator():
        # in this example, each dictionary has keys "image" and "label"
        num_iter += 1
    assert num_iter == 100


def test_case_dataset_cifar100():
    """
    dataset parameter
    """
    logger.info("Test dataset parameter")
    # apply dataset operations
    data1 = ds.Cifar100Dataset(DATA_DIR_100, 100)

    num_iter = 0
    for _ in data1.create_dict_iterator():
        # in this example, each dictionary has keys "image", "coarse_label" and "fine_label"
        num_iter += 1
    assert num_iter == 100


def test_reading_cifar10():
    """
    Validate CIFAR10 image readings
    """
    data1 = ds.Cifar10Dataset(DATA_DIR_10, 100, shuffle=False)
    images, labels = load_cifar(DATA_DIR_10)
    for i, d in enumerate(data1.create_dict_iterator()):
        np.testing.assert_array_equal(d["image"], images[i])
        np.testing.assert_array_equal(d["label"], labels[i])


if __name__ == '__main__':
    test_case_dataset_cifar10()
    test_case_dataset_cifar100()
    test_reading_cifar10()
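
A sketch (not part of the removed file) of the CIFAR-100 counterpart of load_cifar, assuming the standard CIFAR-100 binary layout of one coarse-label byte, one fine-label byte and 3072 image bytes per record:

    def load_cifar100(path):
        raw = np.empty(0, dtype=np.uint8)
        for file_name in os.listdir(path):
            if file_name.endswith(".bin"):
                with open(os.path.join(path, file_name), mode='rb') as file:
                    raw = np.append(raw, np.fromfile(file, dtype=np.uint8), axis=0)
        raw = raw.reshape(-1, 3074)  # 2 label bytes + 3 x 32 x 32 image bytes
        coarse_labels = raw[:, 0]
        fine_labels = raw[:, 1]
        images = raw[:, 2:].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
        return images, coarse_labels, fine_labels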

@@ -245,17 +245,17 @@ def test_deterministic_run_distribution():
     # First dataset
     data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
-    random_crop_op = c_vision.RandomHorizontalFlip(0.1)
+    random_horizontal_flip_op = c_vision.RandomHorizontalFlip(0.1)
     decode_op = c_vision.Decode()
     data1 = data1.map(input_columns=["image"], operations=decode_op)
-    data1 = data1.map(input_columns=["image"], operations=random_crop_op)
+    data1 = data1.map(input_columns=["image"], operations=random_horizontal_flip_op)

     # Second dataset
     data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
     data2 = data2.map(input_columns=["image"], operations=decode_op)
     # If the seed is set in the constructor, the two ops produce a deterministic sequence
-    random_crop_op2 = c_vision.RandomHorizontalFlip(0.1)
-    data2 = data2.map(input_columns=["image"], operations=random_crop_op2)
+    random_horizontal_flip_op2 = c_vision.RandomHorizontalFlip(0.1)
+    data2 = data2.map(input_columns=["image"], operations=random_horizontal_flip_op2)

     for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
         np.testing.assert_equal(item1["image"], item2["image"])
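
For reference, a sketch (not part of the diff) of the seed setup the comment in this test refers to, assuming mindspore.dataset's global config module:

    import mindspore.dataset as ds

    # set the global seed before building both pipelines so the two
    # RandomHorizontalFlip ops draw the same random sequence
    ds.config.set_seed(1)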

File diff suppressed because it is too large