!1636 Add TCs to 3 vision transformation ops and cleanup config settings

Merge pull request !1636 from Tinazhang/randomGray
pull/1636/MERGE
Committed by mindspore-ci-bot via Gitee
commit 15a196eba9

File diff suppressed because it is too large.
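The config cleanup in the tests below replaces direct ds.config.set_seed / ds.config.set_num_parallel_workers calls with two helpers imported from the shared util module, config_get_set_seed and config_get_set_num_parallel_workers, whose definitions are not part of this diff. A minimal sketch of what they presumably do, assuming they simply record the current ds.config value, apply the new one, and return the recorded value so the test can restore it afterwards:

import mindspore.dataset as ds

def config_get_set_seed(seed_new):
    # Assumed helper: save the current seed, apply the new one, return the original.
    seed_original = ds.config.get_seed()
    ds.config.set_seed(seed_new)
    return seed_original

def config_get_set_num_parallel_workers(num_parallel_workers_new):
    # Assumed helper: save the current worker count, apply the new one, return the original.
    num_parallel_workers_original = ds.config.get_num_parallel_workers()
    ds.config.set_num_parallel_workers(num_parallel_workers_new)
    return num_parallel_workers_original

Each test restores the returned originals once it finishes, so a seed or worker count pinned for one md5 test no longer leaks into later tests.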

@@ -23,7 +23,8 @@ import mindspore.dataset.transforms.vision.py_transforms as py_vision
import mindspore.dataset.transforms.vision.utils as mode
import mindspore.dataset as ds
from mindspore import log as logger
-from util import diff_mse, save_and_check_md5, visualize
+from util import diff_mse, save_and_check_md5, visualize, \
+config_get_set_seed, config_get_set_num_parallel_workers
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
@@ -31,11 +32,11 @@ SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
GENERATE_GOLDEN = False
-def test_random_crop_and_resize_op(plot=False):
+def test_random_crop_and_resize_op_c(plot=False):
"""
-Test RandomCropAndResize op
+Test RandomCropAndResize op in c transforms
"""
-logger.info("test_random_crop_and_resize_op")
+logger.info("test_random_crop_and_resize_op_c")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@@ -64,13 +65,51 @@ def test_random_crop_and_resize_op(plot=False):
if plot:
visualize(original_images, crop_and_resize_images)
def test_random_crop_and_resize_op_py(plot=False):
"""
Test RandomCropAndResize op in py transforms
"""
logger.info("test_random_crop_and_resize_op_py")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms1 = [
py_vision.Decode(),
py_vision.RandomResizedCrop((256, 512), (1, 1), (0.5, 0.5)),
py_vision.ToTensor()
]
transform1 = py_vision.ComposeOp(transforms1)
data1 = data1.map(input_columns=["image"], operations=transform1())
# Second dataset
# Second dataset for comparison
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms2 = [
py_vision.Decode(),
py_vision.ToTensor()
]
transform2 = py_vision.ComposeOp(transforms2)
data2 = data2.map(input_columns=["image"], operations=transform2())
num_iter = 0
crop_and_resize_images = []
original_images = []
for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
crop_and_resize = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
original = cv2.resize(original, (512,256))
mse = diff_mse(crop_and_resize, original)
logger.info("random_crop_and_resize_op_{}, mse: {}".format(num_iter + 1, mse))
num_iter += 1
crop_and_resize_images.append(crop_and_resize)
original_images.append(original)
if plot:
visualize(original_images, crop_and_resize_images)
def test_random_crop_and_resize_01():
"""
Test RandomCropAndResize with md5 check, expected to pass
"""
logger.info("test_random_crop_and_resize_01")
-ds.config.set_seed(0)
-ds.config.set_num_parallel_workers(1)
+original_seed = config_get_set_seed(0)
+original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@@ -94,14 +133,18 @@ def test_random_crop_and_resize_01():
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_02():
"""
Test RandomCropAndResize with md5 check:Image interpolation mode is Inter.NEAREST,
expected to pass
"""
logger.info("test_random_crop_and_resize_02")
-ds.config.set_seed(0)
-ds.config.set_num_parallel_workers(1)
+original_seed = config_get_set_seed(0)
+original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@@ -125,13 +168,17 @@ def test_random_crop_and_resize_02():
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_03():
"""
Test RandomCropAndResize with md5 check: max_attempts is 1, expected to pass
"""
logger.info("test_random_crop_and_resize_03")
-ds.config.set_seed(0)
-ds.config.set_num_parallel_workers(1)
+original_seed = config_get_set_seed(0)
+original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@@ -155,27 +202,25 @@ def test_random_crop_and_resize_03():
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_04_c():
"""
Test RandomCropAndResize with c_tranforms: invalid range of scale (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_04_c")
-ds.config.set_seed(0)
-ds.config.set_num_parallel_workers(1)
-# Generate dataset
-data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
-decode_op = c_vision.Decode()
try:
+# Generate dataset
+data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
+decode_op = c_vision.Decode()
# If input range of scale is not in the order of (min, max), ValueError will be raised.
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5))
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_and_resize_op)
-image_list = []
-for item in data.create_dict_iterator():
-image = item["image"]
-image_list.append(image.shape)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input range is not valid" in str(e)
@@ -186,12 +231,10 @@ def test_random_crop_and_resize_04_py():
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_04_py")
-ds.config.set_seed(0)
-ds.config.set_num_parallel_workers(1)
-# Generate dataset
-data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
+# Generate dataset
+data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
# If input range of scale is not in the order of (min, max), ValueError will be raised.
@@ -200,10 +243,6 @@ def test_random_crop_and_resize_04_py():
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
-image_list = []
-for item in data.create_dict_iterator():
-image = item["image"]
-image_list.append(image.shape)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input range is not valid" in str(e)
@@ -214,21 +253,15 @@ def test_random_crop_and_resize_05_c():
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_c")
-ds.config.set_seed(0)
-ds.config.set_num_parallel_workers(1)
-# Generate dataset
-data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
-decode_op = c_vision.Decode()
try:
+# Generate dataset
+data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
+decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5))
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_and_resize_op)
-image_list = []
-for item in data.create_dict_iterator():
-image = item["image"]
-image_list.append(image.shape)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input range is not valid" in str(e)
@@ -239,12 +272,10 @@ def test_random_crop_and_resize_05_py():
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_py")
-ds.config.set_seed(0)
-ds.config.set_num_parallel_workers(1)
-# Generate dataset
-data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
+# Generate dataset
+data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
@@ -253,10 +284,6 @@ def test_random_crop_and_resize_05_py():
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
-image_list = []
-for item in data.create_dict_iterator():
-image = item["image"]
-image_list.append(image.shape)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input range is not valid" in str(e)
@@ -295,7 +322,8 @@ def test_random_crop_and_resize_comp(plot=False):
visualize(image_c_cropped, image_py_cropped)
if __name__ == "__main__":
-test_random_crop_and_resize_op(True)
+test_random_crop_and_resize_op_c(True)
+test_random_crop_and_resize_op_py(True)
test_random_crop_and_resize_01()
test_random_crop_and_resize_02()
test_random_crop_and_resize_03()
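diff_mse, imported from util above, is another helper whose definition is outside this diff. A rough sketch, assuming it returns the mean squared error of two same-shaped image arrays (the real helper may scale or normalize the result differently):

import numpy as np

def diff_mse(actual, expected):
    # Assumed helper: mean squared error between two same-shaped image arrays.
    actual = np.asarray(actual, dtype=np.float64)
    expected = np.asarray(expected, dtype=np.float64)
    return np.mean(np.square(actual - expected))

The comparison tests added in this change (for example test_random_horizontal_comp further down) rely on it to assert that the c_transforms and py_transforms pipelines produce essentially the same image (mse < 0.001).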

@@ -0,0 +1,189 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomGrayscale op in DE
"""
import numpy as np
import mindspore.dataset.transforms.vision.py_transforms as py_vision
import mindspore.dataset as ds
from mindspore import log as logger
from util import save_and_check_md5, visualize, \
config_get_set_seed, config_get_set_num_parallel_workers
GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def test_random_grayscale_valid_prob(plot=False):
"""
Test RandomGrayscale Op: valid input, expect to pass
"""
logger.info("test_random_grayscale_valid_prob")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms1 = [
py_vision.Decode(),
# Note: prob is 1 so the output should always be grayscale images
py_vision.RandomGrayscale(1),
py_vision.ToTensor()
]
transform1 = py_vision.ComposeOp(transforms1)
data1 = data1.map(input_columns=["image"], operations=transform1())
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms2 = [
py_vision.Decode(),
py_vision.ToTensor()
]
transform2 = py_vision.ComposeOp(transforms2)
data2 = data2.map(input_columns=["image"], operations=transform2())
image_gray = []
image = []
for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_gray.append(image1)
image.append(image2)
if plot:
visualize(image, image_gray)
def test_random_grayscale_input_grayscale_images():
"""
Test RandomGrayscale Op: valid parameter with grayscale images as input, expect to pass
"""
logger.info("test_random_grayscale_input_grayscale_images")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms1 = [
py_vision.Decode(),
py_vision.Grayscale(1),
# Note: If the input images is grayscale image with 1 channel.
py_vision.RandomGrayscale(0.5),
py_vision.ToTensor()
]
transform1 = py_vision.ComposeOp(transforms1)
data1 = data1.map(input_columns=["image"], operations=transform1())
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms2 = [
py_vision.Decode(),
py_vision.ToTensor()
]
transform2 = py_vision.ComposeOp(transforms2)
data2 = data2.map(input_columns=["image"], operations=transform2())
image_gray = []
image = []
for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_gray.append(image1)
image.append(image2)
assert len(image1.shape) == 3
assert image1.shape[2] == 1
assert len(image2.shape) == 3
assert image2.shape[2] == 3
# Restore config
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_grayscale_md5_valid_input():
"""
Test RandomGrayscale with md5 comparison: valid parameter, expect to pass
"""
logger.info("test_random_grayscale_md5_valid_input")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomGrayscale(0.8),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
# Check output images with md5 comparison
filename = "random_grayscale_01_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_grayscale_md5_no_param():
"""
Test RandomGrayscale with md5 comparison: no parameter given, expect to pass
"""
logger.info("test_random_grayscale_md5_no_param")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomGrayscale(),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
# Check output images with md5 comparison
filename = "random_grayscale_02_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_grayscale_invalid_param():
"""
Test RandomGrayscale: invalid parameter given, expect to raise error
"""
logger.info("test_random_grayscale_invalid_param")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
py_vision.RandomGrayscale(1.5),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not within the required range" in str(e)
if __name__ == "__main__":
test_random_grayscale_valid_prob(True)
test_random_grayscale_input_grayscale_images()
test_random_grayscale_md5_valid_input()
test_random_grayscale_md5_no_param()
test_random_grayscale_invalid_param()
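The md5-based checks above all go through save_and_check_md5 from util, which is also not shown in this diff. A sketch of the expected behaviour, assuming it hashes every row the dataset produces and either records the digests as a golden .npz file (generate_golden=True) or compares them against the stored golden file; the golden-file location and names below are assumptions:

import hashlib
import numpy as np

def save_and_check_md5(data, filename, generate_golden=False):
    # Assumed helper: record or verify md5 digests of every column of every row.
    golden_path = "../data/dataset/golden/" + filename  # hypothetical golden-file location
    digests = []
    for item in data.create_dict_iterator():
        for column in item.values():
            digests.append(hashlib.md5(np.asarray(column).tobytes()).hexdigest())
    if generate_golden:
        np.savez(golden_path, md5=np.array(digests))
    else:
        golden = np.load(golden_path)["md5"]
        assert list(golden) == digests

Under that assumption, refreshing the golden files after an intentional behaviour change is just a matter of temporarily setting GENERATE_GOLDEN = True at the top of the test module and running it directly.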

@@ -17,10 +17,14 @@ Testing the random horizontal flip op in DE
"""
import matplotlib.pyplot as plt
import numpy as np
import mindspore.dataset as ds
-import mindspore.dataset.transforms.vision.c_transforms as vision
+import mindspore.dataset.transforms.vision.c_transforms as c_vision
+import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore import log as logger
+from util import save_and_check_md5, visualize, diff_mse, \
+config_get_set_seed, config_get_set_num_parallel_workers
+GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
@@ -37,7 +41,7 @@ def h_flip(image):
return image
-def visualize(image_de_random_horizontal, image_pil_random_horizontal, mse, image_original):
+def visualize_mse(image_de_random_horizontal, image_pil_random_horizontal, mse, image_original):
"""
visualizes the image using DE op and Numpy op
"""
@@ -61,14 +65,14 @@ def visualize(image_de_random_horizontal, image_pil_random_horizontal, mse, imag
def test_random_horizontal_op():
"""
-Test random_horizontal
+Test RandomHorizontalFlip op
"""
-logger.info("Test random_horizontal")
+logger.info("test_random_horizontal_op")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
-decode_op = vision.Decode()
+decode_op = c_vision.Decode()
-random_horizontal_op = vision.RandomHorizontalFlip()
+random_horizontal_op = c_vision.RandomHorizontalFlip()
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_horizontal_op)
@@ -84,17 +88,144 @@ def test_random_horizontal_op():
break
image_h_flipped = item1["image"]
image = item2["image"]
image_h_flipped_2 = h_flip(image)
-diff = image_h_flipped - image_h_flipped_2
-mse = np.sum(np.power(diff, 2))
+mse = diff_mse(image_h_flipped, image_h_flipped_2)
logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
# Uncomment below line if you want to visualize images
-# visualize(image_h_flipped, image_h_flipped_2, mse, image)
+# visualize_mse(image_h_flipped, image_h_flipped_2, mse, image)
num_iter += 1
def test_random_horizontal_valid_prob_c():
"""
Test RandomHorizontalFlip op with c_transforms: valid non-default input, expect to pass
"""
logger.info("test_random_horizontal_valid_prob_c")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_horizontal_op = c_vision.RandomHorizontalFlip(0.8)
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_horizontal_op)
filename = "random_horizontal_01_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_horizontal_valid_prob_py():
"""
Test RandomHorizontalFlip op with py_transforms: valid non-default input, expect to pass
"""
logger.info("test_random_horizontal_valid_prob_py")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomHorizontalFlip(0.8),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
filename = "random_horizontal_01_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_horizontal_invalid_prob_c():
"""
Test RandomHorizontalFlip op in c_transforms: invalid input, expect to raise error
"""
logger.info("test_random_horizontal_invalid_prob_c")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
# Note: Valid range of prob should be [0.0, 1.0]
random_horizontal_op = c_vision.RandomHorizontalFlip(1.5)
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_horizontal_op)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not" in str(e)
def test_random_horizontal_invalid_prob_py():
"""
Test RandomHorizontalFlip op in py_transforms: invalid input, expect to raise error
"""
logger.info("test_random_horizontal_invalid_prob_py")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
# Note: Valid range of prob should be [0.0, 1.0]
py_vision.RandomHorizontalFlip(1.5),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not" in str(e)
def test_random_horizontal_comp(plot=False):
"""
Test test_random_horizontal_flip and compare between python and c image augmentation ops
"""
logger.info("test_random_horizontal_comp")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
# Note: The image must be flipped if prob is set to be 1
random_horizontal_op = c_vision.RandomHorizontalFlip(1)
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_horizontal_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
# Note: The image must be flipped if prob is set to be 1
py_vision.RandomHorizontalFlip(1),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data2 = data2.map(input_columns=["image"], operations=transform())
images_list_c = []
images_list_py = []
for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
image_c = item1["image"]
image_py = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
images_list_c.append(image_c)
images_list_py.append(image_py)
# Check if the output images are the same
mse = diff_mse(image_c, image_py)
assert mse < 0.001
if plot:
visualize(images_list_c, images_list_py)
if __name__ == "__main__":
test_random_horizontal_op()
test_random_horizontal_valid_prob_c()
test_random_horizontal_valid_prob_py()
test_random_horizontal_invalid_prob_c()
test_random_horizontal_invalid_prob_py()
test_random_horizontal_comp(True)
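visualize, imported from util in these files, only runs when a test is invoked manually with plot=True. A minimal sketch of such a helper, assuming it shows the two image lists (for example original vs. flipped) side by side with matplotlib; the layout is illustrative and not the actual util implementation:

import matplotlib.pyplot as plt

def visualize(first_images, second_images):
    # Assumed helper: plot each pair of images on two rows for visual comparison.
    num = len(first_images)
    for i in range(num):
        plt.subplot(2, num, i + 1)
        plt.imshow(first_images[i])
        plt.subplot(2, num, num + i + 1)
        plt.imshow(second_images[i])
    plt.show()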

@@ -19,8 +19,13 @@ import matplotlib.pyplot as plt
import numpy as np
import mindspore.dataset as ds
-import mindspore.dataset.transforms.vision.c_transforms as vision
+import mindspore.dataset.transforms.vision.c_transforms as c_vision
+import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore import log as logger
+from util import save_and_check_md5, visualize, diff_mse, \
+config_get_set_seed, config_get_set_num_parallel_workers
+GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
@@ -37,7 +42,7 @@ def v_flip(image):
return image
-def visualize(image_de_random_vertical, image_pil_random_vertical, mse, image_original):
+def visualize_with_mse(image_de_random_vertical, image_pil_random_vertical, mse, image_original):
"""
visualizes the image using DE op and Numpy op
"""
@@ -61,14 +66,14 @@ def visualize(image_de_random_vertical, image_pil_random_vertical, mse, image_or
def test_random_vertical_op():
"""
-Test random_vertical
+Test random_vertical with default probability
"""
logger.info("Test random_vertical")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
-decode_op = vision.Decode()
+decode_op = c_vision.Decode()
-random_vertical_op = vision.RandomVerticalFlip()
+random_vertical_op = c_vision.RandomVerticalFlip()
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_vertical_op)
@@ -92,9 +97,139 @@ def test_random_vertical_op():
mse = np.sum(np.power(diff, 2))
logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
# Uncomment below line if you want to visualize images
-# visualize(image_v_flipped, image_v_flipped_2, mse, image)
+# visualize_with_mse(image_v_flipped, image_v_flipped_2, mse, image)
num_iter += 1
def test_random_vertical_valid_prob_c():
"""
Test RandomVerticalFlip op with c_transforms: valid non-default input, expect to pass
"""
logger.info("test_random_vertical_valid_prob_c")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_horizontal_op = c_vision.RandomVerticalFlip(0.8)
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_horizontal_op)
filename = "random_vertical_01_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_vertical_valid_prob_py():
"""
Test RandomVerticalFlip op with py_transforms: valid non-default input, expect to pass
"""
logger.info("test_random_vertical_valid_prob_py")
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomVerticalFlip(0.8),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
filename = "random_vertical_01_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_vertical_invalid_prob_c():
"""
Test RandomVerticalFlip op in c_transforms: invalid input, expect to raise error
"""
logger.info("test_random_vertical_invalid_prob_c")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
# Note: Valid range of prob should be [0.0, 1.0]
random_horizontal_op = c_vision.RandomVerticalFlip(1.5)
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_horizontal_op)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not" in str(e)
def test_random_vertical_invalid_prob_py():
"""
Test RandomVerticalFlip op in py_transforms: invalid input, expect to raise error
"""
logger.info("test_random_vertical_invalid_prob_py")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
# Note: Valid range of prob should be [0.0, 1.0]
py_vision.RandomVerticalFlip(1.5),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input is not" in str(e)
def test_random_vertical_comp(plot=False):
"""
Test test_random_vertical_flip and compare between python and c image augmentation ops
"""
logger.info("test_random_vertical_comp")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
# Note: The image must be flipped if prob is set to be 1
random_horizontal_op = c_vision.RandomVerticalFlip(1)
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_horizontal_op)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
# Note: The image must be flipped if prob is set to be 1
py_vision.RandomVerticalFlip(1),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data2 = data2.map(input_columns=["image"], operations=transform())
images_list_c = []
images_list_py = []
for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
image_c = item1["image"]
image_py = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
images_list_c.append(image_c)
images_list_py.append(image_py)
# Check if the output images are the same
mse = diff_mse(image_c, image_py)
assert mse < 0.001
if plot:
visualize(images_list_c, images_list_py)
if __name__ == "__main__":
test_random_vertical_op()
test_random_vertical_valid_prob_c()
test_random_vertical_valid_prob_py()
test_random_vertical_invalid_prob_c()
test_random_vertical_invalid_prob_py()
test_random_vertical_comp(True)

@@ -28,7 +28,7 @@ from mindspore import log as logger
from mindspore.dataset.transforms.vision import Inter
from test_minddataset_sampler import add_and_remove_cv_file, get_data, CV_DIR_NAME, CV_FILE_NAME
+from util import config_get_set_num_parallel_workers
def test_imagefolder(remove_json_files=True):
"""
@@ -176,6 +176,7 @@ def test_random_crop():
logger.info("test_random_crop")
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
+original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"])
@@ -201,6 +202,9 @@ def test_random_crop():
assert np.array_equal(item1['image'], item1_1['image'])
_ = item2["image"]
+# Restore configuration num_parallel_workers
+ds.config.set_num_parallel_workers(original_num_parallel_workers)
def validate_jsonfile(filepath):
try:
