Mindcon Shandong bug fix part1

fix rebase conflict
pull/10821/head
13465716071 4 years ago committed by ms_yan
parent 2f883fb4c2
commit 3594bd581c

File diff suppressed because it is too large

@@ -72,11 +72,9 @@ class GraphData:
the server automatically exits (default=True).
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> nodes = data_graph.get_all_nodes(0)
>>> features = data_graph.get_node_feature(nodes, [1])
>>> graph_dataset = ds.GraphData(graph_dataset_dir, 2)
>>> nodes = graph_dataset.get_all_nodes(0)
>>> features = graph_dataset.get_node_feature(nodes, [1])
"""
@check_gnn_graphdata
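Read together, the renamed example builds and queries a graph in three steps. A minimal end-to-end sketch, assuming `graph_dataset_dir` points at a graph dataset file prepared for `GraphData`, and that node type 0 and feature type 1 exist in that file:

    import mindspore.dataset as ds

    graph_dataset_dir = "/path/to/graph_dataset_file"       # placeholder path
    graph_dataset = ds.GraphData(graph_dataset_dir, 2)      # 2 parallel workers
    nodes = graph_dataset.get_all_nodes(0)                  # every node of type 0
    features = graph_dataset.get_node_feature(nodes, [1])   # feature type 1 for each node
    print(len(nodes), len(features))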
@@ -116,10 +114,7 @@ class GraphData:
numpy.ndarray, array of nodes.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> nodes = data_graph.get_all_nodes(0)
>>> nodes = graph_dataset.get_all_nodes(0)
Raises:
TypeError: If `node_type` is not integer.
@@ -140,10 +135,7 @@ class GraphData:
numpy.ndarray, array of edges.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> nodes = data_graph.get_all_edges(0)
>>> edges = graph_dataset.get_all_edges(0)
Raises:
TypeError: If `edge_type` is not integer.
@@ -183,11 +175,8 @@ class GraphData:
numpy.ndarray, array of neighbors.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> nodes = data_graph.get_all_nodes(0)
>>> neighbors = data_graph.get_all_neighbors(nodes, 0)
>>> nodes = graph_dataset.get_all_nodes(0)
>>> neighbors = graph_dataset.get_all_neighbors(nodes, 0)
Raises:
TypeError: If `node_list` is not list or ndarray.
@@ -222,11 +211,8 @@ class GraphData:
numpy.ndarray, array of neighbors.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> nodes = data_graph.get_all_nodes(0)
>>> neighbors = data_graph.get_sampled_neighbors(nodes, [2, 2], [0, 0])
>>> nodes = graph_dataset.get_all_nodes(0)
>>> neighbors = graph_dataset.get_sampled_neighbors(nodes, [2, 2], [0, 0])
Raises:
TypeError: If `node_list` is not list or ndarray.
@@ -254,11 +240,8 @@ class GraphData:
numpy.ndarray, array of neighbors.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> nodes = data_graph.get_all_nodes(0)
>>> neg_neighbors = data_graph.get_neg_sampled_neighbors(nodes, 5, 0)
>>> nodes = graph_dataset.get_all_nodes(0)
>>> neg_neighbors = graph_dataset.get_neg_sampled_neighbors(nodes, 5, 0)
Raises:
TypeError: If `node_list` is not list or ndarray.
@@ -283,11 +266,8 @@ class GraphData:
numpy.ndarray, array of features.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> nodes = data_graph.get_all_nodes(0)
>>> features = data_graph.get_node_feature(nodes, [1])
>>> nodes = graph_dataset.get_all_nodes(0)
>>> features = graph_dataset.get_node_feature(nodes, [1])
Raises:
TypeError: If `node_list` is not list or ndarray.
@@ -315,11 +295,8 @@ class GraphData:
numpy.ndarray, array of features.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> edges = data_graph.get_all_edges(0)
>>> features = data_graph.get_edge_feature(edges, [1])
>>> edges = graph_dataset.get_all_edges(0)
>>> features = graph_dataset.get_edge_feature(edges, [1])
Raises:
TypeError: If `edge_list` is not list or ndarray.
@@ -370,10 +347,7 @@ class GraphData:
numpy.ndarray, array of nodes.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> data_graph = ds.GraphData('dataset_file', 2)
>>> nodes = data_graph.random_walk([1,2], [1,2,1,2,1])
>>> nodes = graph_dataset.random_walk([1,2], [1,2,1,2,1])
Raises:
TypeError: If `target_nodes` is not list or ndarray.

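The renamed GraphData methods above compose into a typical GNN sampling step. A sketch under the same placeholder assumptions (node type 0 and neighbor type 0 are taken to exist in the file; the meta path values are illustrative):

    import mindspore.dataset as ds

    graph_dataset = ds.GraphData("/path/to/graph_dataset_file", 2)   # placeholder path
    seeds = graph_dataset.get_all_nodes(0)
    # two hops, sampling 2 neighbors of type 0 at each hop
    neighbors = graph_dataset.get_sampled_neighbors(seeds, [2, 2], [0, 0])
    # 5 negative samples of type 0 for each seed node
    negatives = graph_dataset.get_neg_sampled_neighbors(seeds, 5, 0)
    # walks following meta path [1, 2, 1, 2, 1], starting from nodes 1 and 2
    walks = graph_dataset.random_walk([1, 2], [1, 2, 1, 2, 1])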
@@ -245,13 +245,11 @@ class DistributedSampler(BuiltinSampler):
should be no more than num_shards.
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "path/to/imagefolder_directory"
>>>
>>> # creates a distributed sampler with 10 shards in total. This shard is shard 5.
>>> sampler = ds.DistributedSampler(10, 5)
>>> data = ds.ImageFolderDataset(dataset_dir, num_parallel_workers=8, sampler=sampler)
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir,
... num_parallel_workers=8,
... sampler=sampler)
Raises:
ValueError: If num_shards is not positive.
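As a sketch of how the two sharding arguments behave (the directory is a placeholder; with 10 shards, each of the 10 training processes reads a disjoint tenth of the dataset, and this process reads shard 5):

    import mindspore.dataset as ds

    image_folder_dataset_dir = "/path/to/image_folder_dataset_directory"  # placeholder
    sampler = ds.DistributedSampler(num_shards=10, shard_id=5)
    dataset = ds.ImageFolderDataset(image_folder_dataset_dir,
                                    num_parallel_workers=8,
                                    sampler=sampler)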
@@ -327,13 +325,11 @@ class PKSampler(BuiltinSampler):
num_samples (int, optional): The number of samples to draw (default=None, all elements).
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "path/to/imagefolder_directory"
>>>
>>> # creates a PKSampler that will get 3 samples from every class.
>>> sampler = ds.PKSampler(3)
>>> data = ds.ImageFolderDataset(dataset_dir, num_parallel_workers=8, sampler=sampler)
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir,
... num_parallel_workers=8,
... sampler=sampler)
Raises:
ValueError: If num_val is not positive.
@@ -396,13 +392,11 @@ class RandomSampler(BuiltinSampler):
num_samples (int, optional): Number of elements to sample (default=None, all elements).
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "path/to/imagefolder_directory"
>>>
>>> # creates a RandomSampler
>>> sampler = ds.RandomSampler()
>>> data = ds.ImageFolderDataset(dataset_dir, num_parallel_workers=8, sampler=sampler)
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir,
... num_parallel_workers=8,
... sampler=sampler)
Raises:
ValueError: If replacement is not boolean.
@@ -452,13 +446,11 @@ class SequentialSampler(BuiltinSampler):
num_samples (int, optional): Number of elements to sample (default=None, all elements).
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "path/to/imagefolder_directory"
>>>
>>> # creates a SequentialSampler
>>> sampler = ds.SequentialSampler()
>>> data = ds.ImageFolderDataset(dataset_dir, num_parallel_workers=8, sampler=sampler)
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir,
... num_parallel_workers=8,
... sampler=sampler)
"""
def __init__(self, start_index=None, num_samples=None):
@@ -503,15 +495,13 @@ class SubsetSampler(BuiltinSampler):
num_samples (int, optional): Number of elements to sample (default=None, all elements).
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "path/to/imagefolder_directory"
>>>
>>> indices = [0, 1, 2, 3, 7, 88, 119]
>>> indices = [0, 1, 2, 3, 4, 5]
>>>
>>> # creates a SubsetSampler, will sample from the provided indices
>>> sampler = ds.SubsetSampler(indices)
>>> data = ds.ImageFolderDataset(dataset_dir, num_parallel_workers=8, sampler=sampler)
>>> # creates a SubsetRandomSampler, will sample from the provided indices
>>> sampler = ds.SubsetRandomSampler(indices)
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir,
... num_parallel_workers=8,
... sampler=sampler)
"""
def __init__(self, indices, num_samples=None):
@@ -603,15 +593,13 @@ class WeightedRandomSampler(BuiltinSampler):
replacement (bool): If True, put the sample ID back for the next draw (default=True).
Examples:
>>> import mindspore.dataset as ds
>>>
>>> dataset_dir = "path/to/imagefolder_directory"
>>>
>>> weights = [0.9, 0.01, 0.4, 0.8, 0.1, 0.1, 0.3]
>>>
>>> # creates a WeightedRandomSampler that will sample 4 elements without replacement
>>> sampler = ds.WeightedRandomSampler(weights, 4)
>>> data = ds.ImageFolderDataset(dataset_dir, num_parallel_workers=8, sampler=sampler)
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir,
... num_parallel_workers=8,
... sampler=sampler)
Raises:
ValueError: If num_samples is not positive.

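Because the weights are relative rather than normalized, WeightedRandomSampler can be checked quickly on an in-memory dataset. A small sketch (the data and column name are illustrative):

    import mindspore.dataset as ds

    data = [1, 2, 3, 4, 5, 6, 7]
    weights = [0.9, 0.01, 0.4, 0.8, 0.1, 0.1, 0.3]   # relative, need not sum to 1
    sampler = ds.WeightedRandomSampler(weights, num_samples=4)
    dataset = ds.NumpySlicesDataset(data, column_names=["col"], sampler=sampler)
    for row in dataset.create_dict_iterator(output_numpy=True):
        print(row["col"])   # indices with larger weights are drawn more often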
@@ -40,16 +40,13 @@ def serialize(dataset, json_filepath=""):
OSError: cannot open a file.
Examples:
>>> import mindspore.dataset as ds
>>> import mindspore.dataset.transforms.c_transforms as C
>>> DATA_DIR = "../../data/testMnistData"
>>> data = ds.MnistDataset(DATA_DIR, 100)
>>> one_hot_encode = C.OneHot(10) # num_classes is input argument
>>> data = data.map(operation=one_hot_encode, input_column_names="label")
>>> data = data.batch(batch_size=10, drop_remainder=True)
>>>
>>> ds.engine.serialize(data, json_filepath="mnist_dataset_pipeline.json") # serialize it to json file
>>> serialized_data = ds.engine.serialize(data) # serialize it to Python dict
>>> dataset = ds.MnistDataset(mnist_dataset_dir, 100)
>>> one_hot_encode = c_transforms.OneHot(10) # num_classes is input argument
>>> dataset = dataset.map(operations=one_hot_encode, input_columns="label")
>>> dataset = dataset.batch(batch_size=10, drop_remainder=True)
>>> # serialize it to json file
>>> ds.engine.serialize(dataset, json_filepath="/path/to/mnist_dataset_pipeline.json")
>>> serialized_data = ds.engine.serialize(dataset) # serialize it to Python dict
"""
return dataset.to_json(json_filepath)
@@ -69,20 +66,16 @@ def deserialize(input_dict=None, json_filepath=None):
OSError: cannot open a file.
Examples:
>>> import mindspore.dataset as ds
>>> import mindspore.dataset.transforms.c_transforms as C
>>> DATA_DIR = "../../data/testMnistData"
>>> data = ds.MnistDataset(DATA_DIR, 100)
>>> one_hot_encode = C.OneHot(10) # num_classes is input argument
>>> data = data.map(operation=one_hot_encode, input_column_names="label")
>>> data = data.batch(batch_size=10, drop_remainder=True)
>>>
>>> dataset = ds.MnistDataset(mnist_dataset_dir, 100)
>>> one_hot_encode = c_transforms.OneHot(10) # num_classes is input argument
>>> dataset = dataset.map(operations=one_hot_encode, input_columns="label")
>>> dataset = dataset.batch(batch_size=10, drop_remainder=True)
>>> # Use case 1: to/from json file
>>> ds.engine.serialize(data, json_filepath="mnist_dataset_pipeline.json")
>>> data = ds.engine.deserialize(json_filepath="mnist_dataset_pipeline.json")
>>> ds.engine.serialize(dataset, json_filepath="/path/to/mnist_dataset_pipeline.json")
>>> dataset = ds.engine.deserialize(json_filepath="/path/to/mnist_dataset_pipeline.json")
>>> # Use case 2: to/from Python dictionary
>>> serialized_data = ds.engine.serialize(data)
>>> data = ds.engine.deserialize(input_dict=serialized_data)
>>> serialized_data = ds.engine.serialize(dataset)
>>> dataset = ds.engine.deserialize(input_dict=serialized_data)
"""
data = None

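A round trip through both serialize targets, following the corrected example above (the MNIST path is a placeholder; num_samples is passed by keyword to avoid relying on argument order):

    import mindspore.dataset as ds
    import mindspore.dataset.transforms.c_transforms as c_transforms

    mnist_dataset_dir = "/path/to/mnist_dataset_directory"   # placeholder
    dataset = ds.MnistDataset(mnist_dataset_dir, num_samples=100)
    dataset = dataset.map(operations=c_transforms.OneHot(10), input_columns=["label"])
    dataset = dataset.batch(batch_size=10, drop_remainder=True)

    serialized = ds.engine.serialize(dataset)                 # Python dict
    rebuilt = ds.engine.deserialize(input_dict=serialized)    # equivalent pipeline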
File diff suppressed because it is too large

@@ -46,14 +46,8 @@ class OneHot(cde.OneHotOp):
RuntimeError: feature size is bigger than num_classes.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>> import mindspore.dataset.vision.c_transforms as c_vision
>>>
>>> onehot_op = c_transforms.OneHot(num_classes=10)
>>> data1 = data1.map(operations=onehot_op, input_columns=["label"])
>>> mixup_batch_op = c_vision.MixUpBatch(alpha=0.8)
>>> data1 = data1.batch(4)
>>> data1 = data1.map(operations=mixup_batch_op, input_columns=["image", "label"])
>>> mnist_dataset = mnist_dataset.map(operations=onehot_op, input_columns=["label"])
"""
@check_num_classes
@@ -72,9 +66,15 @@ class Fill(cde.FillOp):
to fill created tensor with.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>>
>>> import numpy as np
>>> from mindspore.dataset import GeneratorDataset
>>> # Generate 1d int numpy array from 0 - 63
>>> def generator_1d():
...     for i in range(64):
...         yield (np.array([i]),)
>>> generator_dataset = GeneratorDataset(generator_1d, column_names='col')
>>> fill_op = c_transforms.Fill(3)
>>> generator_dataset = generator_dataset.map(operations=fill_op)
"""
@check_fill_value
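Fill keeps the shape and type of the input and only replaces the values, which is easier to see on an in-memory dataset than through a generator. A sketch:

    import mindspore.dataset as ds
    import mindspore.dataset.transforms.c_transforms as c_transforms

    dataset = ds.NumpySlicesDataset([[1, 2, 3]], column_names=["col"])
    dataset = dataset.map(operations=c_transforms.Fill(3), input_columns=["col"])
    for row in dataset.create_dict_iterator(output_numpy=True):
        print(row["col"])   # [3 3 3]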
@@ -90,10 +90,16 @@ class TypeCast(cde.TypeCastOp):
data_type (mindspore.dtype): mindspore.dtype to be cast to.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>> import numpy as np
>>> import mindspore.common.dtype as mstype
>>>
>>> from mindspore.dataset import GeneratorDataset
>>> # Generate 1d int numpy array from 0 - 63
>>> def generator_1d():
...     for i in range(64):
...         yield (np.array([i]),)
>>> generator_dataset = GeneratorDataset(generator_1d, column_names='col')
>>> type_cast_op = c_transforms.TypeCast(mstype.int32)
>>> generator_dataset = generator_dataset.map(operations=type_cast_op)
"""
@check_de_type
@@ -149,14 +155,15 @@ class Slice(cde.SliceOp):
5. :py:obj:`Ellipsis`: Slice the whole dimension. Similar to `:` in Python indexing.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>>
>>> # Data before
>>> # | col |
>>> # +---------+
>>> # | [1,2,3] |
>>> # +---------+
>>> data1 = data1.map(operations=c_transforms.Slice(slice(1,3))) # slice indices 1 and 2 only
>>> data = [[1, 2, 3]]
>>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["col"])
>>> # slice indices 1 and 2 only
>>> numpy_slices_dataset = numpy_slices_dataset.map(operations=c_transforms.Slice(slice(1,3)))
>>> # Data after
>>> # | col |
>>> # +---------+
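The before/after tables can be reproduced directly; Slice(slice(1, 3)) mirrors data[1:3] in plain Python indexing. A sketch:

    import mindspore.dataset as ds
    import mindspore.dataset.transforms.c_transforms as c_transforms

    dataset = ds.NumpySlicesDataset([[1, 2, 3]], column_names=["col"])
    dataset = dataset.map(operations=c_transforms.Slice(slice(1, 3)))
    for row in dataset.create_dict_iterator(output_numpy=True):
        print(row["col"])   # [2 3]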
@@ -200,16 +207,17 @@ class Mask(cde.MaskOp):
dtype (mindspore.dtype, optional): Type of the generated mask (Default to bool).
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>>
>>> from mindspore.dataset.transforms.c_transforms import Relational
>>> # Data before
>>> # | col1 |
>>> # | col |
>>> # +---------+
>>> # | [1,2,3] |
>>> # +---------+
>>> data1 = data1.map(operations=c_transforms.Mask(Relational.EQ, 2))
>>> data = [[1, 2, 3]]
>>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["col"])
>>> numpy_slices_dataset = numpy_slices_dataset.map(operations=c_transforms.Mask(Relational.EQ, 2))
>>> # Data after
>>> # | col1 |
>>> # | col |
>>> # +--------------------+
>>> # | [False,True,False] |
>>> # +--------------------+
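The same pattern verifies the Mask output shown in the table: the comparison runs element-wise against the constant. A sketch:

    import mindspore.dataset as ds
    import mindspore.dataset.transforms.c_transforms as c_transforms
    from mindspore.dataset.transforms.c_transforms import Relational

    dataset = ds.NumpySlicesDataset([[1, 2, 3]], column_names=["col"])
    dataset = dataset.map(operations=c_transforms.Mask(Relational.EQ, 2))
    for row in dataset.create_dict_iterator(output_numpy=True):
        print(row["col"])   # [False  True False]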
@@ -233,14 +241,15 @@ class PadEnd(cde.PadEndOp):
string in case of tensors of strings.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>>
>>> # Data before
>>> # | col |
>>> # +---------+
>>> # | [1,2,3] |
>>> # +---------+
>>> data1 = data1.map(operations=c_transforms.PadEnd(pad_shape=[4], pad_value=10))
>>> data = [[1, 2, 3]]
>>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["col"])
>>> numpy_slices_dataset = numpy_slices_dataset.map(operations=c_transforms.PadEnd(pad_shape=[4],
... pad_value=10))
>>> # Data after
>>> # | col |
>>> # +------------+
@@ -265,12 +274,14 @@ class Concatenate(cde.ConcatenateOp):
append (numpy.array, optional): NumPy array to be appended to the already concatenated tensors (Default=None).
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>>
>>> import numpy as np
>>> # concatenate string
>>> prepend_tensor = np.array(["dw", "df"], dtype='S')
>>> append_tensor = np.array(["dwsdf", "df"], dtype='S')
>>> concatenate_op = c_transforms.Concatenate(0, prepend_tensor, append_tensor)
>>> data = [["This","is","a","string"]]
>>> dataset = ds.NumpySlicesDataset(data)
>>> dataset = dataset.map(operations=concatenate_op)
"""
@check_concat_type
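Concatenate joins the prepend tensor, each input row, and the append tensor along the given axis. A self-contained sketch of the string example above, with a column name added so the result can be inspected:

    import numpy as np
    import mindspore.dataset as ds
    import mindspore.dataset.transforms.c_transforms as c_transforms

    prepend_tensor = np.array(["dw", "df"], dtype='S')
    append_tensor = np.array(["dwsdf", "df"], dtype='S')
    concatenate_op = c_transforms.Concatenate(0, prepend_tensor, append_tensor)
    dataset = ds.NumpySlicesDataset([["This", "is", "a", "string"]], column_names=["col"])
    dataset = dataset.map(operations=concatenate_op, input_columns=["col"])
    for row in dataset.create_dict_iterator(output_numpy=True):
        print(row["col"])   # [b'dw' b'df' b'This' b'is' b'a' b'string' b'dwsdf' b'df']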
@@ -287,15 +298,17 @@ class Duplicate(cde.DuplicateOp):
Duplicate the input tensor to a new output tensor. The input tensor is carried over to the output list.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>>
>>> # Data before
>>> # | x |
>>> # +---------+
>>> # | [1,2,3] |
>>> # +---------+
>>> data1 = data1.map(operations=c_transforms.Duplicate(), input_columns=["x"],
>>> output_columns=["x", "y"], column_order=["x", "y"])
>>> data = [[1,2,3]]
>>> numpy_slices_dataset = ds.NumpySlicesDataset(data, ["x"])
>>> numpy_slices_dataset = numpy_slices_dataset.map(operations=c_transforms.Duplicate(),
... input_columns=["x"],
... output_columns=["x", "y"],
... column_order=["x", "y"])
>>> # Data after
>>> # | x | y |
>>> # +---------+---------+
@@ -319,15 +332,17 @@ class Unique(cde.UniqueOp):
Call batch op before calling this function.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>>
>>> # Data before
>>> # | x |
>>> # +--------------------+
>>> # | [[0,1,2], [1,2,3]] |
>>> # +--------------------+
>>> data1 = data1.map(operations=c_transforms.Unique(), input_columns=["x"],
>>> output_columns=["x", "y", "z"], column_order=["x", "y", "z"])
>>> data = [[[0,1,2], [1,2,3]]]
>>> dataset = ds.NumpySlicesDataset(data, ["x"])
>>> dataset = dataset.map(operations=c_transforms.Unique(),
... input_columns=["x"],
... output_columns=["x", "y", "z"],
... column_order=["x", "y", "z"])
>>> # Data after
>>> # | x | y |z |
>>> # +---------+-----------------+---------+
@@ -343,11 +358,8 @@ class Compose():
transforms (list): List of transformations to be applied.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>> import mindspore.dataset.vision.c_transforms as c_vision
>>>
>>> compose = c_transforms.Compose([c_vision.Decode(), c_vision.RandomCrop(512)])
>>> data1 = data1.map(operations=compose)
>>> image_folder_dataset = image_folder_dataset.map(operations=compose)
"""
@check_random_transform_ops
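A runnable version of the Compose example above, with the dataset construction spelled out (the directory is a placeholder, and RandomCrop(512) assumes the decoded images are at least 512x512):

    import mindspore.dataset as ds
    import mindspore.dataset.transforms.c_transforms as c_transforms
    import mindspore.dataset.vision.c_transforms as c_vision

    image_folder_dataset_dir = "/path/to/image_folder_dataset_directory"  # placeholder
    image_folder_dataset = ds.ImageFolderDataset(image_folder_dataset_dir)
    compose = c_transforms.Compose([c_vision.Decode(), c_vision.RandomCrop(512)])
    image_folder_dataset = image_folder_dataset.map(operations=compose,
                                                    input_columns=["image"])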
@@ -372,11 +384,8 @@ class RandomApply():
prob (float, optional): The probability to apply the transformation list (default=0.5)
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>> import mindspore.dataset.vision.c_transforms as c_vision
>>>
>>> rand_apply = c_transforms.RandomApply([c_vision.RandomCrop(512)])
>>> data1 = data1.map(operations=rand_apply)
>>> image_folder_dataset = image_folder_dataset.map(operations=rand_apply)
"""
@check_random_transform_ops
@@ -402,11 +411,8 @@ class RandomChoice():
transforms (list): List of transformations to be chosen from to apply.
Examples:
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>> import mindspore.dataset.vision.c_transforms as c_vision
>>>
>>> rand_choice = c_transforms.RandomChoice([c_vision.CenterCrop(50), c_vision.RandomCrop(512)])
>>> data1 = data1.map(operations=rand_choice)
>>> image_folder_dataset = image_folder_dataset.map(operations=rand_choice)
"""
@check_random_transform_ops

@@ -31,11 +31,9 @@ class OneHotOp:
(Default=0.0 means no smoothing is applied.)
Examples:
>>> import mindspore.dataset.transforms as py_transforms
>>>
>>> transforms_list = [py_transforms.OneHotOp(num_classes=10, smoothing_rate=0.1)]
>>> transform = py_transforms.Compose(transforms_list)
>>> data1 = data1.map(input_columns=["label"], operations=transform())
>>> mnist_dataset = mnist_dataset.map(input_columns=["label"], operations=transform)
"""
@check_one_hot_op
@@ -71,53 +69,44 @@ class Compose:
transforms (list): List of transformations to be applied.
Examples:
>>> import mindspore.dataset as ds
>>> import mindspore.dataset.vision.py_transforms as py_vision
>>> import mindspore.dataset.transforms.py_transforms as py_transforms
>>>
>>> dataset_dir = "path/to/imagefolder_directory"
>>> image_folder_dataset_dir = "/path/to/image_folder_dataset_directory"
>>> # create a dataset that reads all files in image_folder_dataset_dir with 8 threads
>>> data1 = ds.ImageFolderDataset(dataset_dir, num_parallel_workers=8)
>>> image_folder_dataset = ds.ImageFolderDataset(image_folder_dataset_dir, num_parallel_workers=8)
>>> # create a list of transformations to be applied to the image data
>>> transform = py_transforms.Compose([py_vision.Decode(),
>>> py_vision.RandomHorizontalFlip(0.5),
>>> py_vision.ToTensor(),
>>> py_vision.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
>>> py_vision.RandomErasing()])
>>> # apply the transform to the dataset through dataset.map()
>>> data1 = data1.map(operations=transform, input_columns="image")
... py_vision.RandomHorizontalFlip(0.5),
... py_vision.ToTensor(),
... py_vision.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
... py_vision.RandomErasing()])
>>> # apply the transform to the dataset through dataset.map function
>>> image_folder_dataset = image_folder_dataset.map(operations=transform, input_columns=["image"])
>>>
>>> # Compose can also be invoked implicitly, by just passing in a list of ops
>>> # the above example then becomes:
>>> transform_list = [py_vision.Decode(),
>>> py_vision.RandomHorizontalFlip(0.5),
>>> py_vision.ToTensor(),
>>> py_vision.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
>>> py_vision.RandomErasing()]
... py_vision.RandomHorizontalFlip(0.5),
... py_vision.ToTensor(),
... py_vision.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
... py_vision.RandomErasing()]
>>>
>>> # apply the transform to the dataset through dataset.map()
>>> data2 = data2.map(operations=transform_list, input_columns="image")
>>> image_folder_dataset_1 = image_folder_dataset_1.map(operations=transform_list, input_columns=["image"])
>>>
>>> # Certain C++ and Python ops can be combined, but not all of them
>>> # An example of combined operations
>>> import mindspore.dataset as ds
>>> import mindspore.dataset.transforms.c_transforms as c_transforms
>>> import mindspore.dataset.vision.c_transforms as c_vision
>>>
>>> data3 = ds.NumpySlicesDataset(arr, column_names=["cols"], shuffle=False)
>>> arr = [0, 1]
>>> dataset = ds.NumpySlicesDataset(arr, column_names=["cols"], shuffle=False)
>>> transformed_list = [py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]
>>> data3 = data3.map(operations=transformed_list, input_columns=["cols"])
>>> dataset = dataset.map(operations=transformed_list, input_columns=["cols"])
>>>
>>> # Here is an example of mixing vision ops
>>> data_dir = "/path/to/imagefolder_directory"
>>> data4 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
>>> input_columns = ["column_names"]
>>> import numpy as np
>>> op_list=[c_vision.Decode(),
>>> c_vision.Resize((224, 244)),
>>> py_vision.ToPIL(),
>>> np.array, # need to convert PIL image to a NumPy array to pass it to C++ operation
>>> c_vision.Resize((24, 24))]
>>> data4 = data4.map(operations=op_list, input_columns=input_columns)
... c_vision.Resize((224, 244)),
... py_vision.ToPIL(),
... np.array, # need to convert PIL image to a NumPy array to pass it to C++ operation
... c_vision.Resize((24, 24))]
>>> image_folder_dataset = image_folder_dataset.map(operations=op_list, input_columns=["image"])
"""
@check_compose_list
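The mixed Python/C++ operation list from the example above can be verified end to end on in-memory data. A sketch:

    import mindspore.dataset as ds
    import mindspore.dataset.transforms.c_transforms as c_transforms
    import mindspore.dataset.transforms.py_transforms as py_transforms

    dataset = ds.NumpySlicesDataset([0, 1], column_names=["cols"], shuffle=False)
    transformed_list = [py_transforms.OneHotOp(2),
                        c_transforms.Mask(c_transforms.Relational.EQ, 1)]
    dataset = dataset.map(operations=transformed_list, input_columns=["cols"])
    for row in dataset.create_dict_iterator(output_numpy=True):
        print(row["cols"])   # [ True False] for label 0, then [False  True] for label 1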
@@ -144,12 +133,14 @@ class RandomApply:
prob (float, optional): The probability to apply the transformation list (default=0.5).
Examples:
>>> import mindspore.dataset.vision.py_transforms as py_vision
>>> from mindspore.dataset.transforms.py_transforms import Compose, RandomApply
>>>
>>> Compose([py_vision.Decode(),
>>> py_vision.RandomApply(transforms_list, prob=0.6),
>>> py_vision.ToTensor()])
>>> transform_list = [py_vision.RandomHorizontalFlip(0.5),
... py_vision.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
... py_vision.RandomErasing()]
>>> transforms = Compose([py_vision.Decode(),
... RandomApply(transform_list, prob=0.6),
... py_vision.ToTensor()])
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms, input_columns=["image"])
"""
@check_random_apply
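A self-contained version of the RandomApply example (image_folder_dataset is assumed to be an ImageFolderDataset as in the Compose sketch above; RandomCrop keeps every op PIL-based until ToTensor):

    import mindspore.dataset.vision.py_transforms as py_vision
    from mindspore.dataset.transforms.py_transforms import Compose, RandomApply

    transform_list = [py_vision.RandomCrop(512)]
    transforms = Compose([py_vision.Decode(),
                          RandomApply(transform_list, prob=0.6),   # applied with p=0.6
                          py_vision.ToTensor()])
    image_folder_dataset = image_folder_dataset.map(operations=transforms,
                                                    input_columns=["image"])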
@@ -178,12 +169,14 @@ class RandomChoice:
transforms (list): List of transformations to be chosen from to apply.
Examples:
>>> import mindspore.dataset.vision.py_transforms as py_vision
>>> from mindspore.dataset.transforms.py_transforms import Compose, RandomChoice
>>>
>>> Compose([py_vision.Decode(),
>>> RandomChoice(transforms_list),
>>> py_vision.ToTensor()])
>>> transform_list = [py_vision.RandomHorizontalFlip(0.5),
... py_vision.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
... py_vision.RandomErasing()]
>>> transforms = Compose([py_vision.Decode(),
... RandomChoice(transform_list),
... py_vision.ToTensor()])
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms, input_columns=["image"])
"""
@check_transforms_list
@@ -211,12 +204,14 @@ class RandomOrder:
transforms (list): List of the transformations to apply.
Examples:
>>> import mindspore.dataset.vision.py_transforms as py_vision
>>> from mindspore.dataset.transforms.py_transforms import Compose, RandomOrder
>>>
>>> Compose([py_vision.Decode(),
>>> py_vision.RandomOrder(transforms_list),
>>> py_vision.ToTensor()])
>>> transform_list = [py_vision.RandomHorizontalFlip(0.5),
... py_vision.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262)),
... py_vision.RandomErasing()]
>>> transforms = Compose([py_vision.Decode(),
... RandomOrder(transform_list),
... py_vision.ToTensor()])
>>> image_folder_dataset = image_folder_dataset.map(operations=transforms, input_columns=["image"])
"""
@check_transforms_list
