!8734 Code_docs updating notes of examples in nn_folder

From: @zhangz0911gm
Reviewed-by: @zhunaipan, @liangchenghui
Signed-off-by: @liangchenghui
pull/8734/MERGE
Committed-by: mindspore-ci-bot (via Gitee)
commit d8a7fd8801

@@ -136,7 +136,8 @@ class MSELoss(_Loss):
 >>> loss = nn.MSELoss()
 >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
 >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
->>> loss(input_data, target_data)
+>>> output = loss(input_data, target_data)
+>>> print(output)
 0.33333334
 """
 def construct(self, base, target):
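
As a quick sanity check on the new printed value in the MSELoss example, the 0.33333334 can be reproduced with plain NumPy; this is an illustrative cross-check, not code from the commit:

import numpy as np

# Mean squared error over the three elements: ((1-1)^2 + (2-2)^2 + (3-2)^2) / 3 = 1/3.
input_data = np.array([1, 2, 3], dtype=np.float32)
target_data = np.array([1, 2, 2], dtype=np.float32)
print(np.mean((input_data - target_data) ** 2))  # 0.33333334 in float32, matching the docstring output
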
@@ -495,7 +496,8 @@ class BCELoss(_Loss):
 >>> loss = nn.BCELoss(weight=weight, reduction='mean')
 >>> inputs = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]), mindspore.float32)
 >>> labels = Tensor(np.array([[0, 1, 0], [0, 0, 1]]), mindspore.float32)
->>> loss(inputs, labels)
+>>> output = loss(inputs, labels)
+>>> print(output)
 1.8952923
 """
@@ -553,7 +555,8 @@ class CosineEmbeddingLoss(_Loss):
 >>> x2 = Tensor(np.array([[0.4, 1.2], [-0.4, -0.9]]), mindspore.float32)
 >>> y = Tensor(np.array([1,-1]), mindspore.int32)
 >>> cosine_embedding_loss = nn.CosineEmbeddingLoss()
->>> cosine_embedding_loss(x1, x2, y)
+>>> output = cosine_embedding_loss(x1, x2, y)
+>>> print(output)
 [0.0003426671]
 """
 def __init__(self, margin=0.0, reduction="mean"):
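
The [0.0003426671] printed in the CosineEmbeddingLoss example can be checked the same way, assuming the standard definition (1 - cos(x1, x2) for y = 1, max(0, cos(x1, x2) - margin) for y = -1, averaged under reduction='mean'). Note that x1 is defined above the lines shown in this hunk, so the x1 values below are assumed purely for illustration:

import numpy as np

x1 = np.array([[0.3, 0.8], [0.4, 0.3]])    # illustrative only; the real x1 is outside the hunk context
x2 = np.array([[0.4, 1.2], [-0.4, -0.9]])
y = np.array([1, -1])
margin = 0.0

# Cosine similarity of each (x1, x2) row pair.
cos = np.sum(x1 * x2, axis=1) / (np.linalg.norm(x1, axis=1) * np.linalg.norm(x2, axis=1))
# 1 - cos for positive pairs, hinge on cos for negative pairs, then the mean.
per_pair = np.where(y == 1, 1.0 - cos, np.maximum(0.0, cos - margin))
print(per_pair.mean())  # ~0.00034 with these illustrative inputs
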

@@ -34,7 +34,7 @@ class TopKCategoricalAccuracy(Metric):
 Examples:
 >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
->>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
 >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
 >>> topk = nn.TopKCategoricalAccuracy(3)
 >>> topk.clear()
@@ -98,7 +98,7 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy):
 Examples:
 >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
->>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
 >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
 >>> topk = nn.Top1CategoricalAccuracy()
 >>> topk.clear()
@@ -116,7 +116,7 @@ class Top5CategoricalAccuracy(TopKCategoricalAccuracy):
 Examples:
 >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
->>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
 >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
 >>> topk = nn.Top5CategoricalAccuracy()
 >>> topk.clear()
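
The three accuracy hunks above end at topk.clear(); the rest of each docstring example falls outside the diff context. For readers unfamiliar with the Metric interface, a minimal self-contained sketch of how such a metric is driven end to end (clear, update, eval) could look like the following; it is an illustration, not part of this PR:

import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

# Same data as in the docstring examples above.
x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2],
                     [0.1, 0.35, 0.5, 0.2, 0.],
                     [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
y = Tensor(np.array([2, 0, 1]), mindspore.float32)

topk = nn.TopKCategoricalAccuracy(3)
topk.clear()          # reset the internal counters
topk.update(x, y)     # accumulate predictions and labels
print(topk.eval())    # fraction of samples whose label is among the top-3 predictions
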

@@ -281,8 +281,8 @@ class Adam(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use learning rate of 0.01 and defaule weight decay of 0.0.
@@ -416,8 +416,8 @@ class AdamWeightDecay(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.AdamWeightDecay(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.

@@ -135,8 +135,8 @@ class FTRL(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.FTRL(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use weight decay of 0.01.
 >>> # The no_conv_params's parameters will use default weight decay of 0.0.

@@ -245,8 +245,8 @@ class Lamb(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': poly_decay_lr},
->>> {'order_params': net.trainable_params(0.01, 0.0001, 10, 0.5)}]
+... {'params': no_conv_params, 'lr': poly_decay_lr},
+... {'order_params': net.trainable_params(0.01, 0.0001, 10, 0.5)}]
 >>> optim = nn.Lamb(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use dynamic learning rate of poly decay learning rate and default

@@ -192,8 +192,8 @@ class LazyAdam(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.

@@ -113,8 +113,8 @@ class Momentum(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.

@@ -116,8 +116,8 @@ class ProximalAdagrad(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.ProximalAdagrad(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.

@@ -139,8 +139,8 @@ class RMSProp(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.

@@ -112,8 +112,8 @@ class SGD(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
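
All of the optimizer hunks above stop once the grouped optimizer is constructed. For context, such an optimizer is normally handed to a Model together with a loss function; the sketch below assumes a hypothetical network net and dataset ds_train and only illustrates that pattern; it is not code from this PR:

import mindspore.nn as nn
from mindspore import Model

# `net` and `ds_train` are hypothetical placeholders for a network and a training dataset.
conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
group_params = [{'params': conv_params, 'weight_decay': 0.01},
                {'params': no_conv_params, 'lr': 0.01},
                {'order_params': net.trainable_params()}]

loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)

model = Model(net, loss_fn=loss, optimizer=optim, metrics={'acc'})
model.train(1, ds_train)  # one epoch, using the per-group learning rates and weight decays above
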

@@ -252,7 +252,8 @@ class ConvReparam(_ConvVariational):
 Examples:
 >>> net = ConvReparam(120, 240, 4, has_bias=False)
 >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
->>> net(input).shape
+>>> output = net(input).shape
+>>> print(output)
 (1, 240, 1024, 640)
 """

@@ -190,7 +190,8 @@ class DenseReparam(_DenseVariational):
 Examples:
 >>> net = DenseReparam(3, 4)
 >>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
->>> net(input).shape
+>>> output = net(input).shape
+>>> print(output)
 (2, 4)
 """

@@ -59,18 +59,20 @@ class UncertaintyEvaluation:
 >>> load_param_into_net(network, param_dict)
 >>> ds_train = create_dataset('workspace/mnist/train')
 >>> evaluation = UncertaintyEvaluation(model=network,
->>> train_dataset=ds_train,
->>> task_type='classification',
->>> num_classes=10,
->>> epochs=1,
->>> epi_uncer_model_path=None,
->>> ale_uncer_model_path=None,
->>> save_model=False)
+... train_dataset=ds_train,
+... task_type='classification',
+... num_classes=10,
+... epochs=1,
+... epi_uncer_model_path=None,
+... ale_uncer_model_path=None,
+... save_model=False)
 >>> epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data)
 >>> aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data)
->>> epistemic_uncertainty.shape
+>>> output = epistemic_uncertainty.shape
+>>> print(output)
 (32, 10)
->>> aleatoric_uncertainty.shape
+>>> output = aleatoric_uncertainty.shape
+>>> print(output)
 (32,)
 """
