!14288 Modify the examples in explainer.benchmark to make them executable separately

From: @lixiaohui33
Reviewed-by: @ouwenchang, @yelihua
Signed-off-by: @yelihua
pull/14288/MERGE
Committed-by: mindspore-ci-bot (via Gitee)
commit 2b8e875bc4

@@ -89,6 +89,7 @@ class ImageClassificationRunner:
 >>> # Load the checkpoint
 >>> param_dict = load_checkpoint("/path/to/checkpoint")
 >>> load_param_into_net(net, param_dict)
+[]
 >>>
 >>> # Prepare the dataset for explaining and evaluation.
 >>> # The detail of create_dataset_cifar10 method is shown in model_zoo.official.cv.alexnet.src.dataset.py
@@ -105,8 +106,6 @@ class ImageClassificationRunner:
 >>>
 >>> runner = ImageClassificationRunner("./summary_dir", (dataset, labels), net, activation_fn)
 >>> runner.register_saliency(explainers=explainers, benchmarkers=benchmarkers)
->>> runner.register_uncertainty()
->>> runner.register_hierarchical_occlusion()
 >>> runner.run()
 """

@@ -57,7 +57,8 @@ class ClassSensitivity(LabelAgnosticMetric):
 >>> input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
 >>> class_sensitivity = ClassSensitivity()
 >>> res = class_sensitivity.evaluate(gradient, input_x)
->>> print(res)
+>>> print(res.shape)
+(1,)
 """
 self._check_evaluate_param(explainer, inputs)
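
Taken together, the ClassSensitivity example now runs on its own once an explainer is available. A minimal, self-contained sketch of the full flow; the `nn.SequentialCell` stand-in network is an assumption made here so the snippet does not depend on model_zoo's LeNet5.

import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore.explainer.explanation import Gradient
from mindspore.explainer.benchmark import ClassSensitivity

# Assumed toy classifier (any network producing class logits would do).
net = nn.SequentialCell([nn.Flatten(), nn.Dense(3 * 32 * 32, 10)])
gradient = Gradient(net)

input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
class_sensitivity = ClassSensitivity()
res = class_sensitivity.evaluate(gradient, input_x)
print(res.shape)  # expected: (1,)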

@@ -365,15 +365,6 @@ class Faithfulness(LabelSensitiveMetric):
 metric (str, optional): The specifi metric to quantify faithfulness.
 Options: "DeletionAUC", "InsertionAUC", "NaiveFaithfulness".
 Default: 'NaiveFaithfulness'.
-Examples:
->>> from mindspore import nn
->>> from mindspore.explainer.benchmark import Faithfulness
->>> # init a `Faithfulness` object
->>> num_labels = 10
->>> metric = "InsertionAUC"
->>> activation_fn = nn.Softmax()
->>> faithfulness = Faithfulness(num_labels, activation_fn, metric)
 """
 _methods = [NaiveFaithfulness, DeletionAUC, InsertionAUC]
@@ -418,9 +409,15 @@ class Faithfulness(LabelSensitiveMetric):
 Examples:
 >>> import numpy as np
 >>> import mindspore as ms
+>>> from mindspore import nn
+>>> from mindspore.explainer.benchmark import Faithfulness
 >>> from mindspore.explainer.explanation import Gradient
 >>>
->>>
+>>> # init a `Faithfulness` object
+>>> num_labels = 10
+>>> metric = "InsertionAUC"
+>>> activation_fn = nn.Softmax()
+>>> faithfulness = Faithfulness(num_labels, activation_fn, metric)
 >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
 >>> net = LeNet5(10, num_channel=3)
 >>> gradient = Gradient(net)
@@ -429,10 +426,13 @@ class Faithfulness(LabelSensitiveMetric):
 >>> # usage 1: input the explainer and the data to be explained,
 >>> # faithfulness is a Faithfulness instance
 >>> res = faithfulness.evaluate(gradient, inputs, targets)
+>>> print(res.shape)
+(1,)
 >>> # usage 2: input the generated saliency map
 >>> saliency = gradient(inputs, targets)
 >>> res = faithfulness.evaluate(gradient, inputs, targets, saliency)
->>> print(res)
+>>> print(res.shape)
+(1,)
 """
 self._check_evaluate_param(explainer, inputs, targets, saliency)
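
With the imports and the `Faithfulness` construction moved into this docstring, the example can run stand-alone. A minimal sketch, assuming a toy stand-in network in place of model_zoo's LeNet5:

import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore.explainer.explanation import Gradient
from mindspore.explainer.benchmark import Faithfulness

# Assumed toy classifier with 10 output classes.
net = nn.SequentialCell([nn.Flatten(), nn.Dense(3 * 32 * 32, 10)])
gradient = Gradient(net)
faithfulness = Faithfulness(10, nn.Softmax(), "InsertionAUC")

inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
targets = ms.Tensor([1], ms.int32)

# usage 1: let the benchmarker call the explainer internally
res = faithfulness.evaluate(gradient, inputs, targets)
# usage 2: reuse a precomputed saliency map
saliency = gradient(inputs, targets)
res = faithfulness.evaluate(gradient, inputs, targets, saliency)
print(res.shape)  # expected: (1,)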

@@ -56,13 +56,7 @@ class Localization(LabelSensitiveMetric):
 Args:
 num_labels (int): Number of classes in the dataset.
 metric (str, optional): Specific metric to calculate localization capability.
-Options: "PointingGame", "IoSR".
-Default: "PointingGame".
-Examples:
->>> from mindspore.explainer.benchmark import Localization
->>> num_labels = 10
->>> localization = Localization(num_labels, "PointingGame")
+Options: "PointingGame", "IoSR". Default: "PointingGame".
 """
 def __init__(self,
@@ -113,6 +107,10 @@ class Localization(LabelSensitiveMetric):
 >>> import numpy as np
 >>> import mindspore as ms
 >>> from mindspore.explainer.explanation import Gradient
+>>> from mindspore.explainer.benchmark import Localization
+>>>
+>>> num_labels = 10
+>>> localization = Localization(num_labels, "PointingGame")
 >>>
 >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
 >>> net = LeNet5(10, num_channel=3)
@@ -124,11 +122,13 @@ class Localization(LabelSensitiveMetric):
 >>> # usage 1: input the explainer and the data to be explained,
 >>> # localization is a Localization instance
 >>> res = localization.evaluate(gradient, inputs, targets, mask=masks)
->>> print(res)
+>>> print(res.shape)
+(1,)
 >>> # usage 2: input the generated saliency map
 >>> saliency = gradient(inputs, targets)
 >>> res = localization.evaluate(gradient, inputs, targets, saliency, mask=masks)
->>> print(res)
+>>> print(res.shape)
+(1,)
 """
 self._check_evaluate_param_with_mask(explainer, inputs, targets, saliency, mask)
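
The same pattern applies to Localization, which additionally needs a ground-truth mask. A minimal sketch, assuming a toy network and a hand-made binary mask of the same spatial size as the input:

import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore.explainer.explanation import Gradient
from mindspore.explainer.benchmark import Localization

# Assumed toy classifier with 10 output classes.
net = nn.SequentialCell([nn.Flatten(), nn.Dense(3 * 32 * 32, 10)])
gradient = Gradient(net)
localization = Localization(10, "PointingGame")

inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
targets = ms.Tensor([1], ms.int32)

# Assumed ground-truth mask: 1 inside the object region, 0 elsewhere.
masks = np.zeros((1, 1, 32, 32), dtype=np.float32)
masks[:, :, 10:20, 10:20] = 1.0

res = localization.evaluate(gradient, inputs, targets, mask=masks)
print(res.shape)  # expected: (1,)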

@@ -35,15 +35,6 @@ class Robustness(LabelSensitiveMetric):
 single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification tasks,
 `nn.Sigmoid` is usually be applied. Users can also pass their own customized `activation_fn` as long as
 when combining this function with network, the final output is the probability of the input.
-Examples:
->>> from mindspore import nn
->>> from mindspore.explainer.benchmark import Robustness
->>> # Initialize a Robustness benchmarker passing num_labels of the dataset.
->>> num_labels = 10
->>> activation_fn = nn.Softmax()
->>> robustness = Robustness(num_labels, activation_fn)
 """
 def __init__(self, num_labels, activation_fn):
@@ -79,7 +70,14 @@ class Robustness(LabelSensitiveMetric):
 Examples:
 >>> import numpy as np
 >>> import mindspore as ms
+>>> from mindspore import nn
 >>> from mindspore.explainer.explanation import Gradient
+>>> from mindspore.explainer.benchmark import Robustness
+>>>
+>>> # Initialize a Robustness benchmarker passing num_labels of the dataset.
+>>> num_labels = 10
+>>> activation_fn = nn.Softmax()
+>>> robustness = Robustness(num_labels, activation_fn)
 >>>
 >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
 >>> net = LeNet5(10, num_channel=3)
@@ -89,7 +87,8 @@ class Robustness(LabelSensitiveMetric):
 >>> target_label = ms.Tensor([0], ms.int32)
 >>> # robustness is a Robustness instance
 >>> res = robustness.evaluate(gradient, input_x, target_label)
->>> print(res)
+>>> print(res.shape)
+(1,)
 """
 self._check_evaluate_param(explainer, inputs, targets, saliency)
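
Likewise for Robustness; a minimal stand-alone sketch with an assumed toy network:

import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore.explainer.explanation import Gradient
from mindspore.explainer.benchmark import Robustness

# Assumed toy classifier with 10 output classes.
net = nn.SequentialCell([nn.Flatten(), nn.Dense(3 * 32 * 32, 10)])
gradient = Gradient(net)
robustness = Robustness(10, nn.Softmax())

input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
target_label = ms.Tensor([0], ms.int32)
res = robustness.evaluate(gradient, input_x, target_label)
print(res.shape)  # expected: (1,)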

@@ -87,6 +87,7 @@ class GradCAM(IntermediateLayerAttribution):
 >>> label = 5
 >>> saliency = gradcam(inputs, label)
 >>> print(saliency.shape)
+(1, 1, 32, 32)
 """
 def __init__(self, network, layer=""):

@@ -62,6 +62,7 @@ class Gradient(Attribution):
 >>> label = 5
 >>> saliency = gradient(inputs, label)
 >>> print(saliency.shape)
+(1, 1, 32, 32)
 """
 def __init__(self, network):

@ -45,13 +45,6 @@ class ModifiedReLU(Gradient):
Returns: Returns:
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`. Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> # explainer is a "Deconvolution" or "GuidedBackprop" object, parse data and the target label to be
>>> # explained and get the attribution
>>> saliency = explainer(inputs, label)
""" """
self._verify_data(inputs, targets) self._verify_data(inputs, targets)
@@ -117,6 +110,7 @@ class Deconvolution(ModifiedReLU):
 >>> label = 5
 >>> saliency = deconvolution(inputs, label)
 >>> print(saliency.shape)
+(1, 1, 32, 32)
 """
 def __init__(self, network):
@@ -161,6 +155,7 @@ class GuidedBackprop(ModifiedReLU):
 >>> label = 5
 >>> saliency = gbp(inputs, label)
 >>> print(saliency.shape)
+(1, 1, 32, 32)
 """
 def __init__(self, network):

@@ -80,6 +80,7 @@ class Occlusion(PerturbationAttribution):
 >>> label = ms.Tensor([1], ms.int32)
 >>> saliency = occlusion(input_x, label)
 >>> print(saliency.shape)
+(1, 1, 32, 32)
 """
 def __init__(self, network, activation_fn, perturbation_per_eval=32):
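
Since the attribution examples above rely on a model_zoo network, here is a minimal stand-alone sketch of the Occlusion call pattern with an assumed toy network; the perturbation-based explainers take an `activation_fn` in addition to the network:

import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore.explainer.explanation import Occlusion

# Assumed toy classifier with 10 output classes.
net = nn.SequentialCell([nn.Flatten(), nn.Dense(3 * 32 * 32, 10)])
occlusion = Occlusion(net, activation_fn=nn.Softmax())

input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
label = ms.Tensor([1], ms.int32)
saliency = occlusion(input_x, label)
print(saliency.shape)  # expected: (1, 1, 32, 32)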

@@ -74,10 +74,12 @@ class RISE(PerturbationAttribution):
 >>> targets = 5
 >>> saliency = rise(inputs, targets)
 >>> print(saliency.shape)
+(2, 1, 32, 32)
 >>> # `targets` can also be a 2D tensor
 >>> targets = ms.Tensor([[5], [1]], ms.int32)
 >>> saliency = rise(inputs, targets)
 >>> print(saliency.shape)
+(2, 1, 32, 32)
 """
 def __init__(self,

@@ -379,27 +379,6 @@ class Searcher:
 strides (Union(list[int], optional): Stride of layers, None means by auto calcuation.
 threshold (float): Threshold network output value of the target class.
 by_masking (bool): Whether it is masking mode.
-Examples:
->>> from mindspore import nn
->>> from mindspore.explainer.explanation._counterfactual.hierarchical_occlusion import Searcher, EditStep
->>>
->>> from user_defined import load_network, load_sample_image
->>>
->>>
->>> network = nn.SequentialCell([load_network(), nn.Sigmoid()])
->>>
->>> # single image in CHW or NCHW(N=1) numpy.ndarray tensor, typical dimension is 224x224
->>> image = load_sample_image()
->>> # target class index
->>> class_idx = 5
->>>
->>> # by default, maximum 3 search layers, auto calculate window sizes and strides
->>> searcher = Searcher(network)
->>>
->>> edit_tree, layer_outputs = searcher.search(image, class_idx)
->>> # get the outcome image of the deepest layer in CHW(or NCHW(N=1) if input image is NCHW) format
->>> outcome = EditStep.apply(image, searcher.compiled_mask, edit_tree.leaf_steps)
 """
 def __init__(self,
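
The Searcher example was dropped from the docstring because the hierarchical-occlusion module is private; the removed usage is kept here as a reference sketch. `load_network` and `load_sample_image` are user-provided placeholders, exactly as in the original example, and a trained network whose target-class score exceeds `threshold` is assumed for the search to return edits.

from mindspore import nn
from mindspore.explainer.explanation._counterfactual.hierarchical_occlusion import Searcher, EditStep

# Hypothetical user helpers, as in the removed example.
from user_defined import load_network, load_sample_image

# Append Sigmoid so the network outputs class probabilities.
network = nn.SequentialCell([load_network(), nn.Sigmoid()])

# Single image as a CHW or NCHW(N=1) numpy.ndarray, typically around 224x224.
image = load_sample_image()
class_idx = 5  # target class index

# By default: at most 3 search layers, window sizes and strides auto-calculated.
searcher = Searcher(network)
edit_tree, layer_outputs = searcher.search(image, class_idx)

# Outcome image of the deepest layer, same layout as the input image.
outcome = EditStep.apply(image, searcher.compiled_mask, edit_tree.leaf_steps)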
