diff --git a/mindspore/explainer/_image_classification_runner.py b/mindspore/explainer/_image_classification_runner.py
index faeacaa1d2..713fc36ca6 100644
--- a/mindspore/explainer/_image_classification_runner.py
+++ b/mindspore/explainer/_image_classification_runner.py
@@ -89,6 +89,7 @@ class ImageClassificationRunner:
         >>> # Load the checkpoint
         >>> param_dict = load_checkpoint("/path/to/checkpoint")
         >>> load_param_into_net(net, param_dict)
+        []
         >>>
         >>> # Prepare the dataset for explaining and evaluation.
         >>> # The detail of create_dataset_cifar10 method is shown in model_zoo.official.cv.alexnet.src.dataset.py
@@ -105,8 +106,6 @@ class ImageClassificationRunner:
         >>>
         >>> runner = ImageClassificationRunner("./summary_dir", (dataset, labels), net, activation_fn)
         >>> runner.register_saliency(explainers=explainers, benchmarkers=benchmarkers)
-        >>> runner.register_uncertainty()
-        >>> runner.register_hierarchical_occlusion()
         >>> runner.run()
     """
 
diff --git a/mindspore/explainer/benchmark/_attribution/class_sensitivity.py b/mindspore/explainer/benchmark/_attribution/class_sensitivity.py
index 83fe41a7af..64c896d6f5 100644
--- a/mindspore/explainer/benchmark/_attribution/class_sensitivity.py
+++ b/mindspore/explainer/benchmark/_attribution/class_sensitivity.py
@@ -57,7 +57,8 @@ class ClassSensitivity(LabelAgnosticMetric):
             >>> input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
             >>> class_sensitivity = ClassSensitivity()
             >>> res = class_sensitivity.evaluate(gradient, input_x)
-            >>> print(res)
+            >>> print(res.shape)
+            (1,)
         """
         self._check_evaluate_param(explainer, inputs)
 
diff --git a/mindspore/explainer/benchmark/_attribution/faithfulness.py b/mindspore/explainer/benchmark/_attribution/faithfulness.py
index 5a77cc72cc..838ec98fcb 100644
--- a/mindspore/explainer/benchmark/_attribution/faithfulness.py
+++ b/mindspore/explainer/benchmark/_attribution/faithfulness.py
@@ -365,15 +365,6 @@ class Faithfulness(LabelSensitiveMetric):
         metric (str, optional): The specifi metric to quantify faithfulness.
             Options: "DeletionAUC", "InsertionAUC", "NaiveFaithfulness". Default: 'NaiveFaithfulness'.
-
-    Examples:
-        >>> from mindspore import nn
-        >>> from mindspore.explainer.benchmark import Faithfulness
-        >>> # init a `Faithfulness` object
-        >>> num_labels = 10
-        >>> metric = "InsertionAUC"
-        >>> activation_fn = nn.Softmax()
-        >>> faithfulness = Faithfulness(num_labels, activation_fn, metric)
     """
 
     _methods = [NaiveFaithfulness, DeletionAUC, InsertionAUC]
 
@@ -418,9 +409,15 @@ class Faithfulness(LabelSensitiveMetric):
         Examples:
             >>> import numpy as np
             >>> import mindspore as ms
+            >>> from mindspore import nn
+            >>> from mindspore.explainer.benchmark import Faithfulness
             >>> from mindspore.explainer.explanation import Gradient
             >>>
-            >>>
+            >>> # init a `Faithfulness` object
+            >>> num_labels = 10
+            >>> metric = "InsertionAUC"
+            >>> activation_fn = nn.Softmax()
+            >>> faithfulness = Faithfulness(num_labels, activation_fn, metric)
             >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
             >>> net = LeNet5(10, num_channel=3)
             >>> gradient = Gradient(net)
@@ -429,10 +426,13 @@ class Faithfulness(LabelSensitiveMetric):
             >>> # usage 1: input the explainer and the data to be explained,
             >>> # faithfulness is a Faithfulness instance
             >>> res = faithfulness.evaluate(gradient, inputs, targets)
+            >>> print(res.shape)
+            (1,)
             >>> # usage 2: input the generated saliency map
             >>> saliency = gradient(inputs, targets)
             >>> res = faithfulness.evaluate(gradient, inputs, targets, saliency)
-            >>> print(res)
+            >>> print(res.shape)
+            (1,)
         """
         self._check_evaluate_param(explainer, inputs, targets, saliency)
 
diff --git a/mindspore/explainer/benchmark/_attribution/localization.py b/mindspore/explainer/benchmark/_attribution/localization.py
index f135b84f32..add069a201 100644
--- a/mindspore/explainer/benchmark/_attribution/localization.py
+++ b/mindspore/explainer/benchmark/_attribution/localization.py
@@ -56,13 +56,7 @@ class Localization(LabelSensitiveMetric):
     Args:
         num_labels (int): Number of classes in the dataset.
         metric (str, optional): Specific metric to calculate localization capability.
-            Options: "PointingGame", "IoSR".
-            Default: "PointingGame".
-
-    Examples:
-        >>> from mindspore.explainer.benchmark import Localization
-        >>> num_labels = 10
-        >>> localization = Localization(num_labels, "PointingGame")
+            Options: "PointingGame", "IoSR". Default: "PointingGame".
     """
 
     def __init__(self,
                  num_labels,
@@ -113,6 +107,10 @@ class Localization(LabelSensitiveMetric):
             >>> import numpy as np
             >>> import mindspore as ms
             >>> from mindspore.explainer.explanation import Gradient
+            >>> from mindspore.explainer.benchmark import Localization
+            >>>
+            >>> num_labels = 10
+            >>> localization = Localization(num_labels, "PointingGame")
             >>>
             >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
             >>> net = LeNet5(10, num_channel=3)
@@ -124,11 +122,13 @@ class Localization(LabelSensitiveMetric):
             >>> # usage 1: input the explainer and the data to be explained,
             >>> # localization is a Localization instance
             >>> res = localization.evaluate(gradient, inputs, targets, mask=masks)
-            >>> print(res)
+            >>> print(res.shape)
+            (1,)
             >>> # usage 2: input the generated saliency map
             >>> saliency = gradient(inputs, targets)
             >>> res = localization.evaluate(gradient, inputs, targets, saliency, mask=masks)
-            >>> print(res)
+            >>> print(res.shape)
+            (1,)
         """
         self._check_evaluate_param_with_mask(explainer, inputs, targets, saliency, mask)
 
diff --git a/mindspore/explainer/benchmark/_attribution/robustness.py b/mindspore/explainer/benchmark/_attribution/robustness.py
index 5618dd8473..efbe74ee35 100644
--- a/mindspore/explainer/benchmark/_attribution/robustness.py
+++ b/mindspore/explainer/benchmark/_attribution/robustness.py
@@ -35,15 +35,6 @@ class Robustness(LabelSensitiveMetric):
             single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification
             tasks, `nn.Sigmoid` is usually be applied. Users can also pass their own customized `activation_fn` as long
             as when combining this function with network, the final output is the probability of the input.
-
-
-    Examples:
-        >>> from mindspore import nn
-        >>> from mindspore.explainer.benchmark import Robustness
-        >>> # Initialize a Robustness benchmarker passing num_labels of the dataset.
-        >>> num_labels = 10
-        >>> activation_fn = nn.Softmax()
-        >>> robustness = Robustness(num_labels, activation_fn)
     """
 
     def __init__(self, num_labels, activation_fn):
@@ -79,7 +70,14 @@ class Robustness(LabelSensitiveMetric):
         Examples:
             >>> import numpy as np
             >>> import mindspore as ms
+            >>> from mindspore import nn
             >>> from mindspore.explainer.explanation import Gradient
+            >>> from mindspore.explainer.benchmark import Robustness
+            >>>
+            >>> # Initialize a Robustness benchmarker passing num_labels of the dataset.
+            >>> num_labels = 10
+            >>> activation_fn = nn.Softmax()
+            >>> robustness = Robustness(num_labels, activation_fn)
             >>>
             >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
             >>> net = LeNet5(10, num_channel=3)
@@ -89,7 +87,8 @@ class Robustness(LabelSensitiveMetric):
             >>> target_label = ms.Tensor([0], ms.int32)
             >>> # robustness is a Robustness instance
             >>> res = robustness.evaluate(gradient, input_x, target_label)
-            >>> print(res)
+            >>> print(res.shape)
+            (1,)
         """
         self._check_evaluate_param(explainer, inputs, targets, saliency)
 
diff --git a/mindspore/explainer/explanation/_attribution/_backprop/gradcam.py b/mindspore/explainer/explanation/_attribution/_backprop/gradcam.py
index f25460539d..572f6ced1d 100644
--- a/mindspore/explainer/explanation/_attribution/_backprop/gradcam.py
+++ b/mindspore/explainer/explanation/_attribution/_backprop/gradcam.py
@@ -87,6 +87,7 @@ class GradCAM(IntermediateLayerAttribution):
         >>> label = 5
         >>> saliency = gradcam(inputs, label)
         >>> print(saliency.shape)
+        (1, 1, 32, 32)
     """
 
     def __init__(self, network, layer=""):
diff --git a/mindspore/explainer/explanation/_attribution/_backprop/gradient.py b/mindspore/explainer/explanation/_attribution/_backprop/gradient.py
index e060a61633..33bb6c5b3a 100644
--- a/mindspore/explainer/explanation/_attribution/_backprop/gradient.py
+++ b/mindspore/explainer/explanation/_attribution/_backprop/gradient.py
@@ -62,6 +62,7 @@ class Gradient(Attribution):
         >>> label = 5
         >>> saliency = gradient(inputs, label)
         >>> print(saliency.shape)
+        (1, 1, 32, 32)
     """
 
     def __init__(self, network):
diff --git a/mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py b/mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py
index a753a4e263..dea10791c9 100644
--- a/mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py
+++ b/mindspore/explainer/explanation/_attribution/_backprop/modified_relu.py
@@ -45,13 +45,6 @@ class ModifiedReLU(Gradient):
 
         Returns:
             Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
-
-        Examples:
-            >>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
-            >>> label = 5
-            >>> # explainer is a "Deconvolution" or "GuidedBackprop" object, parse data and the target label to be
-            >>> # explained and get the attribution
-            >>> saliency = explainer(inputs, label)
         """
         self._verify_data(inputs, targets)
 
@@ -117,6 +110,7 @@ class Deconvolution(ModifiedReLU):
         >>> label = 5
         >>> saliency = deconvolution(inputs, label)
         >>> print(saliency.shape)
+        (1, 1, 32, 32)
     """
 
     def __init__(self, network):
@@ -161,6 +155,7 @@ class GuidedBackprop(ModifiedReLU):
         >>> label = 5
         >>> saliency = gbp(inputs, label)
         >>> print(saliency.shape)
+        (1, 1, 32, 32)
     """
 
     def __init__(self, network):
diff --git a/mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py b/mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py
index 1a72098d6f..fcb38949d1 100644
--- a/mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py
+++ b/mindspore/explainer/explanation/_attribution/_perturbation/occlusion.py
@@ -80,6 +80,7 @@ class Occlusion(PerturbationAttribution):
         >>> label = ms.Tensor([1], ms.int32)
         >>> saliency = occlusion(input_x, label)
         >>> print(saliency.shape)
+        (1, 1, 32, 32)
     """
 
     def __init__(self, network, activation_fn, perturbation_per_eval=32):
diff --git a/mindspore/explainer/explanation/_attribution/_perturbation/rise.py b/mindspore/explainer/explanation/_attribution/_perturbation/rise.py
index 9d6c5ebad5..e9bfe0c4aa 100644
--- a/mindspore/explainer/explanation/_attribution/_perturbation/rise.py
+++ b/mindspore/explainer/explanation/_attribution/_perturbation/rise.py
@@ -74,10 +74,12 @@ class RISE(PerturbationAttribution):
         >>> targets = 5
         >>> saliency = rise(inputs, targets)
         >>> print(saliency.shape)
+        (2, 1, 32, 32)
         >>> # `targets` can also be a 2D tensor
         >>> targets = ms.Tensor([[5], [1]], ms.int32)
         >>> saliency = rise(inputs, targets)
        >>> print(saliency.shape)
+        (2, 1, 32, 32)
     """
 
     def __init__(self,
diff --git a/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py b/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py
index 0a5880f11e..6c561fc4ac 100644
--- a/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py
+++ b/mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py
@@ -379,27 +379,6 @@ class Searcher:
         strides (Union(list[int], optional): Stride of layers, None means by auto calcuation.
         threshold (float): Threshold network output value of the target class.
         by_masking (bool): Whether it is masking mode.
-
-    Examples:
-        >>> from mindspore import nn
-        >>> from mindspore.explainer.explanation._counterfactual.hierarchical_occlusion import Searcher, EditStep
-        >>>
-        >>> from user_defined import load_network, load_sample_image
-        >>>
-        >>>
-        >>> network = nn.SequentialCell([load_network(), nn.Sigmoid()])
-        >>>
-        >>> # single image in CHW or NCHW(N=1) numpy.ndarray tensor, typical dimension is 224x224
-        >>> image = load_sample_image()
-        >>> # target class index
-        >>> class_idx = 5
-        >>>
-        >>> # by default, maximum 3 search layers, auto calculate window sizes and strides
-        >>> searcher = Searcher(network)
-        >>>
-        >>> edit_tree, layer_outputs = searcher.search(image, class_idx)
-        >>> # get the outcome image of the deepest layer in CHW(or NCHW(N=1) if input image is NCHW) format
-        >>> outcome = EditStep.apply(image, searcher.compiled_mask, edit_tree.leaf_steps)
     """
 
    def __init__(self,
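
For reference, the relocated doctest snippets above compose into the following end-to-end flow: build an explainer, generate a saliency map, then score it with a benchmarker. This is a minimal sketch, not part of the patch: SimpleNet is a hypothetical stand-in for the model_zoo LeNet5 referenced in the docstrings, while the Gradient and Robustness calls and the printed shapes follow the diff.

import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore.explainer.explanation import Gradient
from mindspore.explainer.benchmark import Robustness

class SimpleNet(nn.Cell):
    """Tiny CNN standing in for LeNet5(10, num_channel=3); assumed, not from the patch."""
    def __init__(self, num_classes=10):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3, pad_mode='same')
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(8 * 16 * 16, num_classes)

    def construct(self, x):
        x = self.pool(self.relu(self.conv(x)))
        return self.fc(self.flatten(x))

net = SimpleNet()
gradient = Gradient(net)

input_x = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
target_label = ms.Tensor([0], ms.int32)

# Explainers return an (N, 1, H, W) saliency map, hence the "(1, 1, 32, 32)" outputs above.
saliency = gradient(input_x, target_label)
print(saliency.shape)  # (1, 1, 32, 32)

# Benchmarkers return one score per sample, hence the "(1,)" outputs above.
robustness = Robustness(num_labels=10, activation_fn=nn.Softmax())
res = robustness.evaluate(gradient, input_x, target_label)
print(res.shape)  # (1,)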