!8104 Fixbug IssueI23FX6: format API comments

Merge pull request !8104 from lixiaohui33/fixbug_explain
mindspore-ci-bot 4 years ago committed by Gitee
commit 3f00585113

@ -56,16 +56,17 @@ def _make_rgba(saliency):
class ExplainRunner:
"""
A high-level API for users to generate and store results of the explanation methods and the evaluation methods.
After generating results with the explanation methods and the evaluation methods, the results will be written into
a specified file with `mindspore.summary.SummaryRecord`. The stored content can be viewed using MindInsight.
Args:
summary_dir (str, optional): The directory path to save the summary files which store the generated results.
Default: "./".
Examples:
>>> from mindspore.explainer import ExplainRunner
>>> # init a runner with a specified directory
>>> summary_dir = "summary_dir"
>>> runner = ExplainRunner(summary_dir)
@ -83,14 +84,20 @@ class ExplainRunner:
explainers: List,
benchmarkers: Optional[List] = None):
"""
Generates results and writes them into the summary files in `summary_dir` specified during the object
initialization.
Args:
dataset (tuple): A tuple that contains `mindspore.dataset` object for iteration and its labels.
- dataset[0]: A `mindspore.dataset` object to provide data to explain.
- dataset[1]: A list of strings that specifies the label names of the dataset.
explainers (list[Explanation]): A list of explanation objects to generate attribution results. An Explanation
object is an instance initialized with one of the explanation methods in the module
`mindspore.explainer.explanation`.
benchmarkers (list[Benchmark], optional): A list of benchmark objects to generate evaluation results.
Default: None.
Examples:
>>> from mindspore.explainer.explanation import GuidedBackprop, Gradient
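A hedged usage sketch of `run` may help here; `net`, `dataset` and `classes` are placeholders standing for a trained classification network, a `mindspore.dataset` object and its label names, following the argument descriptions above.
>>> from mindspore.explainer import ExplainRunner
>>> from mindspore.explainer.benchmark import Faithfulness
>>> # explainers are built from the (placeholder) trained network `net`
>>> gradient = Gradient(net)
>>> gbp = GuidedBackprop(net)
>>> # an optional benchmarker to evaluate the explainers
>>> faithfulness = Faithfulness(num_labels=len(classes))
>>> runner = ExplainRunner("./summary_dir")
>>> # dataset provides the data to explain, classes lists the label names
>>> runner.run((dataset, classes), explainers=[gradient, gbp], benchmarkers=[faithfulness])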

@ -508,16 +508,34 @@ class Faithfulness(AttributionMetric):
"""
Provides evaluation on faithfulness on XAI explanations.
Faithfulness first generates the saliency map with the given explainers and then calculates faithfulness based on
the chosen metric.
Three specific metrics to obtain quantified results are supported: "NaiveFaithfulness", "DeletionAUC", and
"InsertionAUC".
For metric "NaiveFaithfulness", a series of perturbed images is created by modifying pixels on the original image.
The perturbed images are then fed to the model to obtain a series of output probability drops. The faithfulness is
then quantified as the correlation between the probability drops and the saliency map values on the same pixels
(the correlations are further normalized into the range [0, 1]).
For metric "DeletionAUC", a series of perturbed images is created by accumulatively modifying pixels of the
original image to a base value (e.g. a constant). The perturbation starts from pixels with high saliency values
to pixels with low saliency values. Feeding the perturbed images into the model in order, an output probability
drop curve can be obtained. "DeletionAUC" is then obtained as the area under this probability drop curve.
For metric "InsertionAUC", a series of perturbed images is created by accumulatively inserting pixels of the
original image into a reference image (e.g. a black image). The insertion starts from pixels with high saliency
values to pixels with low saliency values. Feeding the perturbed images into the model in order, an output
probability increase curve can be obtained. "InsertionAUC" is then obtained as the area under this curve.
For all three metrics, a higher value indicates better faithfulness.
Args:
num_labels (int): Number of labels.
metric (str, optional): The specific metric to quantify faithfulness.
Options: "DeletionAUC", "InsertionAUC", "NaiveFaithfulness".
Default: "NaiveFaithfulness".
Examples:
>>> from mindspore.explainer.benchmark import Faithfulness
>>> # init a `Faithfulness` object
>>> num_labels = 10
>>> metric = "InsertionAUC"
@ -549,19 +567,22 @@ class Faithfulness(AttributionMetric):
"""
Evaluate faithfulness on a single data sample.
Note:
To apply `Faithfulness` to evaluate an explainer, this explainer must be initialized with a network that
contains the output activation function. Otherwise, the results will not be correct. Currently only single
sample (:math:`N=1`) at each call is supported.
Args:
explainer (Explanation): The explainer to be evaluated, see `mindspore.explainer.explanation`.
inputs (Tensor): A data sample, a 4D tensor of shape :math:`(N, C, H, W)`.
targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
If `targets` is a 1D tensor, its length should be the same as `inputs`.
saliency (Tensor, optional): The saliency map to be evaluated, a 4D tensor of shape :math:`(N, 1, H, W)`.
If it is None, the parsed `explainer` will generate the saliency map with `inputs` and `targets` and
continue the evaluation. Default: None.
Returns:
numpy.ndarray, 1D array of shape :math:`(N,)`, result of faithfulness evaluated on `explainer`.
Examples:
>>> # init an explainer, the network should contain the output activation function.
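A hedged continuation of this example, assuming the evaluation entry point is `evaluate` as documented above; the network `net` and the data are placeholders.
>>> gradient = Gradient(nn.SequentialCell([net, nn.Sigmoid()]))
>>> faithfulness = Faithfulness(num_labels=10, metric="NaiveFaithfulness")
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> targets = 5
>>> # `saliency` is left as None, so the explainer generates it internally
>>> res = faithfulness.evaluate(gradient, inputs, targets)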

@ -38,21 +38,24 @@ def _mask_out_saliency(saliency, threshold):
class Localization(AttributionMetric):
"""
r"""
Provides evaluation on the localization capability of XAI methods.
Two specific metrics to obtain quantified results are supported: "PointingGame" and "IoSR"
(Intersection over Salient Region).
For metric "PointingGame", the localization capability is calculated as the ratio of data in which the max position
of their saliency maps lies within the bounding boxes. Specifically, for a single datum, given the saliency map and
its bounding box, if the max point of its saliency map lies within the bounding box, the evaluation result is 1
otherwise 0.
For metric "IoSR" (Intersection over Salient Region), the localization capability is calculated as the intersection
of the bounding box and the salient region over the area of the salient region.
of the bounding box and the salient region over the area of the salient region. The salient region is defined as
the region whose value exceeds :math:`\theta * \max{saliency}`.
Args:
num_labels (int): Number of classes in the dataset.
metric (str, optional): Specific metric to calculate localization capability.
Options: "PointingGame", "IoSR".
Default: "PointingGame".
@ -89,15 +92,22 @@ class Localization(AttributionMetric):
"""
Evaluate localization on a single data sample.
Note:
Currently only single sample (:math:`N=1`) at each call is supported.
Args:
explainer (Explanation): The explainer to be evaluated, see `mindspore.explainer.explanation`.
inputs (Tensor): A data sample, a 4D tensor of shape :math:`(N, C, H, W)`.
targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
If `targets` is a 1D tensor, its length should be the same as `inputs`.
saliency (Tensor, optional): The saliency map to be evaluated, a 4D tensor of shape :math:`(N, 1, H, W)`.
If it is None, the parsed `explainer` will generate the saliency map with `inputs` and `targets` and
continue the evaluation. Default: None.
mask (Tensor, numpy.ndarray): Ground truth bounding box/masks for the inputs w.r.t targets, a 4D tensor
or numpy.ndarray of shape :math:`(N, 1, H, W)`.
Returns:
numpy.ndarray, 1D array of shape :math:`(N,)`, result of localization evaluated on `explainer`.
Examples:
>>> # init an explainer, the network should contain the output activation function.
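A hedged continuation of this example; the `evaluate` call, the network `net` and the bounding-box mask below are placeholders consistent with the argument descriptions above.
>>> gradient = Gradient(nn.SequentialCell([net, nn.Sigmoid()]))
>>> localization = Localization(num_labels=10, metric="PointingGame")
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> # ground-truth region of the target object as an (N, 1, H, W) mask
>>> masks = np.zeros((1, 1, 224, 224))
>>> masks[:, :, 60:160, 60:160] = 1
>>> targets = 5
>>> res = localization.evaluate(gradient, inputs, targets, mask=masks)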

@ -43,38 +43,37 @@ class GradCAM(IntermediateLayerAttribution):
r"""
Provides GradCAM explanation method.
`GradCAM` generates saliency map at intermediate layer. The attribution is obtained as:
.. math::
\alpha_k^c = \frac{1}{Z} \sum_i \sum_j \frac{\partial{y^c}}{\partial{A_{i,j}^k}}
attribution = ReLU(\sum_k \alpha_k^c A^k)
For more details, please refer to the original paper: `GradCAM <https://openaccess.thecvf.com/content_ICCV_2017/
papers/Selvaraju_Grad-CAM_Visual_Explanations_ICCV_2017_paper.pdf>`_.
Note:
The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
If you want to train the `network` afterwards, please reset it back to training mode through the opposite
operations.
Args:
network (Cell): The black-box model to be explained.
layer (str, optional): The layer name to generate the explanation, usually chosen as the last convolutional
layer for better practice. If it is '', the explanation will be generated at the input layer.
Default: ''.
Examples:
>>> from mindspore.explainer.explanation import GradCAM
>>> net = resnet50(10)
>>> param_dict = load_checkpoint("resnet50.ckpt")
>>> load_param_into_net(net, param_dict)
>>> # bind net with its output activation if you wish, e.g. nn.Sigmoid(),
>>> # you may also use the net itself.
>>> net = nn.SequentialCell([net, nn.Sigmoid()])
>>> # specify a layer name to generate the explanation, usually the last convolutional layer
>>> layer_name = '0.layer4'
>>> # init GradCAM with a trained network and specify the layer to obtain the attribution
>>> gradcam = GradCAM(net, layer=layer_name)
>>> # parse data and the target label to be explained and get the saliency map
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> saliency = gradcam(inputs, label)
"""
def __init__(
@ -108,9 +107,18 @@ class GradCAM(IntermediateLayerAttribution):
Call function for `GradCAM`.
Args:
inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
If it is a 1D tensor, its length should be the same as `inputs`.
Returns:
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> # gradcam is a GradCAM object, parse data and the target label to be explained and get the attribution
>>> saliency = gradcam(inputs, label)
"""
self._verify_data(inputs, targets)
self._hook_cell()

@ -59,29 +59,23 @@ class Gradient(Attribution):
explanation.
.. math::
attribution = \frac{\partial{y}}{\partial{x}}
Note:
The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
If you want to train the `network` afterwards, please reset it back to training mode through the opposite
operations.
Args:
network (Cell): The black-box model to be explained.
Examples:
>>> from mindspore.explainer.explanation import Gradient
>>> net = resnet50(10)
>>> param_dict = load_checkpoint("resnet50.ckpt")
>>> load_param_into_net(net, param_dict)
>>> # bind net with its output activation if you wish, e.g. nn.Sigmoid(),
>>> # you may also use the net itself. The saliency map might be slightly different for softmax activation.
>>> net = nn.SequentialCell([net, nn.Sigmoid()])
>>> # init Gradient with a trained network.
>>> gradient = Gradient(net)
>>> # parse data and the target label to be explained and get the saliency map
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> saliency = gradient(inputs, label)
"""
def __init__(self, network):
@ -99,9 +93,18 @@ class Gradient(Attribution):
Call function for `Gradient`.
Args:
inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
If it is a 1D tensor, its length should be the same as `inputs`.
Returns:
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> # gradient is a Gradient object, parse data and the target label to be explained and get the attribution
>>> saliency = gradient(inputs, label)
"""
self._verify_data(inputs, targets)
inputs = unify_inputs(inputs)

@ -33,6 +33,25 @@ class ModifiedReLU(Gradient):
self.hooked_list = []
def __call__(self, inputs, targets):
"""
Call function for `ModifiedReLU`, inherited by "Deconvolution" and "GuidedBackprop".
Args:
inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
If it is a 1D tensor, its length should be the same as `inputs`.
Returns:
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> # explainer is a "Deconvolution" or "GuidedBackprop" object, parse data and the target label to be
>>> # explained and get the attribution
>>> saliency = explainer(inputs, label)
"""
self._verify_data(inputs, targets)
inputs = unify_inputs(inputs)
targets = unify_targets(targets)
@ -63,24 +82,25 @@ class Deconvolution(ModifiedReLU):
"""
Deconvolution explanation.
Deconvolution method is a modified version of Gradient method. For the original ReLU operation in the network to be
explained, Deconvolution modifies the propagation rule from directly backpropagating gradients to backpropagating
positive gradients.
Note:
The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
If you want to train the `network` afterwards, please reset it back to training mode through the opposite
operations. To use `Deconvolution`, the `ReLU` operations in the network must be implemented with
`mindspore.nn.Cell` object rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be
correct.
Args:
network (Cell): The black-box model to be explained.
Examples:
>>> from mindspore.explainer.explanation import Deconvolution
>>> net = resnet50(10)
>>> param_dict = load_checkpoint("resnet50.ckpt")
>>> load_param_into_net(net, param_dict)
>>> # bind net with its output activation if you wish, e.g. nn.Sigmoid(),
>>> # you may also use the net itself. The saliency map might be slightly different for softmax activation.
>>> net = nn.SequentialCell([net, nn.Sigmoid()])
>>> # init Deconvolution with a trained network.
>>> deconvolution = Deconvolution(net)
>>> # parse data and the target label to be explained and get the saliency map
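The example is cut off by the hunk boundary; a hedged continuation following the same calling pattern as the other explainers:
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> saliency = deconvolution(inputs, label)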
@ -95,26 +115,27 @@ class Deconvolution(ModifiedReLU):
class GuidedBackprop(ModifiedReLU):
"""
Guided-Backpropagation explanation.
Guided-Backpropagation method is an extension of Gradient method. On top of the original ReLU operation in the
network to be explained, Guided-Backpropagation introduces another ReLU operation to filter out the negative
gradients during backpropagation.
Note:
The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
If you want to train the `network` afterwards, please reset it back to training mode through the opposite
operations. To use `GuidedBackprop`, the `ReLU` operations in the network must be implemented with
`mindspore.nn.Cell` object rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be
correct.
Args:
network (Cell): The black-box model to be explained.
Examples:
>>> from mindspore.explainer.explanation import GuidedBackprop
>>> net = resnet50(10)
>>> param_dict = load_checkpoint("resnet50.ckpt")
>>> load_param_into_net(net, param_dict)
>>> # bind net with its output activation if you wish, e.g. nn.Sigmoid(),
>>> # you may also use the net itself. The saliency map might be slightly different for softmax activation.
>>> net = nn.SequentialCell([net, nn.Sigmoid()])
>>> # init GuidedBackprop with a trained network.
>>> gbp = GuidedBackprop(net)
>>> # parse data and the target label to be explained and get the saliency map
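The example is cut off by the hunk boundary; a hedged continuation following the same calling pattern as the other explainers:
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> saliency = gbp(inputs, label)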
