@@ -33,6 +33,25 @@ class ModifiedReLU(Gradient):
         self.hooked_list = []
 
     def __call__(self, inputs, targets):
+        """
+        Call function for `ModifiedReLU`, inherited by "Deconvolution" and "GuidedBackprop".
+
+        Args:
+            inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
+            targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
+                If it is a 1D tensor, its length should be the same as `inputs`.
+
+        Returns:
+            Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
+
+        Examples:
+            >>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
+            >>> label = 5
+            >>> # explainer is a "Deconvolution" or "GuidedBackprop" object, parse data and the target label to be
+            >>> # explained and get the attribution
+            >>> saliency = explainer(inputs, label)
+        """
         self._verify_data(inputs, targets)
         inputs = unify_inputs(inputs)
         targets = unify_targets(targets)
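
A minimal sketch of the shape contract documented above (illustrative only, not part of the diff; `explainer` stands for any `Deconvolution` or `GuidedBackprop` instance and is deliberately not constructed here):

import numpy as np
import mindspore as ms

# Inputs: a 4D tensor of shape (N, C, H, W); here N = 2 samples.
inputs = ms.Tensor(np.random.rand(2, 3, 224, 224).astype(np.float32))
# Targets: an integer, a 0D tensor, or a 1D tensor whose length equals N.
targets = ms.Tensor(np.array([5, 1], dtype=np.int32))
# saliency = explainer(inputs, targets)   # expected shape: (2, 1, 224, 224)
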
@@ -63,24 +82,25 @@ class Deconvolution(ModifiedReLU):
     """
     Deconvolution explanation.
 
-    To use `Deconvolution`, the `ReLU` operations in the network must be implemented with `mindspore.nn.Cell` object
-    rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be correct.
+    Deconvolution method is a modified version of Gradient method. For the original ReLU operation in the network to be
+    explained, Deconvolution modifies the propagation rule from directly backpropagating gradients to backpropagating
+    positive gradients.
 
-    Args:
-        network (Cell): The black-box model to be explained.
-
-    Notes:
+    Note:
         The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
         If you want to train the `network` afterwards, please reset it back to training mode through the opposite
-        operations.
+        operations. To use `Deconvolution`, the `ReLU` operations in the network must be implemented with
+        `mindspore.nn.Cell` object rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be
+        correct.
+
+    Args:
+        network (Cell): The black-box model to be explained.
 
     Examples:
+        >>> from mindspore.explainer.explanation import Deconvolution
         >>> net = resnet50(10)
         >>> param_dict = load_checkpoint("resnet50.ckpt")
         >>> load_param_into_net(net, param_dict)
+        >>> # bind net with its output activation if you wish, e.g. nn.Sigmoid(),
+        >>> # you may also use the net itself. The saliency map might be slightly different for softmax activation.
+        >>> net = nn.SequentialCell([net, nn.Sigmoid()])
         >>> # init Gradient with a trained network.
         >>> deconvolution = Deconvolution(net)
         >>> # parse data and the target label to be explained and get the saliency map
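
A minimal NumPy sketch of the propagation-rule change described above (an editorial illustration with hypothetical helper names, not MindSpore source code): the plain Gradient method passes gradients where the ReLU's forward input was positive, while Deconvolution instead backpropagates only the positive gradients.

import numpy as np

def relu_grad_vanilla(grad_out, forward_input):
    # Plain Gradient rule: keep the gradient only where the forward ReLU input was positive.
    return np.where(forward_input > 0, grad_out, 0.0)

def relu_grad_deconvolution(grad_out, forward_input):
    # Deconvolution rule: ignore the forward input and keep only the positive gradients.
    return np.where(grad_out > 0, grad_out, 0.0)

x = np.array([-1.0, 2.0, 3.0])        # forward input to a ReLU
g = np.array([0.5, -0.7, 0.2])        # gradient arriving from the layer above
print(relu_grad_vanilla(g, x))        # [ 0.  -0.7  0.2]
print(relu_grad_deconvolution(g, x))  # [0.5 0.  0.2]
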
@@ -95,26 +115,27 @@ class Deconvolution(ModifiedReLU):
 
 class GuidedBackprop(ModifiedReLU):
     """
-    Guided-Backpropation explanation.
+    Guided-Backpropagation explanation.
 
-    To use `GuidedBackprop`, the `ReLU` operations in the network must be implemented with `mindspore.nn.Cell` object
-    rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be correct.
+    Guided-Backpropagation method is an extension of Gradient method. On top of the original ReLU operation in the
+    network to be explained, Guided-Backpropagation introduces another ReLU operation to filter out the negative
+    gradients during backpropagation.
 
-    Args:
-        network (Cell): The black-box model to be explained.
-
-    Notes:
+    Note:
         The parsed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
         If you want to train the `network` afterwards, please reset it back to training mode through the opposite
-        operations.
+        operations. To use `GuidedBackprop`, the `ReLU` operations in the network must be implemented with
+        `mindspore.nn.Cell` object rather than `mindspore.ops.Operations.ReLU`. Otherwise, the results will not be
+        correct.
+
+    Args:
+        network (Cell): The black-box model to be explained.
 
     Examples:
+        >>> from mindspore.explainer.explanation import GuidedBackprop
         >>> net = resnet50(10)
         >>> param_dict = load_checkpoint("resnet50.ckpt")
         >>> load_param_into_net(net, param_dict)
+        >>> # bind net with its output activation if you wish, e.g. nn.Sigmoid(),
+        >>> # you may also use the net itself. The saliency map might be slightly different for softmax activation.
+        >>> net = nn.SequentialCell([net, nn.Sigmoid()])
         >>> # init Gradient with a trained network.
         >>> gbp = GuidedBackprop(net)
         >>> # parse data and the target label to be explained and get the saliency map
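
A similar sketch for Guided-Backpropagation (again an editorial illustration with hypothetical helper names, not MindSpore source code): on top of the ordinary ReLU gradient rule, the extra ReLU on the gradients filters out the negative incoming gradients.

import numpy as np

def relu_grad_guided_backprop(grad_out, forward_input):
    # Keep the plain ReLU gradient rule (forward input > 0) and additionally
    # zero out the negative incoming gradients (the extra ReLU on gradients).
    return np.where((forward_input > 0) & (grad_out > 0), grad_out, 0.0)

x = np.array([-1.0, 2.0, 3.0])            # forward input to a ReLU
g = np.array([0.5, -0.7, 0.2])            # gradient arriving from the layer above
print(relu_grad_guided_backprop(g, x))    # [0.  0.  0.2]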