Fix bug: fix API docs bugs

pull/9204/head
lixiaohui 4 years ago
parent f6134bd1da
commit 33f75f191c

@ -424,7 +424,7 @@ class Faithfulness(LabelSensitiveMetric):
>>> res = faithfulness.evaluate(gradient, inputs, targets)
>>> # usage 2: input the generated saliency map
>>> saliency = gradient(inputs, targets)
>>> res = faithfulenss.evaluate(gradient, inputs, targets, saliency)
>>> res = faithfulness.evaluate(gradient, inputs, targets, saliency)
"""
self._check_evaluate_param(explainer, inputs, targets, saliency)

@ -110,10 +110,10 @@ class Localization(LabelSensitiveMetric):
numpy.ndarray, 1D array of shape :math:`(N,)`, result of localization evaluated on `explainer`.
Examples:
>>> # init an explainer, the network should contain the output activation function.
>>> # init an explainer with a trained network
>>> gradient = Gradient(network)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> masks = np.zeros(1, 1, 224, 224)
>>> masks = np.zeros([1, 1, 224, 224])
>>> masks[:, :, 65: 100, 65: 100] = 1
>>> targets = 5
>>> # usage 1: input the explainer and the data to be explained,

@ -111,7 +111,7 @@ class GradCAM(IntermediateLayerAttribution):
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> inputs = ms.Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> # gradcam is a GradCAM object, parse data and the target label to be explained and get the attribution
>>> saliency = gradcam(inputs, label)

@ -89,7 +89,7 @@ class Gradient(Attribution):
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> inputs = ms.Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> # gradient is a Gradient object, parse data and the target label to be explained and get the attribution
>>> saliency = gradient(inputs, label)

@ -45,7 +45,7 @@ class ModifiedReLU(Gradient):
Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`.
Examples:
>>> inputs = ms.Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> # explainer is a "Deconvolution" or "GuidedBackprop" object, parse data and the target label to be
>>> # explained and get the attribution
@ -104,7 +104,7 @@ class Deconvolution(ModifiedReLU):
>>> # init Gradient with a trained network.
>>> deconvolution = Deconvolution(net)
>>> # parse data and the target label to be explained and get the saliency map
>>> inputs = ms.Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 5
>>> saliency = deconvolution(inputs, label)
"""

@ -76,7 +76,7 @@ class Occlusion(PerturbationAttribution):
>>> param_dict = load_checkpoint("resnet50.ckpt")
>>> load_param_into_net(network, param_dict)
>>> occlusion = Occlusion(network)
>>> x = Tensor(np.random.rand([1, 3, 224, 224]), ms.float32)
>>> x = Tensor(np.random.rand(1, 3, 224, 224), ms.float32)
>>> label = 1
>>> saliency = occlusion(x, label)
"""

@ -114,14 +114,13 @@ class RISE(PerturbationAttribution):
Examples:
>>> # given an instance of RISE, a saliency map can be generated
>>> inputs = ms.Tensor(np.random.rand([2, 3, 224, 224]), ms.float32)
>>> inputs = ms.Tensor(np.random.rand(2, 3, 224, 224), ms.float32)
>>> # when `targets` is an integer
>>> targets = 5
>>> saliency = rise(inputs, targets)
>>> # `targets` can also be a tensor
>>> targets = ms.Tensor([[5], [1]])
>>> saliency = rise(inputs, targets)
>>>
"""
self._verify_data(inputs, targets)
height, width = inputs.shape[2], inputs.shape[3]

Loading…
Cancel
Save