# This is a combination of 2 commits.

updating notes of pynative in nn_layer
pull/8641/head
zhangz0911gm 4 years ago
parent 0ab808ec9e
commit dda18138c1
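
This commit applies one pattern to every docstring example touched below: a bare expression such as >>> softmax(input_x), whose result is only visible through the interactive interpreter's echo, is replaced by an explicit assignment plus print(...), so the expected output shown in the docstring corresponds to something the example actually emits when run as a script (the PyNative concern named in the commit message). A minimal sketch of the new style, reusing the ReLU values from one of the hunks below; the import lines are assumptions, since the hunks do not show them:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, nn
>>> input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
>>> relu = nn.ReLU()
>>> output = relu(input_x)
>>> print(output)
[0. 2. 0. 2. 0.]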

@@ -40,7 +40,8 @@ def piecewise_constant_lr(milestone, learning_rates):
Examples:
>>> milestone = [2, 5, 10]
>>> learning_rates = [0.1, 0.05, 0.01]
>>> piecewise_constant_lr(milestone, learning_rates)
>>> output = piecewise_constant_lr(milestone, learning_rates)
>>> print(output)
[0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01]
"""
validator.check_value_type('milestone', milestone, (tuple, list))
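
For reference, the list printed in the piecewise_constant_lr example can be reproduced by expanding each (milestone, learning_rate) pair; the helper below is a hypothetical re-implementation written only to match the documented values, not the library code:

>>> def piecewise_constant_lr_sketch(milestone, learning_rates):
...     # Repeat each rate until its milestone step is reached.
...     lr, last = [], 0
...     for m, rate in zip(milestone, learning_rates):
...         lr += [rate] * (m - last)
...         last = m
...     return lr
...
>>> piecewise_constant_lr_sketch([2, 5, 10], [0.1, 0.05, 0.01])
[0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01]
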
@@ -100,7 +101,8 @@ def exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch,
>>> total_step = 6
>>> step_per_epoch = 2
>>> decay_epoch = 1
>>> exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch)
>>> output = exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch)
>>> print(output)
[0.1, 0.1, 0.09000000000000001, 0.09000000000000001, 0.08100000000000002, 0.08100000000000002]
"""
_check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
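
The exponential_decay_lr values above are consistent with one decay per epoch, i.e. lr_i = learning_rate * decay_rate ** (i // step_per_epoch). The check below assumes learning_rate = 0.1 and decay_rate = 0.9; those assignments sit above this hunk and are only inferred here from the printed output:

>>> [round(0.1 * 0.9 ** (i // 2), 3) for i in range(6)]
[0.1, 0.1, 0.09, 0.09, 0.081, 0.081]
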
@@ -142,7 +144,8 @@ def natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch,
>>> total_step = 6
>>> step_per_epoch = 2
>>> decay_epoch = 2
>>> natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True)
>>> output = natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True)
>>> print(output)
[0.1, 0.1, 0.1, 0.1, 0.016529888822158657, 0.016529888822158657]
"""
_check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
@@ -185,7 +188,8 @@ def inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, deca
>>> total_step = 6
>>> step_per_epoch = 1
>>> decay_epoch = 1
>>> inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True)
>>> output = inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True)
>>> print(output)
[0.1, 0.06666666666666667, 0.05, 0.04, 0.03333333333333333, 0.028571428571428574]
"""
_check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair)
@@ -227,7 +231,8 @@ def cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch):
>>> total_step = 6
>>> step_per_epoch = 2
>>> decay_epoch = 2
>>> cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch)
>>> output = cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch)
>>> print(output)
[0.1, 0.1, 0.05500000000000001, 0.05500000000000001, 0.01, 0.01]
"""
if not isinstance(min_lr, float):
@@ -295,7 +300,8 @@ def polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_e
>>> step_per_epoch = 2
>>> decay_epoch = 2
>>> power = 0.5
>>> polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power)
>>> r = polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power)
>>> print(r)
[0.1, 0.1, 0.07363961030678928, 0.07363961030678928, 0.01, 0.01]
"""
validator.check_positive_float(learning_rate, 'learning_rate')
@@ -350,7 +356,8 @@ def warmup_lr(learning_rate, total_step, step_per_epoch, warmup_epoch):
>>> total_step = 6
>>> step_per_epoch = 2
>>> warmup_epoch = 2
>>> warmup_lr(learning_rate, total_step, step_per_epoch, warmup_epoch)
>>> output = warmup_lr(learning_rate, total_step, step_per_epoch, warmup_epoch)
>>> print(output)
[0.0, 0.0, 0.05, 0.05, 0.1, 0.1]
"""
if not isinstance(learning_rate, float):

@@ -70,7 +70,8 @@ class Softmax(Cell):
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> softmax = nn.Softmax()
>>> softmax(input_x)
>>> output = softmax(input_x)
>>> print(output)
[0.03168 0.01166 0.0861 0.636 0.2341]
"""
@@ -106,7 +107,8 @@ class LogSoftmax(Cell):
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> log_softmax = nn.LogSoftmax()
>>> log_softmax(input_x)
>>> output = log_softmax(input_x)
>>> print(output)
[[-5.00672150e+00 -6.72150636e-03 -1.20067215e+01]
[-7.00091219e+00 -1.40009127e+01 -9.12250078e-04]]
"""
@@ -174,7 +176,8 @@ class ReLU(Cell):
Examples:
>>> input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
>>> relu = nn.ReLU()
>>> relu(input_x)
>>> output = relu(input_x)
>>> print(output)
[0. 2. 0. 2. 0.]
"""
@@ -203,7 +206,8 @@ class ReLU6(Cell):
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> relu6 = nn.ReLU6()
>>> relu6(input_x)
>>> output = relu6(input_x)
>>> print(output)
[0. 0. 0. 2. 1.]
"""
@@ -240,7 +244,8 @@ class LeakyReLU(Cell):
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> leaky_relu = nn.LeakyReLU()
>>> leaky_relu(input_x)
>>> output = leaky_relu(input_x)
>>> print(output)
[[-0.2 4. -1.6]
[ 2 -1. 9.]]
"""
@@ -284,7 +289,8 @@ class Tanh(Cell):
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 2, 1]), mindspore.float16)
>>> tanh = nn.Tanh()
>>> tanh(input_x)
>>> output = tanh(input_x)
>>> print(output)
[0.7617 0.964 0.995 0.964 0.7617]
"""
@@ -315,7 +321,8 @@ class GELU(Cell):
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> gelu = nn.GELU()
>>> gelu(input_x)
>>> output = gelu(input_x)
>>> print(output)
[[-1.5880802e-01 3.9999299e+00 -3.1077917e-21]
[ 1.9545976e+00 -2.2918017e-07 9.0000000e+00]]
"""
@@ -346,7 +353,8 @@ class Sigmoid(Cell):
Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> sigmoid = nn.Sigmoid()
>>> sigmoid(input_x)
>>> output = sigmoid(input_x)
>>> print(output)
[0.2688 0.11914 0.5 0.881 0.7305]
"""
@@ -384,7 +392,8 @@ class PReLU(Cell):
Examples:
>>> input_x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
>>> prelu = nn.PReLU()
>>> prelu(input_x)
>>> output = prelu(input_x)
>>> print(output)
[[[[0.1 0.6]
[0.9 0.9]]]]
@@ -506,6 +515,7 @@ class LogSigmoid(Cell):
>>> net = nn.LogSigmoid()
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> logsigmoid = net(input_x)
>>> print(logsigmoid)
[-3.1326166e-01, -1.2692806e-01, -4.8587345e-02]
"""

@@ -76,7 +76,8 @@ class Dropout(Cell):
>>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
>>> net = nn.Dropout(keep_prob=0.8)
>>> net.set_train()
>>> net(x)
>>> output = net(x)
>>> print(output)
[[[0., 1.25, 0.],
[1.25, 1.25, 1.25]],
[[1.25, 1.25, 1.25],
@@ -141,7 +142,8 @@ class Flatten(Cell):
Examples:
>>> input = Tensor(np.array([[[1.2, 1.2], [2.1, 2.1]], [[2.2, 2.2], [3.2, 3.2]]]), mindspore.float32)
>>> net = nn.Flatten()
>>> net(input)
>>> output = net(input)
>>> print(output)
[[1.2 1.2 2.1 2.1]
[2.2 2.2 3.2 3.2]]
"""
@@ -196,7 +198,8 @@ class Dense(Cell):
Examples:
>>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
>>> net = nn.Dense(3, 4)
>>> net(input)
>>> output = net(input)
>>> print(output)
[[ 2.5246444 2.2738023 0.5711005 -3.9399147 ]
[ 1.0739875 4.0155234 0.94188046 -5.459526 ]]
"""
@@ -317,7 +320,8 @@ class ClipByNorm(Cell):
>>> net = nn.ClipByNorm()
>>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
>>> clip_norm = Tensor(np.array([100]).astype(np.float32))
>>> net(input, clip_norm).shape
>>> result = net(input, clip_norm).shape
>>> print(result)
(4, 16)
"""
@@ -386,7 +390,8 @@ class Norm(Cell):
Examples:
>>> net = nn.Norm(axis=0)
>>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32)
>>> net(input)
>>> output = net(input)
>>> print(output)
[2.236068 9.848858 4. 5.656854]
"""
@@ -442,7 +447,8 @@ class OneHot(Cell):
Examples:
>>> net = nn.OneHot(depth=4, axis=1)
>>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
>>> net(indices)
>>> output = net(indices)
>>> print(output)
[[[0. 0.]
[1. 0.]
[0. 0.]
@@ -501,11 +507,11 @@ class Pad(Cell):
>>> import mindspore.nn as nn
>>> import numpy as np
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.pad = nn.Pad(paddings=((1,1),(2,2)), mode="CONSTANT")
>>> def construct(self, x):
>>> return self.pad(x)
... def __init__(self):
... super(Net, self).__init__()
... self.pad = nn.Pad(paddings=((1,1),(2,2)), mode="CONSTANT")
... def construct(self, x):
... return self.pad(x)
>>> x = np.random.random(size=(2, 3)).astype(np.float32)
>>> pad = Net()
>>> ms_output = pad(Tensor(x))
@@ -567,9 +573,10 @@ class Unfold(Cell):
Examples:
>>> net = Unfold(ksizes=[1, 2, 2, 1], strides=[1, 2, 2, 1], rates=[1, 2, 2, 1])
>>> image = Tensor(np.ones([2, 3, 6, 6]), dtype=mstype.float16)
>>> net(image)
Tensor ([[[[1, 1] [1, 1]] [[1, 1], [1, 1]] [[1, 1] [1, 1]], [[1, 1] [1, 1]], [[1, 1] [1, 1]],
[[1, 1], [1, 1]]]], shape=(2, 12, 2, 2), dtype=mstype.float16)
>>> output = net(image)
>>> print(output)
[[[[1, 1] [1, 1]] [[1, 1], [1, 1]] [[1, 1] [1, 1]], [[1, 1] [1, 1]], [[1, 1] [1, 1]],
[[1, 1], [1, 1]]]]
"""
def __init__(self, ksizes, strides, rates, padding="valid"):
@@ -621,6 +628,7 @@ class MatrixDiag(Cell):
>>> x = Tensor(np.array([1, -1]), mstype.float32)
>>> matrix_diag = nn.MatrixDiag()
>>> result = matrix_diag(x)
>>> print(result)
[[1. 0.]
[0. -1.]]
"""
@@ -652,6 +660,7 @@ class MatrixDiagPart(Cell):
>>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
>>> matrix_diag_part = nn.MatrixDiagPart()
>>> result = matrix_diag_part(x)
>>> print(result)
[[-1., 1.], [-1., 1.], [-1., 1.]]
"""
def __init__(self):
@@ -684,6 +693,7 @@ class MatrixSetDiag(Cell):
>>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
>>> matrix_set_diag = nn.MatrixSetDiag()
>>> result = matrix_set_diag(x, diagonal)
>>> print(result)
[[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
"""
def __init__(self):

@@ -80,7 +80,8 @@ class Conv2dBnAct(Cell):
>>> net = nn.Conv2dBnAct(120, 240, 4, has_bn=True, activation='relu')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
>>> result = net(input)
>>> result.shape
>>> output = result.shape
>>> print(output)
(1, 240, 1024, 640)
"""
@@ -171,7 +172,8 @@ class DenseBnAct(Cell):
>>> net = nn.DenseBnAct(3, 4)
>>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
>>> result = net(input)
>>> result.shape
>>> output = result.shape
>>> print(output)
(2, 4)
"""

@@ -87,7 +87,8 @@ class SequentialCell(Cell):
>>> seq = nn.SequentialCell([conv, bn, relu])
>>>
>>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
>>> seq(x)
>>> output = seq(x)
>>> print(output)
[[[[0.02531557 0. ]
[0.04933941 0.04880078]]
[[0. 0. ]
@@ -155,7 +156,8 @@ class SequentialCell(Cell):
>>> seq = nn.SequentialCell([conv, bn])
>>> seq.append(relu)
>>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32)
>>> seq(x)
>>> output = seq(x)
>>> print(output)
[[[[0.12445523 0.12445523]
[0.12445523 0.12445523]]
[[0. 0. ]

@@ -199,7 +199,8 @@ class Conv2d(_Conv):
Examples:
>>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
>>> net(input).shape
>>> output = net(input).shape
>>> print(output)
(1, 240, 1024, 640)
"""
@@ -374,7 +375,8 @@ class Conv1d(_Conv):
Examples:
>>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 640]), mindspore.float32)
>>> net(input).shape
>>> output = net(input).shape
>>> print(output)
(1, 240, 640)
"""
@@ -544,7 +546,8 @@ class Conv2dTranspose(_Conv):
Examples:
>>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
>>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
>>> net(input).shape
>>> output = net(input).shape
>>> print(output)
(1, 64, 19, 53)
"""
@@ -719,7 +722,8 @@ class Conv1dTranspose(_Conv):
Examples:
>>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
>>> input = Tensor(np.ones([1, 3, 50]), mindspore.float32)
>>> net(input).shape
>>> output = net(input).shape
>>> print(output)
(1, 64, 53)
"""

@@ -66,7 +66,8 @@ class Embedding(Cell):
>>>
>>> # Maps the input word IDs to word embedding.
>>> output = net(input_data)
>>> output.shape
>>> result = output.shape
>>> print(result)
(8, 128, 768)
"""

@@ -53,7 +53,8 @@ class ImageGradients(Cell):
Examples:
>>> net = nn.ImageGradients()
>>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
>>> net(image)
>>> output = net(image)
>>> print(output)
[[[[2,2]
[0,0]]]]
[[[[1,0]
@@ -214,6 +215,7 @@ class SSIM(Cell):
>>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
>>> img2 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
>>> ssim = net(img1, img2)
>>> print(ssim)
[0.12174469]
"""
def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
@@ -379,6 +381,7 @@ class PSNR(Cell):
>>> img1 = Tensor(np.random.random((1,3,16,16)))
>>> img2 = Tensor(np.random.random((1,3,16,16)))
>>> psnr = net(img1, img2)
>>> print(psnr)
[7.8297315]
"""
def __init__(self, max_val=1.0):
@@ -447,7 +450,8 @@ class CentralCrop(Cell):
>>> net = nn.CentralCrop(central_fraction=0.5)
>>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)
>>> output = net(image)
>>> output.shape
>>> result = output.shape
>>> print(result)
(4, 3, 2, 2)
"""

@@ -59,7 +59,8 @@ class ReduceLogSumExp(Cell):
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = nn.ReduceLogSumExp(1, keep_dims=True)
>>> output = op(input_x)
>>> output.shape
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
"""
@@ -96,6 +97,7 @@ class Range(Cell):
Examples:
>>> net = nn.Range(1, 8, 2)
>>> out = net()
>>> print(out)
[1, 3, 5, 7]
"""
@@ -149,6 +151,7 @@ class LinSpace(Cell):
Examples:
>>> linspace = nn.LinSpace(1, 10, 5)
>>> output = linspace()
>>> print(output)
[1, 3.25, 5.5, 7.75, 10]
"""

@@ -522,7 +522,8 @@ class LayerNorm(Cell):
>>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
>>> shape1 = x.shape[1:]
>>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
>>> m(x).shape
>>> output = m(x).shape
>>> print(output)
(20, 5, 10, 10)
"""
@@ -593,7 +594,8 @@ class GroupNorm(Cell):
Examples:
>>> goup_norm_op = nn.GroupNorm(2, 2)
>>> x = Tensor(np.ones([1, 2, 4, 4], np.float32))
>>> goup_norm_op(x)
>>> output = goup_norm_op(x)
>>> print(output)
[[[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]

@@ -107,6 +107,7 @@ class MaxPool2d(_PoolNd):
Examples:
>>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
>>> print(x)
[[[[1. 5. 5. 1.]
[0. 3. 4. 8.]
[4. 2. 7. 6.]
@@ -116,9 +117,10 @@ class MaxPool2d(_PoolNd):
[0. 0. 4. 0.]
[1. 8. 7. 0.]]]]
>>> output = pool(x)
>>> output.shape
>>> result = output.shape
>>> print(result)
(1, 2, 2, 2)
>>> output
>>> print(output)
[[[[7. 8.]
[9. 9.]]
[[7. 8.]
@@ -185,7 +187,8 @@ class MaxPool1d(_PoolNd):
>>> max_pool = nn.MaxPool1d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4]), mindspore.float32)
>>> output = max_pool(x)
>>> output.shape
>>> result = output.shape
>>> print(result)
(1, 2, 2)
"""
@@ -269,6 +272,7 @@ class AvgPool2d(_PoolNd):
Examples:
>>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
>>> print(x)
[[[[5. 5. 9. 9.]
[8. 4. 3. 0.]
[2. 7. 1. 2.]
@@ -278,9 +282,10 @@ class AvgPool2d(_PoolNd):
[0. 8. 9. 7.]
[2. 1. 4. 9.]]]]
>>> output = pool(x)
>>> output.shape
>>> result = output.shape
>>> print(result)
(1, 2, 2, 2)
>>> output
>>> print(output)
[[[[4.888889 4.4444447]
[4.111111 3.4444444]]
[[4.2222223 4.5555553]
@@ -345,7 +350,8 @@ class AvgPool1d(_PoolNd):
>>> pool = nn.AvgPool1d(kernel_size=6, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
>>> output = pool(x)
>>> output.shape
>>> result = output.shape
>>> print(result)
(1, 3, 1)
"""

@@ -115,7 +115,8 @@ def _partial_init(cls_or_self, **kwargs):
>>> foo_builder = Foo.partial_init(a=3, b=4).partial_init(answer=42)
>>> foo_instance1 = foo_builder()
>>> foo_instance2 = foo_builder()
>>> id(foo_instance1) == id(foo_instance2)
>>> result = (id(foo_instance1) == id(foo_instance2))
>>> print(result)
False
"""
@@ -233,7 +234,7 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
>>> fake_quant = nn.FakeQuantWithMinMaxObserver()
>>> input = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
>>> result = fake_quant(input)
>>> result
>>> print(result)
[[0.9882355, 1.9764705, 0.9882355], [-1.9764705, 0. , -0.9882355]]
"""
@@ -376,7 +377,8 @@ class Conv2dBnFoldQuant(Cell):
>>> quant_config=qconfig)
>>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
>>> result = conv2d_bnfold(input)
>>> result.shape
>>> output = result.shape
>>> print(output)
(2, 6, 2, 2)
"""
@@ -561,7 +563,8 @@ class Conv2dBnWithoutFoldQuant(Cell):
>>> quant_config=qconfig)
>>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mstype.float32)
>>> result = conv2d_no_bnfold(input)
>>> result.shape
>>> output = result.shape
>>> print(output)
(2, 6, 2, 2)
"""
@@ -682,7 +685,8 @@ class Conv2dQuant(Cell):
>>> quant_config=qconfig)
>>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
>>> result = conv2d_quant(input)
>>> result.shape
>>> output = result.shape
>>> print(output)
(2, 6, 2, 2)
"""
@@ -782,7 +786,8 @@ class DenseQuant(Cell):
>>> dense_quant = nn.DenseQuant(3, 6, quant_config=qconfig)
>>> input = Tensor(np.random.randint(-2, 2, (2, 3)), mindspore.float32)
>>> result = dense_quant(input)
>>> result.shape
>>> output = result.shape
>>> print(output)
(2, 6)
"""
@@ -887,7 +892,7 @@ class ActQuant(_QuantActivation):
>>> act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig)
>>> input = Tensor(np.array([[1, 2, -1], [-2, 0, -1]]), mindspore.float32)
>>> result = act_quant(input)
>>> result
>>> print(result)
[[0.9882355, 1.9764705, 0.], [0., 0., 0.]]
"""
@@ -949,7 +954,7 @@ class TensorAddQuant(Cell):
>>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
>>> input_x2 = Tensor(np.ones((2, 3)), mindspore.float32)
>>> result = add_quant(input_x1, input_x2)
>>> result
>>> print(result)
[[1.9764705, 3.011765, 1.9764705], [-0.9882355, 0.9882355, 0.]]
"""
@@ -996,7 +1001,7 @@ class MulQuant(Cell):
>>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
>>> input_x2 = Tensor(np.ones((2, 3)) * 2, mindspore.float32)
>>> result = mul_quant(input_x1, input_x2)
>>> result
>>> print(result)
[[1.9764705, 4.0000005, 1.9764705], [-4., 0., -1.9764705]]
"""
