diff --git a/mindspore/compression/quant/qat.py b/mindspore/compression/quant/qat.py
index 3d6255783a..b7925ff1c0 100644
--- a/mindspore/compression/quant/qat.py
+++ b/mindspore/compression/quant/qat.py
@@ -142,33 +142,33 @@ class QuantizationAwareTraining(Quantizer):
     Examples:
         >>> class LeNet5(nn.Cell):
-        >>>     def __init__(self, num_class=10, channel=1):
-        >>>         super(LeNet5, self).__init__()
-        >>>         self.type = "fusion"
-        >>>         self.num_class = num_class
-        >>>
-        >>>         # change `nn.Conv2d` to `nn.Conv2dBnAct`
-        >>>         self.conv1 = nn.Conv2dBnAct(channel, 6, 5, pad_mode='valid', activation='relu')
-        >>>         self.conv2 = nn.Conv2dBnAct(6, 16, 5, pad_mode='valid', activation='relu')
-        >>>         # change `nn.Dense` to `nn.DenseBnAct`
-        >>>         self.fc1 = nn.DenseBnAct(16 * 5 * 5, 120, activation='relu')
-        >>>         self.fc2 = nn.DenseBnAct(120, 84, activation='relu')
-        >>>         self.fc3 = nn.DenseBnAct(84, self.num_class)
-        >>>
-        >>>         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
-        >>>         self.flatten = nn.Flatten()
-        >>>
-        >>>     def construct(self, x):
-        >>>         x = self.conv1(x)
-        >>>         x = self.max_pool2d(x)
-        >>>         x = self.conv2(x)
-        >>>         x = self.max_pool2d(x)
-        >>>         x = self.flatten(x)
-        >>>         x = self.fc1(x)
-        >>>         x = self.fc2(x)
-        >>>         x = self.fc3(x)
-        >>>         return x
-        >>>
+        ...     def __init__(self, num_class=10, channel=1):
+        ...         super(LeNet5, self).__init__()
+        ...         self.type = "fusion"
+        ...         self.num_class = num_class
+        ...
+        ...         # change `nn.Conv2d` to `nn.Conv2dBnAct`
+        ...         self.conv1 = nn.Conv2dBnAct(channel, 6, 5, pad_mode='valid', activation='relu')
+        ...         self.conv2 = nn.Conv2dBnAct(6, 16, 5, pad_mode='valid', activation='relu')
+        ...         # change `nn.Dense` to `nn.DenseBnAct`
+        ...         self.fc1 = nn.DenseBnAct(16 * 5 * 5, 120, activation='relu')
+        ...         self.fc2 = nn.DenseBnAct(120, 84, activation='relu')
+        ...         self.fc3 = nn.DenseBnAct(84, self.num_class)
+        ...
+        ...         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+        ...         self.flatten = nn.Flatten()
+        ...
+        ...     def construct(self, x):
+        ...         x = self.conv1(x)
+        ...         x = self.max_pool2d(x)
+        ...         x = self.conv2(x)
+        ...         x = self.max_pool2d(x)
+        ...         x = self.flatten(x)
+        ...         x = self.fc1(x)
+        ...         x = self.fc2(x)
+        ...         x = self.fc3(x)
+        ...         return x
+        ...
         >>> net = LeNet5()
         >>> quantizer = QuantizationAwareTraining(bn_fold=False, per_channel=[True, False], symmetric=[True, False])
         >>> net_qat = quantizer.quantize(net)
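
For reference, a minimal standalone sketch of the doctest convention this patch adopts; it is not part of the change, and the `Pair` class and `run` helper below are illustrative names only. doctest starts a new statement at every `>>>` prompt, so an indented class or method body written behind its own `>>>` fails to compile, whereas `...` lines are parsed as the continuation of the statement opened by the preceding `>>>`.

import doctest

# Continuation lines marked with `...`: the whole class definition is
# compiled as a single statement, so the example runs cleanly.
GOOD = """
>>> class Pair:
...     def __init__(self, a, b):
...         self.a, self.b = a, b
...
>>> Pair(1, 2).a
1
"""

# Every line behind its own `>>>` prompt: doctest compiles each line as a
# separate statement, so the indented method body cannot be compiled and
# the example is reported as failing.
BAD = """
>>> class Pair:
>>>     def __init__(self, a, b):
>>>         self.a, self.b = a, b
>>> Pair(1, 2).a
1
"""

def run(example, name):
    # Parse the example string and run it with a standard DocTestRunner;
    # out=lambda s: None suppresses the per-failure report text.
    parser = doctest.DocTestParser()
    test = parser.get_doctest(example, {}, name, None, 0)
    runner = doctest.DocTestRunner(verbose=False)
    results = runner.run(test, out=lambda s: None)
    print(name, "failures:", results.failed)

run(GOOD, "good")  # good failures: 0
run(BAD, "bad")    # bad failures: > 0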