[dgraph qat] Refine calculating output scale of dygraph qat (#31710)

* Refine calculating output scale of dygraph qat, test=develop
2.0.1-rocm-post
cc 4 years ago committed by GitHub
parent 420527f0d9
commit 1d197f6c97
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@@ -499,6 +499,10 @@ class QuantizedNoweightLayer(layers.Layer):

     def forward(self, input):
         quant_input = self._fake_quant_input(input)
+        # TODO (jc): support ops that have several inputs
+        if isinstance(input, list):
+            assert len(input) == 1, \
+                "The QuantizedNoweightLayer should only have one input."
         return self._layer.forward(quant_input)

@@ -12,12 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle.nn import Linear, Conv2D
-from paddle.fluid.dygraph.nn import Pool2D
-from paddle.nn.layer.activation import ReLU, LeakyReLU, Sigmoid, ReLU6
-from paddle.nn.layer.activation import Tanh, Softmax, PReLU, Swish
+import paddle

-_op_real_in_out_name = {
+op_real_in_out_name = {
     "conv2d": [["Input", "Filter"], ["Output"]],
     "depthwise_conv2d": [["Input", "Filter"], ["Output"]],
     "pool2d": [["X"], ["Out"]],
@@ -33,14 +30,30 @@ op_real_in_out_name = {
     "swish": [["X"], ["Out"]],
 }

-_quant_layers_map = {
-    'Conv2D': Conv2D,
-    'Linear': Linear,
-    'Pool2D': Pool2D,
-    'ReLU': ReLU,
-    'LeakyReLU': LeakyReLU,
-    'ReLU6': ReLU6,
-    'Softmax': Softmax,
-    'Tanh': Tanh,
-    'Swish': Swish
+supported_quant_layers_map = {
+    'Conv2D': paddle.nn.Conv2D,
+    'Linear': paddle.nn.Linear,
+    'AdaptiveAvgPool2D': paddle.nn.AdaptiveAvgPool2D,
+    'AdaptiveMaxPool2D': paddle.nn.AdaptiveMaxPool2D,
+    'AvgPool2D': paddle.nn.AvgPool2D,
+    'MaxPool2D': paddle.nn.MaxPool2D,
+    'Hardswish': paddle.nn.Hardswish,
+    'LeakyReLU': paddle.nn.LeakyReLU,
+    'PReLU': paddle.nn.PReLU,
+    'ReLU': paddle.nn.ReLU,
+    'ReLU6': paddle.nn.ReLU6,
+    'Sigmoid': paddle.nn.Sigmoid,
+    'Softmax': paddle.nn.Softmax,
+    'Swish': paddle.nn.Swish,
+    'Tanh': paddle.nn.Tanh,
+    'Hardswish': paddle.nn.Hardswish,
+    'BatchNorm': paddle.nn.BatchNorm,
+    'GroupNorm': paddle.nn.GroupNorm,
+    'LayerNorm': paddle.nn.LayerNorm,
 }
+
+out_scale_layers_list = (
+    paddle.nn.Conv2D, paddle.nn.Linear, paddle.nn.MaxPool2D,
+    paddle.nn.BatchNorm, paddle.nn.BatchNorm2D, paddle.nn.SyncBatchNorm,
+    paddle.nn.LeakyReLU, paddle.nn.PReLU, paddle.nn.ReLU, paddle.nn.ReLU6,
+    paddle.nn.Sigmoid, paddle.nn.Softmax, paddle.nn.Tanh, paddle.nn.Swish)

@@ -191,8 +191,8 @@ class TestImperativeAddQuantDequant(unittest.TestCase):
             weight_quantize_type='abs_max',
             activation_quantize_type='moving_average_abs_max',
             quantizable_layer_type=[
-                'Conv2D', 'Linear', 'ReLU', 'Pool2D', 'LeakyReLU', 'ReLU6',
-                'Tanh', 'Swish'
+                'Conv2D', 'Linear', 'ReLU', 'LeakyReLU', 'ReLU6', 'Tanh',
+                'Swish'
             ])
         with fluid.dygraph.guard():

Loading…
Cancel
Save