From b5a4f228cde3ab5fc6c54495ef8ca77da07906cd Mon Sep 17 00:00:00 2001
From: bai-yangfan
Date: Fri, 9 Oct 2020 15:15:28 +0800
Subject: [PATCH] weight_export

---
 mindspore/train/quant/quant.py | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/mindspore/train/quant/quant.py b/mindspore/train/quant/quant.py
index c32380b036..e3956b8474 100644
--- a/mindspore/train/quant/quant.py
+++ b/mindspore/train/quant/quant.py
@@ -342,14 +342,6 @@ class ExportToQuantInferNetwork:
         network = self._convert_quant2deploy(network)
         return network
 
-    def statistic_weight(self, weight):
-        out_nums = np.shape(weight)[0]
-        sta_metric = np.zeros((out_nums, 2), dtype=np.float32)
-        for num in range(out_nums):
-            sta_metric[num, 0] = np.min(weight[num])
-            sta_metric[num, 1] = np.max(weight[num])
-        return np.mean(sta_metric[:, 1]).tolist(), np.mean(sta_metric[:, 0]).tolist()
-
     def _get_quant_block(self, cell_core, activation, fake_quant_a_out):
         """convet network's quant subcell to deploy subcell"""
         # Calculate the scale and zero point
@@ -410,7 +402,8 @@ class ExportToQuantInferNetwork:
         elif isinstance(cell_core, quant.Conv2dBnWithoutFoldQuant):
             weight, bias = quant_utils.without_fold_batchnorm(weight, cell_core)
         if self.is_mindir:
-            param_dict["filter_maxq"], param_dict["filter_minq"] = self.statistic_weight(weight)
+            param_dict["filter_maxq"], param_dict["filter_minq"] = cell_core.fake_quant_weight.maxq, \
+                cell_core.fake_quant_weight.minq
         weight_b = weight
         bias_b = bias
         # apply the quant
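
For reference, the deleted statistic_weight helper derived filter_maxq/filter_minq
statistically: it took the per-output-channel minimum and maximum of the weight
tensor and averaged them into a single (max, min) pair. The patch replaces that
estimate with the learned minq/maxq parameters already held by
cell_core.fake_quant_weight. The standalone NumPy sketch below only restates the
removed helper for illustration; it is not part of the patched file, and the toy
weight shape in the usage example is an assumption.

import numpy as np


def statistic_weight(weight):
    """Average per-output-channel (max, min) of a weight tensor.

    Mirrors the helper removed by this patch; the patched export instead
    reads the fake-quant cell's learned maxq/minq parameters.
    """
    out_nums = np.shape(weight)[0]
    sta_metric = np.zeros((out_nums, 2), dtype=np.float32)
    for num in range(out_nums):
        sta_metric[num, 0] = np.min(weight[num])  # per-channel minimum
        sta_metric[num, 1] = np.max(weight[num])  # per-channel maximum
    # Returns (mean of per-channel maxima, mean of per-channel minima).
    return np.mean(sta_metric[:, 1]).tolist(), np.mean(sta_metric[:, 0]).tolist()


if __name__ == "__main__":
    # Hypothetical conv weight, shape (out_channels, in_channels, kh, kw).
    rng = np.random.default_rng(0)
    weight = rng.standard_normal((4, 3, 3, 3)).astype(np.float32)
    filter_maxq, filter_minq = statistic_weight(weight)
    print(filter_maxq, filter_minq)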