!11846 fix weight quant bug && modify var name

From: @jianghui58
Reviewed-by: @hangangqiang
Signed-off-by:
pull/11846/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit 11cd3b9b2c

@@ -74,7 +74,7 @@ void LiteSession::ConvertTensorsQuantParam(const schema::Tensor *src_tensor, lit
       dst_tensor->AddQuantParam(quant_arg);
     }
   }
-  dst_tensor->SetEnableHuffmanCode(src_tensor->enableHuffmanCode());
+  dst_tensor->set_enable_huffman_code(src_tensor->enableHuffmanCode());
   auto quant_clusters = src_tensor->quantClusters();
   if (quant_clusters != nullptr) {
     std::vector<float> clusters;

@@ -451,9 +451,9 @@ void PrimitiveC::set_quant_type(const schema::QuantType &quant_type) { this->qua
 
 schema::QuantType PrimitiveC::quant_type() const { return quant_type_; }
 
-bool PrimitiveC::IsEnableHuffmanCode() const { return enableHuffmanCode; }
+bool PrimitiveC::enable_huffman_code() const { return enable_huffman_code_; }
 
-void PrimitiveC::SetEnableHuffmanCode(bool enableHuffmanCode) { this->enableHuffmanCode = enableHuffmanCode; }
+void PrimitiveC::set_enable_huffman_code(bool enable_huffman_code) { this->enable_huffman_code_ = enable_huffman_code; }
 
 std::shared_ptr<PrimitiveC> GetReturnPrim() {
   auto return_primitiveT = new (std::nothrow) schema::PrimitiveT;

@@ -123,9 +123,9 @@ class PrimitiveC : public mindspore::Primitive {
 
   schema::QuantType quant_type() const;
 
-  bool IsEnableHuffmanCode() const;
+  bool enable_huffman_code() const;
 
-  void SetEnableHuffmanCode(bool enableHuffmanCode);
+  void set_enable_huffman_code(bool enable_huffman_code);
 
   virtual int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs);
@@ -158,7 +158,7 @@ class PrimitiveC : public mindspore::Primitive {
   schema::QuantType quant_type_{schema::QuantType_QUANT_NONE};
   bool infer_flag_ = true;
   int op_type_ = OP_TYPE_NOT_SET;
-  bool enableHuffmanCode = false;
+  bool enable_huffman_code_ = false;
 };
 
 std::shared_ptr<PrimitiveC> GetReturnPrim();

@@ -366,9 +366,9 @@ std::vector<float> Tensor::quant_clusters() const { return this->quant_clusters_
 
 void Tensor::set_quant_clusters(const std::vector<float> &clusters) { this->quant_clusters_ = clusters; }
 
-bool Tensor::IsEnableHuffmanCode() const { return enableHuffmanCode; }
+bool Tensor::enable_huffman_code() const { return enable_huffman_code_; }
 
-void Tensor::SetEnableHuffmanCode(bool enableHuffmanCode) { this->enableHuffmanCode = enableHuffmanCode; }
+void Tensor::set_enable_huffman_code(bool enable_huffman_code) { this->enable_huffman_code_ = enable_huffman_code; }
 
 std::vector<tensor::MSTensor *> TensorVectorCast(const std::vector<Tensor *> &src) {
   std::vector<tensor::MSTensor *> target(src.size());

@@ -149,9 +149,9 @@ class Tensor : public mindspore::tensor::MSTensor {
 
   void set_quant_clusters(const std::vector<float> &clusters);
 
-  bool IsEnableHuffmanCode() const;
+  bool enable_huffman_code() const;
 
-  void SetEnableHuffmanCode(bool enableHuffmanCode);
+  void set_enable_huffman_code(bool enable_huffman_code);
 
   virtual bool IsConst() const {
     return (this->category_ == CONST_TENSOR || this->category_ == CONST_SCALAR) && this->data_ != nullptr;
@@ -202,7 +202,7 @@ class Tensor : public mindspore::tensor::MSTensor {
   std::vector<float> quant_clusters_;
   mindspore::lite::Allocator *allocator_ = nullptr;
   Tensor *root_tensor_ = nullptr;
-  bool enableHuffmanCode = false;
+  bool enable_huffman_code_ = false;
 };
 
 inline size_t DataTypeSize(const TypeId type) {

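Note on the renames above: they replace the Java-style accessors with the snake_case getter/setter convention used across the lite codebase (getter named after the member, set_ prefix for the setter, trailing underscore on the data member). A minimal, self-contained illustration of the convention; the class name is invented for the example, not taken from the patch:

// Illustrative sketch of the accessor convention this patch converges on.
class Example {
 public:
  bool enable_huffman_code() const { return enable_huffman_code_; }              // was IsEnableHuffmanCode()
  void set_enable_huffman_code(bool enable) { enable_huffman_code_ = enable; }   // was SetEnableHuffmanCode(bool)

 private:
  bool enable_huffman_code_ = false;  // was enableHuffmanCode
};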
@@ -1 +1 @@
-efficientnet.mindir 40.64 9.98
+efficientnet.mindir 41.37 9.98

@@ -1,3 +1,3 @@
-retinaface_732_1280_iod.mindir
-mobilefacenet_iod.mindir
+retinaface_732_1280_iod.mindir 16.9
+mobilefacenet_iod.mindir 13.5
 #effnet_iod.mindir

@@ -1,2 +1,2 @@
 ml_face_openclose.tflite 0.5
-hiai_ghostnet.tflite 5
+hiai_ghostnet.tflite 4.7

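Context for the three model-list changes above: each weight-quant entry now carries a per-model accuracy limit as a second whitespace-separated field (model name, then threshold), and the efficientnet and ghostnet thresholds are retuned. The script hunks below split each line with awk and pass the second field to the benchmark tool as --accuracyThreshold.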
@@ -238,10 +238,11 @@ function Run_Converter() {
 
     # Convert mindir weightquant models:
     while read line; do
-        model_name=${line}
-        if [[ $model_name == \#* ]]; then
+        weight_quant_line_info=${line}
+        if [[ $weight_quant_line_info == \#* ]]; then
            continue
        fi
+        model_name=`echo ${weight_quant_line_info}|awk -F ' ' '{print $1}'`
        echo ${model_name} >> "${run_converter_log_file}"
        echo './converter_lite --fmk=MINDIR --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}' --quantType=WeightQuant --bitNum=8 --quantWeightSize=500 --quantWeightChannel=16' >> "${run_converter_log_file}"
        ./converter_lite --fmk=MINDIR --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_weightquant --quantType=WeightQuant --bitNum=8 --quantWeightSize=500 --quantWeightChannel=16
@@ -538,15 +539,17 @@ function Run_x86() {
 
    # Run mindir weight quantization converted models:
    while read line; do
-        model_name=${line}
-        if [[ $model_name == \#* ]]; then
+        weight_quant_line_info=${line}
+        if [[ $weight_quant_line_info == \#* ]]; then
            continue
        fi
+        model_name=`echo ${weight_quant_line_info}|awk -F ' ' '{print $1}'`
+        accuracy_limit=`echo ${weight_quant_line_info}|awk -F ' ' '{print $2}'`
        echo ${model_name} >> "${run_x86_log_file}"
        echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64' >> "${run_x86_log_file}"
        cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64 || return 1
-        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_log_file}"
-        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}_weightquant.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.weightquant.ms.out >> "${run_x86_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --accuracyThreshold=${accuracy_limit}' >> "${run_x86_log_file}"
+        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}_weightquant.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> "${run_x86_log_file}"
        if [ $? = 0 ]; then
            run_result='x86: '${model_name}'[weight quant] pass'; echo ${run_result} >> ${run_benchmark_result_file}
        else
@@ -809,15 +812,17 @@ function Run_x86_sse() {
 
    # Run mindir weight quantization converted models:
    while read line; do
-        model_name=${line}
-        if [[ $model_name == \#* ]]; then
+        weight_quant_line_info=${line}
+        if [[ $weight_quant_line_info == \#* ]]; then
            continue
        fi
+        model_name=`echo ${weight_quant_line_info}|awk -F ' ' '{print $1}'`
+        accuracy_limit=`echo ${weight_quant_line_info}|awk -F ' ' '{print $2}'`
        echo ${model_name} >> "${run_x86_sse_log_file}"
        echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-sse' >> "${run_x86_sse_log_file}"
        cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-sse || return 1
-        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_sse_log_file}"
-        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}_weightquant.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.weightquant.ms.out >> "${run_x86_sse_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --accuracyThreshold=${accuracy_limit}' >> "${run_x86_sse_log_file}"
+        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}_weightquant.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> "${run_x86_sse_log_file}"
        if [ $? = 0 ]; then
            run_result='x86_sse: '${model_name}'[weight quant] pass'; echo ${run_result} >> ${run_benchmark_result_file}
        else
@@ -1081,15 +1086,17 @@ function Run_x86_avx() {
 
    # Run mindir weight quantization converted models:
    while read line; do
-        model_name=${line}
-        if [[ $model_name == \#* ]]; then
+        weight_quant_line_info=${line}
+        if [[ $weight_quant_line_info == \#* ]]; then
            continue
        fi
+        model_name=`echo ${weight_quant_line_info}|awk -F ' ' '{print $1}'`
+        accuracy_limit=`echo ${weight_quant_line_info}|awk -F ' ' '{print $2}'`
        echo ${model_name} >> "${run_x86_avx_log_file}"
        echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-avx' >> "${run_x86_avx_log_file}"
        cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-avx || return 1
-        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_avx_log_file}"
-        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}_weightquant.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.weightquant.ms.out >> "${run_x86_avx_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out --accuracyThreshold=${accuracy_limit}' >> "${run_x86_avx_log_file}"
+        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}_weightquant.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> "${run_x86_avx_log_file}"
        if [ $? = 0 ]; then
            run_result='x86_avx: '${model_name}'[weight quant] pass'; echo ${run_result} >> ${run_benchmark_result_file}
        else
@@ -1637,14 +1644,16 @@ function Run_arm64() {
 
    # Run mindir weightquant converted train models:
    while read line; do
-        model_name=${line}
-        if [[ $model_name == \#* ]]; then
+        weight_quant_line_info=${line}
+        if [[ $weight_quant_line_info == \#* ]]; then
            continue
        fi
-        echo ${model_name}'_train' >> "${run_arm64_log_file}"
+        model_name=`echo ${weight_quant_line_info}|awk -F ' ' '{print $1}'`
+        accuracy_limit=`echo ${weight_quant_line_info}|awk -F ' ' '{print $2}'`
+        echo ${model_name} >> "${run_arm64_log_file}"
        echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
-        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_weightquant.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.weightquant.ms.out --loopCount=1' >> "${run_arm64_log_file}"
-        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_weightquant.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.weightquant.ms.out --loopCount=1' >> adb_run_cmd.txt
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_weightquant.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --loopCount=1 --accuracyThreshold='${accuracy_limit} >> "${run_arm64_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_weightquant.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --loopCount=1 --accuracyThreshold='${accuracy_limit} >> adb_run_cmd.txt
        adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
        if [ $? = 0 ]; then
            run_result='arm64: '${model_name}'[weightQuant] pass'; echo ${run_result} >> ${run_benchmark_result_file}

@@ -508,7 +508,7 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> &input_ano
 
   }
   paramTensor->name = input_name;
-  if (primitive_c->IsEnableHuffmanCode() && paramTensor->dataType == kNumberTypeInt8) {
+  if (primitive_c->enable_huffman_code() && paramTensor->dataType == kNumberTypeInt8) {
     paramTensor->enableHuffmanCode = true;
   }
   node_id_map_[input_name] = meta_graphT->allTensors.size();

@@ -52,7 +52,6 @@
 #include "tools/optimizer/graph/inputs_adjust_pass.h"
 #include "tools/converter/quantizer/post_training_quantizer.h"
 #include "tools/converter/quantizer/quant_cast.h"
-#include "tools/converter/quantizer/huffman_encode.h"
 #include "tools/converter/quantizer/weight_quantizer.h"
 
 using std::string;
@@ -243,24 +242,6 @@ int AnfTransform::DoQuantize(const FuncGraphPtr &old_graph, const converter::Fla
   return RET_OK;
 }
 
-int AnfTransform::DoHuffmanEncode(const converter::Flags *config, const FuncGraphPtr &new_graph,
-                                  bool enableHuffmanCode) {
-  if (config->quantType == schema::QuantType_WeightQuant && enableHuffmanCode) {
-    if (config->bitNum < 16 && config->bitNum > 8) {
-      MS_LOG(WARNING) << "don't support huffman encode when 8 < bitNum < 16 currently.";
-      return RET_OK;
-    }
-    auto huffman_encode = std::make_unique<lite::HuffmanEncode>();
-    auto status = huffman_encode->DoHuffmanEncode(new_graph, config->bitNum);
-    if (status != RET_OK) {
-      MS_LOG(ERROR) << "Huffman encode failed.";
-      ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
-      return RET_ERROR;
-    }
-  }
-  return RET_OK;
-}
-
 FuncGraphPtr AnfTransform::TransformSingleFuncGraph(const FuncGraphPtr &old_graph, const converter::Flags *config) {
   MS_ASSERT(nullptr != old_graph);
   if (config == nullptr) {
@@ -315,12 +296,6 @@ FuncGraphPtr AnfTransform::TransformSingleFuncGraph(const FuncGraphPtr &old_grap
     return nullptr;
   }
 
-  status = DoHuffmanEncode(config, new_graph, false);
-  if (status != RET_OK) {
-    MS_LOG(ERROR) << "Do HuffmanCode failed.";
-    return nullptr;
-  }
-
   return new_graph;
 }

@@ -58,8 +58,6 @@ class AnfTransform {
   int RunTFAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config);
 
   int DoQuantize(const FuncGraphPtr &old_graph, const converter::Flags *config, const FuncGraphPtr &new_graph);
-
-  int DoHuffmanEncode(const converter::Flags *config, const FuncGraphPtr &new_graph, bool enableHuffmanCode);
 };
 } // namespace lite
 } // namespace mindspore

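With DoHuffmanEncode deleted from AnfTransform in both hunks above, Huffman coding no longer runs as a separate whole-graph step after quantization; the rewritten per-weight entry point in the huffman_encode.cc/.h hunks below is driven from inside weight quantization instead. A rough sketch of the new call shape; the caller-side names here are assumptions (the real call site sits in the suppressed quantizer diff further down), not code from this PR:

// Sketch only: driving the new per-weight API while quantizing one weight.
// `weight`, `primitive_c`, `quant_datas`, and `bit_num` are assumed caller-side names.
auto huffman_encode = std::make_unique<lite::HuffmanEncode>();
auto status = huffman_encode->DoHuffmanEncode(weight, primitive_c, quant_datas.data(), bit_num);
if (status != RET_SUCCESS) {
  MS_LOG(ERROR) << "Do huffman encode failed.";
  return status;
}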
@@ -227,7 +227,6 @@ int GraphDefTransform::Transform(const converter::Flags &ctx) {
     quantNodeOptimizer.AddPass(dTypeTransPass);
     quantNodeOptimizer.AddPass(new (std::nothrow) QuantCastFusionPass());
     quantNodeOptimizer.AddPass(new (std::nothrow) IsolatedNodeRemovePass());
-    quantNodeOptimizer.AddPass(new (std::nothrow) SetUnusedQuantParamToDefaultPass());
     quantNodeOptimizer.AddPass(new (std::nothrow) SubgraphNodePass(old_nodes2));
     status = quantNodeOptimizer.Run(graphDefT);
     if (status != RET_OK && status != RET_NO_CHANGE) {
@@ -287,6 +286,15 @@ int GraphDefTransform::Transform(const converter::Flags &ctx) {
     }
   }
 
+  {
+    Optimizer quantNodeOptimizer;
+    quantNodeOptimizer.AddPass(new (std::nothrow) SetUnusedQuantParamToDefaultPass());
+    status = quantNodeOptimizer.Run(graphDefT);
+    if (status != RET_OK && status != RET_NO_CHANGE) {
+      MS_LOG(ERROR) << "Run quantNodeOptimizer graphPasses Failed";
+      return status;
+    }
+  }
   return RET_OK;
 }
 }  // namespace mindspore::lite

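The two graphdef_transform.cc hunks move SetUnusedQuantParamToDefaultPass out of the main quant-node optimizer run and into its own Optimizer scope at the very end of Transform, so unused quant params are only reset to defaults after every other pass that might still read them has run.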
@@ -24,120 +24,49 @@
 namespace mindspore {
 namespace lite {
-STATUS HuffmanEncode::GetParamValueLitePtr(const std::shared_ptr<AnfNode> &input_node, ParamValueLitePtr *param_value) {
-  if (!input_node->isa<Parameter>()) {
-    return RET_CONTINUE;
-  }
-  auto abstract_base = input_node->abstract();
-  if (abstract_base == nullptr) {
-    MS_LOG(ERROR) << "Abstract of parameter is nullptr, " << input_node->fullname_with_scope();
-    return RET_ERROR;
-  }
-  if (!utils::isa<abstract::AbstractTensorPtr>(abstract_base)) {
-    MS_LOG(ERROR) << "Abstract of parameter should be abstract tensor, " << input_node->fullname_with_scope();
-    return RET_ERROR;
-  }
-  auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base);
-  if (abstract_tensor->element() == nullptr) {
-    MS_LOG(ERROR) << "abstract tensor element is nullptr, " << input_node->fullname_with_scope();
+STATUS HuffmanEncode::DoHuffmanEncode(const ParamValueLitePtr &weight, const std::shared_ptr<PrimitiveC> &primitive_c,
+                                      void *quant_datas, const size_t &bit_num) {
+  if (quant_datas == nullptr) {
+    MS_LOG(ERROR) << "quant data is nullptr";
     return RET_ERROR;
   }
-  auto tensor_type = abstract_tensor->element()->GetTypeTrack();
-  MS_ASSERT(tensor_type != nullptr);
-  auto tensor_type_id = tensor_type->type_id();
-  if (tensor_type_id != kNumberTypeInt8) {
-    return RET_CONTINUE;
-  }
-  auto param_node = input_node->cast<ParameterPtr>();
-  if (param_node == nullptr) {
-    MS_LOG(ERROR) << "parameter node is nullptr, " << input_node->fullname_with_scope();
-    return RET_ERROR;
-  }
-  if (!param_node->has_default()) {
-    MS_LOG(WARNING) << "param_node don't have default: " << input_node->fullname_with_scope();
-    return RET_CONTINUE;
-  }
-  *param_value = std::static_pointer_cast<ParamValueLite>(param_node->default_param());
-  return RET_OK;
-}
-
-STATUS HuffmanEncode::DoHuffmanEncode(const FuncGraphPtr &func_graph, const int &bit_num) {
-  auto cnodes = func_graph->GetOrderedCnodes();
-  for (auto &cnode : cnodes) {
-    auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0));
-    if (primitive_c == nullptr) {
-      MS_LOG(ERROR) << "primitive_c is nullptr: " << cnode->fullname_with_scope();
-      return RET_ERROR;
-    }
-    if (primitive_c->quant_type() != schema::QuantType_WeightQuant) {
-      continue;
-    }
+  auto *raw_datas = static_cast<int8_t *>(quant_datas);
+  size_t elem_count = weight->tensor_shape_size();
+  size_t packed_size = elem_count * bit_num;
+  HuffmanPriorityQueue pq;
+  auto status = GetHuffmanPriorityQueue(raw_datas, elem_count, &pq);
+  if (status != RET_OK) {
+    MS_LOG(ERROR) << "GetHuffmanPriorityQueue failed";
+    return status;
+  }
+  status = BuildHuffmanTree(&pq);
+  if (status != RET_OK) {
+    MS_LOG(ERROR) << "BuildHuffmanTree failed";
+    return status;
+  }
+  status = DoHuffmanCompress(raw_datas, elem_count);
+  if (status != RET_OK) {
+    MS_LOG(ERROR) << "DoHuffmanCompress failed";
+    return status;
+  }
+  size_t ch_size = huffman_encoded_str_.length();
+  if (ch_size < packed_size) {
+    auto encode_data = new (std::nothrow) char[ch_size];
+    if (encode_data == nullptr) {
+      MS_LOG(ERROR) << "new char[] failed.";
+      return RET_MEMORY_FAILED;
+    }
-    for (size_t i = 1; i < cnode->inputs().size(); i++) {
-      auto input_node = cnode->input(i);
-      ParamValueLitePtr param_value;
-      auto status = GetParamValueLitePtr(input_node, &param_value);
-      if (status == RET_CONTINUE) {
-        continue;
-      } else if (status == RET_ERROR) {
-        MS_LOG(ERROR) << "Get param value lite ptr failed. " << cnode->fullname_with_scope();
-        return RET_ERROR;
-      }
-      size_t elem_count = param_value->tensor_shape_size();
-      size_t packed_size = param_value->tensor_size();
-      auto *raw_datas = static_cast<int8_t *>(param_value->tensor_addr());
-      if (raw_datas == nullptr) {
-        MS_LOG(ERROR) << "rawDatas is nullptr";
-        return RET_ERROR;
-      }
-      if (bit_num < 8 && bit_num > 0) {
-        auto dst_data = new (std::nothrow) int8_t[elem_count];
-        if (dst_data == nullptr) {
-          MS_LOG(ERROR) << "new int8_t[] failed";
-          return RET_ERROR;
-        }
-        DequantUtil::UnpackUtil<int8_t, uint8_t>(raw_datas, packed_size, bit_num, dst_data);
-        if (memcpy_s(raw_datas, elem_count, dst_data, elem_count) != EOK) {
-          MS_LOG(ERROR) << "memcpy_s failed.";
-          return RET_MEMORY_FAILED;
-        }
-      }
-      HuffmanPriorityQueue pq;
-      status = GetHuffmanPriorityQueue(raw_datas, elem_count, &pq);
-      if (status != RET_OK) {
-        MS_LOG(ERROR) << "GetHuffmanPriorityQueue failed";
-        return status;
-      }
-      status = BuildHuffmanTree(&pq);
-      if (status != RET_OK) {
-        MS_LOG(ERROR) << "BuildHuffmanTree failed";
-        return status;
-      }
-      status = DoHuffmanCompress(raw_datas, elem_count);
-      if (status != RET_OK) {
-        MS_LOG(ERROR) << "DoHuffmanCompress failed";
-        return status;
-      }
-      size_t ch_size = huffman_encoded_str_.length();
-      if (ch_size < packed_size) {
-        auto encode_data = new (std::nothrow) char[ch_size];
-        if (encode_data == nullptr) {
-          MS_LOG(ERROR) << "new char[] failed.";
-          delete[] raw_datas;
-          return RET_MEMORY_FAILED;
-        }
-        delete[] raw_datas;
-        if (memcpy_s(encode_data, ch_size, huffman_encoded_str_.c_str(), ch_size) != EOK) {
-          MS_LOG(ERROR) << "memcpy_s failed.";
-          delete[] encode_data;
-          return RET_MEMORY_FAILED;
-        }
-        param_value->SetTensorData(encode_data, ch_size);
-        primitive_c->SetEnableHuffmanCode(true);
-      }
-      huffman_encoded_str_.clear();
-      huffman_table_.clear();
-    }
-  }
+    if (memcpy_s(encode_data, ch_size, huffman_encoded_str_.c_str(), ch_size) != EOK) {
+      MS_LOG(ERROR) << "memcpy_s failed.";
+      delete[] encode_data;
+      return RET_MEMORY_FAILED;
+    }
+    weight->SetTensorData(encode_data, ch_size);
+    primitive_c->set_enable_huffman_code(true);
+  }
+  huffman_encoded_str_.clear();
+  huffman_table_.clear();
   return RET_SUCCESS;
 }

@@ -60,7 +60,8 @@ class HuffmanEncode {
 
-  STATUS GetParamValueLitePtr(const std::shared_ptr<AnfNode> &input_node, ParamValueLitePtr *param_value);
-
-  STATUS DoHuffmanEncode(const FuncGraphPtr &func_graph, const int &bit_num);
+  STATUS DoHuffmanEncode(const ParamValueLitePtr &weight, const std::shared_ptr<PrimitiveC> &primitive_c,
+                         void *quant_datas, const size_t &bit_num);
 
  private:
  std::map<int, std::string> huffman_table_;

File diff suppressed because it is too large.

@@ -109,7 +109,9 @@ STATUS WeightQuantizer::DoConvQuantize(CNodePtr cnode) {
     status =
       QuantFilter<int16_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true);
   }
-  if (status != RET_OK) {
+  if (status == RET_CONTINUE) {
+    return RET_OK;
+  } else if (status != RET_OK) {
     MS_LOG(ERROR) << "QuantFilter failed : " << status;
     return status;
   }
@@ -173,7 +175,9 @@ STATUS WeightQuantizer::DoMulQuantize(CNodePtr cnode) {
     status =
       QuantFilter<int16_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true);
   }
-  if (status != RET_OK) {
+  if (status == RET_CONTINUE) {
+    return RET_OK;
+  } else if (status != RET_OK) {
     MS_LOG(ERROR) << "QuantFilter failed : " << status;
     return status;
   }
@@ -246,7 +250,9 @@ STATUS WeightQuantizer::DoGatherQuantize(CNodePtr cnode) {
     status =
       QuantFilter<int16_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, false, 0);
   }
-  if (status != RET_OK) {
+  if (status == RET_CONTINUE) {
+    return RET_OK;
+  } else if (status != RET_OK) {
     MS_LOG(ERROR) << "QuantFilter failed : " << status;
     return status;
   }
@@ -286,7 +292,9 @@ STATUS WeightQuantizer::ProcessLstmWeightByIndex(const CNodePtr &cnode, const st
     status = QuantFilter<int16_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_,
                                   false, index - 1);
   }
-  if (status != RET_OK) {
+  if (status == RET_CONTINUE) {
+    return RET_OK;
+  } else if (status != RET_OK) {
     MS_LOG(ERROR) << "QuantFilter failed : " << status;
     return status;
   }
@@ -503,7 +511,9 @@ STATUS WeightQuantizer::TryQuant(const int &bit_num_t, const ParameterPtr &param
     MS_LOG(ERROR) << "unexpected type_id_: " << type_id_;
     return RET_ERROR;
   }
-  if (status != RET_OK) {
+  if (status == RET_CONTINUE) {
+    return RET_OK;
+  } else if (status != RET_OK) {
     MS_LOG(ERROR) << "quant filter failed.";
     return RET_ERROR;
   }

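All five weight_quantizer.cc hunks above adopt one convention: a QuantFilter return of RET_CONTINUE now means the weight was already fully handled and the caller should succeed without further processing of the node, while any other non-RET_OK status remains an error. The RET_CONTINUE semantics are inferred from this PR (the QuantFilter change itself is in the suppressed diff); in isolation the pattern is:

// Shared handling pattern; RET_CONTINUE semantics inferred, not shown in the visible hunks.
auto status = QuantFilter<int8_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true);
if (status == RET_CONTINUE) {
  return RET_OK;  // weight already handled; skip the rest of this node
} else if (status != RET_OK) {
  MS_LOG(ERROR) << "QuantFilter failed : " << status;
  return status;
}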