Process the Constant op in the ONNX converter and add two ONNX models

pull/4595/head
wangzhe 5 years ago
parent bcb24c7944
commit c9cb994e37

@ -1 +1,3 @@
mtk_detect-mbv2-shortcut-400-400-simplified.onnx
mtk_emotions-d2012-75.8%.onnx
mtk_face_features_v3.onnx

@ -153,6 +153,28 @@ function Run_arm64() {
fi
#sleep 1
done < ${models_caffe_config}
# Run caffe converted models:
while read line; do
model_name=${line}
if [[ $model_name == \#* ]]; then
continue
fi
echo ${model_name}
echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1'
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelPath='${model_name}'.ms --inDataPath=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --calibDataPath=/data/local/tmp/input_output/output/'${model_name}'.ms.out --warmUpLoopCount=1 --loopCount=1' >> adb_run_cmd.txt
adb -s ${device_id} shell < adb_run_cmd.txt
if [ $? = 0 ]; then
run_result='Run_arm64:'${model_name}' pass'
echo ${run_result} >> ${run_benchmark_result_file}
else
run_result='Run_arm64:'${model_name}' fail <<===========================this is the failed case'
echo ${run_result} >> ${run_benchmark_result_file}
return 1
fi
#sleep 1
done < ${models_onnx_config}
}
# Print start msg before run testcase

@ -48,7 +48,10 @@ class OnnxModelParser : public ModelParser {
STATUS SetGraphConstTensor(const onnx::GraphProto &onnx_graph, TensorCache *tensor_cache);
STATUS SetGraphInputTensor(const onnx::GraphProto &onnx_graph, schema::MetaGraphT *graph, TensorCache *tensor_cache);
STATUS SetGraphOutputTensor(const onnx::GraphProto &onnx_graph, schema::MetaGraphT *graph, TensorCache *tensor_cache);
STATUS AddTensorCache(const onnx::ValueInfoProto &proto, schema::TensorT *tensor);
STATUS AddValueInfo(const onnx::ValueInfoProto &proto, const std::string &name, const TensorType &type,
TensorCache *tensor_cache, int *index);
STATUS AddTensorProto(const onnx::TensorProto &proto, const std::string &name, const TensorType &type,
TensorCache *tensor_cache, int *index);
STATUS ParseOnnxNodeToDstOp(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *dst_op, schema::TensorT *dst_tensor, TensorCache *tensor_cache);
void ParseOnnxGemmNode(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,

@ -23,6 +23,7 @@ STATUS OnnxPoolParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
MS_LOG(DEBUG) << "onnx PoolParser";
std::unique_ptr<schema::PoolingT> attr(new schema::PoolingT());
attr->format = schema::Format_NCHW;
const auto &pool_type = onnx_node.op_type();
if (pool_type == "MaxPool") {
attr->poolingMode = schema::PoolMode_MAX_POOLING;
@ -37,7 +38,7 @@ STATUS OnnxPoolParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
attr->poolingMode = schema::PoolMode_MEAN_POOLING;
attr->global = true;
} else {
// MS_LOGE("Pooling param`s PoolingMode is not MAX either AVE. MindSpore support MAX and AVE only.");
MS_LOG(ERROR) << "Pooling param`s PoolingMode is not MAX either AVE. MindSpore support MAX and AVE only.";
return RET_ERROR;
}
@ -92,4 +93,3 @@ OnnxNodeRegistrar g_onnxGlobalAveragePoolParser("GlobalAveragePool", new OnnxPoo
OnnxNodeRegistrar g_onnxGlobalMaxPoolParser("GlobalMaxPool", new OnnxPoolParser());
} // namespace lite
} // namespace mindspore

@ -19,8 +19,7 @@
namespace mindspore {
namespace lite {
STATUS OnnxUnSqueezeParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
STATUS OnnxUnSqueezeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx UnSqueezeParser";
std::unique_ptr<schema::UnsqueezeT> attr(new schema::UnsqueezeT());
@ -43,4 +42,3 @@ STATUS OnnxUnSqueezeParser::Parse(const onnx::GraphProto &onnx_graph,
OnnxNodeRegistrar g_onnxUnsqueezeParser("Unsqueeze", new OnnxUnSqueezeParser());
} // namespace lite
} // namespace mindspore

Loading…
Cancel
Save