!6268 cmake optimize for mindspore lite

Merge pull request !6268 from zhanghaibo/master
pull/6268/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit a92e444f21

@@ -116,7 +116,7 @@ int DoMatMulInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector
int DoMatMul(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
if (in_tensors[0]->data_ == NULL || in_tensors[1]->data_ ==NULL) {
if (in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
LITE_LOG_ERROR("input data is NULL!");
return RET_PARAM_INVALID;
}

@@ -68,7 +68,7 @@ void FusedBatchNormFp32MeanVar(const float *input, float momentum, float *run_me
run_mean[f] = run_mean[f] / N;
run_var[f] = run_var[f] / N - run_mean[f] * run_mean[f];
save_mean[f] = momentum * save_mean[f] + (1 - momentum) * run_mean[f];
float inv_var = 1.f/sqrt(run_var[f]+param->epsilon_);
float inv_var = 1.f / sqrt(run_var[f] + param->epsilon_);
save_inv_var[f] = momentum * save_inv_var[f] + (1 - momentum) * inv_var;
}
}
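For reference, a minimal standalone sketch of the running-statistics update this hunk touches: the per-channel mean and variance are computed as E[x] and E[x^2] - E[x]^2, blended into the saved statistics with the momentum factor, and the saved inverse variance uses 1/sqrt(var + epsilon), mirroring the lines above. The data layout (n samples by channels) and the function names below are hypothetical, not the MindSpore Lite implementation.

#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical helper, not the MindSpore Lite function: `input` holds `n` samples
// of `channels` floats each, stored sample-major.
void UpdateBatchNormStats(const float *input, int n, int channels, float momentum, float epsilon,
                          std::vector<float> *save_mean, std::vector<float> *save_inv_var) {
  std::vector<float> run_mean(channels, 0.f);
  std::vector<float> run_var(channels, 0.f);
  // Accumulate per-channel sums of x and x^2.
  for (int i = 0; i < n; ++i) {
    for (int c = 0; c < channels; ++c) {
      float v = input[i * channels + c];
      run_mean[c] += v;
      run_var[c] += v * v;
    }
  }
  for (int c = 0; c < channels; ++c) {
    run_mean[c] /= n;                                         // E[x]
    run_var[c] = run_var[c] / n - run_mean[c] * run_mean[c];  // E[x^2] - E[x]^2
    (*save_mean)[c] = momentum * (*save_mean)[c] + (1 - momentum) * run_mean[c];
    float inv_var = 1.f / std::sqrt(run_var[c] + epsilon);
    (*save_inv_var)[c] = momentum * (*save_inv_var)[c] + (1 - momentum) * inv_var;
  }
}

int main() {
  const float data[] = {1.f, 2.f, 3.f, 4.f};  // 2 samples, 2 channels
  std::vector<float> mean(2, 0.f), inv_var(2, 1.f);
  UpdateBatchNormStats(data, 2, 2, 0.9f, 1e-5f, &mean, &inv_var);
  std::printf("mean[0]=%f inv_var[0]=%f\n", mean[0], inv_var[0]);
  return 0;
}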

@@ -95,3 +95,14 @@ if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release" AND (PLATFORM_ARM64 OR PLATFORM_ARM
${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
endif ()
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
if (PLATFORM_ARM64 OR PLATFORM_ARM32)
add_custom_command(TARGET mindspore-lite POST_BUILD
COMMAND ${ANDROID_NDK}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/aarch64-linux-android/bin/strip
${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
elseif (NOT WIN32)
add_custom_command(TARGET mindspore-lite POST_BUILD
COMMAND strip ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
endif ()
endif ()

@@ -100,6 +100,5 @@ int BiasGrad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> out
return RET_OK;
}
} // namespace lite
} // namespace mindspore

@@ -33,7 +33,7 @@ class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel {
const std::vector<lite::Tensor *> &outputs,
const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LossKernel(parameter, inputs, outputs, ctx, primitive) , losses_(nullptr), sum_data_(nullptr) {
: LossKernel(parameter, inputs, outputs, ctx, primitive), losses_(nullptr), sum_data_(nullptr) {
param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(parameter);
}
~SparseSoftmaxCrossEntropyWithLogitsCPUKernel() override {

@@ -30,7 +30,6 @@
namespace mindspore {
namespace lite {
struct QuantArg {
double scale;
int32_t zeroPoint;
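A {scale, zeroPoint} pair like the fields above typically describes an affine quantization mapping, real ≈ scale × (q − zeroPoint). The sketch below is only an illustration of that standard scheme (an assumption about how these fields are used, not code from this repository); the struct is repeated locally to keep it self-contained.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

struct QuantArg {
  double scale;
  int32_t zeroPoint;
};

// Hypothetical helpers: map a real value into int8 and back with the affine scheme.
int8_t QuantizeToInt8(float real, const QuantArg &q) {
  int32_t v = static_cast<int32_t>(std::round(real / q.scale)) + q.zeroPoint;
  v = std::min(127, std::max(-128, v));  // clamp to the int8 range
  return static_cast<int8_t>(v);
}

float DequantizeFromInt8(int8_t v, const QuantArg &q) {
  return static_cast<float>(q.scale * (v - q.zeroPoint));
}

int main() {
  QuantArg q{0.05, 10};
  int8_t qv = QuantizeToInt8(1.23f, q);
  std::printf("quantized=%d dequantized=%f\n", qv, DequantizeFromInt8(qv, q));
  return 0;
}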
