!6268 cmake optimize for mindspore lite

Merge pull request !6268 from zhanghaibo/master
pull/6268/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit a92e444f21

@ -116,7 +116,7 @@ int DoMatMulInferShape(const TensorPtrVector &in_tensors, const TensorPtrVector
int DoMatMul(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tensors, Node *node,
mindspore::lite::Allocator *allocator) {
if (in_tensors[0]->data_ == NULL || in_tensors[1]->data_ ==NULL) {
if (in_tensors[0]->data_ == NULL || in_tensors[1]->data_ == NULL) {
LITE_LOG_ERROR("input data is NULL!");
return RET_PARAM_INVALID;
}

@ -68,7 +68,7 @@ void FusedBatchNormFp32MeanVar(const float *input, float momentum, float *run_me
run_mean[f] = run_mean[f] / N;
run_var[f] = run_var[f] / N - run_mean[f] * run_mean[f];
save_mean[f] = momentum * save_mean[f] + (1 - momentum) * run_mean[f];
float inv_var = 1.f/sqrt(run_var[f]+param->epsilon_);
float inv_var = 1.f / sqrt(run_var[f] + param->epsilon_);
save_inv_var[f] = momentum * save_inv_var[f] + (1 - momentum) * inv_var;
}
}

@ -29,7 +29,7 @@ void FusedBatchNormFp32(const void *input, const void *scale, const void *offset
const void *variance, BatchNormParameter *param, int task_id, void *output);
void FusedBatchNormFp32MeanVar(const float *input, float momentum, float *run_mean, float *run_var,
BatchNormParameter *param, float *save_mean, float *save_var);
BatchNormParameter *param, float *save_mean, float *save_var);
#ifdef __cplusplus
}
#endif

@ -33,7 +33,7 @@ typedef struct SoftmaxCrossEntropyParameter {
} SoftmaxCrossEntropyParameter;
void SoftmaxGrad(const float *input_ptr, const float *yt_ptr, float *output_ptr, float *sum_data,
float *sum_mul, SoftmaxParameter *parameter);
float *sum_mul, SoftmaxParameter *parameter);
#ifdef __cplusplus
}
#endif

@ -17,7 +17,7 @@
#include "nnacl/arithmetic_common.h"
void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, int *block_sizes, int *in_shape,
int *out_shape) {
int *out_shape) {
int out_dim0 = out_shape[0];
int out_dim1 = out_shape[1];
int out_dim2 = out_shape[2];

@ -95,3 +95,14 @@ if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release" AND (PLATFORM_ARM64 OR PLATFORM_ARM
${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
endif ()
# Post-build step: strip debug symbols from libmindspore-lite.so in Release
# builds to shrink the shipped binary.
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
# Android ARM targets: use the NDK's cross strip tool.
# NOTE(review): the aarch64 strip binary is used for both PLATFORM_ARM64 and
# PLATFORM_ARM32 — confirm the ARM32 toolchain path is intentionally shared.
if (PLATFORM_ARM64 OR PLATFORM_ARM32)
add_custom_command(TARGET mindspore-lite POST_BUILD
COMMAND ${ANDROID_NDK}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/aarch64-linux-android/bin/strip
${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
# Other non-Windows hosts: fall back to the system `strip` (skipped on
# Windows, where no strip tool is assumed to exist).
elseif (NOT WIN32)
add_custom_command(TARGET mindspore-lite POST_BUILD
COMMAND strip ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so)
endif ()
endif ()

@ -100,6 +100,5 @@ int BiasGrad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> out
return RET_OK;
}
} // namespace lite
} // namespace mindspore

@ -33,7 +33,7 @@ class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel {
const std::vector<lite::Tensor *> &outputs,
const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LossKernel(parameter, inputs, outputs, ctx, primitive) , losses_(nullptr), sum_data_(nullptr) {
: LossKernel(parameter, inputs, outputs, ctx, primitive), losses_(nullptr), sum_data_(nullptr) {
param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(parameter);
}
~SparseSoftmaxCrossEntropyWithLogitsCPUKernel() override {

@ -30,7 +30,6 @@
namespace mindspore {
namespace lite {
struct QuantArg {
double scale;
int32_t zeroPoint;

Loading…
Cancel
Save