From a949a11af5d2c7a0a7825c7d058781ef09f14766 Mon Sep 17 00:00:00 2001
From: lixian
Date: Tue, 30 Mar 2021 10:47:20 +0800
Subject: [PATCH] fix coding style

---
 mindspore/lite/nnacl/fp32/prelu_fp32.c           |  2 +-
 mindspore/lite/src/cxx_api/cell.cc               |  3 +--
 mindspore/lite/src/cxx_api/context.cc            |  2 +-
 mindspore/lite/src/cxx_api/graph/graph.cc        |  1 -
 mindspore/lite/src/cxx_api/graph/graph_data.h    |  2 ++
 mindspore/lite/src/cxx_api/model/model.cc        |  2 +-
 mindspore/lite/src/cxx_api/model/model_impl.cc   |  6 +++++-
 mindspore/lite/src/cxx_api/model/model_impl.h    |  2 ++
 mindspore/lite/src/cxx_api/serialization.cc      |  1 -
 mindspore/lite/src/cxx_api/tensor/tensor_impl.cc | 14 ++++++++------
 mindspore/lite/src/cxx_api/tensor/tensor_impl.h  | 15 ++++++++++-----
 mindspore/lite/src/cxx_api/tensor_utils.h        |  7 ++++++-
 mindspore/lite/src/cxx_api/types.cc              | 13 ++++++-------
 13 files changed, 43 insertions(+), 27 deletions(-)

diff --git a/mindspore/lite/nnacl/fp32/prelu_fp32.c b/mindspore/lite/nnacl/fp32/prelu_fp32.c
index c75ad0235c..e2caa6a6cd 100644
--- a/mindspore/lite/nnacl/fp32/prelu_fp32.c
+++ b/mindspore/lite/nnacl/fp32/prelu_fp32.c
@@ -16,7 +16,7 @@
 #include "nnacl/fp32/prelu_fp32.h"
 
 #ifdef ENABLE_ARM64
-inline void PRelu4x16(const float *in, float *out, float *cur_slope, size_t step) {
+static inline void PRelu4x16(const float *in, float *out, float *cur_slope, size_t step) {
   asm volatile(
     "mov x10, %[in]\n"
     "mov x11, %[out]\n"

diff --git a/mindspore/lite/src/cxx_api/cell.cc b/mindspore/lite/src/cxx_api/cell.cc
index 8df0bba2d9..3a43fabf92 100644
--- a/mindspore/lite/src/cxx_api/cell.cc
+++ b/mindspore/lite/src/cxx_api/cell.cc
@@ -13,11 +13,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "include/api/cell.h"
 #include "src/common/log_adapter.h"
 
 namespace mindspore {
-
 class GraphImpl {};
 
 std::vector<Output> CellBase::operator()(const std::vector<Input> &inputs) const {
@@ -90,5 +90,4 @@ InputAndOutput::InputAndOutput(const std::shared_ptr &cell, const std:
                                int32_t index) {
   MS_LOG(ERROR) << "Unsupported feature.";
 }
-
 }  // namespace mindspore

diff --git a/mindspore/lite/src/cxx_api/context.cc b/mindspore/lite/src/cxx_api/context.cc
index 542c1b5b48..1712e7ce4c 100644
--- a/mindspore/lite/src/cxx_api/context.cc
+++ b/mindspore/lite/src/cxx_api/context.cc
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "include/api/context.h"
 #include
 #include
@@ -262,5 +263,4 @@ enum DataType Ascend310DeviceInfo::GetOutputType() const {
   MS_LOG(ERROR) << "Unsupported Feature.";
   return DataType::kTypeUnknown;
 }
-
 }  // namespace mindspore

diff --git a/mindspore/lite/src/cxx_api/graph/graph.cc b/mindspore/lite/src/cxx_api/graph/graph.cc
index f93eb46b72..2ed1c874a6 100644
--- a/mindspore/lite/src/cxx_api/graph/graph.cc
+++ b/mindspore/lite/src/cxx_api/graph/graph.cc
@@ -19,7 +19,6 @@
 #include "src/cxx_api/graph/graph_data.h"
 
 namespace mindspore {
-
 Graph::Graph() : graph_data_(nullptr) {}
 
 Graph::Graph(const std::shared_ptr<GraphData> &graph_data) : graph_data_(graph_data) {}

diff --git a/mindspore/lite/src/cxx_api/graph/graph_data.h b/mindspore/lite/src/cxx_api/graph/graph_data.h
index e24858e741..b01bec4647 100644
--- a/mindspore/lite/src/cxx_api/graph/graph_data.h
+++ b/mindspore/lite/src/cxx_api/graph/graph_data.h
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #ifndef MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H
 #define MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H
 
@@ -39,4 +40,5 @@ class Graph::GraphData {
   std::shared_ptr<lite::Model> lite_model_;
 };
 }  // namespace mindspore
+
 #endif  // MINDSPORE_LITE_SRC_CXX_API_GRAPH_GRAPH_DATA_H

diff --git a/mindspore/lite/src/cxx_api/model/model.cc b/mindspore/lite/src/cxx_api/model/model.cc
index f249c8b6e3..8eaa98f13a 100644
--- a/mindspore/lite/src/cxx_api/model/model.cc
+++ b/mindspore/lite/src/cxx_api/model/model.cc
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "include/api/model.h"
 #include "include/api/types.h"
 #include "include/api/context.h"
@@ -21,7 +22,6 @@
 #include "src/common/log_adapter.h"
 
 namespace mindspore {
-
 Status Model::Build(GraphCell graph, const std::shared_ptr<Context> &model_context) {
   if (impl_ != nullptr) {
     MS_LOG(DEBUG) << "Model has been already built.";

diff --git a/mindspore/lite/src/cxx_api/model/model_impl.cc b/mindspore/lite/src/cxx_api/model/model_impl.cc
index f005a6f304..e01b9f6d08 100644
--- a/mindspore/lite/src/cxx_api/model/model_impl.cc
+++ b/mindspore/lite/src/cxx_api/model/model_impl.cc
@@ -150,6 +150,11 @@ Status ModelImpl::Predict(const std::vector &inputs, std::vectortensor_name()) {
       MS_LOG(WARNING) << "Tensor " << user_input.Name() << " has a different name from input"
                       << input->tensor_name() << ".";
@@ -384,5 +389,4 @@ Status ModelImpl::Resize(const std::vector &inputs, const std::vector<
   auto ret = session_->Resize(inner_input, truncated_shape);
   return static_cast(ret);
 }
-
 }  // namespace mindspore

diff --git a/mindspore/lite/src/cxx_api/model/model_impl.h b/mindspore/lite/src/cxx_api/model/model_impl.h
index 4f0b7d6604..20f72ed3a0 100644
--- a/mindspore/lite/src/cxx_api/model/model_impl.h
+++ b/mindspore/lite/src/cxx_api/model/model_impl.h
@@ -13,8 +13,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #ifndef MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_IMPL_H
 #define MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_IMPL_H
+
 #include
 #include
 #include

diff --git a/mindspore/lite/src/cxx_api/serialization.cc b/mindspore/lite/src/cxx_api/serialization.cc
index bd768c4f70..530eae0fe8 100644
--- a/mindspore/lite/src/cxx_api/serialization.cc
+++ b/mindspore/lite/src/cxx_api/serialization.cc
@@ -27,7 +27,6 @@
 #include "src/common/log_adapter.h"
 
 namespace mindspore {
-
 Status Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph) {
   if (model_type != kMindIR) {
     MS_LOG(ERROR) << "Unsupported IR.";

diff --git a/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc b/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc
index 471bc37589..b4e24123f4 100644
--- a/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc
+++ b/mindspore/lite/src/cxx_api/tensor/tensor_impl.cc
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include
 #include
 #include
@@ -33,8 +34,9 @@
 namespace mindspore {
 using mindspore::lite::RET_OK;
 
-MSTensor::Impl *MSTensor::Impl::CreateTensorImpl(const std::string &name, enum DataType type,
-                                                 const std::vector<int64_t> &shape, const void *data, size_t data_len) {
+std::shared_ptr<MSTensor::Impl> MSTensor::Impl::CreateTensorImpl(const std::string &name, enum DataType type,
+                                                                 const std::vector<int64_t> &shape, const void *data,
+                                                                 size_t data_len) {
   std::vector truncated_shape = TruncateShape(shape, static_cast(type), data_len, true);
   if (truncated_shape.empty() && !(shape.empty())) {
     MS_LOG(ERROR) << "Invalid shape for creating tensor.";
@@ -45,7 +47,7 @@ MSTensor::Impl *MSTensor::Impl::CreateTensorImpl(const std::string &name, enum D
     MS_LOG(ERROR) << "Failed to allocate lite tensor.";
     return nullptr;
   }
-  auto impl = new (std::nothrow) Impl(lite_tensor);
+  auto impl = std::shared_ptr<Impl>(new (std::nothrow) Impl(lite_tensor));
   if (impl == nullptr) {
     MS_LOG(ERROR) << "Failed to allocate tensor impl.";
     return nullptr;
@@ -54,7 +56,8 @@ MSTensor::Impl *MSTensor::Impl::CreateTensorImpl(const std::string &name, enum D
   return impl;
 }
 
-MSTensor::Impl *MSTensor::Impl::StringsToTensorImpl(const std::string &name, const std::vector<std::string> &str) {
+std::shared_ptr<MSTensor::Impl> MSTensor::Impl::StringsToTensorImpl(const std::string &name,
+                                                                    const std::vector<std::string> &str) {
   auto lite_tensor = new (std::nothrow) lite::Tensor();
   if (lite_tensor == nullptr) {
     MS_LOG(ERROR) << "Failed to allocate lite tensor.";
@@ -67,7 +70,7 @@ MSTensor::Impl *MSTensor::Impl::StringsToTensorImpl(const std::string &name, con
     delete lite_tensor;
     return nullptr;
   }
-  auto impl = new (std::nothrow) Impl(lite_tensor);
+  auto impl = std::shared_ptr<Impl>(new (std::nothrow) Impl(lite_tensor));
   if (impl == nullptr) {
     delete lite_tensor;
     MS_LOG(ERROR) << "Failed to allocate tensor impl.";
@@ -77,5 +80,4 @@ MSTensor::Impl *MSTensor::Impl::StringsToTensorImpl(const std::string &name, con
   impl->set_from_session(false);
   return impl;
 }
-
 }  // namespace mindspore

diff --git a/mindspore/lite/src/cxx_api/tensor/tensor_impl.h b/mindspore/lite/src/cxx_api/tensor/tensor_impl.h
index e500397861..d7922a7d5b 100644
--- a/mindspore/lite/src/cxx_api/tensor/tensor_impl.h
+++ b/mindspore/lite/src/cxx_api/tensor/tensor_impl.h
@@ -13,6 +13,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+#ifndef MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H
+#define MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H
+
 #include
 #include
 #include
@@ -53,10 +57,10 @@ class MSTensor::Impl {
     }
   }
 
-  static Impl *CreateTensorImpl(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
-                                const void *data, size_t data_len);
+  static std::shared_ptr<Impl> CreateTensorImpl(const std::string &name, enum DataType type,
+                                                const std::vector<int64_t> &shape, const void *data, size_t data_len);
 
-  static Impl *StringsToTensorImpl(const std::string &name, const std::vector<std::string> &str);
+  static std::shared_ptr<Impl> StringsToTensorImpl(const std::string &name, const std::vector<std::string> &str);
 
   static std::vector<std::string> TensorImplToStrings(const std::shared_ptr<Impl> &impl) {
     std::vector<std::string> empty;
@@ -116,7 +120,7 @@ class MSTensor::Impl {
       return nullptr;
     }
 
-    return std::shared_ptr<const void>(lite_tensor_->MutableData(), [](const void *) {});
+    return std::shared_ptr<const void>(lite_tensor_->data(), [](const void *) {});
   }
 
   virtual void *MutableData() {
@@ -158,5 +162,6 @@ class MSTensor::Impl {
   bool own_data_ = false;
   bool from_session_ = false;
 };
-
 }  // namespace mindspore
+
+#endif  // MINDSPORE_LITE_SRC_CXX_API_TENSOR_TENSOR_IMPL_H

diff --git a/mindspore/lite/src/cxx_api/tensor_utils.h b/mindspore/lite/src/cxx_api/tensor_utils.h
index c0b442b30a..6e18e873ec 100644
--- a/mindspore/lite/src/cxx_api/tensor_utils.h
+++ b/mindspore/lite/src/cxx_api/tensor_utils.h
@@ -13,6 +13,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+#ifndef MINDSPORE_LITE_SRC_CXX_API_TENSOR_UTILS_H
+#define MINDSPORE_LITE_SRC_CXX_API_TENSOR_UTILS_H
+
 #include
 #include
 #include "ir/dtype/type_id.h"
@@ -45,5 +49,6 @@ static std::vector TruncateShape(const std::vector &shape, enu
   }
   return truncated_shape;
 }
-
 }  // namespace mindspore
+
+#endif  // MINDSPORE_LITE_SRC_CXX_API_TENSOR_UTILS_H

diff --git a/mindspore/lite/src/cxx_api/types.cc b/mindspore/lite/src/cxx_api/types.cc
index 1958dc9c74..e9f12f4cda 100644
--- a/mindspore/lite/src/cxx_api/types.cc
+++ b/mindspore/lite/src/cxx_api/types.cc
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "include/api/types.h"
 #include
 #include
@@ -25,7 +26,6 @@
 #include "src/common/log_adapter.h"
 
 namespace mindspore {
-
 class Buffer::Impl {
  public:
   Impl() : data_() { MS_LOG(ERROR) << "Unsupported feature."; }
@@ -64,7 +64,7 @@ MSTensor::MSTensor(std::nullptr_t) : impl_(nullptr) {}
 MSTensor::MSTensor(const std::shared_ptr<Impl> &impl) : impl_(impl) {}
 MSTensor::MSTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
                    const void *data, size_t data_len)
-    : impl_(std::shared_ptr<Impl>(Impl::CreateTensorImpl(CharToString(name), type, shape, data, data_len))) {}
+    : impl_(Impl::CreateTensorImpl(CharToString(name), type, shape, data, data_len)) {}
 MSTensor::~MSTensor() = default;
 
 bool MSTensor::operator==(std::nullptr_t) const { return impl_ == nullptr; }
@@ -79,7 +79,7 @@ MSTensor *MSTensor::CreateTensor(const std::vector &name, enum DataType ty
     return nullptr;
   }
   ::memcpy(new_data, data, data_len);
-  auto impl = std::shared_ptr<Impl>(Impl::CreateTensorImpl(CharToString(name), type, shape, new_data, data_len));
+  auto impl = Impl::CreateTensorImpl(CharToString(name), type, shape, new_data, data_len);
   if (impl == nullptr) {
     MS_LOG(ERROR) << "Allocate tensor impl failed.";
     free(new_data);
@@ -97,7 +97,7 @@ MSTensor *MSTensor::CreateTensor(const std::vector &name, enum DataType ty
 
 MSTensor *MSTensor::CreateRefTensor(const std::vector<char> &name, enum DataType type,
                                     const std::vector<int64_t> &shape, const void *data, size_t data_len) noexcept {
-  auto impl = std::shared_ptr<Impl>(Impl::CreateTensorImpl(CharToString(name), type, shape, data, data_len));
+  auto impl = Impl::CreateTensorImpl(CharToString(name), type, shape, data, data_len);
   if (impl == nullptr) {
     MS_LOG(ERROR) << "Allocate tensor impl failed.";
     return nullptr;
@@ -111,7 +111,7 @@ MSTensor *MSTensor::CreateRefTensor(const std::vector &name, enum DataType
 }
 
 MSTensor *MSTensor::CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &inputs) {
-  auto impl = std::shared_ptr<Impl>(Impl::StringsToTensorImpl(CharToString(name), VectorCharToString(inputs)));
+  auto impl = Impl::StringsToTensorImpl(CharToString(name), VectorCharToString(inputs));
   if (impl == nullptr) {
     MS_LOG(ERROR) << "Allocate tensor impl failed.";
     return nullptr;
@@ -148,8 +148,7 @@ MSTensor *MSTensor::Clone() const {
     MS_LOG(ERROR) << "Allocate data failed.";
     return nullptr;
   }
-  auto impl =
-    std::shared_ptr<Impl>(Impl::CreateTensorImpl(this->Name(), this->DataType(), this->Shape(), new_data, data_len));
+  auto impl = Impl::CreateTensorImpl(this->Name(), this->DataType(), this->Shape(), new_data, data_len);
   if (impl == nullptr) {
     MS_LOG(ERROR) << "Allocate tensor impl failed.";
     free(new_data);
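
Note (illustrative, not part of the patch): the tensor_impl hunks above change factory functions that used to hand back a raw MSTensor::Impl* into factories that return std::shared_ptr, so the callers in types.cc stop wrapping the pointer themselves and an allocation failure simply surfaces as an empty shared_ptr. The following is a minimal standalone C++ sketch of that ownership pattern under the same nothrow-allocation assumptions; the names LiteTensorStub and ImplSketch are hypothetical stand-ins, not MindSpore code.

#include <iostream>
#include <memory>
#include <new>
#include <string>

// Stand-in for the underlying lite::Tensor object owned by the impl.
struct LiteTensorStub {
  std::string name;
};

class ImplSketch {
 public:
  // Factory returning shared ownership; allocation failures surface as nullptr
  // instead of a raw pointer the caller must remember to wrap or delete.
  static std::shared_ptr<ImplSketch> Create(const std::string &name) {
    auto tensor = new (std::nothrow) LiteTensorStub{name};
    if (tensor == nullptr) {
      return nullptr;  // lite tensor allocation failed
    }
    auto impl = std::shared_ptr<ImplSketch>(new (std::nothrow) ImplSketch(tensor));
    if (impl == nullptr) {
      delete tensor;  // avoid leaking the tensor if the wrapper allocation fails
      return nullptr;
    }
    return impl;
  }

  ImplSketch(const ImplSketch &) = delete;
  ImplSketch &operator=(const ImplSketch &) = delete;
  ~ImplSketch() { delete tensor_; }

 private:
  explicit ImplSketch(LiteTensorStub *tensor) : tensor_(tensor) {}
  LiteTensorStub *tensor_;
};

int main() {
  // Caller takes shared ownership directly, mirroring how types.cc now uses
  // Impl::CreateTensorImpl without an extra std::shared_ptr(...) wrap.
  auto impl = ImplSketch::Create("demo");
  std::cout << (impl != nullptr ? "created" : "failed") << std::endl;
  return 0;
}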