Merge pull request #2888 from gangliao/cpplint

FIX: cpplint code style
gangliao authored 8 years ago, committed by GitHub
commit 9b8451cc14

@@ -27,7 +27,8 @@ set(IGNORE_PATTERN
     .*cblas\\.h.*
     .*\\.pb\\.txt
     .*LtrDataProvider.*
-    .*MultiDataProvider.*)
+    .*MultiDataProvider.*
+    .*pb.*)

 # add_style_check_target
 #
@@ -52,14 +53,13 @@ macro(add_style_check_target TARGET_NAME)
                 endif()
             endforeach()
             if(LINT MATCHES ON)
+                # cpplint code style
                 get_filename_component(base_filename ${filename} NAME)
                 set(CUR_GEN ${CMAKE_CURRENT_BINARY_DIR}/${base_filename}.cpplint)
-                add_custom_command(OUTPUT ${CUR_GEN}
-                                   PRE_BUILD
-                                   COMMAND env ${py_env} "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
-                                           "--filter=${STYLE_FILTER}"
-                                           "--write-success=${CUR_GEN}" ${filename}
-                                   DEPENDS ${filename}
+                add_custom_command(TARGET ${TARGET_NAME} PRE_BUILD
+                                   COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
+                                           "--filter=${STYLE_FILTER}"
+                                           "--write-success=${CUR_GEN}" ${filename}
                                    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
             endif()
         endforeach()
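
Note on the add_custom_command change above: the old OUTPUT/DEPENDS form only reruns when the stamp file is out of date, while the new TARGET ... PRE_BUILD form runs on every build of the target (and on generators other than Visual Studio, CMake treats PRE_BUILD as PRE_LINK, i.e. just before linking). A minimal sketch of the two forms with placeholder names (foo.cc, foo_lib, a bare cpplint command), not taken from this repo:

    # Stamp-file form: reruns only when foo.cc is newer than its .cpplint stamp
    add_custom_command(OUTPUT foo.cc.cpplint
                       COMMAND cpplint foo.cc
                       COMMAND ${CMAKE_COMMAND} -E touch foo.cc.cpplint
                       DEPENDS foo.cc)

    # Target-hook form: reruns every time foo_lib is built
    add_custom_command(TARGET foo_lib PRE_BUILD
                       COMMAND cpplint foo.cc)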

@@ -185,6 +185,10 @@ function(cc_library TARGET_NAME)
       add_dependencies(${TARGET_NAME} ${cc_library_DEPS})
       target_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
     endif()
+
+    # cpplint code style
+    add_style_check_target(${TARGET_NAME} ${cc_library_SRCS})
+
   else(cc_library_SRCS)
     if (cc_library_DEPS)
       merge_static_libs(${TARGET_NAME} ${cc_library_DEPS})
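
Note: with add_style_check_target wired into cc_library, every library declared through it gets the cpplint step for free. A hypothetical invocation (target and source names are illustrative only):

    cc_library(ddim SRCS ddim.cc)
    # building target `ddim` now also runs cpplint over ddim.cc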

@@ -1,9 +1,23 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
 #include "paddle/framework/ddim.h"

 namespace paddle {
 namespace framework {

-///@cond HIDDEN
+/// @cond HIDDEN

 template <int i>
 Dim<i> make_dim(const int* d) {
@@ -50,7 +64,7 @@ void make_ddim(DDim& ddim, const int* dims, int n) {
   }
 }

-///@endcond
+/// @endcond

 DDim make_ddim(std::initializer_list<int> dims) {
   DDim result(make_dim(0));
@@ -64,11 +78,11 @@ DDim make_ddim(const std::vector<int>& dims) {
   return result;
 }

-///@cond HIDDEN
+/// @cond HIDDEN
 // XXX For some reason, putting this in an anonymous namespace causes errors
 class DynamicMutableIndexer : public boost::static_visitor<int&> {
 public:
-  DynamicMutableIndexer(int idx) : idx_(idx) {}
+  explicit DynamicMutableIndexer(int idx) : idx_(idx) {}

   template <int D>
   int& operator()(Dim<D>& dim) const {
@@ -81,7 +95,7 @@ class DynamicMutableIndexer : public boost::static_visitor<int&> {
 class DynamicConstIndexer : public boost::static_visitor<int> {
 public:
-  DynamicConstIndexer(int idx) : idx_(idx) {}
+  explicit DynamicConstIndexer(int idx) : idx_(idx) {}

   template <int D>
   int operator()(const Dim<D>& dim) const {
@@ -92,7 +106,7 @@ class DynamicConstIndexer : public boost::static_visitor<int> {
   int idx_;
 };

-///@endcond
+/// @endcond

 int& DDim::operator[](int idx) {
   return boost::apply_visitor(DynamicMutableIndexer(idx), var);
@@ -155,11 +169,11 @@ int get(const DDim& ddim, int idx) { return ddim[idx]; }

 void set(DDim& ddim, int idx, int value) { ddim[idx] = value; }

-///@cond HIDDEN
+/// @cond HIDDEN
 struct VectorizeVisitor : public boost::static_visitor<> {
   std::vector<int>& vector;

-  VectorizeVisitor(std::vector<int>& v) : vector(v) {}
+  explicit VectorizeVisitor(std::vector<int>& v) : vector(v) {}

   template <typename T>
   void operator()(const T& t) {
@@ -169,7 +183,7 @@ struct VectorizeVisitor : public boost::static_visitor<> {
   void operator()(const Dim<1>& t) { vector.push_back(t.head); }
 };

-///@endcond
+/// @endcond

 std::vector<int> vectorize(const DDim& ddim) {
   std::vector<int> result;
@@ -187,7 +201,7 @@ ssize_t product(const DDim& ddim) {
   return result;
 }

-///\cond HIDDEN
+/// \cond HIDDEN

 struct ArityVisitor : boost::static_visitor<int> {
   template <int D>
@@ -196,15 +210,15 @@ struct ArityVisitor : boost::static_visitor<int> {
   }
 };

-///\endcond
+/// \endcond

 int arity(const DDim& d) { return boost::apply_visitor(ArityVisitor(), d); }

-///\cond HIDDEN
+/// \cond HIDDEN

 struct DDimPrinter : boost::static_visitor<void> {
   std::ostream& os;
-  DDimPrinter(std::ostream& os_) : os(os_) {}
+  explicit DDimPrinter(std::ostream& os_) : os(os_) {}

   template <typename T>
   void operator()(const T& t) {
@@ -212,7 +226,7 @@ struct DDimPrinter : boost::static_visitor<void> {
   }
 };

-///\endcond
+/// \endcond

 std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
   DDimPrinter printer(os);

@@ -27,7 +27,7 @@ struct DDim {
   DDim() : var(Dim<1>()) {}

   template <int D>
-  DDim(const Dim<D>& in) : var(in) {}
+  explicit DDim(const Dim<D>& in) : var(in) {}

   template <int D>
   DDim& operator=(const Dim<D>& in) {
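
Note: many of the C++ hunks in this PR mark single-argument constructors explicit, which is cpplint's runtime/explicit rule: without it the compiler will silently convert the argument type to the class type at call sites. A self-contained sketch of the failure mode, deliberately unrelated to the Paddle types:

    struct Meters {
      explicit Meters(int v) : value(v) {}
      int value;
    };

    void drive(const Meters& m) {}

    int main() {
      // drive(5);      // error once the constructor is explicit:
      //                // no implicit int -> Meters conversion
      drive(Meters(5)); // OK: the conversion is spelled out
    }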

@@ -1,3 +1,19 @@
+/*
+Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 #include "paddle/framework/net.h"

 namespace paddle {

@@ -1,3 +1,17 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
 #include <paddle/framework/op_registry.h>

 namespace paddle {
@@ -33,4 +47,4 @@ void AttrTypeHelper::SetAttrType<std::vector<std::string>>(AttrProto* attr) {
   attr->set_type(paddle::framework::AttrType::STRINGS);
 }
-} // namespace framework
-} // namespace paddle
+}  // namespace framework
+}  // namespace paddle

@@ -40,4 +40,4 @@ std::string OperatorBase::DebugString() const {
 }

-} // namespace framework
-} // namespace paddle
+}  // namespace framework
+}  // namespace paddle

@@ -117,8 +117,7 @@ public:
     ConvFunctionBase::init(config);
   }

-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& input = inputs[0].shape();
     const TensorShape& filter = inputs[1].shape();
     const TensorShape& output = outputs[0].shape();
@@ -217,8 +216,7 @@ public:
     ConvFunctionBase::init(config);
   }

-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& output = inputs[0].shape();
     const TensorShape& filter = inputs[1].shape();
     const TensorShape& input = outputs[0].shape();
@@ -311,8 +309,7 @@ public:
     ConvFunctionBase::init(config);
   }

-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& output = inputs[0].shape();
     const TensorShape& input = inputs[1].shape();
     const TensorShape& filter = outputs[0].shape();

@@ -90,8 +90,7 @@ public:
     ConvFunctionBase::init(config);
   }

-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& input = inputs[0].shape();
     const TensorShape& filter = inputs[1].shape();
     const TensorShape& output = outputs[0].shape();

@@ -403,7 +403,7 @@ public:
       : layerName_(layerName) {
     addEvaluator(std::move(evaluator));
   }
-  virtual void eval(const NeuralNetwork& nn) override {
+  void eval(const NeuralNetwork& nn) override {
     const LayerPtr& layer = nn.getLayer(layerName_);
     CHECK(layer) << "Nonexisted layer: " << layerName_ << " in submodel "
                  << nn.getName();

@@ -636,7 +636,7 @@ void lenToStarts(std::vector<int>& starts) {
   }
   starts.back() = pos;
 }
-}
+}  // namespace

 void RecurrentGradientMachine::calcSequenceStartPositions() {
   std::vector<int> starts(commonSeqInfo_.size() + 1);

@@ -124,7 +124,7 @@ void copyElements(const IVector& srcVec,
     dest[index[i]] = src[i];
   }
 }
-}
+}  // namespace

 void GatherAgentLayer::forwardIds(PassType passType) {
   IVectorPtr realId = realLayers_[0]->getOutputLabel();

@@ -152,6 +152,6 @@ MemoryBlock* MemoryBlock::metadata() const {
                                       reinterpret_cast<const Metadata*>(this) - 1));
 }

-}  // detail
-}  // memory
-}  // paddle
+}  // namespace detail
+}  // namespace memory
+}  // namespace paddle
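
Note: cpplint's readability/namespace check wants each namespace-closing brace annotated as `// namespace <name>` (plain `// namespace` for an anonymous namespace), which is what this hunk and the two `}  // namespace` hunks above add. The convention at a glance:

    namespace paddle {
    namespace memory {
    namespace detail {
    // ...
    }  // namespace detail
    }  // namespace memory
    }  // namespace paddle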

@@ -17,8 +17,6 @@ limitations under the License. */
 #include "paddle/memory/detail/system_allocator.h"
 #include "paddle/platform/assert.h"

-#include <boost/variant.hpp>
-
 namespace paddle {
 namespace memory {

@@ -1,3 +1,17 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
 #include <paddle/framework/op_registry.h>
 #include <paddle/framework/tensor.h>
 #include <paddle/operators/add_op.h>
@@ -36,9 +50,9 @@ The equation is: Out = X + Y
 )DOC");
   }
 };
-} // namespace op
-} // namespace paddle
+}  // namespace operators
+}  // namespace paddle

 REGISTER_OP(add_two, paddle::operators::AddOp, paddle::operators::AddOpMaker);
 REGISTER_OP_CPU_KERNEL(
     add_two, ::paddle::operators::AddKernel<::paddle::platform::CPUPlace>);

@@ -1,3 +1,19 @@
+/*
+Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 #include "parameter_optimizer.h"
 #include <cmath>
 #include <map>
@@ -5,21 +21,18 @@
 #include "gtest/gtest.h"
 #include "lr_policy.h"

-using namespace paddle;
-using namespace paddle::optimizer;
-
-Tensor* FillTensor(size_t size) {
-  Tensor* param = new Tensor(size);
-  Tensor& p = *param;
+paddle::optimizer::Tensor* FillTensor(size_t size) {
+  paddle::optimizer::Tensor* param = new paddle::optimizer::Tensor(size);
+  paddle::optimizer::Tensor& p = *param;
   for (size_t i = 0; i < p.size(); ++i) {
     p[i] = (float)rand() / (float)RAND_MAX;
   }
   return param;
 }

-Tensor* FixedTensor(size_t size) {
-  Tensor* param = new Tensor(size);
-  Tensor& p = *param;
+paddle::optimizer::Tensor* FixedTensor(size_t size) {
+  paddle::optimizer::Tensor* param = new paddle::optimizer::Tensor(size);
+  paddle::optimizer::Tensor& p = *param;
   for (size_t i = 0; i < p.size(); ++i) {
     p[i] = i;
   }
@@ -28,7 +41,8 @@ Tensor* FixedTensor(size_t size) {

 class OptimizerTest : public testing::Test {
 public:
-  // init tensor shape
+  virtual ~OptimizerTest() {}
+  // init paddle::optimizer::Tensor shape
   const size_t kSize = 5;

   virtual void SetUp() {
@@ -38,34 +52,36 @@ public:
   virtual void TearDown() {}

   void CreateSGD() {
-    Tensor* parameter = FixedTensor(kSize);
-    config_.set_optimizer(OptimizerConfig::SGD);
+    paddle::optimizer::Tensor* parameter = FixedTensor(kSize);
+    config_.set_optimizer(paddle::OptimizerConfig::SGD);
     config_.mutable_sgd()->set_momentum(0.0);
     config_.mutable_sgd()->set_decay(0.0);
     config_.mutable_sgd()->set_nesterov(false);
-    config_.set_lr_policy(OptimizerConfig::Const);
+    config_.set_lr_policy(paddle::OptimizerConfig::Const);
     config_.mutable_const_lr()->set_learning_rate(0.1);
     std::string str = config_.SerializeAsString();
-    ParameterOptimizer* opt = ParameterOptimizer::Create(str, parameter);
+    paddle::optimizer::ParameterOptimizer* opt =
+        paddle::optimizer::ParameterOptimizer::Create(str, parameter);
     opts_.push_back(opt);
   }

   void CreateAdam() {
-    Tensor* parameter = FixedTensor(kSize);
-    config_.set_optimizer(OptimizerConfig::Adam);
+    paddle::optimizer::Tensor* parameter = FixedTensor(kSize);
+    config_.set_optimizer(paddle::OptimizerConfig::Adam);
     config_.mutable_adam()->set_beta_1(0.9);
     config_.mutable_adam()->set_beta_2(0.1);
     config_.mutable_adam()->set_epsilon(1e-3);
     config_.mutable_adam()->set_decay(0.0);
-    config_.set_lr_policy(OptimizerConfig::Const);
+    config_.set_lr_policy(paddle::OptimizerConfig::Const);
     config_.mutable_const_lr()->set_learning_rate(0.1);
     std::string str = config_.SerializeAsString();
-    ParameterOptimizer* opt = ParameterOptimizer::Create(str, parameter);
+    paddle::optimizer::ParameterOptimizer* opt =
+        paddle::optimizer::ParameterOptimizer::Create(str, parameter);
     opts_.push_back(opt);
   }

   void TestGetWeight() {
-    Tensor* p = FixedTensor(kSize);
+    paddle::optimizer::Tensor* p = FixedTensor(kSize);
     for (size_t i = 0; i < opts_.size(); ++i) {
       int s = 0;
       float* newp = (float*)opts_[i]->get_weight(&s);
@@ -76,7 +92,7 @@ public:
   }

   void TestUpdate() {
-    Tensor* g = FixedTensor(kSize);
+    paddle::optimizer::Tensor* g = FixedTensor(kSize);
     for (size_t i = 0; i < opts_.size(); ++i) {
       opts_[i]->Update(g);
     }
@@ -91,8 +107,8 @@ public:
   }

 private:
-  std::vector<ParameterOptimizer*> opts_;
-  OptimizerConfig config_;
+  std::vector<paddle::optimizer::ParameterOptimizer*> opts_;
+  paddle::OptimizerConfig config_;
 };

 TEST_F(OptimizerTest, TestGetWeight) { TestGetWeight(); }

@@ -1,19 +1,32 @@
+/*
+Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 #include "serialization.h"
 #include "gtest/gtest.h"

-using namespace paddle;
-using namespace paddle::optimizer;
-
 TEST(TensorToProto, Case1) {
-  Tensor t(3), t1(3);
+  paddle::optimizer::Tensor t(3), t1(3);
   for (size_t i = 0; i < t.size(); ++i) {
     t[i] = i;
     t1[i] = 0;
   }

-  TensorProto proto;
-  TensorToProto(t, &proto);
-  ProtoToTensor(proto, &t1);
+  paddle::TensorProto proto;
+  paddle::optimizer::TensorToProto(t, &proto);
+  paddle::optimizer::ProtoToTensor(proto, &t1);
   for (size_t i = 0; i < t1.size(); ++i) {
     EXPECT_EQ(t1[i], t[i]);
   }
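
Note: both optimizer test files drop their using-directives (cpplint's build/namespaces rule) and fully qualify names instead. A rough sketch of the trade-off, with the same namespaces but otherwise generic code:

    using namespace paddle::optimizer;  // discouraged: imports every name in the namespace
    Tensor t(3);                        // ambiguous if any other Tensor is in scope

    paddle::optimizer::Tensor t2(3);    // always unambiguous, as this diff does it

    using paddle::optimizer::Tensor;    // narrow using-declaration: a middle ground
                                        // generally tolerated in a .cc file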

@@ -41,8 +41,8 @@ inline size_t CpuTotalPhysicalMemory() {
   if (sysctl(mib, 2, &size, &len, NULL, 0) == 0) return (size_t)size;
   return 0L;
 #else
-  long pages = sysconf(_SC_PHYS_PAGES);
-  long page_size = sysconf(_SC_PAGE_SIZE);
+  int64_t pages = sysconf(_SC_PHYS_PAGES);
+  int64_t page_size = sysconf(_SC_PAGE_SIZE);
   return pages * page_size;
 #endif
 }
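
Note: the long -> int64_t change follows cpplint's runtime/int rule: the width of long varies by platform (32 bits on ILP32 and Windows LLP64, 64 bits on LP64). Declaring both operands int64_t also makes the multiplication itself 64-bit, which matters where long is 32 bits; a sketch of the overflow it avoids:

    // sysconf() returns long; on a platform with 32-bit long, 16 GiB of 4 KiB
    // pages is 4194304 * 4096 = 2^34, which overflows a 32-bit multiply.
    int64_t pages = sysconf(_SC_PHYS_PAGES);
    int64_t page_size = sysconf(_SC_PAGE_SIZE);
    int64_t total = pages * page_size;  // performed at 64-bit width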

@@ -1,3 +1,17 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
 #include "paddle/platform/place.h"

 namespace paddle {
@@ -7,7 +21,7 @@ namespace detail {

 class PlacePrinter : public boost::static_visitor<> {
 public:
-  PlacePrinter(std::ostream &os) : os_(os) {}
+  explicit PlacePrinter(std::ostream &os) : os_(os) {}
   void operator()(const CPUPlace &) { os_ << "CPUPlace"; }
   void operator()(const GPUPlace &p) { os_ << "GPUPlace(" << p.device << ")"; }

@@ -12,8 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#ifndef DYNAMIC_LOAD_H_
-#define DYNAMIC_LOAD_H_
+#pragma once

 #include <dlfcn.h>
 #include <memory>
@@ -59,5 +58,3 @@ void GetWarpCTCDsoHandle(void** dso_handle);
  *
  */
 void GetLapackDsoHandle(void** dso_handle);
-
-#endif // DYNAMIC_LOAD_H_
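
Note: the header trades a macro include guard for #pragma once. Both prevent double inclusion; the pragma is not in the C++ standard but is widely supported by mainstream compilers, and it removes the burden of keeping guard names unique across the tree. The two equivalent forms:

    // Guard form (removed above)
    #ifndef DYNAMIC_LOAD_H_
    #define DYNAMIC_LOAD_H_
    // ... declarations ...
    #endif  // DYNAMIC_LOAD_H_

    // Pragma form (added above)
    #pragma once
    // ... declarations ...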

@@ -51,7 +51,7 @@ template <class T>
 class ThreadLocal {
 public:
   ThreadLocal() {
-    CHECK(pthread_key_create(&threadSpecificKey_, dataDestructor) == 0);
+    CHECK_EQ(pthread_key_create(&threadSpecificKey_, dataDestructor), 0);
   }
   ~ThreadLocal() { pthread_key_delete(threadSpecificKey_); }
@@ -65,7 +65,7 @@ public:
     if (!p && createLocal) {
       p = new T();
       int ret = pthread_setspecific(threadSpecificKey_, p);
-      CHECK(ret == 0);
+      CHECK_EQ(ret, 0);
     }
     return p;
   }
@@ -79,7 +79,7 @@ public:
     if (T* q = get(false)) {
       dataDestructor(q);
     }
-    CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+    CHECK_EQ(pthread_setspecific(threadSpecificKey_, p), 0);
   }

   /**
@@ -112,7 +112,7 @@ private:
 template <class T>
 class ThreadLocalD {
 public:
-  ThreadLocalD() { CHECK(pthread_key_create(&threadSpecificKey_, NULL) == 0); }
+  ThreadLocalD() { CHECK_EQ(pthread_key_create(&threadSpecificKey_, NULL), 0); }
   ~ThreadLocalD() {
     pthread_key_delete(threadSpecificKey_);
     for (auto t : threadMap_) {
@@ -127,7 +127,7 @@ public:
     T* p = (T*)pthread_getspecific(threadSpecificKey_);
     if (!p) {
       p = new T();
-      CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+      CHECK_EQ(pthread_setspecific(threadSpecificKey_, p), 0);
       updateMap(p);
     }
     return p;
@@ -141,7 +141,7 @@ public:
     if (T* q = (T*)pthread_getspecific(threadSpecificKey_)) {
       dataDestructor(q);
     }
-    CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+    CHECK_EQ(pthread_setspecific(threadSpecificKey_, p), 0);
     updateMap(p);
   }
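
Note: CHECK_EQ(a, b) is preferred over CHECK(a == b) because the glog-style comparison macros log both operand values on failure, while CHECK only sees an opaque boolean. Roughly (key and p stand in for the members used above):

    int ret = pthread_setspecific(key, p);
    CHECK(ret == 0);   // failure log: Check failed: ret == 0
    CHECK_EQ(ret, 0);  // failure log: Check failed: ret == 0 (22 vs. 0)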
