fix develop build issue (#10978)

* fix develop build issue

* fix google style

* cpplint check only fluid
Wu Yi committed 8f7b020ba8 (parent 8075a11f0e, branch release/0.13.0)

@@ -19,7 +19,7 @@ BasedOnStyle: Google
 IndentWidth: 2
 TabWidth: 2
 ContinuationIndentWidth: 4
-AccessModifierOffset: -2 # The private/protected/public has no indent in class
+AccessModifierOffset: -1 # The private/protected/public has no indent in class
 Standard: Cpp11
 AllowAllParametersOfDeclarationOnNextLine: true
 BinPackParameters: false
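
This one-character change is the root of the whitespace-only churn in the hunks below: clang-format places an access specifier at the class-member indent (IndentWidth: 2) plus AccessModifierOffset, so -2 put public:/private: at column 0 while -1 puts them at column 1, the one-space indent that Google style and cpplint expect. A minimal illustration of the two settings (the Example classes are hypothetical, not from this diff):

// AccessModifierOffset: -2 (old setting): specifiers sit at column 0.
class ExampleOld {
public:
  void run();
private:
  int state_;
};

// AccessModifierOffset: -1 (new setting): specifiers indented one space.
class ExampleNew {
 public:
  void run();
 private:
  int state_;
};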

@@ -94,7 +94,7 @@ void UpdateCallback::apply(Parameter* p) {
 }

 class UpdateCallbackWrapper {
-public:
+ public:
   explicit UpdateCallbackWrapper(const UpdateCallback& callback)
       : callback(const_cast<UpdateCallback&>(callback)) {}
@@ -105,7 +105,7 @@ public:
     delete p;
   }

-private:
+ private:
   UpdateCallback& callback;
 };

File diff suppressed because it is too large.

@@ -138,7 +138,7 @@ struct SequenceGeneratorPrivate {
         maxLength(0UL),
         feedback(__create_feedback__()) {}

-private:
+ private:
   static paddle::Argument __create_feedback__() {
     paddle::Argument feedback;
     feedback.ids = paddle::IVector::create(/* size= */ 1, FLAGS_use_gpu);
@@ -157,7 +157,7 @@ SequenceGenerator::~SequenceGenerator() { delete m; }

 class PathSequenceResults : public ISequenceResults {
   // ISequenceResults interface
-public:
+ public:
   PathSequenceResults(const std::shared_ptr<std::vector<Path>>& path,
                       const std::shared_ptr<std::vector<std::string>>& dict)
       : path_(path), dict_(dict) {}
@@ -196,7 +196,7 @@ public:
     }
   }

-private:
+ private:
   std::shared_ptr<std::vector<Path>> path_;
   std::shared_ptr<std::vector<std::string>> dict_;
 };

@@ -26,7 +26,7 @@ enum GradientMatchineCreateMode {

 namespace paddle {

 class MyNeuralNetwork : public NeuralNetwork {
-public:
+ public:
   MyNeuralNetwork(const std::string& name, NeuralNetwork* network)
       : NeuralNetwork(name, network) {}
 };

@@ -50,7 +50,7 @@ struct PaddleTensor {
  * TODO(Superjomn) Prepare another API for NLP-related usages.
  */
 class PaddlePredictor {
-public:
+ public:
   struct Config;
   PaddlePredictor() = default;
   PaddlePredictor(const PaddlePredictor&) = delete;
@@ -66,6 +66,7 @@ public:
   // be thread-safe.
   virtual std::unique_ptr<PaddlePredictor> Clone() = 0;

+  virtual bool InitShared() { return false; }
   // Destroy the Predictor.
   virtual ~PaddlePredictor() {}
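
For context on the hunk above: Clone() implementations return the copy as std::unique_ptr<PaddlePredictor>, so any post-construction setup has to be reachable through the base type, which is what the virtual InitShared() with its false default provides. A self-contained sketch of that pattern; Predictor is a simplified stand-in for the interface here, and MyPredictor with its members is purely hypothetical:

#include <iostream>
#include <memory>

// Simplified stand-in for the PaddlePredictor interface in the hunk above.
class Predictor {
 public:
  virtual std::unique_ptr<Predictor> Clone() = 0;
  virtual bool InitShared() { return false; }  // default: no shared init
  virtual ~Predictor() {}
};

// Hypothetical subclass, mirroring how an implementation uses the API.
class MyPredictor : public Predictor {
 public:
  std::unique_ptr<Predictor> Clone() override {
    std::unique_ptr<Predictor> copy(new MyPredictor(*this));
    // Dispatches to the derived override even though the copy is held
    // through the base type; access is checked against Predictor, so the
    // override itself may stay private (as in the impl header below).
    if (!copy->InitShared()) {
      return nullptr;
    }
    return copy;
  }

 private:
  bool InitShared() override { return true; }  // e.g. re-bind shared weights
};

int main() {
  MyPredictor p;
  std::unique_ptr<Predictor> q = p.Clone();
  std::cout << (q != nullptr) << "\n";  // prints 1
}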

@@ -28,7 +28,7 @@ namespace {

 // Timer for timer
 class Timer {
-public:
+ public:
   double start;
   double startu;
   void tic() {
@@ -135,8 +135,8 @@ bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,

 std::unique_ptr<PaddlePredictor> PaddlePredictorImpl::Clone() {
   VLOG(3) << "Predictor::clone";
-  std::unique_ptr<PaddlePredictorImpl> cls(new PaddlePredictorImpl(config_));
-  if (!cls->InitShared(this)) {
+  std::unique_ptr<PaddlePredictor> cls(new PaddlePredictorImpl(config_));
+  if (!cls->InitShared()) {
     LOG(ERROR) << "fail to call InitShared";
     return nullptr;
   }
@@ -144,7 +144,7 @@ std::unique_ptr<PaddlePredictor> PaddlePredictorImpl::Clone() {
 }

 // TODO(panyx0718): Consider merge with Init()?
-bool PaddlePredictorImpl::InitShared(PaddlePredictorImpl *cls) {
+bool PaddlePredictorImpl::InitShared() {
   VLOG(3) << "Predictor::init_shared";
   // 1. Define place, executor, scope
   if (this->config_.device >= 0) {

@@ -41,7 +41,7 @@ struct VisConfig : public PaddlePredictor::Config {
  * Do not use this, just a demo indicating how to customize a Predictor.
  */
 class PaddlePredictorImpl : public PaddlePredictor {
-public:
+ public:
   explicit PaddlePredictorImpl(const VisConfig &config) : config_(config) {}

   bool Init();
@@ -53,8 +53,8 @@ public:
   ~PaddlePredictorImpl() override{};

-private:
-  bool InitShared(PaddlePredictorImpl *cls);
+ private:
+  bool InitShared();
   bool SetFeed(const std::vector<PaddleTensor> &input_datas,
                std::vector<paddle::framework::LoDTensor> *feeds);
   bool GetFetch(const std::vector<paddle::framework::LoDTensor> &fetchs,
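
Note how this header lines up with the source change above it: Clone() now holds the fresh copy as std::unique_ptr<PaddlePredictor>, so the zero-argument InitShared() must be callable through the base class (hence the virtual InitShared() defaulting to false in the API header above), while the override here can stay private: because access control is checked against the static type of the pointer, not the dynamic one.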

@@ -31,7 +31,7 @@ struct DemoConfig : public PaddlePredictor::Config {
  * Do not use this, just a demo indicating how to customize a Predictor.
  */
 class DemoPredictor : public PaddlePredictor {
-public:
+ public:
   explicit DemoPredictor(const DemoConfig &config) {
     LOG(INFO) << "I get other_config " << config.other_config;
   }

@@ -31,7 +31,7 @@ namespace hppl {
  */
 template <class T>
 class Active {
-public:
+ public:
   typedef T (*forward)(T);
   typedef T (*backward)(T, T);
 };

File diff suppressed because it is too large.

@@ -30,7 +30,7 @@ bool hl_lstm_sequence_parallel(int frameSize) {
 }

 class frameValue {
-public:
+ public:
   real *value_;
   __device__ frameValue(real *value) : value_(value) {}
   template <int reversed, int frameSize>

@@ -33,7 +33,7 @@ namespace paddle {
  * \param outputs[0] Image data of NCHW format.
  */
 class BlockExpandFunction : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     // function arguments
     strides_ = config.get<std::vector<size_t>>("strides");
@@ -81,7 +81,7 @@ public:
                         (size_t)blockW()});
   }

-protected:
+ protected:
   std::vector<size_t> strides_;
   std::vector<size_t> paddings_;
   std::vector<size_t> blocks_;
@@ -101,7 +101,7 @@ protected:

 template <DeviceType Device>
 class BlockExpandForward : public BlockExpandFunction {
-public:
+ public:
   void init(const FuncConfig& config) override {
     BlockExpandFunction::init(config);
   }
@@ -149,7 +149,7 @@ public:

 template <DeviceType Device>
 class BlockExpandBackward : public BlockExpandFunction {
-public:
+ public:
   void init(const FuncConfig& config) override {
     BlockExpandFunction::init(config);
   }

@@ -63,12 +63,12 @@ enum ArgType {
   ADD_TO = 2,
 };

 class BufferArg {
-public:
+ public:
   void setArgType(ArgType argType) { argType_ = argType; }

   ArgType getArgType() const { return argType_; }

-public:
+ public:
   BufferArg(ValueType valueType,
             const TensorShape& shape,
             ArgType argType = UNSPECIFIED)
@@ -169,7 +169,7 @@ public:
   const SequenceArg& sequence() const;
   const SparseMatrixArg& sparse() const;

-protected:
+ protected:
   void* buf_;
   ValueType valueType_;
   TensorShape shape_;
@@ -185,7 +185,7 @@ protected:
 // valueType_ = int32
 // if a < b then value_.buf_[a] < value_.buf_[b]
 class SequenceIdArg : public BufferArg {
-public:
+ public:
   SequenceIdArg(const TensorShape& shape, ArgType argType = UNSPECIFIED)
       : BufferArg(VALUE_TYPE_INT32, shape, argType) {
     bufferType_ = TENSOR_SEQUENCE_ID;
@@ -212,7 +212,7 @@ public:
   size_t numSeqs() const { return numSeqs_; }

-private:
+ private:
   size_t numSeqs_;
 };
@@ -222,7 +222,7 @@ private:
 // SequenceArg can be used to represent sequences that contain multiple
 // unequal lengths.
 class SequenceArg : public BufferArg {
-public:
+ public:
   SequenceArg(ValueType valueType,
               const TensorShape& shape,
               ArgType argType = UNSPECIFIED)
@@ -255,7 +255,7 @@ public:
   SequenceIdArg& getSequenceId() { return startPositions_; }
   const SequenceIdArg& getSequenceId() const { return startPositions_; }

-private:
+ private:
   SequenceIdArg startPositions_;
 };
@@ -263,7 +263,7 @@ private:
 // valueType_ == float or double
 // shape_.ndims() == 2
 class SparseMatrixArg : public BufferArg {
-public:
+ public:
   SparseMatrixArg(void* buf,
                   ValueType valueType,
                   const TensorShape& shape,
@@ -353,7 +353,7 @@ public:
   SparseDataType dataType() const { return type_; }

-private:
+ private:
   BufferArg row_;
   BufferArg col_;
   size_t nnz_;

@@ -100,7 +100,7 @@ void ContextProjectionForward<DEVICE_TYPE_CPU>(CpuMatrix& out_mat,
  */
 template <DeviceType Device>
 class ContextProjectionForwardFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     context_length_ = config.get<size_t>("context_length");
     context_start_ = config.get<int>("context_start");
@@ -146,7 +146,7 @@ public:
                                      begin_pad_);
   }

-private:
+ private:
   size_t context_length_;
   int context_start_;
   size_t begin_pad_;
@@ -223,7 +223,7 @@ void ContextProjectionBackward<DEVICE_TYPE_CPU>(const CpuMatrix& out_grad_mat,
  */
 template <DeviceType Device>
 class ContextProjectionBackwardFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     context_length_ = config.get<size_t>("context_length");
     context_start_ = config.get<int>("context_start");
@@ -278,7 +278,7 @@ public:
                                       total_pad_);
   }

-private:
+ private:
   size_t context_length_;
   int context_start_;
   size_t begin_pad_;
@@ -299,7 +299,7 @@ private:
  */
 template <DeviceType Device>
 class ContextProjectionBackwardDataFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     context_length_ = config.get<size_t>("context_length");
     context_start_ = config.get<int>("context_start");
@@ -331,7 +331,7 @@ public:
         out_grad_mat, in_grad_mat, seq_vec, context_length_, context_start_);
   }

-private:
+ private:
   size_t context_length_;
   int context_start_;
 };
@@ -348,7 +348,7 @@ private:
  */
 template <DeviceType Device>
 class ContextProjectionBackwardWeightFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     context_length_ = config.get<size_t>("context_length");
     context_start_ = config.get<int>("context_start");
@@ -382,7 +382,7 @@ public:
                                            begin_pad_);
   }

-private:
+ private:
   size_t context_length_;
   int context_start_;
   size_t begin_pad_;

@@ -56,7 +56,7 @@ namespace paddle {
  * H and W is height and width of filter.
  */
 class ConvFunctionBase : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     // function arguments
     strides_ = config.get<std::vector<size_t>>("strides");
@@ -101,7 +101,7 @@ public:
     }
   }

-protected:
+ protected:
   size_t getFilterHeight(const TensorShape& filter) const {
     return filter[filter.ndims() - 2];
   }

@@ -97,7 +97,7 @@ class CosSimForwardFunc : public FunctionBase {
     CosSimForward<Device>(out_mat, in1_mat, in2_mat, scale_);
   }

-private:
+ private:
   real scale_;
 };
@@ -227,7 +227,7 @@ class CosSimBackwardFunc : public FunctionBase {
         out_grad, out_val, in1_val, in2_val, in1_grad, in2_grad, scale_);
   }

-private:
+ private:
   real scale_;
 };

@@ -112,7 +112,7 @@ void CropGrad<DEVICE_TYPE_CPU>(const real* inGrad,
  */
 template <DeviceType Device>
 class CropFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override { conf_ = config; }

   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
@@ -130,7 +130,7 @@ public:
                  conf_);
   }

-private:
+ private:
   FuncConfig conf_;
 };
@@ -145,7 +145,7 @@ private:
 template <DeviceType Device>
 class CropGradFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override { conf_ = config; }

   void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
@@ -163,7 +163,7 @@ public:
                      conf_);
   }

-private:
+ private:
   FuncConfig conf_;
 };

@@ -160,7 +160,7 @@ void CrossMapNormalGrad<DEVICE_TYPE_CPU>(real* inputsGrad,
  */
 template <DeviceType Device>
 class CrossMapNormalFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     // function arguments
     size_ = config.get<size_t>("size");
@@ -220,7 +220,7 @@ public:
     return ops;
   }

-private:
+ private:
   size_t size_;
   real scale_;
   real pow_;
@@ -260,7 +260,7 @@ private:
  */
 template <DeviceType Device>
 class CrossMapNormalGradFunc : public FunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     // function arguments
     size_ = config.get<size_t>("size");
@@ -328,7 +328,7 @@ public:
     return ops;
   }

-private:
+ private:
   size_t size_;
   real scale_;
   real pow_;

@@ -19,7 +19,7 @@ namespace paddle {

 template <class T>
 class DepthwiseConvFunctor<DEVICE_TYPE_CPU, T> {
-public:
+ public:
   void operator()(const T* inputData,
                   const T* filterData,
                   int batchSize,
@@ -43,7 +43,7 @@ public:

 template <class T>
 class DepthwiseConvGradInputFunctor<DEVICE_TYPE_CPU, T> {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* filterData,
                   int batchSize,
@@ -66,7 +66,7 @@ public:

 template <class T>
 class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_CPU, T> {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* inputData,
                   int batchSize,
@@ -93,7 +93,7 @@ public:
  */
 template <DeviceType Device>
 class DepthwiseConvFunction : public ConvFunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     ConvFunctionBase::init(config);
   }
@@ -156,7 +156,7 @@ public:
  */
 template <DeviceType Device>
 class DepthwiseConvGradInputFunction : public ConvFunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     ConvFunctionBase::init(config);
   }
@@ -220,7 +220,7 @@ public:
  */
 template <DeviceType Device>
 class DepthwiseConvGradFilterFunction : public ConvFunctionBase {
-public:
+ public:
   void init(const FuncConfig& config) override {
     ConvFunctionBase::init(config);
   }

@@ -44,7 +44,7 @@ namespace paddle {
  */
 template <DeviceType Device, class T>
 class DepthwiseConvFunctor {
-public:
+ public:
   void operator()(const T* inputData,
                   const T* filterData,
                   int batchSize,
@@ -89,7 +89,7 @@ public:
  */
 template <DeviceType Device, class T>
 class DepthwiseConvGradInputFunctor {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* filterData,
                   int batchSize,
@@ -135,7 +135,7 @@ public:
  */
 template <DeviceType Device, class T>
 class DepthwiseConvGradFilterFunctor {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* inputData,
                   int batchSize,

@@ -199,7 +199,7 @@ __global__ void ConvolutionDepthwiseFilterBackward(const int num_i,

 template <class T>
 class DepthwiseConvFunctor<DEVICE_TYPE_GPU, T> {
-public:
+ public:
   void operator()(const T* inputData,
                   const T* filterData,
                   int batchSize,
@@ -249,7 +249,7 @@ public:

 template <class T>
 class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, T> {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* filterData,
                   int batchSize,
@@ -300,7 +300,7 @@ public:

 template <class T>
 class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, T> {
-public:
+ public:
   void operator()(const T* outputGrad,
                   const T* inputData,
                   int batchSize,

@@ -46,7 +46,7 @@ int GetCpuCount() { return 1; }
 #endif

 class EigenDeviceWarpper {
-public:  // NOLINT
+ public:  // NOLINT
 #if EIGEN_USE_THREADS
   static Eigen::ThreadPoolDevice* device() {
     const int num_cpus = GetCpuCount();

@@ -29,7 +29,7 @@ namespace paddle {
  * The argument type of Function::init.
  */
 class FuncConfig {
-public:
+ public:
   template <typename T>
   T get(const std::string& key, Error* err = nullptr) const {
     try {
@@ -59,7 +59,7 @@ public:
     return *this;
   }

-protected:
+ protected:
   mutable std::unordered_map<std::string, any> valueMap_;
 };
@@ -77,7 +77,7 @@ protected:
  * in the BufferArgs life time.
  */
 class BufferArgs {
-public:
+ public:
   BufferArgs() {}

   ~BufferArgs() {
@@ -137,7 +137,7 @@ public:
   void addArg(SparseMatrixArg& arg) { args_.push_back(&arg); }

-private:
+ private:
   std::vector<BufferArg*> args_;
   // The BufferArg object is constructed and freed by BufferArgs.
   std::vector<BufferArg*> _args_;
@@ -163,7 +163,7 @@ private:
  * If Function has more than one output, each output can have different modes.
  */
 class FunctionBase {
-public:
+ public:
   virtual ~FunctionBase() {}

   virtual void init(const FuncConfig& config) {}
@@ -192,7 +192,7 @@ public:
   static ClassRegistrar<FunctionBase> funcRegistrar_;

-protected:
+ protected:
   // numInputs_ and numOutputs_ represents the maximum
   // input and output supported by Function.
   // Some functions are optimized for input and output,
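
A note on the FuncConfig API seen throughout this diff: the templated get looks a key up in valueMap_ and reports failure through the optional Error* out-parameter, while the setter returns *this so calls can chain. A hedged usage sketch (a fragment, assuming the surrounding paddle/function headers and that paddle::Error offers an isOK()-style check):

paddle::FuncConfig config;
config.set("context_length", static_cast<size_t>(3));  // chainable: returns *this

paddle::Error err;
size_t len = config.get<size_t>("context_length", &err);
if (!err.isOK()) {
  // missing key or wrong type: handle the error instead of crashing
}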

@@ -39,7 +39,7 @@ struct Allocator<DEVICE_TYPE_GPU> {

 // Copy argument1 to argument2
 template <DeviceType DType1, DeviceType DType2>
 class CopyArgument {
-public:
+ public:
   void operator()(const BufferArg& arg1, BufferArg& arg2) {
     CHECK_EQ(arg1.valueType(), arg2.valueType());
     CHECK_LE(arg1.shape().getElements(), arg2.shape().getElements());
@@ -95,7 +95,7 @@ public:
  */
 template <DeviceType DType1, DeviceType DType2>
 class Compare2Function {
-public:
+ public:
   typedef typename test::Allocator<DType1>::type Allocator1;
   typedef typename test::Allocator<DType2>::type Allocator2;
   typedef typename Tensor<real, DType1>::Vector Vector1;
@@ -305,7 +305,7 @@ public:
   std::shared_ptr<FunctionBase> getFunction2() const { return function2_; }

-protected:
+ protected:
   // only init cpu argument, gpu argument copy from cpu argument.
   void initArg(BufferArg& arg) {
     Vector1 vector(arg.shape().getElements(), (real*)arg.data());
@@ -381,7 +381,7 @@ protected:
     }
   }

-protected:
+ protected:
   std::shared_ptr<FunctionBase> function1_;
   std::shared_ptr<FunctionBase> function2_;
   std::vector<std::shared_ptr<Allocator1>> func1Memory_;
@@ -400,7 +400,7 @@ protected:

 class CpuGpuFuncCompare
     : public Compare2Function<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> {
-public:
+ public:
   CpuGpuFuncCompare(const std::string& name, const FuncConfig& config)
       : Compare2Function(name + "-CPU", name + "-GPU", config) {}

Some files were not shown because too many files have changed in this diff.
