Polish code style

test=develop

Commit b40e41fbd1 (parent 36dce65bb3) by minqiyang, 6 years ago
Branch: revert-16190-refine_parallel_executor
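
Summary of the change: every VarTypeInference::operator() overload, and the
Tracer::Trace output argument, now takes its mutable argument as a pointer
(framework::InferVarTypeContext* ctx, VarBasePtrMap* outputs) instead of a
non-const reference, so call sites pass &ctx / &outputs and the in/out role
of the argument is visible where it is used. A minimal sketch of the pattern;
the names below are abbreviations for illustration, not the exact Paddle
sources:

    struct Ctx {};  // stands in for framework::InferVarTypeContext

    struct Infer {
      // before: void operator()(Ctx& ctx) const;  invoked as infer(ctx)
      void operator()(Ctx* ctx) const {}  // now invoked as infer(&ctx)
    };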

@@ -68,11 +68,11 @@ class SplitOpMaker : public OpProtoAndCheckerMaker {
 class DummyVarTypeInference : public VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext& ctx) const override {
-    auto& inputs = ctx.Input("X");
-    auto type = ctx.GetType(inputs.front());
-    auto out_var_name = ctx.Output("Out").front();
-    ctx.SetType(out_var_name, type);
+  void operator()(framework::InferVarTypeContext* ctx) const override {
+    auto& inputs = ctx->Input("X");
+    auto type = ctx->GetType(inputs.front());
+    auto out_var_name = ctx->Output("Out").front();
+    ctx->SetType(out_var_name, type);
   }
 };

@@ -131,7 +131,7 @@ struct OpInfoFiller<T, kVarTypeInference> {
   void operator()(const char* op_type, OpInfo* info) const {
     info->infer_var_type_ = [](InferVarTypeContext* context) {
       T inference;
-      inference(*context);
+      inference(context);
     };
   }
 };
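
With the functor now taking a pointer, the registration lambda above forwards
its InferVarTypeContext* argument directly (inference(context)) rather than
dereferencing it (inference(*context)). The stored callback is later invoked
with the address of a concrete context, as the Tracer hunk further down shows:

    RuntimeInferVarTypeContext infer_var_type_ctx(&inputs, outputs, &attrs_map);
    info.infer_var_type_(&infer_var_type_ctx);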

@@ -43,20 +43,20 @@ class SumOpMaker : public OpProtoAndCheckerMaker {
 class SumOpVarTypeInference : public VarTypeInference {
  public:
-  void operator()(InferVarTypeContext &ctx) const override {
-    auto &inputs = ctx.Input("X");
+  void operator()(InferVarTypeContext *ctx) const override {
+    auto &inputs = ctx->Input("X");
     auto default_var_type = proto::VarType::SELECTED_ROWS;
     bool any_input_is_lod_tensor = std::any_of(
         inputs.begin(), inputs.end(), [&ctx](const std::string &name) {
-          return ctx.GetType(name) == proto::VarType::LOD_TENSOR;
+          return ctx->GetType(name) == proto::VarType::LOD_TENSOR;
         });
     if (any_input_is_lod_tensor) {
       default_var_type = proto::VarType::LOD_TENSOR;
     }
-    auto out_var_name = ctx.Output("Out").front();
-    ctx.SetType(out_var_name, default_var_type);
+    auto out_var_name = ctx->Output("Out").front();
+    ctx->SetType(out_var_name, default_var_type);
   }
 };
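
One subtlety in the hunk above: the lambda still captures ctx by reference
([&ctx]) even though ctx is now a pointer. That is harmless, but capturing
the pointer by value would be equivalent and slightly simpler; an alternative
sketch (not what this commit does):

    bool any_input_is_lod_tensor = std::any_of(
        inputs.begin(), inputs.end(), [ctx](const std::string &name) {
          return ctx->GetType(name) == proto::VarType::LOD_TENSOR;
        });
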
@@ -71,7 +71,7 @@ class DummyOpMaker : public OpProtoAndCheckerMaker {
 class DummyOpVarTypeInference : public VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {}
+  void operator()(framework::InferVarTypeContext *ctx) const override {}
 };
 }  // namespace framework
 }  // namespace paddle

@@ -126,20 +126,20 @@ class InferVarTypeContext {
 class VarTypeInference {
  public:
   virtual ~VarTypeInference() {}
-  virtual void operator()(InferVarTypeContext& context) const = 0;  // NOLINT
+  virtual void operator()(InferVarTypeContext* context) const = 0;  // NOLINT
 };
 
 class PassInDtypeAndVarTypeToOutput : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext& ctx) const final {  // NOLINT
+  void operator()(framework::InferVarTypeContext* ctx) const final {  // NOLINT
     auto in_out_var_names = this->GetInputOutputWithSameType();
 
     for (auto& i_o_n : in_out_var_names) {
-      auto& x_name = ctx.Input(i_o_n.first).at(0);
-      auto& out_name = ctx.Output(i_o_n.second).at(0);
+      auto& x_name = ctx->Input(i_o_n.first).at(0);
+      auto& out_name = ctx->Output(i_o_n.second).at(0);
 
-      ctx.SetType(out_name, ctx.GetType(x_name));
-      ctx.SetDataType(out_name, ctx.GetDataType(x_name));
+      ctx->SetType(out_name, ctx->GetType(x_name));
+      ctx->SetDataType(out_name, ctx->GetDataType(x_name));
     }
   }

@@ -44,20 +44,20 @@ class SumOpMaker : public OpProtoAndCheckerMaker {
 class SumOpVarTypeInference : public VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    auto &inputs = ctx.Input("X");
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto &inputs = ctx->Input("X");
     auto default_var_type = proto::VarType::SELECTED_ROWS;
     bool any_input_is_lod_tensor = std::any_of(
         inputs.begin(), inputs.end(), [&ctx](const std::string &name) {
-          return ctx.GetType(name) == proto::VarType::LOD_TENSOR;
+          return ctx->GetType(name) == proto::VarType::LOD_TENSOR;
         });
     if (any_input_is_lod_tensor) {
       default_var_type = proto::VarType::LOD_TENSOR;
     }
-    auto out_var_name = ctx.Output("Out").front();
-    ctx.SetType(out_var_name, default_var_type);
+    auto out_var_name = ctx->Output("Out").front();
+    ctx->SetType(out_var_name, default_var_type);
   }
 };
 }  // namespace framework

@@ -161,7 +161,7 @@ Tracer::Tracer(framework::BlockDesc* root_block) : root_block_(root_block) {
 }
 
 std::set<std::string> Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
-                                    VarBasePtrMap& outputs,
+                                    VarBasePtrMap* outputs,
                                     framework::AttributeMap attrs_map,
                                     const platform::Place expected_place,
                                     const bool stop_gradient) {
@@ -195,7 +195,7 @@ std::set<std::string> Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
     }
   }
 
-  op->output_vars_ = outputs;
+  op->output_vars_ = *outputs;
   for (auto it : op->output_vars_) {
     auto& outvars = outvars_map[it.first];
     const std::vector<VarBase*>& outputs = it.second;
@@ -218,7 +218,7 @@ std::set<std::string> Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
   framework::VariableNameMap invars_name_map =
       CreateInputVarNameMap(op, inputs);
   framework::VariableNameMap outvars_name_map =
-      CreateOutputVarNameMap(op, outputs);
+      CreateOutputVarNameMap(op, *outputs);
 
   auto& info = framework::OpInfoMap::Instance().Get(op->Type());
   if (info.Checker() != nullptr) {
@@ -230,8 +230,7 @@ std::set<std::string> Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
                                     outvars_name_map, attrs_map);
 
   if (info.infer_var_type_) {
-    RuntimeInferVarTypeContext infer_var_type_ctx(&inputs, &outputs,
-                                                  &attrs_map);
+    RuntimeInferVarTypeContext infer_var_type_ctx(&inputs, outputs, &attrs_map);
     info.infer_var_type_(&infer_var_type_ctx);
   }

@@ -48,7 +48,7 @@ class Tracer {
   virtual ~Tracer() {}
 
   std::set<std::string> Trace(OpBase* op, const VarBasePtrMap& inputs,
-                              VarBasePtrMap& outputs,  // NOLINT
+                              VarBasePtrMap* outputs,  // NOLINT
                               framework::AttributeMap attrs_map,
                               const platform::Place expected_place,
                               const bool stop_gradient = false);
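
Callers of Trace now pass the outputs map by address. A hypothetical call
site under the new signature (the variable names are illustrative, not taken
from this diff):

    imperative::VarBasePtrMap outputs;
    std::set<std::string> vars_saved_for_backward =
        tracer.Trace(op, inputs, &outputs, attrs_map, platform::CPUPlace(),
                     false /* stop_gradient */);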

@@ -203,12 +203,12 @@ class BeamSearchDecodeInferShape : public framework::InferShapeBase {
 class BeamSearchDecodeInferVarType : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext& ctx) const override {
-    for (auto& o : ctx.Output("SentenceIds")) {
-      ctx.SetType(o, framework::proto::VarType::LOD_TENSOR);
+  void operator()(framework::InferVarTypeContext* ctx) const override {
+    for (auto& o : ctx->Output("SentenceIds")) {
+      ctx->SetType(o, framework::proto::VarType::LOD_TENSOR);
     }
-    for (auto& o : ctx.Output("SentenceScores")) {
-      ctx.SetType(o, framework::proto::VarType::LOD_TENSOR);
+    for (auto& o : ctx->Output("SentenceScores")) {
+      ctx->SetType(o, framework::proto::VarType::LOD_TENSOR);
     }
   }
 };

@@ -120,12 +120,12 @@ class BeamSearchOp : public framework::OperatorWithKernel {
 class BeamSearchInferVarType : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    for (auto &o : ctx.Output("selected_ids")) {
-      ctx.SetType(o, framework::proto::VarType::LOD_TENSOR);
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    for (auto &o : ctx->Output("selected_ids")) {
+      ctx->SetType(o, framework::proto::VarType::LOD_TENSOR);
     }
-    for (auto &o : ctx.Output("selected_scores")) {
-      ctx.SetType(o, framework::proto::VarType::LOD_TENSOR);
+    for (auto &o : ctx->Output("selected_scores")) {
+      ctx->SetType(o, framework::proto::VarType::LOD_TENSOR);
     }
   }
 };

@@ -100,13 +100,13 @@ class WriteToArrayInferShape : public framework::InferShapeBase {
 class WriteToArrayInferVarType : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    auto x_name = ctx.Input("X")[0];
-    auto out_name = ctx.Output("Out")[0];
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto x_name = ctx->Input("X")[0];
+    auto out_name = ctx->Output("Out")[0];
     VLOG(10) << "Set Variable " << out_name << " as LOD_TENSOR_ARRAY";
-    ctx.SetType(out_name, framework::proto::VarType::LOD_TENSOR_ARRAY);
-    if (ctx.HasVar(x_name)) {
-      ctx.SetDataType(out_name, ctx.GetDataType(x_name));
+    ctx->SetType(out_name, framework::proto::VarType::LOD_TENSOR_ARRAY);
+    if (ctx->HasVar(x_name)) {
+      ctx->SetDataType(out_name, ctx->GetDataType(x_name));
     }
   }
 };

@@ -365,16 +365,16 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker {
 class WhileGradOpVarTypeInference : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    auto p_names = ctx.Input(kX);
-    auto pg_ig_names = ctx.Output(framework::GradVarName(kX));
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto p_names = ctx->Input(kX);
+    auto pg_ig_names = ctx->Output(framework::GradVarName(kX));
     for (size_t i = 0; i < p_names.size(); ++i) {
-      if (ctx.HasVar(pg_ig_names[i])) {
+      if (ctx->HasVar(pg_ig_names[i])) {
         VLOG(5) << "Setting " << pg_ig_names[i] << " following " << p_names[i]
-                << " type: " << ctx.GetType(p_names[i]);
-        ctx.SetType(pg_ig_names[i], ctx.GetType(p_names[i]));
-        ctx.SetDataType(pg_ig_names[i], ctx.GetDataType(p_names[i]));
+                << " type: " << ctx->GetType(p_names[i]);
+        ctx->SetType(pg_ig_names[i], ctx->GetType(p_names[i]));
+        ctx->SetDataType(pg_ig_names[i], ctx->GetDataType(p_names[i]));
       }
     }
   }

@@ -56,7 +56,7 @@ class FakeInitOp : public framework::OperatorBase {
 class FakeInitOpVarTypeInference : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {}
+  void operator()(framework::InferVarTypeContext *ctx) const override {}
 };
 
 class FakeInitOpMaker : public framework::OpProtoAndCheckerMaker {

@@ -114,10 +114,10 @@ class MergeIdsOp : public framework::OperatorWithKernel {
 class MergeIdsOpInferVarType : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    auto input_type = ctx.GetType(ctx.Input("Ids")[0]);
-    for (auto &out_var : ctx.Output("Out")) {
-      ctx.SetType(out_var, input_type);
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto input_type = ctx->GetType(ctx->Input("Ids")[0]);
+    for (auto &out_var : ctx->Output("Out")) {
+      ctx->SetType(out_var, input_type);
     }
   }
 };

@@ -73,10 +73,10 @@ class SplitIdsOp : public framework::OperatorWithKernel {
 class SplitIdsOpInferVarType : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    auto input_type = ctx.GetType(ctx.Input("Ids")[0]);
-    for (auto &out_var : ctx.Output("Out")) {
-      ctx.SetType(out_var, input_type);
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto input_type = ctx->GetType(ctx->Input("Ids")[0]);
+    for (auto &out_var : ctx->Output("Out")) {
+      ctx->SetType(out_var, input_type);
     }
   }
 };

@@ -39,11 +39,11 @@ class FillConstantOp : public framework::OperatorWithKernel {
 class FillConstantOpVarTypeInference : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext& ctx) const override {
+  void operator()(framework::InferVarTypeContext* ctx) const override {
     auto data_type = static_cast<framework::proto::VarType::Type>(
-        boost::get<int>(ctx.GetAttr("dtype")));
-    auto& out_var_name = ctx.Output("Out").front();
-    ctx.SetDataType(out_var_name, data_type);
+        boost::get<int>(ctx->GetAttr("dtype")));
+    auto& out_var_name = ctx->Output("Out").front();
+    ctx->SetDataType(out_var_name, data_type);
   }
 };

@@ -137,20 +137,20 @@ class FusedEmbeddingSeqPoolOpGrad : public framework::OperatorWithKernel {
 class FusedEmbeddingSeqPoolOpGradVarTypeInference
     : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext& ctx) const override {
-    auto out_var_name = ctx.Output(framework::GradVarName("W")).front();
-    auto attr = ctx.GetAttr("is_sparse");
+  void operator()(framework::InferVarTypeContext* ctx) const override {
+    auto out_var_name = ctx->Output(framework::GradVarName("W")).front();
+    auto attr = ctx->GetAttr("is_sparse");
     bool is_sparse = boost::get<bool>(attr);
     if (is_sparse) {
       VLOG(3) << "fused_embedding_seq_pool_grad op "
               << framework::GradVarName("W") << " is set to SelectedRows";
-      ctx.SetType(out_var_name, framework::proto::VarType::SELECTED_ROWS);
+      ctx->SetType(out_var_name, framework::proto::VarType::SELECTED_ROWS);
     } else {
       VLOG(3) << "fused_embedding_seq_pool_grad op "
              << framework::GradVarName("W") << " is set to LoDTensor";
-      ctx.SetType(out_var_name, framework::proto::VarType::LOD_TENSOR);
+      ctx->SetType(out_var_name, framework::proto::VarType::LOD_TENSOR);
     }
-    ctx.SetDataType(out_var_name, ctx.GetDataType(ctx.Input("W")[0]));
+    ctx->SetDataType(out_var_name, ctx->GetDataType(ctx->Input("W")[0]));
   }
 };

@@ -81,12 +81,12 @@ GetTensorFromSelectedRows is used to get the tensor from SelectedRows.
 class GetTensorFromSelectedRowsOpVarTypeInference
     : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const {  // NOLINT
-    auto out_var_name = ctx.Output("Out").front();
-    auto in_var_name = ctx.Input("X").front();
+  void operator()(framework::InferVarTypeContext *ctx) const {  // NOLINT
+    auto out_var_name = ctx->Output("Out").front();
+    auto in_var_name = ctx->Input("X").front();
 
-    ctx.SetType(out_var_name, framework::proto::VarType::LOD_TENSOR);
-    ctx.SetDataType(out_var_name, ctx.GetDataType(in_var_name));
+    ctx->SetType(out_var_name, framework::proto::VarType::LOD_TENSOR);
+    ctx->SetDataType(out_var_name, ctx->GetDataType(in_var_name));
   }
 };

@@ -197,32 +197,32 @@ class HierarchicalSigmoidGradOp : public framework::OperatorWithKernel {
 class HierarchicalSigmoidGradOpGradVarTypeInference
     : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext& ctx) const override {
-    auto w_grad_var_name = ctx.Output(framework::GradVarName("W")).front();
-    auto bias_grad_var_name_vec = ctx.Output(framework::GradVarName("Bias"));
+  void operator()(framework::InferVarTypeContext* ctx) const override {
+    auto w_grad_var_name = ctx->Output(framework::GradVarName("W")).front();
+    auto bias_grad_var_name_vec = ctx->Output(framework::GradVarName("Bias"));
 
     std::string bias_grad_var_name;
     bool hasBias = false;
     if (bias_grad_var_name_vec.size()) {
       hasBias = true;
-      bias_grad_var_name = ctx.Output(framework::GradVarName("Bias")).front();
+      bias_grad_var_name = ctx->Output(framework::GradVarName("Bias")).front();
     }
-    auto attr = ctx.GetAttr("is_sparse");
+    auto attr = ctx->GetAttr("is_sparse");
     bool is_sparse = boost::get<bool>(attr);
     if (is_sparse) {
       VLOG(30) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W")
                << " is set to SelectedRows";
-      ctx.SetType(w_grad_var_name, framework::proto::VarType::SELECTED_ROWS);
+      ctx->SetType(w_grad_var_name, framework::proto::VarType::SELECTED_ROWS);
     } else {
       VLOG(30) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W")
                << " is set to LoDTensor";
-      ctx.SetType(w_grad_var_name, framework::proto::VarType::LOD_TENSOR);
+      ctx->SetType(w_grad_var_name, framework::proto::VarType::LOD_TENSOR);
     }
     if (hasBias) {
       VLOG(30) << "hierarchical_sigmoid_grad op "
               << framework::GradVarName("Bias") << " is set to LoDTensor";
-      ctx.SetType(bias_grad_var_name, framework::proto::VarType::LOD_TENSOR);
+      ctx->SetType(bias_grad_var_name, framework::proto::VarType::LOD_TENSOR);
    }
-    ctx.SetDataType(w_grad_var_name, ctx.GetDataType(ctx.Input("W")[0]));
+    ctx->SetDataType(w_grad_var_name, ctx->GetDataType(ctx->Input("W")[0]));
   }
 };

@@ -64,9 +64,9 @@ class LoDRankTableInferShape : public framework::InferShapeBase {
 class LoDRankTableInferVarType : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    for (auto &o : ctx.Output("Out")) {
-      ctx.SetType(o, framework::proto::VarType::LOD_RANK_TABLE);
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    for (auto &o : ctx->Output("Out")) {
+      ctx->SetType(o, framework::proto::VarType::LOD_RANK_TABLE);
     }
   }
 };

@@ -201,9 +201,9 @@ class LoDTensorToArrayInferShape : public framework::InferShapeBase {
 class LoDTensorToArrayInferVarType : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    for (auto &out_var : ctx.Output("Out")) {
-      ctx.SetType(out_var, framework::proto::VarType::LOD_TENSOR_ARRAY);
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    for (auto &out_var : ctx->Output("Out")) {
+      ctx->SetType(out_var, framework::proto::VarType::LOD_TENSOR_ARRAY);
     }
   }
 };

@@ -147,20 +147,20 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
 class LookupTableOpGradVarTypeInference : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext& ctx) const override {
-    auto out_var_name = ctx.Output(framework::GradVarName("W")).front();
-    auto attr = ctx.GetAttr("is_sparse");
+  void operator()(framework::InferVarTypeContext* ctx) const override {
+    auto out_var_name = ctx->Output(framework::GradVarName("W")).front();
+    auto attr = ctx->GetAttr("is_sparse");
     bool is_sparse = boost::get<bool>(attr);
     if (is_sparse) {
       VLOG(3) << "lookup_table_grad op " << framework::GradVarName("W")
               << " is set to SelectedRows";
-      ctx.SetType(out_var_name, framework::proto::VarType::SELECTED_ROWS);
+      ctx->SetType(out_var_name, framework::proto::VarType::SELECTED_ROWS);
     } else {
       VLOG(3) << "lookup_table_grad op " << framework::GradVarName("W")
               << " is set to LoDTensor";
-      ctx.SetType(out_var_name, framework::proto::VarType::LOD_TENSOR);
+      ctx->SetType(out_var_name, framework::proto::VarType::LOD_TENSOR);
     }
-    ctx.SetDataType(out_var_name, ctx.GetDataType(ctx.Input("W")[0]));
+    ctx->SetDataType(out_var_name, ctx->GetDataType(ctx->Input("W")[0]));
   }
 };

@@ -60,9 +60,9 @@ class NCCLInitOp : public framework::OperatorBase {
 class NCCLInitOpVarTypeInference : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    auto out_var_name = ctx.Output("Communicator").front();
-    ctx.SetType(out_var_name, framework::proto::VarType::RAW);
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto out_var_name = ctx->Output("Communicator").front();
+    ctx->SetType(out_var_name, framework::proto::VarType::RAW);
   }
 };

@@ -237,21 +237,21 @@ class NCEOpGrad : public framework::OperatorWithKernel {
 class NCEOpGradVarTypeInference : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {
-    auto weight_grad = ctx.Output(framework::GradVarName("Weight")).front();
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto weight_grad = ctx->Output(framework::GradVarName("Weight")).front();
 
-    auto attr = ctx.GetAttr("is_sparse");
+    auto attr = ctx->GetAttr("is_sparse");
     bool is_sparse = boost::get<bool>(attr);
     if (is_sparse) {
       VLOG(3) << "nce_op_grad op " << weight_grad << " and "
              << " is set to SelectedRows";
-      ctx.SetType(weight_grad, framework::proto::VarType::SELECTED_ROWS);
+      ctx->SetType(weight_grad, framework::proto::VarType::SELECTED_ROWS);
     } else {
       VLOG(3) << "nce_op_grad op " << weight_grad << " and "
              << " is set to LoDTensor";
-      ctx.SetType(weight_grad, framework::proto::VarType::LOD_TENSOR);
+      ctx->SetType(weight_grad, framework::proto::VarType::LOD_TENSOR);
     }
-    ctx.SetDataType(weight_grad, ctx.GetDataType(ctx.Input("Input")[0]));
+    ctx->SetDataType(weight_grad, ctx->GetDataType(ctx->Input("Input")[0]));
   }
 };

@@ -37,7 +37,7 @@ class NgraphEngineOpMaker : public framework::OpProtoAndCheckerMaker {
 class NgraphEngineInferVarType : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {}
+  void operator()(framework::InferVarTypeContext *ctx) const override {}
 };
 
 }  // namespace operators

@@ -72,7 +72,7 @@ use L2 regularizers in case of using LARS.
 class LarsMomentumOpVarTypeInference : public framework::VarTypeInference {
  public:
-  void operator()(framework::InferVarTypeContext &ctx) const override {}
+  void operator()(framework::InferVarTypeContext* ctx) const override {}
 };
 }  // namespace operators
 }  // namespace paddle

Some files were not shown because too many files have changed in this diff.