From bdb47cd93396dc001f34ec20c5a5fa243af8c59f Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Sun, 8 Apr 2018 13:56:52 +0800
Subject: [PATCH 1/7] Add some comments for distribute_transpiler

---
 python/paddle/fluid/distribute_transpiler.py | 35 ++++++++++++++------
 1 file changed, 25 insertions(+), 10 deletions(-)

diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py
index 31bedb592f..5d052d71d7 100644
--- a/python/paddle/fluid/distribute_transpiler.py
+++ b/python/paddle/fluid/distribute_transpiler.py
@@ -102,6 +102,8 @@ def split_dense_variable(var_list,
         the parameter server side can gain better performance. By default
         minimum block size is 1024. The max block size is used to
         prevent very large blocks that may cause send error.
+    :return: A list of VarBlocks. Each VarBlock specifies a shard of
+        the var.
     """
     blocks = []
     for var in var_list:
@@ -192,22 +194,24 @@ class DistributeTranspiler:
         self.trainer_id = trainer_id
         pserver_endpoints = pservers.split(",")

-        # step1
+        # step1: For large parameters and gradients, split them into smaller
+        # blocks.
        param_list = [pg[0] for pg in params_grads]
         grad_list = [pg[1] for pg in params_grads]
         grad_blocks = split_dense_variable(grad_list, len(pserver_endpoints))
         param_blocks = split_dense_variable(param_list, len(pserver_endpoints))
-        # step2
+        # step2: Create new vars for the parameter and gradient blocks and
+        # add ops to do the split.
         grad_var_mapping = self._append_split_op(program, grad_blocks)
-        # step3
+        param_var_mapping = self._create_vars_from_blocklist(program,
+                                                             param_blocks)
+        # step3: Add gradients as send op inputs and parameters as send
+        # op outputs.
         send_inputs = []
         send_outputs = []
         for b in grad_blocks:  # append by order
             varname, block_id, _ = b.split(":")
             send_inputs.append(grad_var_mapping[varname][int(block_id)])
-
-        param_var_mapping = self._create_vars_from_blocklist(program,
-                                                             param_blocks)
         for b in param_blocks:
             varname, block_id, _ = b.split(":")
             send_outputs.append(param_var_mapping[varname][int(block_id)])
@@ -237,7 +241,7 @@ class DistributeTranspiler:
                  "RPCClient": rpc_client_var},
            attrs={"endpoints": pserver_endpoints,
                   "epmap": eplist})
-        # step4
+        # step4: Concat the parameter splits together after recv.
         for varname, splited_var in param_var_mapping.iteritems():
             if len(splited_var) <= 1:
                 continue
@@ -258,13 +262,14 @@ class DistributeTranspiler:
     def get_pserver_program(self, endpoint):
         """
         Get pserver side program using the endpoint.
+        TODO(panyx0718): Revisit this assumption. What if #blocks > #pservers.
         NOTE: assume blocks of the same variable is not distributed
         on the same pserver, only change param/grad varnames for
         trainers to fetch.
         """
         # step1
         pserver_program = Program()
-        # step2
+        # step2: Create vars to receive vars at parameter servers.
         recv_inputs = []
         for v in self.param_grad_ep_mapping[endpoint]["params"]:
             self._clone_var(pserver_program.global_block(), v)
@@ -278,6 +283,8 @@ class DistributeTranspiler:
                 orig_var_name = v.name[:suff_idx]
             else:
                 orig_var_name = v.name
+            #TODO(panyx0718): Should this be put in the else block below? It's
+            # only used there and it's called single_trainer_var.
             single_trainer_var = pserver_program.global_block().create_var(
                 name=orig_var_name,
                 persistable=True,
                 type=v.type,
                 dtype=v.dtype,
                 shape=v.shape)
@@ -344,7 +351,7 @@ class DistributeTranspiler:
                     self._append_pserver_non_opt_ops(block, op)
             append_block = optimize_block
-            # append lr decay ops to the child block if exits
+            # append lr decay ops to the child block if they exist
             lr_ops = self._get_lr_ops()
             if len(lr_ops) > 0:
                 for _, op in enumerate(lr_ops):
@@ -447,8 +454,10 @@ class DistributeTranspiler:
                                    block_list,
                                    add_trainer_suffix=False):
         """
+        Create vars for each split.
         NOTE: only grads need to be named for different trainers, use
               add_trainer_suffix to rename the grad vars.
+        :return: A dict mapping from original var name to each var split.
         """
         block_map = dict()
         var_mapping = dict()
@@ -615,6 +624,7 @@ class DistributeTranspiler:
             type="sum",
             inputs={"X": vars2merge},
             outputs={"Out": merged_var})
+        # TODO(panyx0718): What if it's SELECTED_ROWS.
         if not merged_var.type == core.VarDesc.VarType.SELECTED_ROWS:
             optimize_block.append_op(
                 type="scale",
@@ -638,7 +648,7 @@ class DistributeTranspiler:
                     shape=param_block.shape)
                 new_inputs[key] = tmpvar
             elif key == "LearningRate":
-                # leraning rate variable has already be created by non-optimize op,
+                # learning rate variable has already been created by non-optimize op,
                 # don't create it once again.
                 lr_varname = opt_op.input(key)[0]
                 if pserver_block.vars.has_key(lr_varname):
@@ -773,6 +783,7 @@ class DistributeTranspiler:
         return False

     def _get_input_map_from_op(self, varmap, op):
+        """Returns a dict from op input name to the vars in varmap."""
         iomap = dict()
         for key in op.input_names:
             vars = []
@@ -785,6 +796,7 @@ class DistributeTranspiler:
         return iomap

     def _get_output_map_from_op(self, varmap, op):
+        """Returns a dict from op output name to the vars in varmap."""
         iomap = dict()
         for key in op.output_names:
             vars = []
@@ -812,6 +824,9 @@ class DistributeTranspiler:
                     find_ops.append(op)
         # make a union find struct by the ops in default_main_program
         ufind = UnionFind(block.ops)
+
+        # TODO(panyx0718): If lr_ops connects with other training
+        # ops, could they be considered as lr_ops?
         for op1 in block.ops:
             for op2 in block.ops:
                 # NOTE: we need to skip all optimize ops, since it is connected

From 75c9eb11c4b1fb32b2e4cc1eaf45dd582074a3a5 Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Sun, 8 Apr 2018 20:31:04 -0700
Subject: [PATCH 2/7] refine

---
 python/paddle/fluid/distribute_transpiler.py | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py
index 5d052d71d7..4ea72a93c4 100644
--- a/python/paddle/fluid/distribute_transpiler.py
+++ b/python/paddle/fluid/distribute_transpiler.py
@@ -283,14 +283,6 @@ class DistributeTranspiler:
                 orig_var_name = v.name[:suff_idx]
             else:
                 orig_var_name = v.name
-            #TODO(panyx0718): Should this be put in the else block below? It's
-            # only used there and it's called single_trainer_var.
-            single_trainer_var = pserver_program.global_block().create_var(
-                name=orig_var_name,
-                persistable=True,
-                type=v.type,
-                dtype=v.dtype,
-                shape=v.shape)
             if self.trainers > 1:
                 for trainer_id in xrange(self.trainers):
                     var = pserver_program.global_block().create_var(
                         name="%s.trainer_%d" % (orig_var_name, trainer_id),
                         persistable=False,
                         type=v.type,
                         dtype=v.dtype,
                         shape=v.shape)
                     recv_inputs.append(var)
             else:
+                single_trainer_var = pserver_program.global_block().create_var(
+                    name=orig_var_name,
+                    persistable=True,
+                    type=v.type,
+                    dtype=v.dtype,
+                    shape=v.shape)
                 recv_inputs.append(single_trainer_var)

         # step3
@@ -825,8 +823,6 @@ class DistributeTranspiler:
         # make a union find struct by the ops in default_main_program
         ufind = UnionFind(block.ops)
-
-        # TODO(panyx0718): If lr_ops connects with other training
-        # ops, could they be considered as lr_ops?
         for op1 in block.ops:
             for op2 in block.ops:
                 # NOTE: we need to skip all optimize ops, since it is connected

From 29bc1c8178d3ae6dbdba95e28ae06b163857ed16 Mon Sep 17 00:00:00 2001
From: Yang Yu
Date: Mon, 9 Apr 2018 12:33:25 +0800
Subject: [PATCH 3/7] Rearrange parallel_executor.md

---
 .../concepts}/images/parallel_executor_overview.dot | 0
 .../concepts}/images/parallel_executor_overview.png | Bin
 doc/fluid/design/concepts/index_cn.rst              | 1 +
 doc/fluid/design/concepts/index_en.rst              | 1 +
 .../design/concepts}/parallel_executor.md           | 0
 5 files changed, 2 insertions(+)
 rename doc/{design => fluid/design/concepts}/images/parallel_executor_overview.dot (100%)
 rename doc/{design => fluid/design/concepts}/images/parallel_executor_overview.png (100%)
 rename doc/{design => fluid/design/concepts}/parallel_executor.md (100%)

diff --git a/doc/design/images/parallel_executor_overview.dot b/doc/fluid/design/concepts/images/parallel_executor_overview.dot
similarity index 100%
rename from doc/design/images/parallel_executor_overview.dot
rename to doc/fluid/design/concepts/images/parallel_executor_overview.dot
diff --git a/doc/design/images/parallel_executor_overview.png b/doc/fluid/design/concepts/images/parallel_executor_overview.png
similarity index 100%
rename from doc/design/images/parallel_executor_overview.png
rename to doc/fluid/design/concepts/images/parallel_executor_overview.png
diff --git a/doc/fluid/design/concepts/index_cn.rst b/doc/fluid/design/concepts/index_cn.rst
index eec8a2f14c..dcdc894937 100644
--- a/doc/fluid/design/concepts/index_cn.rst
+++ b/doc/fluid/design/concepts/index_cn.rst
@@ -16,3 +16,4 @@
    block.md
    scope.md
    executor.md
+   parallel_executor.md
diff --git a/doc/fluid/design/concepts/index_en.rst b/doc/fluid/design/concepts/index_en.rst
index 036e1da255..b85a305574 100644
--- a/doc/fluid/design/concepts/index_en.rst
+++ b/doc/fluid/design/concepts/index_en.rst
@@ -16,3 +16,4 @@ Core Concepts
    block.md
    scope.md
    executor.md
+   parallel_executor.md
diff --git a/doc/design/parallel_executor.md b/doc/fluid/design/concepts/parallel_executor.md
similarity index 100%
rename from doc/design/parallel_executor.md
rename to doc/fluid/design/concepts/parallel_executor.md

From 2e2726f176d44d8dbe5ff289b58d0df6ce8fc32e Mon Sep 17 00:00:00 2001
From: Abhinav Arora
Date: Sun, 8 Apr 2018 23:07:14 -0700
Subject: [PATCH 4/7] Fix cpplint issues in some operators

---
 paddle/fluid/operators/activation_mkldnn_op.cc   | 15 +++++++++------
 paddle/fluid/operators/activation_op.h           |  2 ++
 paddle/fluid/operators/adagrad_op.cc             |  1 +
 paddle/fluid/operators/array_operator.h          |  1 +
 paddle/fluid/operators/assign_value_op.cc        |  2 ++
 paddle/fluid/operators/assign_value_op.h         |  1 +
 paddle/fluid/operators/auc_op.cc                 |  1 +
 paddle/fluid/operators/auc_op.h                  | 15 +++++++++------
 paddle/fluid/operators/average_accumulates_op.cc | 10 +++++-----
 paddle/fluid/operators/average_accumulates_op.h  |  8 ++++----
 10 files changed, 35 insertions(+), 21 deletions(-)

diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc
index 6ff363d766..5b6390ab73 100644
--- a/paddle/fluid/operators/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/activation_mkldnn_op.cc
@@ -13,8 +13,8 @@ limitations under the License. */

 #include "mkldnn.hpp"
-#include "mkldnn_activation_op.h"
 #include "paddle/fluid/operators/activation_op.h"
+#include "paddle/fluid/operators/mkldnn_activation_op.h"

 namespace paddle {
 namespace operators {
@@ -50,8 +50,10 @@ void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm,
                                          mkldnn::memory::format::nchw);

   // create memory primitives
-  auto src_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)src_data);
-  auto dst_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)dst_data);
+  auto src_memory =
+      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(src_data));
+  auto dst_memory =
+      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(dst_data));

   auto forward_desc = mkldnn::eltwise_forward::desc(
       mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta);
@@ -95,11 +97,12 @@ void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm,
                                          mkldnn::memory::format::nchw);

   // create memory primitives
-  auto src_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)src);
+  auto src_memory =
+      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(src));
   auto diff_src_memory =
-      mkldnn::memory({data_md, mkldnn_engine}, (void *)diff_src);
+      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(diff_src));
   auto diff_dst_memory =
-      mkldnn::memory({data_md, mkldnn_engine}, (void *)diff_dst);
+      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(diff_dst));

   auto backward_desc =
       mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, alpha, beta);
diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index 7fbe4efc04..c4efbcd3f9 100644
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
+#include <utility>
+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/detail/safe_ref.h"
diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc
index c990fe7843..0153e1253b 100644
--- a/paddle/fluid/operators/adagrad_op.cc
+++ b/paddle/fluid/operators/adagrad_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/adagrad_op.h"
+#include <vector>

 #include <cmath>
diff --git a/paddle/fluid/operators/array_operator.h b/paddle/fluid/operators/array_operator.h
index dbcc7abb09..4309f0a549 100644
--- a/paddle/fluid/operators/array_operator.h
+++ b/paddle/fluid/operators/array_operator.h
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
+#include <vector>
 #include "paddle/fluid/framework/lod_tensor_array.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/platform/device_context.h"
diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc
index e8123cb1a4..993610fded 100644
--- a/paddle/fluid/operators/assign_value_op.cc
+++ b/paddle/fluid/operators/assign_value_op.cc
@@ -13,6 +13,8 @@
 // limitations under the License.

 #include "paddle/fluid/operators/assign_value_op.h"
+#include <string>
+#include <vector>

 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/assign_value_op.h b/paddle/fluid/operators/assign_value_op.h
index c7b1a55a5c..e749d6f6d3 100644
--- a/paddle/fluid/operators/assign_value_op.h
+++ b/paddle/fluid/operators/assign_value_op.h
@@ -14,6 +14,7 @@

 #pragma once

+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/platform/enforce.h"
diff --git a/paddle/fluid/operators/auc_op.cc b/paddle/fluid/operators/auc_op.cc
index 71de78b118..a168eaeab5 100644
--- a/paddle/fluid/operators/auc_op.cc
+++ b/paddle/fluid/operators/auc_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/auc_op.h"
+#include <string>

 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/auc_op.h b/paddle/fluid/operators/auc_op.h
index f4e8208c3f..8b016c3d31 100644
--- a/paddle/fluid/operators/auc_op.h
+++ b/paddle/fluid/operators/auc_op.h
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
+#include <string>
+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
@@ -40,7 +42,7 @@ class AucKernel : public framework::OpKernel<T> {
     std::vector<float> thresholds_list;
     thresholds_list.reserve(num_thresholds);
     for (int i = 1; i < num_thresholds - 1; i++) {
-      thresholds_list[i] = (float)i / (num_thresholds - 1);
+      thresholds_list[i] = static_cast<float>(i) / (num_thresholds - 1);
     }
     const float kEpsilon = 1e-7;
     thresholds_list[0] = 0.0f - kEpsilon;
@@ -105,11 +107,12 @@ class AucKernel : public framework::OpKernel<T> {
     float* fp_rate_data = fp_rate.mutable_data<float>(ctx.GetPlace());
     float* rec_rate_data = rec_rate.mutable_data<float>(ctx.GetPlace());
     for (int i = 0; i < num_thresholds; i++) {
-      tp_rate_data[i] =
-          ((float)tp_data[i] + epsilon) / (tp_data[i] + fn_data[i] + epsilon);
-      fp_rate_data[i] = (float)fp_data[i] / (fp_data[i] + tn_data[i] + epsilon);
-      rec_rate_data[i] =
-          ((float)tp_data[i] + epsilon) / (tp_data[i] + fp_data[i] + epsilon);
+      tp_rate_data[i] = (static_cast<float>(tp_data[i]) + epsilon) /
+                        (tp_data[i] + fn_data[i] + epsilon);
+      fp_rate_data[i] =
+          static_cast<float>(fp_data[i]) / (fp_data[i] + tn_data[i] + epsilon);
+      rec_rate_data[i] = (static_cast<float>(tp_data[i]) + epsilon) /
+                        (tp_data[i] + fp_data[i] + epsilon);
     }
     *auc_data = 0.0f;
     if (curve == "ROC") {
diff --git a/paddle/fluid/operators/average_accumulates_op.cc b/paddle/fluid/operators/average_accumulates_op.cc
index c95077fcbd..b21deaf925 100644
--- a/paddle/fluid/operators/average_accumulates_op.cc
+++ b/paddle/fluid/operators/average_accumulates_op.cc
@@ -19,15 +19,15 @@ namespace operators {

 template <>
 void GetAccumulators<paddle::platform::CPUDeviceContext>(
-    const framework::ExecutionContext& ctx, int64_t& num_updates_,
-    int64_t& num_accumulates_, int64_t& old_num_accumulates_) {
+    const framework::ExecutionContext& ctx, int64_t* num_updates_,
+    int64_t* num_accumulates_, int64_t* old_num_accumulates_) {
   auto* in_old_num_accumulates = ctx.Input<Tensor>("in_old_num_accumulates");
   auto* in_num_accumulates = ctx.Input<Tensor>("in_num_accumulates");
   auto* in_num_updates = ctx.Input<Tensor>("in_num_updates");

-  old_num_accumulates_ = in_old_num_accumulates->data<int64_t>()[0];
-  num_accumulates_ = in_num_accumulates->data<int64_t>()[0];
-  num_updates_ = in_num_updates->data<int64_t>()[0];
+  *old_num_accumulates_ = in_old_num_accumulates->data<int64_t>()[0];
+  *num_accumulates_ = in_num_accumulates->data<int64_t>()[0];
+  *num_updates_ = in_num_updates->data<int64_t>()[0];
 }

 template <>
diff --git a/paddle/fluid/operators/average_accumulates_op.h b/paddle/fluid/operators/average_accumulates_op.h
index f858109d14..07ac5ced11 100644
--- a/paddle/fluid/operators/average_accumulates_op.h
+++ b/paddle/fluid/operators/average_accumulates_op.h
@@ -29,8 +29,8 @@ using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

 template <typename DeviceContext>
 void GetAccumulators(const framework::ExecutionContext& ctx,
-                     int64_t& num_updates, int64_t& num_accumulates,
-                     int64_t& old_num_accumulates);
+                     int64_t* num_updates, int64_t* num_accumulates,
+                     int64_t* old_num_accumulates);

 template <typename DeviceContext>
 void SetAccumulators(const framework::ExecutionContext& ctx,
@@ -47,8 +47,8 @@ class AverageAccumulatesKernel : public framework::OpKernel<T> {
     int64_t num_updates = 0;
     int64_t num_accumulates = 0;
     int64_t old_num_accumulates = 0;
-    GetAccumulators<DeviceContext>(ctx, num_updates, num_accumulates,
-                                   old_num_accumulates);
+    GetAccumulators<DeviceContext>(ctx, &num_updates, &num_accumulates,
+                                   &old_num_accumulates);

     // Get attrs
     float average_window = ctx.Attr<float>("average_window");

From 11487de9eb242c8216fb70e29617a067cc3df3ed Mon Sep 17 00:00:00 2001
From: Abhinav Arora
Date: Sun, 8 Apr 2018 23:18:22 -0700
Subject: [PATCH 5/7] Resolve conflict

---
 .../fluid/operators/activation_mkldnn_op.cc | 35 ++++++++++++++-------
 1 file changed, 22 insertions(+), 13 deletions(-)

diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc
index 5b6390ab73..ab7c612271 100644
--- a/paddle/fluid/operators/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/activation_mkldnn_op.cc
@@ -40,20 +40,24 @@ void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm,
   const T *dst_data = dst->template mutable_data<T>(ctx.GetPlace());

   // get memory dim
-  PADDLE_ENFORCE(src->dims().size() == 4,
-                 "Input dim must be with 4, i.e. NCHW");
+  PADDLE_ENFORCE(src->dims().size() == 2 || src->dims().size() == 4,
+                 "Input dim must be with 2 or 4");
   std::vector<int> src_tz = framework::vectorize2int(src->dims());

   // create memory description
-  // TODO(kbinias-intel): support more formats
-  auto data_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                         mkldnn::memory::format::nchw);
+  auto data_md = src_tz.size() == 2
+                     ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
+                                               mkldnn::memory::format::nc)
+                     : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
+                                               mkldnn::memory::format::nchw);

   // create memory primitives
   auto src_memory =
-      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(src_data));
+      mkldnn::memory({data_md, mkldnn_engine},
+                     static_cast<void *>(const_cast<T *>(src_data)));
   auto dst_memory =
-      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(dst_data));
+      mkldnn::memory({data_md, mkldnn_engine},
+                     static_cast<void *>(const_cast<T *>(dst_data)));

   auto forward_desc = mkldnn::eltwise_forward::desc(
       mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta);
@@ -93,16 +97,21 @@ void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm,
   std::vector<int> src_tz = framework::vectorize2int(x->dims());

   // create memory description
-  auto data_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                         mkldnn::memory::format::nchw);
+  auto data_md = src_tz.size() == 2
+                     ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
+                                               mkldnn::memory::format::nc)
+                     : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
+                                               mkldnn::memory::format::nchw);

   // create memory primitives
-  auto src_memory =
-      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(src));
+  auto src_memory = mkldnn::memory(
+      {data_md, mkldnn_engine}, static_cast<void *>(const_cast<T *>(src)));
   auto diff_src_memory =
-      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(diff_src));
+      mkldnn::memory({data_md, mkldnn_engine},
+                     static_cast<void *>(const_cast<T *>(diff_src)));
   auto diff_dst_memory =
-      mkldnn::memory({data_md, mkldnn_engine}, static_cast<void *>(diff_dst));
+      mkldnn::memory({data_md, mkldnn_engine},
+                     static_cast<void *>(const_cast<T *>(diff_dst)));

   auto backward_desc =
       mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, alpha, beta);

From 981d7d01f37d63d449853e13c3cdd45c0e664edd Mon Sep 17 00:00:00 2001
From: Abhinav Arora
Date: Sun, 8 Apr 2018 23:37:13 -0700
Subject: [PATCH 6/7] Fix CPPLint issues in spp_op, sum_op, topk_op,
 transpose_op, unpool_op and warpctc_op

---
 paddle/fluid/operators/spp_op.cc       | 2 ++
 paddle/fluid/operators/spp_op.h        | 2 ++
 paddle/fluid/operators/sum_op.cc       | 2 ++
 paddle/fluid/operators/sum_op.h        | 1 +
 paddle/fluid/operators/top_k_op.h      | 2 ++
 paddle/fluid/operators/transpose_op.cc | 1 +
 paddle/fluid/operators/transpose_op.h  | 1 +
 paddle/fluid/operators/unpool_op.cc    | 2 ++
 paddle/fluid/operators/unpool_op.h     | 2 ++
 paddle/fluid/operators/warpctc_op.h    | 1 +
 10 files changed, 16 insertions(+)

diff --git a/paddle/fluid/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc
index f1c4415f27..8c55b4ebbc 100644
--- a/paddle/fluid/operators/spp_op.cc
+++ b/paddle/fluid/operators/spp_op.cc
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/spp_op.h"
+#include <string>
+#include <vector>
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/spp_op.h b/paddle/fluid/operators/spp_op.h
index 3d2f226325..08cb7849d2 100644
--- a/paddle/fluid/operators/spp_op.h
+++ b/paddle/fluid/operators/spp_op.h
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
+#include <string>
+#include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/operators/math/pooling.h"
diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc
index d3d5c8a342..9061e137bd 100644
--- a/paddle/fluid/operators/sum_op.cc
+++ b/paddle/fluid/operators/sum_op.cc
@@ -10,6 +10,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/sum_op.h"
+#include <algorithm>
+#include <string>
 #include <vector>
 #include "paddle/fluid/framework/var_type_inference.h"
 #include "paddle/fluid/operators/detail/safe_ref.h"
diff --git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h
index e7e5346cdc..49a4afb3a8 100644
--- a/paddle/fluid/operators/sum_op.h
+++ b/paddle/fluid/operators/sum_op.h
@@ -10,6 +10,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/lod_tensor_array.h"
 #include "paddle/fluid/framework/op_registry.h"
diff --git a/paddle/fluid/operators/top_k_op.h b/paddle/fluid/operators/top_k_op.h
index 42828b7e65..9f8482aded 100644
--- a/paddle/fluid/operators/top_k_op.h
+++ b/paddle/fluid/operators/top_k_op.h
@@ -15,6 +15,8 @@ limitations under the License. */
 #pragma once
 #include <algorithm>
 #include <iostream>
+#include <utility>
+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index 87b1f530e0..4aea9cd65b 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/transpose_op.h"
+#include <vector>

 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/transpose_op.h b/paddle/fluid/operators/transpose_op.h
index 90f16499a6..895d1ce2cc 100644
--- a/paddle/fluid/operators/transpose_op.h
+++ b/paddle/fluid/operators/transpose_op.h
@@ -14,6 +14,7 @@ limitations under the License. */

 #pragma once

+#include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc
index 0ca7ea00fa..31859fd1d7 100644
--- a/paddle/fluid/operators/unpool_op.cc
+++ b/paddle/fluid/operators/unpool_op.cc
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/unpool_op.h"
+#include <string>
+#include <vector>
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/unpool_op.h b/paddle/fluid/operators/unpool_op.h
index a442104575..96abad3de9 100644
--- a/paddle/fluid/operators/unpool_op.h
+++ b/paddle/fluid/operators/unpool_op.h
@@ -14,6 +14,8 @@ limitations under the License. */

 #pragma once

+#include <string>
+#include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/operators/math/unpooling.h"
diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h
index 3e3e308931..afbfe69973 100644
--- a/paddle/fluid/operators/warpctc_op.h
+++ b/paddle/fluid/operators/warpctc_op.h
@@ -14,6 +14,7 @@ limitations under the License. */

 #pragma once

+#include <vector>
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/operators/math/sequence_padding.h"

From 1ea4490b14cef2004a9b67eae7fbc5df4fa6e430 Mon Sep 17 00:00:00 2001
From: Abhinav Arora
Date: Sun, 8 Apr 2018 23:46:18 -0700
Subject: [PATCH 7/7] Resolve cuda compilation error

---
 paddle/fluid/operators/average_accumulates_op.cu | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/paddle/fluid/operators/average_accumulates_op.cu b/paddle/fluid/operators/average_accumulates_op.cu
index 270c469844..046f72b471 100644
--- a/paddle/fluid/operators/average_accumulates_op.cu
+++ b/paddle/fluid/operators/average_accumulates_op.cu
@@ -19,18 +19,18 @@ namespace paddle {
 namespace operators {
 template <>
 void GetAccumulators<paddle::platform::CUDADeviceContext>(
-    const framework::ExecutionContext& ctx, int64_t& num_updates_,
-    int64_t& num_accumulates_, int64_t& old_num_accumulates_) {
+    const framework::ExecutionContext& ctx, int64_t* num_updates_,
+    int64_t* num_accumulates_, int64_t* old_num_accumulates_) {
   auto* in_old_num_accumulates = ctx.Input<Tensor>("in_old_num_accumulates");
   auto* in_num_accumulates = ctx.Input<Tensor>("in_num_accumulates");
   auto* in_num_updates = ctx.Input<Tensor>("in_num_updates");
   auto stream = ctx.cuda_device_context().stream();
-  memory::Copy(platform::CPUPlace(), &old_num_accumulates_,
+  memory::Copy(platform::CPUPlace(), old_num_accumulates_,
                platform::CUDAPlace(), in_old_num_accumulates->data<int64_t>(),
                sizeof(int64_t), stream);
-  memory::Copy(platform::CPUPlace(), &num_accumulates_, platform::CUDAPlace(),
+  memory::Copy(platform::CPUPlace(), num_accumulates_, platform::CUDAPlace(),
                in_num_accumulates->data<int64_t>(), sizeof(int64_t), stream);
-  memory::Copy(platform::CPUPlace(), &num_updates_, platform::CUDAPlace(),
+  memory::Copy(platform::CPUPlace(), num_updates_, platform::CUDAPlace(),
                in_num_updates->data<int64_t>(), sizeof(int64_t), stream);
 }
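
Note on the recurring signature change in patches 4 and 7: cpplint's runtime/references check (from the Google C++ style guide) requires output parameters to be pointers rather than non-const references, so that mutation is visible at the call site, and patch 7 simply propagates the corrected signature to the CUDA specialization. Below is a minimal, self-contained sketch of that convention; the names are hypothetical and are not taken from the Paddle codebase:

    #include <cstdint>
    #include <iostream>

    // Output parameters are pointers, not non-const references: the caller
    // must write &x, which makes the mutation obvious at the call site.
    void GetCounters(int64_t* num_updates, int64_t* num_accumulates) {
      *num_updates = 42;     // write through the pointer
      *num_accumulates = 7;
    }

    int main() {
      int64_t updates = 0;
      int64_t accumulates = 0;
      GetCounters(&updates, &accumulates);  // the & signals these will change
      std::cout << updates << " " << accumulates << "\n";
      return 0;
    }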