From d5ee580c5ca97ba21020c6ff83ba6ecb2f0aa79c Mon Sep 17 00:00:00 2001
From: xjqbest <173596896@qq.com>
Date: Tue, 9 Apr 2019 20:46:04 +0800
Subject: [PATCH 01/38] move split filelist from trainer.py to fleet & fix
 error test=develop

---
 .../fluid/incubate/fleet/base/role_maker.py   |  2 +-
 .../fleet/parameter_server/__init__.py        | 30 +++++++++++++++++++
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/python/paddle/fluid/incubate/fleet/base/role_maker.py b/python/paddle/fluid/incubate/fleet/base/role_maker.py
index 528f7b3269..506a38059c 100644
--- a/python/paddle/fluid/incubate/fleet/base/role_maker.py
+++ b/python/paddle/fluid/incubate/fleet/base/role_maker.py
@@ -128,7 +128,7 @@ class MPIRoleMaker(RoleMakerBase):
         """
         finalize the current MPI instance.
         """
-        self.comm_.finalize()
+        pass
 
 
 class MPISymetricRoleMaker(MPIRoleMaker):

diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
index 9b1ec412c7..1c49ea1f55 100644
--- a/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
+++ b/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
@@ -241,6 +241,35 @@ class Fleet(object):
         """
         self._fleet_ptr.save_model(save_path)
 
+    def split_filelist(self, filelist):
+        """
+        split filelist before distributed training,
+        for example, filelist is [a, b, c, d, e] and trainer_num = 2,
+        then trainer 0 gets [a, b, c] and trainer 1 gets [d, e]
+
+        Args:
+            filelist(list): list of filename, can be local or hdfs/afs.
+
+        Returns: list of filename which belongs to this trainer.
+        """
+        file_num = len(filelist)
+        trainer_id = self.get_worker_index()
+        trainer_num = self.get_worker_num()
+        if trainer_num > file_num:
+            raise ValueError(
+                "trainer_num should be <= file_num : "
+                "%s > %s" % (trainer_num, file_num)
+            )
+        # get interval of filelist, it's [ )
+        start = 0
+        end = 0
+        for i in range(0, trainer_id + 1):
+            length = file_num / trainer_num + (i < (file_num % trainer_num))
+            start = end
+            end += length
+        myfilelist = filelist[start : end]
+        return myfilelist
+
     def _set_opt_info(self, opt_info):
         """
         this function saves the result from DistributedOptimizer.minimize()
@@ -337,3 +366,4 @@ save_pserver_model = fleet_instance.save_pserver_model
 worker_num = fleet_instance.get_worker_num
 server_num = fleet_instance.get_server_num
 worker_index = fleet_instance.get_worker_index
+split_filelist = fleet_instance.split_filelist
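A minimal standalone sketch of the contiguous split that the split_filelist added above performs: every trainer gets file_num / trainer_num files, and the first file_num % trainer_num trainers get one extra file. The function name is illustrative only, not part of the patch; the sketch assumes Python 3 and therefore uses integer division (//), while the patch targets Python 2, where / on ints already truncates.

    def split_filelist_sketch(filelist, trainer_id, trainer_num):
        # The interval handed to each trainer is half-open: [start, end)
        file_num = len(filelist)
        if trainer_num > file_num:
            raise ValueError("trainer_num should be <= file_num : "
                             "%s > %s" % (trainer_num, file_num))
        start = end = 0
        for i in range(trainer_id + 1):
            # (i < remainder) coerces to 1/0, handing the remainder files
            # to the first (file_num % trainer_num) trainers.
            length = file_num // trainer_num + (i < (file_num % trainer_num))
            start = end
            end += length
        return filelist[start:end]

    # split_filelist_sketch(["a", "b", "c", "d", "e"], 0, 2) -> ["a", "b", "c"]
    # split_filelist_sketch(["a", "b", "c", "d", "e"], 1, 2) -> ["d", "e"]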
From 19381329365a8118cc70451aa58772afee9d9f35 Mon Sep 17 00:00:00 2001
From: xujiaqi01
Date: Tue, 9 Apr 2019 22:37:25 +0800
Subject: [PATCH 02/38] fix code style test=develop

---
 .../fluid/incubate/fleet/parameter_server/__init__.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
index 1c49ea1f55..e976adffa0 100644
--- a/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
+++ b/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
@@ -256,10 +256,8 @@ class Fleet(object):
         trainer_id = self.get_worker_index()
         trainer_num = self.get_worker_num()
         if trainer_num > file_num:
-            raise ValueError(
-                "trainer_num should be <= file_num : "
-                "%s > %s" % (trainer_num, file_num)
-            )
+            raise ValueError("trainer_num should be <= file_num : "
+                             "%s > %s" % (trainer_num, file_num))
         # get interval of filelist, it's [ )
         start = 0
         end = 0
@@ -267,7 +265,7 @@ class Fleet(object):
             length = file_num / trainer_num + (i < (file_num % trainer_num))
             start = end
             end += length
-        myfilelist = filelist[start : end]
+        myfilelist = filelist[start:end]
         return myfilelist
 
     def _set_opt_info(self, opt_info):

From 1c0ef929f9eba25e18cac79d00f4946baa5d6e20 Mon Sep 17 00:00:00 2001
From: xjqbest <173596896@qq.com>
Date: Tue, 9 Apr 2019 22:41:01 +0800
Subject: [PATCH 03/38] fix code style test=develop

---
 .../paddle/fluid/incubate/fleet/parameter_server/__init__.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
index e976adffa0..926dd3649b 100644
--- a/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
+++ b/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
@@ -250,7 +250,8 @@ class Fleet(object):
         Args:
             filelist(list): list of filename, can be local or hdfs/afs.
 
-        Returns: list of filename which belongs to this trainer.
+        Returns:
+            list of filename which belongs to this trainer.
         """
         file_num = len(filelist)

From e784884e70ecba4a419db785579a579ef31c2208 Mon Sep 17 00:00:00 2001
From: xjqbest <173596896@qq.com>
Date: Wed, 10 Apr 2019 11:13:51 +0800
Subject: [PATCH 04/38] add Example in doc string of split_filelist
 test=develop

---
 .../fluid/incubate/fleet/parameter_server/__init__.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
index 926dd3649b..5eefb6e94d 100644
--- a/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
+++ b/python/paddle/fluid/incubate/fleet/parameter_server/__init__.py
@@ -247,6 +247,12 @@ class Fleet(object):
         split filelist before distributed training,
         for example, filelist is [a, b, c, d, e] and trainer_num = 2,
         then trainer 0 gets [a, b, c] and trainer 1 gets [d, e]
 
+        Example:
+            >>> all_filelist = ["a.txt", "b.txt", "c.txt"]
+            >>> my_filelist = fleet.split_filelist(all_filelist)
+            >>> dataset = fluid.DatasetFactory().create_dataset()
+            >>> dataset.set_filelist(my_filelist)
+
         Args:
             filelist(list): list of filename, can be local or hdfs/afs.
 
@@ -266,8 +272,8 @@ class Fleet(object):
             length = file_num / trainer_num + (i < (file_num % trainer_num))
             start = end
             end += length
-        myfilelist = filelist[start:end]
-        return myfilelist
+        my_filelist = filelist[start:end]
+        return my_filelist
 
     def _set_opt_info(self, opt_info):

From 20e304f2aedd5a09b260f593408057217a806e94 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Thu, 11 Apr 2019 09:56:52 +0800
Subject: [PATCH 05/38] Tracer does not hold op any more test=develop

---
 python/paddle/fluid/dygraph/tracer.py | 37 +++++++++++++++++-----
 python/paddle/fluid/framework.py      | 45 +++++++++++----------------
 2 files changed, 48 insertions(+), 34 deletions(-)

diff --git a/python/paddle/fluid/dygraph/tracer.py b/python/paddle/fluid/dygraph/tracer.py
index 94e212b139..e5e715bcdc 100644
--- a/python/paddle/fluid/dygraph/tracer.py
+++ b/python/paddle/fluid/dygraph/tracer.py
@@ -24,7 +24,7 @@ __all__ = ['Tracer']
 
 
 def release_op(op):
-    del framework._dygraph_tracer()._ops[op._trace_id]
+    del framework._dygraph_tracer()._ops[op._trace_id].inputs
 
 
 class Tracer(core.Tracer):
@@ -46,11 +46,34 @@ class Tracer(core.Tracer):
         return list((item for name, item in six.iteritems(self._vars)
                      if isinstance(item, framework.Parameter)))
 
-    def trace_op(self, op, stop_gradient=False):
+    def trace_op(self, op, inputs, outputs, stop_gradient=False):
+        # TODO(minqiyang): remove this line after we take apart all
+        # backward grads and forward variables
+        op.inputs = inputs
+        inps = defaultdict(list)
+        for k, vars in six.iteritems(inputs):
+            if isinstance(vars, framework.Variable):
+                op.previous_ops.append(vars.op)
+                inps[k].append(vars._ivar)
+            elif isinstance(vars, list) or isinstance(vars, tuple):
+                for var in vars:
+                    op.previous_ops.append(var.op)
+                    inps[k].append(var._ivar)
+
+        outs = defaultdict(list)
+        for k, vars in six.iteritems(outputs):
+            if isinstance(vars, framework.Variable):
+                vars.op = op
+                outs[k].append(vars._ivar)
+            elif isinstance(vars, list) or isinstance(vars, tuple):
+                for var in vars:
+                    var.op = op
+                    outs[k].append(var._ivar)
+
         # record op's trace id
         op.iop._trace_id = self._trace_id
 
-        backward_refs = self.trace(op.iop, op.inputs, op.outputs, op.attrs,
+        backward_refs = self.trace(op.iop, inps, outs, op.attrs,
                                    framework._current_expected_place(),
                                    stop_gradient)
 
@@ -65,10 +88,10 @@ class Tracer(core.Tracer):
             # TODO(minqiyang): remove all inputs and outputs after separate
             # var and grad
             op.backward_refs = defaultdict(list)
-            for k, v in six.iteritems(op.inputs):
+            for k, v in six.iteritems(inputs):
                 if k in backward_refs:
-                    op.backward_refs[k] = op.inputs[k]
+                    op.backward_refs[k] = inputs[k]
 
-            for k, v in six.iteritems(op.outputs):
+            for k, v in six.iteritems(outputs):
                 if k in backward_refs:
-                    op.backward_refs[k] = op.outputs[k]
+                    op.backward_refs[k] = outputs[k]

diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index c05e5fb9e3..a29db04900 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -411,6 +411,7 @@ class Variable(object):
                 if persistable else False)
             if persistable:
                 _dygraph_tracer().trace_var(name, self)
+            self.op = None
         else:
             self.error_clip = error_clip
@@ -939,26 +940,9 @@ class Operator(object):
                 raise ValueError(
                     "`type` to initialized an Operator can not be None.")
             self.iop = core.OpBase(type)
+            self.previous_ops = []
 
-            # TODO(minqiyang): remove these lines after we take apart all
-            # backward grads and forward variables
-            self.inputs = defaultdict(list)
-            if inputs is not None:
-                for k, v in six.iteritems(inputs):
-                    if isinstance(v, Variable):
-                        self.inputs[k].append(v._ivar)
-                    elif isinstance(v, list) or isinstance(v, tuple):
-                        self.inputs[k].extend([var._ivar for var in v])
-
-            self.outputs = defaultdict(list)
-            if outputs is not None:
-                for k, v in six.iteritems(outputs):
-                    if isinstance(v, Variable):
-                        self.outputs[k].append(v._ivar)
-                    elif isinstance(v, list) or isinstance(v, tuple):
-                        self.outputs[k].extend([var._ivar for var in v])
-
-            self.attrs = attrs if attrs else {}
+            self.attrs = attrs
         else:
             self.block = block
             self.desc = desc
@@ -1647,15 +1631,18 @@ class Block(object):
                 block=self,
                 desc=None,
                 type=kwargs.get("type", None),
-                inputs=kwargs.get("inputs", None),
-                outputs=kwargs.get("outputs", None),
-                attrs=kwargs.get("attrs", None))
+                inputs=None,
+                outputs=None,
+                attrs=kwargs.get("attrs", {}))
 
             # record ops in tracer rather than blocks
             #
             # TODO(minqiyang): add op stop_gradient support in static mode too.
            # currently, we only support stop_gradient in dygraph mode.
-            _dygraph_tracer().trace_op(op, kwargs.get("stop_gradient", False))
+            _dygraph_tracer().trace_op(op,
+                                       kwargs.get("inputs", {}),
+                                       kwargs.get("outputs", {}),
+                                       kwargs.get("stop_gradient", False))
         else:
             op_desc = self.desc.append_op()
             op = Operator(
@@ -1719,10 +1706,14 @@ class Block(object):
                 self,
                 None,
                 type=kwargs.get("type", None),
-                inputs=kwargs.get("inputs", None),
-                outputs=kwargs.get("outputs", None),
-                attrs=kwargs.get("attrs", None))
-            _dygraph_tracer().trace_op(op, kwargs.get("stop_gradient", False))
+                inputs=None,
+                outputs=None,
+                attrs=kwargs.get("attrs", {}))
+
+            _dygraph_tracer().trace_op(op,
+                                       kwargs.get("inputs", {}),
+                                       kwargs.get("outputs", {}),
+                                       kwargs.get("stop_gradient", False))
         else:
             op_desc = self.desc._prepend_op()
             op = Operator(

From 734260f47b5df3dbc16d880cc25efdf7aac01700 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Thu, 11 Apr 2019 11:01:11 +0800
Subject: [PATCH 06/38] Make Op Hold Output too and release backward_refs each
 time test=develop

---
 python/paddle/fluid/dygraph/tracer.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/python/paddle/fluid/dygraph/tracer.py b/python/paddle/fluid/dygraph/tracer.py
index e5e715bcdc..ad938188bf 100644
--- a/python/paddle/fluid/dygraph/tracer.py
+++ b/python/paddle/fluid/dygraph/tracer.py
@@ -25,6 +25,8 @@ __all__ = ['Tracer']
 
 def release_op(op):
     del framework._dygraph_tracer()._ops[op._trace_id].inputs
+    del framework._dygraph_tracer()._ops[op._trace_id].outputs
+    del framework._dygraph_tracer()._ops[op._trace_id].backward_refs
 
 
 class Tracer(core.Tracer):
@@ -60,6 +62,7 @@ class Tracer(core.Tracer):
                     op.previous_ops.append(var.op)
                     inps[k].append(var._ivar)
 
+        op.outputs = outputs
         outs = defaultdict(list)
         for k, vars in six.iteritems(outputs):
             if isinstance(vars, framework.Variable):
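Stripped of framework detail, the flattening that the reworked trace_op in these two patches performs on its inputs/outputs dicts can be sketched on its own. This is an illustrative standalone sketch, not Paddle API; DummyVar imitates only the _ivar attribute of framework.Variable.

    from collections import defaultdict

    class DummyVar(object):
        def __init__(self, ivar):
            self._ivar = ivar

    def flatten_to_ivars(var_map):
        # {slot: Variable or list/tuple of Variables} -> {slot: [ivar, ...]},
        # mirroring how trace_op builds the inps/outs dicts it passes to the
        # C++ tracer, so the Python-side op no longer has to hold them.
        flat = defaultdict(list)
        for slot, variables in var_map.items():
            if not isinstance(variables, (list, tuple)):
                variables = [variables]
            for var in variables:
                flat[slot].append(var._ivar)
        return flat

    # flatten_to_ivars({"X": DummyVar(1), "Y": [DummyVar(2), DummyVar(3)]})
    # -> {"X": [1], "Y": [2, 3]}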
self.dataset.local_shuffle()
@@ -235,6 +236,7 @@ class InMemoryDataset(DatasetBase):
             >>> dataset = fluid.DatasetFactory.create_dataset("InMemoryDataset")
             >>> filelist = ["a.txt", "b.txt"]
             >>> dataset.set_filelist(filelist)
+            >>> dataset.load_into_memory()
             >>> dataset.global_shuffle(fleet)
 
         Args:
@@ -254,6 +256,24 @@ class InMemoryDataset(DatasetBase):
         if fleet is not None:
             fleet.fleet_instance.role_maker_._barrier_worker()
 
+    def release_memory(self):
+        """
+        Release InMemoryDataset memory data, when data will not be used again.
+
+        Example:
+            >>> import paddle.fluid as fluid
+            >>> import paddle.fluid.incubate.fleet.parameter_server as fleet
+            >>> dataset = fluid.DatasetFactory.create_dataset("InMemoryDataset")
+            >>> filelist = ["a.txt", "b.txt"]
+            >>> dataset.set_filelist(filelist)
+            >>> dataset.load_into_memory()
+            >>> dataset.global_shuffle(fleet)
+            >>> exe = fluid.Executor(fluid.CPUPlace())
+            >>> exe.run(fluid.default_startup_program())
+            >>> exe.train_from_dataset(fluid.default_main_program(), dataset)
+            >>> dataset.release_memory()
+        """
+        self.dataset.release_memory()
 
 class QueueDataset(DatasetBase):
     """

From 0ef19e2d4dcba951382411130d99a80d4ba6c03e Mon Sep 17 00:00:00 2001
From: xjqbest <173596896@qq.com>
Date: Thu, 11 Apr 2019 16:53:23 +0800
Subject: [PATCH 08/38] fix code style

---
 python/paddle/fluid/dataset.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/paddle/fluid/dataset.py b/python/paddle/fluid/dataset.py
index 25127bc764..9e899a2a6c 100644
--- a/python/paddle/fluid/dataset.py
+++ b/python/paddle/fluid/dataset.py
@@ -275,6 +275,7 @@ class InMemoryDataset(DatasetBase):
         """
         self.dataset.release_memory()
 
+
 class QueueDataset(DatasetBase):
     """
     QueueDataset, it will process data in a streaming way.

From bbfc82cc42289216a9a5970b36925ccc1ba422db Mon Sep 17 00:00:00 2001
From: phlrain
Date: Thu, 11 Apr 2019 12:15:09 +0000
Subject: [PATCH 09/38] softmax cross entropy support high rank test=develop

---
 .../softmax_with_cross_entropy_op.cc          | 67 +++++++++----
 .../softmax_with_cross_entropy_op.cu          | 40 ++++++--
 .../operators/softmax_with_cross_entropy_op.h | 32 +++++--
 ...st_sigmoid_cross_entropy_with_logits_op.py | 93 +++++++++++++++++++
 4 files changed, 196 insertions(+), 36 deletions(-)

diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
index fda971b20e..7cf3511806 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
@@ -106,24 +106,40 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
 
     auto logits_dims = ctx->GetInputDim("Logits");
     auto labels_dims = ctx->GetInputDim("Label");
+
+    int rank = logits_dims.size();
     PADDLE_ENFORCE_EQ(
-        logits_dims.size(), 2UL,
-        "The input of softmax_with_cross_entropy should be a 2-D tensor.");
-    PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL,
-                      "The labels should be a 2-D tensor.");
+        rank, labels_dims.size(),
+        "Input(logits) and Input(Label) shall have the same rank.");
+    bool check = true;
+    if ((!ctx->IsRuntime()) && (framework::product(logits_dims) <= 0 ||
+                                framework::product(labels_dims) <= 0)) {
+      check = false;
+    }
+    if (check) {
+      PADDLE_ENFORCE_EQ(framework::slice_ddim(logits_dims, 0, rank - 1),
+                        framework::slice_ddim(labels_dims, 0, rank - 1),
+                        "Input(X) and Input(Label) shall have the same shape "
+                        "except the last dimension.");
+    }
 
     if (ctx->Attrs().Get<bool>("soft_label")) {
-      PADDLE_ENFORCE_EQ(logits_dims[1], labels_dims[1],
-                        "If Attr(soft_label) == true, the 2nd dimension of "
-                        "Input(X) and Input(Label) should be equal.");
+      if (check) {
+        PADDLE_ENFORCE_EQ(logits_dims[rank - 1], labels_dims[rank - 1],
+                          "If Attr(soft_label) == true, the last dimension of "
+                          "Input(X) and Input(Label) should be equal.");
+      }
     } else {
-      PADDLE_ENFORCE_EQ(labels_dims[1], 1UL,
-                        "If Attr(soft_label) == false, the 2nd dimension of "
+      PADDLE_ENFORCE_EQ(labels_dims[rank - 1], 1UL,
+                        "If Attr(soft_label) == false, the last dimension of "
                         "Input(Label) should be 1.");
     }
 
     ctx->SetOutputDim("Softmax", logits_dims);
-    ctx->SetOutputDim("Loss", {logits_dims[0], 1});
+    auto loss_dims = logits_dims;
+    loss_dims[rank - 1] = 1;
+    ctx->SetOutputDim("Loss", loss_dims);
+    // ctx->SetOutputDim("Loss", {logits_dims[0], 1});
 
     ctx->ShareLoD("Logits", /*->*/ "Softmax");
     ctx->ShareLoD("Logits", /*->*/ "Loss");
@@ -152,16 +168,33 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
 
     auto softmax_dims = ctx->GetInputDim("Softmax");
     auto labels_dims = ctx->GetInputDim("Label");
-    PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL,
-                      "The labels should be a 2-D tensor.");
+
+    int rank = softmax_dims.size();
+    PADDLE_ENFORCE_EQ(
+        rank, labels_dims.size(),
+        "Input(logits) and Input(Label) shall have the same rank.");
+    bool check = true;
+    if ((!ctx->IsRuntime()) && (framework::product(softmax_dims) <= 0 ||
+                                framework::product(labels_dims) <= 0)) {
+      check = false;
+    }
+    if (check) {
+      PADDLE_ENFORCE_EQ(
+          framework::slice_ddim(softmax_dims, 0, rank - 1),
+          framework::slice_ddim(labels_dims, 0, rank - 1),
+          "Input(Softmax) and Input(Label) shall have the same shape "
+          "except the last dimension.");
+    }
 
     if (ctx->Attrs().Get<bool>("soft_label")) {
-      PADDLE_ENFORCE_EQ(softmax_dims[1], labels_dims[1],
-                        "When Attr(soft_label) == true, the 2nd dimension of "
-                        "Input(X) and Input(Label) should be equal.");
+      if (check) {
+        PADDLE_ENFORCE_EQ(softmax_dims[rank - 1], labels_dims[rank - 1],
+                          "If Attr(soft_label) == true, the last dimension of "
+                          "Input(Softmax) and Input(Label) should be equal.");
+      }
     } else {
-      PADDLE_ENFORCE_EQ(labels_dims[1], 1UL,
-                        "When Attr(soft_label) == false, the 2nd dimension of "
+      PADDLE_ENFORCE_EQ(labels_dims[rank - 1], 1UL,
+                        "If Attr(soft_label) == false, the last dimension of "
                         "Input(Label) should be 1.");
     }

diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
index 89aaac4cbe..d3b8538124 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
@@ -400,9 +400,15 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
     auto soft_label = context.Attr<bool>("soft_label");
     auto ignore_index = context.Attr<int>("ignore_index");
+
+    int rank = logits->dims().size();
     if (soft_label) {
-      int batch_size = logits->dims()[0];
-      int feature_size = logits->dims()[1];
+      int batch_size = 1;
+      for (int i = 0; i < rank - 1; ++i) {
+        batch_size *= logits->dims()[i];
+      }
+
+      int feature_size = logits->dims()[rank - 1];
       auto* logits_data = logits->data<T>();
       auto* labels_data = labels->data<T>();
       SoftmaxWithCrossEntropyFusedKernel(
          logits_data, labels_data, softmax_data, loss_data, batch_size,
          feature_size, context.cuda_device_context().stream());
     } else {
       if (!context.Attr<bool>("numeric_stable_mode")) {
-        math::SoftmaxCUDNNFunctor<T>()(context.cuda_device_context(), logits,
-                                       softmax);
+        // reshape to 2d
+        Tensor logits_2d = framework::ReshapeToMatrix(*logits, rank - 1);
+        Tensor softmax_2d = framework::ReshapeToMatrix(*softmax, rank - 1);
+        Tensor loss_2d = framework::ReshapeToMatrix(*loss, rank - 1);
+        Tensor labels_2d = framework::ReshapeToMatrix(*labels, rank - 1);
+
+        math::SoftmaxCUDNNFunctor<T>()(context.cuda_device_context(),
+                                       &logits_2d, &softmax_2d);
         math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()(
-            context.cuda_device_context(), loss, softmax, labels, false,
-            ignore_index);
+            context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d,
+            false, ignore_index);
       } else {
-        int batch_size = logits->dims()[0];
-        int feature_size = logits->dims()[1];
+        int batch_size = 1;
+        for (int i = 0; i < rank - 1; ++i) {
+          batch_size *= logits->dims()[i];
+        }
+        int feature_size = logits->dims()[rank - 1];
         auto* logits_data = logits->data<T>();
         auto* labels_data = labels->data<int64_t>();
         HardLabelSoftmaxWithCrossEntropy(
@@ -443,8 +458,13 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
                                        context.device_context(), logit_grad);
     T* logit_grad_data = logit_grad->data<T>();
 
-    const int batch_size = logit_grad->dims()[0];
-    const int class_num = logit_grad->dims()[1];
+    int rank = logit_grad->dims().size();
+    int batch_size = 1;
+    for (int i = 0; i < rank - 1; ++i) {
+      batch_size *= logit_grad->dims()[i];
+    }
+
+    const int class_num = logit_grad->dims()[rank - 1];
     int block = 512;
     auto stream = context.cuda_device_context().stream();
     auto ignore_index = context.Attr<int>("ignore_index");

diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
index 1042cbdcf5..8cba960c76 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.h
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
@@ -40,15 +40,22 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel<T> {
     softmax->mutable_data<T>(context.GetPlace());
     loss->mutable_data<T>(context.GetPlace());
 
-    int axis_dim = logits->dims()[logits->dims().size() - 1];
+    // reshape to 2D tensor
+    int rank = logits->dims().size();
+    Tensor logits_2d = framework::ReshapeToMatrix(*logits, rank - 1);
+    Tensor labels_2d = framework::ReshapeToMatrix(*labels, rank - 1);
+    Tensor loss_2d = framework::ReshapeToMatrix(*loss, rank - 1);
+    Tensor softmax_2d = framework::ReshapeToMatrix(*softmax, rank - 1);
+
+    int axis_dim = logits->dims()[rank - 1];
 
     auto& dev_ctx =
         context.template device_context<DeviceContext>();
     math::SoftmaxFunctor<DeviceContext, T>()(
-        dev_ctx, axis_dim, logits, softmax);
+        dev_ctx, axis_dim, &logits_2d, &softmax_2d);
     math::CrossEntropyFunctor<DeviceContext, T>()(
-        dev_ctx, loss, softmax, labels, context.Attr<bool>("soft_label"),
-        context.Attr<int>("ignore_index"));
+        dev_ctx, &loss_2d, &softmax_2d, &labels_2d,
+        context.Attr<bool>("soft_label"), context.Attr<int>("ignore_index"));
   }
 };
@@ -63,13 +70,19 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel<T> {
         context.Output<Tensor>(framework::GradVarName("Logits"));
     logit_grad->ShareDataWith(*context.Input<Tensor>("Softmax"));
 
-    const int class_num = logit_grad->dims()[1];
-    auto out_grad_mat = EigenMatrix<T>::From(*out_grad);
-    auto logit_grad_mat = EigenMatrix<T>::From(*logit_grad);
-
+    int rank = logit_grad->dims().size();
+    const int class_num = logit_grad->dims()[rank - 1];
+    // reshape to 2d
+    Tensor logit_grad_2d = framework::ReshapeToMatrix(*logit_grad, rank - 1);
+    Tensor out_grad_2d = framework::ReshapeToMatrix(*out_grad, rank - 1);
+
+    auto out_grad_mat = EigenMatrix<T>::From(out_grad_2d);
+    auto logit_grad_mat = EigenMatrix<T>::From(logit_grad_2d);
     auto& place = *context.template device_context<DeviceContext>()
                        .eigen_device();
     if (context.Attr<bool>("soft_label")) {
-      auto lbl_mat = EigenMatrix<T>::From(*labels);
+      Tensor labels_2d = framework::ReshapeToMatrix(*labels, rank - 1);
+      auto lbl_mat = EigenMatrix<T>::From(labels_2d);
       logit_grad_mat.device(place) =
           out_grad_mat.broadcast(Eigen::DSizes<int, 2>(1, class_num)) *
           (logit_grad_mat - lbl_mat);
     } else {
       logit_grad_mat.device(place) =
           logit_grad_mat *
           out_grad_mat.broadcast(Eigen::DSizes<int, 2>(1, class_num));
 
-      const int batch_size = logit_grad->dims()[0];
+      const int batch_size = logit_grad_2d.dims()[0];
+
       const int64_t* label_data = labels->data<int64_t>();
       T* logit_grad_data = logit_grad->data<T>();
       const T* out_grad_data = out_grad->data<T>();

diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py
index ae1883f1f7..ec10b63409 100644
--- a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py
@@ -149,5 +149,98 @@ class TestSigmoidCrossEntropyWithNorm(OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
+    """Test sigmoid_cross_entropy_with_logit_op with probabilistic label
+    """
+
+    def setUp(self):
+        self.op_type = "sigmoid_cross_entropy_with_logits"
+        batch_size = [10, 10]
+        num_classes = 20
+        self.inputs = {
+            'X': logit(
+                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+                .astype("float32")),
+            'Label': np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+            .astype("float32")
+        }
+
+        # Fw Pass is implemented as elementwise sigmoid followed by
+        # elementwise logistic loss
+        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+        sigmoid_X = expit(self.inputs['X'])
+        term1 = self.inputs['Label'] * np.log(sigmoid_X)
+        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+        self.outputs = {'Out': -term1 - term2}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
+class TestSigmoidCrossEntropyWithNorm2(OpTest):
+    def setUp(self):
+        self.op_type = "sigmoid_cross_entropy_with_logits"
+        batch_size = [10, 10]
+        num_classes = 20
+        ignore_index = -1
+        self.inputs = {
+            'X': logit(
+                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+                .astype("float32")),
+            'Label': np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
+            .astype("float32")
+        }
+        self.attrs = {'ignore_index': ignore_index, 'normalize': True}
+        sigmoid_X = expit(self.inputs['X'])
+        term1 = self.inputs['Label'] * np.log(sigmoid_X)
+        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+        out = -term1 - term2
+        out[np.where(self.inputs['Label'] == ignore_index)] = 0
+        if self.attrs['normalize']:
+            out = out / float(
+                np.where(self.inputs['Label'] != ignore_index)[0].size)
+        self.outputs = {'Out': out}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
+class TestSigmoidCrossEntropyWithLogitsOp6(OpTest):
+    """Test sigmoid_cross_entropy_with_logit_op with binary label
+    """
+
+    def setUp(self):
+        self.op_type = "sigmoid_cross_entropy_with_logits"
+        batch_size = [10, 10]
+        num_classes = 20
+        self.inputs = {
+            'X': logit(
+                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
+                .astype("float32")),
+            'Label': np.random.randint(0, 2, tuple(batch_size + [num_classes]))
+            .astype("float32")
+        }
+
+        # Fw Pass is implemented as elementwise sigmoid followed by
+        # elementwise logistic loss
+        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
+        sigmoid_X = expit(self.inputs['X'])
+        term1 = self.inputs['Label'] * np.log(sigmoid_X)
+        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
+        self.outputs = {'Out': -term1 - term2}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
 if __name__ == '__main__':
     unittest.main()
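The reshape-to-matrix trick this patch applies in the CPU and CUDA kernels can be checked numerically outside Paddle. Below is a NumPy-only sketch with illustrative names (hard labels, no ignore_index): it flattens [d0, ..., dn-1, C] logits to a [prod(d0..dn-1), C] matrix exactly as ReshapeToMatrix does, computes per-row softmax cross entropy, then restores the leading dimensions of the loss.

    import numpy as np

    def high_rank_softmax_ce(logits, labels):
        class_num = logits.shape[-1]
        logits_2d = logits.reshape(-1, class_num)
        labels_1d = labels.reshape(-1)
        # numerically stable log-softmax per row
        shifted = logits_2d - logits_2d.max(axis=1, keepdims=True)
        log_softmax = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
        loss = -log_softmax[np.arange(labels_1d.size), labels_1d]
        # Loss keeps the leading dims plus a trailing 1, like the op output
        return loss.reshape(labels.shape[:-1] + (1,))

    logits = np.random.rand(6, 10, 47)
    labels = np.random.randint(0, 47, (6, 10, 1))
    assert high_rank_softmax_ce(logits, labels).shape == (6, 10, 1)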
From 97d4622bdbfcca5e0372e9b58dd68cca7780296c Mon Sep 17 00:00:00 2001
From: phlrain
Date: Thu, 11 Apr 2019 12:19:47 +0000
Subject: [PATCH 10/38] add softmax test unit test=develop

---
 .../test_softmax_with_cross_entropy_op.py     | 139 ++++++++++++++++++
 1 file changed, 139 insertions(+)

diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
index b0494f114c..b06b52f75d 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
@@ -195,5 +195,144 @@ class TestSoftmaxWithCrossEntropyOp3NoCudnn(TestSoftmaxWithCrossEntropyOp3):
         self.numeric_stable_mode = True
 
 
+class TestSoftmaxWithCrossEntropyOp5(OpTest):
+    """
+    Test softmax with cross entropy operator with ignore_index.
+    """
+
+    def initParams(self):
+        self.numeric_stable_mode = False
+
+    def setUp(self):
+        self.initParams()
+        self.op_type = "softmax_with_cross_entropy"
+        batch_size = [6, 10]
+        class_num = 47
+
+        logits = np.random.uniform(
+            0.1, 1.0, tuple(batch_size + [class_num])).astype("float64")
+        softmax = np.apply_along_axis(stable_softmax, 2, logits)
+        labels = np.random.randint(
+            0, class_num, tuple(batch_size + [1]), dtype="int64")
+        ignore_index = 7
+
+        softmax_2d = np.reshape(softmax, [-1, class_num])
+        labels_2d = np.reshape(labels, [-1, 1])
+        cross_entropy = np.asmatrix(
+            [[-np.log(softmax_2d[i][labels_2d[i][0]])]
+             if labels_2d[i] != ignore_index else [0]
+             for i in range(softmax_2d.shape[0])],
+            dtype="float64")
+
+        cross_entropy = np.reshape(cross_entropy, batch_size)
+
+        output_shape = tuple(batch_size + [1])
+        output_res = cross_entropy.astype("float64")
+        output_res = np.expand_dims(output_res, axis=2)
+        self.inputs = {"Logits": logits, "Label": labels}
+        self.outputs = {
+            "Softmax": softmax.astype("float64"),
+            "Loss": output_res,
+        }
+        self.attrs = {
+            "ignore_index": ignore_index,
+            "numeric_stable_mode": self.numeric_stable_mode
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["Logits"], "Loss")
+
+
+class TestSoftmaxWithCrossEntropyOp5NoCudnn(TestSoftmaxWithCrossEntropyOp5):
+    def initParams(self):
+        self.numeric_stable_mode = True
+
+
+class TestSoftmaxWithCrossEntropyOp6(OpTest):
+    """
+    Test softmax with cross entropy operator with soft labels.
+    """
+
+    def setUp(self):
+        self.op_type = "softmax_with_cross_entropy"
+        batch_size = [6, 10]
+        class_num = 37
+
+        logits = np.random.uniform(
+            0.1, 1.0, tuple(batch_size + [class_num])).astype("float64")
+        softmax = np.apply_along_axis(stable_softmax, 2, logits)
+        labels = np.random.uniform(
+            0.1, 1.0, tuple(batch_size + [class_num])).astype("float64")
+        labels /= np.sum(labels, axis=2, keepdims=True)
+
+        cross_entropy = (-labels * np.log(softmax)).sum(
+            axis=2, keepdims=True).astype("float64")
+
+        self.inputs = {"Logits": logits, "Label": labels}
+        self.outputs = {
+            "Softmax": softmax.astype("float64"),
+            "Loss": cross_entropy.astype("float64")
+        }
+        self.attrs = {"soft_label": True}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["Logits"], "Loss")
+
+
+class TestSoftmaxWithCrossEntropyOpFp16_2(TestSoftmaxWithCrossEntropyOp):
+    def initParams(self):
+        self.numeric_stable_mode = False
+        self.dtype = np.float16
+
+    def setUp(self):
+        self.initParams()
+        self.op_type = "softmax_with_cross_entropy"
+        batch_size = [64, 10]
+        class_num = 37
+
+        # NOTE: numpy float16 have very low accuracy, use float32 for numpy check.
+        logits = np.random.uniform(
+            0.1, 1.0, tuple(batch_size + [class_num])).astype(np.float32)
+        softmax = np.apply_along_axis(stable_softmax, 2, logits)
+        labels = np.random.randint(
+            0, class_num, tuple(batch_size + [1]), dtype="int64")
+
+        softmax_2d = np.reshape(softmax, [-1, class_num])
+        labels_2d = np.reshape(labels, [-1, 1])
+
+        cross_entropy = np.asmatrix(
+            [[-np.log(softmax_2d[i][labels_2d[i][0]])]
+             for i in range(softmax_2d.shape[0])],
+            dtype=np.float32)
+
+        cross_entropy = np.reshape(cross_entropy, batch_size)
+        output_shape = tuple(batch_size + [1])
+        output_res = cross_entropy.astype(self.dtype)
+        output_res = np.expand_dims(output_res, axis=2)
+        self.inputs = {"Logits": logits, "Label": labels}
+
+        self.inputs = {
+            "Logits": logits.astype(self.dtype).view(np.uint16),
+            "Label": labels
+        }
+        self.outputs = {
+            "Softmax": softmax.astype(self.dtype),
+            "Loss": output_res,
+        }
+        self.attrs = {"numeric_stable_mode": self.numeric_stable_mode}
+
+    def test_check_output(self):
+        self.check_output(atol=1e-2)
+
+    def test_check_grad(self):
+        self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+
+
 if __name__ == "__main__":
     unittest.main()

From 8063f5867f68e61c687d2b0c9a7a568e21c1d160 Mon Sep 17 00:00:00 2001
From: phlrain
Date: Thu, 11 Apr 2019 12:29:33 +0000
Subject: [PATCH 11/38] remove sigmoid change; test=develop

---
 ...st_sigmoid_cross_entropy_with_logits_op.py | 93 -------------------
 1 file changed, 93 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py
index ec10b63409..ae1883f1f7 100644
--- a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py
@@ -149,98 +149,5 @@ class TestSigmoidCrossEntropyWithNorm(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
-    """Test sigmoid_cross_entropy_with_logit_op with probabilistic label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        batch_size = [10, 10]
-        num_classes = 20
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
-                .astype("float32")),
-            'Label': np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
-            .astype("float32")
-        }
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        self.outputs = {'Out': -term1 - term2}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
-class TestSigmoidCrossEntropyWithNorm2(OpTest):
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        batch_size = [10, 10]
-        num_classes = 20
-        ignore_index = -1
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
-                .astype("float32")),
-            'Label': np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
-            .astype("float32")
-        }
-        self.attrs = {'ignore_index': ignore_index, 'normalize': True}
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        out = -term1 - term2
-        out[np.where(self.inputs['Label'] == ignore_index)] = 0
-        if self.attrs['normalize']:
-            out = out / float(
-                np.where(self.inputs['Label'] != ignore_index)[0].size)
-        self.outputs = {'Out': out}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
-class TestSigmoidCrossEntropyWithLogitsOp6(OpTest):
-    """Test sigmoid_cross_entropy_with_logit_op with binary label
-    """
-
-    def setUp(self):
-        self.op_type = "sigmoid_cross_entropy_with_logits"
-        batch_size = [10, 10]
-        num_classes = 20
-        self.inputs = {
-            'X': logit(
-                np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
-                .astype("float32")),
-            'Label': np.random.randint(0, 2, tuple(batch_size + [num_classes]))
-            .astype("float32")
-        }
-
-        # Fw Pass is implemented as elementwise sigmoid followed by
-        # elementwise logistic loss
-        # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X))
-        sigmoid_X = expit(self.inputs['X'])
-        term1 = self.inputs['Label'] * np.log(sigmoid_X)
-        term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X)
-        self.outputs = {'Out': -term1 - term2}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
 if __name__ == '__main__':
     unittest.main()

From 06156b6cb7789e5129c1c47f32b0753f3b2fd789 Mon Sep 17 00:00:00 2001
From: tink2123
Date: Fri, 12 Apr 2019 09:16:01 +0000
Subject: [PATCH 12/38] polish yolov3 loss annotation test=develop

---
 paddle/fluid/operators/detection/yolov3_loss_op.cc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/operators/detection/yolov3_loss_op.cc b/paddle/fluid/operators/detection/yolov3_loss_op.cc
index 6c37da17f4..e8a186b611 100644
--- a/paddle/fluid/operators/detection/yolov3_loss_op.cc
+++ b/paddle/fluid/operators/detection/yolov3_loss_op.cc
@@ -171,8 +171,8 @@ class Yolov3LossOpMaker : public framework::OpProtoAndCheckerMaker {
 
          The output of previous network is in shape [N, C, H, W], while H and W
          should be the same, H and W specify the grid size, each grid point predict
-         given number boxes, this given number, which following will be represented as S,
-         is specified by the number of anchors, In the second dimension(the channel
+         given number bounding boxes, this given number, which following will be represented as S,
+         is specified by the number of anchor clusters in each scale. In the second dimension(the channel
         dimension), C should be equal to S * (class_num + 5), class_num is the object
         category number of source dataset(such as 80 in coco dataset), so in the second(channel)
         dimension, apart from 4 box location coordinates x, y, w, h,
@@ -203,7 +203,7 @@ class Yolov3LossOpMaker : public framework::OpProtoAndCheckerMaker {
         thresh, the confidence score loss of this anchor box will be ignored.
 
         Therefore, the yolov3 loss consist of three major parts, box location loss,
-        confidence score loss, and classification loss. The L2 loss is used for
+        confidence score loss, and classification loss. The L1 loss is used for
        box coordinates (w, h), and sigmoid cross entropy loss is used for
        box coordinates (x, y), confidence score loss and classification loss.

From 9b9e5e606c7db022014761b0191900815f47d864 Mon Sep 17 00:00:00 2001
From: tink2123
Date: Fri, 12 Apr 2019 09:27:13 +0000
Subject: [PATCH 13/38] modified api.spec test=develop

---
 paddle/fluid/API.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 3f576a4516..d245cb4b87 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -351,7 +351,7 @@ paddle.fluid.layers.generate_mask_labels (ArgSpec(args=['im_info', 'gt_classes',
 paddle.fluid.layers.iou_similarity (ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '587845f60c5d97ffdf2dfd21da52eca1'))
 paddle.fluid.layers.box_coder (ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name', 'axis'], varargs=None, keywords=None, defaults=('encode_center_size', True, None, 0)), ('document', '032d0f4b7d8f6235ee5d91e473344f0e'))
 paddle.fluid.layers.polygon_box_transform (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0e5ac2507723a0b5adec473f9556799b'))
-paddle.fluid.layers.yolov3_loss (ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'gtscore', 'use_label_smooth', 'name'], varargs=None, keywords=None, defaults=(None, True, None)), ('document', '57fa96922e42db8f064c3fb77f2255e8'))
+paddle.fluid.layers.yolov3_loss (ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'gtscore', 'use_label_smooth', 'name'], varargs=None, keywords=None, defaults=(None, True, None)), ('document', 'bbed7a8e63324cb76873ddd32b2f84ef'))
 paddle.fluid.layers.yolo_box (ArgSpec(args=['x', 'img_size', 'anchors', 'class_num', 'conf_thresh', 'downsample_ratio', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5566169a5ab993d177792c023c7fb340'))
 paddle.fluid.layers.box_clip (ArgSpec(args=['input', 'im_info', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '397e9e02b451d99c56e20f268fa03f2e'))
 paddle.fluid.layers.multiclass_nms (ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None)), ('document', 'ca7d1107b6c5d2d6d8221039a220fde0'))

From 165a7bd5a14010120353540ad697cbaa2798c0ad Mon Sep 17 00:00:00 2001
From: phlrain
Date: Sat, 13 Apr 2019 15:19:42 +0000
Subject: [PATCH 14/38] fix many shape checks; test=develop

---
 .../controlflow/tensor_array_read_write_op.cc |  6 ++-
 paddle/fluid/operators/data_norm_op.cc        |  9 ++--
 paddle/fluid/operators/huber_loss_op.cc       | 22 ++++++--
 paddle/fluid/operators/layer_norm_op.cc       | 11 +++-
 .../operators/metrics/precision_recall_op.cc  | 48 ++++++++++-------
 paddle/fluid/operators/minus_op.cc            | 15 ++++--
 .../fluid/operators/modified_huber_loss_op.cc | 32 +++++++++---
 paddle/fluid/operators/space_to_depth_op.cc   | 51 ++++++++++++++-----
 paddle/fluid/operators/tree_conv_op.cc        | 31 +++++++++--
 9 files changed, 167 insertions(+), 58 deletions(-)

diff --git a/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
index 45f18ac925..2ca5242c5c 100644
--- a/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
+++ b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc
@@ -81,8 +81,10 @@ class WriteToArrayInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
     PADDLE_ENFORCE(context->HasInput("I"), "Must set the subscript index");
-    PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1,
-                      "The number of element of subscript index must be 1");
+    if (context->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1,
+                        "The number of element of subscript index must be 1");
+    }
     if (!context->HasInput("X")) {
       return;
     }

diff --git a/paddle/fluid/operators/data_norm_op.cc b/paddle/fluid/operators/data_norm_op.cc
index 45bce6e520..a5c76db6fa 100644
--- a/paddle/fluid/operators/data_norm_op.cc
+++ b/paddle/fluid/operators/data_norm_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/data_norm_op.h"
+#include
 #include
 #include "paddle/fluid/framework/data_layout.h"
 #ifdef PADDLE_WITH_MKLDNN
@@ -65,9 +66,11 @@ class DataNormOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize").size(), 1UL);
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum").size(), 1UL);
     PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum").size(), 1UL);
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C);
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C);
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C);
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C);
+    }
 
     ctx->SetOutputDim("Y", x_dims);
     ctx->SetOutputDim("Means", {C});

diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc
index a72db384c1..dd2ce85545 100644
--- a/paddle/fluid/operators/huber_loss_op.cc
+++ b/paddle/fluid/operators/huber_loss_op.cc
@@ -31,13 +31,27 @@ class HuberLossOp : public framework::OperatorWithKernel {
 
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
-    PADDLE_ENFORCE_EQ(x_dims, y_dims);
     PADDLE_ENFORCE_EQ(x_dims.size(), 2,
                       "The rank of Input(X) must be 2 and the shape is "
                       "[batch_size, 1].");
-    PADDLE_ENFORCE_EQ(x_dims[1], 1,
-                      "Each row of Input(X) contains a real value, "
-                      "so the 2nd dimension of Input(X) must be 1.");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(x_dims, y_dims, "Shape of X and Y should be same");
+    } else {
+      if (x_dims[0] != -1 && y_dims[0] != -1) {
+        PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0],
+                          "The dim 0 of X and Y must be the same.");
+      }
+
+      if (x_dims[1] != -1 && y_dims[1] != -1) {
+        PADDLE_ENFORCE_EQ(x_dims[1], y_dims[1],
+                          "The dim 1 of X and Y must be the same.");
+      }
+    }
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(x_dims[1], 1,
+                        "Each row of Input(X) contains a real value, "
+                        "so the 2nd dimension of Input(X) must be 1.");
+    }
 
     ctx->SetOutputDim("Residual", x_dims);
     ctx->SetOutputDim("Out", {x_dims[0], 1});

diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc
index 9b1a854a31..1aac60ef36 100644
--- a/paddle/fluid/operators/layer_norm_op.cc
+++ b/paddle/fluid/operators/layer_norm_op.cc
@@ -46,11 +46,18 @@ class LayerNormOp : public framework::OperatorWithKernel {
     int right = static_cast<int>(matrix_dim[1]);
     if (ctx->HasInput("Scale")) {
       PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1);
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right);
+
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right,
+                          "dim 0 of Scale should equal the normalized size");
+      }
     }
     if (ctx->HasInput("Bias")) {
       PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1);
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right);
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right,
+                          "dim 0 of Bias should equal the normalized size");
+      }
     }
 
     ctx->SetOutputDim("Y", ctx->GetInputDim("X"));

diff --git a/paddle/fluid/operators/metrics/precision_recall_op.cc b/paddle/fluid/operators/metrics/precision_recall_op.cc
index 1a67b13491..f6d6ffc668 100644
--- a/paddle/fluid/operators/metrics/precision_recall_op.cc
+++ b/paddle/fluid/operators/metrics/precision_recall_op.cc
@@ -40,30 +40,40 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
     auto max_probs_dims = ctx->GetInputDim("MaxProbs");
     auto labels_dims = ctx->GetInputDim("Labels");
 
-    PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
-                      "Each instance contains one max probability, so the "
-                      "shape of Input(MaxProbs) should be [batch_size, 1].");
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Indices"), max_probs_dims,
-                      "The shape of Input(Indices) should be [batch_size, 1].");
-    PADDLE_ENFORCE_EQ(max_probs_dims[0], labels_dims[0],
-                      "The 1st dimension of Input(MaxProbs) and "
-                      "Input(Labels) both are batch_size and the shape should "
-                      "be the same.");
-    PADDLE_ENFORCE_EQ(labels_dims[1], 1,
-                      "The 2nd dimension of Input(Labels) contains instance "
-                      "label and the shape should be equal to 1.");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
+                        "Each instance contains one max probability, so the "
+                        "shape of Input(MaxProbs) should be [batch_size, 1].");
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Indices"), max_probs_dims,
+          "The shape of Input(Indices) should be the same as max_probs_dims");
+      PADDLE_ENFORCE_EQ(
+          max_probs_dims[0], labels_dims[0],
+          "The 1st dimension of Input(MaxProbs) and "
+          "Input(Labels) both are batch_size and the shape should "
+          "be the same.");
+      PADDLE_ENFORCE_EQ(labels_dims[1], 1,
+                        "The 2nd dimension of Input(Labels) contains instance "
+                        "label and the shape should be equal to 1.");
+    }
     if (ctx->HasInput("Weights")) {
       auto weights_dims = ctx->GetInputDim("Weights");
-      PADDLE_ENFORCE_EQ(weights_dims,
-                        framework::make_ddim({max_probs_dims[0], 1}),
-                        "The shape of Input(Weights) should be "
-                        "[batch_size, 1].");
+
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(weights_dims,
+                          framework::make_ddim({max_probs_dims[0], 1}),
+                          "The shape of Input(Weights) should be "
+                          "[batch_size, 1].");
+      }
     }
     if (ctx->HasInput("StatesInfo")) {
       auto states_dims = ctx->GetInputDim("StatesInfo");
-      PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
-                        "The shape of Input(StatesInfo) should be "
-                        "[class_number, 4].");
+
+      if (ctx->IsRuntime()) {
+        PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
+                          "The shape of Input(StatesInfo) should be "
+                          "[class_number, 4].");
+      }
     }
 
     // Layouts of BatchMetrics and AccumMetrics both are:

diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc
index 34571a38a1..91608ebf8a 100644
--- a/paddle/fluid/operators/minus_op.cc
+++ b/paddle/fluid/operators/minus_op.cc
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/minus_op.h"
+#include
 #include
 #include
 
@@ -38,9 +39,17 @@ class MinusOp : public framework::OperatorWithKernel {
 
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
-    PADDLE_ENFORCE_EQ(
-        x_dims, y_dims,
-        "Minus operator must take two tensor with same num of elements");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(
+          x_dims, y_dims,
+          "Minus operator must take two tensor with same num of elements");
+    } else {
+      if (framework::product(x_dims) > 0 && framework::product(y_dims) > 0) {
+        PADDLE_ENFORCE_EQ(
+            x_dims, y_dims,
+            "Minus operator must take two tensor with same num of elements");
+      }
+    }
     ctx->SetOutputDim("Out", x_dims);
     ctx->ShareLoD("X", /*->*/ "Out");
   }

diff --git a/paddle/fluid/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc
index 9954e51083..83c6cba42b 100644
--- a/paddle/fluid/operators/modified_huber_loss_op.cc
+++ b/paddle/fluid/operators/modified_huber_loss_op.cc
@@ -28,9 +28,25 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel {
 
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
 
-    PADDLE_ENFORCE_EQ(x_dims, y_dims, "The shape of X and Y must be the same.");
     PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The tensor rank of X must be 2.");
-    PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1.");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(x_dims, y_dims,
+                        "The shape of X and Y must be the same.");
+    } else {
+      if (x_dims[0] != -1 && y_dims[0] != -1) {
+        PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0],
+                          "The dim 0 of X and Y must be the same.");
+      }
+
+      if (x_dims[1] != -1 && y_dims[1] != -1) {
+        PADDLE_ENFORCE_EQ(x_dims[1], y_dims[1],
+                          "The dim 1 of X and Y must be the same.");
+      }
+    }
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1.");
+    }
 
     ctx->SetOutputDim("IntermediateVal", x_dims);
     ctx->SetOutputDim("Out", {x_dims[0], 1});
@@ -90,11 +106,13 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
     auto intermediate_dims = ctx->GetInputDim("IntermediateVal");
     auto out_grad_dims = ctx->GetInputDim(framework::GradVarName("Out"));
 
-    PADDLE_ENFORCE_EQ(
-        intermediate_dims, x_dims,
-        "The shape of X and intermediate value must be the same.");
-    PADDLE_ENFORCE_EQ(out_grad_dims, x_dims,
-                      "The shape of Input(Out@Grad) and X must be the same.");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(
+          intermediate_dims, x_dims,
+          "The shape of X and intermediate value must be the same.");
+      PADDLE_ENFORCE_EQ(out_grad_dims, x_dims,
+                        "The shape of Input(Out@Grad) and X must be the same.");
+    }
 
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), x_dims);

diff --git a/paddle/fluid/operators/space_to_depth_op.cc b/paddle/fluid/operators/space_to_depth_op.cc
index a286fea3ef..3d66613248 100644
--- a/paddle/fluid/operators/space_to_depth_op.cc
+++ b/paddle/fluid/operators/space_to_depth_op.cc
@@ -40,19 +40,44 @@ class SpaceToDepthOp : public framework::OperatorWithKernel {
     auto blocksize = ctx->Attrs().Get("blocksize");
     PADDLE_ENFORCE_GT(blocksize, 1, "The blocksize should be Greater than 1");
-    PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
-    PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
-    PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
-
-    PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
-                      "input channel should be divisible of the square of "
-                      "SpaceToDepthOp blocksize");
-    PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
-                      "input Height should be divisible of the square of "
-                      "SpaceToDepthOp blocksize");
-    PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
-                      "input Width should be divisible of the square of "
-                      "SpaceToDepthOp blocksize");
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
+      PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
+      PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
+
+      PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
+                        "input channel should be divisible by the square of "
+                        "SpaceToDepthOp blocksize");
+      PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
+                        "input Height should be divisible by "
+                        "SpaceToDepthOp blocksize");
+      PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
+                        "input Width should be divisible by "
+                        "SpaceToDepthOp blocksize");
+    } else {
+      if (x_dims[1] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[1], 0,
+                          "input channel should be Greater than 0");
+        PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
+                          "input channel should be divisible by the square of "
+                          "SpaceToDepthOp blocksize");
+      }
+      if (x_dims[2] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[2], 0,
+                          "input Height should be Greater than 0");
+        PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
+                          "input Height should be divisible by "
+                          "SpaceToDepthOp blocksize");
+      }
+
+      if (x_dims[3] != -1) {
+        PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");
+
+        PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
+                          "input Width should be divisible by "
+                          "SpaceToDepthOp blocksize");
+      }
+    }
 
     VLOG(3) << "SpaceToDepthOp operator x.shape=" << x_dims
             << "Attribute blocksize" << blocksize << std::endl;

diff --git a/paddle/fluid/operators/tree_conv_op.cc b/paddle/fluid/operators/tree_conv_op.cc
index 159e594946..566939afaa 100644
--- a/paddle/fluid/operators/tree_conv_op.cc
+++ b/paddle/fluid/operators/tree_conv_op.cc
@@ -64,17 +64,38 @@ class TreeConvOp : public framework::OperatorWithKernel {
     auto edge_dims = ctx->GetInputDim("EdgeSet");
     auto vector_dims = ctx->GetInputDim("NodesVector");
     auto filter_dims = ctx->GetInputDim("Filter");
-    PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+    } else {
+      if (edge_dims[2] != -1) {
+        PADDLE_ENFORCE_EQ(edge_dims[2], 2, "Input(EdgeSet) dim[2] should be 2");
+      }
+    }
     PADDLE_ENFORCE_EQ(edge_dims.size(), 3,
                       "The dimension of EdgeSet Tensor should be 3");
     PADDLE_ENFORCE_EQ(vector_dims.size(), 3,
                       "The dimension of NodesVector Tensor should be 3");
     PADDLE_ENFORCE_EQ(filter_dims.size(), 4,
                       "The dimension of Filter Tensor should be 4");
-    PADDLE_ENFORCE_EQ(filter_dims[1], 3, "Input(Filter) dim[1] should be 3");
-    PADDLE_ENFORCE_EQ(
-        filter_dims[0], vector_dims[2],
-        "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+
+    if (ctx->IsRuntime()) {
+      PADDLE_ENFORCE_EQ(filter_dims[1], 3, "Input(Filter) dim[1] should be 3");
+      PADDLE_ENFORCE_EQ(
+          filter_dims[0], vector_dims[2],
+          "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+    } else {
+      if (filter_dims[1] != -1) {
+        PADDLE_ENFORCE_EQ(filter_dims[1], 3,
+                          "Input(Filter) dim[1] should be 3");
+      }
+
+      if (filter_dims[0] != -1 && vector_dims[2] != -1) {
+        PADDLE_ENFORCE_EQ(
+            filter_dims[0], vector_dims[2],
+            "Input(Filter) dim[0] must equal to Input(NodesVector) dim[2]");
+      }
+    }
     auto output_dims = framework::make_ddim(
         {vector_dims[0], vector_dims[1], filter_dims[2], filter_dims[3]});
     ctx->SetOutputDim("Out", output_dims);

From 2120f075a3f888458d4b5b5c02e81fab93ee2346 Mon Sep 17 00:00:00 2001
From: SunGaofeng
Date: Mon, 15 Apr 2019 02:36:44 +0000
Subject: [PATCH 15/38] modify infer shape in pad_op.cc,
 pad_constant_like_op.cc. No need in psroi_pool_op.cc, crop_op.cc

---
 paddle/fluid/operators/pad_constant_like_op.cc | 14 ++++++++++++--
 paddle/fluid/operators/pad_op.cc               | 16 ++++++++--------
 2 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/operators/pad_constant_like_op.cc b/paddle/fluid/operators/pad_constant_like_op.cc
index 3f827c26fd..920383e652 100644
--- a/paddle/fluid/operators/pad_constant_like_op.cc
+++ b/paddle/fluid/operators/pad_constant_like_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/pad_constant_like_op.h"
+#include
 
 namespace paddle {
 namespace operators {
@@ -38,8 +39,13 @@ class PadConstantLikeOp : public framework::OperatorWithKernel {
                    "The dimention of X and Y should be the same.");
 
     for (int i = 0; i < x_dim.size(); ++i) {
-      PADDLE_ENFORCE_GE(x_dim[i], y_dim[i]);
+      if ((!ctx->IsRuntime()) && ((x_dim[i] == -1) || (y_dim[i] == -1))) {
+        continue;
+      } else {
+        PADDLE_ENFORCE_GE(x_dim[i], y_dim[i]);
+      }
     }
+
     ctx->SetOutputDim("Out", x_dim);
     ctx->ShareLoD("X", /*->*/ "Out");
   }
@@ -162,7 +168,11 @@ class PadConstantLikeOpGrad : public framework::OperatorWithKernel {
       ctx->ShareLoD("Y", /*->*/ y_grad_name);
 
       for (int i = 0; i < y_dim.size(); ++i) {
-        PADDLE_ENFORCE_GE(dout_dim[i], y_dim[i]);
+        if ((!ctx->IsRuntime()) && ((dout_dim[i] == -1) || (y_dim[i] == -1))) {
+          continue;
+        } else {
+          PADDLE_ENFORCE_GE(dout_dim[i], y_dim[i]);
+        }
       }
     }
   }

diff --git a/paddle/fluid/operators/pad_op.cc b/paddle/fluid/operators/pad_op.cc
index c28106d312..4b4f43344f 100644
--- a/paddle/fluid/operators/pad_op.cc
+++ b/paddle/fluid/operators/pad_op.cc
@@ -36,7 +36,11 @@ class PadOp : public framework::OperatorWithKernel {
                    "of input tensor.");
     std::vector<int64_t> out_dims(x_dim.size());
     for (int i = 0; i < x_dim.size(); ++i) {
-      out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
+      if ((!ctx->IsRuntime()) && (x_dim[i] == -1)) {
+        out_dims[i] = -1;
+      } else {
+        out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
+      }
     }
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
     if (out_dims[0] == x_dim[0]) {
@@ -100,18 +104,14 @@ class PadOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
-    auto& paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
-    for (int i = 0; i < dout_dims.size(); ++i) {
-      dout_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]);
-    }
-
     auto x_grad_name = framework::GradVarName("X");
     if (ctx->HasOutput(x_grad_name)) {
       auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
       auto& paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
       for (int i = 0; i < dout_dims.size(); ++i) {
-        dout_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]);
+        if (ctx->IsRuntime() || (dout_dims[i] != -1)) {
+          dout_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]);
+        }
       }
       ctx->SetOutputDim(x_grad_name, dout_dims);
     }
ctx->GetInputDim(framework::GradVarName("Out")); auto& paddings = ctx->Attrs().Get>("paddings"); for (int i = 0; i < dout_dims.size(); ++i) { - dout_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]); + if (ctx->IsRuntime() || (dout_dims[i] != -1)) { + dout_dims[i] -= (paddings[i * 2] + paddings[i * 2 + 1]); + } } ctx->SetOutputDim(x_grad_name, dout_dims); } From b20b27f0f7fec83117aa1b43d8b4bdaef171df53 Mon Sep 17 00:00:00 2001 From: dengkaipeng Date: Mon, 15 Apr 2019 04:06:34 +0000 Subject: [PATCH 16/38] fix yolov3_loss param name. test=develop --- python/paddle/fluid/layers/detection.py | 34 ++++++++++++------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 0a1ddbc1db..cf06a117ef 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -509,14 +509,14 @@ def polygon_box_transform(input, name=None): @templatedoc(op_type="yolov3_loss") def yolov3_loss(x, - gtbox, - gtlabel, + gt_box, + gt_label, anchors, anchor_mask, class_num, ignore_thresh, downsample_ratio, - gtscore=None, + gt_score=None, use_label_smooth=True, name=None): """ @@ -524,12 +524,12 @@ def yolov3_loss(x, Args: x (Variable): ${x_comment} - gtbox (Variable): groud truth boxes, should be in shape of [N, B, 4], + gt_box (Variable): groud truth boxes, should be in shape of [N, B, 4], in the third dimenstion, x, y, w, h should be stored and x, y, w, h should be relative value of input image. N is the batch number and B is the max box number in an image. - gtlabel (Variable): class id of ground truth boxes, shoud be in shape + gt_label (Variable): class id of ground truth boxes, shoud be in shape of [N, B]. anchors (list|tuple): ${anchors_comment} anchor_mask (list|tuple): ${anchor_mask_comment} @@ -537,7 +537,7 @@ def yolov3_loss(x, ignore_thresh (float): ${ignore_thresh_comment} downsample_ratio (int): ${downsample_ratio_comment} name (string): the name of yolov3 loss. Default None. - gtscore (Variable): mixup score of ground truth boxes, shoud be in shape + gt_score (Variable): mixup score of ground truth boxes, shoud be in shape of [N, B]. Default None. use_label_smooth (bool): ${use_label_smooth_comment} @@ -558,13 +558,13 @@ def yolov3_loss(x, .. 
code-block:: python x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32') - gtbox = fluid.layers.data(name='gtbox', shape=[6, 4], dtype='float32') - gtlabel = fluid.layers.data(name='gtlabel', shape=[6], dtype='int32') - gtscore = fluid.layers.data(name='gtscore', shape=[6], dtype='float32') + gt_box = fluid.layers.data(name='gt_box', shape=[6, 4], dtype='float32') + gt_label = fluid.layers.data(name='gt_label', shape=[6], dtype='int32') + gt_score = fluid.layers.data(name='gt_score', shape=[6], dtype='float32') anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326] anchor_mask = [0, 1, 2] - loss = fluid.layers.yolov3_loss(x=x, gtbox=gtbox, gtlabel=gtlabel, - gtscore=gtscore, anchors=anchors, + loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label, + gt_score=gt_score, anchors=anchors, anchor_mask=anchor_mask, class_num=80, ignore_thresh=0.7, downsample_ratio=32) """ @@ -572,11 +572,11 @@ def yolov3_loss(x, if not isinstance(x, Variable): raise TypeError("Input x of yolov3_loss must be Variable") - if not isinstance(gtbox, Variable): + if not isinstance(gt_box, Variable): raise TypeError("Input gtbox of yolov3_loss must be Variable") - if not isinstance(gtlabel, Variable): + if not isinstance(gt_label, Variable): raise TypeError("Input gtlabel of yolov3_loss must be Variable") - if gtscore is not None and not isinstance(gtscore, Variable): + if gt_score is not None and not isinstance(gt_score, Variable): raise TypeError("Input gtscore of yolov3_loss must be Variable") if not isinstance(anchors, list) and not isinstance(anchors, tuple): raise TypeError("Attr anchors of yolov3_loss must be list or tuple") @@ -602,11 +602,11 @@ def yolov3_loss(x, inputs = { "X": x, - "GTBox": gtbox, - "GTLabel": gtlabel, + "GTBox": gt_box, + "GTLabel": gt_label, } if gtscore: - inputs["GTScore"] = gtscore + inputs["GTScore"] = gt_score attrs = { "anchors": anchors, From 76888b0ba143d4aad18a17849fd686bb1ee2a327 Mon Sep 17 00:00:00 2001 From: SunGaofeng Date: Mon, 15 Apr 2019 04:24:56 +0000 Subject: [PATCH 17/38] modify in pad_op and pad_constant --- paddle/fluid/operators/pad_constant_like_op.cc | 10 ++++++++-- paddle/fluid/operators/pad_op.cc | 3 +++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/pad_constant_like_op.cc b/paddle/fluid/operators/pad_constant_like_op.cc index 920383e652..31ed0a686f 100644 --- a/paddle/fluid/operators/pad_constant_like_op.cc +++ b/paddle/fluid/operators/pad_constant_like_op.cc @@ -42,7 +42,10 @@ class PadConstantLikeOp : public framework::OperatorWithKernel { if ((!ctx->IsRuntime()) && ((x_dim[i] == -1) || (y_dim[i] == -1))) { continue; } else { - PADDLE_ENFORCE_GE(x_dim[i], y_dim[i]); + PADDLE_ENFORCE_GE( + x_dim[i], y_dim[i], + "expected X_dim[i] >= Y_dim[i], but received %d < %d for dim %d", + x_dim[i], y_dim[i], i); } } @@ -171,7 +174,10 @@ class PadConstantLikeOpGrad : public framework::OperatorWithKernel { if ((!ctx->IsRuntime()) && ((dout_dim[i] == -1) || (y_dim[i] == -1))) { continue; } else { - PADDLE_ENFORCE_GE(dout_dim[i], y_dim[i]); + PADDLE_ENFORCE_GE(dout_dim[i], y_dim[i], + "expected Out_dim[i] >= Y_dim[i], but received %d " + "< %d for dim %d", + dout_dim[i], y_dim[i], i); } } } diff --git a/paddle/fluid/operators/pad_op.cc b/paddle/fluid/operators/pad_op.cc index 4b4f43344f..36dc8b0dbb 100644 --- a/paddle/fluid/operators/pad_op.cc +++ b/paddle/fluid/operators/pad_op.cc @@ -34,6 +34,9 @@ class PadOp : public framework::OperatorWithKernel { 
PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()), "Size of paddings should be equal to 2 * dimension size " "of input tensor."); + for (size_t i = 0; i < paddings.size(); ++i) { + PADDLE_ENFORCE_GE(paddings[i], 0, "paddings should >= 0."); + } std::vector out_dims(x_dim.size()); for (int i = 0; i < x_dim.size(); ++i) { if ((!ctx->IsRuntime()) && (x_dim[i] == -1)) { From 7b1702d9a1f133cc2d5e63c9b184d305592f50ea Mon Sep 17 00:00:00 2001 From: dengkaipeng Date: Mon, 15 Apr 2019 04:39:01 +0000 Subject: [PATCH 18/38] fix unittest and API.spec. test=develop --- paddle/fluid/API.spec | 2 +- python/paddle/fluid/layers/detection.py | 2 +- python/paddle/fluid/tests/test_detection.py | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index bf39325cc9..cea7dd2df4 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -351,7 +351,7 @@ paddle.fluid.layers.generate_mask_labels (ArgSpec(args=['im_info', 'gt_classes', paddle.fluid.layers.iou_similarity (ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '587845f60c5d97ffdf2dfd21da52eca1')) paddle.fluid.layers.box_coder (ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name', 'axis'], varargs=None, keywords=None, defaults=('encode_center_size', True, None, 0)), ('document', '032d0f4b7d8f6235ee5d91e473344f0e')) paddle.fluid.layers.polygon_box_transform (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0e5ac2507723a0b5adec473f9556799b')) -paddle.fluid.layers.yolov3_loss (ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'gtscore', 'use_label_smooth', 'name'], varargs=None, keywords=None, defaults=(None, True, None)), ('document', '57fa96922e42db8f064c3fb77f2255e8')) +paddle.fluid.layers.yolov3_loss (ArgSpec(args=['x', 'gt_box', 'gt_label', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'gt_score', 'use_label_smooth', 'name'], varargs=None, keywords=None, defaults=(None, True, None)), ('document', '059021025283ad1ee6f4d32228cf3e4e')) paddle.fluid.layers.yolo_box (ArgSpec(args=['x', 'img_size', 'anchors', 'class_num', 'conf_thresh', 'downsample_ratio', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5566169a5ab993d177792c023c7fb340')) paddle.fluid.layers.box_clip (ArgSpec(args=['input', 'im_info', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '397e9e02b451d99c56e20f268fa03f2e')) paddle.fluid.layers.multiclass_nms (ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None)), ('document', 'ca7d1107b6c5d2d6d8221039a220fde0')) diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index cf06a117ef..dcac233f81 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -605,7 +605,7 @@ def yolov3_loss(x, "GTBox": gt_box, "GTLabel": gt_label, } - if gtscore: + if gt_score: inputs["GTScore"] = gt_score attrs = { diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index 7d1b869cf5..e1c4c2eca0 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -474,17 +474,17 @@ class 
TestYoloDetection(unittest.TestCase): program = Program() with program_guard(program): x = layers.data(name='x', shape=[30, 7, 7], dtype='float32') - gtbox = layers.data(name='gtbox', shape=[10, 4], dtype='float32') - gtlabel = layers.data(name='gtlabel', shape=[10], dtype='int32') - gtscore = layers.data(name='gtscore', shape=[10], dtype='float32') + gt_box = layers.data(name='gt_box', shape=[10, 4], dtype='float32') + gt_label = layers.data(name='gt_label', shape=[10], dtype='int32') + gt_score = layers.data(name='gt_score', shape=[10], dtype='float32') loss = layers.yolov3_loss( x, - gtbox, - gtlabel, [10, 13, 30, 13], [0, 1], + gt_box, + gt_label, [10, 13, 30, 13], [0, 1], 10, 0.7, 32, - gtscore=gtscore, + gt_score=gt_score, use_label_smooth=False) self.assertIsNotNone(loss) From 10879a3cae4548665377f3d0fe2f20d754f575d4 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 15 Apr 2019 05:34:57 +0000 Subject: [PATCH 19/38] separate runtime infershape test=develop --- paddle/fluid/operators/attention_lstm_op.cc | 53 ++++++++++++++------- 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc index 912ec79910..9c46832218 100644 --- a/paddle/fluid/operators/attention_lstm_op.cc +++ b/paddle/fluid/operators/attention_lstm_op.cc @@ -54,17 +54,25 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { auto w_dims = ctx->GetInputDim("LSTMWeight"); const int D = w_dims[1] / 4; PADDLE_ENFORCE_EQ(w_dims.size(), 2, "Input(LSTMWeight)'s rank must be 2."); - PADDLE_ENFORCE_EQ(w_dims[0], D + M, - "LSTMWeight dims should be (%d + %d) * %d.", D, M, 4 * D); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(w_dims[0], D + M, + "LSTMWeight dims should be (%d + %d) * %d.", D, M, 4 * D); + } auto b_dims = ctx->GetInputDim("LSTMBias"); PADDLE_ENFORCE_EQ(b_dims.size(), 2, "Input(LSTMBias)'s rank must be 2."); - PADDLE_ENFORCE_EQ(b_dims[0], 1, "LSTMBias dims should be 1 x %d.", 4 * D); - PADDLE_ENFORCE_EQ(b_dims[1], 4 * D, "LSTMBias dims should be 1 x %d.", 4 * D); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(b_dims[0], 1, "LSTMBias dims should be 1 x %d.", 4 * D); + PADDLE_ENFORCE_EQ(b_dims[1], 4 * D, "LSTMBias dims should be 1 x %d.", + 4 * D); + } auto c_dims = ctx->GetInputDim("C0"); PADDLE_ENFORCE_EQ(c_dims.size(), 2, "Input(C0)'s rank must be 2."); - PADDLE_ENFORCE_EQ(c_dims[1], D, "C0 dims should be N x %d.", D); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(c_dims[1], D, "C0 dims should be N x %d.", D); + } + if (ctx->HasInput("H0")) { auto h_dims = ctx->GetInputDim("H0"); PADDLE_ENFORCE(h_dims == c_dims, @@ -75,26 +83,33 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { auto atten_w_dims = ctx->GetInputDim("AttentionWeight"); PADDLE_ENFORCE_EQ(atten_w_dims.size(), 2, "Input(AttentionWeight)'s rank must be 2."); - PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D, - "AttentionWeight shapes must be (%d + %d) * 1.", M, D); - PADDLE_ENFORCE_EQ(atten_w_dims[1], 1, - "AttentionWeight shapes must be (%d + %d) * 1.", M, D); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D, + "AttentionWeight shapes must be (%d + %d) * 1.", M, D); + PADDLE_ENFORCE_EQ(atten_w_dims[1], 1, + "AttentionWeight shapes must be (%d + %d) * 1.", M, D); + } + if (ctx->HasInput("AttentionBias")) { auto atten_b_dims = ctx->GetInputDim("AttentionBias"); PADDLE_ENFORCE_EQ(atten_b_dims.size(), 2, "Input(AttentionBias)'s rank must be 2."); - PADDLE_ENFORCE_EQ(atten_b_dims[0], 1, - 
"AttentionBias shapes must be 1 * 1."); - PADDLE_ENFORCE_EQ(atten_b_dims[1], 1, - "AttentionBias shapes must be 1 * 1."); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(atten_b_dims[0], 1, + "AttentionBias shapes must be 1 * 1."); + PADDLE_ENFORCE_EQ(atten_b_dims[1], 1, + "AttentionBias shapes must be 1 * 1."); + } } if (ctx->HasInput("AttentionScalar")) { auto dims = ctx->GetInputDim("AttentionScalar"); PADDLE_ENFORCE_EQ(dims.size(), 2, "Input(AttentionScalar)'s rank must be 2."); - PADDLE_ENFORCE_EQ(dims[0], 1, "AttentionScalar shapes must be 1 * 1."); - PADDLE_ENFORCE_EQ(dims[1], 1, "AttentionScalar shapes must be 1 * 1."); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(dims[0], 1, "AttentionScalar shapes must be 1 * 1."); + PADDLE_ENFORCE_EQ(dims[1], 1, "AttentionScalar shapes must be 1 * 1."); + } } if (ctx->HasInput("AttentionScalarBias")) { @@ -104,8 +119,12 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { "AttentionScalar should not be null when have AttentionScalarBias."); PADDLE_ENFORCE_EQ(dims.size(), 2, "Input(AttentionScalarBias)'s rank must be 2."); - PADDLE_ENFORCE_EQ(dims[0], 1, "AttentionScalarBias shapes must be 1 * 1."); - PADDLE_ENFORCE_EQ(dims[1], 1, "AttentionScalarBias shapes must be 1 * 1."); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(dims[0], 1, + "AttentionScalarBias shapes must be 1 * 1."); + PADDLE_ENFORCE_EQ(dims[1], 1, + "AttentionScalarBias shapes must be 1 * 1."); + } } framework::DDim out_dims({x_dims[0], D}); From 73cbdc2998140908c4c1f4a0476f3e4868cf64eb Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 15 Apr 2019 15:16:48 +0800 Subject: [PATCH 20/38] Add train mode test=develop --- python/paddle/fluid/dygraph/layers.py | 12 ++ python/paddle/fluid/dygraph/tracer.py | 69 ++++-- .../unittests/test_dygraph_multi_forward.py | 201 ++++++++++++++++++ 3 files changed, 260 insertions(+), 22 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py index 39e06e3486..6b78e2abb3 100644 --- a/python/paddle/fluid/dygraph/layers.py +++ b/python/paddle/fluid/dygraph/layers.py @@ -48,6 +48,12 @@ class Layer(core.Layer): self._helper = LayerObjectHelper(self._full_name) + def train(self): + framework._dygraph_tracer()._train_mode() + + def eval(self): + framework._dygraph_tracer()._eval_mode() + def full_name(self): """Full name for this layers. 
@@ -254,6 +260,12 @@ class PyLayer(core.PyLayer): def __init__(self): super(PyLayer, self).__init__() + def train(self): + framework._dygraph_tracer()._train_mode() + + def eval(self): + framework._dygraph_tracer()._eval_mode() + @classmethod def _do_forward(cls, inputs): return cls._to_tuple(cls.forward(inputs)) diff --git a/python/paddle/fluid/dygraph/tracer.py b/python/paddle/fluid/dygraph/tracer.py index ad938188bf..ee37ffab2c 100644 --- a/python/paddle/fluid/dygraph/tracer.py +++ b/python/paddle/fluid/dygraph/tracer.py @@ -40,6 +40,7 @@ class Tracer(core.Tracer): self._ops = defaultdict() self._vars = defaultdict() self._trace_id = 0 + self._train_mode = True def trace_var(self, name, var): self._vars[name] = var @@ -51,27 +52,45 @@ class Tracer(core.Tracer): def trace_op(self, op, inputs, outputs, stop_gradient=False): # TODO(minqiyang): remove this line after we take apart all # backward grads and forward variables - op.inputs = inputs - inps = defaultdict(list) - for k, vars in six.iteritems(inputs): - if isinstance(vars, framework.Variable): - op.previous_ops.append(vars.op) - inps[k].append(vars._ivar) - elif isinstance(vars, list) or isinstance(vars, tuple): - for var in vars: - op.previous_ops.append(var.op) - inps[k].append(var._ivar) - - op.outputs = outputs - outs = defaultdict(list) - for k, vars in six.iteritems(outputs): - if isinstance(vars, framework.Variable): - vars.op = op - outs[k].append(vars._ivar) - elif isinstance(vars, list) or isinstance(vars, tuple): - for var in vars: - var.op = op - outs[k].append(var._ivar) + if self._train_mode: + op.inputs = inputs + inps = defaultdict(list) + for k, vars in six.iteritems(inputs): + if isinstance(vars, framework.Variable): + inps[k].append(vars._ivar) + elif isinstance(vars, list) or isinstance(vars, tuple): + for var in vars: + inps[k].append(var._ivar) + + op.outputs = outputs + outs = defaultdict(list) + for k, vars in six.iteritems(outputs): + if isinstance(vars, framework.Variable): + outs[k].append(vars._ivar) + elif isinstance(vars, list) or isinstance(vars, tuple): + for var in vars: + outs[k].append(var._ivar) + else: + inps = defaultdict(list) + for k, vars in six.iteritems(inputs): + if isinstance(vars, framework.Variable): + op.previous_ops.append(vars.op) + inps[k].append(vars._ivar) + elif isinstance(vars, list) or isinstance(vars, tuple): + for var in vars: + op.previous_ops.append(var.op) + inps[k].append(var._ivar) + + op.outputs = outputs + outs = defaultdict(list) + for k, vars in six.iteritems(outputs): + if isinstance(vars, framework.Variable): + vars.op = op + outs[k].append(vars._ivar) + elif isinstance(vars, list) or isinstance(vars, tuple): + for var in vars: + var.op = op + outs[k].append(var._ivar) # record op's trace id op.iop._trace_id = self._trace_id @@ -80,7 +99,7 @@ class Tracer(core.Tracer): framework._current_expected_place(), stop_gradient) - if not stop_gradient: + if not stop_gradient and self._train_mode: self._trace_id += 1 self._ops[op.iop._trace_id] = op @@ -98,3 +117,9 @@ class Tracer(core.Tracer): for k, v in six.iteritems(outputs): if k in backward_refs: op.backward_refs[k] = outputs[k] + + def _train_mode(self): + self._train_mode = True + + def _eval_mode(self): + self._train_mode = False diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py new file mode 100644 index 0000000000..8b8fdcc887 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py @@ -0,0 
+1,201 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import contextlib +import unittest +import numpy as np +import six + +import paddle +import paddle.fluid as fluid +from paddle.fluid import core +from paddle.fluid.optimizer import SGDOptimizer +from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC +from paddle.fluid.dygraph.base import to_variable +from test_imperative_base import new_program_scope + + +class SimpleImgConvPool(fluid.dygraph.Layer): + def __init__(self, + name_scope, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + act=None, + use_cudnn=False, + param_attr=None, + bias_attr=None): + super(SimpleImgConvPool, self).__init__(name_scope) + + self._conv2d = Conv2D( + self.full_name(), + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=None, + bias_attr=None, + use_cudnn=use_cudnn) + + self._pool2d = Pool2D( + self.full_name(), + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, + use_cudnn=use_cudnn) + + def forward(self, inputs): + x = self._conv2d(inputs) + x = self._pool2d(x) + return x + + +class MNIST(fluid.dygraph.Layer): + def __init__(self, name_scope): + super(MNIST, self).__init__(name_scope) + + self._simple_img_conv_pool_1 = SimpleImgConvPool( + self.full_name(), 1, 20, 5, 2, 2, act="relu") + + self._simple_img_conv_pool_2 = SimpleImgConvPool( + self.full_name(), 20, 50, 5, 2, 2, act="relu") + + pool_2_shape = 50 * 4 * 4 + SIZE = 10 + scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5 + self._fc = FC(self.full_name(), + 10, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale)), + act="softmax") + + def forward(self, inputs): + x = self._simple_img_conv_pool_1(inputs) + x = self._simple_img_conv_pool_2(x) + x = self._fc(x) + return x + + +class TestDygraphMultiForward(unittest.TestCase): + def test_mnist_forward_float32(self): + seed = 90 + epoch_num = 1 + with fluid.dygraph.guard(): + fluid.default_startup_program().random_seed = seed + fluid.default_main_program().random_seed = seed + + mnist = MNIST("mnist") + sgd = SGDOptimizer(learning_rate=1e-3) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128, drop_last=True) + + dy_param_init_value = {} + mnist.eval() + for epoch in range(epoch_num): + for batch_id, data in enumerate(train_reader()): + dy_x_data = np.array( + [x[0].reshape(1, 28, 28) + for x in data]).astype('float32') + y_data = np.array( + [x[1] for x in data]).astype('int64').reshape(128, 1) + + img = to_variable(dy_x_data) + label = to_variable(y_data) + label.stop_gradient = True + + 
cost = mnist(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + + dy_out = avg_loss.numpy() + + if epoch == 0 and batch_id == 0: + for param in mnist.parameters(): + dy_param_init_value[param.name] = param.numpy() + + with new_program_scope(): + fluid.default_startup_program().random_seed = seed + fluid.default_main_program().random_seed = seed + + exe = fluid.Executor(fluid.CPUPlace( + ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + + mnist = MNIST("mnist") + sgd = SGDOptimizer(learning_rate=1e-3) + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128, drop_last=True) + + img = fluid.layers.data( + name='pixel', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + cost = mnist(img) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + + # initialize params and fetch them + static_param_init_value = {} + static_param_name_list = [] + for param in mnist.parameters(): + static_param_name_list.append(param.name) + + out = exe.run(fluid.default_startup_program(), + fetch_list=static_param_name_list) + + for i in range(len(static_param_name_list)): + static_param_init_value[static_param_name_list[i]] = out[i] + + for epoch in range(epoch_num): + for batch_id, data in enumerate(train_reader()): + static_x_data = np.array( + [x[0].reshape(1, 28, 28) + for x in data]).astype('float32') + y_data = np.array( + [x[1] for x in data]).astype('int64').reshape([128, 1]) + + fetch_list = [avg_loss.name] + out = exe.run( + fluid.default_main_program(), + feed={"pixel": static_x_data, + "label": y_data}, + fetch_list=fetch_list) + + static_out = out[0] + + self.assertTrue(np.allclose(dy_x_data.all(), static_x_data.all())) + + for key, value in six.iteritems(static_param_init_value): + self.assertTrue(np.allclose(value, dy_param_init_value[key])) + + self.assertTrue(np.allclose(static_out, dy_out)) + + +if __name__ == '__main__': + unittest.main() From 9a3848a2eac63ef2f04bc0aad3f9092913b984e4 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 15 Apr 2019 19:25:14 +0800 Subject: [PATCH 21/38] Fix attrs test=develop --- python/paddle/fluid/framework.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index a29db04900..535909d710 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -942,7 +942,7 @@ class Operator(object): self.iop = core.OpBase(type) self.previous_ops = [] - self.attrs = attrs + self.attrs = attrs if attrs else {} else: self.block = block self.desc = desc From 87916f8d84626102f302e0dc43b04509cf960fba Mon Sep 17 00:00:00 2001 From: phlrain Date: Mon, 15 Apr 2019 13:35:13 +0000 Subject: [PATCH 22/38] simple code;test=develop --- paddle/fluid/operators/huber_loss_op.cc | 13 ++----------- paddle/fluid/operators/minus_op.cc | 9 ++------- paddle/fluid/operators/modified_huber_loss_op.cc | 13 ++----------- 3 files changed, 6 insertions(+), 29 deletions(-) diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc index dd2ce85545..157f13ffbc 100644 --- a/paddle/fluid/operators/huber_loss_op.cc +++ b/paddle/fluid/operators/huber_loss_op.cc @@ -34,18 +34,9 @@ class HuberLossOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The rank of Input(X) must be 2 and the shape is " "[batch_size, 1]."); - if (ctx->IsRuntime()) { + if (ctx->IsRuntime() || + 
(framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) { PADDLE_ENFORCE_EQ(x_dims, y_dims, "Shape of X and Y should be same"); - } else { - if (x_dims[0] != -1 && y_dims[0] != -1) { - PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0], - "The dim 0 of X and Y must be the same."); - } - - if (x_dims[1] != -1 && y_dims[1] != -1) { - PADDLE_ENFORCE_EQ(x_dims[1], y_dims[1], - "The dim 1 of X and Y must be the same."); - } } if (ctx->IsRuntime()) { PADDLE_ENFORCE_EQ(x_dims[1], 1, diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc index 91608ebf8a..02a90d77b6 100644 --- a/paddle/fluid/operators/minus_op.cc +++ b/paddle/fluid/operators/minus_op.cc @@ -39,16 +39,11 @@ class MinusOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); - if (ctx->IsRuntime()) { + if (ctx->IsRuntime() || + (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) { PADDLE_ENFORCE_EQ( x_dims, y_dims, "Minus operator must take two tensor with same num of elements"); - } else { - if (framework::product(x_dims) > 0 && framework::product(y_dims) > 0) { - PADDLE_ENFORCE_EQ( - x_dims, y_dims, - "Minus operator must take two tensor with same num of elements"); - } } ctx->SetOutputDim("Out", x_dims); ctx->ShareLoD("X", /*->*/ "Out"); diff --git a/paddle/fluid/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc index 83c6cba42b..14d75aee75 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cc +++ b/paddle/fluid/operators/modified_huber_loss_op.cc @@ -29,19 +29,10 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel { auto y_dims = ctx->GetInputDim("Y"); PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The tensor rank of X must be 2."); - if (ctx->IsRuntime()) { + if (ctx->IsRuntime() || + (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) { PADDLE_ENFORCE_EQ(x_dims, y_dims, "The shape of X and Y must be the same."); - } else { - if (x_dims[0] != -1 && y_dims[0] != -1) { - PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0], - "The dim 0 of X and Y must be the same."); - } - - if (x_dims[1] != -1 && y_dims[1] != -1) { - PADDLE_ENFORCE_EQ(x_dims[1], y_dims[1], - "The dim 1 of X and Y must be the same."); - } } if (ctx->IsRuntime()) { From c139f1e049bea61019ca9431a1a30b3f8013b2c9 Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Tue, 16 Apr 2019 02:06:35 +0000 Subject: [PATCH 23/38] refine roi align infer shape, test=develop --- paddle/fluid/operators/roi_align_op.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/operators/roi_align_op.cc b/paddle/fluid/operators/roi_align_op.cc index 7bb10ce063..d0dd861af7 100644 --- a/paddle/fluid/operators/roi_align_op.cc +++ b/paddle/fluid/operators/roi_align_op.cc @@ -37,9 +37,11 @@ class ROIAlignOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(rois_dims.size() == 2, "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" "given as [[x1, y1, x2, y2], ...]."); - PADDLE_ENFORCE(rois_dims[1] == 4, - "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" - "given as [[x1, y1, x2, y2], ...]."); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE(rois_dims[1] == 4, + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], ...]."); + } int pooled_height = ctx->Attrs().Get("pooled_height"); int pooled_width = ctx->Attrs().Get("pooled_width"); float spatial_scale = ctx->Attrs().Get("spatial_scale"); From 1cca7114c647a3a8833eeb2ae981e4392ecb347d Mon Sep 17 00:00:00 2001 From: 
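The huber_loss, minus, and modified_huber_loss rewrites collapse the per-dimension branches into a single predicate: whole-shape equality is enforced whenever both element-count products are positive, that is, whenever both shapes are fully known. The same guard later protects attention_lstm's H0/C0 comparison. A standalone sketch follows; Product() here is a stand-in for framework::product, not the real helper.

#include <cstdint>
#include <functional>
#include <numeric>
#include <stdexcept>
#include <vector>

// Element count of a shape; a single unknown (-1) dim makes it negative.
int64_t Product(const std::vector<int64_t>& dims) {
  return std::accumulate(dims.begin(), dims.end(), int64_t{1},
                         std::multiplies<int64_t>());
}

// X and Y must match exactly, but only enforce this once both shapes
// are fully known, or once shape inference runs at runtime.
void CheckSameShape(const std::vector<int64_t>& x,
                    const std::vector<int64_t>& y, bool is_runtime) {
  if (is_runtime || (Product(x) > 0 && Product(y) > 0)) {
    if (x != y) throw std::runtime_error("Shape of X and Y should be same");
  }
}

int main() {
  CheckSameShape({-1, 10}, {32, 10}, false);  // skipped: X has an unknown dim
  CheckSameShape({32, 10}, {32, 10}, true);   // enforced and satisfied
  return 0;
}

One caveat: an even number of -1 entries multiplies back to a positive product, so this is a pragmatic guard rather than an airtight one.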
heqiaozhi Date: Tue, 16 Apr 2019 10:08:05 +0800 Subject: [PATCH 24/38] fix infer test=develop --- .../teacher_student_sigmoid_loss_op.cc | 39 +++++++++++-------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc index 6a4bea9437..7f95d16f09 100644 --- a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc +++ b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc @@ -37,12 +37,14 @@ class TeacherStudentSigmoidLossOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2."); PADDLE_ENFORCE_EQ(label_dims.size(), 2UL, "Input(Label)'s rank should be 2."); - PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], - "The 1st dimension of Input(X) and Input(Label) should " - "be equal."); - PADDLE_ENFORCE_EQ(label_dims[1], 1UL, - "The 2nd dimension of " - "Input(Label) should be 1."); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], + "The 1st dimension of Input(X) and Input(Label) should " + "be equal."); + PADDLE_ENFORCE_EQ(label_dims[1], 1UL, + "The 2nd dimension of " + "Input(Label) should be 1."); + } ctx->SetOutputDim("Y", {x_dims[0], 1}); ctx->ShareLoD("X", /*->*/ "Y"); } @@ -99,17 +101,20 @@ class TeacherStudentSigmoidLossGradientOp PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2."); PADDLE_ENFORCE_EQ(label_dims.size(), 2, "Input(Label)'s rank should be 2."); - PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], - "The 1st dimension of Input(X) and Input(Label) should " - "be equal."); - PADDLE_ENFORCE_EQ(x_dims[0], dy_dims[0], - "The 1st dimension of Input(X) and Input(Y@Grad) should " - "be equal."); - PADDLE_ENFORCE_EQ(dy_dims[1], 1, - "The 2nd dimension of Input(Y@Grad) should be 1."); - PADDLE_ENFORCE_EQ(label_dims[1], 1, - "When Attr(soft_label) == false, the 2nd dimension of " - "Input(Label) should be 1."); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], + "The 1st dimension of Input(X) and Input(Label) should " + "be equal."); + PADDLE_ENFORCE_EQ( + x_dims[0], dy_dims[0], + "The 1st dimension of Input(X) and Input(Y@Grad) should " + "be equal."); + PADDLE_ENFORCE_EQ(dy_dims[1], 1, + "The 2nd dimension of Input(Y@Grad) should be 1."); + PADDLE_ENFORCE_EQ(label_dims[1], 1, + "When Attr(soft_label) == false, the 2nd dimension of " + "Input(Label) should be 1."); + } ctx->SetOutputDim(framework::GradVarName("X"), x_dims); ctx->ShareLoD("X", framework::GradVarName("X")); } From f7a5a98fdbdd2e721c2953d7d020e79901151e8e Mon Sep 17 00:00:00 2001 From: phlrain Date: Tue, 16 Apr 2019 02:11:45 +0000 Subject: [PATCH 25/38] remove unused code; test=develop --- paddle/fluid/operators/softmax_with_cross_entropy_op.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc index 7cf3511806..2f4702ed71 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc @@ -139,7 +139,6 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { auto loss_dims = logits_dims; loss_dims[rank - 1] = 1; ctx->SetOutputDim("Loss", loss_dims); - // ctx->SetOutputDim("Loss", {logits_dims[0], 1}); ctx->ShareLoD("Logits", /*->*/ "Softmax"); ctx->ShareLoD("Logits", /*->*/ "Loss"); From 
5309b081f6a483b2d4e2a76e05a28ed37a62a30d Mon Sep 17 00:00:00 2001 From: phlrain Date: Tue, 16 Apr 2019 02:52:13 +0000 Subject: [PATCH 26/38] simple code; test=develop --- paddle/fluid/operators/softmax_with_cross_entropy_op.cc | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc index 2f4702ed71..7c024e50dd 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc @@ -111,11 +111,8 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( rank, labels_dims.size(), "Input(logits) and Input(Label) shall have the same rank."); - bool check = true; - if ((!ctx->IsRuntime()) && (framework::product(logits_dims) <= 0 || - framework::product(labels_dims) <= 0)) { - check = false; - } + bool check = ctx->IsRuntime() || (framework::product(logits_dims) > 0 && + framework::product(labels_dims) > 0); if (check) { PADDLE_ENFORCE_EQ(framework::slice_ddim(logits_dims, 0, rank - 1), framework::slice_ddim(labels_dims, 0, rank - 1), From 592011bbcfbb2e46aefbfd2896bfb330046ed014 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Tue, 16 Apr 2019 11:45:44 +0800 Subject: [PATCH 27/38] Fix infer shape of split lod tensor op test=develop --- paddle/fluid/operators/split_lod_tensor_op.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc index 5ede972c71..c89e683d76 100644 --- a/paddle/fluid/operators/split_lod_tensor_op.cc +++ b/paddle/fluid/operators/split_lod_tensor_op.cc @@ -157,7 +157,9 @@ class SplitLoDTensorInferShape : public framework::InferShapeBase { auto mask_dim = context->GetInputDim("Mask"); PADDLE_ENFORCE_EQ(mask_dim.size(), 2); - PADDLE_ENFORCE_EQ(mask_dim[1], 1); + if (context->IsRuntime()) { + PADDLE_ENFORCE_EQ(mask_dim[1], 1); + } context->SetOutputDim("OutTrue", context->GetInputDim("X")); context->SetOutputDim("OutFalse", context->GetInputDim("X")); From 411b9ba520399f78067d0924c6cd929ca41fcfe5 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 16 Apr 2019 04:21:22 +0000 Subject: [PATCH 28/38] update test=develop --- paddle/fluid/operators/attention_lstm_op.cc | 47 +++++++-------------- 1 file changed, 16 insertions(+), 31 deletions(-) diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc index 9c46832218..7d599ffd6f 100644 --- a/paddle/fluid/operators/attention_lstm_op.cc +++ b/paddle/fluid/operators/attention_lstm_op.cc @@ -54,18 +54,13 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { auto w_dims = ctx->GetInputDim("LSTMWeight"); const int D = w_dims[1] / 4; PADDLE_ENFORCE_EQ(w_dims.size(), 2, "Input(LSTMWeight)'s rank must be 2."); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(w_dims[0], D + M, - "LSTMWeight dims should be (%d + %d) * %d.", D, M, 4 * D); - } + PADDLE_ENFORCE_EQ(w_dims[0], D + M, + "LSTMWeight dims should be (%d + %d) * %d.", D, M, 4 * D); auto b_dims = ctx->GetInputDim("LSTMBias"); PADDLE_ENFORCE_EQ(b_dims.size(), 2, "Input(LSTMBias)'s rank must be 2."); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(b_dims[0], 1, "LSTMBias dims should be 1 x %d.", 4 * D); - PADDLE_ENFORCE_EQ(b_dims[1], 4 * D, "LSTMBias dims should be 1 x %d.", - 4 * D); - } + PADDLE_ENFORCE_EQ(b_dims[0], 1, "LSTMBias dims should be 1 x %d.", 4 * D); + PADDLE_ENFORCE_EQ(b_dims[1], 
4 * D, "LSTMBias dims should be 1 x %d.", 4 * D); auto c_dims = ctx->GetInputDim("C0"); PADDLE_ENFORCE_EQ(c_dims.size(), 2, "Input(C0)'s rank must be 2."); @@ -83,33 +78,27 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { auto atten_w_dims = ctx->GetInputDim("AttentionWeight"); PADDLE_ENFORCE_EQ(atten_w_dims.size(), 2, "Input(AttentionWeight)'s rank must be 2."); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D, - "AttentionWeight shapes must be (%d + %d) * 1.", M, D); - PADDLE_ENFORCE_EQ(atten_w_dims[1], 1, - "AttentionWeight shapes must be (%d + %d) * 1.", M, D); - } + PADDLE_ENFORCE_EQ(atten_w_dims[0], M + D, + "AttentionWeight shapes must be (%d + %d) * 1.", M, D); + PADDLE_ENFORCE_EQ(atten_w_dims[1], 1, + "AttentionWeight shapes must be (%d + %d) * 1.", M, D); if (ctx->HasInput("AttentionBias")) { auto atten_b_dims = ctx->GetInputDim("AttentionBias"); PADDLE_ENFORCE_EQ(atten_b_dims.size(), 2, "Input(AttentionBias)'s rank must be 2."); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(atten_b_dims[0], 1, - "AttentionBias shapes must be 1 * 1."); - PADDLE_ENFORCE_EQ(atten_b_dims[1], 1, - "AttentionBias shapes must be 1 * 1."); - } + PADDLE_ENFORCE_EQ(atten_b_dims[0], 1, + "AttentionBias shapes must be 1 * 1."); + PADDLE_ENFORCE_EQ(atten_b_dims[1], 1, + "AttentionBias shapes must be 1 * 1."); } if (ctx->HasInput("AttentionScalar")) { auto dims = ctx->GetInputDim("AttentionScalar"); PADDLE_ENFORCE_EQ(dims.size(), 2, "Input(AttentionScalar)'s rank must be 2."); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(dims[0], 1, "AttentionScalar shapes must be 1 * 1."); - PADDLE_ENFORCE_EQ(dims[1], 1, "AttentionScalar shapes must be 1 * 1."); - } + PADDLE_ENFORCE_EQ(dims[0], 1, "AttentionScalar shapes must be 1 * 1."); + PADDLE_ENFORCE_EQ(dims[1], 1, "AttentionScalar shapes must be 1 * 1."); } if (ctx->HasInput("AttentionScalarBias")) { @@ -119,12 +108,8 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { "AttentionScalar should not be null when have AttentionScalarBias."); PADDLE_ENFORCE_EQ(dims.size(), 2, "Input(AttentionScalarBias)'s rank must be 2."); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(dims[0], 1, - "AttentionScalarBias shapes must be 1 * 1."); - PADDLE_ENFORCE_EQ(dims[1], 1, - "AttentionScalarBias shapes must be 1 * 1."); - } + PADDLE_ENFORCE_EQ(dims[0], 1, "AttentionScalarBias shapes must be 1 * 1."); + PADDLE_ENFORCE_EQ(dims[1], 1, "AttentionScalarBias shapes must be 1 * 1."); } framework::DDim out_dims({x_dims[0], D}); From ed892ebaf963743a7f9cbf155f9db45998708dcd Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 16 Apr 2019 05:17:11 +0000 Subject: [PATCH 29/38] update test=develop --- paddle/fluid/operators/attention_lstm_op.cc | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc index 7d599ffd6f..aecd3d4302 100644 --- a/paddle/fluid/operators/attention_lstm_op.cc +++ b/paddle/fluid/operators/attention_lstm_op.cc @@ -70,9 +70,13 @@ void AttentionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { if (ctx->HasInput("H0")) { auto h_dims = ctx->GetInputDim("H0"); - PADDLE_ENFORCE(h_dims == c_dims, - "The dimension of Input(H0) and Input(C0) " - "should be the same."); + PADDLE_ENFORCE_EQ(h_dims.size(), 2UL, "Input(H0)'s rank must be 2."); + if (ctx->IsRuntime() || + (framework::product(c_dims) > 0 && framework::product(h_dims) > 0)) { + PADDLE_ENFORCE(h_dims == c_dims, + "The dimension 
of Input(H0) and Input(C0) " + "should be the same."); + } } auto atten_w_dims = ctx->GetInputDim("AttentionWeight"); From 5663fbfb0a8c6c90c095536f5c58f1c593ef552e Mon Sep 17 00:00:00 2001 From: xuezhong Date: Tue, 16 Apr 2019 07:19:58 +0000 Subject: [PATCH 30/38] fix infershape bug test=develop --- paddle/fluid/operators/linear_chain_crf_op.cc | 46 ++++++++++++------- paddle/fluid/operators/metrics/accuracy_op.cc | 9 ++-- paddle/fluid/operators/metrics/auc_op.cc | 4 +- paddle/fluid/operators/sample_logits_op.cc | 8 +++- paddle/fluid/operators/smooth_l1_loss_op.cc | 21 ++++++--- .../fluid/operators/squared_l2_distance_op.cc | 39 ++++++++++------ paddle/fluid/platform/enforce.h | 41 +++++++++++++++++ 7 files changed, 125 insertions(+), 43 deletions(-) diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc index fa09cb61e6..d5e29eb179 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cc +++ b/paddle/fluid/operators/linear_chain_crf_op.cc @@ -152,12 +152,19 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { auto transition_dims = ctx->GetInputDim("Transition"); PADDLE_ENFORCE_EQ(transition_dims.size(), 2, "The Input(Transition) should be a 2-D tensor."); - PADDLE_ENFORCE_EQ( - transition_dims[0] - 2, transition_dims[1], - "An invalid dimension for the Input(Transition), which should " - "be a 2-D tensor with shape [(D + 2) x D]."); - PADDLE_ENFORCE_EQ( - emission_dims[1], transition_dims[1], + bool check = true; + if ((!ctx->IsRuntime()) && + (transition_dims[0] <= 0 || transition_dims[1] <= 0)) { + check = false; + } + if (check) { + PADDLE_ENFORCE_EQ( + transition_dims[0] - 2, transition_dims[1], + "An invalid dimension for the Input(Transition), which should " + "be a 2-D tensor with shape [(D + 2) x D]."); + } + PADDLE_INFERSHAPE_ENFORCE_EQ( + ctx, emission_dims[1], transition_dims[1], "The 2nd dimension of the Input(Emission) and the Input(Transition) " "should be equal to the tag number."); @@ -165,8 +172,8 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(label_dims.size() == 2UL && label_dims[1] == 1UL, "The Input(Label) should be a 2-D tensor with the 2nd " "dimensions fixed to 1."); - PADDLE_ENFORCE_EQ( - emission_dims[0], label_dims[0], + PADDLE_INFERSHAPE_ENFORCE_EQ( + ctx, emission_dims[0], label_dims[0], "The height of Input(Emission) and the height of Input(Label) " "should be the same."); @@ -211,12 +218,19 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { auto transition_exps_dims = ctx->GetInputDim("TransitionExps"); PADDLE_ENFORCE_EQ(transition_exps_dims.size(), 2, "The Input(TransitionExps) should be a 2-D tensor."); - PADDLE_ENFORCE_EQ( - transition_exps_dims[0] - 2, transition_exps_dims[1], - "An invalid dimension for the Input(TransitionExps), which should " - "be a 2-D tensor with shape [(D + 2) x D]."); - PADDLE_ENFORCE_EQ( - emission_exps_dims[1], transition_exps_dims[1], + bool check = true; + if ((!ctx->IsRuntime()) && + (transition_exps_dims[0] <= 0 || transition_exps_dims[1] <= 0)) { + check = false; + } + if (check) { + PADDLE_ENFORCE_EQ( + transition_exps_dims[0] - 2, transition_exps_dims[1], + "An invalid dimension for the Input(TransitionExps), which should " + "be a 2-D tensor with shape [(D + 2) x D]."); + } + PADDLE_INFERSHAPE_ENFORCE_EQ( + ctx, emission_exps_dims[1], transition_exps_dims[1], "The 2nd dimension of the Input(EmissionExps) and the " "Input(TransitionExps) should be equal to the tag number."); @@ -224,8 +238,8 @@ 
class LinearChainCRFGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(label_dims.size() == 2UL && label_dims[1] == 1UL, "The Input(Label) should be a 2-D tensor with the 2nd " "dimensions fixed to 1."); - PADDLE_ENFORCE_EQ( - emission_exps_dims[0], label_dims[0], + PADDLE_INFERSHAPE_ENFORCE_EQ( + ctx, emission_exps_dims[0], label_dims[0], "The height of Input(EmissionExps) and the height of Input(Label) " "should be the same."); diff --git a/paddle/fluid/operators/metrics/accuracy_op.cc b/paddle/fluid/operators/metrics/accuracy_op.cc index 7db6dff297..26e6ab1568 100644 --- a/paddle/fluid/operators/metrics/accuracy_op.cc +++ b/paddle/fluid/operators/metrics/accuracy_op.cc @@ -41,10 +41,11 @@ class AccuracyOp : public framework::OperatorWithKernel { // it's the output of topk. PADDLE_ENFORCE_EQ(label_dim.size(), 2, "label's rank must be 2."); - PADDLE_ENFORCE_EQ(label_dim[1], 1, "label's second dimension must be 1"); - PADDLE_ENFORCE_EQ(inference_dim[0], label_dim[0], - "the inference tensor's num_rows must be" - " the same as label."); + PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, label_dim[1], 1, + "label's second dimension must be 1"); + PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, inference_dim[0], label_dim[0], + "the inference tensor's num_rows must be" + " the same as label."); ctx->SetOutputDim("Accuracy", {1}); ctx->SetOutputDim("Correct", {1}); diff --git a/paddle/fluid/operators/metrics/auc_op.cc b/paddle/fluid/operators/metrics/auc_op.cc index 5e33dd9606..4670eb23b3 100644 --- a/paddle/fluid/operators/metrics/auc_op.cc +++ b/paddle/fluid/operators/metrics/auc_op.cc @@ -32,8 +32,8 @@ class AucOp : public framework::OperatorWithKernel { auto predict_height = ctx->GetInputDim("Predict")[0]; auto label_height = ctx->GetInputDim("Label")[0]; - PADDLE_ENFORCE_EQ(predict_height, label_height, - "Out and Label should have same height."); + PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, predict_height, label_height, + "Out and Label should have same height."); int num_pred_buckets = ctx->Attrs().Get("num_thresholds") + 1; int slide_steps = ctx->Attrs().Get("slide_steps"); diff --git a/paddle/fluid/operators/sample_logits_op.cc b/paddle/fluid/operators/sample_logits_op.cc index a7f7fb26b1..8019fceb27 100644 --- a/paddle/fluid/operators/sample_logits_op.cc +++ b/paddle/fluid/operators/sample_logits_op.cc @@ -11,10 +11,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
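linear_chain_crf's constraint is relational: Transition must be a [(D + 2) x D] matrix, so it can only be validated once both dimensions are concrete, which is what the boolean check flag above encodes. A compact standalone sketch of that flag idiom, with illustrative names:

#include <cstdint>
#include <stdexcept>

// Verifies the transition dims form [(D + 2) x D], but only when both
// dims are concrete; at compile time either one may still be -1.
void CheckTransition(int64_t dim0, int64_t dim1, bool is_runtime) {
  bool check = is_runtime || (dim0 > 0 && dim1 > 0);
  if (check && dim0 - 2 != dim1) {
    throw std::runtime_error("Transition should be a [(D + 2) x D] tensor");
  }
}

int main() {
  CheckTransition(-1, 5, false);  // deferred: row count unknown until runtime
  CheckTransition(7, 5, false);   // checked now: 7 - 2 == 5 holds
  return 0;
}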
*/ - #include "paddle/fluid/operators/sample_logits_op.h" #include "paddle/fluid/operators/math/sample_prob.h" +#include + namespace paddle { namespace operators { @@ -132,7 +133,10 @@ class SampleLogitsOp : public framework::OperatorWithKernel { "The labels should be a 2-D tensor."); const int num_samples = ctx->Attrs().Get("num_samples"); - const int num_sampled_classes = labels_dims[1] + num_samples; + int num_sampled_classes = labels_dims[1] + num_samples; + if ((!ctx->IsRuntime()) && labels_dims[1] <= 0) { + num_sampled_classes = -1; + } ctx->SetOutputDim("Samples", {logits_dims[0], num_sampled_classes}); ctx->SetOutputDim("Probabilities", {logits_dims[0], num_sampled_classes}); ctx->SetOutputDim("SampledLogits", {logits_dims[0], num_sampled_classes}); diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc index 622420c1c3..5282bcbc69 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.cc +++ b/paddle/fluid/operators/smooth_l1_loss_op.cc @@ -14,6 +14,8 @@ limitations under the License. */ #include "paddle/fluid/operators/smooth_l1_loss_op.h" +#include + namespace paddle { namespace operators { @@ -27,7 +29,14 @@ class SmoothL1LossOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(x_dims, y_dims); + bool check = true; + if ((!ctx->IsRuntime()) && + (framework::product(x_dims) <= 0 || framework::product(y_dims) <= 0)) { + check = false; + } + if (check) { + PADDLE_ENFORCE_EQ(x_dims, y_dims); + } PADDLE_ENFORCE_GE(x_dims.size(), 2, "The tensor rank of Input(X) should not be less than 2."); if (ctx->HasInput("InsideWeight")) { @@ -110,11 +119,11 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_GE(out_dims.size(), 2, "The tensor rank of Input(Out@Grad) should be 2."); - PADDLE_ENFORCE_EQ(out_dims[0], in_dims[0], - "The 1st dimension of Input(Out@Grad) must be " - "same as input."); - PADDLE_ENFORCE_EQ(out_dims[1], 1, - "The 2nd dimension of Input(Out@Grad) must be 1."); + PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[0], in_dims[0], + "The 1st dimension of Input(Out@Grad) must be " + "same as input."); + PADDLE_INFERSHAPE_ENFORCE_EQ( + ctx, out_dims[1], 1, "The 2nd dimension of Input(Out@Grad) must be 1."); auto x_grad_name = framework::GradVarName("X"); auto y_grad_name = framework::GradVarName("Y"); diff --git a/paddle/fluid/operators/squared_l2_distance_op.cc b/paddle/fluid/operators/squared_l2_distance_op.cc index 0652c163f7..6e82bf4074 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.cc +++ b/paddle/fluid/operators/squared_l2_distance_op.cc @@ -45,13 +45,26 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { int rank = framework::arity(x_dims); PADDLE_ENFORCE_GE(rank, 2, "Tensor rank should be at least equal to 2."); - PADDLE_ENFORCE_EQ(product(x_dims) / x_dims[0], product(y_dims) / y_dims[0], - "Product of dimensions expcet the first dimension of " - "input and target must be equal."); - PADDLE_ENFORCE(y_dims[0] == 1 || y_dims[0] == x_dims[0], - "First dimension of target must be equal to input " - "or to 1."); - + bool check = true; + if ((!ctx->IsRuntime()) && + (framework::product(x_dims) <= 0 || framework::product(y_dims) <= 0)) { + check = false; + } + if (check) { + PADDLE_ENFORCE_EQ(product(x_dims) / x_dims[0], + product(y_dims) / y_dims[0], + "Product of dimensions expcet the first dimension of " + "input and target must be equal."); + } + check = true; + if ((!ctx->IsRuntime()) && 
(y_dims[0] <= 0 || x_dims[0] <= 0)) { + check = false; + } + if (check) { + PADDLE_ENFORCE(y_dims[0] == 1 || y_dims[0] == x_dims[0], + "First dimension of target must be equal to input " + "or to 1."); + } ctx->SetOutputDim("sub_result", {x_dims[0], product(x_dims) / x_dims[0]}); ctx->SetOutputDim("Out", {x_dims[0], 1}); ctx->ShareLoD("X", /*->*/ "Out"); @@ -124,12 +137,12 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0], - "First dimension of output gradient and " - "input value must be equal."); - PADDLE_ENFORCE_EQ(out_dims[1], 1, - "Second dimension of output gradient " - "must be 1."); + PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[0], x_dims[0], + "First dimension of output gradient and " + "input value must be equal."); + PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, out_dims[1], 1, + "Second dimension of output gradient " + "must be 1."); auto x_grad_name = framework::GradVarName("X"); auto y_grad_name = framework::GradVarName("Y"); if (ctx->HasOutput(x_grad_name)) ctx->SetOutputDim(x_grad_name, x_dims); diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index bdb1d1bd3b..127be44525 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -356,5 +356,46 @@ using CommonType2 = typename std::add_lvalue_reference< #define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \ __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__) +#define __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL1, __VAL2, __CMP, \ + __INV_CMP, ...) \ + do { \ + auto __val1 = (__VAL1); \ + auto __val2 = (__VAL2); \ + if (!__CTX->IsRuntime()) { \ + if (__val1 == -1 || __val2 == -1) { \ + break; \ + } \ + } \ + using __TYPE1__ = decltype(__val1); \ + using __TYPE2__ = decltype(__val2); \ + using __COMMON_TYPE1__ = \ + ::paddle::platform::details::CommonType1<__TYPE1__, __TYPE2__>; \ + using __COMMON_TYPE2__ = \ + ::paddle::platform::details::CommonType2<__TYPE1__, __TYPE2__>; \ + bool __is_not_error = (static_cast<__COMMON_TYPE1__>(__val1))__CMP( \ + static_cast<__COMMON_TYPE2__>(__val2)); \ + if (UNLIKELY(!__is_not_error)) { \ + PADDLE_THROW("Enforce failed. Expected %s " #__CMP \ + " %s, but received %s:%s " #__INV_CMP " %s:%s.\n%s", \ + #__VAL1, #__VAL2, #__VAL1, \ + ::paddle::string::to_string(__val1), #__VAL2, \ + ::paddle::string::to_string(__val2), \ + ::paddle::string::Sprintf(__VA_ARGS__)); \ + } \ + } while (0) + +#define PADDLE_INFERSHAPE_ENFORCE_EQ(__CTX, __VAL0, __VAL1, ...) \ + __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, ==, !=, __VA_ARGS__) +#define PADDLE_INFERSHAPE_ENFORCE_NE(__CTX, __VAL0, __VAL1, ...) \ + __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, !=, ==, __VA_ARGS__) +#define PADDLE_INFERSHAPE_ENFORCE_GT(__CTX, __VAL0, __VAL1, ...) \ + __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, >, <=, __VA_ARGS__) +#define PADDLE_INFERSHAPE_ENFORCE_GE(__CTX, __VAL0, __VAL1, ...) \ + __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, >=, <, __VA_ARGS__) +#define PADDLE_INFERSHAPE_ENFORCE_LT(__CTX, __VAL0, __VAL1, ...) \ + __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, <, >=, __VA_ARGS__) +#define PADDLE_INFERSHAPE_ENFORCE_LE(__CTX, __VAL0, __VAL1, ...) 
\ + __PADDLE_INFERSHAPE_BINARY_COMPARE(__CTX, __VAL0, __VAL1, <=, >, __VA_ARGS__) + } // namespace platform } // namespace paddle From ccc3bd70c1d975be65d8100b3165056fe81ec5d3 Mon Sep 17 00:00:00 2001 From: xiaoting <31891223+tink2123@users.noreply.github.com> Date: Tue, 16 Apr 2019 15:27:27 +0800 Subject: [PATCH 31/38] polish doc for uniform_random and multi_box_head (#16864) --- paddle/fluid/API.spec | 4 ++-- python/paddle/fluid/layers/detection.py | 2 +- python/paddle/fluid/layers/ops.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index d02466db9a..3689d877f1 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -337,13 +337,13 @@ paddle.fluid.layers.reciprocal (ArgSpec(args=['x', 'name'], varargs=None, keywor paddle.fluid.layers.square (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '48dfb45d773dbc30126c3a7f777de5ee')) paddle.fluid.layers.softplus (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '459c5781e9d1dd88283b7c5769d7872a')) paddle.fluid.layers.softsign (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '80846bcd4bd457207457a6d5411f4148')) -paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', '308b619af849caa82bbc31e897f5e641')) +paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', 'a8c4e972b7d6742c838a37abf407ed9a')) paddle.fluid.layers.hard_shrink (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c142f5884f3255e0d6075c286bbd531e')) paddle.fluid.layers.cumsum (ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '944d7c03057f5fc88bc78acd4d82f926')) paddle.fluid.layers.thresholded_relu (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '90566ea449ea4c681435546e2f70610a')) paddle.fluid.layers.prior_box (ArgSpec(args=['input', 'image', 'min_sizes', 'max_sizes', 'aspect_ratios', 'variance', 'flip', 'clip', 'steps', 'offset', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, [1.0], [0.1, 0.1, 0.2, 0.2], False, False, [0.0, 0.0], 0.5, None, False)), ('document', '14cac0ee643fa6e026ad82aeeee75bd8')) paddle.fluid.layers.density_prior_box (ArgSpec(args=['input', 'image', 'densities', 'fixed_sizes', 'fixed_ratios', 'variance', 'clip', 'steps', 'offset', 'flatten_to_2d', 'name'], varargs=None, keywords=None, defaults=(None, None, None, [0.1, 0.1, 0.2, 0.2], False, [0.0, 0.0], 0.5, False, None)), ('document', 'a0d762bb08de9ce93bc780aa57cd5cd9')) -paddle.fluid.layers.multi_box_head (ArgSpec(args=['inputs', 'image', 'base_size', 'num_classes', 'aspect_ratios', 'min_ratio', 'max_ratio', 'min_sizes', 'max_sizes', 'steps', 'step_w', 'step_h', 'offset', 'variance', 'flip', 'clip', 'kernel_size', 'pad', 'stride', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None, 0.5, [0.1, 0.1, 0.2, 0.2], True, False, 1, 0, 1, None, False)), ('document', 'a6ab47a2fe681e52fabb7057ddf0efdd')) +paddle.fluid.layers.multi_box_head (ArgSpec(args=['inputs', 'image', 'base_size', 'num_classes', 'aspect_ratios', 'min_ratio', 
 paddle.fluid.layers.bipartite_match (ArgSpec(args=['dist_matrix', 'match_type', 'dist_threshold', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '3ddb9b966f193900193a95a3df77c3c1'))
 paddle.fluid.layers.target_assign (ArgSpec(args=['input', 'matched_indices', 'negative_indices', 'mismatch_value', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'c0b334f917828f95056f6ebe10907b1c'))
 paddle.fluid.layers.detection_output (ArgSpec(args=['loc', 'scores', 'prior_box', 'prior_box_var', 'background_label', 'nms_threshold', 'nms_top_k', 'keep_top_k', 'score_threshold', 'nms_eta'], varargs=None, keywords=None, defaults=(0, 0.3, 400, 200, 0.01, 1.0)), ('document', 'c33093a82a46e3091e789e5572588db1'))
diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py
index 0a1ddbc1db..920e9e3eaa 100644
--- a/python/paddle/fluid/layers/detection.py
+++ b/python/paddle/fluid/layers/detection.py
@@ -1542,7 +1542,7 @@ def multi_box_head(inputs,
         .. code-block:: python
 
             mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
-                inputs=[conv1, conv2, conv3, conv4, conv5, conv5],
+                inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
                 image=images,
                 num_classes=21,
                 min_ratio=20,
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 17c84b1a43..f06c0abaf9 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -82,8 +82,8 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
 
     Examples:
         .. code-block:: python
-
-            result = fluid.layers.uniform_random(shape=[32, 784])
+
+            result = fluid.layers.uniform_random(shape=[32, 784])
 
     """
     locals_var = locals().keys()
From afbc435adf63f59a0863bb3424f9fb33d5e35131 Mon Sep 17 00:00:00 2001
From: xuezhong
Date: Tue, 16 Apr 2019 08:13:55 +0000
Subject: [PATCH 32/38] fix infershape check bug test=develop

---
 paddle/fluid/operators/metrics/auc_op.cc    |  3 ++-
 paddle/fluid/operators/smooth_l1_loss_op.cc | 21 +++++++++++++++++++--
 2 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/operators/metrics/auc_op.cc b/paddle/fluid/operators/metrics/auc_op.cc
index 4670eb23b3..001d269368 100644
--- a/paddle/fluid/operators/metrics/auc_op.cc
+++ b/paddle/fluid/operators/metrics/auc_op.cc
@@ -28,7 +28,8 @@ class AucOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasInput("Label"),
                    "Input of Label should not be null.");
     auto predict_width = ctx->GetInputDim("Predict")[1];
-    PADDLE_ENFORCE_EQ(predict_width, 2, "Only support binary classification");
+    PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, predict_width, 2,
+                                 "Only support binary classification");
     auto predict_height = ctx->GetInputDim("Predict")[0];
     auto label_height = ctx->GetInputDim("Label")[0];
diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc
index 5282bcbc69..5af47b0f6d 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.cc
+++ b/paddle/fluid/operators/smooth_l1_loss_op.cc
@@ -43,8 +43,25 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE(ctx->HasInput("OutsideWeight"),
                      "If weights are provided, must specify both "
                      "inside and outside weights.");
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("InsideWeight"), x_dims);
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("OutsideWeight"), x_dims);
+      auto dims = ctx->GetInputDim("InsideWeight");
+      bool check = true;
+      if ((!ctx->IsRuntime()) &&
+          (framework::product(dims) <= 0 || framework::product(x_dims) <= 0)) {
+        check = false;
+      }
+      if (check) {
+        PADDLE_ENFORCE_EQ(dims, x_dims);
+      }
+
+      dims = ctx->GetInputDim("OutsideWeight");
+      check = true;
+      if ((!ctx->IsRuntime()) &&
+          (framework::product(dims) <= 0 || framework::product(x_dims) <= 0)) {
+        check = false;
+      }
+      if (check) {
+        PADDLE_ENFORCE_EQ(dims, x_dims);
+      }
     }
 
     ctx->SetOutputDim("Diff", x_dims);
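The smooth_l1_loss change just above shows the second idiom this series relies on: while the graph is being built, a shape may still contain unknown dimensions, in which case framework::product(dims) is non-positive and the equality check is simply deferred to runtime instead of raising a spurious error. The following is a hedged, self-contained C++ sketch of that guard; plain vectors stand in for framework::DDim, and the helper names are invented for illustration.

    #include <cassert>
    #include <functional>
    #include <numeric>
    #include <vector>

    using Dims = std::vector<long>;

    // Analogue of framework::product(dims): any -1 (unknown) or 0 entry
    // makes the result non-positive.
    long Product(const Dims& d) {
      return std::accumulate(d.begin(), d.end(), 1L, std::multiplies<long>());
    }

    // Mirror of the guard above: enforce shape equality only at runtime,
    // or when both shapes are fully known (every dim positive).
    bool ShouldCheck(bool is_runtime, const Dims& a, const Dims& b) {
      return is_runtime || (Product(a) > 0 && Product(b) > 0);
    }

    int main() {
      Dims weight{-1, 4};  // batch size unknown until runtime
      Dims x{8, 4};
      assert(!ShouldCheck(/*is_runtime=*/false, weight, x));   // deferred
      assert(ShouldCheck(/*is_runtime=*/true, Dims{8, 4}, x));  // enforced
      return 0;
    }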
From fb75bd3e9cfa22dbe18c4c62085a487107cdeea9 Mon Sep 17 00:00:00 2001
From: xuezhong
Date: Tue, 16 Apr 2019 08:37:31 +0000
Subject: [PATCH 33/38] remove test=develop

---
 paddle/fluid/operators/linear_chain_crf_op.cc | 1 -
 paddle/fluid/operators/sample_logits_op.cc    | 2 --
 2 files changed, 3 deletions(-)

diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc
index d5e29eb179..f6b090cf5f 100644
--- a/paddle/fluid/operators/linear_chain_crf_op.cc
+++ b/paddle/fluid/operators/linear_chain_crf_op.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/linear_chain_crf_op.h"
-#include
 
 namespace paddle {
 namespace operators {
diff --git a/paddle/fluid/operators/sample_logits_op.cc b/paddle/fluid/operators/sample_logits_op.cc
index 8019fceb27..9793118b70 100644
--- a/paddle/fluid/operators/sample_logits_op.cc
+++ b/paddle/fluid/operators/sample_logits_op.cc
@@ -14,8 +14,6 @@ limitations under the License. */
 #include "paddle/fluid/operators/sample_logits_op.h"
 #include "paddle/fluid/operators/math/sample_prob.h"
-#include
-
 namespace paddle {
 namespace operators {

From 4791029f190eadc4853c4f2cd74fa224063700f1 Mon Sep 17 00:00:00 2001
From: xuezhong
Date: Tue, 16 Apr 2019 08:58:12 +0000
Subject: [PATCH 34/38] remove test=develop

---
 paddle/fluid/operators/smooth_l1_loss_op.cc | 2 --
 1 file changed, 2 deletions(-)

diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc
index 5af47b0f6d..f4c284013c 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.cc
+++ b/paddle/fluid/operators/smooth_l1_loss_op.cc
@@ -14,8 +14,6 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/smooth_l1_loss_op.h"
-#include
-
 namespace paddle {
 namespace operators {

From 41740519df537898f3f6ae462b11bee194e1db5a Mon Sep 17 00:00:00 2001
From: xuezhong
Date: Tue, 16 Apr 2019 09:01:49 +0000
Subject: [PATCH 35/38] add test=develop

---
 paddle/fluid/operators/linear_chain_crf_op.cc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc
index f6b090cf5f..a94704a728 100644
--- a/paddle/fluid/operators/linear_chain_crf_op.cc
+++ b/paddle/fluid/operators/linear_chain_crf_op.cc
@@ -14,6 +14,8 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/linear_chain_crf_op.h"
+#include
+
 namespace paddle {
 namespace operators {

From 01eda557cd6dbc6b4c8bc53d26b0d8f0f3a893ee Mon Sep 17 00:00:00 2001
From: phlrain
Date: Tue, 16 Apr 2019 09:02:12 +0000
Subject: [PATCH 36/38] fix bpr loss; test=develop

---
 paddle/fluid/operators/bpr_loss_op.cc | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/operators/bpr_loss_op.cc b/paddle/fluid/operators/bpr_loss_op.cc
index b2dbaecfcf..51c4d87814 100644
--- a/paddle/fluid/operators/bpr_loss_op.cc
+++ b/paddle/fluid/operators/bpr_loss_op.cc
@@ -32,10 +32,14 @@ class BprLossOp : public framework::OperatorWithKernel {
     int rank = x_dims.size();
     PADDLE_ENFORCE_EQ(rank, label_dims.size(),
                       "Input(X) and Input(Label) shall have the same rank.");
-    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
-                      framework::slice_ddim(label_dims, 0, rank - 1),
-                      "Input(X) and Input(Label) shall have the same shape "
-                      "except the last dimension.");
+
+    if (ctx->IsRuntime() || (framework::product(x_dims) > 0 &&
+                             framework::product(label_dims) > 0)) {
+      PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
+                        framework::slice_ddim(label_dims, 0, rank - 1),
+                        "Input(X) and Input(Label) shall have the same shape "
+                        "except the last dimension.");
+    }
 
     auto y_dims = x_dims;
     y_dims[rank - 1] = 1;
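The bpr_loss patch above applies the same runtime deferral to a prefix comparison: X and Label must agree on every dimension except the last. As a small illustration, here is what the slice_ddim(x_dims, 0, rank - 1) comparison amounts to, sketched with plain vectors instead of framework::DDim; the helper name is made up for this example.

    #include <cassert>
    #include <vector>

    using Dims = std::vector<long>;

    // Vector analogue of slice_ddim(x_dims, 0, rank - 1) ==
    // slice_ddim(label_dims, 0, rank - 1): compare all dims but the last.
    bool LeadingDimsEqual(const Dims& x, const Dims& label) {
      if (x.size() != label.size()) return false;  // ranks must match first
      for (std::size_t i = 0; i + 1 < x.size(); ++i) {
        if (x[i] != label[i]) return false;
      }
      return true;
    }

    int main() {
      // [batch, num_classes] vs. [batch, 1]: equal except the last dim.
      assert(LeadingDimsEqual({32, 100}, {32, 1}));
      assert(!LeadingDimsEqual({32, 100}, {16, 1}));
      return 0;
    }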
From 9c6ee7cf4cefa66fc4a592e91f36e540c10cf788 Mon Sep 17 00:00:00 2001
From: xuezhong
Date: Tue, 16 Apr 2019 09:19:38 +0000
Subject: [PATCH 37/38] add test=develop

---
 paddle/fluid/operators/smooth_l1_loss_op.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc
index f4c284013c..22b621248d 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.cc
+++ b/paddle/fluid/operators/smooth_l1_loss_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/smooth_l1_loss_op.h"
+#include
 
 namespace paddle {
 namespace operators {

From 008fd785fd0b32cf1990a18e0cbbcfbf0fc0dc5c Mon Sep 17 00:00:00 2001
From: tangwei12
Date: Tue, 16 Apr 2019 22:09:34 +0800
Subject: [PATCH 38/38] fix/positive negative pair op (#16895)

* fix infershape in runtime

* fix infershape in runtime

test=develop

* fix infershape in runtime
---
 .../operators/positive_negative_pair_op.cc | 40 +++++++++++--------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/paddle/fluid/operators/positive_negative_pair_op.cc b/paddle/fluid/operators/positive_negative_pair_op.cc
index 99256e408d..e917e778e4 100644
--- a/paddle/fluid/operators/positive_negative_pair_op.cc
+++ b/paddle/fluid/operators/positive_negative_pair_op.cc
@@ -61,23 +61,31 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel {
     auto query_dim = ctx->GetInputDim("QueryID");
     PADDLE_ENFORCE_EQ(score_dim.size(), 2, "Score should be a 2-D tensor.");
     PADDLE_ENFORCE_EQ(label_dim.size(), 2, "Label should be a 2-D tensor.");
-    PADDLE_ENFORCE_EQ(
-        label_dim[0], score_dim[0],
-        "Tensor Score and Label should have the same height (batch size).");
-    PADDLE_ENFORCE_EQ(label_dim[1], 1,
-                      "The width of Label should be 1, i.e. each item should "
-                      "have a scalar label.");
-    PADDLE_ENFORCE(query_dim == label_dim,
-                   "QueryID should have the same shape as Label.");
-    if (ctx->HasInput("Weight")) {
-      PADDLE_ENFORCE(ctx->GetInputDim("Weight") == label_dim,
-                     "Weight should have the same shape as Label.");
+
+    if (ctx->IsRuntime() ||
+        (score_dim[0] > 0 && label_dim[0] > 0 && query_dim[0] > 0)) {
+      PADDLE_ENFORCE_EQ(
+          label_dim[0], score_dim[0],
+          "Tensor Score and Label should have the same height (batch size).");
+
+      PADDLE_ENFORCE_EQ(label_dim[1], 1,
+                        "The width of Label should be 1, i.e. each item should "
+                        "have a scalar label.");
+
+      PADDLE_ENFORCE(query_dim == label_dim,
+                     "QueryID should have the same shape as Label.");
+
+      if (ctx->HasInput("Weight")) {
+        PADDLE_ENFORCE(ctx->GetInputDim("Weight") == label_dim,
+                       "Weight should have the same shape as Label.");
+      }
+
+      int column = ctx->Attrs().Get<int>("column");
+      auto depth = score_dim[1];
+      PADDLE_ENFORCE(column < depth && column >= -depth,
+                     "Attribute column should be in the range of [-%l, %l)",
+                     depth, depth);
     }
-    int column = ctx->Attrs().Get<int>("column");
-    auto depth = score_dim[1];
-    PADDLE_ENFORCE(column < depth && column >= -depth,
-                   "Attribute column should be in the range of [-%l, %l)",
-                   depth, depth);
 
     ctx->SetOutputDim("PositivePair", scalar_dim);
     ctx->SetOutputDim("NegativePair", scalar_dim);