change paddle.fluid.layers.reduce_sum to paddle.sum in sample codes (#27998)

* change paddle.fluid.layers.reduce_sum to paddle.sum in sample codes

* format codes
chentianyu03 committed by GitHub 4 years ago · commit 05fd49e974 (parent f94d053705)
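For reference, the core substitution made in every sample is the one-line API swap below. This is a minimal sketch assuming Paddle 2.0+ in dygraph mode with placeholder values, not code taken from the diff itself:

    import paddle

    x = paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]], dtype='float32')

    # 1.x style being removed from the sample code:
    #   loss = paddle.fluid.layers.reduce_sum(x)

    # 2.x style used in the updated samples; sums every element of x by default
    loss = paddle.sum(x)
    print(loss.numpy())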

@@ -712,7 +712,7 @@ void BindImperative(py::module *m_ptr) {
                     tmp.stop_gradient=False
                     inputs.append(tmp)
                 ret = paddle.sums(inputs2)
-                loss = paddle.fluid.layers.reduce_sum(ret)
+                loss = paddle.sum(ret)
                 loss.backward()
                 print("Before clear_gradient {}".format(loss.grad))
                 loss.clear_gradient()
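A self-contained version of the updated docstring sample above might look like the sketch below. It mirrors the docstring's pattern but is an assumption: the values are placeholders and paddle.add_n stands in for the older paddle.sums:

    import paddle

    inputs = []
    for _ in range(10):
        tmp = paddle.to_tensor([0.1, 0.2, 0.3])  # placeholder values
        tmp.stop_gradient = False                # track gradients for tmp
        inputs.append(tmp)
    ret = paddle.add_n(inputs)                   # elementwise sum of the list
    loss = paddle.sum(ret)                       # reduce to a scalar loss
    loss.backward()
    print("Before clear_gradient {}".format(loss.grad))
    loss.clear_gradient()                        # zero the stored gradient
    print("After clear_gradient {}".format(loss.grad))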

@@ -305,13 +305,15 @@ def binary_cross_entropy_with_logits(logit,
         out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(
             logit, label, name=sigmoid_name)
 
-        one = paddle.fluid.layers.fill_constant(shape=[1], value=1.0, dtype=logit.dtype)
+        one = paddle.fluid.layers.fill_constant(
+            shape=[1], value=1.0, dtype=logit.dtype)
         if pos_weight is not None:
             fluid.data_feeder.check_variable_and_dtype(
                 pos_weight, 'pos_weight', ['float32', 'float64'],
                 'binary_cross_entropy_with_logits')
             log_weight = paddle.add(
-                paddle.multiply(label, paddle.fluid.layers.elementwise_sub(pos_weight, one)),
+                paddle.multiply(
+                    label, paddle.fluid.layers.elementwise_sub(pos_weight, one)),
                 one)
             pos_weight_name = name if reduction == 'none' and weight is None else None
             out = paddle.multiply(out, log_weight, name=pos_weight_name)
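The hunk above only re-wraps long lines inside the implementation. For context, the public entry point it belongs to can be exercised with a short sketch like this (an assumption based on the Paddle 2.0 functional API, with made-up values):

    import paddle
    import paddle.nn.functional as F

    logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype='float32')        # raw scores
    label = paddle.to_tensor([1.0, 0.0, 1.0], dtype='float32')        # binary targets
    pos_weight = paddle.to_tensor([2.0, 2.0, 2.0], dtype='float32')   # extra weight on positives

    loss = F.binary_cross_entropy_with_logits(
        logit, label, pos_weight=pos_weight, reduction='mean')
    print(loss.numpy())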
@@ -618,7 +620,8 @@ def margin_ranking_loss(input,
 
     if margin != 0.0:
         margin_var = out.block.create_var(dtype=out.dtype)
-        paddle.fluid.layers.fill_constant([1], out.dtype, margin, out=margin_var)
+        paddle.fluid.layers.fill_constant(
+            [1], out.dtype, margin, out=margin_var)
         out = paddle.add(out, margin_var)
 
     result_out = helper.create_variable_for_type_inference(input.dtype)
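The margin_ranking_loss change is likewise a line re-wrap only. A minimal usage sketch of the public API (assuming the Paddle 2.0 functional interface, with placeholder tensors):

    import paddle
    import paddle.nn.functional as F

    input = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]], dtype='float32')
    other = paddle.to_tensor([[2.0, 1.0], [2.0, 4.0]], dtype='float32')
    # label is 1 where `input` should rank higher than `other`, -1 otherwise
    label = paddle.to_tensor([[1.0, -1.0], [-1.0, -1.0]], dtype='float32')

    loss = F.margin_ranking_loss(input, other, label, margin=0.1, reduction='mean')
    print(loss.numpy())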
@@ -729,7 +732,8 @@ def l1_loss(input, label, reduction='mean', name=None):
         unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs')
         return paddle.mean(unreduced, name=name)
     else:
-        return paddle.fluid.layers.elementwise_sub(input, label, act='abs', name=name)
+        return paddle.fluid.layers.elementwise_sub(
+            input, label, act='abs', name=name)
 
 
 def nll_loss(input,
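The l1_loss hunk is again formatting-only. A short sketch of calling the public API (an assumption: Paddle 2.0's functional interface and placeholder tensors):

    import paddle
    import paddle.nn.functional as F

    input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]], dtype='float32')
    label = paddle.to_tensor([[1.7, 1.0], [0.4, 0.5]], dtype='float32')

    # reduction='none' exercises the elementwise_sub(..., act='abs') branch shown above
    l1 = F.l1_loss(input, label, reduction='none')
    print(l1.numpy())   # elementwise |input - label|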
@@ -1342,7 +1346,7 @@ def sigmoid_focal_loss(logit,
             label = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype='float32')
             one = paddle.to_tensor([1.], dtype='float32')
             fg_label = paddle.greater_equal(label, one)
-            fg_num = paddle.fluid.layers.reduce_sum(paddle.cast(fg_label, dtype='float32'))
+            fg_num = paddle.sum(paddle.cast(fg_label, dtype='float32'))
             output = paddle.nn.functional.sigmoid_focal_loss(logit, label, normalizer=fg_num)
             print(output.numpy())  # [0.65782464]
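The diff shows only part of that docstring example; a self-contained sketch with a placeholder logit tensor (so the printed value will differ from the [0.65782464] shown above) could be:

    import paddle
    import paddle.nn.functional as F

    logit = paddle.to_tensor([[0.97, 0.91, 0.03],
                              [0.55, 0.43, 0.71]], dtype='float32')  # placeholder scores
    label = paddle.to_tensor([[1.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0]], dtype='float32')
    one = paddle.to_tensor([1.], dtype='float32')
    fg_label = paddle.greater_equal(label, one)                      # mask of foreground labels
    fg_num = paddle.sum(paddle.cast(fg_label, dtype='float32'))      # count of positives as the normalizer
    output = F.sigmoid_focal_loss(logit, label, normalizer=fg_num)
    print(output.numpy())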
