Update the demo code and the doc of varbase.backward. (#26506)

* update the demo code and the doc of varbase.backward.

* update the doc of the fake interface `paddle.fluid.Variable`.

* remove BackwardStrategy.
Zhen Wang committed by GitHub
parent 1c898b66d6
commit f9066e6a6f
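The user-visible change, in short: `BackwardStrategy` disappears and sorted gradient summation becomes the global flag `FLAGS_sort_sum_gradient`. Below is a minimal before/after sketch assembled from the demo code in this diff; the surrounding setup is illustrative, not part of the commit.

# Before this commit (old dygraph API):
import numpy as np
import paddle.fluid as fluid

x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
    inputs = [fluid.dygraph.to_variable(x) for _ in range(10)]
    for t in inputs:
        t.stop_gradient = False            # otherwise no path to the loss needs a gradient
    loss = fluid.layers.reduce_sum(fluid.layers.sums(inputs))
    backward_strategy = fluid.dygraph.BackwardStrategy()
    backward_strategy.sort_sum_gradient = True
    loss.backward(backward_strategy)       # strategy object passed per call

# After this commit (paddle 2.0-style imperative API):
import numpy as np
import paddle

paddle.disable_static()
paddle.fluid.set_flags({'FLAGS_sort_sum_gradient': True})   # optional, defaults to False
x = np.ones([2, 2], np.float32)
inputs = [paddle.to_tensor(x) for _ in range(10)]
for t in inputs:
    t.stop_gradient = False
loss = paddle.reduce_sum(paddle.sums(inputs))
loss.backward()                            # no strategy argument anymore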

@@ -1,33 +0,0 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Created by Jiabin on 2019-04-25.
//
#pragma once
namespace paddle {
namespace imperative {
namespace detail {
struct BackwardStrategy {
/* DyGraph now support two kinds of backward strategy, one is sorted sum
* gradient, another is sum gradient once they are created */
// TODO(jiabin): add more Strategy when we support
bool sorted_sum_gradient_{false};
};
} // namespace detail
} // namespace imperative
} // namespace paddle

@@ -30,12 +30,13 @@
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(sort_sum_gradient);
namespace paddle {
namespace imperative {
void BasicEngine::Init(VarBase* var, const detail::BackwardStrategy& strategy,
bool retain_graph) {
backward_strategy_ = strategy;
void BasicEngine::Init(VarBase* var, bool retain_graph) {
sorted_sum_gradient_ = FLAGS_sort_sum_gradient;
retain_graph_ = retain_graph;
init_node_ = var->GradVarBase()->GradNode();
var->GradVarBase()->ClearGradNode();
@@ -105,7 +106,7 @@ void BasicEngine::PrepareGradAccumulators(const OpBase& op) {
auto& accumulator = accumulators_[var.get()];
if (!accumulator) {
if (backward_strategy_.sorted_sum_gradient_) {
if (sorted_sum_gradient_) {
accumulator.reset(new SortedGradientAccumulator(var.get()));
} else {
accumulator.reset(new EagerGradientAccumulator(var.get()));
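Since `Init` now samples `FLAGS_sort_sum_gradient` each time a backward pass starts, the accumulation mode can be switched between passes from Python without rebuilding anything. A sketch using the fluid API that appears in the updated tests below (toy forward pass is illustrative):

import numpy as np
import paddle.fluid as fluid

x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
    a = fluid.dygraph.to_variable(x)
    a.stop_gradient = False

    fluid.set_flags({'FLAGS_sort_sum_gradient': True})
    loss1 = fluid.layers.reduce_sum(fluid.layers.scale(a))
    loss1.backward()    # Init reads True -> SortedGradientAccumulator

    a.clear_gradient()
    fluid.set_flags({'FLAGS_sort_sum_gradient': False})
    loss2 = fluid.layers.reduce_sum(fluid.layers.scale(a))
    loss2.backward()    # Init reads False -> EagerGradientAccumulator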

@@ -18,7 +18,6 @@
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/imperative/backward_strategy.h"
#include "paddle/fluid/imperative/engine.h"
#include "paddle/fluid/imperative/gradient_accumulator.h"
@@ -30,8 +29,7 @@ class OpBase;
class BasicEngine : public Engine {
public:
void Init(VarBase* var, const detail::BackwardStrategy& strategy,
bool retain_graph = false);
void Init(VarBase* var, bool retain_graph = false);
void Execute() override;
@@ -46,7 +44,7 @@ class BasicEngine : public Engine {
private:
std::shared_ptr<GradOpNode> init_node_;
detail::BackwardStrategy backward_strategy_;
bool sorted_sum_gradient_;
std::unordered_map<GradOpNode*, size_t> node_deps_;
std::unordered_map<VariableWrapper*, std::unique_ptr<GradientAccumulator>>
accumulators_;

@@ -33,6 +33,8 @@
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/string/string_helper.h"
DECLARE_bool(sort_sum_gradient);
namespace paddle {
namespace imperative {
@@ -529,8 +531,7 @@ class PartialGradTask {
const std::vector<std::shared_ptr<VarBase>> &output_targets,
const std::vector<std::shared_ptr<VarBase>> &output_grads,
const std::vector<std::shared_ptr<VarBase>> &no_grad_vars,
const platform::Place &place,
const detail::BackwardStrategy &strategy, bool create_graph,
const platform::Place &place, bool create_graph,
bool retain_graph, bool allow_unused, bool only_inputs);
std::vector<std::shared_ptr<VarBase>> Run();
@@ -577,7 +578,7 @@ class PartialGradTask {
bool retain_graph_;
bool allow_unused_;
bool only_inputs_;
detail::BackwardStrategy strategy_;
bool sorted_sum_gradient_{FLAGS_sort_sum_gradient};
};
PartialGradTask::PartialGradTask(
@@ -585,15 +586,14 @@ PartialGradTask::PartialGradTask(
const std::vector<std::shared_ptr<VarBase>> &output_targets,
const std::vector<std::shared_ptr<VarBase>> &output_grads,
const std::vector<std::shared_ptr<VarBase>> &no_grad_vars,
const platform::Place &place, const detail::BackwardStrategy &strategy,
bool create_graph, bool retain_graph, bool allow_unused, bool only_inputs) {
const platform::Place &place, bool create_graph, bool retain_graph,
bool allow_unused, bool only_inputs) {
input_targets_ = input_targets;
place_ = place;
create_graph_ = create_graph;
retain_graph_ = retain_graph;
allow_unused_ = allow_unused;
only_inputs_ = only_inputs;
strategy_ = strategy;
PADDLE_ENFORCE_EQ(only_inputs_, true,
platform::errors::Unimplemented(
@@ -981,7 +981,7 @@ void PartialGradTask::PrepareInitialGradientAccumulators(const OpBase *op) {
if (!accumulator) {
accumulator.reset(new GradientAccumulationInfo(
var, strategy_.sorted_sum_gradient_, create_graph_));
var, sorted_sum_gradient_, create_graph_));
}
accumulator->IncreaseTotalRefCnt();
@@ -1033,11 +1033,11 @@ PartialGradEngine::PartialGradEngine(
const std::vector<std::shared_ptr<VarBase>> &output_targets,
const std::vector<std::shared_ptr<VarBase>> &output_grads,
const std::vector<std::shared_ptr<VarBase>> &no_grad_vars,
const platform::Place &place, const detail::BackwardStrategy &strategy,
bool create_graph, bool retain_graph, bool allow_unused, bool only_inputs)
const platform::Place &place, bool create_graph, bool retain_graph,
bool allow_unused, bool only_inputs)
: task_(new PartialGradTask(input_targets, output_targets, output_grads,
no_grad_vars, place, strategy, create_graph,
retain_graph, allow_unused, only_inputs)) {}
no_grad_vars, place, create_graph, retain_graph,
allow_unused, only_inputs)) {}
PartialGradEngine::~PartialGradEngine() { Clear(); }

@@ -16,7 +16,6 @@
#include <memory>
#include <vector>
#include "paddle/fluid/imperative/backward_strategy.h"
#include "paddle/fluid/imperative/engine.h"
#include "paddle/fluid/platform/place.h"
@@ -33,8 +32,7 @@ class PartialGradEngine : public Engine {
const std::vector<std::shared_ptr<VarBase>> &output_targets,
const std::vector<std::shared_ptr<VarBase>> &output_grads,
const std::vector<std::shared_ptr<VarBase>> &no_grad_vars,
const platform::Place &place,
const detail::BackwardStrategy &strategy, bool create_graph,
const platform::Place &place, bool create_graph,
bool retain_graph, bool allow_unused, bool only_inputs);
~PartialGradEngine();

@@ -240,9 +240,8 @@ TEST(test_tracer, test_trace_op_with_multi_device_inputs) {
framework::AttributeMap reduce_attr_map;
tracer.TraceOp("reduce_sum", reduce_in, reduce_out, reduce_attr_map,
gpu_place, true);
detail::BackwardStrategy back_st;
imperative::BasicEngine engine;
engine.Init(reduce_sum_out.get(), back_st);
engine.Init(reduce_sum_out.get());
engine.Execute();
framework::LoDTensor rlt;
@@ -356,9 +355,8 @@ TEST(test_tracer, test_var_without_grad_var) {
ASSERT_EQ(y_in->GradVarBase()->GradOpNum(), 0UL);
ASSERT_EQ(vout->GradVarBase()->GradOpNum(), 1UL);
detail::BackwardStrategy back_st;
imperative::BasicEngine engine;
engine.Init(vout.get(), back_st);
engine.Init(vout.get());
engine.Execute();
// check the grad

@@ -508,3 +508,16 @@ DEFINE_int32(
"summary will be shown."
"If FLAGS_call_stack_level == 2, the python stack, c++ stack, and "
"error message summary will be shown.");
/**
* Debug related FLAG
* Name: sort_sum_gradient
* Since Version: 2.0.0
* Value Range: bool, default=false
* Example:
* Note: If True, gradients are summed by the reverse order of
* the forward execution sequence.
*/
DEFINE_bool(sort_sum_gradient, false,
"Sum gradients by the reverse order of "
"the forward execution sequence.");

@@ -38,6 +38,7 @@ DECLARE_bool(enable_rpc_profiler);
DECLARE_int32(multiple_of_cupti_buffer_size);
DECLARE_bool(reader_queue_speed_test_mode);
DECLARE_int32(call_stack_level);
DECLARE_bool(sort_sum_gradient);
// device management
DECLARE_int32(paddle_num_threads);
// executor
@@ -340,7 +341,7 @@ static void RegisterGlobalVarGetterSetter() {
REGISTER_PUBLIC_GLOBAL_VAR(
FLAGS_eager_delete_tensor_gb, FLAGS_enable_parallel_graph,
FLAGS_allocator_strategy, FLAGS_use_system_allocator, FLAGS_check_nan_inf,
FLAGS_call_stack_level, FLAGS_cpu_deterministic,
FLAGS_call_stack_level, FLAGS_sort_sum_gradient, FLAGS_cpu_deterministic,
FLAGS_enable_rpc_profiler, FLAGS_multiple_of_cupti_buffer_size,
FLAGS_reader_queue_speed_test_mode, FLAGS_pe_profile_fname,
FLAGS_print_sub_graph_dir, FLAGS_fraction_of_cpu_memory_to_use,

@@ -30,7 +30,6 @@ limitations under the License. */
#include "paddle/fluid/imperative/all_reduce.h"
#include "paddle/fluid/imperative/amp_auto_cast.h"
#include "paddle/fluid/imperative/backward_strategy.h"
#include "paddle/fluid/imperative/basic_engine.h"
#include "paddle/fluid/imperative/data_loader.h"
#include "paddle/fluid/imperative/layer.h"
@@ -507,50 +506,6 @@ void BindImperative(py::module *m_ptr) {
[]() { memory::allocation::MemoryMapFdSet::Instance().Clear(); });
#endif
py::class_<imperative::detail::BackwardStrategy> backward_strategy(
m, "BackwardStrategy", R"DOC(
BackwardStrategy is a descriptor of how to run the backward process.
**Note**:
**This API is only available in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **Mode**
Attribute:
**sort_sum_gradient**:
If framework will sum the gradient by the reverse order of trace. eg. x_var ( :ref:`api_guide_Variable` ) will be the input of multiple OP such as :ref:`api_fluid_layers_scale` , this attr will decide if framework will sum gradient of `x_var` by the reverse order.
By Default: False
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
x_var = fluid.dygraph.to_variable(x)
sums_inputs = []
# x_var will be multi-scales' input here
for _ in range(10):
sums_inputs.append(fluid.layers.scale(x_var))
ret2 = fluid.layers.sums(sums_inputs)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
)DOC");
backward_strategy.def(py::init())
.def_property("sort_sum_gradient",
[](const imperative::detail::BackwardStrategy &self) {
return self.sorted_sum_gradient_;
},
[](imperative::detail::BackwardStrategy &self,
bool sorted_sum_gradient) {
self.sorted_sum_gradient_ = sorted_sum_gradient;
});
m.def("start_imperative_gperf_profiler",
[]() { imperative::StartProfile(); });
@@ -745,21 +700,18 @@ void BindImperative(py::module *m_ptr) {
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
loss2.backward()
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))
)DOC")
.def("_run_backward",
[](imperative::VarBase &self,
const imperative::detail::BackwardStrategy &bckst,
const imperative::Tracer &tracer, bool retain_graph) {
[](imperative::VarBase &self, const imperative::Tracer &tracer,
bool retain_graph) {
// TODO(jiabin): when we impl more backward execution we can
// select them
auto *engine = tracer.GetEngine();
engine->Init(&self, bckst, retain_graph);
engine->Init(&self, retain_graph);
VLOG(3) << "Start backward";
engine->Execute();
VLOG(3) << "Finish backward";
@@ -1024,13 +976,11 @@ void BindImperative(py::module *m_ptr) {
&output_targets,
const std::vector<std::shared_ptr<imperative::VarBase>> &output_grads,
const std::vector<std::shared_ptr<imperative::VarBase>> &no_grad_vars,
const platform::Place &place,
const imperative::detail::BackwardStrategy &strategy,
bool create_graph, bool retain_graph, bool allow_unused,
bool only_inputs) {
const platform::Place &place, bool create_graph, bool retain_graph,
bool allow_unused, bool only_inputs) {
imperative::PartialGradEngine engine(
input_targets, output_targets, output_grads, no_grad_vars, place,
strategy, create_graph, retain_graph, allow_unused, only_inputs);
create_graph, retain_graph, allow_unused, only_inputs);
engine.Execute();
return engine.GetResult();
},

@@ -225,7 +225,6 @@ from .framework import CPUPlace #DEFINE_ALIAS
from .framework import CUDAPlace #DEFINE_ALIAS
from .framework import CUDAPinnedPlace #DEFINE_ALIAS
from .framework import BackwardStrategy #DEFINE_ALIAS
from .framework import to_variable #DEFINE_ALIAS
from .framework import grad #DEFINE_ALIAS
from .framework import no_grad #DEFINE_ALIAS

@@ -196,6 +196,7 @@ def __bootstrap__():
'free_idle_chunk',
'free_when_no_cache_hit',
'call_stack_level',
'sort_sum_gradient',
]
if 'Darwin' not in sysstr:
read_env_flags.append('use_pinned_memory')

@@ -38,9 +38,6 @@ from .checkpoint import *
from . import learning_rate_scheduler
from .learning_rate_scheduler import *
from . import backward_strategy
from .backward_strategy import *
from . import jit
from .jit import *
@@ -69,7 +66,6 @@ __all__ += nn.__all__
__all__ += parallel.__all__
__all__ += checkpoint.__all__
__all__ += learning_rate_scheduler.__all__
__all__ += backward_strategy.__all__
__all__ += jit.__all__
__all__ += io.__all__
__all__ += rnn.__all__

@@ -1,19 +0,0 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import core
__all__ = ["BackwardStrategy"]
BackwardStrategy = core.BackwardStrategy

@@ -319,8 +319,7 @@ def grad(outputs,
create_graph=False,
only_inputs=True,
allow_unused=False,
no_grad_vars=None,
backward_strategy=None):
no_grad_vars=None):
'''
.. note::
**This API is ONLY available in Dygraph mode.**
@@ -363,9 +362,6 @@
their gradients if allow_unused=True. Default False.
no_grad_vars (Variable|list(Variable)|tuple(Variable)|set(Variable), optional):
the Variables whose gradients are not needed to compute. Default None.
backward_strategy (BackwardStrategy, optional): The backward strategy to
compute gradients. See :ref:`api_fluid_dygraph_BackwardStrategy` for
details. Default None.
Returns:
tuple: a tuple of Variables, whose length is the same as the Variable number
@@ -503,12 +499,6 @@
raise AssertionError(
"no_grad_vars must be None, Variable or list/tuple/set of Variables")
if backward_strategy is None:
backward_strategy = core.BackwardStrategy()
assert isinstance(backward_strategy, core.BackwardStrategy), \
"backward_strategy must be type paddle.fluid.dygraph.BackwardStrategy"
assert isinstance(create_graph, bool), "create_graph must be True or False"
if retain_graph is None:
@@ -524,9 +514,9 @@
place = core.Place()
place.set_place(framework._current_expected_place())
return core.dygraph_partial_grad(
inputs, outputs, grad_outputs, no_grad_vars, place, backward_strategy,
create_graph, retain_graph, allow_unused, only_inputs)
return core.dygraph_partial_grad(inputs, outputs, grad_outputs,
no_grad_vars, place, create_graph,
retain_graph, allow_unused, only_inputs)
@framework.dygraph_only
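Callers of `fluid.dygraph.grad` that previously passed `backward_strategy` now set the flag instead. A sketch of the new call shape, mirroring the test updates later in this diff (the toy forward pass is illustrative):

import numpy as np
import paddle.fluid as fluid

fluid.set_flags({'FLAGS_sort_sum_gradient': True})   # replaces backward_strategy=...
with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.ones([2, 2], np.float32))
    x.stop_gradient = False
    y = fluid.layers.reduce_sum(x * x)
    dx = fluid.dygraph.grad(
        outputs=[y],
        inputs=[x],
        create_graph=False,
        retain_graph=False,
        only_inputs=True,
        allow_unused=False)                          # note: no backward_strategy argument
    print(dx[0].numpy())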

@@ -15,7 +15,6 @@
import inspect
from .. import framework
from .. import core
from . import BackwardStrategy
from ..framework import Variable, Parameter, ParamBase
from .base import switch_to_static_graph
import numpy as np
@@ -129,19 +128,18 @@ def monkey_patch_varbase():
framework._current_expected_place())
@framework.dygraph_only
def backward(self, backward_strategy=None, retain_graph=False):
def backward(self, retain_graph=False):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Run backward of current Graph which starts from current Variable
Run backward of current Graph which starts from current Tensor.
Args:
backward_strategy( :ref:`api_fluid_dygraph_BackwardStrategy` ): The Backward Strategy to run backward
retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would
like to add more ops to the built graph after calling this method(`backward`), set the parameter
`retain_graph` to True, then the grads will be retained. Thus, seting it to False is much more memory-efficient.
Defaults to False.
like to add more ops to the built graph after calling this method( :code:`backward` ), set the parameter
:code:`retain_graph` to True, then the grads will be retained. Thus, seting it to False is much more memory-efficient.
Defaults to False.
Returns:
NoneType: None
@@ -149,32 +147,25 @@ def monkey_patch_varbase():
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.disable_static()
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
# if we don't set tmp's stop_gradient as False then, all path to loss will has no gradient since
# there is no one need gradient on it.
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
inputs = []
for _ in range(10):
tmp = paddle.to_tensor(x)
# if we don't set tmp's stop_gradient as False then, all path to loss will has no gradient since
# there is no one need gradient on it.
tmp.stop_gradient=False
inputs.append(tmp)
ret = paddle.sums(inputs)
loss = paddle.reduce_sum(ret)
loss.backward()
"""
if framework.in_dygraph_mode():
if backward_strategy is None:
backward_strategy = BackwardStrategy()
backward_strategy.sort_sum_gradient = False
self._run_backward(backward_strategy,
framework._dygraph_tracer(), retain_graph)
self._run_backward(framework._dygraph_tracer(), retain_graph)
else:
raise ValueError(
"Variable.backward() is only available in DyGraph mode")
@@ -205,9 +196,7 @@ def monkey_patch_varbase():
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
loss2.backward()
print(loss2.gradient())
"""

@@ -1106,15 +1106,18 @@ class Variable(object):
pass
@fake_interface_only
def backward(self, backward_strategy=None):
def backward(self, retain_graph=False):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Run backward of current Graph which starts from current Variable
Run backward of current Graph which starts from current Tensor.
Args:
backward_strategy( :ref:`api_fluid_dygraph_BackwardStrategy` ): The Backward Strategy to run backward
retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would
like to add more ops to the built graph after calling this method( :code:`backward` ), set the parameter
:code:`retain_graph` to True, then the grads will be retained. Thus, seting it to False is much more memory-efficient.
Defaults to False.
Returns:
NoneType: None
@@ -1122,23 +1125,21 @@ class Variable(object):
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.disable_static()
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
# if we don't set tmp's stop_gradient as False then, all path to loss will has no gradient since
# there is no one need gradient on it.
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
inputs = []
for _ in range(10):
tmp = paddle.to_tensor(x)
# if we don't set tmp's stop_gradient as False then, all path to loss will has no gradient since
# there is no one need gradient on it.
tmp.stop_gradient=False
inputs.append(tmp)
ret = paddle.sums(inputs)
loss = paddle.reduce_sum(ret)
loss.backward()
"""
pass
@@ -1170,9 +1171,7 @@ class Variable(object):
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
loss2.backward()
print(loss2.gradient())
# example2: return tuple of ndarray
@@ -1218,9 +1217,7 @@ class Variable(object):
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
loss2.backward()
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))

@@ -38,8 +38,7 @@ class TestDirectory(unittest.TestCase):
'paddle.enable_static', 'paddle.disable_static',
'paddle.in_dynamic_mode', 'paddle.to_variable', 'paddle.grad',
'paddle.no_grad', 'paddle.save', 'paddle.load',
'paddle.static.save', 'paddle.static.load',
'paddle.BackwardStrategy', 'paddle.ParallelEnv',
'paddle.static.save', 'paddle.static.load', 'paddle.ParallelEnv',
'paddle.prepare_context', 'paddle.DataParallel', 'paddle.jit',
'paddle.jit.TracedLayer', 'paddle.jit.to_static',
'paddle.jit.ProgramTranslator', 'paddle.jit.TranslatedLayer',
@@ -98,7 +97,6 @@ class TestDirectory(unittest.TestCase):
'paddle.imperative.enable', 'paddle.imperative.guard',
'paddle.imperative.grad', 'paddle.imperative.no_grad',
'paddle.imperative.save', 'paddle.imperative.load',
'paddle.imperative.BackwardStrategy',
'paddle.imperative.ParallelEnv',
'paddle.imperative.prepare_context',
'paddle.imperative.DataParalell', 'paddle.imperative.jit',

@@ -238,8 +238,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
out2 = linear2(b)
out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1)
backward_strategy = fluid.dygraph.BackwardStrategy()
out.backward(backward_strategy)
out.backward()
self.assertTrue(linear.weight.gradient() is None)
self.assertTrue(out1.gradient() is None)
@@ -311,9 +310,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
out2 = linear2(b)
out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
out.backward(backward_strategy)
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
out.backward()
self.assertTrue(linear.weight.gradient() is None)
self.assertTrue(out1.gradient() is None)

@@ -314,9 +314,8 @@ class TestImperative(unittest.TestCase):
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
loss2.backward()
self.assertTrue(np.allclose(ret.numpy(), x * 10))
self.assertTrue(np.allclose(inputs[0].gradient(), x))
@@ -403,9 +402,8 @@ class TestImperative(unittest.TestCase):
x2 = l2(var_inp2)[0]
self.assertIsNotNone(x2)
dy_out2 = x2.numpy()
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
x2.backward(backward_strategy)
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
x2.backward()
dy_grad2 = l2._x_for_debug.gradient()
with new_program_scope():
@@ -442,9 +440,8 @@ class TestImperative(unittest.TestCase):
mlp2 = MLP(input_size=2)
out2 = mlp2(var_inp2)
dy_out2 = out2.numpy()
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
out2.backward(backward_strategy)
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
out2.backward()
dy_grad2 = mlp2._linear1.weight.gradient()
with new_program_scope():
@@ -552,9 +549,8 @@ class TestImperative(unittest.TestCase):
simple_rnn2 = SimpleRNN()
outs2, pre_hiddens2 = simple_rnn2.forward(var_inp2)
dy_out2 = outs2[3].numpy()
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
outs2[3].backward(backward_strategy)
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
outs2[3].backward()
dy_grad_h2o2 = simple_rnn2._cell._h2o_w.gradient()
dy_grad_h2h2 = simple_rnn2._cell._h2h_w.gradient()
dy_grad_i2h2 = simple_rnn2._cell._i2h_w.gradient()

@@ -275,8 +275,7 @@ class TestDygraphDeepCF(unittest.TestCase):
deepcf2 = DeepCF(num_users, num_items, matrix)
adam2 = fluid.optimizer.AdamOptimizer(
0.01, parameter_list=deepcf2.parameters())
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
for e in range(NUM_EPOCHES):
sys.stderr.write('epoch %d\n' % e)
for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):
@@ -289,7 +288,7 @@ class TestDygraphDeepCF(unittest.TestCase):
fluid.layers.log_loss(prediction2,
to_variable(labels_np[
slice:slice + BATCH_SIZE])))
loss2.backward(backward_strategy)
loss2.backward()
adam2.minimize(loss2)
deepcf2.clear_gradients()
dy_loss2 = loss2.numpy()

@@ -52,8 +52,7 @@ class TestDygraphDoubleGrad(TestCase):
retain_graph=None,
create_graph=False,
allow_unused=False):
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = self.sort_sum_gradient
fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient})
return fluid.dygraph.grad(
outputs=outputs,
inputs=inputs,
@@ -61,8 +60,7 @@ class TestDygraphDoubleGrad(TestCase):
no_grad_vars=no_grad_vars,
retain_graph=retain_graph,
create_graph=create_graph,
allow_unused=allow_unused,
backward_strategy=backward_strategy)
allow_unused=allow_unused)
@dygraph_guard
def test_exception(self):
@@ -310,8 +308,8 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
out = out + linear(input)
return out
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
with fluid.dygraph.guard():
paddle.manual_seed(123)
a = fluid.dygraph.to_variable(value)
@@ -324,8 +322,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
inputs=[a],
create_graph=False,
only_inputs=True,
allow_unused=False,
backward_strategy=backward_strategy)
allow_unused=False)
grad_1 = dx[0].numpy()
@@ -335,7 +332,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
a.stop_gradient = False
out = model_f(a)
out.backward(backward_strategy)
out.backward()
grad_2 = a.gradient()

@@ -179,9 +179,8 @@ class TestDygraphGAN(unittest.TestCase):
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
discriminator2 = Discriminator()
generator2 = Generator()
sgd2 = SGDOptimizer(
@@ -201,7 +200,7 @@ class TestDygraphGAN(unittest.TestCase):
x=d_fake2, label=to_variable(np.zeros([2, 1], np.float32))))
d_loss2 = d_loss_real2 + d_loss_fake2
d_loss2.backward(backward_strategy)
d_loss2.backward()
sgd2.minimize(d_loss2)
discriminator2.clear_gradients()
generator2.clear_gradients()
@@ -211,7 +210,7 @@ class TestDygraphGAN(unittest.TestCase):
g_loss2 = fluid.layers.reduce_mean(
fluid.layers.sigmoid_cross_entropy_with_logits(
x=d_fake2, label=to_variable(np.ones([2, 1], np.float32))))
g_loss2.backward(backward_strategy)
g_loss2.backward()
sgd2.minimize(g_loss2)
for p in discriminator2.parameters():
dy_params2[p.name] = p.numpy()

@@ -62,8 +62,7 @@ class Test_Forward_Hook(unittest.TestCase):
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
input_word = np.array(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7,
@@ -132,8 +131,7 @@ class Test_Forward_Hook(unittest.TestCase):
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
global call_forward_hook
global call_forward_pre_hook

@@ -113,8 +113,9 @@ class TestDygraphSimpleNet(unittest.TestCase):
dy_loss = None
helper = DyGraphProgramDescTracerTestHelper(self)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = is_sort_sum_gradient
fluid.set_flags({
'FLAGS_sort_sum_gradient': is_sort_sum_gradient
})
for i in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
@@ -129,7 +130,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
if i == 0:
for param in simple_net.parameters():
dy_param_init[param.name] = param.numpy()
dy_loss.backward(backward_strategy)
dy_loss.backward()
sgd.minimize(dy_loss)
sgd.clear_gradients()
if i == batch_num - 1:

@@ -36,8 +36,7 @@ class TestImperativeMnistSortGradient(unittest.TestCase):
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
mnist2 = MNIST()
sgd2 = SGDOptimizer(
@@ -69,7 +68,7 @@ class TestImperativeMnistSortGradient(unittest.TestCase):
for param in mnist2.parameters():
dy_param_init_value2[param.name] = param.numpy()
avg_loss2.backward(backward_strategy)
avg_loss2.backward()
sgd2.minimize(avg_loss2)
mnist2.clear_gradients()

Some files were not shown because too many files have changed in this diff.
