@@ -685,8 +685,6 @@ void BindImperative(py::module *m_ptr) {
              .. code-block:: python
 
                 import paddle
-
-                paddle.disable_static()
                 linear = Linear(32, 64)
                 data = paddle.uniform(shape=[30, 10, 32], -1, 1)
                 x = linear(data)
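Note: the example kept in this hunk is not valid Python as written. `Linear` is referenced without a qualifying import, and `paddle.uniform` passes the positional bounds `-1, 1` after the keyword argument `shape`, which is a `SyntaxError`. A corrected, runnable sketch, assuming Paddle 2.0+ where the layer is `paddle.nn.Linear` and the bounds are the `min`/`max` keywords:

.. code-block:: python

    import paddle

    # Linear projects the last axis: [30, 10, 32] -> [30, 10, 64].
    linear = paddle.nn.Linear(32, 64)

    # Uniform samples in [-1, 1); the bounds go in the min/max keywords.
    data = paddle.uniform(shape=[30, 10, 32], min=-1.0, max=1.0)
    x = linear(data)
    print(x.shape)  # [30, 10, 64]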
@@ -704,19 +702,13 @@ void BindImperative(py::module *m_ptr) {
              .. code-block:: python
 
                 import paddle
-
-                paddle.disable_static()
-                inputs = []
-                for _ in range(10):
-                    tmp = paddle.ones([2, 2])
-                    tmp.stop_gradient=False
-                    inputs.append(tmp)
-                ret = paddle.sums(inputs2)
-                loss = paddle.sum(ret)
-                loss.backward()
-                print("Before clear_gradient {}".format(loss.grad))
-                loss.clear_gradient()
-                print("After clear_gradient {}".format(loss.grad))
+                input = paddle.uniform([10, 2])
+                linear = paddle.nn.Linear(2, 3)
+                out = linear(input)
+                out.backward()
+                print("Before clear_gradient, linear.weight.grad: {}".format(linear.weight.grad))
+                linear.weight.clear_gradient()
+                print("After clear_gradient, linear.weight.grad: {}".format(linear.weight.grad))
       )DOC")
         .def("clone",
              [](std::shared_ptr<imperative::VarBase> &self) {
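For reference, a self-contained sketch of the updated `clear_gradient` example, extended to show why the call matters: gradients accumulate across `backward()` calls until the buffer is reset to zeros. This assumes Paddle 2.0+, where dygraph mode is the default; the two-pass accumulation is an illustration added here, not part of the docstring above.

.. code-block:: python

    import paddle

    linear = paddle.nn.Linear(2, 3)
    x = paddle.uniform([10, 2])

    # backward() accumulates into linear.weight.grad on every call.
    linear(x).sum().backward()
    linear(x).sum().backward()
    print("accumulated grad:\n{}".format(linear.weight.grad))

    # clear_gradient() resets the gradient buffer to zeros.
    linear.weight.clear_gradient()
    print("after clear_gradient:\n{}".format(linear.weight.grad))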