copy dygraph api to paddle.imperative (#24085)

* copy dygraph api to paddle.imperative, test=develop

* polish the code, test=develop

* polish code, test=develop

* polish code, test=develop

* move paddle.imperative.Layer to paddle.nn.Layer, test=develop
revert-24314-dev/fix_err_msg
zhongpu 5 years ago committed by GitHub
parent 7728a20175
commit a851b97a58
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@@ -202,3 +202,9 @@ from .tensor.stat import var #DEFINE_ALIAS
# from .tensor.tensor import Tensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensorArray #DEFINE_ALIAS
from .fluid.dygraph.base import enable_dygraph #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph #DEFINE_ALIAS
from .fluid.framework import in_dygraph_mode #DEFINE_ALIAS
# Aliases exposing the legacy dygraph entry points (imported above from
# fluid) under the new ``*_imperative`` names, so both spellings work.
enable_imperative = enable_dygraph #DEFINE_ALIAS
disable_imperative = disable_dygraph #DEFINE_ALIAS
in_imperative_mode = in_dygraph_mode

@@ -204,6 +204,31 @@ class TestImperative(unittest.TestCase):
self.assertTrue(np.array_equal(dy_out1, dy_out2))
self.assertTrue(np.array_equal(dy_grad1, dy_grad2))
def test_functional_paddle_imperative_dygraph_context(self):
    """Verify that enable_imperative/disable_imperative and the
    ``paddle.imperative.guard()`` context manager toggle imperative mode
    consistently, and that an MLP forward/backward pass produces the same
    output and gradient under both ways of entering the mode."""
    input_np = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)

    def run_mlp():
        # Build a fresh MLP, run one forward pass on input_np, capture the
        # output *before* backward, then return the output array and the
        # first linear layer's weight gradient.
        model = MLP(input_size=2)
        pred = model(paddle.imperative.to_variable(input_np))
        pred_np = pred.numpy()
        pred.backward()
        return pred_np, model._linear1.weight.gradient()

    # Functional toggle: enable_imperative()/disable_imperative().
    self.assertFalse(paddle.imperative.enabled())
    paddle.enable_imperative()
    self.assertTrue(paddle.imperative.enabled())
    out_enabled, grad_enabled = run_mlp()
    paddle.disable_imperative()
    self.assertFalse(paddle.imperative.enabled())

    # Context-manager toggle: paddle.imperative.guard().
    with paddle.imperative.guard():
        self.assertTrue(paddle.imperative.enabled())
        out_guard, grad_guard = run_mlp()
    self.assertFalse(paddle.imperative.enabled())

    # Both entry styles must yield identical results.
    self.assertTrue(np.array_equal(out_enabled, out_guard))
    self.assertTrue(np.array_equal(grad_enabled, grad_guard))
def test_isinstance(self):
var = fluid.layers.data(shape=[1], name='x', dtype='float32')
self.assertTrue(isinstance(var, fluid.Variable))

@@ -289,7 +289,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
np_t = v.numpy()
self.model_base[k] = np_t
fluid.save_dygraph(self.state_dict, "./test_dy")
paddle.imperative.save(self.state_dict, "./test_dy")
def testLoadAndSetVarBase(self):
seed = 90
@@ -369,7 +369,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
if isinstance(adam._learning_rate, LearningRateDecay):
adam._learning_rate.step_num = 0
para_state_dict, opti_state_dict = fluid.load_dygraph("./test_dy")
para_state_dict, opti_state_dict = paddle.imperative.load(
"./test_dy")
adam.set_dict(opti_state_dict)
opti_dict = adam.state_dict()
@@ -881,18 +882,18 @@ class TestDygraphPtbRnn(unittest.TestCase):
with fluid.dygraph.guard():
emb = fluid.dygraph.Embedding([10, 10])
state_dict = emb.state_dict()
paddle.imperative.save_dygraph(state_dict,
os.path.join('saved_dy', 'emb_dy'))
paddle.imperative.save(state_dict,
os.path.join('saved_dy', 'emb_dy'))
para_state_dict, opti_state_dict = paddle.imperative.load_dygraph(
para_state_dict, opti_state_dict = paddle.imperative.load(
os.path.join('saved_dy', 'emb_dy'))
self.assertTrue(opti_state_dict == None)
para_state_dict, opti_state_dict = paddle.imperative.load_dygraph(
para_state_dict, opti_state_dict = paddle.imperative.load(
os.path.join('saved_dy', 'emb_dy.pdparams'))
para_state_dict, opti_state_dict = paddle.imperative.load_dygraph(
para_state_dict, opti_state_dict = paddle.imperative.load(
os.path.join('saved_dy', 'emb_dy.pdopt'))

@@ -24,7 +24,7 @@ import paddle.fluid.core as core
import paddle
class SimpleNet(paddle.imperative.Layer):
class SimpleNet(paddle.nn.Layer):
def __init__(self, vocab_size, hidden_size, dtype):
super(SimpleNet, self).__init__()
self.emb = fluid.dygraph.Embedding(

@@ -14,16 +14,16 @@
# define api used to run in imperative mode
__all__ = [
'BackwardStrategy', 'guard', 'Layer', 'LayerList', 'load_dygraph',
'save_dygraph', 'prepare_context', 'to_variable', 'TracedLayer', 'no_grad',
'ParameterList', 'Sequential'
'BackwardStrategy', 'enabled', 'grad', 'guard', 'LayerList', 'load', 'save',
'prepare_context', 'to_variable', 'TracedLayer', 'no_grad', 'ParameterList',
'Sequential'
]
from paddle.fluid import core
from ..fluid.dygraph.base import guard, no_grad, to_variable
from ..fluid.dygraph.layers import Layer
from ..fluid.dygraph.base import enabled, guard, no_grad, to_variable, grad
from ..fluid.dygraph.container import LayerList, ParameterList, Sequential
from ..fluid.dygraph.checkpoint import load_dygraph, save_dygraph
from ..fluid.dygraph.checkpoint import load_dygraph as load
from ..fluid.dygraph.checkpoint import save_dygraph as save
from ..fluid.dygraph.parallel import prepare_context
from ..fluid.dygraph.jit import TracedLayer

@@ -95,3 +95,4 @@ from .layer.norm import InstanceNorm #DEFINE_ALIAS
# from .layer.rnn import LSTMCell #DEFINE_ALIAS
from .layer import loss #DEFINE_ALIAS
from .layer import conv #DEFINE_ALIAS
from ..fluid.dygraph.layers import Layer #DEFINE_ALIAS

Loading…
Cancel
Save