Paddle-2.0 API directory migration (#25898)

* Directory migration, test=develop

* Moved imperative APIs from paddle init to paddle.framework, test=develop

* Fixed jit bug, test=develop

* Made static mode the default, test=develop

* Fixed format; create_parameter now belongs to framework, test=develop

* Fixed package imports, test=develop

* Fixed __init__ format, test=develop

* Fixed alias problem

* Fixed paddle.enable_imperative problems, test=develop

* Added unittest

* Deleted install_check comment

* Fixed unittest timeout

* Fixed unittest error

* Moved Program and default_main/startup_program to the static package

* Optimized unittest method

* Fixed framework __init__ format

* Fixed jit path

* Deleted alias

* Moved jit to paddle

* Fixed unittest format

* Fixed paddle.default_main_program

* Fixed save/load APIs in paddle __init__.py

* Fixed CI failure around paddle.imperative.to_variable
pangyoki 5 years ago committed by GitHub
parent cd7b55a221
commit 2efcb481c8
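
For user code, this migration amounts to a set of renames: the paddle.imperative.* entry points move to the top-level paddle namespace, and mode switching is rephrased in terms of static-graph mode. A minimal before/after sketch (API names taken from the alias hunks and the migration unit test below; not part of the diff itself):

    import numpy as np
    import paddle

    # Before this PR:
    #   paddle.enable_imperative()
    #   x = paddle.imperative.to_variable(np.ones([2, 2], dtype='float32'))
    #   paddle.disable_imperative()

    # After this PR:
    paddle.disable_static()           # enter dynamic-graph (imperative) mode
    x = paddle.to_variable(np.ones([2, 2], dtype='float32'))
    print(paddle.in_dynamic_mode())   # True
    paddle.enable_static()            # static-graph mode is the new default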

@@ -38,7 +38,6 @@ import paddle.tensor
import paddle.nn
import paddle.fleet
import paddle.framework
import paddle.imperative
import paddle.optimizer
import paddle.metric
import paddle.incubate.complex as complex
@@ -69,8 +68,6 @@ from .tensor.creation import full_like #DEFINE_ALIAS
from .tensor.creation import triu #DEFINE_ALIAS
from .tensor.creation import tril #DEFINE_ALIAS
from .tensor.creation import meshgrid #DEFINE_ALIAS
from .tensor.io import save #DEFINE_ALIAS
from .tensor.io import load #DEFINE_ALIAS
from .tensor.linalg import matmul #DEFINE_ALIAS
from .tensor.linalg import dot #DEFINE_ALIAS
# from .tensor.linalg import einsum #DEFINE_ALIAS
@@ -201,30 +198,34 @@ from .tensor.search import index_select #DEFINE_ALIAS
from .tensor.search import nonzero #DEFINE_ALIAS
from .tensor.search import sort #DEFINE_ALIAS
from .framework.random import manual_seed #DEFINE_ALIAS
from .framework import append_backward #DEFINE_ALIAS
from .framework import gradients #DEFINE_ALIAS
from .framework import Executor #DEFINE_ALIAS
from .framework import global_scope #DEFINE_ALIAS
from .framework import scope_guard #DEFINE_ALIAS
from .framework import BuildStrategy #DEFINE_ALIAS
from .framework import CompiledProgram #DEFINE_ALIAS
from .framework import default_main_program #DEFINE_ALIAS
from .framework import default_startup_program #DEFINE_ALIAS
from .framework import Variable #DEFINE_ALIAS
from .framework import ParamAttr #DEFINE_ALIAS
from .framework import create_global_var #DEFINE_ALIAS
from .framework import create_parameter #DEFINE_ALIAS
from .framework import Print #DEFINE_ALIAS
from .framework import py_func #DEFINE_ALIAS
from .framework import ExecutionStrategy #DEFINE_ALIAS
from .framework import name_scope #DEFINE_ALIAS
from .framework import ParallelExecutor #DEFINE_ALIAS
from .framework import ParamAttr #DEFINE_ALIAS
from .framework import Program #DEFINE_ALIAS
from .framework import program_guard #DEFINE_ALIAS
from .framework import Variable #DEFINE_ALIAS
from .framework import WeightNormParamAttr #DEFINE_ALIAS
from .framework import CPUPlace #DEFINE_ALIAS
from .framework import CUDAPlace #DEFINE_ALIAS
from .framework import CUDAPinnedPlace #DEFINE_ALIAS
from .framework import BackwardStrategy #DEFINE_ALIAS
from .framework import to_variable #DEFINE_ALIAS
from .framework import grad #DEFINE_ALIAS
from .framework import no_grad #DEFINE_ALIAS
from .framework import save_dygraph #DEFINE_ALIAS
from .framework import load_dygraph #DEFINE_ALIAS
from .framework import save #DEFINE_ALIAS
from .framework import load #DEFINE_ALIAS
from .framework import prepare_context #DEFINE_ALIAS
from .framework import ParallelEnv #DEFINE_ALIAS
from .framework import DataParallel #DEFINE_ALIAS
from .framework import NoamDecay #DEFINE_ALIAS
from .framework import PiecewiseDecay #DEFINE_ALIAS
from .framework import NaturalExpDecay #DEFINE_ALIAS
from .framework import ExponentialDecay #DEFINE_ALIAS
from .framework import InverseTimeDecay #DEFINE_ALIAS
from .framework import PolynomialDecay #DEFINE_ALIAS
from .framework import CosineDecay #DEFINE_ALIAS
from .tensor.search import index_sample #DEFINE_ALIAS
from .tensor.stat import mean #DEFINE_ALIAS
from .tensor.stat import reduce_mean #DEFINE_ALIAS
@@ -237,6 +238,11 @@ from .fluid.data import data
from . import incubate
from .incubate import hapi
from .fluid.dygraph.base import enable_dygraph as enable_imperative #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph as disable_imperative #DEFINE_ALIAS
from .fluid.framework import in_dygraph_mode as in_imperative_mode #DEFINE_ALIAS
from .fluid.dygraph.base import enable_dygraph #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph #DEFINE_ALIAS
from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS
from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS
from . import jit
from . import static
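
Note the direction of the new aliases: entering dynamic-graph mode is spelled disable_static, and leaving it is enable_static. Since all of these names are plain import aliases of the same two fluid functions, the old and new spellings compare identical; a small sanity sketch:

    import paddle

    # enable_dygraph is re-exported as enable_imperative and disable_static
    assert paddle.disable_static is paddle.enable_dygraph is paddle.enable_imperative
    # disable_dygraph is re-exported as disable_imperative and enable_static
    assert paddle.enable_static is paddle.disable_dygraph is paddle.disable_imperative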

@@ -1,44 +0,0 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'fc',
'batch_norm',
'embedding',
'bilinear_tensor_product',
'conv2d',
'conv2d_transpose',
'conv3d',
'conv3d_transpose',
'create_parameter',
'crf_decoding',
'data_norm',
'deformable_conv',
'group_norm',
'hsigmoid',
'instance_norm',
'layer_norm',
'multi_box_head',
'nce',
'prelu',
'row_conv',
'spectral_norm',
]
from ..fluid.layers import fc, batch_norm, bilinear_tensor_product, \
conv2d, conv2d_transpose, conv3d, conv3d_transpose, create_parameter, \
crf_decoding, data_norm, deformable_conv, group_norm, hsigmoid, instance_norm, \
layer_norm, multi_box_head, nce, prelu, row_conv, spectral_norm
from ..fluid.input import embedding
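
These static-graph layer helpers are not removed outright; per the new_directory list in the migration unit test below, they are re-exposed under paddle.static.nn:

    # assuming a Paddle build that includes this PR
    from paddle.static.nn import fc, batch_norm, embedding, spectral_norm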

@@ -286,8 +286,8 @@ class Fleet(object):
context["loss"] = loss
if startup_program == None:
self.origin_startup_program = \
paddle.default_startup_program().clone(for_test=False)
startup_program = paddle.default_startup_program()
paddle.static.default_startup_program().clone(for_test=False)
startup_program = paddle.static.default_startup_program()
else:
self.origin_startup_program = \
startup_program.clone(for_test=False)
@@ -338,7 +338,7 @@ class Fleet(object):
parameter_list=parameter_list,
no_grad_set=no_grad_set)
default_program = paddle.default_main_program()
default_program = paddle.static.default_main_program()
if id(default_program) != id(loss.block.program):
paddle.fluid.framework.switch_main_program(loss.block.program)

@@ -190,7 +190,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase):
parameter_list=None,
no_grad_set=None):
if startup_program == None:
startup_program = paddle.default_startup_program()
startup_program = paddle.static.default_startup_program()
compiled_program = self._try_to_compile(startup_program,
loss.block.program, loss)
loss.block.program._graph = compiled_program

@@ -121,10 +121,6 @@ def enabled():
def enable_dygraph(place=None):
"""
:alias_main: paddle.enable_dygraph
:alias: paddle.enable_dygraph,paddle.enable_imperative.enable_dygraph
:old_api: paddle.fluid.dygraph.base.enable_dygraph
This function enables dynamic graph mode.
Parameters:
@@ -155,10 +151,6 @@ def enable_dygraph(place=None):
def disable_dygraph():
"""
:alias_main: paddle.disable_dygraph
:alias: paddle.disable_dygraph,paddle.disable_imperative.disable_dygraph
:old_api: paddle.fluid.dygraph.base.disable_dygraph
This function disables dynamic graph mode.
return:
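
For reference, a minimal sketch of calling the two underlying fluid functions directly through the canonical paths used by the aliases above:

    import numpy as np
    from paddle.fluid.dygraph import to_variable
    from paddle.fluid.dygraph.base import enable_dygraph, disable_dygraph

    enable_dygraph()    # dynamic-graph mode on (optionally pass a place)
    x = to_variable(np.ones([2, 2], dtype='float32'))
    print(x.numpy().sum())  # 4.0
    disable_dygraph()   # back to static-graph mode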

@@ -701,11 +701,11 @@ def save(layer, model_path, input_spec=None, configs=None):
prog_translator = ProgramTranslator()
if not prog_translator.enable:
raise RuntimeError(
"The paddle.imperative.jit.save doesn't work when setting ProgramTranslator.enable=False."
"The paddle.jit.save doesn't work when setting ProgramTranslator.enable=False."
)
if not isinstance(layer, Layer):
raise TypeError(
"The input layer of paddle.imperative.jit.save should be 'Layer', but received layer type is %s."
"The input layer of paddle.jit.save should be 'Layer', but received layer type is %s."
% type(layer))
if configs is None:
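
The renamed messages can be exercised directly; for instance, the type check fires for any non-Layer input. A sketch against the signature save(layer, model_path, input_spec=None, configs=None) shown above (the path name is arbitrary):

    import paddle

    try:
        paddle.jit.save(layer=42, model_path='saved_model')
    except TypeError as e:
        # The input layer of paddle.jit.save should be 'Layer', but received
        # layer type is <class 'int'>.
        print(e)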

@@ -146,7 +146,7 @@ class Layer(core.Layer):
import paddle
import paddle.nn as nn
paddle.enable_imperative()
paddle.disable_static()
net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

@@ -3207,12 +3207,12 @@ class Flatten(layers.Layer):
.. code-block:: python
import paddle
from paddle.imperative import to_variable
from paddle import to_variable
import numpy as np
inp_np = np.ones([5, 2, 3, 4]).astype('float32')
paddle.enable_imperative()
paddle.disable_static()
inp_np = to_variable(inp_np)
flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)

@@ -226,7 +226,7 @@ def monkey_patch_varbase():
.. code-block:: python
import paddle
paddle.enable_imperative()
paddle.disable_static()
x = paddle.rand([1, 5])
print(x)
# Variable: eager_tmp_0
@@ -235,7 +235,7 @@ def monkey_patch_varbase():
# - layout: NCHW
# - dtype: float
# - data: [0.645307 0.597973 0.732793 0.646921 0.540328]
paddle.disable_imperative()
paddle.enable_static()
"""
tensor = self.value().get_tensor()
if tensor._is_initialized():

@@ -5175,7 +5175,7 @@ class ParamBase(core.VarBase):
.. code-block:: python
import paddle
paddle.enable_imperative()
paddle.disable_static()
conv = paddle.nn.Conv2D(3, 3, 5)
print(conv.weight)
# Parameter: conv2d_0.w_0
@@ -5184,7 +5184,7 @@ class ParamBase(core.VarBase):
# - layout: NCHW
# - dtype: float
# - data: [...]
paddle.disable_imperative()
paddle.enable_static()
"""
tensor = self.value().get_tensor()
if tensor._is_initialized():

@@ -13,6 +13,7 @@
# limitations under the License.
import os
import paddle
from .framework import Program, program_guard, unique_name, cuda_places, cpu_places
from .param_attr import ParamAttr
from .initializer import Constant

@@ -1669,9 +1669,6 @@ def _load_persistable_nodes(executor, dirname, graph):
def save(program, model_path):
"""
:api_attr: Static Graph
:alias_main: paddle.save
:alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save
:old_api: paddle.fluid.save
This function saves parameters, optimizer information and network description to model_path.
@@ -1733,9 +1730,6 @@ def save(program, model_path):
def load(program, model_path, executor=None, var_list=None):
"""
:api_attr: Static Graph
:alias_main: paddle.load
:alias: paddle.load,paddle.tensor.load,paddle.tensor.io.load
:old_api: paddle.fluid.io.load
This function gets parameters and optimizer information from the program, and then gets the corresponding values from file.
An exception will be thrown if the shape or dtype of the parameters does not match.
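
A quick usage sketch of this static-graph pair through its new paddle.static spelling (listed in the migration unit test below; the checkpoint path is arbitrary):

    import paddle

    paddle.enable_static()
    prog = paddle.static.default_main_program()
    exe = paddle.static.Executor(paddle.CPUPlace())
    # save writes parameters, optimizer information and network description;
    # load reads them back into the same program
    paddle.static.save(prog, './checkpoint/model')
    paddle.static.load(prog, './checkpoint/model', exe)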

@@ -12073,11 +12073,11 @@ def logical_and(x, y, out=None, name=None):
import paddle
import numpy as np
paddle.enable_imperative()
paddle.disable_static()
x_data = np.array([True, True, False, False], dtype=np.bool)
y_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
res = paddle.logical_and(x, y)
print(res.numpy()) # [True False False False]
"""
@@ -12115,11 +12115,11 @@ def logical_or(x, y, out=None, name=None):
import paddle
import numpy as np
paddle.enable_imperative()
paddle.disable_static()
x_data = np.array([True, True, False, False], dtype=np.bool)
y_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
res = paddle.logical_or(x, y)
print(res.numpy()) # [True True True False]
"""
@@ -12157,11 +12157,11 @@ def logical_xor(x, y, out=None, name=None):
import paddle
import numpy as np
paddle.enable_imperative()
paddle.disable_static()
x_data = np.array([True, True, False, False], dtype=np.bool)
y_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
res = paddle.logical_xor(x, y)
print(res.numpy()) # [False True True False]
"""
@@ -12197,9 +12197,9 @@ def logical_not(x, out=None, name=None):
import paddle
import numpy as np
paddle.enable_imperative()
paddle.disable_static()
x_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.imperative.to_variable(x_data)
x = paddle.to_variable(x_data)
res = paddle.logical_not(x)
print(res.numpy()) # [False True False True]
"""

@@ -16,7 +16,7 @@ from __future__ import print_function
import paddle
from paddle.fluid import core
from paddle import program_guard, Program
from paddle.static import program_guard, Program
import unittest
import numpy as np
from op_test import OpTest
@@ -82,7 +82,7 @@ class TestArangeAPI(unittest.TestCase):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
exe = paddle.Executor(place)
exe = paddle.static.Executor(place)
out = exe.run(fetch_list=[x1])
expected_data = np.arange(0, 5, 1).astype(np.float32)
@@ -93,15 +93,16 @@ class TestArangeImperative(unittest.TestCase):
def test_out(self):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
with paddle.imperative.guard(place):
x1 = paddle.arange(0, 5, 1)
x2 = paddle.tensor.arange(5)
x3 = paddle.tensor.creation.arange(5)
start = paddle.imperative.to_variable(np.array([0], 'float32'))
end = paddle.imperative.to_variable(np.array([5], 'float32'))
step = paddle.imperative.to_variable(np.array([1], 'float32'))
x4 = paddle.arange(start, end, step, 'int64')
paddle.disable_static(place)
x1 = paddle.arange(0, 5, 1)
x2 = paddle.tensor.arange(5)
x3 = paddle.tensor.creation.arange(5)
start = paddle.to_variable(np.array([0], 'float32'))
end = paddle.to_variable(np.array([5], 'float32'))
step = paddle.to_variable(np.array([1], 'float32'))
x4 = paddle.arange(start, end, step, 'int64')
paddle.enable_static()
expected_data = np.arange(0, 5, 1).astype(np.int64)
for i in [x1, x2, x3, x4]:

@@ -17,7 +17,6 @@ from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.imperative as imperative
import paddle.fluid.layers as layers
import numpy as np
import six
@@ -384,20 +383,21 @@ class TestArgsortDygraph(unittest.TestCase):
self.place = core.CPUPlace()
def test_api_0(self):
with imperative.guard(self.place):
var_x = imperative.to_variable(self.input_data)
out = paddle.argsort(var_x)
self.assertEqual((np.argsort(self.input_data) == out.numpy()).all(),
True)
paddle.disable_static(self.place)
var_x = paddle.to_variable(self.input_data)
out = paddle.argsort(var_x)
self.assertEqual((np.argsort(self.input_data) == out.numpy()).all(),
True)
paddle.enable_static()
def test_api_1(self):
with imperative.guard(self.place):
var_x = imperative.to_variable(self.input_data)
out = paddle.argsort(var_x, axis=-1)
self.assertEqual(
(np.argsort(
self.input_data, axis=-1) == out.numpy()).all(),
True)
paddle.disable_static(self.place)
var_x = paddle.to_variable(self.input_data)
out = paddle.argsort(var_x, axis=-1)
self.assertEqual(
(np.argsort(
self.input_data, axis=-1) == out.numpy()).all(), True)
paddle.enable_static()
if __name__ == "__main__":

@@ -97,7 +97,7 @@ def create_paddle_case(op_type, callback):
y = paddle.nn.data(name='y', shape=[1, 2, 3], dtype='int32')
op = eval("paddle.%s" % (self.op_type))
out = op(x, y)
exe = paddle.Executor(self.place)
exe = paddle.static.Executor(self.place)
input_x = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(np.int32)
input_y = np.arange(0, 6).reshape((1, 2, 3)).astype(np.int32)
real_result = callback(input_x, input_y)

@@ -268,9 +268,9 @@ class TestConcatAPI(unittest.TestCase):
out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64)
exe = paddle.Executor(place=paddle.CPUPlace())
exe = paddle.static.Executor(place=paddle.CPUPlace())
[res_1, res_2, res_3, res_4] = exe.run(
paddle.default_main_program(),
paddle.static.default_main_program(),
feed={"x_1": input_2,
"x_2": input_2,
"x_3": input_3},
@@ -284,14 +284,15 @@
in1 = np.array([[1, 2, 3], [4, 5, 6]])
in2 = np.array([[11, 12, 13], [14, 15, 16]])
in3 = np.array([[21, 22], [23, 24]])
with paddle.imperative.guard():
x1 = paddle.imperative.to_variable(in1)
x2 = paddle.imperative.to_variable(in2)
x3 = paddle.imperative.to_variable(in3)
out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
out2 = paddle.concat(x=[x1, x2], axis=0)
np_out1 = np.concatenate([in1, in2, in3], axis=-1)
np_out2 = np.concatenate([in1, in2], axis=0)
paddle.disable_static()
x1 = paddle.to_variable(in1)
x2 = paddle.to_variable(in2)
x3 = paddle.to_variable(in3)
out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
out2 = paddle.concat(x=[x1, x2], axis=0)
np_out1 = np.concatenate([in1, in2, in3], axis=-1)
np_out2 = np.concatenate([in1, in2], axis=0)
paddle.enable_static()
self.assertEqual((out1.numpy() == np_out1).all(), True)
self.assertEqual((out2.numpy() == np_out2).all(), True)

@@ -21,7 +21,7 @@ import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.imperative import to_variable
from paddle import to_variable
class TestCumsumOp(unittest.TestCase):
@@ -83,16 +83,18 @@ class TestCumsumOp(unittest.TestCase):
self.assertTrue(np.allclose(z, out[5]))
def test_cpu(self):
with paddle.imperative.guard(paddle.fluid.CPUPlace()):
self.run_cases()
paddle.disable_static(paddle.fluid.CPUPlace())
self.run_cases()
paddle.enable_static()
self.run_static()
def test_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
with paddle.imperative.guard(paddle.fluid.CUDAPlace(0)):
self.run_cases()
paddle.disable_static(paddle.fluid.CUDAPlace(0))
self.run_cases()
paddle.enable_static()
self.run_static(use_gpu=True)

@@ -0,0 +1,181 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import time
import subprocess
import unittest
import numpy as np
import paddle
class TestDirectory(unittest.TestCase):
def get_import_command(self, module):
paths = module.split('.')
if len(paths) <= 1:
return module
package = '.'.join(paths[:-1])
func = paths[-1]
cmd = 'from {} import {}'.format(package, func)
return cmd
def test_new_directory(self):
new_directory = [
'paddle.enable_static', 'paddle.disable_static',
'paddle.in_dynamic_mode', 'paddle.to_variable', 'paddle.grad',
'paddle.no_grad', 'paddle.save', 'paddle.load',
'paddle.static.save', 'paddle.static.load',
'paddle.BackwardStrategy', 'paddle.ParallelEnv',
'paddle.prepare_context', 'paddle.DataParallel', 'paddle.jit',
'paddle.jit.TracedLayer', 'paddle.jit.to_static',
'paddle.jit.ProgramTranslator', 'paddle.jit.TranslatedLayer',
'paddle.jit.save', 'paddle.jit.load', 'paddle.jit.SaveLoadConfig',
'paddle.NoamDecay', 'paddle.PiecewiseDecay',
'paddle.NaturalExpDecay', 'paddle.ExponentialDecay',
'paddle.InverseTimeDecay', 'paddle.PolynomialDecay',
'paddle.CosineDecay', 'paddle.static.Executor',
'paddle.static.global_scope', 'paddle.static.scope_guard',
'paddle.static.append_backward', 'paddle.static.gradients',
'paddle.static.BuildStrategy', 'paddle.static.CompiledProgram',
'paddle.static.ExecutionStrategy',
'paddle.static.default_main_program',
'paddle.static.default_startup_program', 'paddle.static.Program',
'paddle.static.name_scope', 'paddle.static.program_guard',
'paddle.static.Print', 'paddle.static.py_func',
'paddle.static.ParallelExecutor',
'paddle.static.WeightNormParamAttr', 'paddle.static.nn.fc',
'paddle.static.nn.batch_norm',
'paddle.static.nn.bilinear_tensor_product',
'paddle.static.nn.conv2d', 'paddle.static.nn.conv2d_transpose',
'paddle.static.nn.conv3d', 'paddle.static.nn.conv3d_transpose',
'paddle.static.nn.create_parameter',
'paddle.static.nn.crf_decoding', 'paddle.static.nn.data_norm',
'paddle.static.nn.deformable_conv', 'paddle.static.nn.group_norm',
'paddle.static.nn.hsigmoid', 'paddle.static.nn.instance_norm',
'paddle.static.nn.layer_norm', 'paddle.static.nn.multi_box_head',
'paddle.static.nn.nce', 'paddle.static.nn.prelu',
'paddle.static.nn.row_conv', 'paddle.static.nn.spectral_norm',
'paddle.static.nn.embedding'
]
import_file = 'run_import_modules.py'
with open(import_file, "w") as wb:
for module in new_directory:
run_cmd = self.get_import_command(module)
wb.write("{}\n".format(run_cmd))
_python = sys.executable
ps_cmd = "{} {}".format(_python, import_file)
ps_proc = subprocess.Popen(
ps_cmd.strip().split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = ps_proc.communicate()
assert "Error" not in str(stderr), "Error: Can't" \
" import Module {}".format(module)
def test_old_directory(self):
old_directory = [
'paddle.enable_imperative', 'paddle.disable_imperative',
'paddle.in_imperative_mode', 'paddle.imperative.to_variable',
'paddle.imperative.enable', 'paddle.imperative.guard',
'paddle.imperative.grad', 'paddle.imperative.no_grad',
'paddle.imperative.save', 'paddle.imperative.load',
'paddle.imperative.BackwardStrategy',
'paddle.imperative.ParallelEnv',
'paddle.imperative.prepare_context',
'paddle.imperative.DataParallel', 'paddle.imperative.jit',
'paddle.imperative.TracedLayer', 'paddle.imperative.declarative',
'paddle.imperative.ProgramTranslator',
'paddle.imperative.TranslatedLayer', 'paddle.imperative.jit.save',
'paddle.imperative.jit.load',
'paddle.imperative.jit.SaveLoadConfig',
'paddle.imperative.NoamDecay',
'paddle.imperative.PiecewiseDecay',
'paddle.imperative.NaturalExpDecay',
'paddle.imperative.ExponentialDecay',
'paddle.imperative.InverseTimeDecay',
'paddle.imperative.PolynomialDecay',
'paddle.imperative.CosineDecay', 'paddle.Executor',
'paddle.global_scope', 'paddle.scope_guard',
'paddle.append_backward', 'paddle.gradients',
'paddle.BuildStrategy', 'paddle.CompiledProgram',
'paddle.ExecutionStrategy', 'paddle.name_scope',
'paddle.program_guard', 'paddle.Print', 'paddle.py_func',
'paddle.ParallelExecutor', 'paddle.default_main_program',
'paddle.default_startup_program', 'paddle.Program',
'paddle.WeightNormParamAttr', 'paddle.declarative.fc',
'paddle.declarative.batch_norm',
'paddle.declarative.bilinear_tensor_product',
'paddle.declarative.conv2d', 'paddle.declarative.conv2d_transpose',
'paddle.declarative.conv3d', 'paddle.declarative.conv3d_transpose',
'paddle.declarative.create_parameter',
'paddle.declarative.crf_decoding', 'paddle.declarative.data_norm',
'paddle.declarative.deformable_conv',
'paddle.declarative.group_norm', 'paddle.declarative.hsigmoid',
'paddle.declarative.instance_norm', 'paddle.declarative.layer_norm',
'paddle.declarative.multi_box_head', 'paddle.declarative.nce',
'paddle.declarative.prelu', 'paddle.declarative.row_conv',
'paddle.declarative.spectral_norm', 'paddle.declarative.embedding'
]
import_file = 'run_old_import_modules.py'
with open(import_file, "w") as wb:
cmd_context_count = """
count = 0
err_module = ""
"""
wb.write(cmd_context_count)
for module in old_directory:
run_cmd = self.get_import_command(module)
cmd_context_loop_template = """
try:
{run_cmd}
except:
count += 1
else:
err_module = "{module}"
"""
cmd_context_loop = cmd_context_loop_template.format(
run_cmd=run_cmd, module=module)
wb.write(cmd_context_loop)
cmd_context_print_template = """
if count != {len_old_directory}:
print("Error: Module " + err_module + " should not be imported")
"""
cmd_context_print = cmd_context_print_template.format(
len_old_directory=str(len(old_directory)))
wb.write(cmd_context_print)
_python = sys.executable
ps_cmd = "{} {}".format(_python, import_file)
ps_proc = subprocess.Popen(
ps_cmd.strip().split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = ps_proc.communicate()
assert "Error" not in str(stdout), str(stdout)
if __name__ == '__main__':
unittest.main()
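
For reference, get_import_command only rewrites a dotted path into a from-import statement; a standalone sketch mirroring the method above:

    def get_import_command(module):
        paths = module.split('.')
        if len(paths) <= 1:
            return module  # bare module name: import it as-is
        return 'from {} import {}'.format('.'.join(paths[:-1]), paths[-1])

    print(get_import_command('paddle.static.Executor'))  # from paddle.static import Executor
    print(get_import_command('paddle'))                  # paddle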

@@ -74,73 +74,70 @@ class TestEyeOp2(OpTest):
class API_TestTensorEye(unittest.TestCase):
def test_out(self):
with paddle.program_guard(paddle.Program()):
with paddle.static.program_guard(paddle.static.Program()):
data = paddle.eye(10)
place = fluid.CPUPlace()
exe = paddle.Executor(place)
exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[data])
expected_result = np.eye(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
with paddle.program_guard(paddle.Program()):
with paddle.static.program_guard(paddle.static.Program()):
data = paddle.eye(10, num_columns=7, dtype="float64")
place = paddle.CPUPlace()
exe = paddle.Executor(place)
exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[data])
expected_result = np.eye(10, 7, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with paddle.program_guard(paddle.Program()):
with paddle.static.program_guard(paddle.static.Program()):
data = paddle.eye(10, dtype="int64")
place = paddle.CPUPlace()
exe = paddle.Executor(place)
exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[data])
expected_result = np.eye(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
with paddle.imperative.guard():
out = paddle.eye(10, dtype="int64")
expected_result = np.eye(10, dtype="int64")
paddle.disable_static()
out = paddle.eye(10, dtype="int64")
expected_result = np.eye(10, dtype="int64")
paddle.enable_static()
self.assertEqual((out.numpy() == expected_result).all(), True)
with paddle.imperative.guard():
batch_shape = [2]
out = fluid.layers.eye(10,
10,
dtype="int64",
batch_shape=batch_shape)
result = np.eye(10, dtype="int64")
expected_result = []
for index in reversed(batch_shape):
tmp_result = []
for i in range(index):
tmp_result.append(result)
result = tmp_result
expected_result = np.stack(result, axis=0)
paddle.disable_static()
batch_shape = [2]
out = fluid.layers.eye(10, 10, dtype="int64", batch_shape=batch_shape)
result = np.eye(10, dtype="int64")
expected_result = []
for index in reversed(batch_shape):
tmp_result = []
for i in range(index):
tmp_result.append(result)
result = tmp_result
expected_result = np.stack(result, axis=0)
paddle.enable_static()
self.assertEqual(out.numpy().shape == np.array(expected_result).shape,
True)
self.assertEqual((out.numpy() == expected_result).all(), True)
with paddle.imperative.guard():
batch_shape = [3, 2]
out = fluid.layers.eye(10,
10,
dtype="int64",
batch_shape=batch_shape)
result = np.eye(10, dtype="int64")
expected_result = []
for index in reversed(batch_shape):
tmp_result = []
for i in range(index):
tmp_result.append(result)
result = tmp_result
expected_result = np.stack(result, axis=0)
paddle.disable_static()
batch_shape = [3, 2]
out = fluid.layers.eye(10, 10, dtype="int64", batch_shape=batch_shape)
result = np.eye(10, dtype="int64")
expected_result = []
for index in reversed(batch_shape):
tmp_result = []
for i in range(index):
tmp_result.append(result)
result = tmp_result
expected_result = np.stack(result, axis=0)
paddle.enable_static()
self.assertEqual(out.numpy().shape == np.array(expected_result).shape,
True)
self.assertEqual((out.numpy() == expected_result).all(), True)
def test_errors(self):
with paddle.program_guard(paddle.Program()):
with paddle.static.program_guard(paddle.static.Program()):
def test_num_rows_type_check():
paddle.eye(-1, dtype="int64")

@@ -191,8 +191,8 @@ class TestFlattenPython(unittest.TestCase):
self.assertRaises(ValueError, test_InputError)
def test_Negative():
paddle.enable_imperative()
img = paddle.imperative.to_variable(x)
paddle.disable_static()
img = paddle.to_variable(x)
out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
return out.numpy().shape

@@ -16,7 +16,7 @@ from __future__ import print_function
import paddle
import paddle.fluid.core as core
from paddle import Program, program_guard
from paddle.static import program_guard, Program
import paddle.compat as cpt
import unittest
import numpy as np
@@ -38,7 +38,7 @@ class TestFullOp(unittest.TestCase):
place = paddle.CPUPlace()
if core.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
exe = paddle.Executor(place)
exe = paddle.static.Executor(place)
exe.run(startup_program)
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
@@ -53,12 +53,13 @@
msg="full_like output is wrong, out = " + str(out_np))
def test_full_like_imperative(self):
with paddle.imperative.guard():
input = paddle.arange(6, 10, dtype='float32')
out = paddle.full_like(input, fill_value=888.88, dtype='float32')
out_numpy = np.random.random((4)).astype("float32")
out_numpy.fill(888.88)
self.assertTrue((out.numpy() == out_numpy).all(), True)
paddle.disable_static()
input = paddle.arange(6, 10, dtype='float32')
out = paddle.full_like(input, fill_value=888.88, dtype='float32')
out_numpy = np.random.random((4)).astype("float32")
out_numpy.fill(888.88)
self.assertTrue((out.numpy() == out_numpy).all(), True)
paddle.enable_static()
class TestFullOpError(unittest.TestCase):

@@ -205,27 +205,28 @@ class TestImperative(unittest.TestCase):
self.assertTrue(np.array_equal(dy_grad1, dy_grad2))
def test_functional_paddle_imperative_dygraph_context(self):
self.assertFalse(paddle.imperative.enabled())
paddle.enable_imperative()
self.assertTrue(paddle.imperative.enabled())
self.assertFalse(paddle.in_dynamic_mode())
paddle.disable_static()
self.assertTrue(paddle.in_dynamic_mode())
np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
var_inp = paddle.imperative.to_variable(np_inp)
var_inp = paddle.to_variable(np_inp)
mlp = MLP(input_size=2)
out = mlp(var_inp)
dy_out1 = out.numpy()
out.backward()
dy_grad1 = mlp._linear1.weight.gradient()
paddle.disable_imperative()
self.assertFalse(paddle.imperative.enabled())
with paddle.imperative.guard():
self.assertTrue(paddle.imperative.enabled())
var_inp = paddle.imperative.to_variable(np_inp)
mlp = MLP(input_size=2)
out = mlp(var_inp)
dy_out2 = out.numpy()
out.backward()
dy_grad2 = mlp._linear1.weight.gradient()
self.assertFalse(paddle.imperative.enabled())
paddle.enable_static()
self.assertFalse(paddle.in_dynamic_mode())
paddle.disable_static()
self.assertTrue(paddle.in_dynamic_mode())
var_inp = paddle.to_variable(np_inp)
mlp = MLP(input_size=2)
out = mlp(var_inp)
dy_out2 = out.numpy()
out.backward()
dy_grad2 = mlp._linear1.weight.gradient()
paddle.enable_static()
self.assertFalse(paddle.in_dynamic_mode())
self.assertTrue(np.array_equal(dy_out1, dy_out2))
self.assertTrue(np.array_equal(dy_grad1, dy_grad2))
@@ -281,7 +282,7 @@ class TestImperative(unittest.TestCase):
l0 = fluid.Linear(2, 2)
self.assertTrue(l0.weight._grad_ivar() is None)
l1 = fluid.Linear(2, 2)
with paddle.imperative.no_grad():
with paddle.no_grad():
self.assertTrue(l1.weight.stop_gradient is False)
tmp = l1.weight * 2
self.assertTrue(tmp.stop_gradient)

@@ -43,7 +43,7 @@ class MLP(fluid.Layer):
class TestDataParallelStateDict(unittest.TestCase):
def test_data_parallel_state_dict(self):
with fluid.dygraph.guard():
strategy = paddle.imperative.prepare_context()
strategy = paddle.prepare_context()
mlp = MLP()
parallel_mlp = dygraph.parallel.DataParallel(mlp, strategy)

@@ -153,7 +153,7 @@ class TestImperativeMnist(unittest.TestCase):
label.stop_gradient = True
if batch_id % 10 == 0:
cost, traced_layer = paddle.imperative.TracedLayer.trace(
cost, traced_layer = paddle.jit.TracedLayer.trace(
mnist, inputs=img)
if program is not None:
self.assertTrue(program, traced_layer.program)
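
For context, the renamed entry point is used exactly as before; a hedged sketch of tracing and persisting the result (mnist and img as in the test above, output path arbitrary):

    # trace the dygraph Layer into a static program
    cost, traced_layer = paddle.jit.TracedLayer.trace(mnist, inputs=img)
    # the traced program can then be saved for inference
    traced_layer.save_inference_model('./traced_mnist')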
