remove to_variable from 2.0 (#27528)

Zhou Wei 5 years ago committed by GitHub
parent 9b12401434
commit 162b4d6c13

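In Paddle 2.0 the imperative-mode alias paddle.to_variable is dropped; tests and docstring examples switch to paddle.to_tensor, and every hunk below applies that one-line substitution. A minimal before/after sketch of the migration, assuming Paddle 2.0 dygraph mode (the array shape and values are illustrative, not taken from the diff):

import numpy as np
import paddle

paddle.disable_static()  # dygraph (imperative) mode, as used in the updated tests
x_np = np.ones([2, 3]).astype('float32')

# 1.x-style spelling removed by this commit:
#   x = paddle.to_variable(x_np)
# 2.0 spelling used throughout the diff:
x = paddle.to_tensor(x_np)
print(x.numpy().shape)  # (2, 3)
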
@@ -230,7 +230,6 @@ from .framework import CPUPlace #DEFINE_ALIAS
from .framework import CUDAPlace #DEFINE_ALIAS
from .framework import CUDAPinnedPlace #DEFINE_ALIAS
from .framework import to_variable #DEFINE_ALIAS
from .framework import grad #DEFINE_ALIAS
from .framework import no_grad #DEFINE_ALIAS
from .framework import save #DEFINE_ALIAS

@@ -3230,14 +3230,11 @@ class Flatten(layers.Layer):
.. code-block:: python
import paddle
from paddle import to_variable
import numpy as np
paddle.disable_static()
inp_np = np.ones([5, 2, 3, 4]).astype('float32')
paddle.disable_static()
inp_np = to_variable(inp_np)
inp_np = paddle.to_tensor(inp_np)
flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
flatten_res = flatten(inp_np)
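Pieced together from the lines visible in this hunk, the updated Flatten docstring example would read roughly as follows (a sketch, not the exact file contents; the to_variable import, the duplicated paddle.disable_static() call, and the old conversion line are removed):

import paddle
import numpy as np

paddle.disable_static()
inp_np = np.ones([5, 2, 3, 4]).astype('float32')
inp_np = paddle.to_tensor(inp_np)
flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
flatten_res = flatten(inp_np)  # shape [5, 6, 4]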

@@ -228,7 +228,7 @@ class TestTanhAPI(unittest.TestCase):
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
out1 = F.tanh(x)
out2 = paddle.tanh(x)
th = paddle.nn.Tanh()
@@ -573,7 +573,7 @@ class TestHardShrinkAPI(unittest.TestCase):
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
out1 = F.hardshrink(x)
hd = paddle.nn.Hardshrink()
out2 = hd(x)
@@ -639,7 +639,7 @@ class TestHardtanhAPI(unittest.TestCase):
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
out1 = F.hardtanh(x)
m = paddle.nn.Hardtanh()
out2 = m(x)
@@ -1063,7 +1063,7 @@ class TestLeakyReluAPI(unittest.TestCase):
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
out1 = F.leaky_relu(x)
m = paddle.nn.LeakyReLU()
out2 = m(x)

@@ -25,7 +25,7 @@ class TestAdamaxAPI(unittest.TestCase):
def test_adamax_api_dygraph(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_variable(value)
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adamax(
learning_rate=0.01,

@@ -22,7 +22,7 @@ class TestAdamWOp(unittest.TestCase):
def test_adamw_op_dygraph(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_variable(value)
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.AdamW(
learning_rate=0.01,
@@ -37,7 +37,7 @@ class TestAdamWOp(unittest.TestCase):
def test_adamw_op_coverage(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_variable(value)
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.AdamW(
learning_rate=0.0,

@@ -147,7 +147,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.disable_static(place=place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
out_1 = paddle.nn.functional.adaptive_avg_pool2d(
x=x, output_size=[3, 3])
@@ -245,7 +245,7 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.disable_static(place=place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3])
out_1 = adaptive_avg_pool(x=x)

@@ -162,7 +162,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.disable_static(place=place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
out_1 = paddle.nn.functional.adaptive_avg_pool3d(
x=x, output_size=[3, 3, 3])
@@ -262,7 +262,7 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.disable_static(place=place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(
output_size=[3, 3, 3])

@@ -147,7 +147,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.disable_static(place=place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
out_1 = paddle.nn.functional.adaptive_max_pool2d(
x=x, return_indices=False, output_size=[3, 3])
@@ -240,7 +240,7 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.disable_static(place=place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3])
out_1 = adaptive_max_pool(x=x)

@@ -162,7 +162,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.disable_static(place=place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
out_1 = paddle.nn.functional.adaptive_max_pool3d(
x=x, output_size=[3, 3, 3])
@@ -257,7 +257,7 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
paddle.disable_static(place=place)
x = paddle.to_variable(self.x_np)
x = paddle.to_tensor(self.x_np)
adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
output_size=[3, 3, 3])

@@ -244,9 +244,9 @@ class TestAddMMAPI(unittest.TestCase):
def test_error1():
data_x_wrong = np.ones((2, 3)).astype(np.float32)
x = paddle.to_variable(data_x_wrong)
y = paddle.to_variable(data_y)
input = paddle.to_variable(data_input)
x = paddle.to_tensor(data_x_wrong)
y = paddle.to_tensor(data_y)
input = paddle.to_tensor(data_input)
out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
self.assertRaises(ValueError, test_error1)
'''

@@ -98,9 +98,9 @@ class TestArangeImperative(unittest.TestCase):
x2 = paddle.tensor.arange(5)
x3 = paddle.tensor.creation.arange(5)
start = paddle.to_variable(np.array([0], 'float32'))
end = paddle.to_variable(np.array([5], 'float32'))
step = paddle.to_variable(np.array([1], 'float32'))
start = paddle.to_tensor(np.array([0], 'float32'))
end = paddle.to_tensor(np.array([5], 'float32'))
step = paddle.to_tensor(np.array([1], 'float32'))
x4 = paddle.arange(start, end, step, 'int64')
paddle.enable_static()

@@ -96,7 +96,7 @@ class TestDygraph(unittest.TestCase):
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_variable(x_data)
x = paddle.to_tensor(x_data)
out = paddle.cholesky(x, upper=False)

@@ -168,9 +168,9 @@ class TestClipAPI(unittest.TestCase):
paddle.disable_static(place)
data_shape = [1, 9, 9, 4]
data = np.random.random(data_shape).astype('float32')
images = paddle.to_variable(data, dtype='float32')
v_min = paddle.to_variable(np.array([0.2], dtype=np.float32))
v_max = paddle.to_variable(np.array([0.8], dtype=np.float32))
images = paddle.to_tensor(data, dtype='float32')
v_min = paddle.to_tensor(np.array([0.2], dtype=np.float32))
v_max = paddle.to_tensor(np.array([0.8], dtype=np.float32))
out_1 = paddle.clip(images, min=0.2, max=0.8)
out_2 = paddle.clip(images, min=0.2, max=0.9)

@@ -285,9 +285,9 @@ class TestConcatAPI(unittest.TestCase):
in2 = np.array([[11, 12, 13], [14, 15, 16]])
in3 = np.array([[21, 22], [23, 24]])
paddle.disable_static()
x1 = paddle.to_variable(in1)
x2 = paddle.to_variable(in2)
x3 = paddle.to_variable(in3)
x1 = paddle.to_tensor(in1)
x2 = paddle.to_tensor(in2)
x3 = paddle.to_tensor(in3)
out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
out2 = paddle.concat(x=[x1, x2], axis=0)
np_out1 = np.concatenate([in1, in2, in3], axis=-1)

@@ -75,8 +75,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
np_x2 = np.random.rand(*shape).astype(np.float32)
np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)
tesnor_x1 = paddle.to_variable(np_x1)
tesnor_x2 = paddle.to_variable(np_x2)
tesnor_x1 = paddle.to_tensor(np_x1)
tesnor_x2 = paddle.to_tensor(np_x2)
y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)
self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -92,8 +92,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
np_x2 = np.random.rand(*shape).astype(np.float32)
np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)
tesnor_x1 = paddle.to_variable(np_x1)
tesnor_x2 = paddle.to_variable(np_x2)
tesnor_x1 = paddle.to_tensor(np_x1)
tesnor_x2 = paddle.to_tensor(np_x2)
y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)
self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -110,8 +110,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
np_x2 = np.random.rand(*shape2).astype(np.float32)
np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)
tesnor_x1 = paddle.to_variable(np_x1)
tesnor_x2 = paddle.to_variable(np_x2)
tesnor_x1 = paddle.to_tensor(np_x1)
tesnor_x2 = paddle.to_tensor(np_x2)
y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)
self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -129,8 +129,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)
cos_sim_func = nn.CosineSimilarity(axis=axis, eps=eps)
tesnor_x1 = paddle.to_variable(np_x1)
tesnor_x2 = paddle.to_variable(np_x2)
tesnor_x1 = paddle.to_tensor(np_x1)
tesnor_x2 = paddle.to_tensor(np_x2)
y = cos_sim_func(tesnor_x1, tesnor_x2)
self.assertTrue(np.allclose(y.numpy(), np_out))

@@ -21,13 +21,12 @@ import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle import to_variable
class TestCumsumOp(unittest.TestCase):
def run_cases(self):
data_np = np.arange(12).reshape(3, 4)
data = to_variable(data_np)
data = paddle.to_tensor(data_np)
y = paddle.cumsum(data)
z = np.cumsum(data_np)

@@ -20,7 +20,6 @@ import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
import paddle.fluid.core as core
from paddle import to_variable
class TestDefaultType(unittest.TestCase):

@@ -36,7 +36,7 @@ class TestDirectory(unittest.TestCase):
def test_new_directory(self):
new_directory = [
'paddle.enable_static', 'paddle.disable_static',
'paddle.in_dynamic_mode', 'paddle.to_variable', 'paddle.grad',
'paddle.in_dynamic_mode', 'paddle.to_tensor', 'paddle.grad',
'paddle.no_grad', 'paddle.save', 'paddle.load',
'paddle.static.save', 'paddle.static.load',
'paddle.distributed.ParallelEnv',

@@ -195,7 +195,7 @@ class TestFlattenPython(unittest.TestCase):
def test_Negative():
paddle.disable_static()
img = paddle.to_variable(x)
img = paddle.to_tensor(x)
out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
return out.numpy().shape

@@ -211,7 +211,7 @@ class TestImperative(unittest.TestCase):
paddle.disable_static()
self.assertTrue(paddle.in_dynamic_mode())
np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
var_inp = paddle.to_variable(np_inp)
var_inp = paddle.to_tensor(np_inp)
mlp = MLP(input_size=2)
out = mlp(var_inp)
dy_out1 = out.numpy()
@@ -221,7 +221,7 @@ class TestImperative(unittest.TestCase):
self.assertFalse(paddle.in_dynamic_mode())
paddle.disable_static()
self.assertTrue(paddle.in_dynamic_mode())
var_inp = paddle.to_variable(np_inp)
var_inp = paddle.to_tensor(np_inp)
mlp = MLP(input_size=2)
out = mlp(var_inp)
dy_out2 = out.numpy()

@@ -54,7 +54,7 @@ class TestSimpleNet(unittest.TestCase):
# grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)
input_word = np.array([[1, 2], [2, 1]]).astype('int64')
input = paddle.to_variable(input_word)
input = paddle.to_tensor(input_word)
simplenet = SimpleNet(20, 32, dtype)
adam = SGDOptimizer(

@@ -41,7 +41,7 @@ def run_dygraph(x_np, op_str, use_gpu=True):
if use_gpu and fluid.core.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
paddle.disable_static(place)
x = paddle.to_variable(x_np)
x = paddle.to_tensor(x_np)
dygraph_result = getattr(paddle.tensor, op_str)(x)
return dygraph_result

@@ -543,9 +543,9 @@ class TestJitSaveMultiCases(unittest.TestCase):
loaded_layer = paddle.jit.load(model_path)
loaded_layer.eval()
# inference & compare
x = paddle.to_variable(np.random.random((1, 784)).astype('float32'))
x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
if with_label:
y = paddle.to_variable(np.random.random((1, 1)).astype('int64'))
y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
pred, _ = layer(x, y)
pred = pred.numpy()
else:
@@ -677,7 +677,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
model_path = "test_not_prune_output_spec_name_warning"
configs = paddle.SaveLoadConfig()
out = paddle.to_variable(np.random.random((1, 1)).astype('float'))
out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
configs.output_spec = [out]
paddle.jit.save(layer, model_path, configs=configs)
@@ -709,7 +709,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
model_path = "test_prune_to_static_after_train"
configs = paddle.SaveLoadConfig()
out = paddle.to_variable(np.random.random((1, 1)).astype('float'))
out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
configs.output_spec = [out]
with self.assertRaises(ValueError):
paddle.jit.save(
@@ -730,7 +730,7 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase):
def test_save_load_empty_layer(self):
layer = EmptyLayer()
x = paddle.to_variable(np.random.random((10)).astype('float32'))
x = paddle.to_tensor(np.random.random((10)).astype('float32'))
out = layer(x)
paddle.jit.save(layer, self.model_path)
load_layer = paddle.jit.load(self.model_path)
@@ -746,8 +746,8 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase):
def test_save_load_no_param_layer(self):
layer = NoParamLayer()
x = paddle.to_variable(np.random.random((5)).astype('float32'))
y = paddle.to_variable(np.random.random((5)).astype('float32'))
x = paddle.to_tensor(np.random.random((5)).astype('float32'))
y = paddle.to_tensor(np.random.random((5)).astype('float32'))
out = layer(x, y)
paddle.jit.save(layer, self.model_path)
load_layer = paddle.jit.load(self.model_path)

@@ -90,7 +90,7 @@ class TestKLDivLossDygraph(unittest.TestCase):
with paddle.fluid.dygraph.guard():
kldiv_criterion = paddle.nn.KLDivLoss(reduction)
pred_loss = kldiv_criterion(
paddle.to_variable(x), paddle.to_variable(target))
paddle.to_tensor(x), paddle.to_tensor(target))
self.assertTrue(np.allclose(pred_loss.numpy(), gt_loss))
def test_kl_loss_batchmean(self):

@@ -26,8 +26,8 @@ class TestFunctionalL1Loss(unittest.TestCase):
self.label_np = np.random.random(size=(10, 10, 5)).astype(np.float32)
def run_imperative(self):
input = paddle.to_variable(self.input_np)
label = paddle.to_variable(self.label_np)
input = paddle.to_tensor(self.input_np)
label = paddle.to_tensor(self.label_np)
dy_result = paddle.nn.functional.l1_loss(input, label)
expected = np.mean(np.abs(self.input_np - self.label_np))
self.assertTrue(np.allclose(dy_result.numpy(), expected))
@@ -106,8 +106,8 @@ class TestClassL1Loss(unittest.TestCase):
self.label_np = np.random.random(size=(10, 10, 5)).astype(np.float32)
def run_imperative(self):
input = paddle.to_variable(self.input_np)
label = paddle.to_variable(self.label_np)
input = paddle.to_tensor(self.input_np)
label = paddle.to_tensor(self.label_np)
l1_loss = paddle.nn.loss.L1Loss()
dy_result = l1_loss(input, label)
expected = np.mean(np.abs(self.input_np - self.label_np))

Some files were not shown because too many files have changed in this diff.
