update doc of paddle.to_tensor (#26820)

update doc of paddle.to_tensor
numel
Zhou Wei 4 years ago committed by GitHub
parent 72f6e566be
commit 352ac149ee
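This commit migrates the docstring examples from numpy-based inputs and paddle.to_variable to direct paddle.to_tensor calls. A minimal sketch of the new pattern, assuming a Paddle 2.x dygraph environment in which paddle.disable_static() is still called explicitly, as in the examples below:

    import paddle

    paddle.disable_static()  # dygraph (imperative) mode, as in the examples in this diff

    # Old pattern removed throughout this diff:
    #   import numpy as np
    #   x = paddle.to_variable(np.array([1., 2., 3.], dtype='float32'))
    # New pattern: to_tensor accepts Python scalars and (nested) lists,
    # plus an optional dtype.
    x = paddle.to_tensor([1., 2., 3.])
    y = paddle.to_tensor([1, 2, 3], dtype='int32')
    z = paddle.to_tensor(1.5)
    print(x.numpy())  # [1. 2. 3.]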

@@ -12175,13 +12175,10 @@ def logical_and(x, y, out=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([True], dtype=np.bool)
y_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
x = paddle.to_tensor([True])
y = paddle.to_tensor([True, False, True, False])
res = paddle.logical_and(x, y)
print(res.numpy()) # [True False True False]
"""
@@ -12294,11 +12291,9 @@ def logical_not(x, out=None, name=None):
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.to_variable(x_data)
x = paddle.to_tensor([True, False, True, False])
res = paddle.logical_not(x)
print(res.numpy()) # [False True False True]
"""

File diff suppressed because it is too large

@@ -138,13 +138,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
.. code-block:: python
import paddle
import numpy as np
input_data = np.array([0.5, 0.6, 0.7]).astype("float32")
label_data = np.array([1.0, 0.0, 1.0]).astype("float32")
paddle.disable_static()
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32')
label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
output = paddle.nn.functional.binary_cross_entropy(input, label)
print(output.numpy()) # [0.65537095]
@@ -277,8 +274,8 @@ def binary_cross_entropy_with_logits(logit,
import paddle
paddle.disable_static()
logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32")
label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
logit = paddle.to_tensor([5.0, 1.0, 3.0])
label = paddle.to_tensor([1.0, 0.0, 1.0])
output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label)
print(output.numpy()) # [0.45618808]
@@ -569,13 +566,10 @@ def l1_loss(input, label, reduction='mean', name=None):
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
l1_loss = paddle.nn.functional.l1_loss(input, label)
print(l1_loss.numpy())
@@ -868,7 +862,7 @@ def mse_loss(input, label, reduction='mean', name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
@@ -878,8 +872,6 @@ def mse_loss(input, label, reduction='mean', name=None):
input = paddle.data(name="input", shape=[1])
label = paddle.data(name="label", shape=[1])
place = paddle.CPUPlace()
input_data = np.array([1.5]).astype("float32")
label_data = np.array([1.7]).astype("float32")
output = mse_loss(input,label)
exe = paddle.static.Executor(place)
@@ -894,8 +886,8 @@ def mse_loss(input, label, reduction='mean', name=None):
# dynamic graph mode
paddle.disable_static()
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(1.5)
label = paddle.to_tensor(1.7)
output = mse_loss(input, label)
print(output.numpy())
# [0.04000002]

@@ -366,11 +366,10 @@ def ones_like(x, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([1,2,3], dtype='float32'))
x = paddle.to_tensor([1,2,3])
out1 = paddle.ones_like(x) # [1., 1., 1.]
out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1]
@@ -453,11 +452,10 @@ def zeros_like(x, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([1,2,3], dtype='float32'))
x = paddle.to_tensor([1,2,3])
out1 = paddle.zeros_like(x) # [0., 0., 0.]
out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0]
@@ -619,7 +617,6 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@@ -633,7 +630,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
out3 = paddle.arange(4.999, dtype='float32')
# [0., 1., 2., 3., 4.]
start_var = paddle.to_tensor(np.array([3]))
start_var = paddle.to_tensor([3])
out4 = paddle.arange(start_var, 7)
# [3, 4, 5, 6]
@@ -725,7 +722,7 @@ def tril(x, diagonal=0, name=None):
paddle.disable_static()
x = paddle.to_variable(data)
x = paddle.to_tensor(data)
tril1 = paddle.tensor.tril(x)
# array([[ 1, 0, 0, 0],
@@ -797,7 +794,7 @@ def triu(x, diagonal=0, name=None):
paddle.disable_static()
# example 1, default diagonal
x = paddle.to_variable(data)
x = paddle.to_tensor(data)
triu1 = paddle.tensor.triu(x)
# array([[ 1, 2, 3, 4],
# [ 0, 6, 7, 8],
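The tril/triu hunks above only swap paddle.to_variable(data) for paddle.to_tensor(data); the data array itself is defined outside the visible context. A minimal standalone sketch, assuming data is the 3x4 matrix implied by the commented outputs:

    import paddle

    paddle.disable_static()
    x = paddle.to_tensor([[ 1,  2,  3,  4],
                          [ 5,  6,  7,  8],
                          [ 9, 10, 11, 12]])
    print(paddle.tensor.tril(x).numpy())
    # [[ 1  0  0  0]
    #  [ 5  6  0  0]
    #  [ 9 10 11  0]]
    print(paddle.tensor.triu(x).numpy())
    # [[ 1  2  3  4]
    #  [ 0  6  7  8]
    #  [ 0  0 11 12]]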

@@ -810,7 +810,7 @@ def cholesky(x, upper=False, name=None):
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_variable(x_data)
x = paddle.to_tensor(x_data)
out = paddle.cholesky(x, upper=False)
print(out.numpy())
# [[1.190523 0. 0. ]
@@ -855,15 +855,16 @@ def bmm(x, y, name=None):
Examples:
import paddle
# In imperative mode:
# size input1: (2, 2, 3) and input2: (2, 3, 2)
input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]])
input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
paddle.disable_static()
x = paddle.to_variable(input1)
y = paddle.to_variable(input2)
# In imperative mode:
# size x: (2, 2, 3) and y: (2, 3, 2)
x = paddle.to_tensor([[[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0]],
[[3.0, 3.0, 3.0],
[4.0, 4.0, 4.0]]])
y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],
[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
out = paddle.bmm(x, y)
#output size: (2, 2, 2)
#output value:
@@ -924,10 +925,8 @@ def histogram(input, bins=100, min=0, max=0):
Code Example 2:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static(paddle.CPUPlace())
inputs_np = np.array([1, 2, 1]).astype(np.float)
inputs = paddle.to_variable(inputs_np)
inputs = paddle.to_tensor([1, 2, 1])
result = paddle.histogram(inputs, bins=4, min=0, max=3)
print(result) # [0, 2, 1, 0]
paddle.enable_static()

@@ -71,13 +71,12 @@ def equal_all(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 2, 3]))
z = paddle.to_variable(np.array([1, 4, 3]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 2, 3])
z = paddle.to_tensor([1, 4, 3])
result1 = paddle.equal_all(x, y)
print(result1.numpy()) # result1 = [True ]
result2 = paddle.equal_all(x, z)
@@ -120,14 +119,11 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
np_x = np.array([10000., 1e-07]).astype("float32")
np_y = np.array([10000.1, 1e-08]).astype("float32")
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
x = paddle.to_tensor([10000., 1e-07])
y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
@@ -137,10 +133,8 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
np_result2 = result2.numpy()
# [False]
np_x = np.array([1.0, float('nan')]).astype("float32")
np_y = np.array([1.0, float('nan')]).astype("float32")
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
x = paddle.to_tensor([1.0, float('nan')])
y = paddle.to_tensor([1.0, float('nan')])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
@@ -195,12 +189,11 @@ def equal(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.equal(x, y)
print(result1.numpy()) # result1 = [True False False]
"""
@@ -227,12 +220,11 @@ def greater_equal(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_equal(x, y)
print(result1.numpy()) # result1 = [True False True]
"""
@@ -259,12 +251,11 @@ def greater_than(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_than(x, y)
print(result1.numpy()) # result1 = [False False True]
"""
@@ -292,12 +283,11 @@ def less_equal(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_equal(x, y)
print(result1.numpy()) # result1 = [True True False]
"""
@@ -325,12 +315,11 @@ def less_than(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_than(x, y)
print(result1.numpy()) # result1 = [False True False]
"""
@@ -358,12 +347,12 @@ def not_equal(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.not_equal(x, y)
print(result1.numpy()) # result1 = [False True True]
"""

@@ -98,18 +98,14 @@ def concat(x, axis=0, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static() # Now we are in imperative mode
in1 = np.array([[1, 2, 3],
[4, 5, 6]])
in2 = np.array([[11, 12, 13],
[14, 15, 16]])
in3 = np.array([[21, 22],
[23, 24]])
x1 = paddle.to_tensor(in1)
x2 = paddle.to_tensor(in2)
x3 = paddle.to_tensor(in3)
x1 = paddle.to_tensor([[1, 2, 3],
[4, 5, 6]])
x2 = paddle.to_tensor([[11, 12, 13],
[14, 15, 16]])
x3 = paddle.to_tensor([[21, 22],
[23, 24]])
zero = paddle.full(shape=[1], dtype='int32', fill_value=0)
# When the axis is negative, the real axis is (axis + Rank(x))
# As follow, axis is -1, Rank(x) is 2, the real axis is 1
@@ -158,7 +154,7 @@ def flip(x, axis, name=None):
image_shape=(3, 2, 2)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
x = x.astype('float32')
img = paddle.to_variable(x)
img = paddle.to_tensor(x)
out = paddle.flip(img, [0,1])
print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]]
@@ -250,7 +246,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]).reshape(image_shape) / 100.
x = x.astype('float32')
img = paddle.to_variable(x)
img = paddle.to_tensor(x)
out = paddle.flatten(img, start_axis=1, stop_axis=2)
# out shape is [2, 12, 4]
"""
@@ -315,15 +311,13 @@ def roll(x, shifts, axis=None, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
data = np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
paddle.disable_static()
x = paddle.to_variable(data)
x = paddle.to_tensor([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
out_z1 = paddle.roll(x, shifts=1)
print(out_z1.numpy())
#[[9. 1. 2.]
@@ -447,8 +441,7 @@ def stack(x, axis=0, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x1 = paddle.to_tensor([[1.0, 2.0]])
x2 = paddle.to_tensor([[3.0, 4.0]])
@@ -632,12 +625,10 @@ def unique(x,
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x_data = np.array([2, 3, 3, 1, 5, 3])
x = paddle.to_tensor(x_data)
x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
unique = paddle.unique(x)
np_unique = unique.numpy() # [1 2 3 5]
_, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
@@ -645,8 +636,7 @@ def unique(x,
np_inverse = inverse.numpy() # [1 2 2 0 3 2]
np_counts = counts.numpy() # [1 1 3 1]
x_data = np.array([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
x = paddle.to_tensor(x_data)
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
unique = paddle.unique(x)
np_unique = unique.numpy() # [0 1 2 3]
@@ -815,14 +805,11 @@ def gather(x, index, axis=None, name=None):
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_1 = np.array([[1,2],[3,4],[5,6]])
index_1 = np.array([0,1])
input = paddle.to_tensor(input_1)
index = paddle.to_tensor(index_1)
input = paddle.to_tensor([[1,2],[3,4],[5,6]])
index = paddle.to_tensor([0,1])
output = paddle.gather(input, index, axis=0)
# expected output: [[1,2],[3,4]]
"""
@@ -958,16 +945,11 @@ def scatter(x, index, updates, overwrite=True, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
index_data = np.array([2, 1, 0, 1]).astype(np.int64)
updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)
x = paddle.to_tensor(x_data)
index = paddle.to_tensor(index_data)
updates = paddle.to_tensor(updates_data)
x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
output1 = paddle.scatter(x, index, updates, overwrite=False)
# [[3., 3.],
@@ -1074,11 +1056,9 @@ def tile(x, repeat_times, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
np_data = np.array([1, 2, 3]).astype('int32')
data = paddle.to_tensor(np_data)
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.tile(data, repeat_times=[2, 1])
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
@@ -1087,8 +1067,7 @@ def tile(x, repeat_times, name=None):
np_out = out.numpy()
# [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]]
np_repeat_times = np.array([2, 1]).astype("int32")
repeat_times = paddle.to_tensor(np_repeat_times)
repeat_times = paddle.to_tensor([2, 1], dtype='int32')
out = paddle.tile(data, repeat_times=repeat_times)
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
@@ -1156,15 +1135,12 @@ def expand_as(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
np_data_x = np.array([1, 2, 3]).astype('int32')
np_data_y = np.array([[1, 2, 3], [4, 5, 6]]).astype('int32')
data_x = paddle.to_tensor(np_data_x)
data_y = paddle.to_tensor(np_data_y)
data_x = paddle.to_tensor([1, 2, 3], 'int32')
data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
out = paddle.expand_as(data_x, data_y)
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
@@ -1212,12 +1188,10 @@ def expand(x, shape, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
np_data = np.array([1, 2, 3]).astype('int32')
data = paddle.to_tensor(np_data)
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.expand(data, shape=[2, 3])
out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
@@ -1416,14 +1390,11 @@ def gather_nd(x, index, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
np_x = np.array([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10], [11, 12]]])
np_index = [[0, 1]]
x = paddle.to_tensor(np_x)
index = paddle.to_tensor(np_index)
x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10], [11, 12]]])
index = paddle.to_tensor([[0, 1]])
output = paddle.gather_nd(x, index) #[[3, 4]]

File diff suppressed because it is too large

@@ -60,7 +60,6 @@ def bernoulli(x, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@@ -188,7 +187,6 @@ def standard_normal(shape, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@@ -209,8 +207,9 @@ def standard_normal(shape, dtype=None, name=None):
# [ 0.8086993 , 0.6868893 ]]] # random
# example 3: attr shape is a Tensor, the data type must be int64 or int32.
shape_tensor = paddle.to_tensor(np.array([2, 3]))
out3 = paddle.standard_normal(shape_tensor)
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.standard_normal(shape_tensor)
# [[-2.878077 , 0.17099959, 0.05111201] # random
# [-0.3761474, -1.044801 , 1.1870178 ]] # random
@@ -258,7 +257,6 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@@ -266,11 +264,11 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
# [[ 0.17501129 0.32364586 1.561118 ] # random
# [-1.7232178 1.1545963 -0.76156676]] # random
mean_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0]))
mean_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
out2 = paddle.normal(mean=mean_tensor)
# [ 0.18644847 -1.19434458 3.93694787] # random
std_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0]))
std_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
out3 = paddle.normal(mean=mean_tensor, std=std_tensor)
# [1.00780561 3.78457445 5.81058198] # random
@@ -357,7 +355,6 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
@@ -379,8 +376,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
# example 3:
# attr shape is a Tensor, the data type must be int64 or int32.
shape = np.array([2, 3])
shape_tensor = paddle.to_tensor(shape)
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.tensor.random.uniform(shape_tensor)
# if shape_tensor's value is [2, 3]
# result_3 is:
@@ -454,7 +450,6 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@@ -473,8 +468,10 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
# example 3:
# attr shape is a Tensor
shape_tensor = paddle.to_tensor(np.array([3]))
out3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
shape_tensor = paddle.to_tensor(3)
result_3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
# [-2, 2, 3] # random
# example 4:
@@ -604,7 +601,6 @@ def rand(shape, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
# example 1: attr shape is a list which doesn't contain Tensor.
@@ -624,8 +620,9 @@ def rand(shape, dtype=None, name=None):
# [0.870881 , 0.2984597 ]]] # random
# example 3: attr shape is a Tensor, the data type must be int64 or int32.
shape_tensor = paddle.to_tensor(np.array([2, 3]))
out2 = paddle.rand(shape_tensor)
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.rand(shape_tensor)
# [[0.22920267, 0.841956 , 0.05981819], # random
# [0.4836288 , 0.24573246, 0.7516129 ]] # random
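Several of the random-op hunks above (standard_normal, uniform, randint, rand) also build the Tensor form of the shape argument with to_tensor. A minimal sketch of that usage, assuming a Paddle 2.x dygraph environment:

    import paddle

    paddle.disable_static()
    # A 1-D integer Tensor used as the shape argument, as in the updated examples.
    shape_tensor = paddle.to_tensor([2, 3])
    out1 = paddle.rand(shape_tensor)                           # uniform on [0, 1), shape [2, 3]
    out2 = paddle.randint(low=-5, high=5, shape=shape_tensor)  # random ints, shape [2, 3]
    out3 = paddle.standard_normal(shape_tensor)                # N(0, 1) samples, shape [2, 3]
    print(out1.shape)  # [2, 3]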

File diff suppressed because it is too large