update doc of paddle.to_tensor (#26820)

update doc of paddle.to_tensor
numel
Zhou Wei authored 5 years ago · committed by GitHub
parent 72f6e566be
commit 352ac149ee

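Every hunk below applies the same change: paddle.to_tensor accepts Python lists and scalars directly and takes an optional dtype, so the intermediate numpy arrays and the older paddle.to_variable calls can be dropped from the docstring examples. A minimal sketch of the before/after pattern, assuming the 2.0-beta imperative API used in these examples (the values are illustrative):

.. code-block:: python

    import paddle
    import numpy as np

    paddle.disable_static()  # imperative mode, as in the updated examples

    # old style used throughout the docs: build a numpy array, then wrap it
    x_np = np.array([1.0, 0.0, 1.0]).astype("float32")
    x_old = paddle.to_tensor(x_np)   # or the older paddle.to_variable(x_np)

    # new style: pass the list (or scalar) directly, with an explicit dtype
    # only where the inferred one is not what the example needs
    x_new = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
    y_new = paddle.to_tensor(1.5)    # scalar input, as in the mse_loss example

    print(x_old.numpy(), x_new.numpy(), y_new.numpy())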
@ -12175,13 +12175,10 @@ def logical_and(x, y, out=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([True], dtype=np.bool)
y_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
x = paddle.to_tensor([True])
y = paddle.to_tensor([True, False, True, False])
res = paddle.logical_and(x, y)
print(res.numpy()) # [True False True False]
"""
@ -12294,11 +12291,9 @@ def logical_not(x, out=None, name=None):
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([True, False, True, False], dtype=np.bool)
x = paddle.to_variable(x_data)
x = paddle.to_tensor([True, False, True, False])
res = paddle.logical_not(x)
print(res.numpy()) # [False True False True]
"""

File diff suppressed because it is too large

@ -138,13 +138,10 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
.. code-block:: python
import paddle
import numpy as np
input_data = np.array([0.5, 0.6, 0.7]).astype("float32")
label_data = np.array([1.0, 0.0, 1.0]).astype("float32")
paddle.disable_static()
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
input = paddle.to_tensor([0.5, 0.6, 0.7], 'float32')
label = paddle.to_tensor([1.0, 0.0, 1.0], 'float32')
output = paddle.nn.functional.binary_cross_entropy(input, label)
print(output.numpy()) # [0.65537095]
@ -277,8 +274,8 @@ def binary_cross_entropy_with_logits(logit,
import paddle
paddle.disable_static()
logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32")
label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
logit = paddle.to_tensor([5.0, 1.0, 3.0])
label = paddle.to_tensor([1.0, 0.0, 1.0])
output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label)
print(output.numpy()) # [0.45618808]
@ -569,13 +566,10 @@ def l1_loss(input, label, reduction='mean', name=None):
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
input = paddle.to_tensor(input_data)
label = paddle.to_tensor(label_data)
input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])
l1_loss = paddle.nn.functional.l1_loss(input, label)
print(l1_loss.numpy())
@ -868,7 +862,7 @@ def mse_loss(input, label, reduction='mean', name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
@ -878,8 +872,6 @@ def mse_loss(input, label, reduction='mean', name=None):
input = paddle.data(name="input", shape=[1])
label = paddle.data(name="label", shape=[1])
place = paddle.CPUPlace()
input_data = np.array([1.5]).astype("float32")
label_data = np.array([1.7]).astype("float32")
output = mse_loss(input,label)
exe = paddle.static.Executor(place)
@ -894,8 +886,8 @@ def mse_loss(input, label, reduction='mean', name=None):
# dynamic graph mode
paddle.disable_static()
input = paddle.to_variable(input_data)
label = paddle.to_variable(label_data)
input = paddle.to_tensor(1.5)
label = paddle.to_tensor(1.7)
output = mse_loss(input, label)
print(output.numpy())
# [0.04000002]

@ -366,11 +366,10 @@ def ones_like(x, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([1,2,3], dtype='float32'))
x = paddle.to_tensor([1,2,3])
out1 = paddle.ones_like(x) # [1., 1., 1.]
out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1]
@ -453,11 +452,10 @@ def zeros_like(x, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x = paddle.to_tensor(np.array([1,2,3], dtype='float32'))
x = paddle.to_tensor([1,2,3])
out1 = paddle.zeros_like(x) # [0., 0., 0.]
out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0]
@ -619,7 +617,6 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@ -633,7 +630,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
out3 = paddle.arange(4.999, dtype='float32')
# [0., 1., 2., 3., 4.]
start_var = paddle.to_tensor(np.array([3]))
start_var = paddle.to_tensor([3])
out4 = paddle.arange(start_var, 7)
# [3, 4, 5, 6]
@ -725,7 +722,7 @@ def tril(x, diagonal=0, name=None):
paddle.disable_static()
x = paddle.to_variable(data)
x = paddle.to_tensor(data)
tril1 = paddle.tensor.tril(x)
# array([[ 1, 0, 0, 0],
@ -797,7 +794,7 @@ def triu(x, diagonal=0, name=None):
paddle.disable_static()
# example 1, default diagonal
x = paddle.to_variable(data)
x = paddle.to_tensor(data)
triu1 = paddle.tensor.triu(x)
# array([[ 1, 2, 3, 4],
# [ 0, 6, 7, 8],

@ -810,7 +810,7 @@ def cholesky(x, upper=False, name=None):
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_variable(x_data)
x = paddle.to_tensor(x_data)
out = paddle.cholesky(x, upper=False)
print(out.numpy())
# [[1.190523 0. 0. ]
@ -855,15 +855,16 @@ def bmm(x, y, name=None):
Examples:
import paddle
# In imperative mode:
# size input1: (2, 2, 3) and input2: (2, 3, 2)
input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]])
input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
paddle.disable_static()
x = paddle.to_variable(input1)
y = paddle.to_variable(input2)
# In imperative mode:
# size x: (2, 2, 3) and y: (2, 3, 2)
x = paddle.to_tensor([[[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0]],
[[3.0, 3.0, 3.0],
[4.0, 4.0, 4.0]]])
y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],
[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
out = paddle.bmm(x, y)
#output size: (2, 2, 2)
#output value:
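The hunk above ends at "#output value:" without showing the values; they work out as below. A quick check, assuming the same imperative API as the rest of this diff:

.. code-block:: python

    import paddle

    paddle.disable_static()

    # a batch of two (2 x 3) matrices times a batch of two (3 x 2) matrices
    x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
                          [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]])
    y = paddle.to_tensor([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                          [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]]])
    out = paddle.bmm(x, y)  # shape (2, 2, 2)
    print(out.numpy())
    # [[[ 6.  6.]
    #   [12. 12.]]
    #  [[45. 45.]
    #   [60. 60.]]]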
@ -924,10 +925,8 @@ def histogram(input, bins=100, min=0, max=0):
Code Example 2:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static(paddle.CPUPlace())
inputs_np = np.array([1, 2, 1]).astype(np.float)
inputs = paddle.to_variable(inputs_np)
inputs = paddle.to_tensor([1, 2, 1])
result = paddle.histogram(inputs, bins=4, min=0, max=3)
print(result) # [0, 2, 1, 0]
paddle.enable_static()

@ -71,13 +71,12 @@ def equal_all(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 2, 3]))
z = paddle.to_variable(np.array([1, 4, 3]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 2, 3])
z = paddle.to_tensor([1, 4, 3])
result1 = paddle.equal_all(x, y)
print(result1.numpy()) # result1 = [True ]
result2 = paddle.equal_all(x, z)
@ -120,14 +119,11 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
np_x = np.array([10000., 1e-07]).astype("float32")
np_y = np.array([10000.1, 1e-08]).astype("float32")
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
x = paddle.to_tensor([10000., 1e-07])
y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
@ -137,10 +133,8 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
np_result2 = result2.numpy()
# [False]
np_x = np.array([1.0, float('nan')]).astype("float32")
np_y = np.array([1.0, float('nan')]).astype("float32")
x = paddle.to_tensor(np_x)
y = paddle.to_tensor(np_y)
x = paddle.to_tensor([1.0, float('nan')])
y = paddle.to_tensor([1.0, float('nan')])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
@ -195,12 +189,11 @@ def equal(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.equal(x, y)
print(result1.numpy()) # result1 = [True False False]
"""
@ -227,12 +220,11 @@ def greater_equal(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_equal(x, y)
print(result1.numpy()) # result1 = [True False True]
"""
@ -259,12 +251,11 @@ def greater_than(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_than(x, y)
print(result1.numpy()) # result1 = [False False True]
"""
@ -292,12 +283,11 @@ def less_equal(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_equal(x, y)
print(result1.numpy()) # result1 = [True True False]
"""
@ -325,12 +315,11 @@ def less_than(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_than(x, y)
print(result1.numpy()) # result1 = [False True False]
"""
@ -358,12 +347,12 @@ def not_equal(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = paddle.to_variable(np.array([1, 2, 3]))
y = paddle.to_variable(np.array([1, 3, 2]))
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.not_equal(x, y)
print(result1.numpy()) # result1 = [False True True]
"""

@ -98,18 +98,14 @@ def concat(x, axis=0, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static() # Now we are in imperative mode
in1 = np.array([[1, 2, 3],
x1 = paddle.to_tensor([[1, 2, 3],
[4, 5, 6]])
in2 = np.array([[11, 12, 13],
x2 = paddle.to_tensor([[11, 12, 13],
[14, 15, 16]])
in3 = np.array([[21, 22],
x3 = paddle.to_tensor([[21, 22],
[23, 24]])
x1 = paddle.to_tensor(in1)
x2 = paddle.to_tensor(in2)
x3 = paddle.to_tensor(in3)
zero = paddle.full(shape=[1], dtype='int32', fill_value=0)
# When the axis is negative, the real axis is (axis + Rank(x))
# As follow, axis is -1, Rank(x) is 2, the real axis is 1
@ -158,7 +154,7 @@ def flip(x, axis, name=None):
image_shape=(3, 2, 2)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
x = x.astype('float32')
img = paddle.to_variable(x)
img = paddle.to_tensor(x)
out = paddle.flip(img, [0,1])
print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]]
@ -250,7 +246,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]).reshape(image_shape) / 100.
x = x.astype('float32')
img = paddle.to_variable(x)
img = paddle.to_tensor(x)
out = paddle.flatten(img, start_axis=1, stop_axis=2)
# out shape is [2, 12, 4]
"""
@ -315,15 +311,13 @@ def roll(x, shifts, axis=None, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
data = np.array([[1.0, 2.0, 3.0],
paddle.disable_static()
x = paddle.to_tensor([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
paddle.disable_static()
x = paddle.to_variable(data)
out_z1 = paddle.roll(x, shifts=1)
print(out_z1.numpy())
#[[9. 1. 2.]
@ -447,7 +441,6 @@ def stack(x, axis=0, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x1 = paddle.to_tensor([[1.0, 2.0]])
@ -632,12 +625,10 @@ def unique(x,
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x_data = np.array([2, 3, 3, 1, 5, 3])
x = paddle.to_tensor(x_data)
x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
unique = paddle.unique(x)
np_unique = unique.numpy() # [1 2 3 5]
_, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
@ -645,8 +636,7 @@ def unique(x,
np_inverse = inverse.numpy() # [1 2 2 0 3 2]
np_counts = counts.numpy() # [1 1 3 1]
x_data = np.array([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
x = paddle.to_tensor(x_data)
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
unique = paddle.unique(x)
np_unique = unique.numpy() # [0 1 2 3]
@ -815,14 +805,11 @@ def gather(x, index, axis=None, name=None):
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
input_1 = np.array([[1,2],[3,4],[5,6]])
index_1 = np.array([0,1])
input = paddle.to_tensor(input_1)
index = paddle.to_tensor(index_1)
input = paddle.to_tensor([[1,2],[3,4],[5,6]])
index = paddle.to_tensor([0,1])
output = paddle.gather(input, index, axis=0)
# expected output: [[1,2],[3,4]]
"""
@ -958,16 +945,11 @@ def scatter(x, index, updates, overwrite=True, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
index_data = np.array([2, 1, 0, 1]).astype(np.int64)
updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)
x = paddle.to_tensor(x_data)
index = paddle.to_tensor(index_data)
updates = paddle.to_tensor(updates_data)
x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
output1 = paddle.scatter(x, index, updates, overwrite=False)
# [[3., 3.],
@ -1074,11 +1056,9 @@ def tile(x, repeat_times, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
np_data = np.array([1, 2, 3]).astype('int32')
data = paddle.to_tensor(np_data)
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.tile(data, repeat_times=[2, 1])
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
@ -1087,8 +1067,7 @@ def tile(x, repeat_times, name=None):
np_out = out.numpy()
# [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]]
np_repeat_times = np.array([2, 1]).astype("int32")
repeat_times = paddle.to_tensor(np_repeat_times)
repeat_times = paddle.to_tensor([2, 1], dtype='int32')
out = paddle.tile(data, repeat_times=repeat_times)
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
@ -1156,15 +1135,12 @@ def expand_as(x, y, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
np_data_x = np.array([1, 2, 3]).astype('int32')
np_data_y = np.array([[1, 2, 3], [4, 5, 6]]).astype('int32')
data_x = paddle.to_tensor(np_data_x)
data_y = paddle.to_tensor(np_data_y)
data_x = paddle.to_tensor([1, 2, 3], 'int32')
data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
out = paddle.expand_as(data_x, data_y)
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
@ -1212,12 +1188,10 @@ def expand(x, shape, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
np_data = np.array([1, 2, 3]).astype('int32')
data = paddle.to_tensor(np_data)
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.expand(data, shape=[2, 3])
out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
@ -1416,14 +1390,11 @@ def gather_nd(x, index, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
np_x = np.array([[[1, 2], [3, 4], [5, 6]],
x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10], [11, 12]]])
np_index = [[0, 1]]
x = paddle.to_tensor(np_x)
index = paddle.to_tensor(np_index)
index = paddle.to_tensor([[0, 1]])
output = paddle.gather_nd(x, index) #[[3, 4]]

File diff suppressed because it is too large

@ -60,7 +60,6 @@ def bernoulli(x, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@ -188,7 +187,6 @@ def standard_normal(shape, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@ -209,8 +207,9 @@ def standard_normal(shape, dtype=None, name=None):
# [ 0.8086993 , 0.6868893 ]]] # random
# example 3: attr shape is a Tensor, the data type must be int64 or int32.
shape_tensor = paddle.to_tensor(np.array([2, 3]))
out3 = paddle.standard_normal(shape_tensor)
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.standard_normal(shape_tensor)
# [[-2.878077 , 0.17099959, 0.05111201] # random
# [-0.3761474, -1.044801 , 1.1870178 ]] # random
@ -258,7 +257,6 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@ -266,11 +264,11 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
# [[ 0.17501129 0.32364586 1.561118 ] # random
# [-1.7232178 1.1545963 -0.76156676]] # random
mean_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0]))
mean_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
out2 = paddle.normal(mean=mean_tensor)
# [ 0.18644847 -1.19434458 3.93694787] # random
std_tensor = paddle.to_tensor(np.array([1.0, 2.0, 3.0]))
std_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
out3 = paddle.normal(mean=mean_tensor, std=std_tensor)
# [1.00780561 3.78457445 5.81058198] # random
@ -357,7 +355,6 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
@ -379,8 +376,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
# example 3:
# attr shape is a Tensor, the data type must be int64 or int32.
shape = np.array([2, 3])
shape_tensor = paddle.to_tensor(shape)
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.tensor.random.uniform(shape_tensor)
# if shape_tensor's value is [2, 3]
# result_3 is:
@ -454,7 +450,6 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
@ -473,8 +468,10 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
# example 3:
# attr shape is a Tensor
shape_tensor = paddle.to_tensor(np.array([3]))
out3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
shape_tensor = paddle.to_tensor(3)
result_3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
# [-2, 2, 3] # random
# example 4:
@ -604,7 +601,6 @@ def rand(shape, dtype=None, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
# example 1: attr shape is a list which doesn't contain Tensor.
@ -624,8 +620,9 @@ def rand(shape, dtype=None, name=None):
# [0.870881 , 0.2984597 ]]] # random
# example 3: attr shape is a Tensor, the data type must be int64 or int32.
shape_tensor = paddle.to_tensor(np.array([2, 3]))
out2 = paddle.rand(shape_tensor)
shape_tensor = paddle.to_tensor([2, 3])
result_3 = paddle.rand(shape_tensor)
# [[0.22920267, 0.841956 , 0.05981819], # random
# [0.4836288 , 0.24573246, 0.7516129 ]] # random

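A recurring detail in the random-op hunks above: when the shape argument is given as a Tensor, it is now built straight from a list of ints with paddle.to_tensor (the ops accept an int32 or int64 shape Tensor). A minimal sketch, assuming the same API level; outputs are random:

.. code-block:: python

    import paddle

    paddle.disable_static()

    # shape passed as a 1-D integer Tensor (int64 by default)
    shape_tensor = paddle.to_tensor([2, 3])
    out1 = paddle.rand(shape_tensor)                        # uniform in [0, 1)
    out2 = paddle.randint(low=-5, high=5, shape=shape_tensor)
    out3 = paddle.standard_normal(shape_tensor)

    print(out1.numpy().shape)  # (2, 3)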
@ -66,16 +66,15 @@ def argsort(x, axis=-1, descending=False, name=None):
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
input_array = np.array([[[5,8,9,5],
x = paddle.to_tensor([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
[[5,2,4,2],
[4,7,7,9],
[1,7,0,6]]]).astype(np.float32)
x = paddle.to_variable(input_array)
[1,7,0,6]]],
dtype='float32')
out1 = paddle.argsort(x=x, axis=-1)
out2 = paddle.argsort(x=x, axis=0)
out3 = paddle.argsort(x=x, axis=1)
@ -148,14 +147,12 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
data = np.array([[5,8,9,5],
x = paddle.to_tensor([[5,8,9,5],
[0,0,1,7],
[6,9,2,4]])
x = paddle.to_variable(data)
out1 = paddle.argmax(x)
print(out1.numpy()) # 2
out2 = paddle.argmax(x, axis=1)
@ -222,14 +219,12 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
data = np.array([[5,8,9,5],
x = paddle.to_tensor([[5,8,9,5],
[0,0,1,7],
[6,9,2,4]])
x = paddle.to_variable(data)
out1 = paddle.argmin(x)
print(out1.numpy()) # 4
out2 = paddle.argmin(x, axis=1)
@ -300,16 +295,12 @@ def index_select(x, index, axis=0, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static() # Now we are in imperative mode
data = np.array([[1.0, 2.0, 3.0, 4.0],
x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]])
data_index = np.array([0, 1, 1]).astype('int32')
x = paddle.to_tensor(data)
index = paddle.to_tensor(data_index)
index = paddle.to_tensor([0, 1, 1], dtype='int32')
out_z1 = paddle.index_select(x=x, index=index)
#[[1. 2. 3. 4.]
# [5. 6. 7. 8.]
@ -363,18 +354,14 @@ def nonzero(input, as_tuple=False):
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data1 = np.array([[1.0, 0.0, 0.0],
paddle.disable_static()
x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]])
data2 = np.array([0.0, 1.0, 0.0, 3.0])
data3 = np.array([0.0, 0.0, 0.0])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(data1)
x2 = fluid.dygraph.to_variable(data2)
x3 = fluid.dygraph.to_variable(data3)
x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0])
x3 = paddle.to_tensor([0.0, 0.0, 0.0])
out_z1 = paddle.nonzero(x1)
print(out_z1.numpy())
#[[0 0]
@ -451,16 +438,15 @@ def sort(x, axis=-1, descending=False, name=None):
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
input_array = np.array([[[5,8,9,5],
x = paddle.to_tensor([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
[[5,2,4,2],
[4,7,7,9],
[1,7,0,6]]]).astype(np.float32)
x = paddle.to_variable(input_array)
[1,7,0,6]]],
dtype='float32')
out1 = paddle.sort(x=x, axis=-1)
out2 = paddle.sort(x=x, axis=0)
out3 = paddle.sort(x=x, axis=1)
@ -536,15 +522,10 @@ def where(condition, x, y, name=None):
.. code-block:: python
import paddle
import numpy as np
import paddle.fluid as fluid
x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float32")
y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float32")
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(x_i)
y = fluid.dygraph.to_variable(y_i)
paddle.disable_static()
x = paddle.to_tensor([0.9383, 0.1983, 3.2, 1.2])
y = paddle.to_tensor([1.0, 1.0, 1.0, 1.0])
out = paddle.where(x>1, x, y)
print(out.numpy())
@ -622,26 +603,17 @@ def index_sample(x, index):
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data = np.array([[1.0, 2.0, 3.0, 4.0],
paddle.disable_static()
x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]]).astype('float32')
data_index = np.array([[0, 1, 2],
[9.0, 10.0, 11.0, 12.0]], dtype='float32')
index = paddle.to_tensor([[0, 1, 2],
[1, 2, 3],
[0, 0, 0]]).astype('int32')
target_data = np.array([[100, 200, 300, 400],
[0, 0, 0]], dtype='int32')
target = paddle.to_tensor([[100, 200, 300, 400],
[500, 600, 700, 800],
[900, 1000, 1100, 1200]]).astype('int32')
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data)
index = fluid.dygraph.to_variable(data_index)
target = fluid.dygraph.to_variable(target_data)
[900, 1000, 1100, 1200]], dtype='int32')
out_z1 = paddle.index_sample(x, index)
print(out_z1.numpy())
#[[1. 2. 3.]
@ -650,7 +622,7 @@ def index_sample(x, index):
# Use the index of the maximum value by topk op
# get the value of the element of the corresponding index in other tensors
top_value, top_index = fluid.layers.topk(x, k=2)
top_value, top_index = paddle.topk(x, k=2)
out_z2 = paddle.index_sample(target, top_index)
print(top_value.numpy())
#[[ 4. 3.]
@ -707,18 +679,15 @@ def masked_select(x, mask, name=None):
.. code-block:: python
import paddle
import numpy as np
paddle.disable_static()
data = np.array([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]]).astype('float32')
mask_data = np.array([[True, False, False, False],
x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]])
mask = paddle.to_tensor([[True, False, False, False],
[True, True, False, False],
[True, False, False, False]]).astype('bool')
x = paddle.to_tensor(data)
mask = paddle.to_tensor(mask_data)
[True, False, False, False]])
out = paddle.masked_select(x, mask)
#[1.0 5.0 6.0 9.0]
"""
@ -763,20 +732,17 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
data_1 = np.array([1, 4, 5, 7])
tensor_1 = paddle.to_tensor(data_1)
tensor_1 = paddle.to_tensor([1, 4, 5, 7])
value_1, indices_1 = paddle.topk(tensor_1, k=1)
print(value_1.numpy())
# [7]
print(indices_1.numpy())
# [3]
data_2 = np.array([[1, 4, 5, 7], [2, 6, 2, 5]])
tensor_2 = paddle.to_tensor(data_2)
tensor_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
value_2, indices_2 = paddle.topk(tensor_2, k=1)
print(value_2.numpy())
# [[7]
