Paddle/python/paddle/fluid/tests/unittests/test_variable.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, in_dygraph_mode
import paddle.fluid as fluid
import paddle.fluid.core as core
import numpy as np
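

# Tests for the fluid Variable API: numpy dtype conversion, variable creation
# and lookup on a Block, slicing in static graph mode, and string
# representation in both dygraph and static graph mode.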
class TestVariable(unittest.TestCase):
    def test_np_dtype_convert(self):
        DT = core.VarDesc.VarType
        convert = convert_np_dtype_to_dtype_
        self.assertEqual(DT.FP32, convert(np.float32))
        self.assertEqual(DT.FP16, convert("float16"))
        self.assertEqual(DT.FP64, convert("float64"))
        self.assertEqual(DT.INT32, convert("int32"))
        self.assertEqual(DT.INT16, convert("int16"))
        self.assertEqual(DT.INT64, convert("int64"))
        self.assertEqual(DT.BOOL, convert("bool"))
        self.assertEqual(DT.INT8, convert("int8"))
        self.assertEqual(DT.UINT8, convert("uint8"))
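
    # test_var: create a Variable on the default block, check its metadata,
    # fetch the same Variable again by name, and expect a ValueError when a
    # conflicting shape is declared for an existing name.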
    def test_var(self):
        b = default_main_program().current_block()
        w = b.create_var(
            dtype="float64", shape=[784, 100], lod_level=0, name="fc.w")
        self.assertNotEqual(str(w), "")
        self.assertEqual(core.VarDesc.VarType.FP64, w.dtype)
        self.assertEqual((784, 100), w.shape)
        self.assertEqual("fc.w", w.name)
        self.assertEqual(0, w.lod_level)

        w = b.create_var(name='fc.w')
        self.assertEqual(core.VarDesc.VarType.FP64, w.dtype)
        self.assertEqual((784, 100), w.shape)
        self.assertEqual("fc.w", w.name)
        self.assertEqual(0, w.lod_level)

        self.assertRaises(ValueError,
                          lambda: b.create_var(name="fc.w", shape=(24, 100)))
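
    # test_step_scopes: a Variable can be created with an explicit VarType
    # such as STEP_SCOPES, and the type is preserved on the block.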
    def test_step_scopes(self):
        prog = Program()
        b = prog.current_block()
        var = b.create_var(
            name='step_scopes', type=core.VarDesc.VarType.STEP_SCOPES)
        self.assertEqual(core.VarDesc.VarType.STEP_SCOPES, var.type)
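
    # _test_slice: first verify the shapes produced by slicing a Variable at
    # graph-construction time, then build and run a small program so the
    # executed slice results can be compared against numpy slicing.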
    def _test_slice(self, place):
        b = default_main_program().current_block()
        w = b.create_var(dtype="float64", shape=[784, 100, 100], lod_level=0)

        for i in range(3):
            nw = w[i]
            self.assertEqual((1, 100, 100), nw.shape)

        nw = w[:]
        self.assertEqual((784, 100, 100), nw.shape)

        nw = w[:, :, ...]
        self.assertEqual((784, 100, 100), nw.shape)

        nw = w[::2, ::2, :]
        self.assertEqual((392, 50, 100), nw.shape)

        nw = w[::-2, ::-2, :]
        self.assertEqual((392, 50, 100), nw.shape)

        self.assertEqual(0, nw.lod_level)

        main = fluid.Program()
        with fluid.program_guard(main):
            exe = fluid.Executor(place)
            tensor_array = np.array(
                [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                 [[10, 11, 12], [13, 14, 15], [16, 17, 18]],
                 [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]).astype('float32')
            var = fluid.layers.assign(tensor_array)
            var1 = var[0, 1, 1]
            var2 = var[1:]
            var3 = var[0:1]
            var4 = var[..., ]
            var5 = var[2::-2]
            var6 = var[1, 1:, 1:]
            var7 = var[1, ..., 1:]
            var8 = var[1, ...]
            var_reshape = fluid.layers.reshape(var, [3, -1, 3])
            var9 = var_reshape[1, ..., 2]
            var10 = var_reshape[:, :, -1]
            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
            y = fluid.layers.fc(input=x, size=1, act=None)
            var11 = y[:, 0]

            feeder = fluid.DataFeeder(place=place, feed_list=[x])
            data = []
            data.append(np.random.randint(10, size=[13]).astype('float32'))
            exe.run(fluid.default_startup_program())
            local_out = exe.run(main,
                                feed=feeder.feed([data]),
                                fetch_list=[
                                    var, var1, var2, var3, var4, var5, var6,
                                    var7, var8, var9, var10, var11
                                ])
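
            # The fetched results should match plain numpy slicing of the
            # host-side array; the last three fetches are only shape-checked.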
            self.assertTrue((np.array(local_out[1]) == np.array(tensor_array[
                0, 1, 1])).all())
            self.assertTrue((np.array(local_out[2]) == np.array(tensor_array[
                1:])).all())
            self.assertTrue((np.array(local_out[3]) == np.array(tensor_array[
                0:1])).all())
            self.assertTrue((np.array(local_out[4]) == np.array(
                tensor_array[..., ])).all())
            self.assertTrue((np.array(local_out[5]) == np.array(tensor_array[
                2::-2])).all())
            self.assertTrue((np.array(local_out[6]) == np.array(tensor_array[
                1, 1:, 1:])).all())
            self.assertTrue((np.array(local_out[7]) == np.array(tensor_array[
                1, ..., 1:])).all())
            self.assertTrue((np.array(local_out[8]) == np.array(tensor_array[
                1, ...])).all())

            self.assertEqual(local_out[9].shape, (1, 3, 1))
            self.assertEqual(local_out[10].shape, (3, 3, 1))
            self.assertEqual(local_out[11].shape, (1, 1))
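
    # test_slice: run the slicing checks on CPU and, when CUDA is available,
    # on GPU as well.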
    def test_slice(self):
        place = fluid.CPUPlace()
        self._test_slice(place)

        if core.is_compiled_with_cuda():
            self._test_slice(core.CUDAPlace(0))
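
    # _tostring: printing a Variable and calling str() on it should work;
    # test_tostring runs this in both dygraph and static graph mode.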
    def _tostring(self):
        b = default_main_program().current_block()
        w = b.create_var(dtype="float64", lod_level=0)
        print(w)
        self.assertTrue(isinstance(str(w), str))

        if core.is_compiled_with_cuda():
            wc = b.create_var(dtype="int", lod_level=0)
            print(wc)
            self.assertTrue(isinstance(str(wc), str))

    def test_tostring(self):
        with fluid.dygraph.guard():
            self._tostring()

        with fluid.program_guard(default_main_program()):
            self._tostring()


if __name__ == '__main__':
    unittest.main()