Refine VarBase init function (#21587)

* refine init function, test=develop

* add tests, test=develop

* remove extern, which may cause symbol errors in gcc-4.8, test=develop
Leo Chen 6 years ago committed by GitHub
parent 56882ce432
commit 4f81d1bd5f

@@ -280,8 +280,8 @@ TEST(test_tracer, test_unique_name_generator) {
   imperative::Tracer tracer;
   auto fc_1 = tracer.GenerateUniqueName("fc");
   auto fc_2 = tracer.GenerateUniqueName("fc");
-  ASSERT_STREQ("fc_1", fc_1.c_str());
-  ASSERT_STREQ("fc_2", fc_2.c_str());
+  ASSERT_STREQ("fc_0", fc_1.c_str());
+  ASSERT_STREQ("fc_1", fc_2.c_str());
 }

 TEST(test_tracer, test_current_tracer) {

@@ -33,7 +33,7 @@ class UniqueNameGenerator {
  public:
   explicit UniqueNameGenerator(std::string prefix = "") : prefix_(prefix) {}
   std::string Generate(std::string key = "tmp") {
-    return prefix_ + key + "_" + std::to_string(++id_);
+    return prefix_ + key + "_" + std::to_string(id_++);
   }

  private:
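
Switching from pre-increment to post-increment makes generated names start at
"_0" instead of "_1", which is exactly what the updated tracer test above now
asserts. A minimal Python sketch of the same counter logic (illustrative only;
Paddle's real generator is the C++ class in this hunk):

    class UniqueNameGenerator(object):
        def __init__(self, prefix=""):
            self._prefix = prefix
            self._id = 0

        def generate(self, key="tmp"):
            # Post-increment semantics: use the current id, then advance it.
            name = self._prefix + key + "_" + str(self._id)
            self._id += 1
            return name

    gen = UniqueNameGenerator()
    assert gen.generate("fc") == "fc_0"  # was "fc_1" with the old ++id_
    assert gen.generate("fc") == "fc_1"  # was "fc_2"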

(File diff suppressed because it is too large.)

@@ -294,9 +294,9 @@ void _concatCompute(const std::vector<paddle::framework::Tensor> &ins,
   }
 }

-void _getSliceinfo(const framework::Tensor &self, py::object obj,
-                   const int64_t dim, int64_t *pstart, int64_t *pstop,
-                   int64_t *pstep, int64_t *pslicelength) {
+inline void _getSliceinfo(const framework::Tensor &self, py::object obj,
+                          const int64_t dim, int64_t *pstart, int64_t *pstop,
+                          int64_t *pstep, int64_t *pslicelength) {
   auto &start = *pstart;
   auto &stop = *pstop;
   auto &step = *pstep;
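
Marking this header-defined helper inline avoids duplicate-symbol link errors
when the header is included from several translation units, presumably the
gcc-4.8 symbol error mentioned in the commit message. The helper backs basic
indexing on dygraph variables; a short usage sketch, mirroring the new
test_slice case added below:

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        arr = np.arange(12, dtype=np.float32).reshape(3, 4)
        var = fluid.dygraph.to_variable(arr)
        row = var[1, :]  # basic indexing is resolved via _getSliceinfo
        assert np.array_equal(row.numpy(), arr[1, :])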

@@ -172,6 +172,7 @@ def _print_debug_msg(limit=5, is_test=False):
     return unique_name_size, tracer_var_size, alive_cpp_var_size


+# TODO(zhiqiu): Param 'block' should be deprecated, since block is meaningless in dygraph
 @framework.dygraph_only
 def to_variable(value, block=None, name=None, zero_copy=None):
     """
@@ -215,10 +216,10 @@ def to_variable(value, block=None, name=None, zero_copy=None):
             zero_copy = False
         py_var = core.VarBase(
             value=value,
-            name=name,
-            persistable=False,
-            place=framework._current_expected_place(),
-            zero_copy=zero_copy)
+            place=framework._current_expected_place(),
+            persistable=False,
+            zero_copy=zero_copy,
+            name=name if name else '')
         return py_var
     elif isinstance(value, (core.VarBase, framework.Variable)):
         return value
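
With the reordered keyword arguments, to_variable always hands core.VarBase a
string name, falling back to '' so the C++ side auto-generates one. A short
usage sketch, consistent with the new unit test added below:

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        arr = np.ones([2, 3], dtype=np.float32)
        named = fluid.dygraph.to_variable(arr, name="abc")
        anon = fluid.dygraph.to_variable(arr)  # name='' -> auto-generated
        print(named.name)  # 'abc'
        print(anon.name)   # e.g. 'generated_var_0' (see test_write_property)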

@@ -221,6 +221,23 @@ def _current_expected_place():
     return _dygraph_current_expected_place_


+# TODO(zhiqiu): remove this function.
+def _var_base_to_np(var_base):
+    """
+    convert VarBase to numpy
+
+    Args:
+        var_base(VarBase): the VarBase to convert
+
+    Returns (np.ndarray): the np.ndarray containing the value of VarBase
+    """
+    warnings.warn(
+        "paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base)."
+    )
+    return var_base.numpy()
+
+
 def _cpu_num():
     if "CPU_NUM" not in os.environ.keys():
         if multiprocessing.cpu_count() > 1:
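
Since _var_base_to_np now just forwards to VarBase.numpy() after emitting a
deprecation warning, callers should migrate directly; a minimal before/after
sketch:

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        var = fluid.dygraph.to_variable(np.ones([2, 2], dtype=np.float32))
        arr_old = fluid.framework._var_base_to_np(var)  # deprecated, warns
        arr_new = var.numpy()                           # preferred
        assert np.array_equal(arr_old, arr_new)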

@@ -73,7 +73,7 @@ class LayerHelperBase(object):
         ), "to_variable could only be called in dygraph mode"
         py_var = core.VarBase(
             value=value,
-            name=name,
+            name=name if name else '',
             persistable=False,
             place=_current_expected_place(),
             zero_copy=False)

@@ -0,0 +1,113 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, in_dygraph_mode
+import paddle.fluid as fluid
+import paddle.fluid.layers as layers
+import paddle.fluid.core as core
+import numpy as np
+
+
+class TestVarBase(unittest.TestCase):
+    def setUp(self):
+        self.shape = [512, 1234]
+        self.dtype = np.float32
+        self.array = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
+
+    def test_to_variable(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array, name="abc")
+            self.assertTrue(np.array_equal(var.numpy(), self.array))
+            self.assertEqual(var.name, 'abc')
+            # default value
+            self.assertEqual(var.persistable, False)
+            self.assertEqual(var.stop_gradient, True)
+            self.assertEqual(var.shape, self.shape)
+            self.assertEqual(var.dtype, core.VarDesc.VarType.FP32)
+            self.assertEqual(var.type, core.VarDesc.VarType.LOD_TENSOR)
+
+    def test_write_property(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array)
+
+            self.assertEqual(var.name, 'generated_var_0')
+            var.name = 'test'
+            self.assertEqual(var.name, 'test')
+
+            self.assertEqual(var.persistable, False)
+            var.persistable = True
+            self.assertEqual(var.persistable, True)
+
+            self.assertEqual(var.stop_gradient, True)
+            var.stop_gradient = False
+            self.assertEqual(var.stop_gradient, False)
+
+    # test some patched methods
+    def test_set_value(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array)
+            tmp1 = np.random.uniform(0.1, 1, [2, 2, 3]).astype(self.dtype)
+            self.assertRaises(AssertionError, var.set_value, tmp1)
+
+            tmp2 = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
+            var.set_value(tmp2)
+            self.assertTrue(np.array_equal(var.numpy(), tmp2))
+
+    def test_to_string(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array)
+            self.assertTrue(isinstance(str(var.to_string(True)), str))
+
+    def test_backward(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array)
+            var.stop_gradient = False
+            loss = fluid.layers.relu(var)
+            loss.backward()
+            grad_var = var._grad_ivar()
+            self.assertEqual(grad_var.shape, self.shape)
+
+    def test_gradient(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array)
+            var.stop_gradient = False
+            loss = fluid.layers.relu(var)
+            loss.backward()
+            grad_var = var.gradient()
+            self.assertEqual(grad_var.shape, self.array.shape)
+
+    def test_block(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array)
+            self.assertEqual(var.block,
+                             fluid.default_main_program().global_block())
+
+    def test_slice(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array)
+            self.assertTrue(np.array_equal(var[1, :].numpy(), self.array[1, :]))
+
+    def test_var_base_to_np(self):
+        with fluid.dygraph.guard():
+            var = fluid.dygraph.to_variable(self.array)
+            self.assertTrue(
+                np.array_equal(var.numpy(),
+                               fluid.framework._var_base_to_np(var)))
+
+
+if __name__ == '__main__':
+    unittest.main()
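
Because the file ends with unittest.main(), the new suite can be run
standalone (the filename is assumed here, e.g. test_var_base.py):

    python test_var_base.py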