Change return_numpy [ParallelExecutor] default value (#11713)

* change return_numpy[PE] default value

* Remove convert to numpy in unit test
chengduo 7 years ago committed by GitHub
parent b0f98849fd
commit c6e36e7738
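In short: ParallelExecutor.run now returns numpy ndarrays by default instead of LoDTensors, and the unit tests drop their now-redundant np.array conversions. A minimal sketch of the user-facing difference, assuming an already-built program (loss and feed_dict are placeholders, not part of this commit):

    import numpy as np
    import paddle.fluid as fluid

    pe = fluid.ParallelExecutor(use_cuda=False, loss_name=loss.name)

    # Old default (return_numpy=False): fetches come back as LoDTensors,
    # so callers converted them by hand.
    loss_t, = pe.run(fetch_list=[loss.name], feed=feed_dict, return_numpy=False)
    loss_np = np.array(loss_t)

    # New default (return_numpy=True): numpy arrays come back directly.
    loss_np, = pe.run(fetch_list=[loss.name], feed=feed_dict)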

@@ -160,7 +160,7 @@ class ParallelExecutor(object):
             build_strategy, num_trainers, trainer_id)
         self.scope = scope
 
-    def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=False):
+    def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
         """
         Run a parallel executor with fetch_list.
@@ -197,7 +197,7 @@ class ParallelExecutor(object):
             feed_dict: Alias for feed parameter, for backward compatibility.
                 This parameter has been deprecated. Default None.
             return_numpy(bool): Whether converts the fetched tensor to numpy.
-                Default: False.
+                Default: True.
 
         Returns:
             List: The fetched result list.
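A quick way to check the behavior the updated docstring describes (a sketch under the same assumptions as above; the isinstance check is illustrative):

    results = pe.run(fetch_list=[loss.name], feed=feed_dict)
    assert isinstance(results[0], np.ndarray)  # numpy under the new default

    # return_numpy=False restores the old behavior: each fetched entry is a
    # LoDTensor, which np.array() can still convert explicitly.
    results = pe.run(fetch_list=[loss.name], feed=feed_dict, return_numpy=False)
    loss_np = np.array(results[0])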

@@ -81,7 +81,6 @@ class TestParallelExecutorBase(unittest.TestCase):
         begin = time.time()
         first_loss, = run_executor(
             exe=exe, feed=feed_dict, fetch_list=[loss.name])
-        first_loss = np.array(first_loss)
 
         for i in xrange(iter):
             run_executor(exe=exe, feed=feed_dict, fetch_list=[])
@@ -94,8 +93,6 @@ class TestParallelExecutorBase(unittest.TestCase):
         print "%.4f Instance per second" % (
             (batch_size * iter + 2) / (end - begin))
-        last_loss = np.array(last_loss)
         print first_loss, last_loss
         # self.assertGreater(first_loss[0], last_loss[0])
         return first_loss, last_loss
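The deleted conversions are safe to drop because run_executor already returns ndarrays under the new default; wrapping an ndarray in np.array() only makes a copy, which is why tests that still did so kept passing. For example:

    import numpy as np
    x = np.asarray([1.0, 2.0])
    y = np.array(x)          # copies an ndarray; values and dtype unchanged
    assert (x == y).all()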

@@ -169,9 +169,8 @@ class TestCRFModel(unittest.TestCase):
             data = train_data()
             for i in xrange(10):
                 cur_batch = next(data)
-                print map(np.array,
-                          pe.run(feed=feeder.feed(cur_batch),
-                                 fetch_list=[avg_cost.name]))[0]
+                print pe.run(feed=feeder.feed(cur_batch),
+                             fetch_list=[avg_cost.name])[0]
 
     @unittest.skip(reason="CI hangs")
     def test_update_sparse_parameter_all_reduce(self):

@@ -130,7 +130,7 @@ class TestFeedParallel(unittest.TestCase):
             use_cuda=use_cuda, loss_name=loss.name, main_program=main)
 
         for batch_id, data in enumerate(reader()):
-            loss_np = np.array(pe.run(feed=data, fetch_list=[loss.name])[0])
+            loss_np = pe.run(feed=data, fetch_list=[loss.name])[0]
             print batch_id, loss_np
             if batch_id == 2:
                 break

@@ -70,10 +70,9 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase):
         for i in xrange(5):
             test_loss, = test_exe.run([loss.name], feed=feed_dict)
-            test_loss = np.array(test_loss)
             train_loss, = train_exe.run([loss.name], feed=feed_dict)
-            train_loss = np.array(train_loss)
             self.assertTrue(
                 np.allclose(
                     train_loss, test_loss, atol=1e-8),
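Here test_loss and train_loss are already ndarrays, so np.allclose compares them directly and the intermediate np.array calls go away. Illustrative values (not from the test):

    import numpy as np
    train_loss = np.asarray([0.5123])  # as returned by train_exe.run
    test_loss = np.asarray([0.5123])   # as returned by test_exe.run
    assert np.allclose(train_loss, test_loss, atol=1e-8)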
