Fix the overfix of 2to3 for print function

revert-12646-feature/jit/xbyak
minqiyang 6 years ago
parent 559d36328c
commit 91f0573bc1
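
This commit reverts an over-aggressive 2to3 conversion. When 2to3 meets a Python 2 statement of the form print ("a: ", x), it typically treats the parenthesized argument list as a single tuple and rewrites it as print(("a: ", x)) to preserve Python 2 output; under Python 3 that prints the tuple's repr instead of the intended space-separated values, so the extra parentheses are stripped throughout these files. A minimal illustration (not part of the diff; results is a stand-in value):

results = [0.42]

# What the 2to3 pass left behind: print receives one tuple argument.
print(("infer results: ", results[0]))  # ('infer results: ', 0.42)

# What this commit restores: print receives two arguments, joined by a space.
print("infer results: ", results[0])    # infer results:  0.42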

@@ -72,6 +72,7 @@ option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VER
 if(NOT PY_VERSION)
 set(PY_VERSION 2.7)
 endif()
+set(PYBIND11_PYTHON_VERSION ${PY_VERSION})
 # CMAKE_BUILD_TYPE
 if(NOT CMAKE_BUILD_TYPE)

@@ -106,7 +106,7 @@ class Optimizer(object):
 param_lr = param.optimize_attr['learning_rate']
 if type(param_lr) == Variable:
 # param learning rate has been updated (LARS)
-print(("returns updated param lr ", param_lr))
+print("returns updated param lr ", param_lr)
 return param_lr
 else:
 if param_lr == 1.0:

@@ -94,7 +94,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
 results = inferencer.infer({'x': tensor_x})
-print(("infer results: ", results[0]))
+print("infer results: ", results[0])
 def main(use_cuda):

@@ -105,7 +105,7 @@ def train(use_cuda, train_program, params_dirname):
 avg_cost, accuracy = trainer.test(
 reader=test_reader, feed_order=['pixel', 'label'])
-print(('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)))
+print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))
 if accuracy > 0.01: # Low threshold for speeding up CI
 if params_dirname is not None:
@@ -134,7 +134,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
 results = inferencer.infer({'pixel': tensor_img})
-print(("infer results: ", results))
+print("infer results: ", results)
 def main(use_cuda):

@@ -82,7 +82,7 @@ def train(use_cuda, train_program, params_dirname):
 avg_cost, accuracy = trainer.test(
 reader=test_reader, feed_order=['pixel', 'label'])
-print(('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)))
+print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))
 if accuracy > 0.01: # Low threshold for speeding up CI
 if params_dirname is not None:
@@ -111,7 +111,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
 results = inferencer.infer({'pixel': tensor_img})
-print(("infer results: ", results))
+print("infer results: ", results)
 def main(use_cuda):

@@ -171,7 +171,7 @@ def train(use_cuda, train_program, params_dirname):
 # get avg cost
 avg_cost = np.array(avg_cost_set).mean()
-print(("avg_cost: %s" % avg_cost))
+print("avg_cost: %s" % avg_cost)
 if float(avg_cost) < 100.0: # Large value to increase CI speed
 trainer.save_params(params_dirname)
@@ -183,8 +183,8 @@ def train(use_cuda, train_program, params_dirname):
 sys.exit("got NaN loss, training failed.")
 elif isinstance(event, fluid.EndStepEvent):
-print(("Step {0}, Epoch {1} Metrics {2}".format(
-event.step, event.epoch, list(map(np.array, event.metrics)))))
+print("Step {0}, Epoch {1} Metrics {2}".format(
+event.step, event.epoch, list(map(np.array, event.metrics))))
 if event.step == 1: # Run 2 iterations to speed CI
 trainer.save_params(params_dirname)
 trainer.stop()
@@ -206,14 +206,14 @@ def infer(use_cuda, inference_program, params_dirname):
 inference_program, param_path=params_dirname, place=place)
 # Setup input by creating LoDTensor to represent sequence of words.
 # Here each word is the basic element of the LoDTensor and the shape of
 # each word (base_shape) should be [1] since it is simply an index to
 # look up for the corresponding word vector.
 # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
 # which has only one level of detail. Then the created LoDTensor will have only
 # one higher level structure (sequence of words, or sentence) than the basic
 # element (word). Hence the LoDTensor will hold data for three sentences of
 # length 3, 4 and 2, respectively.
 # Note that recursive_sequence_lengths should be a list of lists.
 recursive_seq_lens = [[3, 4, 2]]
 base_shape = [1]
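
The comment block above (repeated in several of these tests) describes how the length-based LoD info maps onto the tensor. A small standalone sketch of the same setup, using the old paddle.fluid API that these tests already call; the word-id range 0-99 is an arbitrary stand-in for len(word_dict) - 1:

import paddle.fluid as fluid

place = fluid.CPUPlace()
recursive_seq_lens = [[3, 4, 2]]  # one level of detail: three sentences of lengths 3, 4 and 2
base_shape = [1]                  # each word is a single index into the embedding table

# Random word ids; the resulting LoDTensor holds 3 + 4 + 2 = 9 rows, each of shape [1].
tensor_words = fluid.create_random_int_lodtensor(
    recursive_seq_lens, base_shape, place, low=0, high=99)
print(tensor_words.recursive_sequence_lengths())  # [[3, 4, 2]]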
@@ -248,7 +248,7 @@ def infer(use_cuda, inference_program, params_dirname):
 },
 return_numpy=False)
-print(("infer results: ", np.array(results[0]).shape))
+print("infer results: ", np.array(results[0]).shape)
 def main(use_cuda):

@@ -197,7 +197,7 @@ def train(use_cuda, is_sparse, is_local=True):
 def event_handler(event):
 if isinstance(event, fluid.EndStepEvent):
-print(('pass_id=' + str(event.epoch) + ' batch=' + str(event.step)))
+print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
 if event.step == 10:
 trainer.stop()
@@ -259,7 +259,7 @@ def decode_main(use_cuda, is_sparse):
 feed=feed_dict,
 fetch_list=[translation_ids, translation_scores],
 return_numpy=False)
-print((result_ids.recursive_sequence_lengths()))
+print(result_ids.recursive_sequence_lengths())
 break

@@ -78,14 +78,14 @@ def train(use_cuda, train_program, params_dirname):
 avg_cost, acc = trainer.test(
 reader=test_reader, feed_order=['img', 'label'])
-print(("avg_cost: %s" % avg_cost))
-print(("acc : %s" % acc))
+print("avg_cost: %s" % avg_cost)
+print("acc : %s" % acc)
 if acc > 0.2: # Smaller value to increase CI speed
 trainer.save_params(params_dirname)
 else:
-print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-event.epoch + 1, avg_cost, acc)))
+print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+event.epoch + 1, avg_cost, acc))
 if math.isnan(avg_cost):
 sys.exit("got NaN loss, training failed.")
 elif isinstance(event, fluid.EndStepEvent):
@@ -118,7 +118,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 results = inferencer.infer({'img': tensor_img})
-print(("infer results: ", results[0]))
+print("infer results: ", results[0])
 def main(use_cuda):

@@ -61,14 +61,14 @@ def train(use_cuda, train_program, params_dirname):
 avg_cost, acc = trainer.test(
 reader=test_reader, feed_order=['img', 'label'])
-print(("avg_cost: %s" % avg_cost))
-print(("acc : %s" % acc))
+print("avg_cost: %s" % avg_cost)
+print("acc : %s" % acc)
 if acc > 0.2: # Smaller value to increase CI speed
 trainer.save_params(params_dirname)
 else:
-print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-event.epoch + 1, avg_cost, acc)))
+print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+event.epoch + 1, avg_cost, acc))
 if math.isnan(avg_cost):
 sys.exit("got NaN loss, training failed.")
@@ -96,7 +96,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 results = inferencer.infer({'img': tensor_img})
-print(("infer results: ", results[0]))
+print("infer results: ", results[0])
 def main(use_cuda):

@@ -180,7 +180,7 @@ def train(use_cuda, train_program, params_dirname):
 # get avg cost
 avg_cost = np.array(avg_cost_set).mean()
-print(("avg_cost: %s" % avg_cost))
+print("avg_cost: %s" % avg_cost)
 if float(avg_cost) < 4: # Smaller value to increase CI speed
 trainer.save_params(params_dirname)
@@ -240,7 +240,7 @@ def infer(use_cuda, inference_program, params_dirname):
 },
 return_numpy=False)
-print(("infer results: ", np.array(results[0])))
+print("infer results: ", np.array(results[0]))
 def main(use_cuda):

@@ -82,21 +82,21 @@ def train(use_cuda, train_program, params_dirname):
 avg_cost, acc = trainer.test(
 reader=test_reader, feed_order=['words', 'label'])
-print(("avg_cost: %s" % avg_cost))
-print(("acc : %s" % acc))
+print("avg_cost: %s" % avg_cost)
+print("acc : %s" % acc)
 if acc > 0.2: # Smaller value to increase CI speed
 trainer.save_params(params_dirname)
 trainer.stop()
 else:
-print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-event.epoch + 1, avg_cost, acc)))
+print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+event.epoch + 1, avg_cost, acc))
 if math.isnan(avg_cost):
 sys.exit("got NaN loss, training failed.")
 elif isinstance(event, fluid.EndStepEvent):
-print(("Step {0}, Epoch {1} Metrics {2}".format(
-event.step, event.epoch, list(map(np.array, event.metrics)))))
+print("Step {0}, Epoch {1} Metrics {2}".format(
+event.step, event.epoch, list(map(np.array, event.metrics))))
 if event.step == 1: # Run 2 iterations to speed CI
 trainer.save_params(params_dirname)
 trainer.stop()
@@ -123,14 +123,14 @@ def infer(use_cuda, inference_program, params_dirname=None):
 place=place)
 # Setup input by creating LoDTensor to represent sequence of words.
 # Here each word is the basic element of the LoDTensor and the shape of
 # each word (base_shape) should be [1] since it is simply an index to
 # look up for the corresponding word vector.
 # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
 # which has only one level of detail. Then the created LoDTensor will have only
 # one higher level structure (sequence of words, or sentence) than the basic
 # element (word). Hence the LoDTensor will hold data for three sentences of
 # length 3, 4 and 2, respectively.
 # Note that recursive_sequence_lengths should be a list of lists.
 recursive_seq_lens = [[3, 4, 2]]
 base_shape = [1]
@@ -138,7 +138,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 tensor_words = fluid.create_random_int_lodtensor(
 recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
 results = inferencer.infer({'words': tensor_words})
-print(("infer results: ", results))
+print("infer results: ", results)
 def main(use_cuda):

@@ -97,21 +97,21 @@ def train(use_cuda, train_program, params_dirname):
 avg_cost, acc = trainer.test(
 reader=test_reader, feed_order=['words', 'label'])
-print(("avg_cost: %s" % avg_cost))
-print(("acc : %s" % acc))
+print("avg_cost: %s" % avg_cost)
+print("acc : %s" % acc)
 if acc > 0.2: # Smaller value to increase CI speed
 trainer.save_params(params_dirname)
 trainer.stop()
 else:
-print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-event.epoch + 1, avg_cost, acc)))
+print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+event.epoch + 1, avg_cost, acc))
 if math.isnan(avg_cost):
 sys.exit("got NaN loss, training failed.")
 elif isinstance(event, fluid.EndStepEvent):
-print(("Step {0}, Epoch {1} Metrics {2}".format(
-event.step, event.epoch, list(map(np.array, event.metrics)))))
+print("Step {0}, Epoch {1} Metrics {2}".format(
+event.step, event.epoch, list(map(np.array, event.metrics))))
 if event.step == 1: # Run 2 iterations to speed CI
 trainer.save_params(params_dirname)
 trainer.stop()
@@ -138,14 +138,14 @@ def infer(use_cuda, inference_program, params_dirname=None):
 place=place)
 # Setup input by creating LoDTensor to represent sequence of words.
 # Here each word is the basic element of the LoDTensor and the shape of
 # each word (base_shape) should be [1] since it is simply an index to
 # look up for the corresponding word vector.
 # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
 # which has only one level of detail. Then the created LoDTensor will have only
 # one higher level structure (sequence of words, or sentence) than the basic
 # element (word). Hence the LoDTensor will hold data for three sentences of
 # length 3, 4 and 2, respectively.
 # Note that recursive_sequence_lengths should be a list of lists.
 recursive_seq_lens = [[3, 4, 2]]
 base_shape = [1]
@@ -153,7 +153,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 tensor_words = fluid.create_random_int_lodtensor(
 recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
 results = inferencer.infer({'words': tensor_words})
-print(("infer results: ", results))
+print("infer results: ", results)
 def main(use_cuda):

@@ -91,21 +91,21 @@ def train(use_cuda, train_program, params_dirname):
 avg_cost, acc = trainer.test(
 reader=test_reader, feed_order=['words', 'label'])
-print(("avg_cost: %s" % avg_cost))
-print(("acc : %s" % acc))
+print("avg_cost: %s" % avg_cost)
+print("acc : %s" % acc)
 if acc > 0.2: # Smaller value to increase CI speed
 trainer.save_params(params_dirname)
 trainer.stop()
 else:
-print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-event.epoch + 1, avg_cost, acc)))
+print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+event.epoch + 1, avg_cost, acc))
 if math.isnan(avg_cost):
 sys.exit("got NaN loss, training failed.")
 elif isinstance(event, fluid.EndStepEvent):
-print(("Step {0}, Epoch {1} Metrics {2}".format(
-event.step, event.epoch, list(map(np.array, event.metrics)))))
+print("Step {0}, Epoch {1} Metrics {2}".format(
+event.step, event.epoch, list(map(np.array, event.metrics))))
 if event.step == 1: # Run 2 iterations to speed CI
 trainer.save_params(params_dirname)
 trainer.stop()
@@ -133,14 +133,14 @@ def infer(use_cuda, inference_program, params_dirname=None):
 place=place)
 # Setup input by creating LoDTensor to represent sequence of words.
 # Here each word is the basic element of the LoDTensor and the shape of
 # each word (base_shape) should be [1] since it is simply an index to
 # look up for the corresponding word vector.
 # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
 # which has only one level of detail. Then the created LoDTensor will have only
 # one higher level structure (sequence of words, or sentence) than the basic
 # element (word). Hence the LoDTensor will hold data for three sentences of
 # length 3, 4 and 2, respectively.
 # Note that recursive_sequence_lengths should be a list of lists.
 recursive_seq_lens = [[3, 4, 2]]
 base_shape = [1]
@@ -148,7 +148,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 tensor_words = fluid.create_random_int_lodtensor(
 recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
 results = inferencer.infer({'words': tensor_words})
-print(("infer results: ", results))
+print("infer results: ", results)
 def main(use_cuda):

@@ -98,7 +98,7 @@ def train(use_cuda, train_program, params_dirname):
 reader=test_reader,
 feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw'])
 avg_cost = outs[0]
-print(("loss= ", avg_cost))
+print("loss= ", avg_cost)
 if avg_cost < 10.0:
 trainer.save_params(params_dirname)
@@ -149,7 +149,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
 'forthw': fourth_word
 },
 return_numpy=False)
-print((np.array(result[0])))
+print(np.array(result[0]))
 def main(use_cuda, is_sparse):

@@ -180,7 +180,7 @@ def train(word_dict,
 cost_val, acc_val = exe.run(main_program,
 feed=feeder.feed(data),
 fetch_list=[cost, acc_out])
-print(("cost=" + str(cost_val) + " acc=" + str(acc_val)))
+print("cost=" + str(cost_val) + " acc=" + str(acc_val))
 if cost_val < 0.4 and acc_val > 0.8:
 if save_dirname is not None:
 fluid.io.save_inference_model(save_dirname, ["words"],
@@ -235,14 +235,14 @@ def infer(word_dict, use_cuda, save_dirname=None):
 word_dict_len = len(word_dict)
 # Setup input by creating LoDTensor to represent sequence of words.
 # Here each word is the basic element of the LoDTensor and the shape of
 # each word (base_shape) should be [1] since it is simply an index to
 # look up for the corresponding word vector.
 # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
 # which has only one level of detail. Then the created LoDTensor will have only
 # one higher level structure (sequence of words, or sentence) than the basic
 # element (word). Hence the LoDTensor will hold data for three sentences of
 # length 3, 4 and 2, respectively.
 # Note that recursive_sequence_lengths should be a list of lists.
 recursive_seq_lens = [[3, 4, 2]]
 base_shape = [1]
@@ -261,10 +261,10 @@ def infer(word_dict, use_cuda, save_dirname=None):
 feed={feed_target_names[0]: tensor_words},
 fetch_list=fetch_targets,
 return_numpy=False)
-print((results[0].recursive_sequence_lengths()))
+print(results[0].recursive_sequence_lengths())
 np_data = np.array(results[0])
-print(("Inference Shape: ", np_data.shape))
-print(("Inference results: ", np_data))
+print("Inference Shape: ", np_data.shape)
+print("Inference results: ", np_data)
 def main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None):

@@ -124,9 +124,9 @@ def infer(use_cuda, save_dirname=None):
 results = exe.run(inference_program,
 feed={feed_target_names[0]: numpy.array(test_feat)},
 fetch_list=fetch_targets)
-print(("infer shape: ", results[0].shape))
-print(("infer results: ", results[0]))
-print(("ground truth: ", test_label))
+print("infer shape: ", results[0].shape)
+print("infer results: ", results[0])
+print("ground truth: ", test_label)
 def main(use_cuda, is_local=True):

@@ -119,7 +119,7 @@ def train(net_type, use_cuda, save_dirname, is_local):
 avg_cost = fluid.layers.mean(cost)
 acc = fluid.layers.accuracy(input=predict, label=label)
 # Test program
 test_program = fluid.default_main_program().clone(for_test=True)
 optimizer = fluid.optimizer.Adam(learning_rate=0.001)
@@ -163,10 +163,10 @@ def train(net_type, use_cuda, save_dirname, is_local):
 acc_value = numpy.array(acc_list).mean()
 avg_loss_value = numpy.array(avg_loss_list).mean()
-print((
+print(
 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.
 format(pass_id, batch_id + 1,
-float(avg_loss_value), float(acc_value))))
+float(avg_loss_value), float(acc_value)))
 if acc_value > 0.01: # Low threshold for speeding up CI
 fluid.io.save_inference_model(save_dirname, ["pixel"],
@@ -239,7 +239,7 @@ def infer(use_cuda, save_dirname=None):
 np.testing.assert_almost_equal(
 results[0][i], transpiler_results[0][i], decimal=5)
-print(("infer results: ", results[0]))
+print("infer results: ", results[0])
 fluid.io.save_inference_model(save_dirname, feed_target_names,
 fetch_targets, exe,

@@ -189,10 +189,10 @@ def train(use_cuda, save_dirname=None, is_local=True):
 cost = cost[0]
 if batch_id % 10 == 0:
-print(("avg_cost:" + str(cost)))
+print("avg_cost:" + str(cost))
 if batch_id != 0:
-print(("second per batch: " + str(
-(time.time() - start_time) / batch_id)))
+print("second per batch: " + str((time.time(
+) - start_time) / batch_id))
 # Set the threshold low to speed up the CI test
 if float(cost) < 60.0:
 if save_dirname is not None:
@@ -248,14 +248,14 @@ def infer(use_cuda, save_dirname=None):
 fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
 # Setup input by creating LoDTensor to represent sequence of words.
 # Here each word is the basic element of the LoDTensor and the shape of
 # each word (base_shape) should be [1] since it is simply an index to
 # look up for the corresponding word vector.
 # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
 # which has only one level of detail. Then the created LoDTensor will have only
 # one higher level structure (sequence of words, or sentence) than the basic
 # element (word). Hence the LoDTensor will hold data for three sentences of
 # length 3, 4 and 2, respectively.
 # Note that recursive_sequence_lengths should be a list of lists.
 recursive_seq_lens = [[3, 4, 2]]
 base_shape = [1]
@@ -333,9 +333,9 @@ def infer(use_cuda, save_dirname=None):
 },
 fetch_list=fetch_targets,
 return_numpy=False)
-print((results[0].recursive_sequence_lengths()))
+print(results[0].recursive_sequence_lengths())
 np_data = np.array(results[0])
-print(("Inference Shape: ", np_data.shape))
+print("Inference Shape: ", np_data.shape)
 def main(use_cuda, is_local=True):

@@ -205,8 +205,8 @@ def train_main(use_cuda, is_sparse, is_local=True):
 feed=feeder.feed(data),
 fetch_list=[avg_cost])
 avg_cost_val = np.array(outs[0])
-print(('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
-" avg_cost=" + str(avg_cost_val)))
+print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
+" avg_cost=" + str(avg_cost_val))
 if batch_id > 3:
 break
 batch_id += 1
@@ -282,7 +282,7 @@ def decode_main(use_cuda, is_sparse):
 feed=feed_dict,
 fetch_list=[translation_ids, translation_scores],
 return_numpy=False)
-print((result_ids.recursive_sequence_lengths()))
+print(result_ids.recursive_sequence_lengths())
 break

@@ -142,10 +142,10 @@ def train(nn_type,
 params_filename=params_filename)
 return
 else:
-print((
+print(
 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.
 format(pass_id, batch_id + 1,
-float(avg_loss_val), float(acc_val))))
+float(avg_loss_val), float(acc_val)))
 if math.isnan(float(avg_loss_val)):
 sys.exit("got NaN loss, training failed.")
 raise AssertionError("Loss of recognize digits is too large")
@@ -206,7 +206,7 @@ def infer(use_cuda,
 results = exe.run(inference_program,
 feed={feed_target_names[0]: tensor_img},
 fetch_list=fetch_targets)
-print(("infer results: ", results[0]))
+print("infer results: ", results[0])
 def main(use_cuda, parallel, nn_type, combine):

@@ -260,15 +260,15 @@ def infer(use_cuda, save_dirname=None):
 # Use the first data from paddle.dataset.movielens.test() as input
 assert feed_target_names[0] == "user_id"
 # Use create_lod_tensor(data, recursive_sequence_lengths, place) API
 # to generate LoD Tensor where `data` is a list of sequences of index
 # numbers, `recursive_sequence_lengths` is the length-based level of detail
 # (lod) info associated with `data`.
 # For example, data = [[10, 2, 3], [2, 3]] means that it contains
 # two sequences of indexes, of length 3 and 2, respectively.
 # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
 # level of detail info, indicating that `data` consists of two sequences
 # of length 3 and 2, respectively.
 user_id = fluid.create_lod_tensor([[1]], [[1]], place)
 assert feed_target_names[1] == "gender_id"
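
The comment in the hunk above spells out how `data` and `recursive_sequence_lengths` pair up; a small sketch of that exact example with the same fluid API (assuming the usual import paddle.fluid as fluid and a CPU place):

import paddle.fluid as fluid

place = fluid.CPUPlace()
data = [[10, 2, 3], [2, 3]]    # two index sequences, of length 3 and 2
recursive_seq_lens = [[3, 2]]  # length-based LoD info matching `data`

lod_tensor = fluid.create_lod_tensor(data, recursive_seq_lens, place)
print(lod_tensor.recursive_sequence_lengths())  # [[3, 2]]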
@@ -304,7 +304,7 @@ def infer(use_cuda, save_dirname=None):
 },
 fetch_list=fetch_targets,
 return_numpy=False)
-print(("inferred score: ", np.array(results[0])))
+print("inferred score: ", np.array(results[0]))
 def main(use_cuda):

@@ -182,8 +182,8 @@ def train(use_cuda, save_dirname=None):
 fetch_list=[avg_cost])
 avg_cost_val = np.array(outs[0])
-print(('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
-" avg_cost=" + str(avg_cost_val)))
+print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
+" avg_cost=" + str(avg_cost_val))
 if math.isnan(float(avg_cost_val[0])):
 sys.exit("got NaN loss, training failed.")
 if batch_id > 3:
@@ -213,14 +213,14 @@ def infer(use_cuda, save_dirname=None):
 fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
 # Setup input by creating LoDTensor to represent sequence of words.
 # Here each word is the basic element of the LoDTensor and the shape of
 # each word (base_shape) should be [1] since it is simply an index to
 # look up for the corresponding word vector.
 # Suppose the recursive_sequence_lengths info is set to [[4, 6]],
 # which has only one level of detail. Then the created LoDTensor will have only
 # one higher level structure (sequence of words, or sentence) than the basic
 # element (word). Hence the LoDTensor will hold data for two sentences of
 # length 4 and 6, respectively.
 # Note that recursive_sequence_lengths should be a list of lists.
 recursive_seq_lens = [[4, 6]]
 base_shape = [1]
@@ -241,10 +241,10 @@ def infer(use_cuda, save_dirname=None):
 },
 fetch_list=fetch_targets,
 return_numpy=False)
-print((results[0].recursive_sequence_lengths()))
+print(results[0].recursive_sequence_lengths())
 np_data = np.array(results[0])
-print(("Inference shape: ", np_data.shape))
-print(("Inference results: ", np_data))
+print("Inference shape: ", np_data.shape)
+print("Inference results: ", np_data)
 def main(use_cuda):

@@ -169,11 +169,11 @@ def infer(use_cuda, save_dirname=None):
 word_dict = paddle.dataset.imikolov.build_dict()
 dict_size = len(word_dict)
 # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
 # is simply an index to look up for the corresponding word vector and hence
 # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths,
 # which is length-based level of detail (lod) of each LoDTensor, should be [[1]]
 # meaning there is only one level of detail and there is only one sequence of
 # one word on this level.
 # Note that recursive_sequence_lengths should be a list of lists.
 recursive_seq_lens = [[1]]
@@ -204,9 +204,9 @@ def infer(use_cuda, save_dirname=None):
 },
 fetch_list=fetch_targets,
 return_numpy=False)
-print((results[0].recursive_sequence_lengths()))
+print(results[0].recursive_sequence_lengths())
 np_data = np.array(results[0])
-print(("Inference Shape: ", np_data.shape))
+print("Inference Shape: ", np_data.shape)
 def main(use_cuda, is_sparse, is_parallel):

@@ -78,7 +78,7 @@ for pass_id in range(PASS_NUM):
 if avg_loss_value[0] < 10.0:
 exit(0) # if avg cost less than 10.0, we think our code is good.
-print((avg_loss_value[0]))
+print(avg_loss_value[0])
 if math.isnan(float(avg_loss_value)):
 sys.exit("got NaN loss, training failed.")
 exit(1)

@@ -155,8 +155,8 @@ for pass_id in range(PASS_NUM):
 fetch_list=[avg_cost, batch_acc, batch_size])
 accuracy.add(value=acc, weight=weight)
 pass_acc = accuracy.eval()
-print(("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" +
-str(pass_acc)))
+print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
+pass_acc))
 # this model is slow, so if we can train two mini batch, we think it works properly.
 if i > 0:
 exit(0)

Some files were not shown because too many files have changed in this diff.