Change iter_parameters back and port unittests code to Python3

revert-12646-feature/jit/xbyak
minqiyang 7 years ago
parent e84936206d
commit 35e6abd7bb

@@ -963,9 +963,9 @@ class Block(object):
         raise ValueError("Var {0} is not found recursively".format(name))

     def all_parameters(self):
-        return list(self._iter_parameters())
+        return list(self.iter_parameters())

-    def _iter_parameters(self):
+    def iter_parameters(self):
         return (item[1] for item in list(self.vars.items())
                 if isinstance(item[1], Parameter))
@@ -1199,7 +1199,7 @@ class Block(object):
         if not isinstance(other, Block):
             raise TypeError(
                 "_copy_param_info_from should be invoked with Block")
-        for p in other._iter_parameters():
+        for p in other.iter_parameters():
             assert isinstance(p, Parameter)
             v = self.vars.get(p.name, None)
             if v is None:

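For context on the rename above: dropping the leading underscore makes iter_parameters part of Block's public API, and it stays a lazy generator, so callers that need a concrete list (as all_parameters does) must wrap it in list(). A minimal standalone sketch of the pattern; the stub classes here are illustrative, not Paddle's real ones:

    class Parameter(object):
        pass

    class Block(object):
        def __init__(self):
            self.vars = {}  # name -> variable object

        def iter_parameters(self):
            # Lazily yield only the vars that are Parameter instances.
            return (item[1] for item in list(self.vars.items())
                    if isinstance(item[1], Parameter))

        def all_parameters(self):
            # Materialize the generator for callers that expect a list.
            return list(self.iter_parameters())

    b = Block()
    b.vars = {'w': Parameter(), 'tmp': object()}
    assert len(b.all_parameters()) == 1
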
@@ -155,7 +155,7 @@ def train_main(use_cuda):
     ]
     feeder = fluid.DataFeeder(feed_list, place)

-    for pass_id in xrange(1):
+    for pass_id in range(1):
         for batch_id, data in enumerate(train_reader()):
             outs = exe.run(main_program,
                            feed=feeder.feed(data),
@@ -204,8 +204,8 @@ def decode_main(use_cuda):
     ]
     feeder = fluid.DataFeeder(feed_list, place)

-    data = train_reader().next()
-    feed_dict = feeder.feed(map(lambda x: [x[0]], data))
+    data = next(train_reader())
+    feed_dict = feeder.feed([[x[0]] for x in data])
     feed_dict['init_ids'] = init_ids
     feed_dict['init_scores'] = init_scores
@@ -214,7 +214,7 @@ def decode_main(use_cuda):
         feed=feed_dict,
         fetch_list=[translation_ids, translation_scores],
         return_numpy=False)
-    print result_ids.lod()
+    print(result_ids.lod())


 class TestBeamSearchDecoder(unittest.TestCase):

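The decode_main changes apply two standard Python 3 porting rules: generators no longer expose a .next() method, so the next() builtin (available since Python 2.6) is used instead, and map() now returns a lazy iterator, so a list comprehension is the simplest way to keep list semantics. A small self-contained sketch; train_reader here is a dummy stand-in:

    def train_reader():
        yield [('word', 0.1), ('word2', 0.2)]

    data = next(train_reader())          # Py3 spelling of train_reader().next()
    feed_rows = [[x[0]] for x in data]   # replaces map(lambda x: [x[0]], data)
    assert feed_rows == [['word'], ['word2']]
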
@@ -301,7 +301,7 @@ class DistSeResneXt2x2:
             trainer_id=trainer_id)

         feed_var_list = [
-            var for var in trainer_prog.global_block().vars.itervalues()
+            var for var in trainer_prog.global_block().vars.values()
             if var.is_data
         ]
@@ -309,7 +309,7 @@ class DistSeResneXt2x2:
         reader_generator = train_reader()

         first_loss, = exe.run(fetch_list=[avg_cost.name])
         print(first_loss)
-        for i in xrange(5):
+        for i in range(5):
             loss, = exe.run(fetch_list=[avg_cost.name])

         last_loss, = exe.run(fetch_list=[avg_cost.name])
         print(last_loss)

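dict.itervalues(), .iterkeys(), and .iteritems() were removed in Python 3; values(), keys(), and items() now return lightweight view objects rather than copied lists, so the replacement is valid on Python 3 and still cheap on memory. For example:

    d = {'x': 1, 'y': 2}
    total = sum(d.values())   # view on Py3, copied list on Py2; iterates the same
    assert total == 3
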
@@ -25,14 +25,16 @@ from paddle.fluid.backward import append_backward
 from paddle.fluid.op import Operator
 from paddle.fluid.executor import Executor
 from paddle.fluid.framework import Program, OpProtoHolder, Variable
-from testsuite import create_op, set_input, append_input_output, append_loss_ops
+from .testsuite import create_op, set_input, append_input_output, append_loss_ops
+from functools import reduce
+from six.moves import zip


 def randomize_probability(batch_size, class_num, dtype='float32'):
     prob = np.random.uniform(
         0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
     prob_sum = prob.sum(axis=1)
-    for i in xrange(len(prob)):
+    for i in range(len(prob)):
         prob[i] /= prob_sum[i]
     return prob
@@ -86,7 +88,7 @@ def get_numeric_gradient(place,
     # we only compute gradient of one element each time.
     # we use a for loop to compute the gradient of every element.
-    for i in xrange(tensor_size):
+    for i in range(tensor_size):
         if in_place:
             set_input(scope, op, inputs, place)
@@ -139,7 +141,7 @@ class OpTest(unittest.TestCase):
             assert isinstance(
                 numpy_dict,
                 dict), "self.inputs, self.outputs must be numpy_dict"
-            for var_name, var_value in numpy_dict.iteritems():
+            for var_name, var_value in numpy_dict.items():
                 if isinstance(var_value, (np.ndarray, np.generic)):
                     self.try_call_once(var_value.dtype)
                 elif isinstance(var_value, (list, tuple)):
@@ -197,7 +199,7 @@ class OpTest(unittest.TestCase):
     def _get_io_vars(self, block, numpy_inputs):
         inputs = {}
-        for name, value in numpy_inputs.iteritems():
+        for name, value in numpy_inputs.items():
             if isinstance(value, list):
                 var_list = [
                     block.var(sub_name) for sub_name, sub_value in value
@@ -240,7 +242,7 @@ class OpTest(unittest.TestCase):
         # if the fetch_list is customized by user, we use it directly.
         # if not, fill the fetch_list by the user configured outputs in test.
         if len(fetch_list) == 0:
-            for var_name, var in outputs.iteritems():
+            for var_name, var in outputs.items():
                 if isinstance(var, list):
                     for v in var:
                         fetch_list.append(v)
@@ -252,7 +254,7 @@ class OpTest(unittest.TestCase):
                 fetch_list.append(str(out_name))
         # fetch_list = map(block.var, fetch_list)
         if not isinstance(fetch_list[0], fluid.framework.Variable):
-            fetch_list = map(block.var, fetch_list)
+            fetch_list = list(map(block.var, fetch_list))
         outs = executor.run(program,
                             feed=feed_map,
                             fetch_list=fetch_list,
@@ -334,7 +336,7 @@ class OpTest(unittest.TestCase):
     def __assert_is_close(self, numeric_grads, analytic_grads, names,
                           max_relative_error, msg_prefix):
-        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
+        for a, b, name in zip(numeric_grads, analytic_grads, names):
             abs_a = np.abs(a)
             abs_a[abs_a < 1e-3] = 1
@@ -460,6 +462,6 @@ class OpTest(unittest.TestCase):
                 use_cuda=use_cuda, loss_name=loss.name, main_program=program)
         else:
             executor = Executor(place)
-        return map(np.array,
-                   executor.run(prog, feed_dict, fetch_list,
-                                return_numpy=False))
+        return list(
+            map(np.array,
+                executor.run(prog, feed_dict, fetch_list, return_numpy=False)))

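Several of the op_test.py edits deal with laziness: in Python 3, map() and zip() return one-shot iterators, so any result that is indexed or reused must be wrapped in list(...), and itertools.izip is gone because plain zip is already lazy. The added from six.moves import zip keeps the module behaving identically under both interpreters. A sketch of the pitfall the list(map(...)) wrapping avoids:

    import numpy as np

    outs = map(np.array, [[1, 2], [3, 4]])
    outs = list(outs)      # without this, outs[0] raises TypeError on Py3
    assert outs[0].sum() == 3 and outs[1].sum() == 7
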
@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest


 class TestAccuracyOp(OpTest):
@@ -26,7 +26,7 @@ class TestAccuracyOp(OpTest):
         label = np.random.randint(0, 2, (n, 1))
         self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
         num_correct = 0
-        for rowid in xrange(n):
+        for rowid in range(n):
             for ele in indices[rowid]:
                 if ele == label[rowid]:
                     num_correct += 1

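The recurring from op_test import OpTest -> from .op_test import OpTest edits exist because Python 3 dropped implicit relative imports (PEP 328): a bare module name now always resolves against sys.path, so sibling modules inside the test package must be named explicitly. A sketch, assuming a hypothetical layout like:

    # unittests/
    #     __init__.py
    #     op_test.py            (defines OpTest)
    #     test_accuracy_op.py
    #
    # inside test_accuracy_op.py:
    from .op_test import OpTest   # explicit relative import, required on Py3

One caveat: explicit relative imports only work when the file is imported as part of its package; running such a test file directly as a script fails with "attempted relative import".
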
@@ -15,9 +15,9 @@

 import unittest
 import numpy as np
 import paddle.fluid.core as core
-from op_test import OpTest
+from .op_test import OpTest
 from scipy.special import expit
-from test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs
+from .test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs


 class TestMKLDNNReluDim2(TestRelu):

@@ -15,7 +15,7 @@

 import unittest
 import numpy as np
 import paddle.fluid.core as core
-from op_test import OpTest
+from .op_test import OpTest
 from scipy.special import expit

@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest


 class TestAdadeltaOp1(OpTest):

@@ -16,7 +16,7 @@ import unittest
 import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
-from op_test import OpTest
+from .op_test import OpTest
 import math

@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest
 from paddle.fluid import core
 from paddle.fluid.op import Operator
@@ -273,7 +273,7 @@ class TestSparseAdamOp(unittest.TestCase):
         self.setup(scope, place)

         op_args = dict()
-        for key, np_array in self.dense_inputs.iteritems():
+        for key, np_array in self.dense_inputs.items():
             var = scope.var(key).get_tensor()
             var.set(np_array, place)
             op_args[key] = key
@@ -290,7 +290,7 @@ class TestSparseAdamOp(unittest.TestCase):
         adam_op = Operator("adam", **op_args)
         adam_op.run(scope, place)

-        for key, np_array in self.outputs.iteritems():
+        for key, np_array in self.outputs.items():
             out_var = scope.var(key).get_tensor()
             actual = np.array(out_var)
             actual = actual.reshape([actual.size])

@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest


 class TestAdamaxOp1(OpTest):

@@ -16,7 +16,7 @@ import unittest
 import numpy as np
 import sys
 import math
-from op_test import OpTest
+from .op_test import OpTest


 def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios,

@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest


 class BaseTestCase(OpTest):

@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest


 class TestArgsortOp(OpTest):

@@ -80,8 +80,9 @@ class TestArrayReadWrite(unittest.TestCase):
             append_backward(total_sum_scaled)

-            g_vars = map(default_main_program().global_block().var,
-                         [each_x.name + "@GRAD" for each_x in x])
+            g_vars = list(
+                map(default_main_program().global_block().var,
+                    [each_x.name + "@GRAD" for each_x in x]))
             g_out = [
                 item.sum()
                 for item in exe.run(

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import op_test
+from . import op_test
 import numpy
 import unittest

@@ -14,7 +14,7 @@

 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
-import op_test
+from . import op_test
 import numpy
 import unittest
 import paddle.fluid.framework as framework

@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest
 from paddle.fluid import metrics

@@ -17,9 +17,9 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.fluid as fluid
-from op_test import OpTest
+from .op_test import OpTest
 from paddle.fluid.framework import grad_var_name
-from test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad
+from .test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad


 class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining):

@@ -17,7 +17,7 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.fluid as fluid
-from op_test import OpTest
+from .op_test import OpTest
 from paddle.fluid.framework import grad_var_name
@@ -415,7 +415,7 @@ class TestBatchNormOpTraining(unittest.TestCase):
             self.__assert_close(scale_grad, out[6], "scale_grad")
             self.__assert_close(bias_grad, out[7], "bias_grad")
-            print "op test forward passed: ", str(place), data_layout
+            print("op test forward passed: ", str(place), data_layout)

         places = [core.CPUPlace()]

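The print change above is not cosmetic: under Python 2, print "a: ", x is a statement, and the parenthesized form print("a: ", x) would print a tuple, so the call form is only correct once the file targets Python 3 or imports the print function. A sketch that is safe on both:

    from __future__ import print_function   # gives Py2 the Py3 call semantics

    place, data_layout = 'CPUPlace', 'NCHW'  # illustrative values
    print("op test forward passed: ", str(place), data_layout)
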
@@ -59,8 +59,7 @@ class BeamSearchOpTester(unittest.TestCase):
             np.allclose(
                 np.array(selected_scores),
                 np.array([0.5, 0.6, 0.9, 0.7])[:, np.newaxis]))
-        self.assertEqual(selected_ids.lod(),
-                         [[0L, 2L, 4L], [0L, 1L, 2L, 3L, 4L]])
+        self.assertEqual(selected_ids.lod(), [[0, 2, 4], [0, 1, 2, 3, 4]])

     def _create_pre_ids(self):
         np_data = np.array([[1, 2, 3, 4]], dtype='int64')

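Python 3 merged int and long into one arbitrary-precision int and removed the L literal suffix (a SyntaxError on Py3), which is why the expected LoD is rewritten with plain integers; on Python 2 the comparison still holds because 0 == 0L. For example:

    n = 2 ** 64                          # long on Py2 (repr ends in L); plain int on Py3
    assert isinstance(n, int)            # true on Py3
    lod = [[0, 2, 4], [0, 1, 2, 3, 4]]   # no L suffixes needed
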
@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest
 import paddle.fluid.core as core

@@ -14,7 +14,7 @@

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest


 class TestBilinearTensorProductOp(OpTest):

@@ -13,7 +13,7 @@
 #limitations under the License.

 import unittest
 import numpy as np
-from op_test import OpTest
+from .op_test import OpTest


 def bipartite_match(distance, match_indices, match_dist):
@@ -48,7 +48,7 @@ def bipartite_match(distance, match_indices, match_dist):
 def argmax_match(distance, match_indices, match_dist, threshold):
     r, c = distance.shape
-    for j in xrange(c):
+    for j in range(c):
         if match_indices[j] != -1:
             continue
         col_dist = distance[:, j]

@@ -16,7 +16,7 @@ import unittest
 import numpy as np
 import sys
 import math
-from op_test import OpTest
+from .op_test import OpTest


 def box_coder(target_box, prior_box, prior_box_var, output_box, code_type,

Some files were not shown because too many files have changed in this diff.