Fix bug in test_prelu and test_xe

They were passing float64 inputs to an FP32 kernel before.
tonyyang-svail-feed-op-desgin
Yu Yang 8 years ago
parent 55b68c6e90
commit 6efcbc4fcb

@@ -80,7 +80,7 @@ class TestCrossEntropyOp3(OpTest):
cross_entropy2 = (-label * np.log(X)).sum(
axis=1, keepdims=True).astype("float32")
self.inputs = {"X": X, "Label": label}
self.inputs = {"X": X, "Label": label.astype(np.float32)}
self.outputs = {"Y": cross_entropy}
self.attrs = {"softLabel": True}

@@ -17,7 +17,7 @@ class PReluTest(OpTest):
x_np_sign = np.sign(x_np)
x_np = x_np_sign * np.maximum(x_np, .005)
alpha_np = np.array([.1])
alpha_np = np.array([.1], dtype="float32")
self.inputs = {'X': x_np, 'Alpha': alpha_np}
out_np = np.maximum(self.inputs['X'], 0.)
out_np = out_np + np.minimum(self.inputs['X'],

Loading…
Cancel
Save