@@ -18,8 +18,8 @@ import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest
from scipy.special import expit
from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs
import paddle.fluid as fluid


class TestMKLDNNReluDim2(TestRelu):
@@ -97,5 +97,64 @@ class TestMKLDNNAbsDim4(TestAbs):
        self.attrs = {"use_mkldnn": True}


# Check if primitives already exist in backward
class TestMKLDNNReluPrimitivesAlreadyExist(unittest.TestCase):
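    # The same program is executed twice, so the second iteration runs the
    # forward and backward passes with MKL-DNN primitives that already exist.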
    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)

    def test_check_forward_backward(self):
        place = core.CPUPlace()

        np.random.seed(123)
        x = np.random.uniform(-1, 1, [2, 2]).astype(np.float32)
        out = np.abs(x)

        out_grad = np.random.random_sample(x.shape).astype(np.float32)
        x_grad = out_grad * np.sign(x)  # Abs grad calculation

        var_dict = {'x': x, 'out': out, 'out@GRAD': out_grad, 'x@GRAD': x_grad}
        var_names = list(var_dict.keys())
        ground_truth = {name: var_dict[name] for name in var_names}

        program = fluid.Program()
        with fluid.program_guard(program):
            block = program.global_block()
            for name in ground_truth:
                block.create_var(
                    name=name,
                    dtype='float32',
                    shape=ground_truth[name].shape)

            # Append the forward op (abs) with MKL-DNN enabled
            relu_op = block.append_op(
                type="abs",
                inputs={"X": block.var('x')},
                outputs={"Out": block.var('out')},
                attrs={"use_mkldnn": True})

            # Generate backward op_desc from the forward op and append it
            # to the block
            grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                relu_op.desc, set(), [])
            grad_op_desc = grad_op_desc_list[0]
            new_op_desc = block.desc.append_op()
            new_op_desc.copy_from(grad_op_desc)
            # Create the gradient output variables and give them a concrete
            # dtype before running the program
            for var_name in grad_op_desc.output_arg_names():
                block.desc.var(var_name.encode("ascii"))
            grad_op_desc.infer_var_type(block.desc)
            grad_op_desc.infer_shape(block.desc)
            for arg in grad_op_desc.output_arg_names():
                grad_var = block.desc.find_var(arg.encode("ascii"))
                grad_var.set_dtype(core.VarDesc.VarType.FP32)

            exe = fluid.Executor(place)

            # Do at least 2 iterations, so the second pass runs with
            # already-created MKL-DNN primitives
            for i in range(2):
                out = exe.run(
                    program,
                    feed={name: var_dict[name] for name in ['x', 'out@GRAD']},
                    fetch_list=['x@GRAD'])

            self.__assert_close(x_grad, out[0], "x@GRAD")


if __name__ == '__main__':
    unittest.main()