diff --git a/paddle/fluid/operators/instance_norm_op.cu b/paddle/fluid/operators/instance_norm_op.cu
index c060950903..4c04f6c315 100644
--- a/paddle/fluid/operators/instance_norm_op.cu
+++ b/paddle/fluid/operators/instance_norm_op.cu
@@ -328,7 +328,7 @@ class InstanceNormGradKernel
               epsilon, saved_mean_data, saved_var_data));
     } else {
       if (d_x) {
-        GradComputeDX<T, block><<<grid, block, 0, dev_ctx.stream()>>>(
+        GradComputeDX<T, block><<<NxC, block, 0, dev_ctx.stream()>>>(
             d_y->data<T>(), scale->data<BatchNormParamType<T>>(), saved_mean_data,
             x->data<T>(), saved_var_data, C, H * W * D,
             d_x->data<T>());
diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py
index ccdf12849c..c02e48bd71 100644
--- a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py
@@ -79,7 +79,7 @@ class TestInstanceNormOpTraining(unittest.TestCase):
         self.init_test_case()
 
     def init_test_case(self):
-        self.use_global_stats = False
+        self.shape = [2, 3, 4, 5]
         self.no_grad_set = set()
         self.fetch_list = [
             'y', 'saved_mean', 'saved_variance', 'x@GRAD', 'scale@GRAD',
@@ -181,12 +181,19 @@ class TestInstanceNormOpTraining(unittest.TestCase):
                 "instance_norm"):
             places.append(core.CUDAPlace(0))
         for place in places:
-            test_with_place(place, [2, 3, 4, 5])
+            test_with_place(place, self.shape)
 
 
 class TestInstanceNormOpTrainingCase1(TestInstanceNormOpTraining):
     def init_test_case(self):
-        self.use_global_stats = False
+        self.shape = [2, 3, 4, 5]
+        self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+        self.fetch_list = ['y', 'saved_mean', 'saved_variance', 'x@GRAD']
+
+
+class TestInstanceNormOpTrainingCase2(TestInstanceNormOpTraining):
+    def init_test_case(self):
+        self.shape = [20, 50, 4, 5]
         self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
         self.fetch_list = ['y', 'saved_mean', 'saved_variance', 'x@GRAD']
 