@@ -153,8 +153,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
             v2 = fluid.dygraph.to_variable(value2)
             loss = case1(v1, v2)
             loss.backward()
-            self.assertTrue(case1.fc2._w._grad_ivar() is not None)
-            self.assertTrue(case1.fc1._w._grad_ivar() is not None)
+            self.assertTrue(case1.fc2.weight._grad_ivar() is not None)
+            self.assertTrue(case1.fc1.weight._grad_ivar() is not None)

     def test_auto_prune2(self):
         with fluid.dygraph.guard():
@@ -166,8 +166,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
             v2 = fluid.dygraph.to_variable(value2)
             loss = case2(v1, v2)
             loss.backward()
-            self.assertTrue(case2.fc2._w._grad_ivar() is None)
-            self.assertTrue(case2.fc1._w._grad_ivar() is not None)
+            self.assertTrue(case2.fc2.weight._grad_ivar() is None)
+            self.assertTrue(case2.fc1.weight._grad_ivar() is not None)

     def test_auto_prune3(self):
         with fluid.dygraph.guard():
@@ -178,7 +178,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
             v2 = fluid.dygraph.to_variable(value2)
             loss, part2 = case3(v1, v2, 1)
             loss.backward()
-            self.assertTrue(case3.fc._w._grad_ivar() is not None)
+            self.assertTrue(case3.fc.weight._grad_ivar() is not None)
             self.assertTrue((part2.gradient() == 0).all())

     def test_auto_prune4(self):
@@ -190,7 +190,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
             v2 = fluid.dygraph.to_variable(value2)
             loss, part2 = case4(v1, v2, 1)
             part2.backward()
-            self.assertTrue(case4.fc._w._grad_ivar() is not None)
+            self.assertTrue(case4.fc.weight._grad_ivar() is not None)
             self.assertTrue((part2.gradient() == 1).all())

     def test_auto_prune5(self):
@@ -202,7 +202,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
             v2 = fluid.dygraph.to_variable(value2)
             loss, part1, part2 = case4(v1, v2, 2)
             part1.backward()
-            self.assertTrue(case4.fc._w._grad_ivar() is not None)
+            self.assertTrue(case4.fc.weight._grad_ivar() is not None)
             self.assertTrue((part2.gradient() == 0).all())

     def test_auto_prune6(self):
@@ -220,7 +220,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
             out1.stop_gradient = True
             out = fluid.layers.concat(input=[out1, out2, c], axis=1)
             out.backward()
-            self.assertTrue((fc._w.gradient() == 0).all())
+            self.assertTrue((fc.weight.gradient() == 0).all())
             self.assertTrue((out1.gradient() == 0).all())

     def test_auto_prune7(self):
@@ -239,7 +239,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
             out = fluid.layers.concat(input=[out1, out2, c], axis=1)
             backward_strategy = fluid.dygraph.BackwardStrategy()
             out.backward(backward_strategy)
-            self.assertTrue((fc._w.gradient() == 0).all())
+            self.assertTrue((fc.weight.gradient() == 0).all())
             self.assertTrue((out1.gradient() == 0).all())

     def test_auto_prune8(self):
@@ -253,17 +253,17 @@ class TestImperativeAutoPrune(unittest.TestCase):
             b = fluid.dygraph.to_variable(value1)
             c = fluid.dygraph.to_variable(value2)
             out1 = fc(a)
-            fc_origin = fc._w.numpy()
+            fc_origin = fc.weight.numpy()
             out2 = fc2(out1)
-            fc2_origin = fc2._w.numpy()
-            fc2._w.stop_gradient = True
+            fc2_origin = fc2.weight.numpy()
+            fc2.weight.stop_gradient = True
             out2.backward()
             optimizer = fluid.optimizer.SGD(
                 learning_rate=0.003,
                 parameter_list=(fc.parameters() + fc2.parameters()))
             optimizer.minimize(out2)
-            self.assertTrue(np.array_equal(fc2_origin, fc2._w.numpy()))
-            self.assertFalse(np.array_equal(fc_origin, fc._w.numpy()))
+            self.assertTrue(np.array_equal(fc2_origin, fc2.weight.numpy()))
+            self.assertFalse(np.array_equal(fc_origin, fc.weight.numpy()))

     def test_auto_prune9(self):
         with fluid.dygraph.guard():
@@ -276,19 +276,19 @@ class TestImperativeAutoPrune(unittest.TestCase):
             b = fluid.dygraph.to_variable(value1)
             c = fluid.dygraph.to_variable(value2)
             out1 = fc(a)
-            fc_origin = fc._w.numpy()
+            fc_origin = fc.weight.numpy()
             out2 = fc2(out1)
-            fc2_origin = fc2._w.numpy()
+            fc2_origin = fc2.weight.numpy()
             out2.stop_gradient = True
             out2.backward()
             optimizer = fluid.optimizer.SGD(
                 learning_rate=0.003,
                 parameter_list=(fc.parameters() + fc2.parameters()))
             optimizer.minimize(out2)
-            self.assertTrue(np.array_equal(fc2_origin, fc2._w.numpy()))
-            self.assertTrue(np.array_equal(fc_origin, fc._w.numpy()))
+            self.assertTrue(np.array_equal(fc2_origin, fc2.weight.numpy()))
+            self.assertTrue(np.array_equal(fc_origin, fc.weight.numpy()))
             try:
-                fc2._w.gradient()
+                fc2.weight.gradient()
             except ValueError as e:
                 assert type(e) == ValueError
@@ -309,7 +309,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
             backward_strategy = fluid.dygraph.BackwardStrategy()
             backward_strategy.sort_sum_gradient = True
             out.backward(backward_strategy)
-            self.assertTrue((fc._w.gradient() == 0).all())
+            self.assertTrue((fc.weight.gradient() == 0).all())
             self.assertTrue((out1.gradient() == 0).all())

     def test_auto_prune_with_optimizer(self):
@@ -336,10 +336,10 @@ class TestImperativeAutoPrune(unittest.TestCase):
             loss.backward()
             _, params_grads = optimizer.minimize(loss, grad_clip=grad_clip)
             for items in params_grads:
-                assert items[0].name is not model.embed1._w.name
-                assert items[0].name is not model.fc1._w.name
-                assert model.embed1._w._grad_ivar() is None
-                assert model.fc1._w._grad_ivar() is None
+                assert items[0].name is not model.embed1.weight.name
+                assert items[0].name is not model.fc1.weight.name
+                assert model.embed1.weight._grad_ivar() is None
+                assert model.fc1.weight._grad_ivar() is None

         with fluid.dygraph.guard(place):
             model = MyLayer2("mylayer", vocab_size, size)
@@ -355,10 +355,10 @@ class TestImperativeAutoPrune(unittest.TestCase):
             loss.backward()
             optimizer.minimize(loss, grad_clip=grad_clip)
             for items in params_grads:
-                assert items[0].name is not model.embed1._w.name
-                assert items[0].name is not model.fc1._w.name
-                assert model.embed1._w._grad_ivar() is None
-                assert model.fc1._w._grad_ivar() is None
+                assert items[0].name is not model.embed1.weight.name
+                assert items[0].name is not model.fc1.weight.name
+                assert model.embed1.weight._grad_ivar() is None
+                assert model.fc1.weight._grad_ivar() is None

     def test_case2_prune_no_grad_branch(self):
         with fluid.dygraph.guard():
@@ -369,8 +369,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
             case3 = AutoPruneLayer2("l2")
             loss = case3(v1, v2)
             loss.backward()
-            self.assertTrue(case3.fc2._w._grad_ivar() is None)
-            self.assertTrue(case3.fc._w._grad_ivar() is not None)
+            self.assertTrue(case3.fc2.weight._grad_ivar() is None)
+            self.assertTrue(case3.fc.weight._grad_ivar() is not None)

     def test_case2_prune_no_grad_branch(self):
         with fluid.dygraph.guard():
@@ -381,8 +381,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
             case3 = AutoPruneLayer2("l2")
             loss = case3(v1, v2)
             loss.backward()
-            self.assertTrue(case3.fc2._w._grad_ivar() is None)
-            self.assertTrue(case3.fc._w._grad_ivar() is not None)
+            self.assertTrue(case3.fc2.weight._grad_ivar() is None)
+            self.assertTrue(case3.fc.weight._grad_ivar() is not None)

     def test_case3_prune_no_grad_branch2(self):
         with fluid.dygraph.guard():
@@ -395,7 +395,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
             out = fluid.layers.one_hot(input=label, depth=100)
             loss = fluid.layers.mean(out)
             loss.backward()
-            self.assertTrue(fc._w._grad_ivar() is None)
+            self.assertTrue(fc.weight._grad_ivar() is None)

     def test_case4_with_no_grad_op_maker(self):
         with fluid.dygraph.guard():