@@ -18,82 +18,130 @@ import unittest
 import numpy
 import paddle.fluid.core as core
-from paddle.fluid.executor import Executor
-from paddle.fluid.layers import mul, data
 import paddle.fluid as fluid
+from test_eager_deletion_padding_rnn import RNNConfig, PaddingRNNTestBase
 class TestExecutor(unittest.TestCase):
     def test_mul(self):
-        a = data(name='a', shape=[784], dtype='float32')
-        b = data(
-            name='b',
-            shape=[784, 100],
-            dtype='float32',
-            append_batch_size=False)
-        output = mul(x=a, y=b)
-        place = core.CPUPlace()
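+        # Build the programs under an explicit program_guard instead of the
+        # global default program, so exe.run() can be handed main_program below.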
+        main_program = fluid.Program()
+        startup_program = fluid.Program()
+        with fluid.program_guard(main_program, startup_program):
+            a = fluid.layers.data(name='a', shape=[784], dtype='float32')
+            b = fluid.layers.data(
+                name='b',
+                shape=[784, 100],
+                dtype='float32',
+                append_batch_size=False)
+            output = fluid.layers.mul(x=a, y=b)
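+            # 'a' keeps an implicit batch dimension, so feeding a (100, 784)
+            # array makes 'output' a (100, 100) matrix.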
+        # Compute with numpy
         a_np = numpy.random.random((100, 784)).astype('float32')
         b_np = numpy.random.random((784, 100)).astype('float32')
-        exe = Executor(place)
-        import time
-        use_cache = True
-        step_num = 3
-        run_time = 0.0
-        for i in range(step_num):
-            begin = time.time()
-            outs = exe.run(feed={'a': a_np,
-                                 'b': b_np},
-                           fetch_list=[output.name],
-                           use_program_cache=use_cache)
-            end = time.time()
-            run_time += end - begin
-            out = outs[0]
-            self.assertEqual((100, 100), out.shape)
-            self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
-        print("run time %f" % run_time)
-        use_cache = False
-        run_time = 0.0
-        for i in range(step_num):
-            begin = time.time()
-            outs = exe.run(feed={'a': a_np,
-                                 'b': b_np},
-                           fetch_list=[output.name],
-                           use_program_cache=use_cache)
-            end = time.time()
-            run_time += end - begin
-            out = outs[0]
-            self.assertEqual((100, 100), out.shape)
-            self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
-        print("run time %f" % run_time)
-        use_cache = True
-        run_time = 0.0
-        for i in range(step_num):
-            begin = time.time()
-            outs = exe.run(feed={'a': a_np,
-                                 'b': b_np},
-                           fetch_list=[output.name],
-                           use_program_cache=use_cache)
-            end = time.time()
-            run_time += end - begin
-            out = outs[0]
-            self.assertEqual((100, 100), out.shape)
-            self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
-        print("run time %f" % run_time)
-        use_cache = True
-        run_time = 0.0
-        for i in range(step_num):
-            begin = time.time()
-            outs = exe.run(feed={'a': a_np,
-                                 'b': b_np},
-                           fetch_list=[output],
-                           use_program_cache=use_cache)
-            end = time.time()
-            run_time += end - begin
-            out = outs[0]
-            self.assertEqual((100, 100), out.shape)
-            self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
-        print("run time %f" % run_time)
+        out_np = numpy.dot(a_np, b_np)
+        place = core.CPUPlace()
+        exe = fluid.Executor(place)
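+        # Helper: run main_program max_iters times, check the output against
+        # out_np each time, and return the accumulated wall-clock time.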
+        def _train(use_program_cache, max_iters=1):
+            import time
+            run_time = 0.0
+            for i in range(max_iters):
+                begin = time.time()
+                outs = exe.run(program=main_program,
+                               feed={'a': a_np,
+                                     'b': b_np},
+                               fetch_list=[output.name],
+                               use_program_cache=use_program_cache)
+                end = time.time()
+                run_time += end - begin
+                out = outs[0]
+                self.assertEqual((100, 100), out.shape)
+                self.assertTrue(numpy.allclose(out, out_np))
+            return run_time
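+        # Alternate the cache on, off, then on twice more; correctness must not
+        # depend on whether the executor serves the program from its cache.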
+        max_iters = 3
+        run_time_with_cache = _train(
+            use_program_cache=True, max_iters=max_iters)
+        print("run time with program cache: %f" % run_time_with_cache)
+        run_time_without_cache = _train(
+            use_program_cache=False, max_iters=max_iters)
+        print("run time without program cache: %f" % run_time_without_cache)
+        run_time_with_cache = _train(
+            use_program_cache=True, max_iters=max_iters)
+        print("run time with program cache: %f" % run_time_with_cache)
+        run_time_with_cache = _train(
+            use_program_cache=True, max_iters=max_iters)
+        print("run time with program cache: %f" % run_time_with_cache)
+
+
+class ExecutorPaddingRNNTest(PaddingRNNTestBase):
+    def train_and_save_inference_program(self,
+                                         rnn_model="static",
+                                         parallel=True,
+                                         use_program_cache=True):
+        config = RNNConfig("test", rnn_model)
+        with fluid.scope_guard(fluid.Scope()):
+            self.train(config, parallel, use_program_cache)
+            fluid.io.save_inference_model(
+                main_program=self.main_program,
+                feeded_var_names=self.feed_order,
+                target_vars=[self.loss, self.last_hidden, self.last_cell],
+                executor=self.exe,
+                dirname="padding_rnn." + rnn_model + ".inference_model",
+                params_filename="__params__")
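+    # Train and export both RNN variants, then check that inference produces
+    # the same results with and without the executor's program cache.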
+    def test_inference_output(self):
+        for rnn_model in ["static", "padding"]:
+            # Train with parallel=True; pass parallel=False to use the
+            # default executor instead.
+            self.train_and_save_inference_program(
+                rnn_model=rnn_model, parallel=True, use_program_cache=True)
+            x_np = numpy.random.random(
+                (self.config.batch_size, self.config.num_steps,
+                 1)).astype("int64")
+            y_np = numpy.random.random(
+                (self.config.batch_size * self.config.num_steps,
+                 1)).astype("int64")
+            init_hidden_np = numpy.random.random(
+                (self.config.num_layers, self.config.batch_size,
+                 self.config.hidden_size)).astype("float32")
+            init_cell_np = numpy.random.random(
+                (self.config.num_layers, self.config.batch_size,
+                 self.config.hidden_size)).astype("float32")
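+            # Load the saved model and run inference twice in fresh scopes,
+            # toggling use_program_cache between the runs.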
+            for use_program_cache in [False, True]:
+                with fluid.scope_guard(fluid.Scope()):
+                    save_dirname = "padding_rnn." + rnn_model + ".inference_model"
+                    [inference_program, feed_target_names,
+                     fetch_targets] = fluid.io.load_inference_model(
+                         save_dirname, self.exe, params_filename="__params__")
+                    results = self.exe.run(program=inference_program,
+                                           feed={
+                                               "x": x_np,
+                                               "y": y_np,
+                                               "init_hidden": init_hidden_np,
+                                               "init_cell": init_cell_np
+                                           },
+                                           fetch_list=fetch_targets,
+                                           use_program_cache=use_program_cache)
+                    if use_program_cache is True:
+                        results_with_cache = results
+                    else:
+                        results_without_cache = results
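+            # The cached and uncached runs must agree in shape and value.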
+            self.assertEqual(
+                len(results_with_cache), len(results_without_cache))
+            for i in range(len(results_with_cache)):
+                self.assertEqual(results_with_cache[i].shape,
+                                 results_without_cache[i].shape)
+                self.assertTrue(
+                    numpy.allclose(results_with_cache[i],
+                                   results_without_cache[i]))
 if __name__ == '__main__':