fix logical error

del_some_in_makelist
tensor-tang 7 years ago
parent 86b8bdc0af
commit a785496b69

@@ -6,7 +6,7 @@ height = 227
 width = 227
 num_class = 1000
 batch_size = get_config_arg('batch_size', int, 128)
-use_mkldnn = get_config_arg('use_mkldnn', bool, False)
+gp = get_config_arg('layer_num', int, 1)
 is_infer = get_config_arg("is_infer", bool, False)
 num_samples = get_config_arg('num_samples', int, 2560)
@@ -41,12 +41,7 @@ net = img_pool_layer(input=net, pool_size=3, stride=2)
 # conv2
 net = img_conv_layer(
-    input=net,
-    filter_size=5,
-    num_filters=256,
-    stride=1,
-    padding=2,
-    groups=2 if use_mkldnn else 1)
+    input=net, filter_size=5, num_filters=256, stride=1, padding=2, groups=gp)
 net = img_cmrnorm_layer(input=net, size=5, scale=0.0001, power=0.75)
 net = img_pool_layer(input=net, pool_size=3, stride=2)
@@ -55,21 +50,11 @@ net = img_conv_layer(
     input=net, filter_size=3, num_filters=384, stride=1, padding=1)
 # conv4
 net = img_conv_layer(
-    input=net,
-    filter_size=3,
-    num_filters=384,
-    stride=1,
-    padding=1,
-    groups=2 if use_mkldnn else 1)
+    input=net, filter_size=3, num_filters=384, stride=1, padding=1, groups=gp)
 # conv5
 net = img_conv_layer(
-    input=net,
-    filter_size=3,
-    num_filters=256,
-    stride=1,
-    padding=1,
-    groups=2 if use_mkldnn else 1)
+    input=net, filter_size=3, num_filters=256, stride=1, padding=1, groups=gp)
 net = img_pool_layer(input=net, pool_size=3, stride=2)

 net = fc_layer(
@@ -84,6 +69,9 @@ net = fc_layer(
     layer_attr=ExtraAttr(drop_rate=0.5))
 net = fc_layer(input=net, size=1000, act=SoftmaxActivation())

-lab = data_layer('label', num_class)
-loss = cross_entropy(input=net, label=lab)
-outputs(loss)
+if is_infer:
+    outputs(net)
+else:
+    lab = data_layer('label', num_class)
+    loss = cross_entropy(input=net, label=lab)
+    outputs(loss)
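For context, a minimal usage sketch (not part of the commit): get_config_arg() reads key/value pairs passed through PaddlePaddle's --config_args option, so layer_num now selects the group count used by conv2/conv4/conv5 and is_infer switches the output between the softmax layer and the cross-entropy loss. The values below are illustrative assumptions, not the benchmark's actual settings.

    # sketch only: layer_num drives gp/groups, is_infer selects the output
    paddle train \
      --config=alexnet.py \
      --config_args="batch_size=128,layer_num=2,is_infer=0"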

@@ -79,7 +79,7 @@ fi
 # inference benchmark
 for use_mkldnn in True False; do
   for batchsize in 1 2 4 8 16; do
-    infer alexnet group2 $batchsize $use_mkldnn
+    infer alexnet 2 $batchsize $use_mkldnn
     infer googlenet v1 $batchsize $use_mkldnn
     infer resnet 50 $batchsize $use_mkldnn
     infer vgg 19 $batchsize $use_mkldnn

@@ -47,6 +47,6 @@ for use_mkldnn in True False; do
     train vgg 19 $batchsize $use_mkldnn
     train resnet 50 $batchsize $use_mkldnn
     train googlenet v1 $batchsize $use_mkldnn
-    train alexnet group2 $batchsize $use_mkldnn
+    train alexnet 2 $batchsize $use_mkldnn
   done
 done

@@ -56,7 +56,7 @@ fi
 # inference benchmark
 for batchsize in 1 2 4 8 16; do
-  infer alexnet group2 $batchsize $use_mkldnn
+  infer alexnet 2 $batchsize $use_mkldnn
   infer googlenet v1 $batchsize
   infer resnet 50 $batchsize
   infer vgg 19 $batchsize

@@ -36,5 +36,5 @@ for batchsize in 64 128 256; do
   train vgg 19 $batchsize
   train resnet 50 $batchsize
   train googlenet v1 $batchsize
-  train alexnet group2 $batchsize $use_mkldnn
+  train alexnet 2 $batchsize $use_mkldnn
 done
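The script hunks above replace the literal group2 tag with a bare 2, which implies the second positional argument of train/infer is forwarded to the config as layer_num. A hypothetical helper in that spirit is sketched below; the function body and the --config_args/--use_mkldnn pass-through are assumptions for illustration, not the repository's actual implementation.

    # hypothetical sketch; arguments mirror the call sites above:
    # topology, layer_num, batch size, use_mkldnn
    function train() {
      topology=$1
      layer_num=$2
      bs=$3
      use_mkldnn=$4
      paddle train \
        --config="${topology}.py" \
        --config_args="batch_size=${bs},layer_num=${layer_num}" \
        --use_mkldnn=${use_mkldnn}
    }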
