remove legacy python code

inference-pre-release-gpu
Tao Luo 6 years ago
parent 81da854903
commit ef038743f1

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,13 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

File diff suppressed because it is too large

@@ -1,39 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.proto.DataConfig_pb2 import DataConfig

g_config = None


def SimpleData(files=None,
               feat_dim=None,
               context_len=None,
               buffer_capacity=None):
    data_config = DataConfig()
    data_config.type = 'simple'
    data_config.files = files
    data_config.feat_dim = feat_dim
    if context_len is not None:
        data_config.context_len = context_len
    if buffer_capacity:
        data_config.buffer_capacity = buffer_capacity
    return data_config


def get_config_funcs(trainer_config):
    global g_config
    g_config = trainer_config
    return dict(SimpleData=SimpleData)

File diff suppressed because it is too large

@@ -1,25 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from activations import *
from data_sources import *
from poolings import *
from evaluators import *
from layers import *
from networks import *
from optimizers import *
from attrs import *
from config_parser_utils import *
# This will enable operator overload for LayerOutput
import layer_math

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,51 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy

import paddle.trainer.config_parser as config_parser
from paddle.proto.TrainerConfig_pb2 import OptimizationConfig
'''
This file is a wrapper of the formal config_parser. The main idea of this file
is to separate the different config logic into different functions, such as
network configuration and optimizer configuration.
'''

__all__ = [
    "parse_trainer_config", "parse_network_config", "parse_optimizer_config",
    "reset_parser"
]


def parse_trainer_config(trainer_conf, config_arg_str):
    return config_parser.parse_config(trainer_conf, config_arg_str)


def parse_network_config(network_conf, config_arg_str=''):
    config = config_parser.parse_config(network_conf, config_arg_str)
    return config.model_config


def parse_optimizer_config(optimizer_conf, config_arg_str=''):
    config_parser.settings = copy.deepcopy(config_parser.DEFAULT_SETTING)
    optimizer_conf()
    opt_config = OptimizationConfig()
    for k, v in config_parser.settings.iteritems():
        if v is None:
            continue
        opt_config.__setattr__(k, v)
    return opt_config


def reset_parser():
    config_parser.begin_parse()
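
An illustrative sketch of how the wrappers above are typically called (not from the original file): it assumes the legacy Paddle v1 package is importable and that the v1 parser accepts a callable config, as parse_config did; net_conf is a hypothetical config function.

# Illustrative sketch only; assumes the legacy Paddle v1 package is installed.
from paddle.trainer_config_helpers import *


def net_conf():
    # hypothetical one-layer network built with the v1 helpers
    img = data_layer(name='image', size=784)
    outputs(img)


reset_parser()
model_conf = parse_network_config(net_conf)  # returns a ModelConfig protobuf
print(model_conf)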

@@ -1,213 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data Sources are helpers to define paddle training data or testing data.
"""
from paddle.trainer.config_parser import *
from .utils import deprecated
try:
import cPickle as pickle
except ImportError:
import six.moves.cPickle as pickle
__all__ = ['define_py_data_sources2']
def define_py_data_source(file_list,
cls,
module,
obj,
args=None,
async=False,
data_cls=PyData):
"""
Define a python data source.
For example, the simplest usage in trainer_config.py as follow:
.. code-block:: python
define_py_data_source("train.list", TrainData, "data_provider", "process")
Or. if you want to pass arguments from trainer_config to data_provider.py, then
.. code-block:: python
define_py_data_source("train.list", TrainData, "data_provider", "process",
args={"dictionary": dict_name})
:param data_cls:
:param file_list: file list name, which contains all data file paths
:type file_list: basestring
:param cls: Train or Test Class.
:type cls: TrainData or TestData
:param module: python module name.
:type module: basestring
:param obj: python object name. May be a function name if using
PyDataProviderWrapper.
:type obj: basestring
:param args: The best practice is using dict to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to
receive arguments.
:type args: string or picklable object
:param async: Load Data asynchronously or not.
:type async: bool
:return: None
:rtype: None
"""
if isinstance(file_list, list):
file_list_name = 'train.list'
if cls == TestData:
file_list_name = 'test.list'
with open(file_list_name, 'w') as f:
f.writelines(file_list)
file_list = file_list_name
if not isinstance(args, basestring) and args is not None:
args = pickle.dumps(args, 0)
cls(
data_cls(
files=file_list,
load_data_module=module,
load_data_object=obj,
load_data_args=args,
async_load_data=async))
def define_py_data_sources(train_list,
test_list,
module,
obj,
args=None,
train_async=False,
data_cls=PyData):
"""
The annotation is almost the same as define_py_data_sources2, except that
it can specific train_async and data_cls.
:param data_cls:
:param train_list: Train list name.
:type train_list: basestring
:param test_list: Test list name.
:type test_list: basestring
:param module: python module name. If train and test is different, then
pass a tuple or list to this argument.
:type module: basestring or tuple or list
:param obj: python object name. May be a function name if using
PyDataProviderWrapper. If train and test is different, then pass
a tuple or list to this argument.
:type obj: basestring or tuple or list
:param args: The best practice is using dict() to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to receive
arguments. If train and test is different, then pass a tuple
or list to this argument.
:type args: string or picklable object or list or tuple.
:param train_async: Is training data load asynchronously or not.
:type train_async: bool
:return: None
:rtype: None
"""
def __is_splitable__(o):
return (isinstance(o, list) or
isinstance(o, tuple)) and hasattr(o, '__len__') and len(o) == 2
assert train_list is not None or test_list is not None
assert module is not None and obj is not None
test_module = module
train_module = module
if __is_splitable__(module):
train_module, test_module = module
test_obj = obj
train_obj = obj
if __is_splitable__(obj):
train_obj, test_obj = obj
if args is None:
args = ""
train_args = args
test_args = args
if __is_splitable__(args):
train_args, test_args = args
if train_list is not None:
define_py_data_source(train_list, TrainData, train_module, train_obj,
train_args, train_async, data_cls)
if test_list is not None:
define_py_data_source(test_list, TestData, test_module, test_obj,
test_args, False, data_cls)
def define_py_data_sources2(train_list, test_list, module, obj, args=None):
"""
Define python Train/Test data sources in one method. If train/test use
the same Data Provider configuration, module/obj/args contain one argument,
otherwise contain a list or tuple of arguments. For example\:
.. code-block:: python
define_py_data_sources2(train_list="train.list",
test_list="test.list",
module="data_provider"
# if train/test use different configurations,
# obj=["process_train", "process_test"]
obj="process",
args={"dictionary": dict_name})
The related data provider can refer to :ref:`api_pydataprovider2_sequential_model` .
:param train_list: Train list name.
:type train_list: basestring
:param test_list: Test list name.
:type test_list: basestring
:param module: python module name. If train and test is different, then
pass a tuple or list to this argument.
:type module: basestring or tuple or list
:param obj: python object name. May be a function name if using
PyDataProviderWrapper. If train and test is different, then pass
a tuple or list to this argument.
:type obj: basestring or tuple or list
:param args: The best practice is using dict() to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to receive
arguments. If train and test is different, then pass a tuple
or list to this argument.
:type args: string or picklable object or list or tuple.
:return: None
:rtype: None
"""
def py_data2(files, load_data_module, load_data_object, load_data_args,
**kwargs):
data = create_data_config_proto()
data.type = 'py2'
data.files = files
data.load_data_module = load_data_module
data.load_data_object = load_data_object
data.load_data_args = load_data_args
data.async_load_data = False
return data
define_py_data_sources(
train_list=train_list,
test_list=test_list,
module=module,
obj=obj,
args=args,
data_cls=py_data2)
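
An illustrative trainer-config sketch of the train/test splitting described in the docstrings above (not from the original file): when module, obj, or args is a two-element list or tuple, the first entry configures the train source and the second the test source. The provider and list-file names are hypothetical, and the legacy Paddle v1 package is assumed.

# Illustrative sketch only; "data_provider", "train.list" and "test.list" are
# hypothetical names, and the legacy Paddle v1 package is assumed.
from paddle.trainer_config_helpers import *

define_py_data_sources2(
    train_list='train.list',
    test_list='test.list',
    module='data_provider',                  # same provider module for both
    obj=['process_train', 'process_test'],   # different provider functions
    args={'dictionary': 'dict.txt'})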

@@ -1,164 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect

from .attrs import ParamAttr
from .activations import TanhActivation
from paddle.trainer.config_parser import *

__all__ = [
    'wrap_name_default', 'wrap_param_attr_default', 'wrap_bias_attr_default',
    'wrap_act_default', 'wrap_param_default'
]


def __default_not_set_callback__(kwargs, name):
    return name not in kwargs or kwargs[name] is None


def wrap_param_default(param_names=None,
                       default_factory=None,
                       not_set_callback=__default_not_set_callback__):
    assert param_names is not None
    assert isinstance(param_names, list) or isinstance(param_names, tuple)
    for each_param_name in param_names:
        assert isinstance(each_param_name, basestring)

    def __impl__(func):
        @functools.wraps(func)
        def __wrapper__(*args, **kwargs):
            if len(args) != 0:
                argspec = inspect.getargspec(func)
                num_positional = len(argspec.args)
                if argspec.defaults:
                    num_positional -= len(argspec.defaults)
                if not argspec.varargs and len(args) > num_positional:
                    logger.fatal(
                        "Must use keyword arguments for non-positional args")
            for name in param_names:
                if not_set_callback(kwargs, name):  # Not set
                    kwargs[name] = default_factory(func)
            return func(*args, **kwargs)

        if hasattr(func, 'argspec'):
            __wrapper__.argspec = func.argspec
        else:
            __wrapper__.argspec = inspect.getargspec(func)
        return __wrapper__

    return __impl__


class DefaultNameFactory(object):
    def __init__(self, name_prefix):
        self.__counter__ = 0
        self.__name_prefix__ = name_prefix

    def __call__(self, func):
        if self.__name_prefix__ is None:
            self.__name_prefix__ = func.__name__
        tmp = "__%s_%d__" % (self.__name_prefix__, self.__counter__)
        self.__check_name__(tmp)
        self.__counter__ += 1
        return tmp

    def __check_name__(self, nm):
        """
        @TODO(yuyang18): Implement it!
        @param nm:
        @return:
        """
        pass

    def reset(self):
        self.__counter__ = 0


_name_factories = []


def reset_hook():
    for factory in _name_factories:
        factory.reset()


register_parse_config_hook(reset_hook)


def wrap_name_default(name_prefix=None, name_param="name"):
    """
    Decorator to set "name" arguments default to "{name_prefix}_{invoke_count}".

    ..  code:: python

        @wrap_name_default("some_name")
        def func(name=None):
            print name  # name will never be None. If name is not set,
                        # name will be "some_name_%d"

    :param name_prefix: name prefix. The wrapped function's __name__ is used
                        if None.
    :type name_prefix: basestring
    :return: a decorator to set the default name
    :rtype: callable
    """
    factory = DefaultNameFactory(name_prefix)
    _name_factories.append(factory)
    return wrap_param_default([name_param], factory)


def wrap_param_attr_default(param_names=None, default_factory=None):
    """
    Decorator to set default Parameter Attributes.

    :param default_factory:
    :param param_names: Parameter Attribute's names, list of string
    :type param_names: list
    :return: decorator
    """
    if param_names is None:
        param_names = ['param_attr']
    if default_factory is None:
        default_factory = lambda _: ParamAttr()

    return wrap_param_default(param_names, default_factory)


def wrap_bias_attr_default(param_names=None,
                           default_factory=None,
                           has_bias=True):
    if param_names is None:
        param_names = ['bias_attr']
    if default_factory is None:
        default_factory = lambda _: ParamAttr(initial_std=0., initial_mean=0.)

    def __bias_attr_not_set__(kwargs, name):
        if has_bias:
            return name not in kwargs or kwargs[name] is None or \
                kwargs[name] == True
        else:
            return name in kwargs and kwargs[name] == True

    return wrap_param_default(param_names, default_factory,
                              __bias_attr_not_set__)


def wrap_act_default(param_names=None, act=None):
    if param_names is None:
        param_names = ["act"]
    if act is None:
        act = TanhActivation()
    return wrap_param_default(param_names, lambda _: act)
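
A small illustrative sketch of how wrap_name_default fills in a default name (not from the original file); my_op is a hypothetical function, and the legacy Paddle v1 package (Python 2 era, hence inspect.getargspec and basestring) is assumed.

# Illustrative sketch only; assumes the legacy Paddle v1 package is installed.
from paddle.trainer_config_helpers.default_decorators import wrap_name_default


@wrap_name_default("my_op")
def my_op(input=None, name=None):
    # name is filled in as "__my_op_0__", "__my_op_1__", ... when not given
    return name


print(my_op(input='x'))                   # __my_op_0__
print(my_op(input='x'))                   # __my_op_1__
print(my_op(input='x', name='explicit'))  # explicit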

File diff suppressed because it is too large

@@ -1,113 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .layers import LayerOutput, mixed_layer, identity_projection, \
    slope_intercept_layer, scaling_layer, repeat_layer
from .attrs import is_compatible_with
from .default_decorators import *
import activations as act
from paddle.trainer.config_parser import logger

__all__ = []


def register_unary_math_op(op_name, act):
    def op(input, name=None):
        return mixed_layer(
            input=[identity_projection(input=input)], name=name, act=act)

    op = wrap_name_default(op_name)(op)
    op.__doc__ = type(act).__doc__
    globals()[op_name] = op
    __all__.append(op_name)


register_unary_math_op('exp', act.ExpActivation())
register_unary_math_op('log', act.LogActivation())
register_unary_math_op('abs', act.AbsActivation())
register_unary_math_op('sigmoid', act.SigmoidActivation())
register_unary_math_op('tanh', act.TanhActivation())
register_unary_math_op('square', act.SquareActivation())
register_unary_math_op('relu', act.ReluActivation())
register_unary_math_op('sqrt', act.SqrtActivation())
register_unary_math_op('reciprocal', act.ReciprocalActivation())


def add(layeroutput, other):
    if is_compatible_with(other, float):
        return slope_intercept_layer(input=layeroutput, intercept=other)
    if not isinstance(other, LayerOutput):
        logger.fatal("LayerOutput can only be added with"
                     " another LayerOutput or a number")
    if layeroutput.size == other.size:
        return mixed_layer(input=[
            identity_projection(input=layeroutput),
            identity_projection(input=other)
        ])
    if other.size != 1 and layeroutput.size != 1:
        logger.fatal("Two LayerOutputs can be added only if they have equal"
                     " size or one of their sizes is 1. sizes are %s and %s" %
                     (layeroutput.size, other.size))
    elif layeroutput.size == 1:
        tmp = layeroutput
        layeroutput = other
        other = tmp
    other = repeat_layer(other, layeroutput.size)
    return mixed_layer(input=[
        identity_projection(input=layeroutput), identity_projection(input=other)
    ])


LayerOutput.__radd__ = add
LayerOutput.__add__ = add


def sub(layeroutput, other):
    if is_compatible_with(other, float):
        return slope_intercept_layer(input=layeroutput, intercept=-other)
    if not isinstance(other, LayerOutput):
        logger.fatal("LayerOutput can only be subtracted with"
                     " another LayerOutput or a number")
    neg = slope_intercept_layer(input=other, slope=-1.0)
    return add(layeroutput, neg)


LayerOutput.__sub__ = sub


def rsub(layeroutput, other):
    neg = slope_intercept_layer(input=layeroutput, slope=-1.0)
    return add(neg, other)


LayerOutput.__rsub__ = rsub


def mul(layeroutput, other):
    if is_compatible_with(other, float):
        return slope_intercept_layer(input=layeroutput, slope=other)
    if not isinstance(other, LayerOutput):
        logger.fatal("LayerOutput can only be multiplied with"
                     " another LayerOutput or a number")
    elif layeroutput.size == 1:
        return scaling_layer(input=other, weight=layeroutput)
    elif other.size == 1:
        return scaling_layer(input=layeroutput, weight=other)
    else:
        logger.fatal("At least one of the operands of '*' must be a number"
                     " or a LayerOutput with size=1")


LayerOutput.__mul__ = mul
LayerOutput.__rmul__ = mul
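
An illustrative trainer-config sketch of the operator overloads registered above, in the style of the math_ops test config listed later in this diff (not from the original file). Importing paddle.trainer_config_helpers pulls in layer_math through the package __init__ shown earlier, which enables +, - and * on LayerOutput; layer names and sizes here are hypothetical, and the legacy Paddle v1 package is assumed.

# Illustrative sketch only; assumes the legacy Paddle v1 package is installed.
from paddle.trainer_config_helpers import *

settings(learning_rate=1e-3, batch_size=1000)

a = data_layer(name='a', size=128)
b = data_layer(name='b', size=128)

c = a + b    # element-wise sum: two identity projections into a mixed_layer
d = c * 0.5  # multiply by a constant via slope_intercept_layer
e = 1.0 - d  # __rsub__: negate d, then add the constant 1.0

outputs(e)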

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,148 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
__all__ = [
"BasePoolingType", "MaxPooling", "AvgPooling", "MaxWithMaskPooling",
"CudnnMaxPooling", "CudnnAvgPooling", "CudnnAvgInclPadPooling",
"SumPooling", "SquareRootNPooling"
]
class BasePoolingType(object):
"""
Base Pooling Type.
Note these pooling types are used for sequence input, not for images.
Each PoolingType contains one parameter:
:param name: pooling layer type name used by paddle.
:type name: basestring
"""
def __init__(self, name):
self.name = name
class MaxPooling(BasePoolingType):
"""
Max pooling.
Return the very large values for each dimension in sequence or time steps.
.. math::
max(samples\\_of\\_a\\_sequence)
:param output_max_index: True if output sequence max index instead of max
value. None means use default value in proto.
:type output_max_index: bool|None
"""
def __init__(self, output_max_index=None):
BasePoolingType.__init__(self, "max")
self.output_max_index = output_max_index
class MaxWithMaskPooling(BasePoolingType):
"""
MaxWithMask pooling.
Not only return the very large values for each dimension in sequence or time steps,
but also the location indices of found maxinum values.
"""
def __init__(self):
BasePoolingType.__init__(self, "max-pool-with-mask")
class CudnnMaxPooling(BasePoolingType):
"""
Cudnn max pooling only support GPU. Return the maxinum value in the
pooling window.
"""
def __init__(self):
BasePoolingType.__init__(self, "cudnn-max-pool")
class CudnnAvgPooling(BasePoolingType):
"""
Cudnn average pooling only support GPU. Return the average value in the
pooling window.
"""
def __init__(self):
BasePoolingType.__init__(self, "cudnn-avg-pool")
class CudnnAvgInclPadPooling(BasePoolingType):
"""
Cudnn average pooling only support GPU. Return the average value in the
pooling window taking into account the padding cells.
"""
def __init__(self):
BasePoolingType.__init__(self, "cudnn-avg-incl-pad-pool")
class AvgPooling(BasePoolingType):
"""
Average pooling.
Return the average values for each dimension in sequence or time steps.
.. math::
sum(samples\\_of\\_a\\_sequence)/sample\\_num
"""
STRATEGY_AVG = "average"
STRATEGY_SUM = "sum"
STRATEGY_SQROOTN = "squarerootn"
def __init__(self, strategy=STRATEGY_AVG):
BasePoolingType.__init__(self, "average")
self.strategy = strategy
class SumPooling(AvgPooling):
"""
Sum pooling.
Return the sum values of each dimension in sequence or time steps.
.. math::
sum(samples\\_of\\_a\\_sequence)
"""
def __init__(self):
AvgPooling.__init__(self, AvgPooling.STRATEGY_SUM)
class SquareRootNPooling(AvgPooling):
"""
Square Root Pooling.
Return the square root values of each dimension in sequence or time steps.
.. math::
sum(samples\\_of\\_a\\_sequence)/sqrt(sample\\_num)
"""
def __init__(self):
AvgPooling.__init__(self, AvgPooling.STRATEGY_SQROOTN)
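
A brief illustrative sketch of how these pooling types are constructed (not from the original file); a pooling object is typically passed to a layer, as in the img_pool_layer(..., pool_type=MaxPooling()) calls in the test configs later in this diff. The legacy Paddle v1 package is assumed.

# Illustrative sketch only; assumes the legacy Paddle v1 package is installed.
from paddle.trainer_config_helpers.poolings import MaxPooling, SumPooling

p = MaxPooling(output_max_index=False)
print(p.name)                 # "max"
print(SumPooling().name)      # "average" (SumPooling is AvgPooling with the "sum" strategy)
print(SumPooling().strategy)  # "sum"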

@@ -1,17 +0,0 @@
#################### test_config_parser #########################
add_test(NAME layers_test
  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/
          ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/python/paddle/trainer_config_helpers/tests/layers_test.py
  WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/python/paddle)

add_test(NAME test_reset_hook
  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/
          ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
  WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/python/paddle)

add_paddle_exe(protobuf_equal ProtobufEqualMain.cpp)

add_test(NAME test_layerHelpers
  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/
          ${PADDLE_BINARY_DIR}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE}
          ${CMAKE_CURRENT_BINARY_DIR}/protobuf_equal
)

@@ -1,59 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <google/protobuf/text_format.h>
#include <google/protobuf/util/message_differencer.h>
#include <fstream>
#include <iostream>
#include "TrainerConfig.pb.h"

// Reads a text-format protobuf message from `filename` into `conf`.
bool loadPb(google::protobuf::Message* conf, const std::string& filename) {
  std::ifstream fin;
  fin.open(filename.c_str());
  if (fin.is_open()) {
    std::string str((std::istreambuf_iterator<char>(fin)),
                    std::istreambuf_iterator<char>());
    bool ok = google::protobuf::TextFormat::ParseFromString(str, conf);
    fin.close();
    return ok;
  } else {
    return false;
  }
}

// Exit code 0 means the two configs are approximately equal; non-zero codes
// identify which step failed.
int main(int argc, char** argv) {
  std::unique_ptr<google::protobuf::Message> config1;
  std::unique_ptr<google::protobuf::Message> config2;
  if (argc == 3) {
    config1.reset(new paddle::ModelConfig());
    config2.reset(new paddle::ModelConfig());
  } else if (argc == 4) {
    config1.reset(new paddle::TrainerConfig());
    config2.reset(new paddle::TrainerConfig());
  }
  if (!config1 || !config2) {
    return 1;
  } else if (!loadPb(config1.get(), argv[1])) {
    return 2;
  } else if (!loadPb(config2.get(), argv[2])) {
    return 3;
  } else {
    if (google::protobuf::util::MessageDifferencer::ApproximatelyEquals(
            *config1, *config2)) {
      return 0;
    } else {
      return 4;
    }
  }
}

@@ -1,17 +0,0 @@
#!/bin/bash
export configs=(test_repeat_layer test_fc layer_activations projections test_print_layer
    test_sequence_pooling test_lstmemory_layer test_grumemory_layer
    last_first_seq test_expand_layer test_ntm_layers test_hsigmoid
    img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers
    test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight
    test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
    test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer
    test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_layer
    test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
    test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer
    test_seq_slice_layer test_cross_entropy_over_beam test_roi_pool_layer test_pooling3D_layer
    test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer
    test_scale_sub_region_layer test_dot_prod_layer test_l2_distance_layer
    test_factorization_machine)
export whole_configs=(test_split_datasource)

@@ -1,27 +0,0 @@
#!/bin/bash
set -e
cd `dirname $0`

# $1 is the python interpreter used to dump the configs.
protostr=$PWD/protostr

. file_list.sh

for conf in ${configs[*]}
do
    echo "Generating " $conf
    $1 -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unittest
    if [ ! -f "$protostr/$conf.protostr" ]; then
        cp $protostr/$conf.protostr.unittest $protostr/$conf.protostr
    fi
    cat ${conf}.py |$1 test_config_parser_for_non_file_config.py > $protostr/$conf.protostr.non_file_config.unittest
done

for conf in ${whole_configs[*]}
do
    echo "Generating " $conf
    $1 -m paddle.utils.dump_config $conf.py "" --whole > $protostr/$conf.protostr.unittest
    if [ ! -f "$protostr/$conf.protostr" ]; then
        cp $protostr/$conf.protostr.unittest $protostr/$conf.protostr
    fi
    cat ${conf}.py |$1 test_config_parser_for_non_file_config.py --whole > $protostr/$conf.protostr.non_file_config.unittest
done

@@ -1,38 +0,0 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *

settings(learning_rate=1e-3, batch_size=1000)

img = data_layer(name='image', size=256 * 256)

# parse_conv in config_parser.py is not strictly accurate when filter_size
# is not square, so a square filter_size is used here.
img_conv = img_conv_layer(
    input=img,
    num_channels=1,
    num_filters=64,
    filter_size=(32, 32),
    padding=(1, 1),
    dilation=(1, 1),
    stride=(1, 1),
    act=LinearActivation())
img_bn = batch_norm_layer(input=img_conv, act=ReluActivation())

img_norm = img_cmrnorm_layer(input=img_bn, size=32)

img_pool = img_pool_layer(input=img_conv, pool_size=32, pool_type=MaxPooling())

outputs(img_pool, img_norm)

@@ -1,38 +0,0 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *

settings(learning_rate=1e-3, batch_size=1000)

img = data_layer(name='image', size=227 * 227)

# parse_conv in config_parser.py is not strictly accurate when filter_size
# is not square, so a square filter_size is used here.
img_conv = img_conv_layer(
    input=img,
    num_channels=1,
    num_filters=64,
    filter_size=(32, 32),
    padding=(1, 1),
    stride=(1, 1),
    act=LinearActivation(),
    trans=True)
img_bn = batch_norm_layer(input=img_conv, act=ReluActivation())

img_norm = img_cmrnorm_layer(input=img_bn, size=32)

img_pool = img_pool_layer(input=img_conv, pool_size=32, pool_type=MaxPooling())

outputs(img_pool, img_norm)

Some files were not shown because too many files have changed in this diff.
