commit
bd38facada
@@ -0,0 +1,14 @@
Thank you for contributing to PaddlePaddle. Submitting an issue is a great help for us.
Both Chinese and English issues are welcome.

It is hard to solve a problem when important details are missing.
Before submitting the issue, please check your request against the following criteria.

- [ ] Was a similar issue submitted or resolved before? You can search existing issues on GitHub.
- [ ] Did you search for your issue on widespread search engines?
- [ ] Is my description of the issue clear enough to reproduce the problem?
  * If an error occurred, we need details such as: how do you run your code? What system do you use? Are you using a GPU or not?
  * If you use [asciinema](https://asciinema.org/) to record how the problem happens, that's awesome! It helps us solve the problem more quickly.
- [ ] Does my description of the issue use GitHub Markdown correctly?
  * Please use proper Markdown syntax for styling all forms of writing, e.g., source code and error messages.
  * Check out [this page](https://guides.github.com/features/mastering-markdown/) to learn more about Markdown.
@@ -0,0 +1,103 @@
# CMake script for code coverage.
# If _COVERALLS_UPLOAD is ON, it will upload the JSON file to coveralls.io automatically.

# Param _COVERAGE_SRCS A list of coverage source files.
# Param _COVERALLS_UPLOAD Upload the result to coveralls.
# Param _CMAKE_SCRIPT_PATH CMake script path.
function(code_coverage _COVERAGE_SRCS _COVERALLS_UPLOAD _CMAKE_SCRIPT_PATH)
    # Clean previous gcov data.
    file(REMOVE_RECURSE ${PROJECT_BINARY_DIR}/*.gcda)

    # Find curl for uploading the JSON file later.
    if (_COVERALLS_UPLOAD)
        find_program(CURL_EXECUTABLE curl)
        if (NOT CURL_EXECUTABLE)
            message(FATAL_ERROR "Coveralls: curl not found!")
        endif()
    endif()

    # When passing a CMake list to an external process, the list
    # will be converted from the format "1;2;3" to "1 2 3", so we
    # join the source files with '*' as the separator instead.
    set(COVERAGE_SRCS "")
    foreach (SINGLE_SRC ${_COVERAGE_SRCS})
        set(COVERAGE_SRCS "${COVERAGE_SRCS}*${SINGLE_SRC}")
    endforeach()

    # Query the number of logical cores.
    cmake_host_system_information(RESULT core_size QUERY NUMBER_OF_LOGICAL_CORES)
    # Coveralls JSON file.
    set(COVERALLS_FILE ${PROJECT_BINARY_DIR}/coveralls.json)
    add_custom_target(coveralls_generate
        # Run regression tests.
        COMMAND ${CMAKE_CTEST_COMMAND}
                -j ${core_size}
                --output-on-failure
        # Generate gcov output and translate it into coveralls JSON.
        COMMAND ${CMAKE_COMMAND}
                -DCOVERAGE_SRCS="${COVERAGE_SRCS}"
                -DCOVERALLS_OUTPUT_FILE="${COVERALLS_FILE}"
                -DCOV_PATH="${PROJECT_BINARY_DIR}"
                -DPROJECT_ROOT="${PROJECT_SOURCE_DIR}"
                -P "${_CMAKE_SCRIPT_PATH}/coverallsGcovJsons.cmake"
        WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
        COMMENT "Coveralls: generating coveralls output..."
    )

    if (_COVERALLS_UPLOAD)
        message("COVERALLS UPLOAD: ON")
        # Upload the JSON file to coveralls.
        add_custom_target(coveralls_upload
            COMMAND ${CURL_EXECUTABLE}
                    -S -F json_file=@${COVERALLS_FILE}
                    https://coveralls.io/api/v1/jobs
            DEPENDS coveralls_generate
            WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
            COMMENT "Coveralls: uploading coveralls output...")

        add_custom_target(coveralls DEPENDS coveralls_upload)
    else()
        message("COVERALLS UPLOAD: OFF")
        add_custom_target(coveralls DEPENDS coveralls_generate)
    endif()
endfunction()

if(ON_COVERALLS)
    set(CMAKE_BUILD_TYPE "Debug")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")

    set(EXCLUDE_DIRS
        "demo/"
        "build/"
        "tests/"
        ".test_env/"
    )

    if(WITH_GPU)
        file(GLOB_RECURSE PADDLE_SOURCES RELATIVE "${PROJECT_SOURCE_DIR}" "*.cpp" "*.cc" "*.c" "*.cu")
    else()
        file(GLOB_RECURSE PADDLE_SOURCES RELATIVE "${PROJECT_SOURCE_DIR}" "*.cpp" "*.cc" "*.c")
    endif()

    # Exclude trivial files in PADDLE_SOURCES.
    foreach(EXCLUDE_DIR ${EXCLUDE_DIRS})
        foreach(TMP_PATH ${PADDLE_SOURCES})
            string(FIND ${TMP_PATH} ${EXCLUDE_DIR} EXCLUDE_DIR_FOUND)
            if(NOT ${EXCLUDE_DIR_FOUND} EQUAL -1)
                list(REMOVE_ITEM PADDLE_SOURCES ${TMP_PATH})
            endif()
        endforeach(TMP_PATH)
    endforeach()

    # Convert to absolute paths.
    set(PADDLE_SRCS "")
    foreach(PADDLE_SRC ${PADDLE_SOURCES})
        set(PADDLE_SRCS "${PADDLE_SRCS};${PROJECT_SOURCE_DIR}/${PADDLE_SRC}")
    endforeach()

    code_coverage(
        "${PADDLE_SRCS}"
        ${COVERALLS_UPLOAD}
        "${PROJECT_SOURCE_DIR}/cmake"
    )
endif()
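The `*`-join above works around CMake flattening `;`-separated lists into space-separated arguments when they cross a process boundary. A minimal round-trip sketch of that trick, in Python purely for illustration (the variable names are hypothetical, not part of the script):

```python
# Round-trip sketch of the '*'-join workaround: join a list with '*'
# so it survives the command line as one token, then split it back
# on the consuming side.
srcs = ["a.cpp", "b.cc", "c.c"]

joined = ""
for s in srcs:          # mirrors the foreach() loop in code_coverage()
    joined += "*" + s   # -> "*a.cpp*b.cc*c.c"

# The consumer recovers the list by splitting on '*' and dropping
# the empty leading entry.
recovered = [s for s in joined.split("*") if s]
assert recovered == srcs
```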
File diff suppressed because it is too large.
@@ -0,0 +1,76 @@
# The user should download RDMA first from the subversion repository.

# Execute the following instructions to download from svn manually:
# svn co https://svn.baidu.com/sys/ip/trunk/rdma/sockrdmav1 rdma/
# svn co https://svn.baidu.com/sys/ip/trunk/rdma/thirdparty rdma/
# We use static output in the svn repositories to avoid implicit bugs from a non-standard runtime environment.

set(RDMA_ROOT $ENV{RDMA_ROOT} CACHE PATH "Folder containing the RDMA sock library and third-party libraries")

function(generate_rdma_links)
    # Redirect to the current directory to isolate pollution from the system
    # runtime environment; this also gives unified control across different
    # gcc environments. E.g., by default gcc48 does not refer to /usr/lib64,
    # which could contain low-version runtime libraries that crash the
    # process while being loaded. This redirect trick fixes that.
    execute_process(
        COMMAND mkdir -p librdma
        COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so.1
        COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so
        COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so.1
        COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so
        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    )
endfunction(generate_rdma_links)

# Check and set headers.
find_path(RDMA_INC_SXISOCK sxi_sock.h PATHS ${RDMA_ROOT}/sockrdmav1/output/include)
find_path(RDMA_INC_XIO libxio.h PATHS ${RDMA_ROOT}/thirdparty/output/accelio)
find_path(RDMA_INC_EVENT event2 PATHS ${RDMA_ROOT}/thirdparty/output/libevent)
find_path(RDMA_INC_NUMA numa.h PATHS ${RDMA_ROOT}/thirdparty/output/libnuma)

# Check and set libs.
find_library(RDMA_LIB_SXISOCK NAMES sxisock PATHS ${RDMA_ROOT}/sockrdmav1/output)
find_library(RDMA_LIB_XIO NAMES xio PATHS ${RDMA_ROOT}/thirdparty/output/accelio)
find_library(RDMA_LIB_EVENT NAMES event PATHS ${RDMA_ROOT}/thirdparty/output/libevent)
find_library(RDMA_LIB_EVENT_CORE NAMES event_core PATHS ${RDMA_ROOT}/thirdparty/output/libevent)
find_library(RDMA_LIB_EVENT_EXTRA NAMES event_extra PATHS ${RDMA_ROOT}/thirdparty/output/libevent)
find_library(RDMA_LIB_EVENT_PTHREADS NAMES event_pthreads PATHS ${RDMA_ROOT}/thirdparty/output/libevent)
find_library(RDMA_LIB_NUMA NAMES numa PATHS ${RDMA_ROOT}/thirdparty/output/libnuma)

if(
    RDMA_INC_SXISOCK AND
    RDMA_INC_XIO AND
    RDMA_INC_EVENT AND
    RDMA_INC_NUMA AND
    RDMA_LIB_SXISOCK AND
    RDMA_LIB_XIO AND
    RDMA_LIB_EVENT AND
    RDMA_LIB_EVENT_CORE AND
    RDMA_LIB_EVENT_EXTRA AND
    RDMA_LIB_EVENT_PTHREADS AND
    RDMA_LIB_NUMA
)
    set(RDMA_INC_DIR
        ${RDMA_INC_SXISOCK}
        ${RDMA_INC_XIO}
        ${RDMA_INC_EVENT}
        ${RDMA_INC_NUMA})
    set(RDMA_LIBS
        ${RDMA_LIB_SXISOCK}
        ${RDMA_LIB_XIO}
        ${RDMA_LIB_EVENT}
        ${RDMA_LIB_EVENT_CORE}
        ${RDMA_LIB_EVENT_EXTRA}
        ${RDMA_LIB_EVENT_PTHREADS}
        ${RDMA_LIB_NUMA}
    )
    set(RDMA_LD_FLAGS "-L./librdma -libverbs -lrdmacm -Xlinker -rpath ./librdma")
    return()
endif()

# If this module fails, RDMA_INC_DIR and RDMA_LIBS will be empty, so the
# top-level module can always refer to these variables.

message(FATAL_ERROR "RDMA libraries are not found, try to set RDMA_ROOT or check all related libraries.")
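For illustration, here is the same redirect trick `generate_rdma_links()` performs, sketched in Python; the paths follow the commands above, but this sketch is not part of the build:

```python
# Sketch of generate_rdma_links(): create local symlinks under
# ./librdma so the linker resolves these libraries there instead of
# picking up incompatible copies from the system runtime.
import os

if not os.path.isdir('librdma'):
    os.makedirs('librdma')              # mkdir -p
for target, link in [
        ('/usr/lib64/libibverbs.so.1.0.0', 'librdma/libibverbs.so.1'),
        ('/usr/lib64/libibverbs.so.1.0.0', 'librdma/libibverbs.so'),
        ('/usr/lib64/librdmacm.so.1.0.0', 'librdma/librdmacm.so.1'),
        ('/usr/lib64/librdmacm.so.1.0.0', 'librdma/librdmacm.so')]:
    if os.path.lexists(link):
        os.remove(link)                 # ln -s -f overwrites existing links
    os.symlink(target, link)
```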
@@ -0,0 +1,4 @@
This folder contains scripts used in the PaddlePaddle introduction.
- use `bash train.sh` to train a simple linear regression model.
- use `python evaluate_model.py` to read the model parameters. You can see that `w` and `b` are very close to [2, 0.3].
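As a sanity check, a hedged sketch of what a successful run should produce, reusing the parameter-loading logic from `evaluate_model.py` below (the 0.1 tolerance is an assumption, not a guarantee):

```python
# Hedged sketch: after `bash train.sh` finishes its 30 passes, the
# learned parameters should approximate the generating line y = 2x + 0.3.
import numpy as np

def load_param(path):
    with open(path, 'rb') as f:
        f.read(16)  # skip the 16-byte parameter file header
        return np.fromfile(f, dtype=np.float32)

w = load_param('output/pass-00029/w')  # paths follow train.sh's --save_dir=./output
b = load_param('output/pass-00029/b')
assert abs(w[0] - 2.0) < 0.1 and abs(b[0] - 0.3) < 0.1  # assumed tolerance
```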
@@ -0,0 +1,24 @@
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer.PyDataProvider2 import *
import random


# Define the data types of the input: two real numbers.
@provider(input_types=[dense_vector(1), dense_vector(1)], use_seq=False)
def process(settings, input_file):
    # Generate 2000 samples of the line y = 2x + 0.3.
    for i in xrange(2000):
        x = random.random()
        yield [x], [2 * x + 0.3]
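To see the shape of the samples the provider yields, here is a standalone sketch (the seed is hypothetical, only there to make the output reproducible):

```python
# Standalone sketch: the kind of samples process() yields -- one
# dense input [x] and one dense label [2*x + 0.3] per sample.
import random

random.seed(0)  # hypothetical seed, for a reproducible illustration
for _ in range(3):
    x = random.random()
    print [x], [2 * x + 0.3]
```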
@@ -0,0 +1,36 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Print the model parameters of the last model.

Usage:
    python evaluate_model.py
"""
import numpy as np
import os


def load(file_name):
    with open(file_name, 'rb') as f:
        f.read(16)  # skip header for float type.
        return np.fromfile(f, dtype=np.float32)


def main():
    print 'w=%.6f, b=%.6f from pass 29' % (load('output/pass-00029/w'),
                                           load('output/pass-00029/b'))


if __name__ == '__main__':
    main()
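A small round-trip sketch of the file layout `load()` assumes: a 16-byte header followed by raw float32 values. The header contents are irrelevant here because `load()` only skips them, and the file name below is hypothetical:

```python
# Round-trip sketch: write a fake parameter file with a 16-byte
# placeholder header, then read it back the same way load() does.
import numpy as np

values = np.array([2.0, 0.3], dtype=np.float32)
with open('/tmp/fake_param', 'wb') as f:
    f.write('\0' * 16)   # placeholder header; load() skips 16 bytes
    values.tofile(f)

with open('/tmp/fake_param', 'rb') as f:
    f.read(16)
    print np.fromfile(f, dtype=np.float32)  # -> [ 2.   0.3]
```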
@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

paddle train \
    --config=trainer_config.py \
    --save_dir=./output \
    --num_passes=30 \
    2>&1 | tee 'train.log'
@@ -0,0 +1,32 @@
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

# 1. Read data. Suppose you saved the above python code as dataprovider.py.
data_file = 'empty.list'
with open(data_file, 'w') as f:
    f.writelines(' ')
define_py_data_sources2(train_list=data_file, test_list=None,
                        module='dataprovider', obj='process', args={})

# 2. Learning algorithm.
settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer())

# 3. Network configuration.
x = data_layer(name='x', size=1)
y = data_layer(name='y', size=1)
y_predict = fc_layer(input=x,
                     param_attr=ParamAttr(name='w'),
                     size=1,
                     act=LinearActivation(),
                     bias_attr=ParamAttr(name='b'))
cost = regression_cost(input=y_predict, label=y)
outputs(cost)
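A minimal numpy sketch of the computation this network describes, assuming `regression_cost` behaves like a squared-error cost; the parameter values here are hypothetical:

```python
# Numpy sketch of the forward pass the config describes: a single
# fc_layer of size 1 with linear activation is y_predict = w*x + b,
# trained against labels generated as y = 2x + 0.3.
import numpy as np

w, b = 1.5, 0.1                       # hypothetical mid-training values
x = np.array([[0.2], [0.7]])          # a batch of two inputs
y = 2 * x + 0.3                       # labels, as in dataprovider.py
y_predict = w * x + b                 # fc_layer(size=1, LinearActivation)
cost = np.mean((y_predict - y) ** 2)  # assumed squared-error regression cost
print cost
```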
@@ -0,0 +1,114 @@
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import itertools
import random

from paddle.trainer.config_parser import parse_config
from py_paddle import swig_paddle as api
from py_paddle import DataProviderConverter
from paddle.trainer.PyDataProvider2 \
    import integer_value, integer_value_sequence, sparse_binary_vector


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_data",
                        type=str, required=False, help="train data file")
    parser.add_argument("--test_data", type=str, help="test data file")
    parser.add_argument("--config",
                        type=str, required=True, help="config file name")
    parser.add_argument("--dict_file", required=True, help="dictionary file")
    parser.add_argument("--seq",
                        default=1, type=int,
                        help="whether to use sequence training")
    parser.add_argument("--use_gpu", default=0, type=int,
                        help="whether to use GPU for training")
    parser.add_argument("--trainer_count", default=1, type=int,
                        help="number of threads for training")
    parser.add_argument("--num_passes", default=5, type=int,
                        help="number of training passes")
    return parser.parse_args()


UNK_IDX = 0


def load_data(file_name, word_dict):
    with open(file_name, 'r') as f:
        for line in f:
            label, comment = line.strip().split('\t')
            words = comment.split()
            word_slot = [word_dict.get(w, UNK_IDX) for w in words]
            yield word_slot, int(label)


def load_dict(dict_file):
    word_dict = dict()
    with open(dict_file, 'r') as f:
        for i, line in enumerate(f):
            w = line.strip().split()[0]
            word_dict[w] = i
    return word_dict


def main():
    options = parse_arguments()
    api.initPaddle("--use_gpu=%s" % options.use_gpu,
                   "--trainer_count=%s" % options.trainer_count)

    word_dict = load_dict(options.dict_file)
    train_dataset = list(load_data(options.train_data, word_dict))
    if options.test_data:
        test_dataset = list(load_data(options.test_data, word_dict))
    else:
        test_dataset = None

    trainer_config = parse_config(options.config,
                                  "dict_file=%s" % options.dict_file)
    # No need to have a data provider for the trainer.
    trainer_config.ClearField('data_config')
    trainer_config.ClearField('test_data_config')

    # Create a GradientMachine from the model configuration.
    model = api.GradientMachine.createFromConfigProto(
        trainer_config.model_config)
    # Create a trainer for the gradient machine.
    trainer = api.Trainer.create(trainer_config, model)

    # Create a data converter which converts data to PaddlePaddle's
    # internal format.
    input_types = [
        integer_value_sequence(len(word_dict)) if options.seq
        else sparse_binary_vector(len(word_dict)),
        integer_value(2)]
    converter = DataProviderConverter(input_types)

    batch_size = trainer_config.opt_config.batch_size
    trainer.startTrain()
    for train_pass in xrange(options.num_passes):
        trainer.startTrainPass()
        random.shuffle(train_dataset)
        for pos in xrange(0, len(train_dataset), batch_size):
            batch = itertools.islice(train_dataset, pos, pos + batch_size)
            size = min(batch_size, len(train_dataset) - pos)
            trainer.trainOneDataBatch(size, converter(batch))
        trainer.finishTrainPass()
        if test_dataset:
            trainer.startTestPeriod()
            for pos in xrange(0, len(test_dataset), batch_size):
                batch = itertools.islice(test_dataset, pos, pos + batch_size)
                size = min(batch_size, len(test_dataset) - pos)
                trainer.testOneDataBatch(size, converter(batch))
            trainer.finishTestPeriod()
    trainer.finishTrain()


if __name__ == '__main__':
    main()
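The pass/batch loop above walks the in-memory dataset in fixed-size slices; a standalone sketch of just that batching pattern:

```python
# Standalone sketch of the batching pattern in main(): fixed-size
# slices over a list, with a shorter final batch.
import itertools

dataset = range(10)   # stand-in for train_dataset
batch_size = 4
for pos in xrange(0, len(dataset), batch_size):
    batch = list(itertools.islice(dataset, pos, pos + batch_size))
    size = min(batch_size, len(dataset) - pos)
    print size, batch  # -> 4 [0..3], then 4 [4..7], then 2 [8, 9]
```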
@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

# Note: if you use trainer_config.emb.py, trainer_config.cnn.py
# or trainer_config.lstm.py, you need to change --seq=0 to --seq=1,
# because they are sequence models.
python api_train.py \
    --config=trainer_config.lr.py \
    --trainer_count=2 \
    --num_passes=15 \
    --use_gpu=0 \
    --seq=0 \
    --train_data=data/train.txt \
    --test_data=data/test.txt \
    --dict_file=data/dict.txt \
    2>&1 | tee 'train.log'
@@ -0,0 +1,10 @@
*.pyc
train.log
data/feature
data/conll05st-release/
data/src.dict
data/test.wsj.props
data/test.wsj.seq_pair
data/test.wsj.words
data/tgt.dict
output
Some files were not shown because too many files have changed in this diff.