remove legacy C++ code

inference-pre-release-gpu
Tao Luo 6 years ago
parent 958ca2c7c5
commit 81da854903

@@ -1,174 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#include "paddle/legacy/parameter/Argument.h"
size_t Arguments::getSlotNum() const { return m->outputs.size(); }
Arguments* Arguments::createArguments(size_t slotNum) {
auto args = new Arguments();
args->m->outputs.resize(slotNum);
return args;
}
void Arguments::resize(size_t slotNum) { m->outputs.resize(slotNum); }
Arguments::Arguments() : m(new ArgumentsPrivate()) {}
Arguments::~Arguments() { delete m; }
Arguments* Arguments::createByPaddleArgumentVector(void* ptr) {
auto p = (std::vector<paddle::Argument>*)(ptr);
auto args = new Arguments();
args->m->outputs = *p;
return args;
}
Arguments* Arguments::createByPaddleArgument(const void* ptr) {
auto p = (paddle::Argument*)(ptr);
auto args = new Arguments();
args->m->outputs.push_back(*p);
return args;
}
Matrix* Arguments::getSlotValue(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return Matrix::createByPaddleMatrixPtr(&a.value);
}
Matrix* Arguments::getSlotGrad(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return Matrix::createByPaddleMatrixPtr(&a.grad);
}
IVector* Arguments::getSlotIds(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return IVector::createByPaddleVectorPtr(&a.ids);
}
Matrix* Arguments::getSlotIn(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return Matrix::createByPaddleMatrixPtr(&a.in);
}
void Arguments::setSlotValue(size_t idx, Matrix* mat) throw(RangeError) {
auto& a = m->getArg(idx);
a.value = m->cast<paddle::Matrix>(mat->getSharedPtr());
}
void Arguments::setSlotGrad(size_t idx, Matrix* mat) throw(RangeError) {
auto& a = m->getArg(idx);
a.grad = m->cast<paddle::Matrix>(mat->getSharedPtr());
}
void Arguments::setSlotIn(size_t idx, Matrix* mat) throw(RangeError) {
auto& a = m->getArg(idx);
a.in = m->cast<paddle::Matrix>(mat->getSharedPtr());
}
void Arguments::setSlotIds(size_t idx, IVector* vec) throw(RangeError) {
auto& a = m->getArg(idx);
auto& v = m->cast<paddle::IVector>(vec->getSharedPtr());
a.ids = v;
}
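// Helper: copy src into dest when both buffers exist; if dest is empty, share
// ownership of src instead. A null src leaves dest untouched.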
template <typename T1>
static inline void doCopyFromSafely(std::shared_ptr<T1>& dest,
std::shared_ptr<T1>& src) {
if (src) {
if (dest) {
dest->copyFrom(*src);
} else {
dest = src;
}
}
}
IVector* Arguments::getSlotSequenceStartPositions(size_t idx) const
throw(RangeError) {
auto& a = m->getArg(idx);
if (a.sequenceStartPositions) {
return IVector::createByPaddleVectorPtr(
&a.sequenceStartPositions->getMutableVector(false));
} else {
return nullptr;
}
}
IVector* Arguments::getSlotSubSequenceStartPositions(size_t idx) const
throw(RangeError) {
auto& a = m->getArg(idx);
if (a.subSequenceStartPositions) {
return IVector::createByPaddleVectorPtr(
&a.subSequenceStartPositions->getMutableVector(false));
} else {
return nullptr;
}
}
void Arguments::setSlotSequenceStartPositions(size_t idx,
IVector* vec) throw(RangeError) {
auto& a = m->getArg(idx);
auto& v = m->cast<paddle::IVector>(vec->getSharedPtr());
a.sequenceStartPositions = std::make_shared<paddle::ICpuGpuVector>(v);
}
void Arguments::setSlotSubSequenceStartPositions(
size_t idx, IVector* vec) throw(RangeError) {
auto& a = m->getArg(idx);
auto& v = m->cast<paddle::IVector>(vec->getSharedPtr());
a.subSequenceStartPositions = std::make_shared<paddle::ICpuGpuVector>(v);
}
IVector* Arguments::getSlotSequenceDim(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return IVector::createByPaddleVectorPtr(&a.cpuSequenceDims);
}
void Arguments::setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError) {
auto& a = m->getArg(idx);
a.cpuSequenceDims = m->cast<paddle::IVector>(vec->getSharedPtr());
}
float Arguments::sum() const { return paddle::Argument::sum(m->outputs); }
int64_t Arguments::getBatchSize(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return a.getBatchSize();
}
void Arguments::setSlotFrameHeight(size_t idx, size_t h) throw(RangeError) {
auto& a = m->getArg(idx);
a.setFrameHeight(h);
}
void Arguments::setSlotFrameWidth(size_t idx, size_t w) throw(RangeError) {
auto& a = m->getArg(idx);
a.setFrameWidth(w);
}
size_t Arguments::getSlotFrameHeight(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return a.getFrameHeight();
}
size_t Arguments::getSlotFrameWidth(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return a.getFrameWidth();
}
void* Arguments::getInternalArgumentsPtr() const { return &m->outputs; }

@@ -1,120 +0,0 @@
set(API_SOURCES
Arguments.cpp
ConfigParser.cpp
Evaluator.cpp
GradientMachine.cpp
Matrix.cpp
Parameter.cpp
ParameterOptimizer.cpp
ParameterUpdater.cpp
SequenceGenerator.cpp
Trainer.cpp
Util.cpp
Vector.cpp)
set(API_HEADER
PaddleAPI.h
Internal.h)
add_library(paddle_api STATIC ${API_SOURCES})
add_dependencies(paddle_api paddle_proto paddle_trainer_lib)
INCLUDE(${SWIG_USE_FILE})
INCLUDE_DIRECTORIES(${PADDLE_SOURCE_DIR}/paddle)
FILE(GLOB PY_PADDLE_PYTHON_FILES ${PADDLE_SOURCE_DIR}/paddle/py_paddle/*.py)
SET_SOURCE_FILES_PROPERTIES(Paddle.i PROPERTIES CPLUSPLUS ON)
SET(SWIG_NEED_FLAGS
-ftls-model=global-dynamic
-Wno-parentheses-equality
-Wno-self-assign
-Wno-maybe-uninitialized
-Wno-missing-field-initializers)
FOREACH(flag ${SWIG_NEED_FLAGS})
safe_set_cxxflag(SWIG_CXX_FLAGS ${flag})
ENDFOREACH()
SET(CMAKE_SWIG_OUTDIR ${CMAKE_CURRENT_BINARY_DIR})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SWIG_CXX_FLAGS}")
SET(SWIG_MODULE_swig_paddle_EXTRA_DEPS
paddle_parameter
paddle_function
paddle_math
paddle_utils
paddle_gserver
paddle_pserver
paddle_api
paddle_cuda
paddle_trainer_lib
paddle_network
paddle_proto
${external_project_dependencies}
${RDMA_LIBS}
)
IF(APPLE)
SET(MACOS_LD_FLAGS "-undefined dynamic_lookup -Wl,-all_load -framework CoreFoundation -framework Security")
ELSE(APPLE)
SET(START_GROUP "-Xlinker -start-group")
SET(END_GROUP "-Xlinker -end-group")
SET(ARCHIVE_START "-Wl,--whole-archive")
SET(ARCHIVE_END "-Wl,--no-whole-archive")
ENDIF(APPLE)
SWIG_ADD_MODULE(swig_paddle python Paddle.i)
SWIG_LINK_LIBRARIES(swig_paddle
${MACOS_LD_FLAGS}
${START_GROUP}
${ARCHIVE_START}
paddle_gserver
paddle_function
${METRIC_LIBS}
${ARCHIVE_END}
paddle_pserver
paddle_trainer_lib
paddle_network
paddle_parameter
paddle_optimizer
paddle_math
paddle_utils
paddle_proto
paddle_cuda
paddle_api
${CMAKE_DL_LIBS}
${EXTERNAL_LIBS}
${CMAKE_THREAD_LIBS_INIT}
${RDMA_LD_FLAGS}
${END_GROUP}
)
add_custom_command(OUTPUT ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/py_paddle
COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PADDLE_BINARY_DIR}/python/py_paddle
COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PADDLE_BINARY_DIR}/python/py_paddle
COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/.timestamp
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle
DEPENDS _swig_paddle
)
# TODO(yuyang18): make the wheel name calculated by cmake
add_custom_target(python_api_wheel ALL DEPENDS ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so)
if(WITH_TESTING)
IF(NOT PY_PIP_FOUND)
SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip)
ExternalProject_Add(pip
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY https://github.com/pypa/pip.git
GIT_TAG 9.0.1
PREFIX ${PIP_SOURCES_DIR}
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
BUILD_IN_SOURCE 1
#DEPENDS python setuptools python_api_wheel
)
ENDIF()
add_subdirectory(test)
endif()

@@ -1,114 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#include "paddle/legacy/trainer/Trainer.h"
struct ParameterConfigPrivate {
paddle::ParameterPtr parameter;
paddle::ParameterConfig config;
inline paddle::ParameterConfig* getConfigPtr() {
if (parameter != nullptr) {
auto& conf = parameter->getConfig();
return const_cast<paddle::ParameterConfig*>(&conf);
} else {
return &config;
}
}
};
TrainerConfig::TrainerConfig() : m(new TrainerConfigPrivate()) {}
TrainerConfig::~TrainerConfig() { delete m; }
TrainerConfig* TrainerConfig::createFromTrainerConfigFile(
const std::string& confPath) {
LOG(INFO) << "load trainer config from " << confPath;
auto conf = std::make_shared<paddle::TrainerConfigHelper>(confPath);
auto retv = new TrainerConfig();
retv->m->conf = conf;
return retv;
}
TrainerConfig* TrainerConfig::createFromProtoString(const std::string& str) {
auto retv = new TrainerConfig();
paddle::TrainerConfig trainerConfigProto;
auto conf = std::make_shared<paddle::TrainerConfigHelper>(trainerConfigProto);
CHECK(conf->getMutableConfig().ParseFromString(str));
retv->m->conf = conf;
return retv;
}
ModelConfig::ModelConfig() : m(new ModelConfigPrivate()) {}
ModelConfig::~ModelConfig() { delete m; }
ModelConfig* TrainerConfig::getModelConfig() const {
auto retv = new ModelConfig();
retv->m->conf = m->conf;
return retv;
}
ParameterConfig::ParameterConfig() : m(new ParameterConfigPrivate()) {}
ParameterConfig::~ParameterConfig() { delete m; }
ParameterConfig* ParameterConfig::createParameterConfigFromParameterSharedPtr(
void* ptr) {
auto& p = *(paddle::ParameterPtr*)(ptr);
if (p != nullptr) {
auto conf = new ParameterConfig();
conf->m->parameter = p;
return conf;
} else {
return nullptr;
}
}
ParameterConfig* ParameterConfig::createParameterConfigFromParameterPtr(
void* ptr) {
auto& p = *(paddle::Parameter*)(ptr);
auto conf = new ParameterConfig();
conf->m->config = p.getConfig();
return conf;
}
std::string ParameterConfig::toProtoString() const {
return m->getConfigPtr()->SerializeAsString();
}
void* ParameterConfig::getRawPtr() { return m->getConfigPtr(); }
OptimizationConfig::OptimizationConfig() : m(new OptimizationConfigPrivate()) {}
OptimizationConfig::~OptimizationConfig() { delete m; }
std::string OptimizationConfig::toProtoString() {
return m->getConfig().SerializeAsString();
}
OptimizationConfig* TrainerConfig::getOptimizationConfig() const {
auto opt_config = new OptimizationConfig();
opt_config->m->trainer_config = m->conf;
return opt_config;
}
OptimizationConfig* OptimizationConfig::createFromProtoString(
const std::string& str) {
auto conf = new OptimizationConfig();
conf->m->config.ParseFromString(str);
return conf;
}

@@ -1,44 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <sstream>
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
Evaluator::Evaluator() : m(new EvaluatorPrivate()) {}
Evaluator::~Evaluator() { delete m; }
void Evaluator::start() { m->rawPtr->start(); }
void Evaluator::finish() { m->rawPtr->finish(); }
std::string Evaluator::toString() {
std::ostringstream sout;
m->rawPtr->printStats(sout);
return sout.str();
}
std::vector<std::string> Evaluator::getNames() const {
std::vector<std::string> retv;
m->rawPtr->getNames(&retv);
return retv;
}
double Evaluator::getValue(const std::string name) const {
paddle::Error err;
double v = m->rawPtr->getValue(name, &err);
if (!err.isOK()) {
throw std::runtime_error(err.msg());
}
return v;
}

@@ -1,196 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#include "Internal.h"
#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h"
std::vector<int> GradientMachine::defaultParamTypes = {
PARAMETER_VALUE, PARAMETER_GRADIENT, PARAMETER_MOMENTUM};
GradientMachine::GradientMachine() : m(new GradientMachinePrivate()) {}
GradientMachine::~GradientMachine() { delete m; }
GradientMachine* GradientMachine::createFromPaddleModelPtr(
const void* confPtr,
GradientMatchineCreateMode mode,
const std::vector<int>& types) {
auto& conf = *(const paddle::ModelConfig*)(confPtr);
std::vector<ParameterType> realTypes;
staticCastVector(&realTypes, types);
auto machineRawPtr = paddle::GradientMachine::create(conf, mode, realTypes);
auto machinePtr = std::shared_ptr<paddle::GradientMachine>(machineRawPtr);
if (machinePtr != nullptr) {
auto machine = new GradientMachine();
machine->m->machine = machinePtr;
return machine;
} else {
return nullptr;
}
}
GradientMachine* GradientMachine::createByConfigProtoStr(
const std::string& protoStr,
GradientMatchineCreateMode mode,
const std::vector<int>& types) {
paddle::ModelConfig conf;
conf.ParseFromString(protoStr);
if (conf.IsInitialized()) {
return GradientMachine::createFromPaddleModelPtr(&conf, mode, types);
} else {
return nullptr;
}
}
GradientMachine* GradientMachine::createByModelConfig(
ModelConfig* conf,
GradientMatchineCreateMode mode,
const std::vector<int>& types) {
auto confPtr = &conf->m->conf->getModelConfig();
return GradientMachine::createFromPaddleModelPtr(confPtr, mode, types);
}
void GradientMachine::start() { m->machine->start(); }
void GradientMachine::finish() { m->machine->finish(); }
void GradientMachine::onPassEnd() { m->machine->onPassEnd(); }
void GradientMachine::prefetch(const Arguments& inArgs) {
auto& in =
m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
m->machine->prefetch(in);
}
void GradientMachine::forward(const Arguments& inArgs,
Arguments* outArgs,
PassType passType) {
auto& in =
m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
auto& out = m->cast<std::vector<paddle::Argument>>(
outArgs->getInternalArgumentsPtr());
paddle::PassType pt = (paddle::PassType)(passType);
m->machine->forward(in, &out, pt);
}
UpdateCallback::~UpdateCallback() {}
void UpdateCallback::apply(Parameter* p) {
// UNUSED(p);
}
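// Adapts the API-level UpdateCallback into the callable taking
// paddle::Parameter* that paddle::GradientMachine::backward expects.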
class UpdateCallbackWrapper {
public:
explicit UpdateCallbackWrapper(const UpdateCallback& callback)
: callback(const_cast<UpdateCallback&>(callback)) {}
void operator()(paddle::Parameter* param) {
auto p = Parameter::createFromRawPtr(&param);
// @TODO Use a stack variable instead.
callback.apply(p);
delete p;
}
private:
UpdateCallback& callback;
};
void GradientMachine::backward(const UpdateCallback& callback) {
m->machine->backward(UpdateCallbackWrapper(callback));
}
void GradientMachine::forwardBackward(const Arguments& inArgs,
Arguments* outArgs,
PassType passType,
const UpdateCallback& callback) {
auto& in =
m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
auto& out = m->cast<std::vector<paddle::Argument>>(
outArgs->getInternalArgumentsPtr());
paddle::PassType pt = (paddle::PassType)(passType);
m->machine->forwardBackward(in, &out, pt, UpdateCallbackWrapper(callback));
}
void GradientMachine::loadParameters(const std::string& path) {
m->machine->loadParameters(path);
}
size_t GradientMachine::getParameterSize() const {
return m->machine->getParameters().size();
}
Parameter* GradientMachine::getParameter(size_t i) throw(RangeError) {
auto params = m->machine->getParameters();
if (i < params.size()) {
return Parameter::createFromSharedPtr(&m->machine->getParameters()[i]);
} else {
throw RangeError();
}
}
size_t GradientMachine::getNonStaticParameterSize() const {
return m->machine->getNonStaticParameters().size();
}
Parameter* GradientMachine::getNonStaticParameter(size_t i) throw(RangeError) {
auto params = m->machine->getNonStaticParameters();
if (i < params.size()) {
return Parameter::createFromSharedPtr(
&m->machine->getNonStaticParameters()[i]);
} else {
throw RangeError();
}
}
void GradientMachine::randParameters() { m->machine->randParameters(); }
Arguments* GradientMachine::getLayerOutput(const std::string& layerName) const
throw(UnsupportError) {
auto nn = m->machine;
if (nn) {
auto arg = nn->getLayerOutput(layerName);
return Arguments::createByPaddleArgument(&arg);
} else {
throw UnsupportError();
}
}
SequenceGenerator* GradientMachine::asSequenceGenerator(
const std::vector<std::string>& dict,
size_t begin_id,
size_t end_id,
size_t max_length,
size_t beam_size) {
SequenceGenerator* r =
SequenceGenerator::createByGradientMachineSharedPtr(&m->machine);
r->setDict(dict);
r->setBos(begin_id);
r->setEos(end_id);
r->setMaxLength(max_length);
r->setBeamSize(beam_size);
return r;
}
Evaluator* GradientMachine::makeEvaluator() {
auto ev = new Evaluator();
ev->m->rawPtr = m->machine->makeEvaluator();
return ev;
}
void GradientMachine::eval(Evaluator* evaluator) {
m->machine->eval(evaluator->m->rawPtr);
}

@@ -1,28 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "PaddleAPI.h"
#include <algorithm>
#include <vector>
template <typename T1, typename T2>
void staticCastVector(std::vector<T2>* dest, const std::vector<T1>& src) {
dest->resize(src.size());
std::transform(src.begin(), src.end(), dest->begin(), [](T1 t) {
return static_cast<T2>(t);
});
}

File diff suppressed because it is too large.

@@ -1,202 +0,0 @@
%module(directors="1") swig_paddle
%include "std_string.i"
%{
#define SWIG_FILE_WITH_INIT
#include "legacy/api/PaddleAPI.h"
%}
%include "exception.i"
%typemap(throws) UnsupportError %{
SWIG_exception(SWIG_RuntimeError, $1.what());
SWIG_fail;
%}
%include "std_vector.i"
%include "std_pair.i"
#ifdef SWIGPYTHON
%include "numpy.i"
#endif
%init %{
#ifdef SWIGPYTHON
import_array();
#endif
%}
namespace std {
%template(vector_int) vector<int>;
%template(vector_uint) vector<unsigned int>;
%template(vector_float) vector<float>;
%template(vector_string) vector<string>;
%template(vector_vec_star) vector<Vector*>;
}
#ifdef SWIGPYTHON
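// Convert a Python list of strings into the (argc, argv) pair expected by
// initPaddle().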
%typemap(in) (int argc, char** argv) {
int i = 0;
if (!PyList_Check($input)) {
PyErr_SetString(PyExc_ValueError, "Expecting a list");
return NULL;
}
$1 = PyList_Size($input);
$2 = (char **) malloc(($1+1)*sizeof(char *));
for (i = 0; i < $1; i++) {
PyObject *s = PyList_GetItem($input,i);
if (!PyString_Check(s)) {
free($2);
PyErr_SetString(PyExc_ValueError, "List items must be strings");
return NULL;
}
$2[i] = PyString_AsString(s);
}
$2[i] = 0;
}
%typemap(freearg) (int argc, char** argv) {
if ($2) free($2);
}
%typemap(out) FloatArray {
$result = PyList_New($1.length);
for (size_t i=0; i<$1.length; ++i) {
PyList_SetItem($result, i, PyFloat_FromDouble($1.buf[i]));
}
if($1.needFree) {
delete [] $1.buf;
}
}
%typemap(out) IntArray {
$result = PyList_New($1.length);
for (size_t i=0; i<$1.length; ++i) {
PyList_SetItem($result, i, PyInt_FromLong($1.buf[i]));
}
if ($1.needFree) {
delete [] $1.buf;
}
}
%typemap(out) IntWithFloatArray {
$result = PyList_New($1.length);
for (size_t i=0; i<$1.length; ++i) {
PyList_SetItem($result, i, PyTuple_Pack(2,
PyInt_FromLong($1.idxBuf[i]),
PyFloat_FromDouble($1.valBuf[i])
));
}
if ($1.needFree) {
delete [] $1.idxBuf;
delete [] $1.valBuf;
}
}
%rename(__getitem__) IVector::get;
%rename(__setitem__) IVector::set;
%rename(__len__) IVector::getSize;
%rename(__getitem__) Vector::get;
%rename(__setitem__) Vector::set;
%rename(__len__) Vector::getSize;
%rename(__len__) Parameter::getSize;
%rename(__call__) ParameterTraverseCallback::apply;
%rename(__repr__) Evaluator::toString;
%apply (float* INPLACE_ARRAY2, int DIM1, int DIM2) {
(float* data, int dim1, int dim2)
}
%apply (float** ARGOUTVIEW_ARRAY2, int* DIM1, int* DIM2) {
(float** view_data, int* dim1, int* dim2)
}
%apply (float** ARGOUTVIEWM_ARRAY2, int* DIM1, int* DIM2) {
(float** view_m_data, int* dim1, int* dim2)
}
%apply (int** ARGOUTVIEWM_ARRAY1, int* DIM1) {
(int** view_m_data, int* dim1)
}
%apply (int* INPLACE_ARRAY1, int DIM1) {
(int* data, int dim)
}
%apply (int** ARGOUTVIEW_ARRAY1, int* DIM1) {
(int** view_data, int* dim1)
}
%apply (float* INPLACE_ARRAY1, int DIM1) {
(float* data, int dim)
}
%apply (float** ARGOUTVIEW_ARRAY1, int* DIM1) {
(float** view_data, int* dim1)
}
%apply (float** ARGOUTVIEWM_ARRAY1, int* DIM1) {
(float** view_m_data, int* dim1)
}
#endif
// The functions below internally create objects with "new", so SWIG should
// handle their garbage collection. The %newobject declarations are the hints
// that tell SWIG to take ownership of the returned objects.
%newobject Matrix::createZero;
%newobject Matrix::createSparse;
%newobject Matrix::createDense;
%newobject Matrix::createDenseFromNumpy;
%newobject Matrix::createCpuDenseFromNumpy;
%newobject Matrix::createGpuDenseFromNumpy;
%newobject Vector::createZero;
%newobject Vector::create;
%newobject Vector::createVectorFromNumpy;
%newobject Vector::createCpuVectorFromNumpy;
%newobject Vector::createGpuVectorFromNumpy;
%newobject IVector::createZero;
%newobject IVector::create;
%newobject IVector::createVectorFromNumpy;
%newobject IVector::createCpuVectorFromNumpy;
%newobject IVector::createGpuVectorFromNumpy;
%newobject Trainer::createByCommandLine;
%newobject Trainer::getForwardOutput;
%newobject Trainer::getLayerOutput;
%newobject Arguments::getSlotValue;
%newobject Arguments::getSlotIds;
%newobject Arguments::getSlotIn;
%newobject Arguments::getSlotSequenceStartPositions;
%newobject Arguments::getSlotSequenceDim;
%newobject Arguments::createArguments;
%newobject GradientMachine::createByConfigProtoStr;
%newobject GradientMachine::createByModelConfig;
%newobject GradientMachine::asSequenceGenerator;
%newobject GradientMachine::getParameter;
%newobject GradientMachine::getLayerOutput;
%newobject GradientMachine::makeEvaluator;
%newobject TrainerConfig::createFromTrainerConfigFile;
%newobject TrainerConfig::getModelConfig;
%newobject TrainerConfig::getOptimizationConfig;
%newobject Parameter::getBuf;
%newobject Parameter::getConfig;
%newobject ParameterOptimizer::create;
%newobject ParameterOptimizer::needSpecialTraversal;
%newobject ParameterUpdater::createLocalUpdater;
%newobject ParameterUpdater::createRemoteUpdater;
%newobject ParameterUpdater::createNewRemoteUpdater;
%feature("director") UpdateCallback;
%feature("autodoc", 1); // To generate method stub, for code hint in ide
// Ignore many private class, and method cannot be handled by swig.
%ignore MatrixPrivate;
%ignore TrainerPrivate;
%ignore IVector::operator[];
%ignore ArgumentsPrivate;
%ignore GradientMachinePrivate;
%ignore TrainerConfigPrivate;
%ignore ModelConfigPrivate;
%ignore ParameterPrivate;
%ignore SequenceGeneratorPrivate;
%ignore VectorPrivate;
%ignore ParameterConfigPrivate;
%ignore OptimizationConfigPrivate;
%ignore ParameterTraverseCallbackPrivate;
%include "legacy/utils/GlobalConstants.h"
%include "legacy/api/PaddleAPI.h"

File diff suppressed because it is too large.

@@ -1,97 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include "PaddleAPI.h"
#include "paddle/legacy/gserver/evaluators/Evaluator.h"
#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
#include "paddle/legacy/parameter/ParameterUpdaterBase.h"
#include "paddle/legacy/trainer/TrainerConfigHelper.h"
struct GradientMachinePrivate {
std::shared_ptr<paddle::GradientMachine> machine;
template <typename T>
inline T& cast(void* ptr) {
return *(T*)(ptr);
}
};
struct OptimizationConfigPrivate {
std::shared_ptr<paddle::TrainerConfigHelper> trainer_config;
paddle::OptimizationConfig config;
const paddle::OptimizationConfig& getConfig() {
if (trainer_config != nullptr) {
return trainer_config->getOptConfig();
} else {
return config;
}
}
};
struct TrainerConfigPrivate {
std::shared_ptr<paddle::TrainerConfigHelper> conf;
TrainerConfigPrivate() {}
};
struct ModelConfigPrivate {
std::shared_ptr<paddle::TrainerConfigHelper> conf;
};
struct ArgumentsPrivate {
std::vector<paddle::Argument> outputs;
inline paddle::Argument& getArg(size_t idx) throw(RangeError) {
if (idx < outputs.size()) {
return outputs[idx];
} else {
RangeError e;
throw e;
}
}
template <typename T>
std::shared_ptr<T>& cast(void* rawPtr) const {
return *(std::shared_ptr<T>*)(rawPtr);
}
};
struct ParameterUpdaterPrivate {
std::unique_ptr<paddle::ParameterUpdater> updater;
};
struct ParameterPrivate {
std::shared_ptr<paddle::Parameter> sharedPtr;
paddle::Parameter* rawPtr;  // rawPtr is only used in ParameterUpdater;
// in other situations sharedPtr should contain the value.
ParameterPrivate() : sharedPtr(nullptr), rawPtr(nullptr) {}
paddle::Parameter* getPtr() {
if (sharedPtr) {
return sharedPtr.get();
} else {
return rawPtr;
}
}
};
struct EvaluatorPrivate {
paddle::Evaluator* rawPtr;
EvaluatorPrivate() : rawPtr(nullptr) {}
~EvaluatorPrivate() { delete rawPtr; }
};

@@ -1,68 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/legacy/parameter/Parameter.h"
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
Parameter::Parameter() : m(new ParameterPrivate()) {}
Parameter::~Parameter() { delete m; }
Parameter* Parameter::createFromRawPtr(void* ptr) {
auto p = new Parameter();
p->m->rawPtr = *static_cast<paddle::Parameter**>(ptr);
return p;
}
Parameter* Parameter::createFromSharedPtr(void* ptr) {
auto& p = *(paddle::ParameterPtr*)(ptr);
if (p == nullptr) {
return nullptr;
} else {
auto retParam = new Parameter();
retParam->m->sharedPtr = p;
return retParam;
}
}
std::string Parameter::getName() const { return m->getPtr()->getName(); }
Vector* Parameter::getBuf(ParameterType type) {
auto buf = m->getPtr()->getBuf(type);
return Vector::createByPaddleVectorPtr(&buf);
}
ParameterConfig* Parameter::getConfig() {
if (m->sharedPtr) {
return ParameterConfig::createParameterConfigFromParameterSharedPtr(
&m->sharedPtr);
} else {
return ParameterConfig::createParameterConfigFromParameterPtr(m->rawPtr);
}
}
size_t Parameter::getID() const { return m->getPtr()->getID(); }
void Parameter::setValueUpdated() { m->getPtr()->setValueUpdated(); }
bool Parameter::save(const std::string& filename) const {
return m->getPtr()->save(filename);
}
bool Parameter::load(const std::string& filename) const {
return m->getPtr()->load(filename);
}
size_t Parameter::getSize() const { return m->getPtr()->getSize(); }

@@ -1,124 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/legacy/parameter/ParameterOptimizer.h"
#include <algorithm>
#include "Internal.h"
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
struct ParameterOptimizerPrivate {
std::unique_ptr<paddle::ParameterOptimizer> optimizer;
};
struct ParameterTraverseCallbackPrivate {
paddle::ParameterOptimizer::TraverseCallback callback;
ParameterTraverseCallbackPrivate() {}
ParameterTraverseCallbackPrivate(
const paddle::ParameterOptimizer::TraverseCallback& callback)
: callback(callback) {}
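// Unwrap the API-level Vector and ParameterConfig objects into their paddle
// counterparts before invoking the wrapped traverse callback.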
void apply(const std::vector<Vector*>& vecs,
const ParameterConfig& conf,
size_t sparseId) {
std::vector<paddle::VectorPtr> real_vecs;
real_vecs.resize(vecs.size());
std::transform(vecs.begin(), vecs.end(), real_vecs.begin(), [](Vector* v) {
if (v) {
return *(paddle::VectorPtr*)(v->getSharedPtr());
} else {
return paddle::VectorPtr();
}
});
paddle::ParameterConfig& real_conf =
*(paddle::ParameterConfig*)(const_cast<ParameterConfig&>(conf)
.getRawPtr());
callback(real_vecs.data(), real_conf, sparseId);
}
};
ParameterOptimizer::ParameterOptimizer() : m(new ParameterOptimizerPrivate()) {}
ParameterOptimizer::~ParameterOptimizer() { delete m; }
ParameterOptimizer* ParameterOptimizer::create(OptimizationConfig* config) {
CHECK(config != nullptr);
auto retOptimizer = new ParameterOptimizer();
retOptimizer->m->optimizer.reset(
paddle::ParameterOptimizer::create(config->m->getConfig(), false));
return retOptimizer;
}
void ParameterOptimizer::init(size_t numRows, const ParameterConfig* config) {
auto& conf = *(paddle::ParameterConfig*)(const_cast<ParameterConfig*>(config)
->getRawPtr());
m->optimizer->init(numRows, &conf);
}
void ParameterOptimizer::startPass() { m->optimizer->startPass(); }
void ParameterOptimizer::finishPass() { m->optimizer->finishPass(); }
void ParameterOptimizer::startBatch(size_t numSamplesProcessed) {
constexpr size_t high_1 = 1UL << (sizeof(size_t) * 8 - 1);
CHECK_EQ(numSamplesProcessed & high_1, 0UL);  // Top bit must be clear so the cast to int64_t below cannot overflow.
m->optimizer->startBatch((int64_t)numSamplesProcessed);
}
void ParameterOptimizer::finishBatch() { m->optimizer->finishBatch(); }
void ParameterOptimizer::update(const std::vector<Vector*>& vecs,
const ParameterConfig& conf,
size_t sparseId) {
ParameterTraverseCallbackPrivate invoker(
[&](const paddle::VectorPtr _vecs[],
const paddle::ParameterConfig& config,
size_t sid = -1UL) { m->optimizer->update(_vecs, config, sid); });
invoker.apply(vecs, conf, sparseId);
}
std::vector<int> ParameterOptimizer::getParameterTypes() const {
std::vector<int> returnValue;
staticCastVector(&returnValue, m->optimizer->getParameterTypes());
return returnValue;
}
ParameterTraverseCallback::ParameterTraverseCallback()
: m(new ParameterTraverseCallbackPrivate()) {}
ParameterTraverseCallback::~ParameterTraverseCallback() { delete m; }
void ParameterTraverseCallback::apply(const std::vector<Vector*>& vecs,
const ParameterConfig& conf,
size_t sparseId) {
m->apply(vecs, conf, sparseId);
}
ParameterTraverseCallback* ParameterOptimizer::needSpecialTraversal(
const ParameterConfig& config) const {
auto& param_config =
*(paddle::ParameterConfig*)const_cast<ParameterConfig&>(config)
.getRawPtr();
auto callback = m->optimizer->needSpecialTraversal(param_config);
if (callback) {
auto retCallback = new ParameterTraverseCallback();
retCallback->m->callback = callback;
return retCallback;
} else {
return nullptr;
}
}

@@ -1,99 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#ifndef PADDLE_WITHOUT_GOLANG
#include "paddle/legacy/trainer/NewRemoteParameterUpdater.h"
#endif
#include "paddle/legacy/trainer/RemoteParameterUpdater.h"
#include "paddle/legacy/trainer/ThreadParameterUpdater.h"
ParameterUpdater::ParameterUpdater() : m(new ParameterUpdaterPrivate()) {}
ParameterUpdater *ParameterUpdater::createLocalUpdater(
OptimizationConfig *config) {
auto updater = new ParameterUpdater();
updater->m->updater.reset(
new paddle::SgdThreadUpdater(config->m->getConfig()));
return updater;
}
ParameterUpdater *ParameterUpdater::createNewRemoteUpdater(
OptimizationConfig *config,
const std::string pserverSpec,
const bool useEtcd) throw(UnsupportError) {
#ifndef PADDLE_WITHOUT_GOLANG
auto updater = new ParameterUpdater();
updater->m->updater.reset(new paddle::NewRemoteParameterUpdater(
config->m->getConfig(), pserverSpec, useEtcd));
return updater;
#else
throw UnsupportError("not compiled with WITH_GOLANG");
#endif
}
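// When a sparse updater is requested, the dense RemoteParameterUpdater is
// wrapped inside a SparseRemoteParameterUpdaterComposite.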
ParameterUpdater *ParameterUpdater::createRemoteUpdater(
OptimizationConfig *config, int passCount, bool useSparseUpdater) {
auto updater = new ParameterUpdater();
auto remoteUpdater = new paddle::RemoteParameterUpdater(
config->m->getConfig(), passCount, nullptr);
if (useSparseUpdater) {
std::unique_ptr<paddle::ParameterUpdater> remoteUpdaterPtr(remoteUpdater);
auto sparseRemoteUpdater =
new paddle::SparseRemoteParameterUpdaterComposite(
config->m->getConfig(),
passCount,
false,
std::move(remoteUpdaterPtr));
updater->m->updater.reset(sparseRemoteUpdater);
} else {
updater->m->updater.reset(remoteUpdater);
}
return updater;
}
ParameterUpdater::~ParameterUpdater() { delete m; }
void ParameterUpdater::init(const GradientMachine &gm) {
m->updater->init(gm.m->machine->getNonStaticParameters());
}
void ParameterUpdater::startPass() { m->updater->startPass(); }
void ParameterUpdater::finishPass() { m->updater->finishPass(); }
PassType ParameterUpdater::startBatch(size_t batchSize) {
return m->updater->startBatch((int64_t)batchSize);
}
void ParameterUpdater::finishBatch(float cost) {
m->updater->finishBatch(cost);
}
void ParameterUpdater::update(Parameter *param) {
auto paddleParam = param->m->getPtr();
m->updater->update(paddleParam);
}
void ParameterUpdater::getParametersRemote(bool fullSize, bool apply) {
m->updater->getParametersRemote(fullSize, apply);
}
void ParameterUpdater::restore() { m->updater->restore(); }
void ParameterUpdater::apply() { m->updater->apply(); }
void ParameterUpdater::catchUpWith() { m->updater->catchUpWith(); }

@@ -1,242 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <iterator>
#include <sstream>
#include <vector>
#include "PaddleAPI.h"
#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
#include "paddle/legacy/parameter/Argument.h"
#include "paddle/legacy/utils/Flags.h"
// Represents a partially generated sequence during beam search.
struct Path {
std::vector<int> ids;
float logProb;
paddle::MachineState machineState;
Path() { logProb = 0; }
Path(std::vector<int>& ids, float logProb, paddle::MachineState& machineState)
: ids(ids), logProb(logProb), machineState(machineState) {}
bool operator<(const Path& other) const { return (logProb > other.logProb); }
};
// Return the top k (k == beam_size) optimal paths using beam search. The last
// element of inArgs is the feedback Argument. gradMachine has a MaxIdLayer as
// its output, so outArgs stores the top k labels and their probabilities at
// each position.
static void findNBest(paddle::GradientMachine* gradMachine,
std::vector<paddle::Argument>& inArgs,
std::vector<Path>& finalPaths,
size_t bos_id,
size_t eos_id,
size_t max_length) {
std::vector<Path> paths;
Path emptyPath;
paths.push_back(emptyPath);
finalPaths.clear();
gradMachine->resetState();
paddle::Argument feedback = inArgs.back();
feedback.ids->setElement(0, (int)(bos_id));
float minFinalPathLogProb = 0;
size_t beam = 0;
int id;
std::vector<paddle::Argument> outArgs;
while (true) { // iterate over each generated word
std::vector<Path> newPaths;
paddle::MachineState machineState;
for (size_t j = 0; j < paths.size(); j++) {
Path& path = paths[j];
if (path.machineState.size() > 0) {
gradMachine->setState(path.machineState);
feedback.ids->setElement(0, path.ids.back());
}
gradMachine->forward(inArgs, &outArgs, paddle::PASS_TEST);
gradMachine->getState(machineState);
beam = outArgs[0].ids->getSize();
for (size_t k = 0; k < beam; k++) {
id = outArgs[0].ids->getElement(k);
float prob = outArgs[0].in->getElement(0, k);
std::vector<int> nids(path.ids);
nids.push_back(id);
float newLogProb = path.logProb + log(prob);
Path newPath(nids, newLogProb, machineState);
if (id == (int)eos_id || nids.size() >= max_length) {
finalPaths.push_back(newPath);
if (minFinalPathLogProb > newPath.logProb) {
minFinalPathLogProb = newPath.logProb;
}
} else {
newPaths.push_back(newPath);
}
}
}
if (newPaths.size() == 0) {
break;
}
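// Prune to the `beam` best partial paths: operator< on Path orders by
// descending logProb, so nth_element/resize keep the most probable ones.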
std::nth_element(newPaths.begin(),
newPaths.begin() + std::min(beam, newPaths.size()),
newPaths.end());
if (newPaths.size() > beam) {
newPaths.resize(beam);
}
// pathA < pathB means pathA.logProb > pathB.logProb
float maxPathLogProb =
std::min_element(newPaths.begin(), newPaths.end())->logProb;
if (finalPaths.size() >= beam && minFinalPathLogProb >= maxPathLogProb) {
break;
}
paths = newPaths;
} // end while
std::partial_sort(finalPaths.begin(),
finalPaths.begin() + std::min(beam, finalPaths.size()),
finalPaths.end());
if (finalPaths.size() > beam) {
finalPaths.resize(beam);
}
}
struct SequenceGeneratorPrivate {
std::shared_ptr<paddle::GradientMachine> machine;
std::shared_ptr<std::vector<std::string>> dict;
size_t beginPos;
size_t endPos;
size_t maxLength;
paddle::Argument feedback;
template <typename T>
inline T& cast(void* ptr) {
return *(T*)(ptr);
}
inline void findNBest(std::vector<paddle::Argument>& inArgs,
std::vector<Path>& path) {
::findNBest(machine.get(), inArgs, path, beginPos, endPos, maxLength);
}
SequenceGeneratorPrivate()
: dict(std::make_shared<std::vector<std::string>>()),
beginPos(0UL),
endPos(0UL),
maxLength(0UL),
feedback(__create_feedback__()) {}
private:
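// Build the feedback Argument: one id slot for the previously generated token
// plus sequence start positions {0, 1}, i.e. a single sequence of length one.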
static paddle::Argument __create_feedback__() {
paddle::Argument feedback;
feedback.ids = paddle::IVector::create(/* size= */ 1, FLAGS_use_gpu);
feedback.sequenceStartPositions =
paddle::ICpuGpuVector::create(/* size= */ 2, /* useGpu= */ false);
feedback.sequenceStartPositions->getMutableData(false)[0] = 0;
feedback.sequenceStartPositions->getMutableData(false)[1] = 1;
return feedback;
}
};
SequenceGenerator::SequenceGenerator() : m(new SequenceGeneratorPrivate()) {}
SequenceGenerator::~SequenceGenerator() { delete m; }
class PathSequenceResults : public ISequenceResults {
// ISequenceResults interface
public:
PathSequenceResults(const std::shared_ptr<std::vector<Path>>& path,
const std::shared_ptr<std::vector<std::string>>& dict)
: path_(path), dict_(dict) {}
size_t getSize() const { return path_->size(); }
std::string getSentence(size_t id, bool split) const throw(RangeError) {
if (id < getSize()) {
Path& p = (*path_)[id];
std::ostringstream sout;
std::transform(p.ids.begin(),
p.ids.end(),
std::ostream_iterator<std::string>(sout, split ? " " : ""),
[&](int id) { return (*dict_)[id]; });
return sout.str();
} else {
RangeError e;
throw e;
}
}
std::vector<int> getSequence(size_t id) const throw(RangeError) {
if (id < getSize()) {
Path& p = (*path_)[id];
return p.ids;
} else {
RangeError e;
throw e;
}
}
float getScore(size_t id) const throw(RangeError) {
if (id < getSize()) {
Path& p = (*path_)[id];
return p.logProb;
} else {
RangeError e;
throw e;
}
}
private:
std::shared_ptr<std::vector<Path>> path_;
std::shared_ptr<std::vector<std::string>> dict_;
};
ISequenceResults* SequenceGenerator::generateSequence(
const Arguments& inArgs) const {
auto& in_args =
m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
for (auto& arg : in_args) {
arg.sequenceStartPositions = m->feedback.sequenceStartPositions;
}
in_args.push_back(m->feedback);
auto path = std::make_shared<std::vector<Path>>();
m->findNBest(in_args, *path);
return new PathSequenceResults(path, m->dict);
}
SequenceGenerator* SequenceGenerator::createByGradientMachineSharedPtr(
void* ptr) {
SequenceGenerator* r = new SequenceGenerator();
r->m->machine = r->m->cast<std::shared_ptr<paddle::GradientMachine>>(ptr);
return r;
}
void SequenceGenerator::setDict(const std::vector<std::string>& dict) {
*m->dict = dict;
}
void SequenceGenerator::setBos(size_t bos) { m->beginPos = bos; }
void SequenceGenerator::setEos(size_t eos) { m->endPos = eos; }
void SequenceGenerator::setMaxLength(size_t maxLength) {
m->maxLength = maxLength;
}
void SequenceGenerator::setBeamSize(size_t beamSize) {
if (beamSize != -1UL) {
FLAGS_beam_size = beamSize;
}
}
ISequenceResults::~ISequenceResults() {}

@@ -1,175 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#include <stdlib.h>
#include <atomic>
#include <memory>
#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h"
#include "paddle/legacy/trainer/ParamUtil.h"
#include "paddle/legacy/trainer/Trainer.h"
#include "paddle/legacy/trainer/TrainerInternal.h"
#include "paddle/legacy/utils/Flags.h"
using paddle::real;
DECLARE_string(config);
DECLARE_string(init_model_path);
DECLARE_int32(start_pass);
struct TrainerPrivate : public paddle::Trainer {
bool _trainOneBatch(size_t batchSize);
bool forwardOneBatch(size_t batchSize);
void forwardOneDataBatch(const std::vector<paddle::Argument>& inArgs);
void setBatchSize(size_t batchSize);
std::vector<paddle::Argument>& getForwardOutput();
void startTestPeriod();
void finishTestPeriod();
void testOneDataBatch(const paddle::DataBatch& dataBatch);
TrainerPrivate() : paddle::Trainer() {}
};
Trainer::Trainer() : m(new TrainerPrivate()) {
auto conf = paddle::TrainerConfigHelper::createFromFlags();
if (conf != nullptr) {
m->init(conf);
}
}
Trainer::~Trainer() { delete m; }
Trainer* Trainer::createByCommandLine() throw(IOError) {
auto retv = new Trainer();
if (retv->m->getConfig().IsInitialized()) {
return retv;
} else {
throw IOError();
}
}
Trainer::Trainer(TrainerConfig* config, GradientMachine* gm)
: m(new TrainerPrivate()) {
m->init(config->m->conf, /* testing= */ false, gm ? gm->m->machine : nullptr);
}
Trainer* Trainer::create(TrainerConfig* config,
GradientMachine* gm) throw(IOError) {
auto retv = new Trainer(config, gm);
if (retv->m->getConfig().IsInitialized()) {
return retv;
} else {
retv->m->getConfig().CheckInitialized();
throw IOError();
}
}
void Trainer::startTrain() { m->startTrain(); }
void Trainer::finishTrain() { m->finishTrain(); }
void Trainer::startTrainPass() { m->startTrainPass(); }
void Trainer::finishTrainPass() { m->finishTrainPass(); }
void Trainer::trainOneDataBatch(size_t batchSize, const Arguments& inArgs) {
paddle::DataBatch dataBatch;
dataBatch.getStreams() = inArgs.m->outputs;
dataBatch.setSize(batchSize);
m->trainOneDataBatch(dataBatch);
}
bool Trainer::trainOneBatch(size_t batchSize) {
return m->_trainOneBatch(batchSize);
}
bool TrainerPrivate::_trainOneBatch(size_t batchSize) {
paddle::DataBatch dataBatch;
CHECK(dataProvider_) << "data_provider is not specified";
int num = dataProvider_->getNextBatch(batchSize, &dataBatch);
if (num == 0) {
return false;
}
trainOneDataBatch(dataBatch);
return false;
}
void TrainerPrivate::startTestPeriod() {
if (!tester_) {
createTester();
}
tester_->startTestPeriod();
}
void Trainer::startTestPeriod() { m->startTestPeriod(); }
void TrainerPrivate::testOneDataBatch(const paddle::DataBatch& dataBatch) {
tester_->testOneDataBatch(dataBatch, &forwardOutput_);
}
void Trainer::testOneDataBatch(size_t batchSize, const Arguments& args) {
paddle::DataBatch dataBatch;
dataBatch.getStreams() = args.m->outputs;
dataBatch.setSize(batchSize);
m->testOneDataBatch(dataBatch);
}
void TrainerPrivate::finishTestPeriod() { tester_->finishTestPeriod(); }
void Trainer::finishTestPeriod() { m->finishTestPeriod(); }
Arguments* Trainer::getLayerOutput(const std::string& layerName) const {
auto nn = this->m->getGradientMachine();
CHECK(nn) << "trainerInternal_.getGradientMachine() is not NeuralNetwork";
auto arg = nn->getLayerOutput(layerName);
return Arguments::createByPaddleArgument(&arg);
}
void Trainer::forwardOneBatch(size_t batchSize) {
m->forwardOneBatch(batchSize);
}
bool TrainerPrivate::forwardOneBatch(size_t batchSize) {
CHECK(dataProvider_) << "data_provider is not specified";
paddle::DataBatch dataBatch;
int num = dataProvider_->getNextBatch(batchSize, &dataBatch);
if (num == 0) {
return false;
}
forwardOneDataBatch(dataBatch.getStreams());
return true;
}
void TrainerPrivate::forwardOneDataBatch(
const std::vector<paddle::Argument>& inArgs) {
std::vector<paddle::Argument>& outArgs = forwardOutput_;
if (config_->getOptConfig().use_sparse_remote_updater()) {
trainerInternal_.getGradientMachine()->prefetch(inArgs);
trainerInternal_.getParameterUpdater()->getParametersRemote();
}
trainerInternal_.getGradientMachine()->forward(
inArgs, &outArgs, paddle::PASS_TEST);
}
Arguments* Trainer::getForwardOutput() {
return Arguments::createByPaddleArgumentVector(&m->getForwardOutput());
}
std::vector<paddle::Argument>& TrainerPrivate::getForwardOutput() {
return forwardOutput_;
}

@@ -1,60 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "paddle/legacy/parameter/Parameter.h"
#include "paddle/legacy/utils/Common.h"
#include "paddle/legacy/utils/Flags.h"
#include "paddle/legacy/utils/PythonUtil.h"
#include "paddle/legacy/utils/Util.h"
#include <algorithm>
#include <iostream>
#include <iterator>
void initPaddle(int argc, char** argv) {
paddle::initMain(argc, argv);
paddle::initPython(argc, argv);
feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);
}
FloatArray::FloatArray(const float* b, const size_t l)
: buf(b), length(l), needFree(false) {}
IntArray::IntArray(const int* b, const size_t l, bool f)
: buf(b), length(l), needFree(f) {}
IntWithFloatArray::IntWithFloatArray(const float* v,
const int* i,
size_t l,
bool f)
: valBuf(v), idxBuf(i), length(l), needFree(f) {}
bool isUsingGpu() { return FLAGS_use_gpu; }
void setUseGpu(bool useGpu) { FLAGS_use_gpu = useGpu; }
bool isGpuVersion() {
#ifndef PADDLE_WITH_CUDA
return false;
#else
return true;
#endif
}
int getTrainerCount() { return FLAGS_trainer_count; }
static_assert(NUM_PARAMETER_TYPES == paddle::NUM_PARAMETER_TYPES,
"The Parameter Type should be same in core/api and core/common");

File diff suppressed because it is too large.

@@ -1,13 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

File diff suppressed because it is too large.

@@ -1,2 +0,0 @@
*.w0
*.wbias

@@ -1,11 +0,0 @@
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/testTrain.py
COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/*.py ${CMAKE_CURRENT_BINARY_DIR}
)
add_custom_target(copy_api_test ALL DEPENDS testTrain.py)
py_test(testTrain SRCS testTrain.py)
py_test(testMatrix SRCS testMatrix.py)
py_test(testVector SRCS testVector.py)
py_test(testTrainer SRCS testTrainer.py)
py_test(testArguments SRCS testArguments.py)
py_test(testGradientMachine SRCS testGradientMachine.py)

@@ -1,54 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import numpy as np
import unittest
class TestArguments(unittest.TestCase):
def test_load_arguments(self):
m = swig_paddle.Matrix.createDense([4, 2, 4, 3, 9, 5], 2, 3)
args = swig_paddle.Arguments.createArguments(1)
args.setSlotValue(0, m)
self.assertAlmostEqual(27.0, args.sum())
mat = args.getSlotValue(0)
assert isinstance(mat, swig_paddle.Matrix)
np_mat = mat.toNumpyMatInplace()
# The matrix unittest is in testMatrix.py
self.assertEqual(np_mat.shape, (2, 3))
args.setSlotIds(0, swig_paddle.IVector.create([1, 2, 3, 4, 5, 6]))
iv = args.getSlotIds(0)
assert isinstance(iv, swig_paddle.IVector)
np_arr = iv.toNumpyArrayInplace()
self.assertEqual(np_arr.shape, (6, ))
def test_arguments_shape(self):
h, w = 4, 6
v = np.random.rand(2, h * w)
m = swig_paddle.Matrix.createDense(v.flatten(), 2, h * w)
args = swig_paddle.Arguments.createArguments(1)
args.setSlotValue(0, m)
args.setSlotFrameHeight(0, h)
args.setSlotFrameWidth(0, w)
self.assertEqual(args.getSlotFrameHeight(), h)
self.assertEqual(args.getSlotFrameWidth(), w)
if __name__ == '__main__':
swig_paddle.initPaddle("--use_gpu=0")
unittest.main()

@@ -1,116 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import paddle.proto.ParameterConfig_pb2
import util
import unittest
import numpy
class TestGradientMachine(unittest.TestCase):
def test_create_gradient_machine(self):
conf_file_path = "./testTrainConfig.py"
trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile(
conf_file_path)
self.assertIsNotNone(trainer_config)
opt_config = trainer_config.getOptimizationConfig()
model_config = trainer_config.getModelConfig()
self.assertIsNotNone(model_config)
machine = swig_paddle.GradientMachine.createByModelConfig(
model_config, swig_paddle.CREATE_MODE_NORMAL,
swig_paddle.ParameterOptimizer.create(opt_config).getParameterTypes(
))
self.assertIsNotNone(machine)
ipt, _ = util.loadMNISTTrainData()
output = swig_paddle.Arguments.createArguments(0)
optimizers = {}
# Initialize all machine parameters to 0.1
for param in machine.getParameters():
assert isinstance(param, swig_paddle.Parameter)
val = param.getBuf(swig_paddle.PARAMETER_VALUE)
assert isinstance(val, swig_paddle.Vector)
arr = numpy.full((len(val), ), 0.1, dtype="float32")
val.copyFromNumpyArray(arr)
self.assertTrue(param.save(param.getName()))
param_config = param.getConfig().toProto()
assert isinstance(param_config,
paddle.proto.ParameterConfig_pb2.ParameterConfig)
opt = swig_paddle.ParameterOptimizer.create(opt_config)
optimizers[param.getID()] = opt
num_rows = param_config.dims[1]
opt.init(num_rows, param.getConfig())
for k in optimizers:
opt = optimizers[k]
opt.startPass()
batch_size = ipt.getSlotValue(0).getHeight()
for k in optimizers:
opt = optimizers[k]
opt.startBatch(batch_size)
machine.forward(ipt, output, swig_paddle.PASS_TRAIN)
self.assertEqual(1, output.getSlotNum())
self.isCalled = False
def backward_callback(param_):
self.isCalled = isinstance(param_, swig_paddle.Parameter)
assert isinstance(param_, swig_paddle.Parameter)
vec = param_.getBuf(swig_paddle.PARAMETER_VALUE)
assert isinstance(vec, swig_paddle.Vector)
vec = vec.copyToNumpyArray()
for val_ in vec:
self.assertTrue(
util.doubleEqual(val_, 0.1))  # assert that every value is 0.1
vecs = list(param_.getBufs())
opt_ = optimizers[param_.getID()]
opt_.update(vecs, param_.getConfig())
machine.backward(backward_callback)
for k in optimizers:
opt = optimizers[k]
opt.finishBatch()
for k in optimizers:
opt = optimizers[k]
opt.finishPass()
self.assertTrue(self.isCalled)
for param in machine.getParameters():
self.assertTrue(param.load(param.getName()))
def test_train_one_pass(self):
conf_file_path = './testTrainConfig.py'
trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile(
conf_file_path)
model_config = trainer_config.getModelConfig()
machine = swig_paddle.GradientMachine.createByModelConfig(model_config)
at_end = False
output = swig_paddle.Arguments.createArguments(0)
if not at_end:
input_, at_end = util.loadMNISTTrainData(1000)
machine.forwardBackward(input_, output, swig_paddle.PASS_TRAIN)
if __name__ == '__main__':
swig_paddle.initPaddle('--use_gpu=0')
unittest.main()

@@ -1,120 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import numpy as np
import unittest
class TestMatrix(unittest.TestCase):
def test_createZero_get_set(self):
m = swig_paddle.Matrix.createZero(32, 24)
self.assertEqual(m.getWidth(), 24)
self.assertEqual(m.getHeight(), 32)
for x in xrange(24):
for y in xrange(32):
self.assertEqual(0.0, m.get(x, y))
with self.assertRaises(swig_paddle.RangeError):
m.get(51, 47)
m.set(3, 3, 3.0)
self.assertEqual(m.get(3, 3), 3.0)
def test_sparse(self):
m = swig_paddle.Matrix.createSparse(3, 3, 6, True, False, False)
self.assertIsNotNone(m)
self.assertTrue(m.isSparse())
self.assertEqual(m.getSparseValueType(), swig_paddle.SPARSE_NON_VALUE)
self.assertEqual(m.getSparseFormat(), swig_paddle.SPARSE_CSR)
m.sparseCopyFrom([0, 2, 3, 3], [0, 1, 2], [])
self.assertEqual(m.getSparseRowCols(0), [0, 1])
self.assertEqual(m.getSparseRowCols(1), [2])
self.assertEqual(m.getSparseRowCols(2), [])
def test_sparse_value(self):
m = swig_paddle.Matrix.createSparse(3, 3, 6, False, False, False)
self.assertIsNotNone(m)
m.sparseCopyFrom([0, 2, 3, 3], [0, 1, 2], [7.3, 4.2, 3.2])
def assertKVArraySame(actual, expect):
self.assertEqual(len(actual), len(expect))
for i in xrange(len(actual)):
a = actual[i]
e = expect[i]
self.assertIsInstance(a, tuple)
self.assertIsInstance(e, tuple)
self.assertEqual(len(a), 2)
self.assertEqual(len(e), 2)
self.assertEqual(a[0], e[0])
self.assertTrue(abs(a[1] - e[1]) < 1e-5)
first_row = m.getSparseRowColsVal(0)
assertKVArraySame(first_row, [(0, 7.3), (1, 4.2)])
def test_createDenseMat(self):
m = swig_paddle.Matrix.createDense([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], 2, 3)
self.assertIsNotNone(m)
self.assertTrue(abs(m.get(1, 1) - 0.5) < 1e-5)
def test_numpyCpu(self):
numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
m = swig_paddle.Matrix.createCpuDenseFromNumpy(numpy_mat, False)
self.assertEqual((int(m.getHeight()), int(m.getWidth())),
numpy_mat.shape)
# The numpy matrix and the paddle matrix share the same memory.
numpy_mat[0, 1] = 342.23
for h in xrange(m.getHeight()):
for w in xrange(m.getWidth()):
self.assertEqual(m.get(h, w), numpy_mat[h, w])
mat2 = m.toNumpyMatInplace()
mat2[1, 1] = 32.2
self.assertTrue(np.array_equal(mat2, numpy_mat))
def test_numpyGpu(self):
if swig_paddle.isGpuVersion():
numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype='float32')
gpu_m = swig_paddle.Matrix.createGpuDenseFromNumpy(numpy_mat)
assert isinstance(gpu_m, swig_paddle.Matrix)
self.assertEqual((int(gpu_m.getHeight()), int(gpu_m.getWidth())),
numpy_mat.shape)
self.assertTrue(gpu_m.isGpu())
numpy_mat = gpu_m.copyToNumpyMat()
numpy_mat[0, 1] = 3.23
for a, e in zip(gpu_m.getData(), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]):
self.assertAlmostEqual(a, e)
gpu_m.copyFromNumpyMat(numpy_mat)
for a, e in zip(gpu_m.getData(), [1.0, 3.23, 3.0, 4.0, 5.0, 6.0]):
self.assertAlmostEqual(a, e)
def test_numpy(self):
numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
m = swig_paddle.Matrix.createDenseFromNumpy(numpy_mat)
self.assertEqual((int(m.getHeight()), int(m.getWidth())),
numpy_mat.shape)
self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
for a, e in zip(m.getData(), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]):
self.assertAlmostEqual(a, e)
if __name__ == "__main__":
swig_paddle.initPaddle("--use_gpu=0")
suite = unittest.TestLoader().loadTestsFromTestCase(TestMatrix)
unittest.TextTestRunner().run(suite)
if swig_paddle.isGpuVersion():
swig_paddle.setUseGpu(True)
unittest.main()

@@ -1,116 +0,0 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import paddle.trainer.config_parser
import numpy
import util
def init_params(params):
def init_param(p):
assert isinstance(p, swig_paddle.Parameter)
val = p.getBuf(swig_paddle.PARAMETER_VALUE)
assert isinstance(val, swig_paddle.Vector)
arr = val.toNumpyArrayInplace()
for i in xrange(len(arr)):
arr[i] = numpy.random.uniform(-1.0, 1.0)
for p in params:
init_param(p)
def init_optimizers(opt_conf, params):
opts = {}
for param in params:
param_conf = param.getConfig().toProto()
opts[param.getID()] = swig_paddle.ParameterOptimizer.create(opt_conf)
opts[param.getID()].init(param_conf.dims[1], param.getConfig())
retv_opts = [None for _ in xrange(len(opts))]
for k in opts:
assert k < len(retv_opts)
retv_opts[k] = opts[k]
return retv_opts
def main():
trainer_config = paddle.trainer.config_parser.parse_config(
"./testTrainConfig.py", "")
opt_config = trainer_config.opt_config
print "========Optimization Config ======="
print opt_config
print "==================================="
opt_config = swig_paddle.OptimizationConfig.createFromProto(opt_config)
_temp_optimizer_ = swig_paddle.ParameterOptimizer.create(opt_config)
enable_types = _temp_optimizer_.getParameterTypes()
m = swig_paddle.GradientMachine.createFromConfigProto(
trainer_config.model_config, swig_paddle.CREATE_MODE_NORMAL,
enable_types)
assert m is not None
assert isinstance(m, swig_paddle.GradientMachine)
init_params(m.getParameters())
optimizers = init_optimizers(opt_config, m.getParameters())
# Train One Pass.
for optimizer in optimizers:
optimizer.startPass()
batch_id = 0
while True: # Train one batch
batch_size = 1000
inArgs, atEnd = util.loadMNISTTrainData(batch_size)
if atEnd:
break
outArgs = swig_paddle.Arguments.createArguments(0)
for optimizer in optimizers:
optimizer.startBatch(batch_size)
def update_callback(param):
try:
bufs = list(param.getBufs())
opt = optimizers[param.getID()]
opt.update(bufs, param.getConfig())
callback = opt.needSpecialTraversal(param.getConfig())
if callback is not None:
callback(bufs, param.getConfig(), swig_paddle.NO_SPARSE_ID)
except Exception as e:
print e
ev = m.makeEvaluator()
ev.start()
m.forwardBackward(inArgs, outArgs, swig_paddle.PASS_TRAIN,
update_callback)
m.eval(ev)
ev.finish()
for name in ev.getNames():
print name, ev.getValue(name)
for optimizer in optimizers:
optimizer.finishBatch()
cost_vec = outArgs.getSlotValue(0)
assert isinstance(cost_vec, swig_paddle.Matrix)
cost_vec = cost_vec.copyToNumpyMat()
print 'Finish Batch', batch_id, 'with cost ', cost_vec.sum(
) / batch_size
batch_id += 1
for optimizer in optimizers:
optimizer.finishPass()
if __name__ == '__main__':
swig_paddle.initPaddle("--use_gpu=0", "--trainer_count=1")
main()

Some files were not shown because too many files have changed in this diff.