/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"

#include "paddle/gserver/gradientmachines/NeuralNetwork.h"
#include "Internal.h"

std::vector<int> GradientMachine::defaultParamTypes = {
    PARAMETER_VALUE, PARAMETER_GRADIENT, PARAMETER_MOMENTUM};

GradientMachine::GradientMachine() : m(new GradientMachinePrivate()) {}

GradientMachine::~GradientMachine() { delete m; }

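// Wraps a raw paddle::ModelConfig pointer in an API-level GradientMachine.
// Returns nullptr if the underlying paddle::GradientMachine cannot be
// created.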
GradientMachine* GradientMachine::createFromPaddleModelPtr(
    const void* confPtr, GradientMatchineCreateMode mode,
    const std::vector<int>& types) {
  auto& conf = *(const paddle::ModelConfig*)(confPtr);
  std::vector<ParameterType> realTypes;
  staticCastVector(&realTypes, types);
  auto machineRawPtr = paddle::GradientMachine::create(conf, mode, realTypes);
  auto machinePtr = std::shared_ptr<paddle::GradientMachine>(machineRawPtr);
  if (machinePtr != nullptr) {
    auto machine = new GradientMachine();
    machine->m->machine = machinePtr;
    return machine;
  } else {
    return nullptr;
  }
}

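// Builds a GradientMachine from a serialized ModelConfig protobuf string;
// returns nullptr if the string does not parse into a fully initialized
// config.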
GradientMachine* GradientMachine::createByConfigProtoStr(
    const std::string& protoStr, GradientMatchineCreateMode mode,
    const std::vector<int>& types) {
  paddle::ModelConfig conf;
  conf.ParseFromString(protoStr);
  if (conf.IsInitialized()) {
    return GradientMachine::createFromPaddleModelPtr(&conf, mode, types);
  } else {
    return nullptr;
  }
}

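// Builds a GradientMachine from an already-parsed API-level ModelConfig.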
GradientMachine* GradientMachine::createByModelConfig(
    ModelConfig* conf, GradientMatchineCreateMode mode,
    const std::vector<int>& types) {
  auto confPtr = &conf->m->conf->getModelConfig();
  return GradientMachine::createFromPaddleModelPtr(confPtr, mode, types);
}

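// Forward pass: casts the API-level Arguments back to paddle::Argument
// vectors and delegates to the wrapped machine.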
void GradientMachine::forward(const Arguments& inArgs, Arguments* outArgs,
                              PassType passType) {
  auto& in =
      m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
  auto& out = m->cast<std::vector<paddle::Argument>>(
      outArgs->getInternalArgumentsPtr());
  paddle::PassType pt = (paddle::PassType)(passType);
  m->machine->forward(in, &out, pt);
}

UpdateCallback::~UpdateCallback() {}

void UpdateCallback::apply(Parameter* p) {
  // UNUSED(p);
}

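// Adapts the API-level UpdateCallback to the per-parameter callback invoked
// by the wrapped paddle::GradientMachine during the backward pass, wrapping
// each raw paddle::Parameter* in an API-level Parameter for the duration of
// the call.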
class UpdateCallbackWrapper {
public:
  explicit UpdateCallbackWrapper(const UpdateCallback& callback)
      : callback(const_cast<UpdateCallback&>(callback)) {}

  void operator()(paddle::Parameter* param) {
    auto p = Parameter::createFromRawPtr(&param);
    // @TODO Use Stack variable instead.
    callback.apply(p);
    delete p;
  }

private:
  UpdateCallback& callback;
};

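// Backward pass: forwards each parameter update to the user-supplied
// callback via UpdateCallbackWrapper.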
void GradientMachine::backward(const UpdateCallback& callback) {
  m->machine->backward(UpdateCallbackWrapper(callback));
}

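// Runs forward and backward in a single call on the wrapped machine.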
void GradientMachine::forwardBackward(const Arguments& inArgs,
                                      Arguments* outArgs, PassType passType,
                                      const UpdateCallback& callback) {
  auto& in =
      m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
  auto& out = m->cast<std::vector<paddle::Argument>>(
      outArgs->getInternalArgumentsPtr());
  paddle::PassType pt = (paddle::PassType)(passType);
  m->machine->forwardBackward(in, &out, pt, UpdateCallbackWrapper(callback));
}

void GradientMachine::loadParameters(const std::string& path) {
  m->machine->loadParameters(path);
}

size_t GradientMachine::getParameterSize() const {
  return m->machine->getParameters().size();
}

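// Returns the i-th parameter wrapped for the API layer; throws RangeError
// when the index is out of bounds.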
Parameter* GradientMachine::getParameter(size_t i) throw(RangeError) {
  auto& params = m->machine->getParameters();
  if (i < params.size()) {
    return Parameter::createFromSharedPtr(&params[i]);
  } else {
    throw RangeError();
  }
}

void GradientMachine::randParameters() { m->machine->randParameters(); }
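// Returns the output of the named layer as a Matrix. Only machines that are
// neural networks support this; any other machine type throws UnsupportError.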
Matrix* GradientMachine::getLayerOutput(const std::string& layerName) const
    throw(UnsupportError) {
  auto nn = std::dynamic_pointer_cast<paddle::NeuralNetwork>(m->machine);
  if (nn) {
    auto mat = nn->getLayerOutput(layerName);
    return Matrix::createByPaddleMatrixPtr(&mat);
  } else {
    throw UnsupportError();
  }
}

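// Exposes this machine as a SequenceGenerator configured with the given
// dictionary, begin/end-of-sequence ids, maximum output length, and beam
// size.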
SequenceGenerator* GradientMachine::asSequenceGenerator(
    const std::vector<std::string>& dict, size_t begin_id, size_t end_id,
    size_t max_length, size_t beam_size) {
  SequenceGenerator* r =
      SequenceGenerator::createByGradientMachineSharedPtr(&m->machine);
  r->setDict(dict);
  r->setBos(begin_id);
  r->setEos(end_id);
  r->setMaxLength(max_length);
  r->setBeamSize(beam_size);
  return r;
}
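// A minimal usage sketch (illustrative only; it assumes CREATE_MODE_NORMAL,
// PASS_TEST, and Arguments::createArguments are declared in PaddleAPI.h as
// elsewhere in this API, and that `protoStr` holds a valid serialized
// ModelConfig and `inArgs` is already populated):
//
//   GradientMachine* machine = GradientMachine::createByConfigProtoStr(
//       protoStr, CREATE_MODE_NORMAL, GradientMachine::defaultParamTypes);
//   machine->randParameters();
//   Arguments* outArgs = Arguments::createArguments(0);
//   machine->forward(*inArgs, outArgs, PASS_TEST);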