Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into cmake_go_vendor
commit ff4eaa2999
@@ -1,3 +1,5 @@
cc_library(paddle_go_optimizer DEPS paddle_optimizer paddle_proto glog gflags protobuf)
go_library(paddle_pserver_cclient STATIC)
if(WITH_TESTING)
  add_subdirectory(test)
endif()
@@ -1,3 +1,2 @@
cc_binary(main SRCS main.c DEPS paddle_pserver_cclient)
cc_test(test_cclient SRCS test_cclient.c DEPS paddle_pserver_cclient)
add_style_check_target(test_cclient test_cclient.c)
@@ -1,93 +0,0 @@
#include <stdio.h>
#include <stdlib.h>

#include "libpaddle_pserver_cclient.h"

// TODO(helin): Fix: gtest using cmake is not working, using this
// hacky way for now.
#define fail()                                          \
  fprintf(stderr, "info: %s:%d: ", __FILE__, __LINE__); \
  exit(-1);

void sendGrads(paddle_pserver_client c) {
  unsigned char grad_a[2000] = {2};
  unsigned char grad_b[3000] = {3};
  paddle_gradient grad1 = {
      "param_a", PADDLE_ELEMENT_TYPE_FLOAT32, grad_a, 2000};
  paddle_gradient grad2 = {
      "param_b", PADDLE_ELEMENT_TYPE_FLOAT32, grad_b, 3000};
  paddle_gradient* grads[2] = {&grad1, &grad2};
  if (paddle_send_grads(c, grads, 2)) {
    fail();
  }
}

void getParams(paddle_pserver_client c) {
  paddle_parameter param_a;
  paddle_parameter param_b;
  char name_a[] = "param_a";
  char name_b[] = "param_b";
  // Must pre-allocate the parameter content before calling paddle_get_params.
  unsigned char content_a[2000] = {};
  unsigned char content_b[3000] = {};
  param_a.element_type = PADDLE_ELEMENT_TYPE_FLOAT32;
  param_a.name = name_a;
  param_a.content = content_a;
  param_a.content_len = 2000;
  param_b.element_type = PADDLE_ELEMENT_TYPE_FLOAT32;
  param_b.name = name_b;
  param_b.content = content_b;
  param_b.content_len = 3000;

  paddle_parameter* params[2] = {&param_a, &param_b};
  if (paddle_get_params(c, params, 2)) {
    fail();
  }
}

int main() {
  char addr[] = "localhost:3000";
  paddle_pserver_client c = paddle_new_pserver_client(addr, 1);
retry:
  // paddle_begin_init_params returns non-zero when this client is selected
  // to initialize the parameters; any init failure restarts the handshake.
  if (paddle_begin_init_params(c)) {
    paddle_parameter param;
    char name_a[] = "param_a";
    char name_b[] = "param_b";
    unsigned char content_a[2000] = {1};
    unsigned char content_b[3000] = {0};
    param.element_type = PADDLE_ELEMENT_TYPE_FLOAT32;
    param.name = name_a;
    param.content = content_a;
    param.content_len = 2000;
    int error = paddle_init_param(c, param, NULL, 0);
    if (error != 0) {
      goto retry;
    }

    param.element_type = PADDLE_ELEMENT_TYPE_FLOAT32;
    param.name = name_b;
    param.content = content_b;
    param.content_len = 3000;
    error = paddle_init_param(c, param, NULL, 0);
    if (error != 0) {
      goto retry;
    }

    error = paddle_finish_init_params(c);
    if (error != 0) {
      goto retry;
    }
  }

  int i;
  for (i = 0; i < 100; i++) {
    sendGrads(c);
    getParams(c);
  }

  if (paddle_save_model(c, "/tmp/")) {
    fail();
  }

  return 0;
}
Binary file not shown.
@@ -1,58 +0,0 @@
#include <stdlib.h>

#include "optimizer.h"

typedef int (*update_func)(void*, void*, paddle_element_type, const void*, int);
typedef void (*release_func)(void*);

typedef struct paddle_optimizer {
  update_func update;
  release_func release;
  void* optimizer;
} paddle_optimizer;

void paddle_release_optimizer(paddle_optimizer* o) {
  o->release(o->optimizer);
  free(o);
}

int paddle_update_parameter(paddle_optimizer* o,
                            void* buffer,
                            paddle_element_type element_type,
                            const void* gradient,
                            int num_bytes) {
  return o->update(o->optimizer, buffer, element_type, gradient, num_bytes);
}

typedef struct { double learning_rate; } SGD_optimizer;

int update_SGD(void* optimizer,
               void* buffer,
               paddle_element_type element_type,
               const void* gradient,
               int num_bytes) {
  SGD_optimizer* o = (SGD_optimizer*)optimizer;
  float* parameter = (float*)buffer;
  const float* grad = (const float*)gradient;

  int i;
  for (i = 0; i < num_bytes / sizeof(float); ++i) {
    parameter[i] -= o->learning_rate * grad[i];
  }
  return 0;
}

void release_SGD(void* optimizer) {
  // The SGD_optimizer itself was allocated on the heap in
  // paddle_create_SGD_optimizer, so it must be freed here.
  free(optimizer);
}

paddle_optimizer* paddle_create_SGD_optimizer(double learning_rate) {
  SGD_optimizer* impl = (SGD_optimizer*)malloc(sizeof(SGD_optimizer));
  impl->learning_rate = learning_rate;
  paddle_optimizer* opt = (paddle_optimizer*)malloc(sizeof(paddle_optimizer));
  opt->update = update_SGD;
  opt->release = release_SGD;
  opt->optimizer = impl;
  return opt;
}
@@ -1,22 +0,0 @@
#ifndef PADDLE_PSERVER_OPTIMIZER_H
#define PADDLE_PSERVER_OPTIMIZER_H

typedef enum {
  PADDLE_ELEMENT_TYPE_INT32 = 0,
  PADDLE_ELEMENT_TYPE_UINT32 = 1,
  PADDLE_ELEMENT_TYPE_INT64 = 2,
  PADDLE_ELEMENT_TYPE_UINT64 = 3,
  PADDLE_ELEMENT_TYPE_FLOAT32 = 4,
  PADDLE_ELEMENT_TYPE_FLOAT64 = 5,
} paddle_element_type;

struct paddle_optimizer;
struct paddle_optimizer* paddle_create_SGD_optimizer(double learning_rate);
void paddle_release_optimizer(struct paddle_optimizer* o);
int paddle_update_parameter(struct paddle_optimizer* o,
                            void* buffer,
                            paddle_element_type element_type,
                            const void* gradient,
                            int num_bytes);

#endif /* PADDLE_PSERVER_OPTIMIZER_H */
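
For reference, a minimal sketch of how this C optimizer API can be driven end to end, based only on the declarations in the header above; the buffer sizes and learning rate are illustrative, not from the diff:

#include <stdio.h>
#include "optimizer.h"

int main() {
  /* Create an SGD optimizer; update_SGD/release_SGD are wired up inside. */
  struct paddle_optimizer* o = paddle_create_SGD_optimizer(0.1);

  float param[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  const float grad[4] = {0.5f, 0.5f, 0.5f, 0.5f};

  /* One in-place SGD step: param[i] -= 0.1 * grad[i]. */
  if (paddle_update_parameter(o, param, PADDLE_ELEMENT_TYPE_FLOAT32, grad,
                              (int)sizeof(param)) != 0) {
    fprintf(stderr, "update failed\n");
    return 1;
  }

  paddle_release_optimizer(o);
  return 0;
}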
@@ -1,8 +1,24 @@
 package pserver

-import "testing"
+import (
+	"io/ioutil"
+	"testing"
+)

-func TestSGDCreateRelease(t *testing.T) {
-	o := newOptimizer(sgd, 1)
+func TestOptimizerCreateRelease(t *testing.T) {
+	p := Parameter{
+		Name:        "a",
+		ElementType: Int32,
+	}
+	p.Content = []byte{1, 3}
+	config, err := ioutil.ReadFile("./cclient/test/testdata/optimizer.pb.txt")
+	if err != nil {
+		t.Fatalf("read optimizer proto failed")
+	}
+	param := ParameterWithConfig{
+		Param:  p,
+		Config: config,
+	}
+	o := newOptimizer(param)
 	o.Cleanup()
 }
@@ -0,0 +1,78 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>       // std::unique_ptr
#include <type_traits>  // std::enable_if, std::is_pod

// NOTE: this header also relies on DDim, paddle::platform::Place/get_place,
// PADDLE_ASSERT and paddle::memory::Alloc from their framework/platform/memory
// headers, whose include paths are not spelled out in this diff.

namespace paddle {
namespace framework {

class Tensor {
  using paddle::platform::Place;
  using paddle::platform::get_place;

 public:
  template <typename T>
  const T* data() const {
    PADDLE_ASSERT(holder_ != nullptr,
                  "Tensor::data must be called after Tensor::mutable_data");
    return static_cast<const T*>(holder_->Ptr());
  }

  template <typename T,  // must be POD types
            typename = typename std::enable_if<std::is_pod<T>::value>::type>
  T* mutable_data(DDim dims, Place place) {
    if (holder_ == nullptr || holder_->Place() != place ||
        holder_->Size() < dims.product() * sizeof(T)) {
      holder_.reset(new PlaceholderImpl<T>(place, dims.product() * sizeof(T)));
    }
    return static_cast<T*>(holder_->Ptr());
  }

  template <typename T,  // must be POD types
            typename = typename std::enable_if<std::is_pod<T>::value>::type>
  T* mutable_data(DDim dims) {
    return mutable_data<T>(dims, paddle::platform::get_place());
  }

 private:
  // Placeholder hides type T, so it doesn't appear as a template
  // parameter of Variable.
  struct Placeholder {
    virtual ~Placeholder() {}
    virtual void* Ptr() const = 0;
    virtual Place Place() const = 0;
    virtual size_t Size() const = 0;
  };

  template <typename T>
  struct PlaceholderImpl : public Placeholder {
    PlaceholderImpl(Place pl, size_t size)
        : ptr_(paddle::memory::Alloc(pl, size), paddle::memory::Deleter(pl)),
          place_(pl),
          size_(size) {}

    virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); }
    virtual size_t Size() const { return size_; }
    virtual Place Place() const { return place_; }

    std::unique_ptr<T, memory::Deleter> ptr_;
    Place place_;  // record the place of ptr_.
    size_t size_;  // size of the memory block.
  };

  std::unique_ptr<Placeholder> holder_;  // holds the memory block if allocated.
};

}  // namespace framework
}  // namespace paddle
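
For orientation, a minimal sketch of how this Tensor interface is meant to be used, assuming DDim provides product() and a make_ddim() factory and that paddle::platform::get_place() returns the default place; all names outside this diff are illustrative:

#include "paddle/framework/tensor.h"

void Example() {
  paddle::framework::Tensor t;

  // Allocation is lazy: mutable_data() creates (or reuses) the underlying
  // Placeholder, sized to dims.product() * sizeof(float).
  float* buf = t.mutable_data<float>(paddle::framework::make_ddim({2, 3}),
                                     paddle::platform::get_place());
  for (int i = 0; i < 6; ++i) {
    buf[i] = static_cast<float>(i);
  }

  // data() is the read-only accessor; it asserts that mutable_data()
  // has been called first, then returns the same underlying buffer.
  const float* ro = t.data<float>();
  (void)ro;
}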