Merge pull request #8 from reyoung/feature/refactorize_framework_proto
Catch-up with develop branch
revert-3824-remove_grad_op_type
commit 5ac3641baa
@@ -0,0 +1,73 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <memory.h>
#include <cstring>

#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/place.h"

namespace paddle {
namespace operators {

// CPU implementation: copy one contiguous slice per index with memcpy.
template <typename T>
void CPUGather(const T* params, const int* indices, const int slice_size,
               const int index_size, T* output) {
  const size_t slice_bytes = slice_size * sizeof(T);

  for (int i = 0; i < index_size; ++i) {
    int index_ = indices[i];
    memcpy(output + i * slice_size, params + index_ * slice_size, slice_bytes);
  }
}

// GPU implementation (declaration only; defined in the CUDA source):
template <typename T>
void GPUGather(const T* src, const int* index, const int slice_size,
               const int index_size, T* output);

/**
 * Return a new tensor from the source tensor, gathered according to index.
 * input[src]: type-T source Tensor
 * input[index]: type-int index Tensor (1-D)
 * return: output tensor
 */
template <typename T>
void Gather(const platform::Place& place, const paddle::framework::Tensor* src,
            const paddle::framework::Tensor* index,
            paddle::framework::Tensor* output) {
  // check that index is a 1-D tensor
  PADDLE_ENFORCE(index->dims().size() == 1);
  int index_size = index->dims()[0];

  auto src_dims = src->dims();
  paddle::framework::DDim output_dims(src_dims);
  output_dims[0] = index_size;

  // slice size: number of elements in one row of src (all dims except dim 0)
  int slice_size = 1;
  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];

  // Gathering
  if (platform::is_cpu_place(place)) {
    CPUGather<T>(src->data<T>(), index->data<int>(), slice_size, index_size,
                 output->data<T>());
  }
}

}  // namespace operators
}  // namespace paddle
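To make the slice arithmetic above concrete, here is a minimal standalone sketch of the same row-gather idea in plain C++. It does not use the Paddle Tensor API; the function name, shapes, and values below are made up for illustration.

#include <cstring>
#include <iostream>
#include <vector>

// Gather rows of a row-major [rows x cols] matrix according to `indices`,
// mirroring the memcpy-per-slice pattern in CPUGather.
template <typename T>
void GatherRows(const T* params, const int* indices, int cols, int index_size,
                T* output) {
  const size_t slice_bytes = cols * sizeof(T);
  for (int i = 0; i < index_size; ++i) {
    std::memcpy(output + i * cols, params + indices[i] * cols, slice_bytes);
  }
}

int main() {
  // 3x4 source matrix holding 0..11; gather rows 1 and 0.
  std::vector<int> src(12);
  for (int i = 0; i < 12; ++i) src[i] = i;
  int indices[] = {1, 0};
  std::vector<int> out(2 * 4);

  GatherRows(src.data(), indices, /*cols=*/4, /*index_size=*/2, out.data());

  for (int v : out) std::cout << v << " ";  // prints 4 5 6 7 0 1 2 3
  std::cout << std::endl;
  return 0;
}

The gather test in the next file exercises exactly this case through the Tensor-based Gather wrapper.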
@@ -0,0 +1,48 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/gather.h"
#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/place.h"

#include <gtest/gtest.h>
#include <iostream>
#include <string>

TEST(Gather, GatherData) {
  using namespace paddle::framework;
  using namespace paddle::platform;
  using namespace paddle::operators;

  Tensor* src = new Tensor();
  Tensor* index = new Tensor();
  Tensor* output = new Tensor();

  int* p_src = nullptr;
  int* p_index = nullptr;
  p_src = src->mutable_data<int>(make_ddim({3, 4}), CPUPlace());
  p_index = index->mutable_data<int>(make_ddim({2}), CPUPlace());

  // src is a 3x4 matrix filled with 0..11; index selects rows 1 and 0.
  for (size_t i = 0; i < 12; ++i) p_src[i] = i;
  p_index[0] = 1;
  p_index[1] = 0;

  int* p_output = output->mutable_data<int>(make_ddim({2, 4}), CPUPlace());

  Gather<int>(CPUPlace(), src, index, output);

  // row 0 of output is row 1 of src (4..7); row 1 of output is row 0 (0..3).
  for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4);
  for (size_t i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4);

  delete src;
  delete index;
  delete output;
}
@@ -0,0 +1,82 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <random>
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename T>
class GaussianRandomKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    float mean = context.op_.GetAttr<float>("mean");
    float std = context.op_.GetAttr<float>("std");
    auto* tensor = context.Output<framework::Tensor>(0);
    T* data = tensor->mutable_data<T>(context.GetPlace());

    // TODO(dzh): attributes do not support unsigned int yet,
    // and we need a global random seed configuration.
    int seed = context.op_.GetAttr<int>("seed");
    if (seed == 0) {
      seed = std::random_device()();
    }
    std::mt19937 g(seed);
    std::normal_distribution<T> distribution(mean, std);
    ssize_t size = framework::product(tensor->dims());
    for (int i = 0; i < size; ++i) {
      data[i] = distribution(g);
    }
  }
};

class GaussianRandomOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext& context) const override {
    auto* tensor = context.Output<framework::Tensor>(0);
    auto dims = GetAttr<std::vector<int>>("dims");
    PADDLE_ENFORCE(dims.size() > 0UL,
                   "dims can be one int or an array. dims must be set.");
    tensor->Resize(framework::make_ddim(dims));
  }
};

class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  GaussianRandomOpMaker(framework::OpProto* proto,
                        framework::OpAttrChecker* op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddOutput("Out", "output matrix of the random op");
    AddComment(R"DOC(
GaussianRandom operator.
Used to initialize a tensor with a gaussian random generator.
)DOC");

    AddAttr<std::vector<int>>("dims", "The dimension of the random tensor.");
    AddAttr<float>("mean", "mean value of the random values.").SetDefault(.0f);
    AddAttr<float>("std", "standard deviation of the random values.")
        .SetDefault(1.0f);
    AddAttr<int>("seed",
                 "Random seed of the generator. "
                 "0 means use a system-wide seed.")
        .SetDefault(0);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(gaussian_random, ops::GaussianRandomOp, ops::GaussianRandomOpMaker);
REGISTER_OP_CPU_KERNEL(gaussian_random, ops::GaussianRandomKernel<float>);
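The seed convention above (seed == 0 falls back to std::random_device) and the normal-distribution fill can be tried outside the framework. A minimal standalone sketch using only the C++ standard library; the helper name, buffer size, mean, and std below are arbitrary choices for illustration:

#include <iostream>
#include <random>
#include <vector>

// Fill `data` with samples from N(mean, std), seeding from the system when
// seed == 0 -- the same convention the kernel's "seed" attribute uses.
void FillGaussian(std::vector<float>* data, float mean, float std, int seed) {
  if (seed == 0) {
    seed = std::random_device()();
  }
  std::mt19937 g(seed);
  std::normal_distribution<float> distribution(mean, std);
  for (auto& v : *data) {
    v = distribution(g);
  }
}

int main() {
  std::vector<float> data(1000 * 784);
  FillGaussian(&data, 0.0f, 1.0f, /*seed=*/10);
  std::cout << data[0] << std::endl;  // a fixed seed gives reproducible draws
  return 0;
}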
@@ -0,0 +1,52 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include <random>
#include "paddle/platform/dynload/curand.h"
#include "paddle/platform/gpu_info.h"

#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename T>
class GaussianRandomKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    float mean = context.op_.GetAttr<float>("mean");
    float std = context.op_.GetAttr<float>("std");
    auto* tensor = context.Output<framework::Tensor>(0);
    T* data = tensor->mutable_data<T>(context.GetPlace());

    // seed == 0 means "pick a fresh seed from the system".
    int seed = context.op_.GetAttr<int>("seed");
    if (seed == 0) {
      seed = std::random_device()();
    }
    // cuRAND host API: create a generator, seed it, then fill the output
    // tensor with normally distributed values on the device.
    curandGenerator_t g;
    PADDLE_ENFORCE(platform::dynload::curandCreateGenerator(
        &g, CURAND_RNG_PSEUDO_DEFAULT));
    PADDLE_ENFORCE(
        platform::dynload::curandSetPseudoRandomGeneratorSeed(g, seed));
    curandGenerateNormal(g, data, framework::product(tensor->dims()), mean,
                         std);
    // release the generator so repeated Compute calls do not leak
    // cuRAND resources.
    curandDestroyGenerator(g);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(gaussian_random, ops::GaussianRandomKernel<float>);
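For reference, the cuRAND host-API sequence the kernel relies on (create a generator, seed it, generate, destroy it) looks roughly like this as a standalone program. This is only a sketch assuming a plain CUDA toolkit build outside Paddle, linked against curand and cudart; the buffer size, seed, and printed value are arbitrary:

#include <cuda_runtime.h>
#include <curand.h>
#include <cstdio>

int main() {
  const size_t n = 1024;  // curandGenerateNormal expects an even count
  float* d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(float));

  curandGenerator_t gen;
  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
  curandSetPseudoRandomGeneratorSeed(gen, 10ULL);
  curandGenerateNormal(gen, d_data, n, /*mean=*/0.0f, /*stddev=*/1.0f);
  curandDestroyGenerator(gen);

  float first = 0.0f;
  cudaMemcpy(&first, d_data, sizeof(float), cudaMemcpyDeviceToHost);
  printf("%f\n", first);

  cudaFree(d_data);
  return 0;
}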
@@ -0,0 +1,32 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <boost/config.hpp>

#ifndef PADDLE_ONLY_CPU

// Boost's variadic templates are buggy under nvcc, so Boost disables
// variadic template support when compiling GPU code with nvcc.
// Define BOOST_NO_CXX11_VARIADIC_TEMPLATES on gcc/clang as well so that
// both compilers generate the same function symbols.
//
// https://github.com/PaddlePaddle/Paddle/issues/3386
#ifndef BOOST_NO_CXX11_VARIADIC_TEMPLATES
#define BOOST_NO_CXX11_VARIADIC_TEMPLATES
#endif
#endif

#include <boost/variant.hpp>
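The workaround can be reproduced in isolation: defining the macro before including boost/variant.hpp selects Boost's non-variadic code path, so host- and device-compiled objects agree on the boost::variant symbols. A rough standalone sketch, assuming Boost is installed; the variant alternatives here are chosen only for illustration:

// Force the non-variadic boost::variant implementation, as the wrapper
// header above does for GPU builds.
#define BOOST_NO_CXX11_VARIADIC_TEMPLATES
#include <boost/variant.hpp>

#include <iostream>
#include <string>

int main() {
  // boost::variant behaves the same for a fixed set of alternatives;
  // only the variadic-template constructors are disabled.
  boost::variant<int, std::string> v = 3;
  v = std::string("gpu");
  std::cout << boost::get<std::string>(v) << std::endl;
  return 0;
}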
@@ -0,0 +1,36 @@
import unittest
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
import numpy


class GaussianRandomTest(unittest.TestCase):
    def test_cpu(self):
        self.gaussian_random_test(place=core.CPUPlace())

    def test_gpu(self):
        if core.is_compile_gpu():
            self.gaussian_random_test(place=core.GPUPlace(0))

    def gaussian_random_test(self, place):
        scope = core.Scope()
        scope.new_var("Out").get_tensor()

        op = Operator(
            "gaussian_random",
            Out="Out",
            dims=[1000, 784],
            mean=.0,
            std=1.,
            seed=10)

        op.infer_shape(scope)
        context = core.DeviceContext.create(place)
        op.run(scope, context)
        tensor = numpy.array(scope.find_var("Out").get_tensor())
        self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
        self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)


if __name__ == '__main__':
    unittest.main()