new sampling op

revert-12469-sum_op_dim_fix
tangwei12 7 years ago
parent 0964de119b
commit e0ab2f7158

@@ -0,0 +1,64 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/sampling_id_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
class SamplingIdOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SamplingIdOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SamplingIdOp should not be null.");
    auto input_dims = ctx->GetInputDim("X");
    // One id is sampled per row of X, so Out is a vector whose length is
    // batch_size (the first dimension of X).
    ctx->SetOutputDim("Out", {input_dims[0]});
    ctx->ShareLoD("X", "Out");
  }
};
class SamplingIdOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input probability tensor, 2-D with shape "
             "[batch_size, input_feature_dimensions]. Each row is a "
             "multinomial distribution to sample an id from.");
    AddOutput("Out", "The sampled ids, one per row of X.");
    AddComment(R"DOC(
SamplingId Operator.

Samples an id from the multinomial distribution given by each row of the
input tensor. One id is sampled per row (sample), and the results are
stored in Out.
)DOC");
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    samplingid,
    ops::SamplingIdKernel<paddle::platform::CUDADeviceContext, float>,
    ops::SamplingIdKernel<paddle::platform::CUDADeviceContext, double>,
    ops::SamplingIdKernel<paddle::platform::CUDADeviceContext, int>,
    ops::SamplingIdKernel<paddle::platform::CUDADeviceContext, int64_t>);
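
For reference, the sampling this operator performs can be sketched independently of Paddle: for a single row of probabilities, draw r uniformly from [0, 1) and return the first index at which the running sum of the row exceeds r. The helper below is only an illustrative sketch (the name SampleIdFromRow and the std::vector<double> row type are assumptions, not part of the commit); it mirrors the inner loop of SamplingIdKernel::Compute shown in the header further down.

#include <random>
#include <vector>

// Inverse-CDF sampling over one row of probabilities: subtract entries from a
// uniform draw r until it goes negative; the index where that happens is the
// sampled id. The last index is the fallback if rounding keeps r positive.
// Illustrative sketch only; not part of the operator's source.
int SampleIdFromRow(const std::vector<double>& row,
                    std::default_random_engine* eng) {
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  const int width = static_cast<int>(row.size());
  double r = uniform(*eng);
  int id = width - 1;
  for (int j = 0; j < width; ++j) {
    if ((r -= row[j]) < 0) {
      id = j;
      break;
    }
  }
  return id;
}

Called once per row of a [batch_size, width] probability matrix, this yields the batch_size ids that the operator writes to Out.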

@@ -0,0 +1,40 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/sampling_id_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
class SamplingIdOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(samplingid, ops::SamplingIdOp, ops::SamplingIdOpMaker,
                  paddle::framework::EmptyGradOpMaker);
REGISTER_OP_CPU_KERNEL(
    samplingid,
    ops::SamplingIdKernel<paddle::platform::CPUDeviceContext, int>,
    ops::SamplingIdKernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::SamplingIdKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SamplingIdKernel<paddle::platform::CPUDeviceContext, double>);

@@ -0,0 +1,68 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <random>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;

template <typename DeviceContext, typename T>
class SamplingIdKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* input = context.Input<Tensor>("X");
    const int batch_size = static_cast<int>(input->dims()[0]);
    const int width = static_cast<int>(input->dims()[1]);

    // Each of the batch_size rows of X holds `width` probabilities that
    // describe a multinomial distribution.
    const T* buf = input->data<T>();

    // Produces random floating-point values, uniformly distributed on [0, 1).
    std::uniform_real_distribution<double> rand1(0.0, 1.0);
    auto& reng = get();

    // Inverse-CDF sampling: subtract the row entries from r until it drops
    // below zero; the index where that happens is the sampled id. The last
    // index is the fallback in case rounding keeps r positive.
    std::vector<int> ids(batch_size);
    for (int i = 0; i < batch_size; ++i) {
      double r = rand1(reng);
      int id = width - 1;
      for (int j = 0; j < width; ++j) {
        if ((r -= static_cast<double>(buf[i * width + j])) < 0) {
          id = j;
          break;
        }
      }
      ids[i] = id;
    }

    std::vector<int64_t> out_dim;
    out_dim.push_back(static_cast<int64_t>(batch_size));

    Tensor* output = context.Output<Tensor>("Out");
    output->Resize(framework::make_ddim(out_dim));
    output->mutable_data<T>(context.GetPlace());
    framework::TensorFromVector(ids, context.device_context(), output);
  }

 private:
  // A single engine, seeded once with defaultSeed and shared by all calls
  // to Compute.
  std::default_random_engine& get() const {
    static std::default_random_engine engine(defaultSeed);
    return engine;
  }

  unsigned int defaultSeed = 0;
};
} // namespace operators
} // namespace paddle
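
A simple way to sanity-check the kernel is to run the same inverse-CDF loop many times on one fixed row and compare the empirical frequencies with the input probabilities; with the kernel's default seed of 0 such a run is reproducible. The standalone program below is a hedged sketch of that check (the example row {0.2, 0.5, 0.3} and the number of draws are invented for illustration and are not part of the commit).

#include <cstdio>
#include <random>
#include <vector>

int main() {
  // One example row of X; each entry is the probability of its index.
  const std::vector<double> row = {0.2, 0.5, 0.3};
  const int width = static_cast<int>(row.size());

  std::default_random_engine eng(0);  // same default seed as the kernel
  std::uniform_real_distribution<double> uniform(0.0, 1.0);

  std::vector<int> counts(width, 0);
  const int kDraws = 100000;
  for (int n = 0; n < kDraws; ++n) {
    // Same inverse-CDF walk as SamplingIdKernel::Compute.
    double r = uniform(eng);
    int id = width - 1;
    for (int j = 0; j < width; ++j) {
      if ((r -= row[j]) < 0) {
        id = j;
        break;
      }
    }
    ++counts[id];
  }

  for (int j = 0; j < width; ++j) {
    // Each frequency should land close to row[j] (roughly 0.2, 0.5, 0.3).
    std::printf("id %d: %.3f\n", j, static_cast<double>(counts[j]) / kDraws);
  }
  return 0;
}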