Remove the constraint that the last dimension must be 1 by adding one_hot_v2 (#19716)
* add one_hot_v2_op to remove last_dims==1 test=develop
* add api unittest code for CI_Coverage test=develop
* improve CI_Coverage rate by adding test_with_depth test=develop
parent e352467c1c
commit 8c7e411908
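
For context, here is a minimal usage sketch (not part of the diff; it assumes a Fluid build containing this commit) of what the new op changes at the Python level: `fluid.input.one_hot`, backed by `one_hot_v2`, no longer requires the index tensor to end in a dimension of size 1, and appends `depth` as a new last dimension instead of replacing a trailing 1.

import numpy as np
import paddle.fluid as fluid

# The input keeps whatever shape it has; depth is appended as a new axis.
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
one_hot_label = fluid.input.one_hot(input=label, depth=10)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
label_data = np.random.randint(0, 10, size=(6, 1)).astype("int64")
out, = exe.run(feed={"label": label_data},
               fetch_list=[one_hot_label])
print(out.shape)  # (6, 1, 10): rank grows by one, no last_dims==1 constraint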
@ -0,0 +1,122 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/one_hot_v2_op.h"
#include <string>
#include <vector>
#include "paddle/fluid/framework/framework.pb.h"

namespace paddle {
namespace operators {

class OneHotV2Op : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
                      "Input(X) of OneHotV2Op should not be null.");
    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
                      "Output(Out) of OneHotV2Op should not be null.");

    auto x_dims = ctx->GetInputDim("X");
    PADDLE_ENFORCE_GE(x_dims.size(), 1,
                      "Rank of Input(X) should be at least 1.");

    int depth = ctx->Attrs().Get<int>("depth");
    if (ctx->HasInput("depth_tensor")) {
      depth = -1;
    }

    auto out_dims_vec = framework::vectorize(x_dims);
    out_dims_vec.push_back(depth);
    auto out_dims = framework::make_ddim(out_dims_vec);
    ctx->SetOutputDim("Out", out_dims);
    ctx->ShareLoD("X", /* --> */ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(ctx.Input<Tensor>("X")->type(),
                                   ctx.device_context());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "depth_tensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class OneHotV2OpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(LoDTensor, LoDTensor<int>) Input variable with rank at least "
             "1. Each value of X is an index to indicate the position.");
    AddInput("depth_tensor", "(Tensor, Tensor<int>), Length of one-hot vector")
        .AsDispensable();
    AddOutput("Out",
              "(Tensor, Tensor<float>) Output tensor with rank one greater "
              "than X. The tensor consists of one-hot representations of "
              "values in X.");

    AddAttr<int>("depth",
                 "A positive integer to specify the length of one-hot vector.")
        .SetDefault(-1);
    AddAttr<int>("dtype",
                 "An integer to specify the data type of one-hot "
                 "vector. The default value is FP32.")
        .SetDefault(paddle::framework::proto::VarType::FP32);
    AddAttr<bool>("allow_out_of_range",
                  "If it is set true and the input data is out of range, "
                  "the output tensor will be filled with zeros. The default "
                  "value is false.")
        .SetDefault(false);
    AddComment(R"DOC(
One Hot Operator. This operator creates the one-hot representations for input
index values. The following example will help to explain the function of this
operator:

X is a LoDTensor:
  X.lod = [[0, 1, 4]]
  X.shape = [4]
  X.data = [1, 1, 3, 0]

set depth = 4

Out is a LoDTensor:
  Out.lod = [[0, 1, 4]]
  Out.shape = [4, 4]
  Out.data = [[0., 1., 0., 0.],
              [0., 1., 0., 0.],
              [0., 0., 0., 1.],
              [1., 0., 0., 0.]]
)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(one_hot_v2, ops::OneHotV2Op, ops::OneHotV2OpMaker,
                  paddle::framework::EmptyGradOpMaker);
REGISTER_OP_CPU_KERNEL(
    one_hot_v2, ops::OneHotV2Kernel<paddle::platform::CPUDeviceContext, int>,
    ops::OneHotV2Kernel<paddle::platform::CPUDeviceContext, int64_t>);
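
As a plain-NumPy illustration of the InferShape rule above (illustrative only, not part of the commit): the output shape is the input shape with `depth` appended, so a rank-k index tensor produces a rank-(k+1) one-hot tensor.

import numpy as np

def one_hot_v2_reference(x, depth):
    # out.shape == x.shape + (depth,), with a 1.0 at each index position,
    # mirroring the kernel's set_constant(0) followed by scattered writes.
    out = np.zeros(x.shape + (depth,), dtype=np.float32)
    flat_x = x.reshape(-1)
    out.reshape(-1, depth)[np.arange(flat_x.size), flat_x] = 1.0
    return out

x = np.array([[1, 3], [0, 2]])           # rank 2, values in [0, 4)
print(one_hot_v2_reference(x, 4).shape)  # (2, 2, 4): rank 3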
@ -0,0 +1,99 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/one_hot_v2_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h"

namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;

template <typename InT, typename OutT>
__global__ void FillOutputKernel(const InT* p_in_data, OutT* p_out_data,
                                 const int64_t numel, const int depth) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < numel && p_in_data[idx] >= 0 && p_in_data[idx] < depth) {
    *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0;
  }
}

template <typename DeviceContext, typename InT>
struct OneHotV2OpCUDAFunctor {
  const framework::LoDTensor* in_;
  framework::LoDTensor* out_;
  const DeviceContext& ctx_;
  int depth_;

  OneHotV2OpCUDAFunctor(const framework::LoDTensor* in,
                        framework::LoDTensor* out, int depth,
                        const DeviceContext& ctx)
      : in_(in), out_(out), depth_(depth), ctx_(ctx) {}

  template <typename OutT>
  void apply() const {
    auto* p_in_data = in_->data<InT>();
    auto numel = in_->numel();
    auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace());
    auto stream = ctx_.stream();
    math::set_constant(ctx_, out_, 0.0);

    FillOutputKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
                           PADDLE_CUDA_NUM_THREADS,
                       PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
        p_in_data, p_out_data, numel, depth_);
  }
};

using LoDTensor = framework::LoDTensor;
template <typename DeviceContext, typename T>
class OneHotV2CUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in = context.Input<LoDTensor>("X");
    auto* out = context.Output<LoDTensor>("Out");

    int depth = -1;
    if (context.HasInput("depth_tensor")) {
      auto* depth_tensor = context.Input<framework::Tensor>("depth_tensor");
      if (platform::is_gpu_place(depth_tensor->place())) {
        framework::Tensor temp;
        TensorCopySync(*depth_tensor, platform::CPUPlace(), &temp);
        depth = *temp.data<int32_t>();
      } else {
        depth = *depth_tensor->data<int32_t>();
      }

      auto out_dims = out->dims();
      out_dims[out_dims.size() - 1] = depth;
      out->Resize(out_dims);
    } else {
      depth = context.Attr<int>("depth");
    }
    framework::VisitDataType(
        static_cast<framework::proto::VarType::Type>(
            context.Attr<int>("dtype")),
        OneHotV2OpCUDAFunctor<DeviceContext, T>(
            in, out, depth, context.template device_context<DeviceContext>()));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    one_hot_v2,
    ops::OneHotV2CUDAKernel<paddle::platform::CUDADeviceContext, int>,
    ops::OneHotV2CUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
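
The kernel launch above covers all `numel` input elements with one thread each, using ceiling division for the grid size; a quick sanity check of that arithmetic (illustrative Python, not part of the commit; the block size is a stand-in for PADDLE_CUDA_NUM_THREADS):

def grid_size(numel, threads_per_block=512):
    # Same shape as (numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS
    return (numel + threads_per_block - 1) // threads_per_block

assert grid_size(1) == 1     # a single partial block
assert grid_size(512) == 1   # exactly one full block
assert grid_size(513) == 2   # one extra element spills into a second block

The `idx < numel` guard in FillOutputKernel then discards the threads of the final block that fall past the end of the input.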
@ -0,0 +1,94 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename InT>
struct OneHotV2OpFunctor {
  const framework::LoDTensor* in_;
  framework::LoDTensor* out_;
  int depth_;
  const DeviceContext& ctx_;
  bool allow_out_of_range_;

  OneHotV2OpFunctor(const framework::LoDTensor* in, framework::LoDTensor* out,
                    int depth, const DeviceContext& ctx,
                    bool allow_out_of_range = false)
      : in_(in),
        out_(out),
        depth_(depth),
        ctx_(ctx),
        allow_out_of_range_(allow_out_of_range) {}

  template <typename OutT>
  void apply() const {
    auto* p_in_data = in_->data<InT>();
    auto numel = in_->numel();
    auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace());
    math::set_constant(ctx_, out_, 0.0);

    if (allow_out_of_range_) {
      for (int i = 0; i < numel; ++i) {
        if (p_in_data[i] >= 0 && p_in_data[i] < depth_) {
          *(p_out_data + i * depth_ + p_in_data[i]) = 1.0;
        }
      }
    } else {
      for (int i = 0; i < numel; ++i) {
        PADDLE_ENFORCE_GE(p_in_data[i], 0,
                          "Illegal index value, should be at least 0.");
        PADDLE_ENFORCE_LT(
            p_in_data[i], depth_,
            "Illegal index value, should be less than depth (%d).", depth_);
        *(p_out_data + i * depth_ + p_in_data[i]) = 1.0;
      }
    }
  }
};

using LoDTensor = framework::LoDTensor;
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class OneHotV2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in = context.Input<LoDTensor>("X");
    auto* out = context.Output<LoDTensor>("Out");
    int depth = context.Attr<int>("depth");
    bool allow_out_of_range = context.Attr<bool>("allow_out_of_range");
    if (context.HasInput("depth_tensor")) {
      auto* depth_tensor = context.Input<Tensor>("depth_tensor");
      auto* depth_data = depth_tensor->data<int32_t>();
      depth = depth_data[0];
      auto out_dims = out->dims();
      out_dims[out_dims.size() - 1] = depth;
      out->Resize(out_dims);
    }

    framework::VisitDataType(
        static_cast<framework::proto::VarType::Type>(
            context.Attr<int>("dtype")),
        OneHotV2OpFunctor<DeviceContext, T>(
            in, out, depth, context.template device_context<DeviceContext>(),
            allow_out_of_range));
  }
};

}  // namespace operators
}  // namespace paddle
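
A NumPy rendering of the two branches of `apply()` above (illustrative, not part of the commit): with `allow_out_of_range` set, an out-of-range index simply leaves its row all-zero, matching the guarded write; otherwise it is rejected, matching the PADDLE_ENFORCE checks.

import numpy as np

def one_hot_rows(x, depth, allow_out_of_range=False):
    out = np.zeros((x.size, depth), dtype=np.float32)
    for i, v in enumerate(x.reshape(-1)):
        if 0 <= v < depth:
            out[i, v] = 1.0
        elif not allow_out_of_range:
            raise ValueError(
                "Illegal index value %d, should be in [0, %d)." % (v, depth))
    return out

x = np.array([1, -1, 10, 3])
print(one_hot_rows(x, depth=10, allow_out_of_range=True))
# rows for -1 and 10 stay all-zero; with allow_out_of_range=False this raises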
@ -0,0 +1,67 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from .framework import Variable, in_dygraph_mode
from .layer_helper import LayerHelper

__all__ = ['one_hot']


def one_hot(input, depth, allow_out_of_range=False):
    """
    This layer creates the one-hot representations for input indices.

    Args:
        input(Variable): Input indices; each index marks the position that
            takes value 1.0 in its one-hot vector, while all other positions
            take value 0.
        depth(scalar): An integer defining the depth of the one-hot dimension.
        allow_out_of_range(bool): A bool value indicating whether the input
            indices could be out of range [0, depth). When input indices are
            out of range, an exception is raised if allow_out_of_range is
            False, or a zero-filled representation is created if it is set
            to True.

    Returns:
        Variable: The one-hot representations of input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
            one_hot_label = fluid.input.one_hot(input=label, depth=10)
    """
    helper = LayerHelper("one_hot_v2", **locals())

    one_hot_out = helper.create_variable_for_type_inference(dtype='float32')

    if in_dygraph_mode():
        inputs = {'X': input}
        attrs = {'depth': depth}
    else:
        if not isinstance(depth, Variable):
            # The depth arrives as a user-supplied attribute.
            inputs = {'X': input}
            attrs = {'depth': depth}
        else:
            depth.stop_gradient = True
            inputs = {'X': input, 'depth_tensor': depth}
            attrs = {}
    helper.append_op(
        type="one_hot_v2",
        inputs=inputs,
        attrs=attrs,
        outputs={'Out': one_hot_out},
        stop_gradient=True)
    return one_hot_out
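
When `depth` is a `Variable`, the function above routes it through the dispensable `depth_tensor` input rather than the `depth` attribute, so the one-hot length can be decided at run time. A short sketch of that path (illustrative, modeled on the `test_api_with_depthTensor` case in the unit tests below):

import numpy as np
import paddle.fluid as fluid

label = fluid.layers.data(name="label", shape=[1], dtype="int64")
# A tensor-valued depth is routed to 'depth_tensor' and stop_gradient is set.
depth = fluid.layers.assign(input=np.array([10], dtype=np.int32))
one_hot_label = fluid.input.one_hot(input=label, depth=depth)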
@ -0,0 +1,208 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import math
from op_test import OpTest
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
from paddle.fluid.framework import Program, program_guard


class TestOneHotOp(OpTest):
    def setUp(self):
        self.op_type = 'one_hot_v2'
        depth = 10
        depth_np = np.array(10).astype('int32')
        dimension = 12
        x_lod = [[4, 1, 3, 3]]
        x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
        x = np.array(x).astype('int32').reshape([sum(x_lod[0])])

        out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')

        for i in range(np.product(x.shape)):
            out[i, x[i]] = 1.0

        self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
        self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)}
        self.outputs = {'Out': (out, x_lod)}

    def test_check_output(self):
        self.check_output()


class TestOneHotOp_attr(OpTest):
    def setUp(self):
        self.op_type = 'one_hot_v2'
        depth = 10
        dimension = 12
        x_lod = [[4, 1, 3, 3]]
        x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
        x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])

        out = np.zeros(shape=(np.product(x.shape[:-1]), 1,
                              depth)).astype('float32')

        for i in range(np.product(x.shape)):
            out[i, 0, x[i]] = 1.0

        self.inputs = {'X': (x, x_lod)}
        self.attrs = {'dtype': int(core.VarDesc.VarType.FP32), 'depth': depth}
        self.outputs = {'Out': (out, x_lod)}

    def test_check_output(self):
        self.check_output()


class TestOneHotOp_default_dtype(OpTest):
    def setUp(self):
        self.op_type = 'one_hot_v2'
        depth = 10
        depth_np = np.array(10).astype('int32')
        dimension = 12
        x_lod = [[4, 1, 3, 3]]
        x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
        x = np.array(x).astype('int32').reshape([sum(x_lod[0])])

        out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')

        for i in range(np.product(x.shape)):
            out[i, x[i]] = 1.0

        self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
        self.attrs = {}
        self.outputs = {'Out': (out, x_lod)}

    def test_check_output(self):
        self.check_output()


class TestOneHotOp_default_dtype_attr(OpTest):
    def setUp(self):
        self.op_type = 'one_hot_v2'
        depth = 10
        dimension = 12
        x_lod = [[4, 1, 3, 3]]
        x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
        x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1])

        out = np.zeros(shape=(np.product(x.shape[:-1]), 1,
                              depth)).astype('float32')

        for i in range(np.product(x.shape)):
            out[i, 0, x[i]] = 1.0

        self.inputs = {'X': (x, x_lod)}
        self.attrs = {'depth': depth}
        self.outputs = {'Out': (out, x_lod)}

    def test_check_output(self):
        self.check_output()


class TestOneHotOp_out_of_range(OpTest):
    def setUp(self):
        self.op_type = 'one_hot_v2'
        depth = 10
        x_lod = [[4, 1, 3, 3]]
        x = [np.random.choice([-1, depth]) for i in range(sum(x_lod[0]))]
        x = np.array(x).astype('int32').reshape([sum(x_lod[0])])

        out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32')

        self.inputs = {'X': (x, x_lod)}
        self.attrs = {'depth': depth, 'allow_out_of_range': True}
        self.outputs = {'Out': (out, x_lod)}

    def test_check_output(self):
        self.check_output()


class TestOneHotOp_exception(OpTest):
    def setUp(self):
        self.op_type = 'one_hot_v2'
        self.depth = 10
        self.place = core.CPUPlace()
        self.dimension = 12
        self.x = core.LoDTensor()
        x_lod = [[4, 1, 3, 3]]
        data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))]
        data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1])
        self.x.set(data, self.place)
        self.x.set_recursive_sequence_lengths(x_lod)

    def test_check_output(self):
        program = Program()
        with program_guard(program):
            x = fluid.layers.data(
                name='x', shape=[self.dimension], dtype='float32', lod_level=1)
            block = program.current_block()
            one_hot_out = block.create_var(
                name="one_hot_out",
                type=core.VarDesc.VarType.LOD_TENSOR,
                dtype='float32')
            block.append_op(
                type='one_hot_v2',
                inputs={'X': x},
                attrs={'depth': self.depth},
                outputs={'Out': one_hot_out})
            exe = fluid.Executor(self.place)

            def run():
                exe.run(feed={'x': self.x},
                        fetch_list=[one_hot_out],
                        return_numpy=False)

            self.assertRaises(core.EnforceNotMet, run)


class TestOneHotOpApi(unittest.TestCase):
    def test_api(self):
        depth = 10
        self._run(depth)

    def test_api_with_depthTensor(self):
        depth = fluid.layers.assign(input=np.array([10], dtype=np.int32))
        self._run(depth)

    def test_api_with_dygraph(self):
        depth = 10
        label = np.array([np.random.randint(0, depth - 1)
                          for i in range(6)]).reshape([6, 1])
        with fluid.dygraph.guard():
            one_hot_label = fluid.input.one_hot(
                input=fluid.dygraph.to_variable(label), depth=depth)

    def _run(self, depth):
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        one_hot_label = fluid.input.one_hot(input=label, depth=depth)

        place = fluid.CPUPlace()
        label_data = np.array([np.random.randint(0, 10 - 1)
                               for i in range(6)]).reshape([6, 1])

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        ret = exe.run(feed={'label': label_data, },
                      fetch_list=[one_hot_label],
                      return_numpy=False)


if __name__ == '__main__':
    unittest.main()