Merge pull request #12872 from sneaxiy/stack_op
Add stack_op for DAM model (branch: revert-12864-feature/process_lod_grad)
commit
5ea7bf88ba
@ -0,0 +1,48 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>  // size_t, used by Array<T, N>
#include <cstdint>

#include "paddle/fluid/platform/hostdevice.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace framework {
|
||||
template <typename T, size_t N>
|
||||
class Array {
|
||||
static_assert(N > 0, "The size of array must be larger than 0");
|
||||
|
||||
public:
|
||||
HOSTDEVICE Array() {}
|
||||
|
||||
HOSTDEVICE explicit Array(const T &val) {
|
||||
for (size_t i = 0; i < N; ++i) data_[i] = val;
|
||||
}
|
||||
|
||||
HOSTDEVICE const T *Get() const { return data_; }
|
||||
|
||||
HOSTDEVICE T *GetMutable() { return data_; }
|
||||
|
||||
HOSTDEVICE T &operator[](size_t index) { return data_[index]; }
|
||||
|
||||
HOSTDEVICE const T &operator[](size_t index) const { return data_[index]; }
|
||||
|
||||
HOSTDEVICE constexpr size_t size() const { return N; }
|
||||
|
||||
private:
|
||||
T data_[N];
|
||||
};
|
||||
|
||||
} // namespace framework
|
||||
} // namespace paddle
|
@ -0,0 +1,28 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "paddle/fluid/operators/stack_op.h"
|
||||
|
||||
// Short aliases used only by the registration macros below.
namespace plat = paddle::platform;
namespace ops = paddle::operators;
// Register the forward `stack` operator together with its custom grad-op
// descriptor maker, and the backward `stack_grad` operator.
REGISTER_OPERATOR(stack, ops::StackOp, ops::StackOpMaker,
                  ops::StackGradOpDescMaker);
REGISTER_OPERATOR(stack_grad, ops::StackOpGrad);

// CPU kernels for the forward op — float and double element types only.
REGISTER_OP_CPU_KERNEL(stack, ops::StackKernel<plat::CPUDeviceContext, float>,
                       ops::StackKernel<plat::CPUDeviceContext, double>);

// CPU kernels for the backward op, mirroring the forward dtype list.
REGISTER_OP_CPU_KERNEL(stack_grad,
                       ops::StackGradKernel<plat::CPUDeviceContext, float>,
                       ops::StackGradKernel<plat::CPUDeviceContext, double>);
|
@ -0,0 +1,25 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "paddle/fluid/operators/stack_op.h"
|
||||
|
||||
// Short aliases used only by the registration macros below.
namespace plat = paddle::platform;
namespace ops = paddle::operators;

// CUDA kernels for the forward `stack` op — float and double element types,
// matching the CPU registrations in stack_op.cc.
REGISTER_OP_CUDA_KERNEL(stack, ops::StackKernel<plat::CUDADeviceContext, float>,
                        ops::StackKernel<plat::CUDADeviceContext, double>);

// CUDA kernels for the backward `stack_grad` op.
REGISTER_OP_CUDA_KERNEL(stack_grad,
                        ops::StackGradKernel<plat::CUDADeviceContext, float>,
                        ops::StackGradKernel<plat::CUDADeviceContext, double>);
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,92 @@
|
||||
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from op_test import OpTest
|
||||
import numpy as np
|
||||
import unittest
|
||||
|
||||
|
||||
class TestStackOpBase(OpTest):
    """Base test for the `stack` operator.

    Checks that stacking `num_inputs` tensors of shape `input_dim` along
    `axis` matches numpy.stack, and that gradients flow to every input.
    Subclasses override initParameters() to vary the configuration.
    """

    def initDefaultParameters(self):
        # Defaults: stack four (5, 6, 7) float32 tensors along axis 0.
        self.num_inputs = 4
        self.input_dim = (5, 6, 7)
        self.axis = 0
        self.dtype = 'float32'

    def initParameters(self):
        # Hook for subclasses; the base case keeps the defaults.
        pass

    def get_x_names(self):
        # Input variable names: x0, x1, ..., x{num_inputs-1}.
        return ['x{}'.format(i) for i in range(self.num_inputs)]

    def setUp(self):
        self.initDefaultParameters()
        self.initParameters()
        self.op_type = 'stack'
        # Random inputs, one tensor per stacked slot.
        self.x = [
            np.random.random(size=self.input_dim).astype(self.dtype)
            for _ in range(self.num_inputs)
        ]

        # The op takes a list input: (name, ndarray) pairs under key 'X'.
        self.inputs = {'X': list(zip(self.get_x_names(), self.x))}
        self.outputs = {'Y': np.stack(self.x, axis=self.axis)}
        self.attrs = {'axis': self.axis}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(self.get_x_names(), 'Y')
|
||||
|
||||
|
||||
class TestStackOp1(TestStackOpBase):
    """Variant with 16 inputs (base default is 4)."""

    def initParameters(self):
        self.num_inputs = 16
|
||||
|
||||
|
||||
class TestStackOp2(TestStackOpBase):
    """Variant with 20 inputs (base default is 4)."""

    def initParameters(self):
        self.num_inputs = 20
|
||||
|
||||
|
||||
class TestStackOp3(TestStackOpBase):
    """Variant stacking along the last axis (negative axis, -1)."""

    def initParameters(self):
        self.axis = -1
|
||||
|
||||
|
||||
class TestStackOp4(TestStackOpBase):
    """Variant with the most-negative valid axis (-4 for 3-D inputs)."""

    def initParameters(self):
        self.axis = -4
|
||||
|
||||
|
||||
class TestStackOp5(TestStackOpBase):
    """Variant stacking along axis 1."""

    def initParameters(self):
        self.axis = 1
|
||||
|
||||
|
||||
class TestStackOp6(TestStackOpBase):
    """Variant stacking along axis 3 (the largest valid positive axis)."""

    def initParameters(self):
        self.axis = 3
|
||||
|
||||
|
||||
# Run all stack-op tests when executed directly as a script.
if __name__ == '__main__':
    unittest.main()
|
Loading…
Reference in new issue