Merge pull request #2971 from QiJune/implement_basic_OpKernel
Implement some basic OpKernel
commit e2880f16c8
--- a/paddle/operators/add_op.cu
+++ b/paddle/operators/add_op.cu
@@ -1,6 +1,5 @@
 #include "paddle/operators/add_op.h"
 #include "paddle/framework/op_registry.h"
 
-typedef paddle::operators::AddKernel<::paddle::platform::GPUPlace, float> AddKernel_GPU_float;
 REGISTER_OP_GPU_KERNEL(add_two,
-                       AddKernel_GPU_float);
+                       paddle::operators::AddKernel<paddle::platform::GPUPlace, float>);
--- a/paddle/operators/rowwise_add_op.cu
+++ b/paddle/operators/rowwise_add_op.cu
@@ -1,6 +1,6 @@
-#include <paddle/framework/op_registry.h>
-#include <paddle/operators/rowwise_add_op.h>
+#include "paddle/framework/op_registry.h"
+#include "paddle/operators/rowwise_add_op.h"
 
 REGISTER_OP_GPU_KERNEL(
     rowwise_add,
-    paddle::operators::RowWiseAddKernel<paddle::platform::GPUPlace>);
+    paddle::operators::RowWiseAddKernel<paddle::platform::GPUPlace, float>);
--- a/paddle/operators/sigmoid_op.cu
+++ b/paddle/operators/sigmoid_op.cu
@@ -1,5 +1,5 @@
-#include <paddle/operators/sigmoid_op.h>
-#include <paddle/framework/op_registry.h>
+#include "paddle/operators/sigmoid_op.h"
+#include "paddle/framework/op_registry.h"
 
 REGISTER_OP_GPU_KERNEL(
-    sigmoid, paddle::operators::SigmoidKernel<paddle::platform::GPUPlace>);
+    sigmoid, paddle::operators::SigmoidKernel<paddle::platform::GPUPlace, float>);
--- a/paddle/operators/softmax_op.cu
+++ b/paddle/operators/softmax_op.cu
@@ -1,5 +1,5 @@
-#include <paddle/framework/op_registry.h>
-#include <paddle/operators/softmax_op.h>
+#include "paddle/framework/op_registry.h"
+#include "paddle/operators/softmax_op.h"
 
 REGISTER_OP_GPU_KERNEL(
-    softmax, paddle::operators::SoftmaxKernel<paddle::platform::GPUPlace>);
+    softmax, paddle::operators::SoftmaxKernel<paddle::platform::GPUPlace, float>);
--- a/python/paddle/v2/framework/tests/CMakeLists.txt
+++ b/python/paddle/v2/framework/tests/CMakeLists.txt
@@ -1,3 +1,14 @@
-add_python_test(test_framework test_protobuf.py test_scope.py
-                test_default_scope_funcs.py test_op_creation_methods.py
-                test_tensor.py test_fc_op.py test_add_two_op.py test_sgd_op.py test_cross_entropy_op.py)
+add_python_test(test_framework
+    test_protobuf.py
+    test_scope.py
+    test_default_scope_funcs.py
+    test_op_creation_methods.py
+    test_tensor.py
+    test_fc_op.py
+    test_add_two_op.py
+    test_sgd_op.py
+    test_cross_entropy_op.py
+    test_mul_op.py
+    test_sigmoid_op.py
+    test_softmax_op.py
+    test_rowwise_add_op.py)
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_mul_op.py
@@ -0,0 +1,17 @@
+import unittest
+from op_test_util import OpTestMeta
+import numpy as np
+
+
+class TestMulOp(unittest.TestCase):
+    __metaclass__ = OpTestMeta
+
+    def setUp(self):
+        self.type = "mul"
+        self.X = np.random.random((32, 784)).astype("float32")
+        self.Y = np.random.random((784, 100)).astype("float32")
+        self.Out = np.dot(self.X, self.Y)
+
+
+if __name__ == '__main__':
+    unittest.main()
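All four new tests lean on the `OpTestMeta` metaclass imported from `op_test_util.py`, which this diff does not show: each test only sets `self.type` plus numpy inputs and the expected output in `setUp()`, and the metaclass supplies the actual test method. Below is a minimal, self-contained sketch of that pattern, assuming a toy `NUMPY_OPS` table in place of the real framework (which runs the registered C++ kernel); every name in it is illustrative, not Paddle's actual API.

```python
import unittest
import numpy as np

# Toy stand-in for the framework: op type name -> numpy reference
# implementation. Purely an assumption for illustration; the real
# OpTestMeta executes the registered C++ operator instead.
NUMPY_OPS = {
    "mul": lambda t: np.dot(t.X, t.Y),
    "rowwise_add": lambda t: np.add(t.X, t.b),
}


class OpTestMeta(type):
    """Inject a generic test method into any TestCase whose setUp()
    defines self.type, the op's numpy inputs, and self.Out."""

    def __new__(mcs, name, bases, attrs):
        if "setUp" in attrs:
            def test_output(self):
                # unittest runs setUp() before this generated method,
                # so the numpy attributes are already populated here.
                actual = NUMPY_OPS[self.type](self)
                np.testing.assert_allclose(actual, self.Out, rtol=1e-5)
            attrs["test_output"] = test_output
        return super().__new__(mcs, name, bases, attrs)


# Python 3 spelling; the Python 2 code in this PR writes
# `__metaclass__ = OpTestMeta` inside the class body instead.
class TestMulOpSketch(unittest.TestCase, metaclass=OpTestMeta):
    def setUp(self):
        self.type = "mul"
        self.X = np.random.random((32, 784)).astype("float32")
        self.Y = np.random.random((784, 100)).astype("float32")
        self.Out = np.dot(self.X, self.Y)


if __name__ == "__main__":
    unittest.main()
```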
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_rowwise_add_op.py
@@ -0,0 +1,17 @@
+import unittest
+from op_test_util import OpTestMeta
+import numpy as np
+
+
+class TestRowwiseAddOp(unittest.TestCase):
+    __metaclass__ = OpTestMeta
+
+    def setUp(self):
+        self.type = "rowwise_add"
+        self.X = np.random.random((32, 784)).astype("float32")
+        self.b = np.random.random(784).astype("float32")
+        self.Out = np.add(self.X, self.b)
+
+
+if __name__ == '__main__':
+    unittest.main()
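One detail worth noting in the expected-output line above: `np.add` broadcasts the 1-D bias `b` (shape `(784,)`) across every row of `X` (shape `(32, 784)`), which matches the rowwise-add semantics the GPU kernel registered earlier is meant to implement. A tiny demonstration of that broadcasting:

```python
import numpy as np

X = np.arange(6, dtype="float32").reshape(2, 3)
b = np.array([10, 20, 30], dtype="float32")

# b has shape (3,), X has shape (2, 3): numpy broadcasts b onto each row.
print(np.add(X, b))
# [[10. 21. 32.]
#  [13. 24. 35.]]
```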
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py
@@ -0,0 +1,16 @@
+import unittest
+from op_test_util import OpTestMeta
+import numpy as np
+
+
+class TestSigmoidOp(unittest.TestCase):
+    __metaclass__ = OpTestMeta
+
+    def setUp(self):
+        self.type = "sigmoid"
+        self.X = np.random.random((32, 100)).astype("float32")
+        self.Y = 1 / (1 + np.exp(-self.X))
+
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_softmax_op.py
@@ -0,0 +1,23 @@
+import unittest
+from op_test_util import OpTestMeta
+import numpy as np
+
+
+def stable_softmax(x):
+    """Compute the softmax of vector x in a numerically stable way."""
+    shiftx = x - np.max(x)
+    exps = np.exp(shiftx)
+    return exps / np.sum(exps)
+
+
+class TestSoftmaxOp(unittest.TestCase):
+    __metaclass__ = OpTestMeta
+
+    def setUp(self):
+        self.type = "softmax"
+        self.X = np.random.random((32, 100)).astype("float32")
+        self.Y = np.apply_along_axis(stable_softmax, 1, self.X)
+
+
+if __name__ == '__main__':
+    unittest.main()
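The `stable_softmax` helper above subtracts the row maximum before exponentiating. Because softmax(x) equals softmax(x - c) for any constant c (the shift cancels between numerator and denominator), this changes nothing mathematically, but it keeps `np.exp` from overflowing on large inputs. A small demonstration of why the naive form fails:

```python
import numpy as np

x = np.array([1000.0, 1000.1])

# Naive form: np.exp(1000) overflows to inf, and inf / inf is nan
# (numpy also emits overflow/invalid-value RuntimeWarnings here).
naive = np.exp(x) / np.sum(np.exp(x))

# Shifted form: exponents are now <= 0, so np.exp stays finite.
shifted = np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))

print(naive)    # [nan nan]
print(shifted)  # [0.47502081 0.52497919]
```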