From 784740d8bee8e9127270edd1288289e9d9c864b8 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 11 Dec 2017 18:29:05 +0800 Subject: [PATCH 01/40] refine cos-sim-op --- paddle/operators/cos_sim_op.h | 189 +++++++++++++-------- paddle/operators/elementwise_op_function.h | 55 ++++++ 2 files changed, 170 insertions(+), 74 deletions(-) diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index fecb5a79b2..3a7e67506d 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -15,6 +15,7 @@ #pragma once #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" +#include "paddle/operators/elementwise_add_op.h" namespace paddle { namespace operators { @@ -27,6 +28,28 @@ template using EigenVector = framework::EigenVector; +template +void Function_forward(T* out, T* x_norm, T* y_norm, + ElementIterator& x, + ElementIterator& y, int row, int col) { + for (int i = 0; i < row; ++i) { + T xx = 0; + T yy = 0; + T xy = 0; + for (int j = 0; j < col; ++j) { + xy += (*x) * (*y); + xx += (*x) * (*x); + yy += (*y) * (*y); + ++y; + ++x; + } + x_norm[i] = sqrt(xx); + y_norm[i] = sqrt(yy); + + out[i] = xy / (x_norm[i] * y_norm[i]); + } +} + template class CosSimKernel : public framework::OpKernel { public: @@ -41,32 +64,63 @@ class CosSimKernel : public framework::OpKernel { out_x_norm->mutable_data(context.GetPlace()); out_y_norm->mutable_data(context.GetPlace()); - // convert Tensor to Eigen Tensor int rows_x = in_x->dims()[0]; int rows_y = in_y->dims()[0]; - auto x = EigenMatrix::Reshape(*in_x, 1); - auto y = EigenMatrix::Reshape(*in_y, 1); - auto z = EigenVector::Flatten(*out_z); - auto x_norm = EigenVector::Flatten(*out_x_norm); - auto y_norm = EigenVector::Flatten(*out_y_norm); - - // compute - auto& place = - *context.template device_context().eigen_device(); - auto row_along = Eigen::array({{1}}); - x_norm.device(place) = x.square().sum(row_along).sqrt(); - y_norm.device(place) = y.square().sum(row_along).sqrt(); - if (rows_x == rows_y) { - auto xy = (x * y).sum(Eigen::array({{1}})); - z.device(place) = xy / x_norm / y_norm; - } else { - Eigen::DSizes bcast(rows_x, 1); - auto xy = (x * y.broadcast(bcast)).sum(row_along); - z.device(place) = xy / x_norm / y_norm.broadcast(bcast); - } + + int cols = framework::product(in_x->dims()) / rows_x; + auto x_iter = ElementIterator(in_x->data(), rows_x, + cols, rows_x, cols); + auto y_iter = ElementIterator(in_y->data(), rows_y, + cols, rows_x, cols); + + Function_forward(out_z->data(), out_x_norm->data(), + out_y_norm->data(), x_iter, y_iter, rows_x, cols); + // + // // convert Tensor to Eigen Tensor + //// int rows_x = in_x->dims()[0]; + //// int rows_y = in_y->dims()[0]; + // auto x = EigenMatrix::Reshape(*in_x, 1); + // auto y = EigenMatrix::Reshape(*in_y, 1); + // auto z = EigenVector::Flatten(*out_z); + // auto x_norm = EigenVector::Flatten(*out_x_norm); + // auto y_norm = EigenVector::Flatten(*out_y_norm); + // + // // compute + // auto& place = + // *context.template device_context().eigen_device(); + // auto row_along = Eigen::array({{1}}); + // x_norm.device(place) = x.square().sum(row_along).sqrt(); + // y_norm.device(place) = y.square().sum(row_along).sqrt(); + // if (rows_x == rows_y) { + // auto xy = (x * y).sum(Eigen::array({{1}})); + // z.device(place) = xy / x_norm / y_norm; + // } else { + // Eigen::DSizes bcast(rows_x, 1); + // auto xy = (x * y.broadcast(bcast)).sum(row_along); + // z.device(place) = xy / x_norm / y_norm.broadcast(bcast); + // } } }; +template +void 
Function_element(T* result, ElementIterator dz, + ElementIterator y, + ElementIterator x_norm, + ElementIterator y_norm, + ElementIterator z, + ElementIterator x, int num, int block) { + for (int i = 0; i < num; ++i) { + result[i % block] += (*dz) * ((*y) / ((*x_norm) * (*y_norm)) - + (*z) * (*x) / ((*x_norm) * (*x_norm))); + ++dz; + ++y; + ++x_norm; + ++y_norm; + ++z; + ++x; + } +} + template class CosSimGradKernel : public framework::OpKernel { public: @@ -81,63 +135,50 @@ class CosSimGradKernel : public framework::OpKernel { auto* out_grad_y = context.Output(framework::GradVarName("Y")); auto* in_grad_z = context.Input(framework::GradVarName("Out")); - // convert Tensor to Eigen Tensor - auto x = EigenMatrix::Reshape(*in_x, 1); - auto y = EigenMatrix::Reshape(*in_y, 1); - auto z = EigenMatrix::Reshape(*in_z, 1); - auto x_norm = EigenMatrix::Reshape(*in_x_norm, 1); - auto y_norm = EigenMatrix::Reshape(*in_y_norm, 1); - auto dz = EigenMatrix::Reshape(*in_grad_z, 1); - // compute gradident int rows_x = in_x->dims()[0]; int rows_y = in_y->dims()[0]; int cols = framework::product(in_x->dims()) / rows_x; - Eigen::DSizes bcast_cols(1, cols); - auto z_bcast = z.broadcast(bcast_cols); - auto dz_bcast = dz.broadcast(bcast_cols); - auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast_cols); - auto& place = - *context.template device_context().eigen_device(); - if (rows_x == rows_y) { - auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_cols); - auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast_cols); - // compute dx - if (out_grad_x) { - out_grad_x->mutable_data(context.GetPlace()); - auto dx = EigenMatrix::Reshape(*out_grad_x, 1); - auto grad = y / norm_prod_bcast - z_bcast * x / x_snorm_bcast; - dx.device(place) = dz_bcast * grad; - } - // compute dy - if (out_grad_y) { - out_grad_y->mutable_data(context.GetPlace()); - auto dy = EigenMatrix::Reshape(*out_grad_y, 1); - auto grad = x / norm_prod_bcast - z_bcast * y / y_snorm_bcast; - dy.device(place) = dz_bcast * grad; - } - } else { - Eigen::DSizes bcast_rows(rows_x, 1); - Eigen::DSizes bcast_rows_cols(rows_x, cols); - auto y_bcast = y.broadcast(bcast_rows); - auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_rows_cols); - auto norm_prod_bcast = (x_norm * y_norm.eval().broadcast(bcast_rows)) - .eval() - .broadcast(bcast_cols); - // compute dx - if (out_grad_x) { - out_grad_x->mutable_data(context.GetPlace()); - auto dx = EigenMatrix::Reshape(*out_grad_x, 1); - auto grad = y_bcast / norm_prod_bcast - z_bcast * x / x_snorm_bcast; - dx.device(place) = dz_bcast * grad; - } - // compute dy - if (out_grad_y) { - out_grad_y->mutable_data(context.GetPlace()); - auto dy = EigenVector::Flatten(*out_grad_y); - auto grad = x / norm_prod_bcast - z_bcast * y_bcast / y_snorm_bcast; - dy.device(place) = (dz_bcast * grad).sum(Eigen::array({{0}})); - } + + ////////////////////////////// + // ## + auto x_iter = ElementIterator(in_x->data(), rows_x, + cols, rows_x, cols); + auto y_iter = ElementIterator(in_y->data(), rows_y, + cols, rows_x, cols); + auto z_iter = ElementIterator(in_z->data(), rows_x, 1, + rows_x, cols); + auto dz_iter = ElementIterator(in_grad_z->data(), + rows_x, 1, rows_x, cols); + auto x_norm_iter = ElementIterator( + in_x_norm->data(), rows_x, 1, rows_x, cols); + auto y_norm_iter = ElementIterator( + in_y_norm->data(), rows_y, 1, rows_x, cols); + // ## + ////////////////////////////// + // compute dx + if (out_grad_x) { + out_grad_x->mutable_data(context.GetPlace()); + + ////////////////////////////// + // 
## + Function_element(out_grad_x->data(), dz_iter, y_iter, x_norm_iter, + y_norm_iter, z_iter, x_iter, rows_x * cols, + rows_x * cols); + // ## + ////////////////////////////// + } + // compute dy + if (out_grad_y) { + out_grad_y->mutable_data(context.GetPlace()); + + ////////////////////////////// + // ## + Function_element(out_grad_y->data(), dz_iter, x_iter, y_norm_iter, + x_norm_iter, z_iter, y_iter, rows_x * cols, + rows_y * cols); + // ## + ////////////////////////////// } } }; diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h index 7ebfc7df8c..33b7d06467 100644 --- a/paddle/operators/elementwise_op_function.h +++ b/paddle/operators/elementwise_op_function.h @@ -131,6 +131,61 @@ class MidWiseTransformIterator { int post_; }; +template +class ElementIterator; + +// Fixed(zcd) : Only support 2D +template +class ElementIterator { + public: + ElementIterator(const T* ptr, int t_m, int t_n, int m, int n) + : ptr_(ptr), + index_(0), + i_(0), + j_(0), + t_m_(t_m), + t_n_(t_n), + m_(m), + n_(n) {} + + ElementIterator& operator++() { + ++j_; + + if ((j_ == n_)) { + j_ = 0; + ++i_; + } + int t_i = (t_m_ == 1) ? 0 : i_; + int t_j = (t_n_ == 1) ? 0 : j_; + index_ = t_i * t_n_ + t_j; + + return *this; + } + + bool operator==( + const ElementIterator& rhs) const { + return (ptr_ + index_) == &(*rhs); + } + + bool operator!=( + const ElementIterator& rhs) const { + return (ptr_ + index_) != &(*rhs); + } + + const T& operator*() { return ptr_[index_]; } + + private: + // t_m_ == m_ || t_n_ == n_ || (t_m_ == 1 && t_m_ == 1) + const T* ptr_; + int index_; + int i_; + int j_; + int64_t t_m_; + int64_t t_n_; + int64_t m_; + int64_t n_; +}; + #ifdef __NVCC__ template class RowwiseTransformIterator From bad3d4b661fca8ae74ab45ff980590e4709a71a9 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Mon, 25 Dec 2017 19:21:13 +0800 Subject: [PATCH 02/40] Grad Check For RNN --- .../operators/tensor_array_read_write_op.cc | 11 + paddle/operators/while_op.cc | 15 +- .../fluid/tests/test_dynrnn_gradient_check.py | 215 ++++++++++++++++++ 3 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/operators/tensor_array_read_write_op.cc index 2ee9bf700c..59a4dac940 100644 --- a/paddle/operators/tensor_array_read_write_op.cc +++ b/paddle/operators/tensor_array_read_write_op.cc @@ -136,6 +136,17 @@ class ReadFromArrayOp : public ArrayOp { auto &dev_ctx = *pool.Borrow(place); framework::CopyFrom(x_array[offset], place, dev_ctx, out_tensor); out_tensor->set_lod(x_array[offset].lod()); + if (Input("X") == "dynamic_rnn_0_output_array_fc_0.tmp_0_0@GRAD") { + VLOG(10) << "Offset = " << offset; + if (x_array[offset].numel() != 0) { + auto d = x_array[offset].dims(); + std::ostringstream sout; + for (int64_t i = 0; i < d[0]; ++i) { + sout << x_array[offset].data()[0 * d[1]] << ", "; + } + VLOG(10) << "Grad = " << sout.str(); + } + } } else { VLOG(10) << "offset " << offset << " >= " << x_array.size(); } diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc index 11ee96faad..d7c34297cd 100644 --- a/paddle/operators/while_op.cc +++ b/paddle/operators/while_op.cc @@ -129,6 +129,9 @@ class WhileGradOp : public framework::OperatorBase { auto &og_inside = detail::Ref(cur_scope.Var(inside_og_name), "Cannot find inside gradient %s", inside_og_name); + + VLOG(10) << "OG " << outside_og_name << " Type is " + << og_outside.Type().name(); if 
(og_outside.Type().hash_code() == typeid(framework::LoDTensor).hash_code()) { auto &outside_tensor = og_outside.Get(); @@ -145,7 +148,6 @@ class WhileGradOp : public framework::OperatorBase { inside_array.resize(outside_array.size()); for (size_t j = 0; j < inside_array.size(); ++j) { - VLOG(10) << j << " " << outside_array[j].numel(); if (outside_array[j].numel() != 0) { inside_array[j].set_lod(outside_array[j].lod()); inside_array[j].ShareDataWith(outside_array[j]); @@ -198,6 +200,17 @@ class WhileGradOp : public framework::OperatorBase { auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); + + VLOG(10) << "Accumulate the gradient of " << pg_names[param_id]; + + if (pg_names[param_id] == "W@GRAD") { + auto &w_g = detail::Ref(cur_scope.FindVar(new_inside_name)) + .Get(); + VLOG(10) << "W_G is" << w_g.data()[0]; + } else { + VLOG(10) << pg_names[param_id]; + } + sum_op->Run(cur_scope, dev_place); cur_scope.Rename(new_inside_name, inside_grad_name); } diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py new file mode 100644 index 0000000000..99b9285466 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -0,0 +1,215 @@ +import numpy +import random +import collections +import paddle.v2.fluid as fluid +import unittest +import copy + + +class Memory(object): + def __init__(self, shape, dtype='float32'): + self.ex = numpy.zeros(shape=shape, dtype=dtype) + self.cur = None + + def update(self, val): + assert val.shape == self.ex.shape + assert val.dtype == self.ex.dtype + self.cur = val + + def ex(self): + return self.ex + + def next(self): + self.ex = self.cur + self.cur = None + + def __next__(self): + self.next() + + def reset(self): + self.ex = numpy.zeros(shape=self.ex.shape, dtype=self.ex.dtype) + self.cur = None + + +class Output(object): + def __init__(self): + self.outs = [] + + def next_sequence(self): + self.outs.append([]) + + def out(self, val): + self.outs[-1].append(val) + + def last(self): + return self.outs[-1][-1] + + +class BaseRNN(object): + def __init__(self, ins, mems, params, outs, num_seq=5, max_seq_len=15): + self.num_seq = num_seq + self.inputs = collections.defaultdict(list) + + for _ in xrange(num_seq): + seq_len = random.randint(1, max_seq_len - 1) + for iname in ins: + ishape = ins[iname].get('shape', None) + idtype = ins[iname].get('dtype', 'float32') + lst = [] + for _ in xrange(seq_len): + lst.append(numpy.random.random(size=ishape).astype(idtype)) + self.inputs[iname].append(lst) + + self.mems = dict() + for mname in mems: + mshape = mems[mname].get('shape', None) + mdtype = mems[mname].get('dtype', 'float32') + self.mems[mname] = Memory(shape=mshape, dtype=mdtype) + + self.params = dict() + for pname in params: + pshape = params[pname].get('shape', None) + pdtype = params[pname].get('dtype', 'float32') + self.params[pname] = numpy.random.random(size=pshape).astype(pdtype) + + self.outputs = dict() + + for oname in outs: + self.outputs[oname] = Output() + + def step(self, **kwargs): + pass + + def exe(self): + retv = dict() + for out in self.outputs: + retv[out] = [] + + for seq_id in xrange(self.num_seq): + for mname in self.mems: + self.mems[mname].reset() + for out in self.outputs: + self.outputs[out].next_sequence() + + iname0 = self.inputs.keys()[0] + seq_len = len(self.inputs[iname0][seq_id]) + + for step_id in xrange(seq_len): + xargs = 
dict() + + for iname in self.inputs: + xargs[iname] = self.inputs[iname][seq_id][step_id] + + for mname in self.mems: + xargs[mname] = self.mems[mname] + + for pname in self.params: + xargs[pname] = self.params[pname] + + for out in self.outputs: + xargs[out] = self.outputs[out] + + self.step(**xargs) + + for mname in self.mems: + next(self.mems[mname]) + + for out in self.outputs: + retv[out].append(self.outputs[out].last()) + + for out in retv: + retv[out] = numpy.array(retv[out]) + return retv + + def to_feed(self, place): + feed_dict = dict() + + for iname in self.inputs: + lod = [0] + np_flatten = [] + for seq_id in xrange(len(self.inputs[iname])): + seq_len = len(self.inputs[iname][seq_id]) + lod.append(lod[-1] + seq_len) + np_flatten.extend(self.inputs[iname][seq_id]) + + t = fluid.Tensor() + t.set(numpy.array(np_flatten), place) + t.set_lod([lod]) + feed_dict[iname] = t + + for pname in self.params: + feed_dict[pname] = self.params[pname] + return feed_dict + + def get_numeric_gradient_of_param(self, param_name, delta=0.01): + p = self.params[param_name] + g = numpy.zeros(shape=p.shape, dtype=p.dtype) + + for p_it, g_it in numpy.nditer([p, g], op_flags=['readwrite']): + o = float(p_it) + p_it[...] = o + delta + pos = self._exe_mean_out_() + p_it[...] = o - delta + neg = self._exe_mean_out_() + p_it[...] = o + g[:] = (pos - neg) / (delta * 2) + return g + + def _exe_mean_out_(self): + outs = self.exe() + return numpy.array([o.mean() for o in outs.itervalues()]).mean() + + +class SimpleMul(BaseRNN): + def __init__(self): + super(SimpleMul, self).__init__({ + 'X': { + 'shape': [32] + } + }, {}, {'W': { + 'shape': [32, 10] + }}, ['Out']) + + def step(self, X, W, Out): + Out.out(numpy.matmul(X, W)) + + +class TestSimpleMul(unittest.TestCase): + def setUp(self): + self.python_impl = SimpleMul() + + def test_forward(self): + program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(program, startup_program): + dat = fluid.layers.data(name='X', shape=[32], lod_level=1) + + rnn = fluid.layers.DynamicRNN() + with rnn.block(): + d = rnn.step_input(dat) + o = fluid.layers.fc(input=d, + param_attr='W', + bias_attr=False, + size=10, + act=None) + rnn.output(o) + + out = rnn() + out = fluid.layers.sequence_pool(out, pool_type='last') + loss = fluid.layers.mean(x=out) + fluid.backward.append_backward_ops(loss) + + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + out, w_g = exe.run(program, + feed=self.python_impl.to_feed(cpu), + fetch_list=[out, "W@GRAD"]) + out_by_python = self.python_impl.exe()['Out'] + self.assertTrue(numpy.allclose(out, out_by_python)) + w_g_num = self.python_impl.get_numeric_gradient_of_param("W") + print w_g_num[0][0] + print w_g_num - w_g + + +if __name__ == '__main__': + unittest.main() From bcf0b56f6a0d649e1f7e71df485d2d0f2a278a77 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 23 Dec 2017 17:59:43 +0800 Subject: [PATCH 03/40] refine iterator --- paddle/operators/cos_sim_op.h | 335 ++++++++++++++------- paddle/operators/elementwise_op_function.h | 55 ---- 2 files changed, 229 insertions(+), 161 deletions(-) diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 3a7e67506d..e96592ab28 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -15,7 +15,7 @@ #pragma once #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" -#include "paddle/operators/elementwise_add_op.h" +#include "paddle/operators/elementwise_op_function.h" namespace paddle { namespace 
operators { @@ -28,27 +28,73 @@ template using EigenVector = framework::EigenVector; -template -void Function_forward(T* out, T* x_norm, T* y_norm, - ElementIterator& x, - ElementIterator& y, int row, int col) { - for (int i = 0; i < row; ++i) { - T xx = 0; +template +static void ForEachZip(IT1 begin1, IT1 last1, IT2 begin2, Callback callback) { + // This method could be implemented in CUDA + for (; begin1 < last1; ++begin1, ++begin2) { + callback(*begin1, *begin2); + } +} + +template +struct CosSimFunctor { + CosSimFunctor(const T* x, const T* y, T* x_norm, T* y_norm, T* z, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + cols_(static_cast(cols)) {} + + inline void operator()(T& x_norm, T& y_norm) const { + size_t x_offset = &x_norm - x_norm_; + size_t y_offset = &y_norm - y_norm_; + + auto* x = x_ + cols_ * x_offset; + + T xx = 0, xy = 0; T yy = 0; - T xy = 0; - for (int j = 0; j < col; ++j) { - xy += (*x) * (*y); - xx += (*x) * (*x); - yy += (*y) * (*y); - ++y; - ++x; + if (same_row) { + auto* y = y_ + cols_ * y_offset; + for (size_t i = 0; i < cols_; ++i) { + xx += x[i] * x[i]; + yy += y[i] * y[i]; + xy += x[i] * y[i]; + } + xx = sqrt(xx); + yy = sqrt(yy); + x_norm_[x_offset] = xx; + y_norm_[y_offset] = yy; + z_[x_offset] = xy / (xx * yy); + } else { + auto* y = y_; + // if (yy == -1) { + // yy = 0; + // for (size_t i = 0; i < cols_; ++i) { + // yy += y[i] * y[i]; + // } + // y_norm[0] = sqrt(yy); + // } + for (size_t i = 0; i < cols_; ++i) { + xx += x[i] * x[i]; + yy += y[i] * y[i]; // only need + xy += x[i] * y[i]; + } + xx = sqrt(xx); + yy = sqrt(yy); + x_norm_[x_offset] = xx; + y_norm_[0] = yy; + z_[x_offset] = xy / (xx * yy); } - x_norm[i] = sqrt(xx); - y_norm[i] = sqrt(yy); - - out[i] = xy / (x_norm[i] * y_norm[i]); } -} + + T* x_norm_; + T* y_norm_; + const T* x_; + const T* y_; + T* z_; + const size_t cols_; +}; template class CosSimKernel : public framework::OpKernel { @@ -68,58 +114,140 @@ class CosSimKernel : public framework::OpKernel { int rows_y = in_y->dims()[0]; int cols = framework::product(in_x->dims()) / rows_x; - auto x_iter = ElementIterator(in_x->data(), rows_x, - cols, rows_x, cols); - auto y_iter = ElementIterator(in_y->data(), rows_y, - cols, rows_x, cols); - - Function_forward(out_z->data(), out_x_norm->data(), - out_y_norm->data(), x_iter, y_iter, rows_x, cols); - // - // // convert Tensor to Eigen Tensor - //// int rows_x = in_x->dims()[0]; - //// int rows_y = in_y->dims()[0]; - // auto x = EigenMatrix::Reshape(*in_x, 1); - // auto y = EigenMatrix::Reshape(*in_y, 1); - // auto z = EigenVector::Flatten(*out_z); - // auto x_norm = EigenVector::Flatten(*out_x_norm); - // auto y_norm = EigenVector::Flatten(*out_y_norm); - // - // // compute - // auto& place = - // *context.template device_context().eigen_device(); - // auto row_along = Eigen::array({{1}}); - // x_norm.device(place) = x.square().sum(row_along).sqrt(); - // y_norm.device(place) = y.square().sum(row_along).sqrt(); - // if (rows_x == rows_y) { - // auto xy = (x * y).sum(Eigen::array({{1}})); - // z.device(place) = xy / x_norm / y_norm; - // } else { - // Eigen::DSizes bcast(rows_x, 1); - // auto xy = (x * y.broadcast(bcast)).sum(row_along); - // z.device(place) = xy / x_norm / y_norm.broadcast(bcast); - // } + + if (rows_x == rows_y) { + CosSimFunctor functor( + in_x->data(), in_y->data(), out_x_norm->data(), + out_y_norm->data(), out_z->data(), cols); + ForEachZip(out_x_norm->data(), out_x_norm->data() + rows_x, + out_y_norm->data(), functor); + } else { + 
CosSimFunctor functor( + in_x->data(), in_y->data(), out_x_norm->data(), + out_y_norm->data(), out_z->data(), cols); + ForEachZip(out_x_norm->data(), out_x_norm->data() + rows_x, + out_y_norm->data(), functor); + } } }; -template -void Function_element(T* result, ElementIterator dz, - ElementIterator y, - ElementIterator x_norm, - ElementIterator y_norm, - ElementIterator z, - ElementIterator x, int num, int block) { - for (int i = 0; i < num; ++i) { - result[i % block] += (*dz) * ((*y) / ((*x_norm) * (*y_norm)) - - (*z) * (*x) / ((*x_norm) * (*x_norm))); - ++dz; - ++y; - ++x_norm; - ++y_norm; - ++z; - ++x; +template +struct CosSimGradFunctor { + CosSimGradFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dx, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + dz_(dz), + dx_(dx), + cols_(static_cast(cols)) {} + + void operator()(const T& x_norm, const T& y_norm) const { + size_t x_offset = &x_norm - x_norm_; + size_t y_offset = &y_norm - y_norm_; + + auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset]; + // auto y_norm_square = y_norm_[y_offset] * y_norm_[y_offset]; + auto xy_norm_prod = x_norm_[x_offset] * y_norm_[y_offset]; + auto dz = dz_[x_offset]; + + auto* dx = dx_ + cols_ * x_offset; + auto* x = x_ + cols_ * x_offset; + auto* y = y_ + cols_ * y_offset; + auto z = z_[x_offset]; + + for (size_t i = 0; i < cols_; ++i) { + dx[i] = dz * (y[i] / xy_norm_prod - z * x[i] / x_norm_square); + } } -} + + const T* x_norm_; + const T* y_norm_; + const T* x_; + const T* y_; + const T* z_; + const T* dz_; + T* dx_; + const size_t cols_; +}; + +template +struct CosSimDxFunctor { + CosSimDxFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dx, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + dz_(dz), + dx_(dx), + cols_(static_cast(cols)) {} + + void operator()(const T& x_norm, const T& y_norm) const { + size_t x_offset = &x_norm - x_norm_; + + auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset]; + auto xy_norm_prod = x_norm_[x_offset] * y_norm_[0]; + auto dz = dz_[x_offset]; + auto z = z_[x_offset]; + + auto* dx = dx_ + cols_ * x_offset; + auto* x = x_ + cols_ * x_offset; + + for (size_t i = 0; i < cols_; ++i) { + dx[i] = dz * (y_[i] / xy_norm_prod - z * x[i] / x_norm_square); + } + } + + const T* x_norm_; + const T* y_norm_; + const T* x_; + const T* y_; + const T* z_; + const T* dz_; + T* dx_; + const size_t cols_; +}; + +template +struct CosSimDyFunctor { + CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dy, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + dz_(dz), + dy_(dy), + cols_(static_cast(cols)) {} + + void operator()(const T& x_norm, const T& y_norm) const { + size_t x_offset = &x_norm - x_norm_; + + auto y_norm_square = y_norm_[0] * y_norm_[0]; + auto xy_norm_prod = x_norm_[x_offset] * y_norm_[0]; + auto dz = dz_[x_offset]; + auto z = z_[x_offset]; + auto* x = x_ + cols_ * x_offset; + + for (size_t i = 0; i < cols_; ++i) { + dy_[i] += dz * (x[i] / xy_norm_prod - z * y_[i] / y_norm_square); + } + } + + const T* x_norm_; + const T* y_norm_; + const T* x_; + const T* y_; + const T* z_; + const T* dz_; + T* dy_; + const size_t cols_; +}; template class CosSimGradKernel : public framework::OpKernel { @@ -140,45 +268,40 @@ class CosSimGradKernel : public framework::OpKernel { int rows_y = in_y->dims()[0]; int cols = framework::product(in_x->dims()) / 
rows_x; - ////////////////////////////// - // ## - auto x_iter = ElementIterator(in_x->data(), rows_x, - cols, rows_x, cols); - auto y_iter = ElementIterator(in_y->data(), rows_y, - cols, rows_x, cols); - auto z_iter = ElementIterator(in_z->data(), rows_x, 1, - rows_x, cols); - auto dz_iter = ElementIterator(in_grad_z->data(), - rows_x, 1, rows_x, cols); - auto x_norm_iter = ElementIterator( - in_x_norm->data(), rows_x, 1, rows_x, cols); - auto y_norm_iter = ElementIterator( - in_y_norm->data(), rows_y, 1, rows_x, cols); - // ## - ////////////////////////////// - // compute dx - if (out_grad_x) { - out_grad_x->mutable_data(context.GetPlace()); - - ////////////////////////////// - // ## - Function_element(out_grad_x->data(), dz_iter, y_iter, x_norm_iter, - y_norm_iter, z_iter, x_iter, rows_x * cols, - rows_x * cols); - // ## - ////////////////////////////// - } - // compute dy - if (out_grad_y) { - out_grad_y->mutable_data(context.GetPlace()); - - ////////////////////////////// - // ## - Function_element(out_grad_y->data(), dz_iter, x_iter, y_norm_iter, - x_norm_iter, z_iter, y_iter, rows_x * cols, - rows_y * cols); - // ## - ////////////////////////////// + if (rows_x == rows_y) { + if (out_grad_x) { + CosSimGradFunctor functor( + in_x_norm->data(), in_y_norm->data(), in_x->data(), + in_y->data(), in_z->data(), in_grad_z->data(), + out_grad_x->mutable_data(context.GetPlace()), cols); + ForEachZip(in_x_norm->data(), in_x_norm->data() + rows_x, + in_y_norm->data(), functor); + } + if (out_grad_y) { + CosSimGradFunctor functor( + in_y_norm->data(), in_x_norm->data(), in_y->data(), + in_x->data(), in_z->data(), in_grad_z->data(), + out_grad_y->mutable_data(context.GetPlace()), cols); + ForEachZip(in_y_norm->data(), in_y_norm->data() + rows_x, + in_x_norm->data(), functor); + } + } else { + if (out_grad_x) { + CosSimDxFunctor functor( + in_x_norm->data(), in_y_norm->data(), in_x->data(), + in_y->data(), in_z->data(), in_grad_z->data(), + out_grad_x->mutable_data(context.GetPlace()), cols); + ForEachZip(in_x_norm->data(), in_x_norm->data() + rows_x, + in_y_norm->data(), functor); + } + if (out_grad_y) { + CosSimDyFunctor functor( + in_x_norm->data(), in_y_norm->data(), in_x->data(), + in_y->data(), in_z->data(), in_grad_z->data(), + out_grad_y->mutable_data(context.GetPlace()), cols); + ForEachZip(in_x_norm->data(), in_x_norm->data() + rows_x, + in_y_norm->data(), functor); + } } } }; diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h index 33b7d06467..7ebfc7df8c 100644 --- a/paddle/operators/elementwise_op_function.h +++ b/paddle/operators/elementwise_op_function.h @@ -131,61 +131,6 @@ class MidWiseTransformIterator { int post_; }; -template -class ElementIterator; - -// Fixed(zcd) : Only support 2D -template -class ElementIterator { - public: - ElementIterator(const T* ptr, int t_m, int t_n, int m, int n) - : ptr_(ptr), - index_(0), - i_(0), - j_(0), - t_m_(t_m), - t_n_(t_n), - m_(m), - n_(n) {} - - ElementIterator& operator++() { - ++j_; - - if ((j_ == n_)) { - j_ = 0; - ++i_; - } - int t_i = (t_m_ == 1) ? 0 : i_; - int t_j = (t_n_ == 1) ? 
0 : j_; - index_ = t_i * t_n_ + t_j; - - return *this; - } - - bool operator==( - const ElementIterator& rhs) const { - return (ptr_ + index_) == &(*rhs); - } - - bool operator!=( - const ElementIterator& rhs) const { - return (ptr_ + index_) != &(*rhs); - } - - const T& operator*() { return ptr_[index_]; } - - private: - // t_m_ == m_ || t_n_ == n_ || (t_m_ == 1 && t_m_ == 1) - const T* ptr_; - int index_; - int i_; - int j_; - int64_t t_m_; - int64_t t_n_; - int64_t m_; - int64_t n_; -}; - #ifdef __NVCC__ template class RowwiseTransformIterator From 49df2a784be8dabda85f82620ff4601ce113d332 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 25 Dec 2017 20:17:27 +0800 Subject: [PATCH 04/40] refine gradient function --- paddle/operators/cos_sim_op.h | 101 +++++++++++----------------------- 1 file changed, 33 insertions(+), 68 deletions(-) diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index e96592ab28..cd5c703c30 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -13,7 +13,6 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/elementwise_op_function.h" @@ -21,16 +20,9 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; -template -using EigenMatrix = framework::EigenMatrix; -template -using EigenVector = framework::EigenVector; template static void ForEachZip(IT1 begin1, IT1 last1, IT2 begin2, Callback callback) { - // This method could be implemented in CUDA for (; begin1 < last1; ++begin1, ++begin2) { callback(*begin1, *begin2); } @@ -66,15 +58,8 @@ struct CosSimFunctor { x_norm_[x_offset] = xx; y_norm_[y_offset] = yy; z_[x_offset] = xy / (xx * yy); - } else { + } else { // This can be wrote in a better way. 
auto* y = y_; - // if (yy == -1) { - // yy = 0; - // for (size_t i = 0; i < cols_; ++i) { - // yy += y[i] * y[i]; - // } - // y_norm[0] = sqrt(yy); - // } for (size_t i = 0; i < cols_; ++i) { xx += x[i] * x[i]; yy += y[i] * y[i]; // only need @@ -144,22 +129,25 @@ struct CosSimGradFunctor { dx_(dx), cols_(static_cast(cols)) {} - void operator()(const T& x_norm, const T& y_norm) const { + inline void operator()(const T& x_norm, const T& y_norm) const { size_t x_offset = &x_norm - x_norm_; size_t y_offset = &y_norm - y_norm_; auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset]; - // auto y_norm_square = y_norm_[y_offset] * y_norm_[y_offset]; auto xy_norm_prod = x_norm_[x_offset] * y_norm_[y_offset]; auto dz = dz_[x_offset]; + auto z = z_[x_offset]; auto* dx = dx_ + cols_ * x_offset; auto* x = x_ + cols_ * x_offset; + auto* y = y_ + cols_ * y_offset; - auto z = z_[x_offset]; + auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; + auto reciprocal_x_norm_square = 1 / x_norm_square; for (size_t i = 0; i < cols_; ++i) { - dx[i] = dz * (y[i] / xy_norm_prod - z * x[i] / x_norm_square); + dx[i] = dz * (y[i] * reciprocal_xy_norm_prod - + z * x[i] * reciprocal_x_norm_square); } } @@ -173,10 +161,10 @@ struct CosSimGradFunctor { const size_t cols_; }; -template +template struct CosSimDxFunctor { CosSimDxFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dx, int cols) + const T* z, const T* dz, T* dx, T* dy, int cols) : x_norm_(x_norm), y_norm_(y_norm), x_(x), @@ -184,58 +172,34 @@ struct CosSimDxFunctor { z_(z), dz_(dz), dx_(dx), - cols_(static_cast(cols)) {} - - void operator()(const T& x_norm, const T& y_norm) const { - size_t x_offset = &x_norm - x_norm_; - - auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset]; - auto xy_norm_prod = x_norm_[x_offset] * y_norm_[0]; - auto dz = dz_[x_offset]; - auto z = z_[x_offset]; - - auto* dx = dx_ + cols_ * x_offset; - auto* x = x_ + cols_ * x_offset; - - for (size_t i = 0; i < cols_; ++i) { - dx[i] = dz * (y_[i] / xy_norm_prod - z * x[i] / x_norm_square); - } - } - - const T* x_norm_; - const T* y_norm_; - const T* x_; - const T* y_; - const T* z_; - const T* dz_; - T* dx_; - const size_t cols_; -}; - -template -struct CosSimDyFunctor { - CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dy, int cols) - : x_norm_(x_norm), - y_norm_(y_norm), - x_(x), - y_(y), - z_(z), - dz_(dz), dy_(dy), cols_(static_cast(cols)) {} - void operator()(const T& x_norm, const T& y_norm) const { + inline void operator()(const T& x_norm, const T& y_norm) const { size_t x_offset = &x_norm - x_norm_; - auto y_norm_square = y_norm_[0] * y_norm_[0]; auto xy_norm_prod = x_norm_[x_offset] * y_norm_[0]; auto dz = dz_[x_offset]; auto z = z_[x_offset]; auto* x = x_ + cols_ * x_offset; + auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; - for (size_t i = 0; i < cols_; ++i) { - dy_[i] += dz * (x[i] / xy_norm_prod - z * y_[i] / y_norm_square); + if (Dx) { + auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset]; + auto* dx = dx_ + cols_ * x_offset; + auto* x = x_ + cols_ * x_offset; + auto reciprocal_x_norm_square = 1 / x_norm_square; + for (size_t i = 0; i < cols_; ++i) { + dx[i] = dz * (y_[i] * reciprocal_xy_norm_prod - + z * x[i] * reciprocal_x_norm_square); + } + } else { + auto y_norm_square = y_norm_[0] * y_norm_[0]; + auto reciprocal_y_norm_square = 1 / y_norm_square; + for (size_t i = 0; i < cols_; ++i) { + dy_[i] += dz * (x[i] * reciprocal_xy_norm_prod - + z * y_[i] * 
reciprocal_y_norm_square); + } } } @@ -245,6 +209,7 @@ struct CosSimDyFunctor { const T* y_; const T* z_; const T* dz_; + T* dx_; T* dy_; const size_t cols_; }; @@ -287,17 +252,17 @@ class CosSimGradKernel : public framework::OpKernel { } } else { if (out_grad_x) { - CosSimDxFunctor functor( + CosSimDxFunctor functor( in_x_norm->data(), in_y_norm->data(), in_x->data(), in_y->data(), in_z->data(), in_grad_z->data(), - out_grad_x->mutable_data(context.GetPlace()), cols); + out_grad_x->mutable_data(context.GetPlace()), nullptr, cols); ForEachZip(in_x_norm->data(), in_x_norm->data() + rows_x, in_y_norm->data(), functor); } if (out_grad_y) { - CosSimDyFunctor functor( + CosSimDxFunctor functor( in_x_norm->data(), in_y_norm->data(), in_x->data(), - in_y->data(), in_z->data(), in_grad_z->data(), + in_y->data(), in_z->data(), in_grad_z->data(), nullptr, out_grad_y->mutable_data(context.GetPlace()), cols); ForEachZip(in_x_norm->data(), in_x_norm->data() + rows_x, in_y_norm->data(), functor); From 4450a312a9228d0237b794d05a75c6de71b3aa55 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 26 Dec 2017 13:13:01 +0800 Subject: [PATCH 05/40] Polish Unittest --- python/paddle/v2/fluid/tests/decorators.py | 27 +++++++ .../fluid/tests/test_dynrnn_gradient_check.py | 80 +++++++++---------- 2 files changed, 67 insertions(+), 40 deletions(-) create mode 100644 python/paddle/v2/fluid/tests/decorators.py diff --git a/python/paddle/v2/fluid/tests/decorators.py b/python/paddle/v2/fluid/tests/decorators.py new file mode 100644 index 0000000000..d3dcf3562d --- /dev/null +++ b/python/paddle/v2/fluid/tests/decorators.py @@ -0,0 +1,27 @@ +import paddle.v2.fluid as fluid + +__all__ = ['many_times', 'prog_scope'] + + +def many_times(times): + def __impl__(fn): + def __fn__(*args, **kwargs): + for _ in range(times): + fn(*args, **kwargs) + + return __fn__ + + return __impl__ + + +def prog_scope(): + def __impl__(fn): + def __fn__(*args, **kwargs): + prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(prog, startup_prog): + fn(*args, **kwargs) + + return __fn__ + + return __impl__ diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index 99b9285466..3018588c3a 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -3,7 +3,7 @@ import random import collections import paddle.v2.fluid as fluid import unittest -import copy +from decorators import * class Memory(object): @@ -78,7 +78,7 @@ class BaseRNN(object): self.outputs[oname] = Output() def step(self, **kwargs): - pass + raise NotImplementedError() def exe(self): retv = dict() @@ -141,18 +141,22 @@ class BaseRNN(object): feed_dict[pname] = self.params[pname] return feed_dict - def get_numeric_gradient_of_param(self, param_name, delta=0.01): + def get_numeric_gradient_of_param(self, param_name, delta=0.001): + if len(p.shape) != 2: + raise ValueError("Not support get numeric gradient of an parameter," + " which is not matrix") p = self.params[param_name] g = numpy.zeros(shape=p.shape, dtype=p.dtype) - for p_it, g_it in numpy.nditer([p, g], op_flags=['readwrite']): - o = float(p_it) - p_it[...] = o + delta - pos = self._exe_mean_out_() - p_it[...] = o - delta - neg = self._exe_mean_out_() - p_it[...] 
= o - g[:] = (pos - neg) / (delta * 2) + for i in xrange(p.shape[0]): + for j in xrange(p.shape[1]): + o = p[i][j] + p[i][j] += delta + pos = self._exe_mean_out_() + p[i][j] -= 2 * delta + neg = self._exe_mean_out_() + p[i][j] = o + g[i][j] = (pos - neg) / (delta * 2) return g def _exe_mean_out_(self): @@ -175,40 +179,36 @@ class SimpleMul(BaseRNN): class TestSimpleMul(unittest.TestCase): - def setUp(self): - self.python_impl = SimpleMul() - - def test_forward(self): - program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(program, startup_program): - dat = fluid.layers.data(name='X', shape=[32], lod_level=1) - - rnn = fluid.layers.DynamicRNN() - with rnn.block(): - d = rnn.step_input(dat) - o = fluid.layers.fc(input=d, - param_attr='W', - bias_attr=False, - size=10, - act=None) - rnn.output(o) - - out = rnn() - out = fluid.layers.sequence_pool(out, pool_type='last') - loss = fluid.layers.mean(x=out) - fluid.backward.append_backward_ops(loss) + # Test many times in local to ensure the random seed cannot breaks CI + # @many_times(10) + @prog_scope() + def test_forward_backward(self): + python_impl = SimpleMul() + dat = fluid.layers.data(name='X', shape=[32], lod_level=1) + + rnn = fluid.layers.DynamicRNN() + with rnn.block(): + d = rnn.step_input(dat) + o = fluid.layers.fc(input=d, + param_attr='W', + bias_attr=False, + size=10, + act=None) + rnn.output(o) + + out = rnn() + out = fluid.layers.sequence_pool(out, pool_type='last') + loss = fluid.layers.mean(x=out) + fluid.backward.append_backward_ops(loss) cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) - out, w_g = exe.run(program, - feed=self.python_impl.to_feed(cpu), + out, w_g = exe.run(feed=python_impl.to_feed(cpu), fetch_list=[out, "W@GRAD"]) - out_by_python = self.python_impl.exe()['Out'] + out_by_python = python_impl.exe()['Out'] self.assertTrue(numpy.allclose(out, out_by_python)) - w_g_num = self.python_impl.get_numeric_gradient_of_param("W") - print w_g_num[0][0] - print w_g_num - w_g + w_g_num = python_impl.get_numeric_gradient_of_param("W") + self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.05)) if __name__ == '__main__': From e566b94fba2a3f5c48629841cbace40af8464fa3 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 26 Dec 2017 13:14:27 +0800 Subject: [PATCH 06/40] Revert C++ changes --- paddle/operators/tensor_array_read_write_op.cc | 11 ----------- paddle/operators/while_op.cc | 15 +-------------- 2 files changed, 1 insertion(+), 25 deletions(-) diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/operators/tensor_array_read_write_op.cc index 59a4dac940..2ee9bf700c 100644 --- a/paddle/operators/tensor_array_read_write_op.cc +++ b/paddle/operators/tensor_array_read_write_op.cc @@ -136,17 +136,6 @@ class ReadFromArrayOp : public ArrayOp { auto &dev_ctx = *pool.Borrow(place); framework::CopyFrom(x_array[offset], place, dev_ctx, out_tensor); out_tensor->set_lod(x_array[offset].lod()); - if (Input("X") == "dynamic_rnn_0_output_array_fc_0.tmp_0_0@GRAD") { - VLOG(10) << "Offset = " << offset; - if (x_array[offset].numel() != 0) { - auto d = x_array[offset].dims(); - std::ostringstream sout; - for (int64_t i = 0; i < d[0]; ++i) { - sout << x_array[offset].data()[0 * d[1]] << ", "; - } - VLOG(10) << "Grad = " << sout.str(); - } - } } else { VLOG(10) << "offset " << offset << " >= " << x_array.size(); } diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc index d7c34297cd..11ee96faad 100644 --- a/paddle/operators/while_op.cc +++ b/paddle/operators/while_op.cc @@ 
-129,9 +129,6 @@ class WhileGradOp : public framework::OperatorBase { auto &og_inside = detail::Ref(cur_scope.Var(inside_og_name), "Cannot find inside gradient %s", inside_og_name); - - VLOG(10) << "OG " << outside_og_name << " Type is " - << og_outside.Type().name(); if (og_outside.Type().hash_code() == typeid(framework::LoDTensor).hash_code()) { auto &outside_tensor = og_outside.Get(); @@ -148,6 +145,7 @@ class WhileGradOp : public framework::OperatorBase { inside_array.resize(outside_array.size()); for (size_t j = 0; j < inside_array.size(); ++j) { + VLOG(10) << j << " " << outside_array[j].numel(); if (outside_array[j].numel() != 0) { inside_array[j].set_lod(outside_array[j].lod()); inside_array[j].ShareDataWith(outside_array[j]); @@ -200,17 +198,6 @@ class WhileGradOp : public framework::OperatorBase { auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); - - VLOG(10) << "Accumulate the gradient of " << pg_names[param_id]; - - if (pg_names[param_id] == "W@GRAD") { - auto &w_g = detail::Ref(cur_scope.FindVar(new_inside_name)) - .Get(); - VLOG(10) << "W_G is" << w_g.data()[0]; - } else { - VLOG(10) << pg_names[param_id]; - } - sum_op->Run(cur_scope, dev_place); cur_scope.Rename(new_inside_name, inside_grad_name); } From 2bfa9796bf4615e0898b33b7b97bb3ca0db013d5 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 26 Dec 2017 13:19:57 +0800 Subject: [PATCH 07/40] Fix check --- python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index 3018588c3a..d0b805882f 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -142,10 +142,10 @@ class BaseRNN(object): return feed_dict def get_numeric_gradient_of_param(self, param_name, delta=0.001): + p = self.params[param_name] if len(p.shape) != 2: raise ValueError("Not support get numeric gradient of an parameter," " which is not matrix") - p = self.params[param_name] g = numpy.zeros(shape=p.shape, dtype=p.dtype) for i in xrange(p.shape[0]): From 32313994ba0091676616435db7b8d3487d4cb41b Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 26 Dec 2017 13:33:42 +0800 Subject: [PATCH 08/40] Add forward test with mem --- .../fluid/tests/test_dynrnn_gradient_check.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index d0b805882f..ef7d5ca9f5 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -211,5 +211,67 @@ class TestSimpleMul(unittest.TestCase): self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.05)) +class TestSimpleMulWithMemory(unittest.TestCase): + DATA_WIDTH = 32 + HIDDEN_WIDTH = 10 + DATA_NAME = 'X' + PARAM_NAME = 'W' + + class SimpleMulWithMemory(BaseRNN): + def __init__(self): + super(TestSimpleMulWithMemory.SimpleMulWithMemory, self).__init__({ + TestSimpleMulWithMemory.DATA_NAME: { + 'shape': [TestSimpleMulWithMemory.DATA_WIDTH] + } + }, {'Mem': { + 'shape': [TestSimpleMulWithMemory.HIDDEN_WIDTH] + }}, { + TestSimpleMulWithMemory.PARAM_NAME: { + 'shape': [ + TestSimpleMulWithMemory.DATA_WIDTH, + TestSimpleMulWithMemory.HIDDEN_WIDTH + ] + } + 
}, ['Out']) + + def step(self, X, Mem, W, Out): + o = numpy.matmul(X, W) + assert isinstance(Mem, Memory) + o += Mem.ex + Mem.update(o) + assert isinstance(Out, Output) + Out.out(o) + + @prog_scope() + def test_forward_backward(self): + py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory() + + data = fluid.layers.data( + name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1) + rnn = fluid.layers.DynamicRNN() + with rnn.block(): + d = rnn.step_input(data) + mem = rnn.memory(value=0.0, shape=[self.HIDDEN_WIDTH]) + hidden = fluid.layers.fc(input=d, + size=self.HIDDEN_WIDTH, + param_attr=self.PARAM_NAME, + bias_attr=False, + act=None) + o = fluid.layers.elementwise_add(x=hidden, y=mem) + rnn.update_memory(mem, o) + rnn.output(o) + + out = rnn() + last = fluid.layers.sequence_pool(input=out, pool_type='last') + + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + + last_np, = exe.run(feed=py_rnn.to_feed(cpu), fetch_list=[last]) + last_by_py, = py_rnn.exe().values() + + self.assertTrue(numpy.allclose(last_np, last_by_py)) + + if __name__ == '__main__': unittest.main() From 2a36e8ad76e624de8a051bbe1af2b7e7691c3280 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 26 Dec 2017 13:38:32 +0800 Subject: [PATCH 09/40] Make as const name --- .../fluid/tests/test_dynrnn_gradient_check.py | 47 +++++++++++-------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index ef7d5ca9f5..837666b76e 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -164,35 +164,44 @@ class BaseRNN(object): return numpy.array([o.mean() for o in outs.itervalues()]).mean() -class SimpleMul(BaseRNN): - def __init__(self): - super(SimpleMul, self).__init__({ - 'X': { - 'shape': [32] - } - }, {}, {'W': { - 'shape': [32, 10] - }}, ['Out']) +class TestSimpleMul(unittest.TestCase): + DATA_NAME = 'X' + DATA_WIDTH = 32 + PARAM_NAME = 'W' + HIDDEN_WIDTH = 10 + OUT_NAME = 'Out' - def step(self, X, W, Out): - Out.out(numpy.matmul(X, W)) + class SimpleMul(BaseRNN): + def __init__(self): + base = TestSimpleMul + super(base.SimpleMul, self).__init__({ + base.DATA_NAME: { + 'shape': [base.DATA_WIDTH] + } + }, {}, { + base.PARAM_NAME: { + 'shape': [base.DATA_WIDTH, base.HIDDEN_WIDTH] + } + }, [base.OUT_NAME]) + def step(self, X, W, Out): + Out.out(numpy.matmul(X, W)) -class TestSimpleMul(unittest.TestCase): # Test many times in local to ensure the random seed cannot breaks CI # @many_times(10) @prog_scope() def test_forward_backward(self): - python_impl = SimpleMul() - dat = fluid.layers.data(name='X', shape=[32], lod_level=1) + python_impl = TestSimpleMul.SimpleMul() + dat = fluid.layers.data( + name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1) rnn = fluid.layers.DynamicRNN() with rnn.block(): d = rnn.step_input(dat) o = fluid.layers.fc(input=d, - param_attr='W', + param_attr=self.PARAM_NAME, bias_attr=False, - size=10, + size=self.HIDDEN_WIDTH, act=None) rnn.output(o) @@ -204,10 +213,10 @@ class TestSimpleMul(unittest.TestCase): cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) out, w_g = exe.run(feed=python_impl.to_feed(cpu), - fetch_list=[out, "W@GRAD"]) - out_by_python = python_impl.exe()['Out'] + fetch_list=[out, self.PARAM_NAME + "@GRAD"]) + out_by_python = python_impl.exe()[self.OUT_NAME] self.assertTrue(numpy.allclose(out, out_by_python)) - w_g_num = python_impl.get_numeric_gradient_of_param("W") + w_g_num 
= python_impl.get_numeric_gradient_of_param(self.PARAM_NAME) self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.05)) From 938717ba2b34eb87d25eb451cec5f328c0977148 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 26 Dec 2017 14:37:26 +0800 Subject: [PATCH 10/40] Stash --- paddle/framework/executor.cc | 8 ++ .../fluid/tests/test_dynrnn_gradient_check.py | 79 +++++++++++++++++-- 2 files changed, 79 insertions(+), 8 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 997773c168..a07e8e0b1b 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -66,6 +66,14 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, PADDLE_ENFORCE_LT(static_cast(block_id), pdesc.Size()); auto& block = pdesc.Block(block_id); + if (VLOG_IS_ON(100)) { + std::ostringstream sout; + for (auto& name : scope->GetAllNames(false)) { + sout << name << ", "; + } + VLOG(100) << "Scope has variable " << sout.str(); + } + Scope* local_scope = scope; if (create_vars) { if (create_local_scope) { diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index 837666b76e..22bb2b1cdf 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -159,6 +159,39 @@ class BaseRNN(object): g[i][j] = (pos - neg) / (delta * 2) return g + def get_numeric_gradient_of_input(self, + input_name, + delta=0.001, + return_one_tensor=True): + ipt = self.inputs[input_name] + grad = [] + + for seq in ipt: + seq_grad = [] + for item in seq: + item_grad = numpy.zeros(shape=item.shape, dtype=item.dtype) + if len(item.shape) != 1: + raise ValueError("Not support") + + for i in xrange(len(item)): + o = item[i] + item[i] += delta + pos = self._exe_mean_out_() + item[i] -= 2 * delta + neg = self._exe_mean_out_() + item[i] = o + item_grad[i] = (pos - neg) / (delta * 2) + seq_grad.append(item_grad) + grad.append(seq_grad) + + if not return_one_tensor: + return grad + + for i in xrange(len(grad)): + grad[i] = numpy.concatenate(grad[i]) + grad = numpy.concatenate(grad) + return grad + def _exe_mean_out_(self): outs = self.exe() return numpy.array([o.mean() for o in outs.itervalues()]).mean() @@ -191,9 +224,10 @@ class TestSimpleMul(unittest.TestCase): # @many_times(10) @prog_scope() def test_forward_backward(self): - python_impl = TestSimpleMul.SimpleMul() + py_rnn = TestSimpleMul.SimpleMul() dat = fluid.layers.data( name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1) + dat.stop_gradient = False rnn = fluid.layers.DynamicRNN() with rnn.block(): @@ -212,17 +246,26 @@ class TestSimpleMul(unittest.TestCase): cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) - out, w_g = exe.run(feed=python_impl.to_feed(cpu), - fetch_list=[out, self.PARAM_NAME + "@GRAD"]) - out_by_python = python_impl.exe()[self.OUT_NAME] + out, w_g, i_g = map(numpy.array, + exe.run(feed=py_rnn.to_feed(cpu), + fetch_list=[ + out, self.PARAM_NAME + "@GRAD", + self.DATA_NAME + "@GRAD" + ], + return_numpy=False)) + out_by_python = py_rnn.exe()[self.OUT_NAME] self.assertTrue(numpy.allclose(out, out_by_python)) - w_g_num = python_impl.get_numeric_gradient_of_param(self.PARAM_NAME) + w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.05)) + i_g_num = py_rnn.get_numeric_gradient_of_input( + input_name=self.DATA_NAME) + i_g_num = i_g_num.reshape(i_g.shape) + self.assertTrue(numpy.allclose(i_g_num, i_g, 
rtol=0.05)) class TestSimpleMulWithMemory(unittest.TestCase): DATA_WIDTH = 32 - HIDDEN_WIDTH = 10 + HIDDEN_WIDTH = 20 DATA_NAME = 'X' PARAM_NAME = 'W' @@ -251,12 +294,14 @@ class TestSimpleMulWithMemory(unittest.TestCase): assert isinstance(Out, Output) Out.out(o) + # @many_times(10) @prog_scope() def test_forward_backward(self): py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory() data = fluid.layers.data( name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1) + data.stop_gradient = False rnn = fluid.layers.DynamicRNN() with rnn.block(): d = rnn.step_input(data) @@ -272,14 +317,32 @@ class TestSimpleMulWithMemory(unittest.TestCase): out = rnn() last = fluid.layers.sequence_pool(input=out, pool_type='last') + loss = fluid.layers.mean(x=last) + fluid.backward.append_backward_ops(loss) cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) - - last_np, = exe.run(feed=py_rnn.to_feed(cpu), fetch_list=[last]) + feed = py_rnn.to_feed(cpu) + for _ in xrange(2): + last_np, w_g, i_g = map(numpy.array, + exe.run(feed=feed, + fetch_list=[ + last, self.PARAM_NAME + "@GRAD", + self.DATA_NAME + "@GRAD" + ], + return_numpy=False)) last_by_py, = py_rnn.exe().values() self.assertTrue(numpy.allclose(last_np, last_by_py)) + w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) + print w_g[0], w_g_num[0] + self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.1)) + i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME) + i_g_num = i_g_num.reshape(i_g.shape) + + # Since this RNN has many float add. The number could be not stable. + # rtol = 0.1 + self.assertTrue(numpy.allclose(i_g_num, i_g, rtol=0.1)) if __name__ == '__main__': From 6cc4bd536f1c9862bca6e3104cab4b3daf843e1e Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Tue, 26 Dec 2017 14:37:47 +0800 Subject: [PATCH 11/40] wip --- paddle/operators/adam_op.h | 120 ++++++++++++++++-- python/paddle/v2/fluid/tests/test_adam_op.py | 125 +++++++++++++++++++ 2 files changed, 232 insertions(+), 13 deletions(-) diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index c4e2c8bb88..aa58c4f990 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -79,6 +79,71 @@ struct AdamFunctor { } }; +template +struct SparseAdamFunctor { + T beta1_; + T beta2_; + T epsilon_; + + const T* beta1_pow_; + const T* beta2_pow_; + const T* moment1_; + T* moment1_out_; + const T* moment2_; + T* moment2_out_; + const T* lr_; + const T* grad_; + const T* param_; + T* param_out_; + + const int64_t* rows_; + int64_t row_numel_; + int64_t height_; + + SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, + const T* beta2_pow, const T* mom1, T* mom1_out, + const T* mom2, T* mom2_out, const T* lr, const T* grad, + const T* param, T* param_out, const int64_t* rows, + int64_t row_numel, int64_t height) + : beta1_(beta1), + beta2_(beta2), + epsilon_(epsilon), + beta1_pow_(beta1_pow), + beta2_pow_(beta2_pow), + moment1_(mom1), + moment1_out_(mom1_out), + moment2_(mom2), + moment2_out_(mom2_out), + lr_(lr), + grad_(grad), + param_(param), + param_out_(param_out), + rows_(rows), + row_numel_(row_numel), + height_(height) {} + + inline HOSTDEVICE void operator()(size_t i) const { + for (int64_t j = 0; j < row_numel_; ++j) { + T g = grad_[i * row_numel_ + j]; + T mom1 = moment1_[rows_[i] * row_numel_ + j]; + T mom2 = moment2_[rows_[i] * row_numel_ + j]; + T lr = *lr_; + T beta1_pow = *beta1_pow_; + T beta2_pow = *beta2_pow_; + T p = param_[rows_[i] * row_numel_ + j]; + + lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + mom1 = beta1_ * mom1 
+ (1 - beta1_) * g; + mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; + p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); + // FIXME(typhoonzero): row id may be duplicate + moment1_out_[rows_[i] * row_numel_ + j] = mom1; + moment2_out_[rows_[i] * row_numel_ + j] = mom2; + param_out_[rows_[i] * row_numel_ + j] = p; + } // for col id + } +}; + template class AdamOpKernel : public framework::OpKernel { public: @@ -90,7 +155,8 @@ class AdamOpKernel : public framework::OpKernel { T beta2 = static_cast(ctx.Attr("beta2")); T epsilon = static_cast(ctx.Attr("epsilon")); auto& param = Ref(ctx.Input("Param"), "Must set Param"); - auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); + // auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); + auto* grad_var = ctx.InputVar("Grad"); auto& mom1 = Ref(ctx.Input("Moment1"), "Must set Moment1"); auto& mom2 = Ref(ctx.Input("Moment2"), "Must set Moment2"); auto& lr = @@ -108,18 +174,46 @@ class AdamOpKernel : public framework::OpKernel { auto& mom2_out = Ref(ctx.Output("Moment2Out"), "Must set Moment1Out"); - AdamFunctor functor(beta1, beta2, epsilon, beta1_pow.template data(), - beta2_pow.template data(), - mom1.template data(), - mom1_out.template mutable_data(ctx.GetPlace()), - mom2.template data(), - mom2_out.template mutable_data(ctx.GetPlace()), - lr.template data(), grad.template data(), - param.template data(), - param_out.template mutable_data(ctx.GetPlace())); - platform::ForRange for_range( - static_cast(ctx.device_context()), param.numel()); - for_range(functor); + if (grad_var->IsType()) { + auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); + AdamFunctor functor( + beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad.template data(), + param.template data(), + param_out.template mutable_data(ctx.GetPlace())); + platform::ForRange for_range( + static_cast(ctx.device_context()), + param.numel()); + for_range(functor); + } else if (grad_var->IsType()) { + auto& grad = + Ref(ctx.Input("Grad"), "Must set Grad"); + auto& grad_tensor = grad.value(); + const T* grad_data = grad_tensor.template data(); + auto* rows = grad.rows().data(); + auto height = grad.height(); + auto row_numel = grad_tensor.numel() / height; + + SparseAdamFunctor functor( + beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad_data, param.template data(), + param_out.template mutable_data(ctx.GetPlace()), rows, row_numel, + height); + platform::ForRange for_range( + static_cast(ctx.device_context()), + grad.rows().size()); + for_range(functor); + } else { + PADDLE_THROW("Variable type not supported by adam_op"); + } } }; diff --git a/python/paddle/v2/fluid/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/test_adam_op.py index a0d6655d4c..a66fd33102 100644 --- a/python/paddle/v2/fluid/tests/test_adam_op.py +++ b/python/paddle/v2/fluid/tests/test_adam_op.py @@ -176,5 +176,130 @@ def adam_step(inputs, attributes): return param_out, moment1_out, moment2_out +def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad): + ''' + Simulate one step of the adam optimizer + :param inputs: dict of inputs + :param attributes: dict of attributes + :return tuple: tuple of output param, 
moment1, moment2, + beta1 power accumulator and beta2 power accumulator + ''' + param = inputs['Param'] + # grad = inputs['Grad'] + moment1 = inputs['Moment1'] + moment2 = inputs['Moment2'] + lr = inputs['LearningRate'] + beta1_pow = inputs['Beta1Pow'] + beta2_pow = inputs['Beta2Pow'] + + beta1 = attributes['beta1'] + beta2 = attributes['beta2'] + epsilon = attributes['epsilon'] + + moment1_out = np.array([height, row_numel]) + moment2_out = np.array([height, row_numel]) + param_out = np.array([height, row_numel]) + + for idx, row_id in enumerate(rows): + moment1_out[row_id] = beta1 * moment1[row_id] + (1 - beta1 + ) * np_grad[idx] + moment2_out[row_id] = beta2 * moment2[row_id] + ( + 1 - beta2) * np.square(np_grad[idx]) + lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow) + param_out[row_id] = param[row_id] - lr_t * (moment1_out / ( + np.sqrt(moment2_out) + epsilon)) + return param_out, moment1_out, moment2_out + + +class TestSparseAdamOp(unittest.TestCase): + def setup(self, scope, place): + beta1 = 0.78 + beta2 = 0.836 + epsilon = 1e-4 + + height = 10 + rows = [0, 4, 7] + row_numel = 12 + self.dense_inputs = { + "Param": np.full((height, row_numel), 5.0).astype("float32"), + "Moment1": np.full((height, row_numel), 5.0).astype("float32"), + "Moment2": np.full((height, row_numel), 5.0).astype("float32"), + 'Beta1Pow': np.array([0.9**10]).astype("float32"), + 'Beta2Pow': np.array([0.999**10]).astype("float32"), + "LearningRate": np.full((1), 2.0).astype("float32") + } + self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2} + + grad_selected_rows = scope.var('Grad').get_selected_rows() + grad_selected_rows.set_height(height) + grad_selected_rows.set_rows(rows) + np_array = np.ones((len(rows), row_numel)).astype("float32") + np_array[0, 0] = 2.0 + np_array[2, 8] = 4.0 + + grad_tensor = grad_selected_rows.get_tensor() + grad_tensor.set(np_array, place) + + self.sparse_inputs = ["Grad"] + + param_out, mom1, mom2 = adam_step_sparse( + self.dense_inputs, self.attrs, height, rows, row_numel, np_array) + self.outputs = { + "Param": param_out, + "Moment1Out": mom1, + "Moment2Out": mom2 + } + + def check_with_place(self, place): + scope = core.Scope() + self.setup(scope, place) + + op_args = dict() + for key, np_array in self.dense_inputs.iteritems(): + var = scope.var(key).get_tensor() + var.set(np_array, place) + op_args[key] = key + for s in self.sparse_inputs: + op_args[s] = s + for k in self.attrs: + op_args[k] = self.attrs[k] + + # create and run sgd operator + sgd_op = Operator("adam", **op_args) + sgd_op.run(scope, place) + + for key, np_array in self.outputs.iteritems(): + out_var = scope.var(key).get_tensor() + actual = np.array(out_var) + actual.reshape([actual.size()]) + np_array.reshape([np_array.size()]) + i = 0 + while i < actual.size(): + self.assertAlmostEqual(actual[i], np_array[i]) + i += 1 + + # # rows[0] = 0, 5.0 - 2.0 * 2.0 + # self.assertAlmostEqual(1.0, result_array[rows[0], 0]) + # # rows[0] = 0, 5.0 - 2.0 * 1.0 + # self.assertAlmostEqual(3.0, result_array[rows[0], 2]) + # # 5.0 - 2.0 * 0.0 + # self.assertAlmostEqual(5.0, result_array[1, 0]) + # # rows[1] = 4, 5.0 - 2.0 * 1.0 + # self.assertAlmostEqual(3.0, result_array[rows[1], 10]) + # # 5.0 - 2.0 * 0.0 + # self.assertAlmostEqual(5.0, result_array[5, 8]) + # # rows[2] = 7, 5.0 - 2.0 * 1.0 + # self.assertAlmostEqual(3.0, result_array[rows[2], 1]) + # # rows[2] = 7, 5.0 - 2.0 * 4.0 + # self.assertAlmostEqual(-3.0, result_array[rows[2], 8]) + + def test_sparse_sgd(self): + places = [core.CPUPlace()] + if 
core.is_compile_gpu(): + places.append(core.CUDAPlace(0)) + for place in places: + self.check_with_place(place) + + if __name__ == "__main__": unittest.main() From 82a22d3258b7024e64cd4045c5bbf32aa99f070f Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 26 Dec 2017 17:06:23 +0800 Subject: [PATCH 12/40] Update code --- paddle/framework/executor.cc | 8 ------ paddle/framework/tensor_impl.h | 8 ++++++ paddle/operators/sum_op.h | 2 ++ python/paddle/v2/fluid/executor.py | 25 ++++++++++++++++--- .../tests/book/test_label_semantic_roles.py | 2 +- python/paddle/v2/fluid/tests/decorators.py | 6 +++-- .../fluid/tests/test_dynrnn_gradient_check.py | 20 +++++++-------- 7 files changed, 46 insertions(+), 25 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index a07e8e0b1b..997773c168 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -66,14 +66,6 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, PADDLE_ENFORCE_LT(static_cast(block_id), pdesc.Size()); auto& block = pdesc.Block(block_id); - if (VLOG_IS_ON(100)) { - std::ostringstream sout; - for (auto& name : scope->GetAllNames(false)) { - sout << name << ", "; - } - VLOG(100) << "Scope has variable " << sout.str(); - } - Scope* local_scope = scope; if (create_vars) { if (create_local_scope) { diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 6c6f298edc..46ea3b881d 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -134,6 +134,14 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { #endif offset_ = 0; } + + if (typeid(float).hash_code() == type.hash_code()) { + auto buf = reinterpret_cast( + reinterpret_cast(holder_->ptr()) + offset_); + for (int64_t i = 0; i < this->numel(); ++i) { + buf[i] = NAN; + } + } return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index eaa36aa1ae..cbde9976dc 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -107,10 +107,12 @@ class SumKernel : public framework::OpKernel { out_array.resize(i + 1); } if (out_array[i].numel() == 0) { + VLOG(10) << context.op().Output("Out") << " just copy"; framework::CopyFrom(in_array[i], in_array[i].place(), context.device_context(), &out_array[i]); out_array[i].set_lod(in_array[i].lod()); } else { + VLOG(10) << context.op().Output("Out") << " merged"; PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod()); auto in = EigenVector::Flatten(in_array[i]); auto result = EigenVector::Flatten(out_array[i]); diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py index 2c91afb363..1d6c594b41 100644 --- a/python/paddle/v2/fluid/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -1,12 +1,31 @@ import numpy as np +import contextlib +from framework import Program, default_main_program from . 
import core -from framework import Program, default_main_program, Parameter, Variable -__all__ = ['Executor', 'g_scope'] +__all__ = ['Executor', 'global_scope', 'scope_guard', 'switch_scope'] g_scope = core.Scope() +def global_scope(): + return g_scope + + +def switch_scope(scope): + global g_scope + ex = g_scope + g_scope = scope + return ex + + +@contextlib.contextmanager +def scope_guard(scope): + ex = switch_scope(scope) + yield + switch_scope(ex) + + def as_numpy(tensor): if isinstance(tensor, list): return [as_numpy(t) for t in tensor] @@ -117,7 +136,7 @@ class Executor(object): raise TypeError() if scope is None: - scope = g_scope + scope = global_scope() program = program.clone() global_block = program.global_block() diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index c3591a613a..8acd470c5e 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -170,7 +170,7 @@ def main(): exe.run(fluid.default_startup_program()) - embedding_param = fluid.g_scope.find_var(embedding_name).get_tensor() + embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor() embedding_param.set( load_parameter(conll05.get_embedding(), word_dict_len, word_dim), place) diff --git a/python/paddle/v2/fluid/tests/decorators.py b/python/paddle/v2/fluid/tests/decorators.py index d3dcf3562d..154619b0e9 100644 --- a/python/paddle/v2/fluid/tests/decorators.py +++ b/python/paddle/v2/fluid/tests/decorators.py @@ -19,8 +19,10 @@ def prog_scope(): def __fn__(*args, **kwargs): prog = fluid.Program() startup_prog = fluid.Program() - with fluid.program_guard(prog, startup_prog): - fn(*args, **kwargs) + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + fn(*args, **kwargs) return __fn__ diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index 22bb2b1cdf..7f61b966fd 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -298,7 +298,6 @@ class TestSimpleMulWithMemory(unittest.TestCase): @prog_scope() def test_forward_backward(self): py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory() - data = fluid.layers.data( name=self.DATA_NAME, shape=[self.DATA_WIDTH], lod_level=1) data.stop_gradient = False @@ -323,19 +322,18 @@ class TestSimpleMulWithMemory(unittest.TestCase): cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) feed = py_rnn.to_feed(cpu) - for _ in xrange(2): - last_np, w_g, i_g = map(numpy.array, - exe.run(feed=feed, - fetch_list=[ - last, self.PARAM_NAME + "@GRAD", - self.DATA_NAME + "@GRAD" - ], - return_numpy=False)) + last_np, w_g, i_g = map(numpy.array, + exe.run(feed=feed, + fetch_list=[ + last, self.PARAM_NAME + "@GRAD", + self.DATA_NAME + "@GRAD" + ], + return_numpy=False)) last_by_py, = py_rnn.exe().values() - self.assertTrue(numpy.allclose(last_np, last_by_py)) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) - print w_g[0], w_g_num[0] + # print w_g_num[0], w_g[0] + self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.1)) i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME) i_g_num = i_g_num.reshape(i_g.shape) From 8728885031be996588520373cf3eec8fab0efee3 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 26 Dec 2017 17:09:57 +0800 Subject: [PATCH 13/40] Revert debug 
code --- paddle/framework/tensor_impl.h | 8 -------- paddle/operators/sum_op.h | 2 -- 2 files changed, 10 deletions(-) diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 46ea3b881d..6c6f298edc 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -134,14 +134,6 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { #endif offset_ = 0; } - - if (typeid(float).hash_code() == type.hash_code()) { - auto buf = reinterpret_cast( - reinterpret_cast(holder_->ptr()) + offset_); - for (int64_t i = 0; i < this->numel(); ++i) { - buf[i] = NAN; - } - } return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index cbde9976dc..eaa36aa1ae 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -107,12 +107,10 @@ class SumKernel : public framework::OpKernel { out_array.resize(i + 1); } if (out_array[i].numel() == 0) { - VLOG(10) << context.op().Output("Out") << " just copy"; framework::CopyFrom(in_array[i], in_array[i].place(), context.device_context(), &out_array[i]); out_array[i].set_lod(in_array[i].lod()); } else { - VLOG(10) << context.op().Output("Out") << " merged"; PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod()); auto in = EigenVector::Flatten(in_array[i]); auto result = EigenVector::Flatten(out_array[i]); From 5361911c689e1368adc4c8b0c86ea44c310796dc Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Tue, 26 Dec 2017 21:23:08 +0800 Subject: [PATCH 14/40] adam support sparse --- paddle/operators/adam_op.h | 13 ++--- python/paddle/v2/fluid/tests/test_adam_op.py | 58 +++++++++----------- 2 files changed, 32 insertions(+), 39 deletions(-) diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index aa58c4f990..5facd0112f 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -98,13 +98,12 @@ struct SparseAdamFunctor { const int64_t* rows_; int64_t row_numel_; - int64_t height_; SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, T* mom2_out, const T* lr, const T* grad, const T* param, T* param_out, const int64_t* rows, - int64_t row_numel, int64_t height) + int64_t row_numel) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), @@ -119,8 +118,7 @@ struct SparseAdamFunctor { param_(param), param_out_(param_out), rows_(rows), - row_numel_(row_numel), - height_(height) {} + row_numel_(row_numel) {} inline HOSTDEVICE void operator()(size_t i) const { for (int64_t j = 0; j < row_numel_; ++j) { @@ -136,6 +134,7 @@ struct SparseAdamFunctor { mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); + // IMPORTANT: // FIXME(typhoonzero): row id may be duplicate moment1_out_[rows_[i] * row_numel_ + j] = mom1; moment2_out_[rows_[i] * row_numel_ + j] = mom2; @@ -195,8 +194,7 @@ class AdamOpKernel : public framework::OpKernel { auto& grad_tensor = grad.value(); const T* grad_data = grad_tensor.template data(); auto* rows = grad.rows().data(); - auto height = grad.height(); - auto row_numel = grad_tensor.numel() / height; + auto row_numel = grad_tensor.numel() / grad.rows().size(); SparseAdamFunctor functor( beta1, beta2, epsilon, beta1_pow.template data(), @@ -205,8 +203,7 @@ class AdamOpKernel : public framework::OpKernel { mom2.template data(), mom2_out.template mutable_data(ctx.GetPlace()), lr.template data(), grad_data, param.template 
data(), - param_out.template mutable_data(ctx.GetPlace()), rows, row_numel, - height); + param_out.template mutable_data(ctx.GetPlace()), rows, row_numel); platform::ForRange for_range( static_cast(ctx.device_context()), grad.rows().size()); diff --git a/python/paddle/v2/fluid/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/test_adam_op.py index a66fd33102..996fcfe49d 100644 --- a/python/paddle/v2/fluid/tests/test_adam_op.py +++ b/python/paddle/v2/fluid/tests/test_adam_op.py @@ -1,6 +1,8 @@ import unittest import numpy as np from op_test import OpTest +from paddle.v2.fluid import core +from paddle.v2.fluid.op import Operator class TestAdamOp1(OpTest): @@ -196,9 +198,9 @@ def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad): beta2 = attributes['beta2'] epsilon = attributes['epsilon'] - moment1_out = np.array([height, row_numel]) - moment2_out = np.array([height, row_numel]) - param_out = np.array([height, row_numel]) + moment1_out = np.zeros(shape=[height, row_numel]) + moment2_out = np.zeros(shape=[height, row_numel]) + param_out = np.zeros(shape=[height, row_numel]) for idx, row_id in enumerate(rows): moment1_out[row_id] = beta1 * moment1[row_id] + (1 - beta1 @@ -206,8 +208,8 @@ def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad): moment2_out[row_id] = beta2 * moment2[row_id] + ( 1 - beta2) * np.square(np_grad[idx]) lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow) - param_out[row_id] = param[row_id] - lr_t * (moment1_out / ( - np.sqrt(moment2_out) + epsilon)) + param_out[row_id] = param[row_id] - lr_t * (moment1_out[row_id] / ( + np.sqrt(moment2_out[row_id]) + epsilon)) return param_out, moment1_out, moment2_out @@ -219,13 +221,15 @@ class TestSparseAdamOp(unittest.TestCase): height = 10 rows = [0, 4, 7] + self.rows = rows row_numel = 12 + self.row_numel = row_numel self.dense_inputs = { "Param": np.full((height, row_numel), 5.0).astype("float32"), "Moment1": np.full((height, row_numel), 5.0).astype("float32"), "Moment2": np.full((height, row_numel), 5.0).astype("float32"), - 'Beta1Pow': np.array([0.9**10]).astype("float32"), - 'Beta2Pow': np.array([0.999**10]).astype("float32"), + 'Beta1Pow': np.array([beta1**10]).astype("float32"), + 'Beta2Pow': np.array([beta2**10]).astype("float32"), "LearningRate": np.full((1), 2.0).astype("float32") } self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2} @@ -245,7 +249,7 @@ class TestSparseAdamOp(unittest.TestCase): param_out, mom1, mom2 = adam_step_sparse( self.dense_inputs, self.attrs, height, rows, row_numel, np_array) self.outputs = { - "Param": param_out, + "ParamOut": param_out, "Moment1Out": mom1, "Moment2Out": mom2 } @@ -261,37 +265,29 @@ class TestSparseAdamOp(unittest.TestCase): op_args[key] = key for s in self.sparse_inputs: op_args[s] = s + for s in self.outputs: + var = scope.var(s).get_tensor() + var.set(self.outputs[s], place) + op_args[s] = s for k in self.attrs: op_args[k] = self.attrs[k] # create and run sgd operator - sgd_op = Operator("adam", **op_args) - sgd_op.run(scope, place) + adam_op = Operator("adam", **op_args) + adam_op.run(scope, place) for key, np_array in self.outputs.iteritems(): out_var = scope.var(key).get_tensor() actual = np.array(out_var) - actual.reshape([actual.size()]) - np_array.reshape([np_array.size()]) - i = 0 - while i < actual.size(): - self.assertAlmostEqual(actual[i], np_array[i]) - i += 1 - - # # rows[0] = 0, 5.0 - 2.0 * 2.0 - # self.assertAlmostEqual(1.0, result_array[rows[0], 0]) - # # rows[0] = 0, 5.0 - 2.0 * 1.0 - # 
self.assertAlmostEqual(3.0, result_array[rows[0], 2]) - # # 5.0 - 2.0 * 0.0 - # self.assertAlmostEqual(5.0, result_array[1, 0]) - # # rows[1] = 4, 5.0 - 2.0 * 1.0 - # self.assertAlmostEqual(3.0, result_array[rows[1], 10]) - # # 5.0 - 2.0 * 0.0 - # self.assertAlmostEqual(5.0, result_array[5, 8]) - # # rows[2] = 7, 5.0 - 2.0 * 1.0 - # self.assertAlmostEqual(3.0, result_array[rows[2], 1]) - # # rows[2] = 7, 5.0 - 2.0 * 4.0 - # self.assertAlmostEqual(-3.0, result_array[rows[2], 8]) + actual = actual.reshape([actual.size]) + np_array = np_array.reshape([np_array.size]) + for idx, row_id in enumerate(self.rows): + j = 0 + while j < self.row_numel: + pos = row_id * self.row_numel + j + print (actual[pos] - np_array[pos]) / actual[pos] + self.assertLess((actual[pos] - np_array[pos]) / actual[pos], 0.00001) + j += 1 def test_sparse_sgd(self): places = [core.CPUPlace()] From dd21ae6c1ee3b681bfd069760448fead207964ee Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 27 Dec 2017 09:58:51 +0800 Subject: [PATCH 15/40] update --- python/paddle/v2/fluid/tests/test_adam_op.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/test_adam_op.py index 996fcfe49d..3758ca457e 100644 --- a/python/paddle/v2/fluid/tests/test_adam_op.py +++ b/python/paddle/v2/fluid/tests/test_adam_op.py @@ -285,8 +285,9 @@ class TestSparseAdamOp(unittest.TestCase): j = 0 while j < self.row_numel: pos = row_id * self.row_numel + j - print (actual[pos] - np_array[pos]) / actual[pos] - self.assertLess((actual[pos] - np_array[pos]) / actual[pos], 0.00001) + print(actual[pos] - np_array[pos]) / actual[pos] + self.assertLess((actual[pos] - np_array[pos]) / actual[pos], + 0.00001) j += 1 def test_sparse_sgd(self): From 7ac00dd684b025a8b1ea6a34a4cdf39ce7fd792e Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 27 Dec 2017 15:23:49 +0800 Subject: [PATCH 16/40] refine --- paddle/operators/cos_sim_op.cc | 38 +++++++++ paddle/operators/cos_sim_op.cu | 45 +++++++++++ paddle/operators/cos_sim_op.h | 137 ++++++++++++++------------------- 3 files changed, 142 insertions(+), 78 deletions(-) diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 440c427cba..ab9cf745e3 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -149,6 +149,44 @@ class CosSimOpGrad : public framework::OperatorWithKernel { } }; +template +struct CosSimDyFunctor { + CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dy, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + dz_(dz), + dy_(dy), + cols_(static_cast(cols)) {} + + inline void operator()(size_t offset) const { + auto xy_norm_prod = x_norm_[offset] * y_norm_[0]; + auto dz = dz_[offset]; + auto z = z_[offset]; + auto* x = x_ + cols_ * offset; + auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; + + auto y_norm_square = y_norm_[0] * y_norm_[0]; + auto reciprocal_y_norm_square = 1 / y_norm_square; + for (size_t i = 0; i < cols_; ++i) { + dy_[i] += dz * (x[i] * reciprocal_xy_norm_prod - + z * y_[i] * reciprocal_y_norm_square); + } + } + + const T* x_norm_; + const T* y_norm_; + const T* x_; + const T* y_; + const T* z_; + const T* dz_; + T* dy_; + const size_t cols_; +}; + } // namespace operators } // namespace paddle diff --git a/paddle/operators/cos_sim_op.cu b/paddle/operators/cos_sim_op.cu index 1cb01f5945..eacac68bac 100644 --- a/paddle/operators/cos_sim_op.cu +++ 
b/paddle/operators/cos_sim_op.cu @@ -15,6 +15,51 @@ #define EIGEN_USE_GPU #include "paddle/operators/cos_sim_op.h" +namespace paddle { +namespace operators { + +template +struct CosSimDyFunctor { + CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dy, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + dz_(dz), + dy_(dy), + cols_(static_cast(cols)) {} + + inline void operator()(size_t offset) const { + auto xy_norm_prod = x_norm_[offset] * y_norm_[0]; + auto dz = dz_[offset]; + auto z = z_[offset]; + auto* x = x_ + cols_ * offset; + auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; + + auto y_norm_square = y_norm_[0] * y_norm_[0]; + auto reciprocal_y_norm_square = 1 / y_norm_square; + for (size_t i = 0; i < cols_; ++i) { + T dy = dz * (x[i] * reciprocal_xy_norm_prod - + z * y_[i] * reciprocal_y_norm_square); + paddle::paddleAtomicAdd(dy_ + i, dy) + } + } + + const T* x_norm_; + const T* y_norm_; + const T* x_; + const T* y_; + const T* z_; + const T* dz_; + T* dy_; + const size_t cols_; +}; + +} // namespace operators +} // namespace paddle + namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( cos_sim, ops::CosSimKernel); diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index cd5c703c30..8b2a06a41b 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -21,10 +21,17 @@ namespace operators { using Tensor = framework::Tensor; -template -static void ForEachZip(IT1 begin1, IT1 last1, IT2 begin2, Callback callback) { - for (; begin1 < last1; ++begin1, ++begin2) { - callback(*begin1, *begin2); +template +struct CosSimDyFunctor { + CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dy, int cols); + inline void operator()(size_t) const; +}; + +template +static void ForEachZip(size_t num, Callback callback) { + for (size_t i = 0; i < num; ++i) { + callback(i); } } @@ -38,16 +45,11 @@ struct CosSimFunctor { z_(z), cols_(static_cast(cols)) {} - inline void operator()(T& x_norm, T& y_norm) const { - size_t x_offset = &x_norm - x_norm_; - size_t y_offset = &y_norm - y_norm_; - - auto* x = x_ + cols_ * x_offset; - - T xx = 0, xy = 0; - T yy = 0; + inline HOSTDEVICE void operator()(size_t offset) const { + auto* x = x_ + cols_ * offset; + T xx = 0, xy = 0, yy = 0; if (same_row) { - auto* y = y_ + cols_ * y_offset; + auto* y = y_ + cols_ * offset; for (size_t i = 0; i < cols_; ++i) { xx += x[i] * x[i]; yy += y[i] * y[i]; @@ -55,21 +57,20 @@ struct CosSimFunctor { } xx = sqrt(xx); yy = sqrt(yy); - x_norm_[x_offset] = xx; - y_norm_[y_offset] = yy; - z_[x_offset] = xy / (xx * yy); + y_norm_[offset] = yy; + x_norm_[offset] = xx; + z_[offset] = xy / (xx * yy); } else { // This can be wrote in a better way. 
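For reference, here is a minimal NumPy sketch of the row-wise cosine similarity this functor computes, covering both the same-row case and the case where Y is a single row broadcast against every row of X. It is an illustration only, not part of the patch; the function name and shapes are assumptions.

    import numpy as np

    def cos_sim_forward(x, y):
        # x: (rows_x, cols); y: same shape, or (1, cols) to be broadcast over rows of x
        x_norm = np.sqrt((x * x).sum(axis=1))   # per-row ||x||
        y_norm = np.sqrt((y * y).sum(axis=1))   # per-row ||y|| (length 1 when y is broadcast)
        xy = (x * y).sum(axis=1)                # NumPy broadcasting handles the single-row y
        z = xy / (x_norm * y_norm)
        return z, x_norm, y_norm

    # Per-row gradient w.r.t. x, matching CosSimGradFunctor / CosSimDxFunctor:
    #   dx = dz * (y / (||x|| * ||y||) - z * x / ||x||**2)
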
- auto* y = y_; for (size_t i = 0; i < cols_; ++i) { xx += x[i] * x[i]; - yy += y[i] * y[i]; // only need - xy += x[i] * y[i]; + yy += y_[i] * y_[i]; // only need + xy += x[i] * y_[i]; } xx = sqrt(xx); yy = sqrt(yy); - x_norm_[x_offset] = xx; y_norm_[0] = yy; - z_[x_offset] = xy / (xx * yy); + x_norm_[offset] = xx; + z_[offset] = xy / (xx * yy); } } @@ -104,14 +105,12 @@ class CosSimKernel : public framework::OpKernel { CosSimFunctor functor( in_x->data(), in_y->data(), out_x_norm->data(), out_y_norm->data(), out_z->data(), cols); - ForEachZip(out_x_norm->data(), out_x_norm->data() + rows_x, - out_y_norm->data(), functor); + ForEachZip(rows_x, functor); } else { CosSimFunctor functor( in_x->data(), in_y->data(), out_x_norm->data(), out_y_norm->data(), out_z->data(), cols); - ForEachZip(out_x_norm->data(), out_x_norm->data() + rows_x, - out_y_norm->data(), functor); + ForEachZip(rows_x, functor); } } }; @@ -129,19 +128,15 @@ struct CosSimGradFunctor { dx_(dx), cols_(static_cast(cols)) {} - inline void operator()(const T& x_norm, const T& y_norm) const { - size_t x_offset = &x_norm - x_norm_; - size_t y_offset = &y_norm - y_norm_; + inline HOSTDEVICE void operator()(size_t offset) const { + auto x_norm_square = x_norm_[offset] * x_norm_[offset]; + auto xy_norm_prod = x_norm_[offset] * y_norm_[offset]; + auto dz = dz_[offset]; + auto z = z_[offset]; - auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset]; - auto xy_norm_prod = x_norm_[x_offset] * y_norm_[y_offset]; - auto dz = dz_[x_offset]; - auto z = z_[x_offset]; - - auto* dx = dx_ + cols_ * x_offset; - auto* x = x_ + cols_ * x_offset; - - auto* y = y_ + cols_ * y_offset; + auto* dx = dx_ + cols_ * offset; + auto* x = x_ + cols_ * offset; + auto* y = y_ + cols_ * offset; auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; auto reciprocal_x_norm_square = 1 / x_norm_square; @@ -161,10 +156,10 @@ struct CosSimGradFunctor { const size_t cols_; }; -template +template struct CosSimDxFunctor { CosSimDxFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dx, T* dy, int cols) + const T* z, const T* dz, T* dx, int cols) : x_norm_(x_norm), y_norm_(y_norm), x_(x), @@ -172,37 +167,23 @@ struct CosSimDxFunctor { z_(z), dz_(dz), dx_(dx), - dy_(dy), cols_(static_cast(cols)) {} - inline void operator()(const T& x_norm, const T& y_norm) const { - size_t x_offset = &x_norm - x_norm_; - - auto xy_norm_prod = x_norm_[x_offset] * y_norm_[0]; - auto dz = dz_[x_offset]; - auto z = z_[x_offset]; - auto* x = x_ + cols_ * x_offset; + inline HOSTDEVICE void operator()(size_t offset) const { + auto xy_norm_prod = x_norm_[offset] * y_norm_[0]; + auto dz = dz_[offset]; + auto z = z_[offset]; + auto* x = x_ + cols_ * offset; auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; + auto x_norm_square = x_norm_[offset] * x_norm_[offset]; + auto* dx = dx_ + cols_ * offset; + auto reciprocal_x_norm_square = 1 / x_norm_square; - if (Dx) { - auto x_norm_square = x_norm_[x_offset] * x_norm_[x_offset]; - auto* dx = dx_ + cols_ * x_offset; - auto* x = x_ + cols_ * x_offset; - auto reciprocal_x_norm_square = 1 / x_norm_square; - for (size_t i = 0; i < cols_; ++i) { - dx[i] = dz * (y_[i] * reciprocal_xy_norm_prod - - z * x[i] * reciprocal_x_norm_square); - } - } else { - auto y_norm_square = y_norm_[0] * y_norm_[0]; - auto reciprocal_y_norm_square = 1 / y_norm_square; - for (size_t i = 0; i < cols_; ++i) { - dy_[i] += dz * (x[i] * reciprocal_xy_norm_prod - - z * y_[i] * reciprocal_y_norm_square); - } + for (size_t i = 0; i < cols_; ++i) { + 
dx[i] = dz * (y_[i] * reciprocal_xy_norm_prod - + z * x[i] * reciprocal_x_norm_square); } } - const T* x_norm_; const T* y_norm_; const T* x_; @@ -210,7 +191,6 @@ struct CosSimDxFunctor { const T* z_; const T* dz_; T* dx_; - T* dy_; const size_t cols_; }; @@ -239,33 +219,34 @@ class CosSimGradKernel : public framework::OpKernel { in_x_norm->data(), in_y_norm->data(), in_x->data(), in_y->data(), in_z->data(), in_grad_z->data(), out_grad_x->mutable_data(context.GetPlace()), cols); - ForEachZip(in_x_norm->data(), in_x_norm->data() + rows_x, - in_y_norm->data(), functor); + ForEachZip(rows_x, functor); } if (out_grad_y) { CosSimGradFunctor functor( in_y_norm->data(), in_x_norm->data(), in_y->data(), in_x->data(), in_z->data(), in_grad_z->data(), out_grad_y->mutable_data(context.GetPlace()), cols); - ForEachZip(in_y_norm->data(), in_y_norm->data() + rows_x, - in_x_norm->data(), functor); + ForEachZip(rows_x, functor); } } else { if (out_grad_x) { - CosSimDxFunctor functor( + CosSimDxFunctor functor( in_x_norm->data(), in_y_norm->data(), in_x->data(), in_y->data(), in_z->data(), in_grad_z->data(), - out_grad_x->mutable_data(context.GetPlace()), nullptr, cols); - ForEachZip(in_x_norm->data(), in_x_norm->data() + rows_x, - in_y_norm->data(), functor); + out_grad_x->mutable_data(context.GetPlace()), cols); + ForEachZip(rows_x, functor); } if (out_grad_y) { - CosSimDxFunctor functor( + out_grad_y->mutable_data(context.GetPlace()); + math::SetConstant set_zero; + auto& dev_ctx = context.template device_context(); + set_zero(dev_ctx, out_grad_y, static_cast(0)); + + CosSimDyFunctor functor( in_x_norm->data(), in_y_norm->data(), in_x->data(), - in_y->data(), in_z->data(), in_grad_z->data(), nullptr, - out_grad_y->mutable_data(context.GetPlace()), cols); - ForEachZip(in_x_norm->data(), in_x_norm->data() + rows_x, - in_y_norm->data(), functor); + in_y->data(), in_z->data(), in_grad_z->data(), + out_grad_y->data(), cols); + ForEachZip(rows_x, functor); } } } From d48a0e4eae939f3615fabc9f86f11670fcfad6e3 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 27 Dec 2017 21:04:51 +0800 Subject: [PATCH 17/40] WIP: adding generic scattor functors --- .../operators/math/selected_rows_functor.cc | 47 +++++++++++++ .../operators/math/selected_rows_functor.cu | 67 +++++++++++++++++++ paddle/operators/math/selected_rows_functor.h | 47 +++++++++++++ 3 files changed, 161 insertions(+) diff --git a/paddle/operators/math/selected_rows_functor.cc b/paddle/operators/math/selected_rows_functor.cc index ab758d1e7f..21418ba4b0 100644 --- a/paddle/operators/math/selected_rows_functor.cc +++ b/paddle/operators/math/selected_rows_functor.cc @@ -179,6 +179,53 @@ template struct SelectedRowsAddToTensor; template struct SelectedRowsAddToTensor; template struct SelectedRowsAddToTensor; +// This is a separated namespace for manipulate SelectedRows typed +// data. Like merge duplicated rows, adding two SelectedRows etc. +// +// Another group of functors is called "scatter updates", which means +// use SelectedRows to update a dense tensor with different Ops, like +// add or mul. 
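As a rough illustration of the MergeAdd semantics described in the comment above: duplicated row ids in a SelectedRows input are collapsed into a unique, sorted row list, and the slices that share a row id are summed. A NumPy sketch under those assumptions (illustration only, not part of the patch):

    import numpy as np

    def merge_add(rows, values):
        # rows: possibly duplicated row ids; values: (len(rows), width) slices
        merged_rows = sorted(set(rows))
        out = np.zeros((len(merged_rows), values.shape[1]), dtype=values.dtype)
        for i, row_id in enumerate(rows):
            out[merged_rows.index(row_id)] += values[i]
        return merged_rows, out

    # e.g. rows [0, 4, 4, 7] merge to [0, 4, 7], with the two row-4 slices added together.
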
+namespace scatter { + +size_t FindPos(const std::vector& rows, int64_t value) { + return std::find(rows.begin(), rows.end(), value) - rows.begin(); +} + +template +struct MergeAdd { + void operator()(const platform::CPUDeviceContext& context, + const framework::SelectedRows& input, + framework::SelectedRows* out) { + auto input_rows = input.rows(); + std::set row_set(input_rows.begin(), input_rows.end()); + std::vector merge_rows(row_set.begin(), row_set.end()); + + auto input_width = input.value().dims()[1]; + // std::unique_ptr out{ + // new framework::SelectedRows()}; + out->set_rows(merge_rows); + out->set_height(input.height()); + out->mutable_value()->mutable_data( + framework::make_ddim( + {static_cast(merge_rows.size()), input_width}), + context.GetPlace()); + + math::SetConstant constant_functor; + constant_functor(context, out->mutable_value(), 0.0); + + auto* out_data = out->mutable_value()->data(); + auto* input_data = input.value().data(); + + for (size_t i = 0; i < input_rows.size(); i++) { + size_t out_i = FindPos(merge_rows, input_rows[i]); + for (int64_t j = 0; j < input_width; j++) { + out_data[out_i * input_width + j] += input_data[i * input_width + j]; + } + } + } +}; + +} // namespace scatter } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu index 9fddd97a36..b2c0fe7bc3 100644 --- a/paddle/operators/math/selected_rows_functor.cu +++ b/paddle/operators/math/selected_rows_functor.cu @@ -222,6 +222,73 @@ template struct SelectedRowsAddToTensor; template struct SelectedRowsAddToTensor; template struct SelectedRowsAddToTensor; template struct SelectedRowsAddToTensor; + +namespace scatter { + +template +__global__ void MergeAddKernel(const T* input, const int64_t* input_rows, + T* out, const int64_t* out_rows, + size_t out_rows_size, int64_t row_numel) { + const int ty = blockIdx.y; + int tid = threadIdx.x; + __shared__ size_t out_idx; + + if (tid == 0) { + for (size_t i = 0; i < out_rows_size; i++) { + if (input_rows[ty] == out_rows[i]) { + out_idx = i; + } + } + } + + __syncthreads(); + + input += ty * row_numel; + out += out_idx * row_numel; + for (int index = tid; index < row_numel; index += block_size) { + paddle::platform::CudaAtomicAdd(out + index, input[index]); + } +} + +template +struct MergeAdd { + void operator()(const platform::GPUDeviceContext& context, + const framework::SelectedRows& input, + framework::SelectedRows* out) { + auto input_rows = input.rows(); + std::set row_set(input_rows.begin(), input_rows.end()); + std::vector merge_rows(row_set.begin(), row_set.end()); + + auto input_width = input.value().dims()[1]; + // std::unique_ptr out{ + // new framework::SelectedRows()}; + out->set_rows(merge_rows); + out->set_height(input.height()); + out->mutable_value()->mutable_data( + framework::make_ddim( + {static_cast(merge_rows.size()), input_width}), + context.GetPlace()); + + math::SetConstant constant_functor; + constant_functor(context, out->mutable_value(), 0.0); + + auto* out_data = out->mutable_value()->data(); + auto* input_data = input.value().data(); + + const int block_size = 256; + dim3 threads(block_size, 1); + dim3 grid1(1, input_rows.size()); + + MergeAddKernel< + T, 256><<(context) + .stream()>>>(input_data, input.rows().data(), out_data, + out->rows().data(), out->rows().size(), + input_width); + } +}; + +} // namespace scatter } // namespace math } // namespace operators } // namespace paddle diff --git 
a/paddle/operators/math/selected_rows_functor.h b/paddle/operators/math/selected_rows_functor.h index 1149075abf..8adfca77f6 100644 --- a/paddle/operators/math/selected_rows_functor.h +++ b/paddle/operators/math/selected_rows_functor.h @@ -52,6 +52,53 @@ struct SelectedRowsAddToTensor { framework::Tensor* input2); }; +namespace scatter { +// functors for manuplating SelectedRows data + +template +struct MergeAdd { + // unary functor, merge by adding duplicated rows in + // the input SelectedRows object. + void operator()(const DeviceContext& context, + const framework::SelectedRows& input, + framework::SelectedRows* out); +}; + +template +struct Add { + void operator()(const DeviceContext& context, + const framework::SelectedRows& input1, + const framework::SelectedRows& input2, + framework::SelectedRows* out) { + out->set_rows(input1->rows()); + out->set_height(input1->height()); + out->mutable_value()->mutable_data(input1->value().dims(), + context.GetPlace()); + auto e_out = framework::EigenVector::Flatten(*(out->mutable_value())); + auto e_in1 = framework::EigenVector::Flatten(input1->value()); + auto e_in2 = framework::EigenVector::Flatten(input2->value()); + e_out.device(*context.eigen_device()) = e_in1 + e_in2; + } +}; + +template +struct Mul { + void operator()(const DeviceContext& context, + const framework::SelectedRows& input1, + const framework::SelectedRows& input2, + framework::SelectedRows* out) { + out->set_rows(input1->rows()); + out->set_height(input1->height()); + out->mutable_value()->mutable_data(input1->value().dims(), + context.GetPlace()); + auto e_out = framework::EigenVector::Flatten(*(out->mutable_value())); + auto e_in1 = framework::EigenVector::Flatten(input1->value()); + auto e_in2 = framework::EigenVector::Flatten(input2->value()); + e_out.device(*context.eigen_device()) = e_in1 * e_in2; + } +}; + +} // namespace scatter } // namespace math } // namespace operators } // namespace paddle From 74b122889cbce2aa3add92784d0b4a621abfdf45 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 27 Dec 2017 21:08:40 +0800 Subject: [PATCH 18/40] wip --- paddle/operators/math/selected_rows_functor.h | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/paddle/operators/math/selected_rows_functor.h b/paddle/operators/math/selected_rows_functor.h index 8adfca77f6..eecd5e5362 100644 --- a/paddle/operators/math/selected_rows_functor.h +++ b/paddle/operators/math/selected_rows_functor.h @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include "paddle/framework/eigen.h" #include "paddle/framework/selected_rows.h" #include "paddle/platform/device_context.h" @@ -70,13 +71,13 @@ struct Add { const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* out) { - out->set_rows(input1->rows()); - out->set_height(input1->height()); - out->mutable_value()->mutable_data(input1->value().dims(), + out->set_rows(input1.rows()); + out->set_height(input1.height()); + out->mutable_value()->mutable_data(input1.value().dims(), context.GetPlace()); auto e_out = framework::EigenVector::Flatten(*(out->mutable_value())); - auto e_in1 = framework::EigenVector::Flatten(input1->value()); - auto e_in2 = framework::EigenVector::Flatten(input2->value()); + auto e_in1 = framework::EigenVector::Flatten(input1.value()); + auto e_in2 = framework::EigenVector::Flatten(input2.value()); e_out.device(*context.eigen_device()) = e_in1 + e_in2; } }; @@ -87,13 +88,13 @@ struct Mul { const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* out) { - out->set_rows(input1->rows()); - out->set_height(input1->height()); - out->mutable_value()->mutable_data(input1->value().dims(), + out->set_rows(input1.rows()); + out->set_height(input1.height()); + out->mutable_value()->mutable_data(input1.value().dims(), context.GetPlace()); auto e_out = framework::EigenVector::Flatten(*(out->mutable_value())); - auto e_in1 = framework::EigenVector::Flatten(input1->value()); - auto e_in2 = framework::EigenVector::Flatten(input2->value()); + auto e_in1 = framework::EigenVector::Flatten(input1.value()); + auto e_in2 = framework::EigenVector::Flatten(input2.value()); e_out.device(*context.eigen_device()) = e_in1 * e_in2; } }; From f5c2d175ae105e8938e8343068eff31db5745c19 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Thu, 28 Dec 2017 10:25:18 +0800 Subject: [PATCH 19/40] Refine --- paddle/framework/executor.cc | 9 +++++---- paddle/framework/tensor_impl.h | 13 +++++++++++-- paddle/framework/variable.h | 1 + paddle/operators/fill_constant_op.cc | 1 + paddle/operators/shrink_rnn_memory_op.cc | 5 +++-- paddle/operators/sum_op.h | 4 +--- paddle/operators/tensor_array_read_write_op.cc | 2 +- paddle/operators/while_op.cc | 13 +++++++++++++ 8 files changed, 36 insertions(+), 12 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 9ee2ddb7c3..fe9a42ace0 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -59,15 +59,16 @@ static void CreateTensor(Variable* var, proto::VarDesc::VarType var_type) { static void CheckTensorNANOrInf(const std::string& name, const framework::Tensor& tensor) { - if (tensor.type().hash_code() != typeid(float).hash_code() && - tensor.type().hash_code() != typeid(double).hash_code()) { + if (tensor.memory_size() == 0) { return; } - if (tensor.memory_size() == 0) { + if (tensor.type().hash_code() != typeid(float).hash_code() && + tensor.type().hash_code() != typeid(double).hash_code()) { return; } PADDLE_ENFORCE(!framework::HasInf(tensor), "Tensor %s has Inf", name); - PADDLE_ENFORCE(!framework::HasNAN(tensor), "Tensor %s has NAN", name); + PADDLE_ENFORCE(!framework::HasNAN(tensor), "Tensor %s has NAN, %p", name, + &tensor); } void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 6c6f298edc..0161ed8c47 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -134,8 +134,17 @@ 
inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { #endif offset_ = 0; } - return reinterpret_cast(reinterpret_cast(holder_->ptr()) + - offset_); + void* buf = reinterpret_cast( + reinterpret_cast(holder_->ptr()) + offset_); + if (type.hash_code() == typeid(float).hash_code() || + type.hash_code() == typeid(double).hash_code()) { + float* tmp = (float*)(buf); + for (int64_t i = 0; i < numel(); ++i) { + tmp[i] = NAN; + } + } + + return buf; } inline void* Tensor::mutable_data(platform::Place place) { diff --git a/paddle/framework/variable.h b/paddle/framework/variable.h index e5a94759f9..3720393601 100644 --- a/paddle/framework/variable.h +++ b/paddle/framework/variable.h @@ -35,6 +35,7 @@ class Variable { template T* GetMutable() { if (!IsType()) { + VLOG(10) << "Resetting " << *this->name_; holder_.reset(new PlaceholderImpl(new T())); } return static_cast(holder_->Ptr()); diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc index dcd43a30c8..196c380c73 100644 --- a/paddle/operators/fill_constant_op.cc +++ b/paddle/operators/fill_constant_op.cc @@ -51,6 +51,7 @@ class FillConstantOp : public framework::OperatorBase { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(dev_place); + VLOG(10) << "FillConstant to " << &out; math::set_constant(dev_ctx, &out, value); } }; diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/operators/shrink_rnn_memory_op.cc index e5ef0740b6..9ef473e726 100644 --- a/paddle/operators/shrink_rnn_memory_op.cc +++ b/paddle/operators/shrink_rnn_memory_op.cc @@ -116,9 +116,10 @@ class ShrinkRNNMemoryGradOp : public ArrayOp { auto height = dout_tensor.dims()[0]; auto slice = dx_tensor.Slice(0, static_cast(height)); framework::CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx, &slice); - if (dx_tensor.dims()[0] < height) { + VLOG(10) << dx_tensor.dims()[0] << ", " << height; + if (dx_tensor.dims()[0] > height) { auto rest_tensor = dx_tensor.Slice( - static_cast(height), static_cast(dout_tensor.dims()[0])); + static_cast(height), static_cast(dx_tensor.dims()[0])); math::set_constant(dev_ctx, &rest_tensor, 0.0f); } } diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index eaa36aa1ae..d1277d3edd 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -38,11 +38,9 @@ class SumKernel : public framework::OpKernel { if (out_var->IsType()) { auto *out = context.Output("Out"); - out->mutable_data(context.GetPlace()); - auto result = EigenVector::Flatten(*out); - if (!in_place) { + out->mutable_data(context.GetPlace()); math::SetConstant constant_functor; constant_functor(context.template device_context(), out, 0.0); diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/operators/tensor_array_read_write_op.cc index 53e38ec703..d5ff3e3fce 100644 --- a/paddle/operators/tensor_array_read_write_op.cc +++ b/paddle/operators/tensor_array_read_write_op.cc @@ -130,9 +130,9 @@ class ReadFromArrayOp : public ArrayOp { auto &x_array = x->Get(); auto *out = scope.FindVar(Output("Out")); PADDLE_ENFORCE(out != nullptr, "Out must be set"); - auto *out_tensor = out->GetMutable(); size_t offset = GetOffset(scope, place); if (offset < x_array.size()) { + auto *out_tensor = out->GetMutable(); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc index 728ef60794..322270c829 100644 --- 
a/paddle/operators/while_op.cc +++ b/paddle/operators/while_op.cc @@ -194,14 +194,27 @@ class WhileGradOp : public framework::OperatorBase { } } + auto check_var_no_nan = [](const framework::Scope &scope, + const std::string &var_name) { + auto *var = scope.FindVar(var_name); + if (var->IsType()) { + VLOG(10) << "Checking " << var_name; + PADDLE_ENFORCE(!framework::HasNAN(var->Get()), + "%s has NAN", var_name); + } + }; + check_var_no_nan(cur_scope, inside_grad_name); auto new_inside_name = cur_scope.Rename(inside_grad_name); + check_var_no_nan(cur_scope, new_inside_name); auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); sum_op->Run(cur_scope, dev_place); + check_var_no_nan(cur_scope, pg_names[param_id]); cur_scope.Rename(new_inside_name, inside_grad_name); } } + VLOG(1) << "Complete WhileOpGrad"; } }; From de26ae416cce48705c930fcbeecd4e556e57a420 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 27 Dec 2017 23:51:23 +0800 Subject: [PATCH 20/40] add gpu code --- paddle/operators/cos_sim_op.cc | 50 +++++++++----------------- paddle/operators/cos_sim_op.cu | 66 +++++++++++++++++----------------- paddle/operators/cos_sim_op.h | 20 +++++------ 3 files changed, 59 insertions(+), 77 deletions(-) diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 80e0780030..77492e60f2 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -151,42 +151,26 @@ class CosSimOpGrad : public framework::OperatorWithKernel { template struct CosSimDyFunctor { - CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dy, int cols) - : x_norm_(x_norm), - y_norm_(y_norm), - x_(x), - y_(y), - z_(z), - dz_(dz), - dy_(dy), - cols_(static_cast(cols)) {} - - inline HOSTDEVICE void operator()(size_t offset) const { - auto xy_norm_prod = x_norm_[offset] * y_norm_[0]; - auto dz = dz_[offset]; - auto z = z_[offset]; - auto* x = x_ + cols_ * offset; - auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; - - auto y_norm_square = y_norm_[0] * y_norm_[0]; - auto reciprocal_y_norm_square = 1 / y_norm_square; - for (size_t i = 0; i < cols_; ++i) { - dy_[i] += dz * (x[i] * reciprocal_xy_norm_prod - - z * y_[i] * reciprocal_y_norm_square); + inline void operator()(const platform::CPUDeviceContext& ctx, const T* x_norm, + const T* y_norm, const T* x, const T* y, const T* z, + const T* dz, const size_t rows, const size_t cols, + T* dy) const { + for (size_t offset = 0; offset < rows; ++offset) { + auto xy_norm_prod = x_norm[offset] * y_norm[0]; + auto dz_data = dz[offset]; + auto z_data = z[offset]; + auto* x_data = x + cols * offset; + auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; + + auto y_norm_square = y_norm[0] * y_norm[0]; + auto reciprocal_y_norm_square = 1 / y_norm_square; + for (size_t i = 0; i < cols; ++i) { + dy[i] += dz_data * (x_data[i] * reciprocal_xy_norm_prod - + z_data * y[i] * reciprocal_y_norm_square); + } } } - - const T* x_norm_; - const T* y_norm_; - const T* x_; - const T* y_; - const T* z_; - const T* dz_; - T* dy_; - const size_t cols_; }; - } // namespace operators } // namespace paddle diff --git a/paddle/operators/cos_sim_op.cu b/paddle/operators/cos_sim_op.cu index 88f49c1b14..42194d7a05 100644 --- a/paddle/operators/cos_sim_op.cu +++ b/paddle/operators/cos_sim_op.cu @@ -20,45 +20,45 @@ namespace paddle { namespace operators { template -struct CosSimDyFunctor { - CosSimDyFunctor(const T* 
x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dy, int cols) - : x_norm_(x_norm), - y_norm_(y_norm), - x_(x), - y_(y), - z_(z), - dz_(dz), - dy_(dy), - cols_(static_cast(cols)) {} - - inline HOSTDEVICE void operator()(size_t offset) const { - auto xy_norm_prod = x_norm_[offset] * y_norm_[0]; - auto dz = dz_[offset]; - auto z = z_[offset]; - auto* x = x_ + cols_ * offset; - auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; +__global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x, + const T* y, const T* z, const T* dz, + const size_t rows, const size_t cols, T* dy) { + int grid_size = blockDim.x * gridDim.x; + T y_norm_data = y_norm[0]; + for (int offset = blockIdx.x * blockDim.x + threadIdx.x; offset < rows; + offset += grid_size) { + T xy_norm_prod = x_norm[offset] * y_norm_data; + T dz_data = dz[offset]; + T z_data = z[offset]; + const T* x_data = x + cols * offset; + T reciprocal_xy_norm_prod = 1 / xy_norm_prod; - auto y_norm_square = y_norm_[0] * y_norm_[0]; - auto reciprocal_y_norm_square = 1 / y_norm_square; - for (size_t i = 0; i < cols_; ++i) { - T dy = dz * (x[i] * reciprocal_xy_norm_prod - - z * y_[i] * reciprocal_y_norm_square); - // platform::CudaAtomicAdd(dy_ + i, dy); - dy_[i] += dy; + T y_norm_square = y_norm_data * y_norm_data; + T reciprocal_y_norm_square = 1 / y_norm_square; + for (size_t i = 0; i < cols; ++i) { + T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod - + z_data * y[i] * reciprocal_y_norm_square); + platform::CudaAtomicAdd(dy + i, dy_data); } } +} - const T* x_norm_; - const T* y_norm_; - const T* x_; - const T* y_; - const T* z_; - const T* dz_; - T* dy_; - const size_t cols_; +template +struct CosSimDyFunctor { + inline void operator()(const platform::CUDADeviceContext& ctx, + const T* x_norm, const T* y_norm, const T* x, + const T* y, const T* z, const T* dz, const size_t rows, + const size_t cols, T* dy) const { + const int block_size = 512; + dim3 threads(block_size, 1); + dim3 grid(1, (rows + block_size - 1) / block_size); + CosSimDyKernel<<>>( + x_norm, y_norm, x, y, z, dz, rows, cols, dy); + } }; +template struct CosSimDyFunctor; + } // namespace operators } // namespace paddle diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index bb7c893a29..a913e576f9 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -193,9 +193,10 @@ struct CosSimDxFunctor { template struct CosSimDyFunctor { - CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dy, int cols); - inline HOSTDEVICE void operator()(size_t) const; + inline void operator()(const DeviceContext& ctx, const T* x_norm, + const T* y_norm, const T* x, const T* y, const T* z, + const T* dz, const size_t rows, const size_t cols, + T* dy) const; }; template @@ -255,14 +256,11 @@ class CosSimGradKernel : public framework::OpKernel { auto& dev_ctx = context.template device_context(); set_zero(dev_ctx, out_grad_y, static_cast(0)); - CosSimDyFunctor functor( - in_x_norm->data(), in_y_norm->data(), in_x->data(), - in_y->data(), in_z->data(), in_grad_z->data(), - out_grad_y->data(), cols); - platform::ForRange for_range( - static_cast(context.device_context()), - rows_x); - for_range(functor); + CosSimDyFunctor functor; + functor(dev_ctx, in_x_norm->data(), in_y_norm->data(), + in_x->data(), in_y->data(), in_z->data(), + in_grad_z->data(), static_cast(rows_x), + static_cast(cols), out_grad_y->data()); } } } From 96bc335216f418a8682e49f75ddaf50eedb71704 
Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Thu, 28 Dec 2017 12:49:02 +0800 Subject: [PATCH 21/40] Update --- paddle/framework/variable.h | 1 - paddle/operators/sum_op.h | 6 ++++-- python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py | 5 +++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/paddle/framework/variable.h b/paddle/framework/variable.h index 3720393601..e5a94759f9 100644 --- a/paddle/framework/variable.h +++ b/paddle/framework/variable.h @@ -35,7 +35,6 @@ class Variable { template T* GetMutable() { if (!IsType()) { - VLOG(10) << "Resetting " << *this->name_; holder_.reset(new PlaceholderImpl(new T())); } return static_cast(holder_->Ptr()); diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index d1277d3edd..552b48f608 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -37,10 +37,12 @@ class SumKernel : public framework::OpKernel { bool in_place = out_var == in_vars[0]; if (out_var->IsType()) { - auto *out = context.Output("Out"); - auto result = EigenVector::Flatten(*out); + auto *out = context.Output("Out"); if (!in_place) { out->mutable_data(context.GetPlace()); + } + auto result = EigenVector::Flatten(*out); + if (!in_place) { math::SetConstant constant_functor; constant_functor(context.template device_context(), out, 0.0); diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index 7f61b966fd..238fd1a8cb 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -242,7 +242,7 @@ class TestSimpleMul(unittest.TestCase): out = rnn() out = fluid.layers.sequence_pool(out, pool_type='last') loss = fluid.layers.mean(x=out) - fluid.backward.append_backward_ops(loss) + fluid.backward.append_backward(loss) cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) @@ -317,7 +317,7 @@ class TestSimpleMulWithMemory(unittest.TestCase): out = rnn() last = fluid.layers.sequence_pool(input=out, pool_type='last') loss = fluid.layers.mean(x=last) - fluid.backward.append_backward_ops(loss) + fluid.backward.append_backward(loss) cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) @@ -330,6 +330,7 @@ class TestSimpleMulWithMemory(unittest.TestCase): ], return_numpy=False)) last_by_py, = py_rnn.exe().values() + print w_g[0] self.assertTrue(numpy.allclose(last_np, last_by_py)) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) # print w_g_num[0], w_g[0] From 8bd759007a29aaece5e85b90362d8c886a8a7e5b Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Thu, 28 Dec 2017 12:53:00 +0800 Subject: [PATCH 22/40] refine CosSimDyFunctor --- paddle/operators/cos_sim_op.cu | 2 -- paddle/operators/cos_sim_op.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/paddle/operators/cos_sim_op.cu b/paddle/operators/cos_sim_op.cu index 42194d7a05..86dc04995a 100644 --- a/paddle/operators/cos_sim_op.cu +++ b/paddle/operators/cos_sim_op.cu @@ -57,8 +57,6 @@ struct CosSimDyFunctor { } }; -template struct CosSimDyFunctor; - } // namespace operators } // namespace paddle diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index a913e576f9..7641ca15f1 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -56,7 +56,7 @@ struct CosSimFunctor { tep_x = x[i]; tep_y = y_[i]; xx += tep_x * tep_x; - yy += tep_y * tep_y; // only need + yy += tep_y * tep_y; xy += tep_x * tep_y; } xx = sqrt(xx); From d2cb28413e5728b819cc93ec68a5c81d62f75007 Mon Sep 17 
00:00:00 2001 From: Yang Yu Date: Thu, 28 Dec 2017 16:58:39 +0800 Subject: [PATCH 23/40] Fix ALL RNN error --- paddle/operators/while_op.cc | 12 +++++++++++- paddle/pybind/tensor_py.h | 8 ++++---- .../v2/fluid/tests/test_dynrnn_gradient_check.py | 6 ++---- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc index 322270c829..341c163aa1 100644 --- a/paddle/operators/while_op.cc +++ b/paddle/operators/while_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include #include "paddle/framework/executor.h" #include "paddle/framework/lod_tensor_array.h" @@ -201,6 +202,15 @@ class WhileGradOp : public framework::OperatorBase { VLOG(10) << "Checking " << var_name; PADDLE_ENFORCE(!framework::HasNAN(var->Get()), "%s has NAN", var_name); + if (var->Get().type() == + typeid(float)) { // NOLINT + auto &tensor = var->Get(); + auto *buf = tensor.data(); + for (int64_t i = 0; i < tensor.numel(); ++i) { + PADDLE_ENFORCE(!std::isnan(buf[i])); + } + VLOG(10) << buf[0]; + } } }; check_var_no_nan(cur_scope, inside_grad_name); @@ -210,7 +220,7 @@ class WhileGradOp : public framework::OperatorBase { "sum", {{"X", {pg_names[param_id], new_inside_name}}}, {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); sum_op->Run(cur_scope, dev_place); - check_var_no_nan(cur_scope, pg_names[param_id]); + check_var_no_nan(scope, pg_names[param_id]); cur_scope.Rename(new_inside_name, inside_grad_name); } } diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index 4d5e73e2c2..6b4290972b 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -77,10 +77,10 @@ struct CastToPyBufferImpl { } else if (paddle::platform::is_cpu_place(tensor.place())) { dst_tensor = tensor; } - return py::buffer_info( - dst_tensor.mutable_data(dst_tensor.place()), - sizeof(CUR_TYPE), py::format_descriptor::format(), - (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides); + return py::buffer_info(dst_tensor.data(), sizeof(CUR_TYPE), + py::format_descriptor::format(), + (size_t)framework::arity(dst_tensor.dims()), + dims_outside, strides); } else { constexpr bool less = I + 1 < std::tuple_size>::value; return CastToPyBufferImpl()(tensor); diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index 238fd1a8cb..6569ccb9e6 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -294,7 +294,7 @@ class TestSimpleMulWithMemory(unittest.TestCase): assert isinstance(Out, Output) Out.out(o) - # @many_times(10) + @many_times(10) @prog_scope() def test_forward_backward(self): py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory() @@ -330,10 +330,8 @@ class TestSimpleMulWithMemory(unittest.TestCase): ], return_numpy=False)) last_by_py, = py_rnn.exe().values() - print w_g[0] - self.assertTrue(numpy.allclose(last_np, last_by_py)) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) - # print w_g_num[0], w_g[0] + self.assertTrue(numpy.allclose(last_np, last_by_py)) self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.1)) i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME) From f74dff97ea625a557146fe8c11313e5242c7cd0a Mon Sep 17 00:00:00 2001 From: guosheng Date: Thu, 28 Dec 2017 17:36:16 +0800 Subject: 
[PATCH 24/40] Refine the activation type in the GRU operator related --- paddle/operators/gru_op.h | 19 +++++++---- paddle/operators/math/detail/gru_cpu_kernel.h | 34 +++++++++---------- paddle/operators/math/detail/gru_gpu_kernel.h | 10 +++--- paddle/operators/math/detail/gru_kernel.h | 17 +++++----- paddle/operators/math/gru_compute.cc | 12 +++---- paddle/operators/math/gru_compute.cu | 10 +++--- paddle/operators/math/gru_compute.h | 21 ++++++------ 7 files changed, 60 insertions(+), 63 deletions(-) diff --git a/paddle/operators/gru_op.h b/paddle/operators/gru_op.h index c6228864d7..d773521259 100644 --- a/paddle/operators/gru_op.h +++ b/paddle/operators/gru_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include "paddle/operators/math/detail/activation_functions.h" #include "paddle/operators/math/gru_compute.h" #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/sequence2batch.h" @@ -70,7 +71,7 @@ class GRUKernel : public framework::OpKernel { } int frame_size = hidden_dims[1]; - math::hl_gru_value gru_value; + math::GRUMetaValue gru_value; gru_value.gate_weight = const_cast(weight_data); gru_value.state_weight = const_cast(weight_data + 2 * frame_size * frame_size); @@ -102,8 +103,10 @@ class GRUKernel : public framework::OpKernel { gru_value.reset_output_value = reset_hidden_prev_t.data(); math::GRUUnitFunctor::compute( dev_ctx, gru_value, frame_size, cur_batch_size, - math::ActiveType(context.Attr("activation")), - math::ActiveType(context.Attr("gate_activation"))); + math::detail::GetActivationType( + context.Attr("activation")), + math::detail::GetActivationType( + context.Attr("gate_activation"))); gru_value.prev_out_value = gru_value.output_value; } @@ -170,12 +173,12 @@ class GRUGradKernel : public framework::OpKernel { batch_hidden_grad.set_lod(batch_hidden->lod()); to_batch(dev_ctx, *hidden_grad, batch_hidden_grad, false, is_reverse); - math::hl_gru_value gru_value; + math::GRUMetaValue gru_value; gru_value.gate_weight = const_cast(weight_data); gru_value.state_weight = const_cast(weight_data + 2 * frame_size * frame_size); - math::hl_gru_grad gru_grad; + math::GRUMetaGrad gru_grad; if (weight_grad) { gru_grad.gate_weight_grad = weight_grad->mutable_data(context.GetPlace()); @@ -220,8 +223,10 @@ class GRUGradKernel : public framework::OpKernel { math::GRUUnitGradFunctor::compute( dev_ctx, gru_value, gru_grad, frame_size, cur_batch_size, - math::ActiveType(context.Attr("activation")), - math::ActiveType(context.Attr("gate_activation"))); + math::detail::GetActivationType( + context.Attr("activation")), + math::detail::GetActivationType( + context.Attr("gate_activation"))); } if (input_grad) { input_grad->mutable_data(context.GetPlace()); diff --git a/paddle/operators/math/detail/gru_cpu_kernel.h b/paddle/operators/math/detail/gru_cpu_kernel.h index 4c67dec9cb..a61b232f42 100644 --- a/paddle/operators/math/detail/gru_cpu_kernel.h +++ b/paddle/operators/math/detail/gru_cpu_kernel.h @@ -28,7 +28,7 @@ template void hl_naive_gru_forward_reset_output(OpResetOutput op_reset_output, T *gate_value, T *reset_output_value, T *prev_output_value, int frame_size, - activation_mode_t active_gate) { + ActivationType active_gate) { T r_value_update_gate; T r_value_reset_gate; T r_value_reset_output; @@ -56,7 +56,7 @@ template void hl_naive_gru_forward_final_output(OpFinalOutput op_final_output, T *gate_value, T *prev_output_value, T *output_value, int frame_size, - activation_mode_t active_node) { + ActivationType active_node) { T 
r_value_update_gate; T r_value_frame_state; T r_prev_out = 0; @@ -83,7 +83,7 @@ template void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output, T *gate_value, T *reset_output_value, T *prev_output_value, int frame_size, - activation_mode_t active_gate) { + ActivationType active_gate) { #ifdef __AVX__ __m256 r_value_update_gate; __m256 r_value_reset_gate; @@ -113,7 +113,7 @@ template void hl_avx_gru_forward_final_output(OpFinalOutput op_final_output, T *gate_value, T *prev_output_value, T *output_value, int frame_size, - activation_mode_t active_node) { + ActivationType active_node) { #ifdef __AVX__ __m256 r_value_update_gate; __m256 r_value_frame_state; @@ -140,9 +140,8 @@ void hl_avx_gru_forward_final_output(OpFinalOutput op_final_output, template inline void forward_reset_output(OpResetOutput op_reset_output, - hl_gru_value value, int frame_size, - int batch_size, - activation_mode_t active_gate) { + GRUMetaValue value, int frame_size, + int batch_size, ActivationType active_gate) { for (int b = 0; b < batch_size; b++) { if (OpResetOutput::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) { hl_avx_gru_forward_reset_output( @@ -164,9 +163,8 @@ inline void forward_reset_output(OpResetOutput op_reset_output, template inline void forward_final_output(OpFinalOutput op_final_output, - hl_gru_value value, int frame_size, - int batch_size, - activation_mode_t active_node) { + GRUMetaValue value, int frame_size, + int batch_size, ActivationType active_node) { for (int b = 0; b < batch_size; b++) { if (OpFinalOutput::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) { hl_avx_gru_forward_final_output(op_final_output, value.gate_value, @@ -191,7 +189,7 @@ void hl_naive_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value, T *gate_grad, T *prev_out_value, T *prev_out_grad, T *output_grad, int frame_size, - activation_mode_t active_node) { + ActivationType active_node) { T r_update_gate_value; T r_update_gate_grad; T r_frame_state_value; @@ -232,7 +230,7 @@ void hl_naive_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, T *gate_grad, T *prev_out_value, T *prev_out_grad, T *reset_output_grad, int frame_size, - activation_mode_t active_gate) { + ActivationType active_gate) { T r_update_gate_value; T r_update_gate_grad; T r_reset_gate_value; @@ -277,7 +275,7 @@ void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value, T *gate_grad, T *prev_out_value, T *prev_out_grad, T *output_grad, int frame_size, - activation_mode_t active_node) { + ActivationType active_node) { #ifdef __AVX__ __m256 r_update_gate_value; __m256 r_update_gate_grad; @@ -320,7 +318,7 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, T *gate_grad, T *prev_out_value, T *prev_out_grad, T *reset_output_grad, int frame_size, - activation_mode_t active_gate) { + ActivationType active_gate) { #ifdef __AVX__ __m256 r_update_gate_value; __m256 r_update_gate_grad; @@ -364,9 +362,9 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, template inline void backward_state_grad(OpStateGrad op_state_grad, - hl_gru_value value, hl_gru_grad grad, + GRUMetaValue value, GRUMetaGrad grad, int frame_size, int batch_size, - activation_mode_t active_node) { + ActivationType active_node) { for (int b = 0; b < batch_size; b++) { if (OpStateGrad::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) { hl_avx_gru_backward_state_grad( @@ -393,9 +391,9 @@ inline void backward_state_grad(OpStateGrad op_state_grad, template inline void 
backward_reset_grad(OpResetGrad op_reset_grad, - hl_gru_value value, hl_gru_grad grad, + GRUMetaValue value, GRUMetaGrad grad, int frame_size, int batch_size, - activation_mode_t active_gate) { + ActivationType active_gate) { for (int b = 0; b < batch_size; b++) { if (OpResetGrad::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) { hl_avx_gru_backward_reset_grad( diff --git a/paddle/operators/math/detail/gru_gpu_kernel.h b/paddle/operators/math/detail/gru_gpu_kernel.h index d2edcb7f25..1783d46096 100644 --- a/paddle/operators/math/detail/gru_gpu_kernel.h +++ b/paddle/operators/math/detail/gru_gpu_kernel.h @@ -19,8 +19,6 @@ limitations under the License. */ #include "paddle/platform/cuda_helper.h" #include "paddle/platform/device_context.h" -#include - namespace paddle { namespace operators { namespace math { @@ -35,7 +33,7 @@ __global__ void KeGruForwardResetOutput(OpResetOutput op_reset_output, T *gate_value, T *reset_output_value, T *prev_output_value, int frame_size, int batch_size, - activation_mode_t active_gate) { + ActivationType active_gate) { const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; if (frame_idx >= frame_size) return; @@ -74,7 +72,7 @@ __global__ void KeGruForwardFinalOutput(OpFinalOutput op_final_output, T *gate_value, T *prev_output_value, T *output_value, int frame_size, int batch_size, - activation_mode_t active_node) { + ActivationType active_node) { const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; if (frame_idx >= frame_size) return; int batch_idx = 0; @@ -111,7 +109,7 @@ __global__ void KeGruBackwardStateGrad(OpStateGrad op_state_grad, T *gate_value, T *gate_grad, T *prev_out_value, T *prev_out_grad, T *output_grad, int frame_size, int batch_size, - activation_mode_t active_node) { + ActivationType active_node) { const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; if (frame_idx >= frame_size) return; int batch_idx = 0; @@ -159,7 +157,7 @@ __global__ void KeGruBackwardResetGrad(OpResetGrad op_reset_grad, T *gate_value, T *gate_grad, T *prev_out_value, T *prev_out_grad, T *reset_output_grad, int frame_size, int batch_size, - activation_mode_t active_gate) { + ActivationType active_gate) { const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; if (frame_idx >= frame_size) return; int batch_idx = 0; diff --git a/paddle/operators/math/detail/gru_kernel.h b/paddle/operators/math/detail/gru_kernel.h index acd84be01d..4d8245cb5d 100644 --- a/paddle/operators/math/detail/gru_kernel.h +++ b/paddle/operators/math/detail/gru_kernel.h @@ -30,7 +30,7 @@ class gru_resetOutput { public: HOSTDEVICE void operator()(T &value_update_gate, T &value_reset_gate, T &prev_out, T &value_reset_output, - activation_mode_t act_gate) { + ActivationType act_gate) { value_update_gate = activation(value_update_gate, act_gate); value_reset_gate = activation(value_reset_gate, act_gate); value_reset_output = prev_out * value_reset_gate; @@ -43,7 +43,7 @@ class gru_resetOutput { HOSTDEVICE void operator()(__m256 &value_update_gate, __m256 &value_reset_gate, __m256 &prev_out, __m256 &value_reset_output, - activation_mode_t act_gate) { + ActivationType act_gate) { value_update_gate = activation(value_update_gate, act_gate); value_reset_gate = activation(value_reset_gate, act_gate); value_reset_output = _mm256_mul_ps(prev_out, value_reset_gate); @@ -57,7 +57,7 @@ class gru_finalOutput { public: HOSTDEVICE void operator()(T &value_update_gate, T &value_frame_state, T &prev_out, T &value_output, - activation_mode_t act_input) { + ActivationType act_input) { 
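// The two statements below compute the GRU output as an interpolation between
// the previous output and the activated frame state:
//   value_output = (1 - update_gate) * prev_out + update_gate * frame_state.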
value_frame_state = activation(value_frame_state, act_input); value_output = prev_out - (value_update_gate * prev_out) + (value_update_gate * value_frame_state); @@ -69,8 +69,7 @@ class gru_finalOutput { static const bool avx = true; HOSTDEVICE void operator()(__m256 &value_update_gate, __m256 &value_frame_state, __m256 &prev_out, - __m256 &value_output, - activation_mode_t act_input) { + __m256 &value_output, ActivationType act_input) { value_frame_state = activation(value_frame_state, act_input); value_output = _mm256_add_ps( _mm256_sub_ps(prev_out, _mm256_mul_ps(value_update_gate, prev_out)), @@ -89,7 +88,7 @@ class gru_stateGrad { HOSTDEVICE void operator()(T &value_update_gate, T &grad_update_gate, T &value_frame_state, T &grad_frame_state, T &value_prev_out, T &grad_prev_out, - T &grad_output, activation_mode_t act_input) { + T &grad_output, ActivationType act_input) { grad_update_gate = (grad_output * value_frame_state); grad_update_gate -= (grad_output * value_prev_out); grad_prev_out -= (grad_output * value_update_gate); @@ -107,7 +106,7 @@ class gru_stateGrad { __m256 &value_frame_state, __m256 &grad_frame_state, __m256 &value_prev_out, __m256 &grad_prev_out, __m256 &grad_output, - activation_mode_t act_input) { + ActivationType act_input) { grad_update_gate = _mm256_mul_ps(grad_output, value_frame_state); grad_update_gate = _mm256_sub_ps( grad_update_gate, _mm256_mul_ps(grad_output, value_prev_out)); @@ -128,7 +127,7 @@ class gru_resetGrad { HOSTDEVICE void operator()(T &value_update_gate, T &grad_update_gate, T &value_reset_gate, T &grad_reset_gate, T &value_prev_out, T &grad_prev_out, - T &grad_reset_output, activation_mode_t act_gate) { + T &grad_reset_output, ActivationType act_gate) { grad_reset_gate = (grad_reset_output * value_prev_out); grad_prev_out += (grad_reset_output * value_reset_gate); grad_update_gate = @@ -144,7 +143,7 @@ class gru_resetGrad { __m256 &grad_update_gate, __m256 &value_reset_gate, __m256 &grad_reset_gate, __m256 &value_prev_out, __m256 &grad_prev_out, __m256 &grad_reset_output, - activation_mode_t act_gate) { + ActivationType act_gate) { grad_reset_gate = _mm256_mul_ps(grad_reset_output, value_prev_out); grad_prev_out = _mm256_add_ps( grad_prev_out, _mm256_mul_ps(grad_reset_output, value_reset_gate)); diff --git a/paddle/operators/math/gru_compute.cc b/paddle/operators/math/gru_compute.cc index d570c68cd4..101ab85962 100644 --- a/paddle/operators/math/gru_compute.cc +++ b/paddle/operators/math/gru_compute.cc @@ -21,9 +21,9 @@ namespace math { template struct GRUUnitFunctor { static void compute(const platform::CPUDeviceContext &context, - hl_gru_value value, int frame_size, int batch_size, - activation_mode_t active_node, - activation_mode_t active_gate) { + GRUMetaValue value, int frame_size, int batch_size, + const detail::ActivationType active_node, + const detail::ActivationType active_gate) { #ifndef __NVCC__ if (value.prev_out_value) { math::gemm( @@ -51,10 +51,10 @@ struct GRUUnitFunctor { template struct GRUUnitGradFunctor { static void compute(const platform::CPUDeviceContext &context, - hl_gru_value value, hl_gru_grad grad, + GRUMetaValue value, GRUMetaGrad grad, int frame_size, int batch_size, - activation_mode_t active_node, - activation_mode_t active_gate) { + const detail::ActivationType active_node, + const detail::ActivationType active_gate) { #ifndef __NVCC__ detail::backward_state_grad(detail::backward::gru_stateGrad(), value, grad, frame_size, batch_size, active_node); diff --git a/paddle/operators/math/gru_compute.cu 
b/paddle/operators/math/gru_compute.cu index dd518cd1e4..aab3e2309b 100644 --- a/paddle/operators/math/gru_compute.cu +++ b/paddle/operators/math/gru_compute.cu @@ -21,9 +21,8 @@ namespace math { template struct GRUUnitFunctor { static void compute(const platform::CUDADeviceContext &context, - hl_gru_value value, int frame_size, int batch_size, - activation_mode_t active_node, - activation_mode_t active_gate) { + GRUMetaValue value, int frame_size, int batch_size, + ActivationType active_node, ActivationType active_gate) { auto stream = context.stream(); dim3 threads; dim3 grid; @@ -88,10 +87,9 @@ struct GRUUnitFunctor { template struct GRUUnitGradFunctor { static void compute(const platform::CUDADeviceContext &context, - hl_gru_value value, hl_gru_grad grad, + GRUMetaValue value, GRUMetaGrad grad, int frame_size, int batch_size, - activation_mode_t active_node, - activation_mode_t active_gate) { + ActivationType active_node, ActivationType active_gate) { auto stream = context.stream(); dim3 threads; dim3 grid; diff --git a/paddle/operators/math/gru_compute.h b/paddle/operators/math/gru_compute.h index ca1343cb2c..bf69147b50 100644 --- a/paddle/operators/math/gru_compute.h +++ b/paddle/operators/math/gru_compute.h @@ -11,7 +11,7 @@ limitations under the License. */ #pragma once -#include "paddle/operators/math/lstm_compute.h" +#include "paddle/operators/math/detail/activation_functions.h" #include "paddle/platform/device_context.h" #include "paddle/platform/enforce.h" @@ -19,9 +19,8 @@ namespace paddle { namespace operators { namespace math { -// TODO(guosheng): refine code style in gru_compute template -struct hl_gru_value { +struct GRUMetaValue { T *gate_weight; T *state_weight; T *gate_value; @@ -31,7 +30,7 @@ struct hl_gru_value { }; template -struct hl_gru_grad { +struct GRUMetaGrad { T *gate_weight_grad; T *state_weight_grad; T *gate_grad; @@ -42,18 +41,18 @@ struct hl_gru_grad { template struct GRUUnitFunctor { - static void compute(const DeviceContext &context, hl_gru_value value, + static void compute(const DeviceContext &context, GRUMetaValue value, int frame_size, int batch_size, - activation_mode_t active_node, - activation_mode_t active_gate); + const detail::ActivationType active_node, + const detail::ActivationType active_gate); }; template struct GRUUnitGradFunctor { - static void compute(const DeviceContext &context, hl_gru_value value, - hl_gru_grad grad, int frame_size, int batch_size, - activation_mode_t active_node, - activation_mode_t active_gate); + static void compute(const DeviceContext &context, GRUMetaValue value, + GRUMetaGrad grad, int frame_size, int batch_size, + const detail::ActivationType active_node, + const detail::ActivationType active_gate); }; } // namespace math From bb0427add03ce29b8013511f9cebf509e9de3585 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 28 Dec 2017 17:57:17 +0800 Subject: [PATCH 25/40] Add comments for functions in backward.py --- python/paddle/v2/fluid/backward.py | 77 ++++++++++++++++++++++++++---- 1 file changed, 67 insertions(+), 10 deletions(-) diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 6966cc7580..b3c1bab298 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -5,14 +5,17 @@ import collections __all__ = ['append_backward'] -def _rename_arg_(op_desc_list, old_name, new_name, begin_idx=None, - end_idx=None): +def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None): + """ + Traverse all ops in op_descs[begin_idx : 
end_idx], + if any op has inputs/outputs named "old_name", rename it as 'new_name' + """ if begin_idx is None: begin_idx = 0 if end_idx is None: - end_idx = len(op_desc_list) + end_idx = len(op_descs) for i in range(begin_idx, end_idx): - op_desc = op_desc_list[i] + op_desc = op_descs[i] if isinstance(op_desc, tuple): op_desc = op_desc[0] op_desc.rename_input(old_name, new_name) @@ -20,6 +23,9 @@ def _rename_arg_(op_desc_list, old_name, new_name, begin_idx=None, def _create_op_desc_(op_type, inputs, outputs, attrs): + """ + Create a C++ OpDesc object with specified inputs, outputs and attributes. + """ op_desc = core.OpDesc() op_desc.set_type(op_type) for para, args in inputs.iteritems(): @@ -34,9 +40,12 @@ def _create_op_desc_(op_type, inputs, outputs, attrs): return op_desc -def _infer_var_data_type_(var_name, block): - grad_var = block.desc.find_var(var_name.encode("ascii")) - fwd_name = _strip_grad_suffix_(var_name.encode("ascii")) +def _infer_var_data_type_(grad_var_name, block): + """ + Infer the data type of given grad variable + """ + grad_var = block.desc.find_var(grad_var_name.encode("ascii")) + fwd_name = _strip_grad_suffix_(grad_var_name.encode("ascii")) if block.desc.has_var_recursive(fwd_name): fwd_var = block.desc.find_var_recursive(fwd_name.encode("ascii")) grad_var.set_dtype(fwd_var.dtype()) @@ -45,6 +54,9 @@ def _infer_var_data_type_(var_name, block): def _all_in_set_(cands, s): + """ + Test if all elements of 'cands' are in set 's' + """ for c in cands: if not c in s: return False @@ -52,18 +64,29 @@ def _all_in_set_(cands, s): def _strip_grad_suffix_(name): + """ + Strip the grad suffix from the given varibale name + e.g. x@GRAD ==> x + y@GRAD@RENAME@1 ==> y + """ pos = name.find(core.grad_var_suffix()) return name[:pos] if pos != -1 else name def _append_grad_suffix_(name): + """ + Append grad suffix to the given variable name + e.g. x ==> x@GRAD + """ return name + core.grad_var_suffix() def _addup_repetitive_outputs_(op_descs): - # In backward part, an variable my be the output of more than one ops. - # In this case, the variable should be the accumulation of all the outputs. - # We adopt adding `sum_op`s to implement the accumulate. + """ + In backward part, an variable may be the output of more than one ops. + In this case, the variable should be the accumulation of all the outputs. + `sum_op`s are added to implement the accumulate. + """ pending_sum_ops = [] var_rename_count = collections.defaultdict(int) renamed_vars = collections.defaultdict(list) @@ -109,6 +132,12 @@ def _addup_repetitive_outputs_(op_descs): def _remove_no_grad_branch_(op_descs, no_grad_set): + """ + Remove unnecessary grad ops + A grad op can be removed in two cases: + 1. all outputs of the grad op are in 'no_grad_set' + 2. (TODO) all grad inputs of the grad op are in 'no_grad_set' + """ # Remove ops whose outputs are all in no_grad_dict op_descs = filter( lambda op_desc: not _all_in_set_(op_desc.output_arg_names(), no_grad_set), @@ -133,6 +162,20 @@ def _append_backward_ops_(target, no_grad_dict, grad_to_var, callback=None): + """ + Create all grad ops, and insert them into given block + + Args: + target(Variable): the target variable of forward pass + block(Block): the block where forward ops are + target_block(Block): the block which is going to hold new generated grad ops + no_grad_dict(dict): + key(int) block index + val(set) a set of varibale names. 
These varibales have no gradient + grad_to_var(dict)(output argument): + key(str): grad variable name + val(str): corresponding forward variable name + """ grad_op_descs = [] program = block.program for op in reversed(block.ops): @@ -170,6 +213,20 @@ def _append_backward_ops_(target, def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): + """ + Create new variables required by backward pass. + + Args: + block(Block): the block where new variables will be created + start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created + grad_to_var(dict): + key(str): grad variable name + val(str): corresponding forward variable name + In most cases, this dict is generated by _append_backward_ops_() + grad_info_map(dict)(output argument): + key(str): forward variable name + val(tuple): a tuple of (str, int), str is the corresponding grad name, int is the block index + """ for op_idx in range(start_op_idx, block.desc.op_size()): op_desc = block.desc.op(op_idx) if op_desc.has_attr("sub_block"): From 23b53c48df461b11a2a39929e30c661fbc407aee Mon Sep 17 00:00:00 2001 From: guosheng Date: Thu, 28 Dec 2017 18:09:52 +0800 Subject: [PATCH 26/40] Delete the old activation type for LSTM and GRU operator --- paddle/operators/math/gru_compute.cu | 6 ++++-- paddle/operators/math/lstm_compute.h | 22 ---------------------- 2 files changed, 4 insertions(+), 24 deletions(-) diff --git a/paddle/operators/math/gru_compute.cu b/paddle/operators/math/gru_compute.cu index aab3e2309b..d5a0e630ea 100644 --- a/paddle/operators/math/gru_compute.cu +++ b/paddle/operators/math/gru_compute.cu @@ -22,7 +22,8 @@ template struct GRUUnitFunctor { static void compute(const platform::CUDADeviceContext &context, GRUMetaValue value, int frame_size, int batch_size, - ActivationType active_node, ActivationType active_gate) { + const detail::ActivationType active_node, + const detail::ActivationType active_gate) { auto stream = context.stream(); dim3 threads; dim3 grid; @@ -89,7 +90,8 @@ struct GRUUnitGradFunctor { static void compute(const platform::CUDADeviceContext &context, GRUMetaValue value, GRUMetaGrad grad, int frame_size, int batch_size, - ActivationType active_node, ActivationType active_gate) { + const detail::ActivationType active_node, + const detail::ActivationType active_gate) { auto stream = context.stream(); dim3 threads; dim3 grid; diff --git a/paddle/operators/math/lstm_compute.h b/paddle/operators/math/lstm_compute.h index 954762f922..e1ad6b64d2 100644 --- a/paddle/operators/math/lstm_compute.h +++ b/paddle/operators/math/lstm_compute.h @@ -22,14 +22,6 @@ namespace paddle { namespace operators { namespace math { -typedef enum { - HL_ACTIVATION_SIGMOID = 0, - HL_ACTIVATION_RELU = 1, - HL_ACTIVATION_TANH = 2, - HL_ACTIVATION_LINEAR = 3, - HL_ACTIVATION_END -} activation_mode_t; - template struct LstmMetaValue { T *gate_value; @@ -54,20 +46,6 @@ struct LstmMetaGrad { T *check_og_grad; }; -inline activation_mode_t ActiveType(const std::string &type) { - if (type == "sigmoid") { - return HL_ACTIVATION_SIGMOID; - } else if (type == "relu") { - return HL_ACTIVATION_RELU; - } else if (type == "tanh") { - return HL_ACTIVATION_TANH; - } else if (type == "linear" || type == "identity" || type == "") { - return HL_ACTIVATION_LINEAR; - } else { - PADDLE_THROW("Do not support activation type."); - } -} - template class LstmUnitFunctor { public: From 641b4c0fe6db944ffe47a3dbd8a88c7a966c41f1 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Fri, 29 Dec 2017 10:49:28 +0800 Subject: 
[PATCH 27/40] wip --- paddle/operators/adagrad_op.cc | 44 ++------ paddle/operators/adagrad_op.cu | 48 ++------ paddle/operators/adam_op.h | 17 ++- .../operators/math/selected_rows_functor.cc | 90 +++++++++++++-- .../operators/math/selected_rows_functor.cu | 106 ++++++++++++++++-- paddle/operators/math/selected_rows_functor.h | 74 ++++++++---- python/paddle/v2/fluid/tests/test_adam_op.py | 1 - 7 files changed, 251 insertions(+), 129 deletions(-) diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc index 052c793a01..c83318a272 100644 --- a/paddle/operators/adagrad_op.cc +++ b/paddle/operators/adagrad_op.cc @@ -105,48 +105,18 @@ struct SparseAdagradFunctor { const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { // 1. g_m.rows = set(g.rows) - auto grad_rows = grad.rows(); - std::set row_set(grad_rows.begin(), grad_rows.end()); - std::vector merge_rows(row_set.begin(), row_set.end()); - auto grad_width = grad.value().dims()[1]; - std::unique_ptr grad_merge{ - new framework::SelectedRows()}; - grad_merge->set_rows(merge_rows); - grad_merge->set_height(grad.height()); - grad_merge->mutable_value()->mutable_data( - framework::make_ddim( - {static_cast(merge_rows.size()), grad_width}), - context.GetPlace()); - - math::SetConstant constant_functor; - constant_functor(context, grad_merge->mutable_value(), 0.0); - - auto* grad_merge_data = grad_merge->mutable_value()->data(); - auto* grad_data = grad.value().data(); - - for (size_t i = 0; i < grad_rows.size(); i++) { - size_t grad_merge_i = FindPos(merge_rows, grad_rows[i]); - for (int64_t j = 0; j < grad_width; j++) { - grad_merge_data[grad_merge_i * grad_width + j] += - grad_data[i * grad_width + j]; - } - } + math::scatter::MergeAdd merge_func; + auto grad_merge = merge_func(context, grad); + auto& merge_rows = grad_merge.rows(); + auto* grad_merge_data = grad_merge.mutable_value()->template data(); // 2. m += g_m * g_m - std::unique_ptr grad_square{ - new framework::SelectedRows()}; - grad_square->set_rows(grad_merge->rows()); - grad_square->set_height(grad_merge->height()); - grad_square->mutable_value()->mutable_data(grad_merge->value().dims(), - context.GetPlace()); - auto gs = - framework::EigenVector::Flatten(*(grad_square->mutable_value())); - auto gm = framework::EigenVector::Flatten(grad_merge->value()); - gs.device(*context.eigen_device()) = gm * gm; + math::scatter::Mul sqare_func; + auto grad_square = sqare_func(context, grad_merge, grad_merge); math::SelectedRowsAddToTensor functor; - functor(context, *grad_square, moment); + functor(context, grad_square, moment); // 3. update parameter auto* lr = learning_rate.data(); diff --git a/paddle/operators/adagrad_op.cu b/paddle/operators/adagrad_op.cu index 585b2d9289..86b3dd860d 100644 --- a/paddle/operators/adagrad_op.cu +++ b/paddle/operators/adagrad_op.cu @@ -78,51 +78,17 @@ struct SparseAdagradFunctor { const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { // 1. 
g_m.rows = set(g.rows) - auto grad_rows = grad.rows(); - std::set row_set(grad_rows.begin(), grad_rows.end()); - std::vector merge_rows(row_set.begin(), row_set.end()); - auto grad_width = grad.value().dims()[1]; - std::unique_ptr grad_merge{ - new framework::SelectedRows()}; - grad_merge->set_rows(merge_rows); - grad_merge->set_height(grad.height()); - grad_merge->mutable_value()->mutable_data( - framework::make_ddim( - {static_cast(merge_rows.size()), grad_width}), - context.GetPlace()); - - math::SetConstant constant_functor; - constant_functor(context, grad_merge->mutable_value(), 0.0); - - auto* grad_merge_data = grad_merge->mutable_value()->data(); - auto* grad_data = grad.value().data(); - - const int block_size = 256; - dim3 threads(block_size, 1); - dim3 grid1(1, grad_rows.size()); - - MergeGradKernel< - T, 256><<(context) - .stream()>>>(grad_data, grad.rows().data(), - grad_merge_data, grad_merge->rows().data(), - grad_merge->rows().size(), grad_width); - + math::scatter::MergeAdd merge_func; + auto grad_merge = merge_func(context, grad); + auto* grad_merge_data = grad_merge.mutable_value()->template data(); + auto& merge_rows = grad_merge.rows; // 2. m += g_m * g_m - std::unique_ptr grad_square{ - new framework::SelectedRows()}; - grad_square->set_rows(grad_merge->rows()); - grad_square->set_height(grad_merge->height()); - grad_square->mutable_value()->mutable_data(grad_merge->value().dims(), - context.GetPlace()); - auto gs = - framework::EigenVector::Flatten(*(grad_square->mutable_value())); - auto gm = framework::EigenVector::Flatten(grad_merge->value()); - gs.device(*context.eigen_device()) = gm * gm; + math::scatter::Mul sqare_func; + auto grad_square = sqare_func(context, grad_merge, grad_merge); math::SelectedRowsAddToTensor functor; - functor(context, *grad_square, moment); + functor(context, grad_square, moment); // 3. update parameter auto* lr = learning_rate.data(); diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index 5facd0112f..3c4148ccc0 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -16,11 +16,14 @@ limitations under the License. */ #include // for sqrt in CPU and CUDA #include "paddle/framework/op_registry.h" #include "paddle/operators/detail/safe_ref.h" +#include "paddle/operators/math/selected_rows_functor.h" #include "paddle/platform/for_range.h" namespace paddle { namespace operators { +namespace scatter = paddle::operators::math::scatter; + template struct AdamFunctor { T beta1_; @@ -134,8 +137,6 @@ struct SparseAdamFunctor { mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); - // IMPORTANT: - // FIXME(typhoonzero): row id may be duplicate moment1_out_[rows_[i] * row_numel_ + j] = mom1; moment2_out_[rows_[i] * row_numel_ + j] = mom2; param_out_[rows_[i] * row_numel_ + j] = p; @@ -191,10 +192,14 @@ class AdamOpKernel : public framework::OpKernel { } else if (grad_var->IsType()) { auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); - auto& grad_tensor = grad.value(); + // merge duplicated rows if any. 
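+      // A sparse gradient may list the same row index more than once; merging
+      // first sums those duplicates so each row's moments and parameter are
+      // updated exactly once by the functor below.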
+ scatter::MergeAdd merge_func; + auto grad_merge = + merge_func(ctx.template device_context(), grad); + auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data(); - auto* rows = grad.rows().data(); - auto row_numel = grad_tensor.numel() / grad.rows().size(); + auto* rows = grad_merge.rows().data(); + auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); SparseAdamFunctor functor( beta1, beta2, epsilon, beta1_pow.template data(), @@ -206,7 +211,7 @@ class AdamOpKernel : public framework::OpKernel { param_out.template mutable_data(ctx.GetPlace()), rows, row_numel); platform::ForRange for_range( static_cast(ctx.device_context()), - grad.rows().size()); + grad_merge.rows().size()); for_range(functor); } else { PADDLE_THROW("Variable type not supported by adam_op"); diff --git a/paddle/operators/math/selected_rows_functor.cc b/paddle/operators/math/selected_rows_functor.cc index 21418ba4b0..c9f3c10c61 100644 --- a/paddle/operators/math/selected_rows_functor.cc +++ b/paddle/operators/math/selected_rows_functor.cc @@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/selected_rows_functor.h" +#include + #include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/selected_rows_functor.h" namespace paddle { namespace operators { @@ -193,27 +195,25 @@ size_t FindPos(const std::vector& rows, int64_t value) { template struct MergeAdd { - void operator()(const platform::CPUDeviceContext& context, - const framework::SelectedRows& input, - framework::SelectedRows* out) { + framework::SelectedRows operator()(const platform::CPUDeviceContext& context, + const framework::SelectedRows& input) { + framework::SelectedRows out; auto input_rows = input.rows(); std::set row_set(input_rows.begin(), input_rows.end()); std::vector merge_rows(row_set.begin(), row_set.end()); auto input_width = input.value().dims()[1]; - // std::unique_ptr out{ - // new framework::SelectedRows()}; - out->set_rows(merge_rows); - out->set_height(input.height()); - out->mutable_value()->mutable_data( + out.set_rows(merge_rows); + out.set_height(input.height()); + out.mutable_value()->mutable_data( framework::make_ddim( {static_cast(merge_rows.size()), input_width}), context.GetPlace()); math::SetConstant constant_functor; - constant_functor(context, out->mutable_value(), 0.0); + constant_functor(context, out.mutable_value(), 0.0); - auto* out_data = out->mutable_value()->data(); + auto* out_data = out.mutable_value()->data(); auto* input_data = input.value().data(); for (size_t i = 0; i < input_rows.size(); i++) { @@ -222,6 +222,74 @@ struct MergeAdd { out_data[out_i * input_width + j] += input_data[i * input_width + j]; } } + return out; + } +}; + +template struct MergeAdd; +template struct MergeAdd; +template struct MergeAdd; +template struct MergeAdd; + +template +struct UpdateToTensor { + framework::Tensor operator()(const platform::CPUDeviceContext& context, + const ScatterOps& op, + const framework::SelectedRows& input1, + framework::Tensor* input2) { + auto in1_height = input1.height(); + auto in2_dims = input2->dims(); + PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); + + auto& in1_value = input1.value(); + auto& in1_rows = input1.rows(); + + int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); + PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); + + auto* in1_data = 
in1_value.data(); + auto* input2_data = input2->data(); + + // FIXME(typhoonzero): use macro fix the below messy code. + switch (op) { + case ScatterOps::ASSIGN: + INLINE_FOR2(in1_rows.size(), in1_row_numel) + input2_data[in1_rows[i] * in1_row_numel + j] = + in1_data[i * in1_row_numel + j]; + break; + case ScatterOps::ADD: + INLINE_FOR2(in1_rows.size(), in1_row_numel) + input2_data[in1_rows[i] * in1_row_numel + j] += + in1_data[i * in1_row_numel + j]; + break; + case ScatterOps::SUB: + INLINE_FOR2(in1_rows.size(), in1_row_numel) + input2_data[in1_rows[i] * in1_row_numel + j] -= + in1_data[i * in1_row_numel + j]; + break; + case ScatterOps::SUBBY: + INLINE_FOR2(in1_rows.size(), in1_row_numel) + input2_data[in1_rows[i] * in1_row_numel + j] = + in1_data[i * in1_row_numel + j] - + input2_data[in1_rows[i] * in1_row_numel + j]; + break; + case ScatterOps::MUL: + INLINE_FOR2(in1_rows.size(), in1_row_numel) + input2_data[in1_rows[i] * in1_row_numel + j] *= + in1_data[i * in1_row_numel + j]; + break; + case ScatterOps::DIV: + INLINE_FOR2(in1_rows.size(), in1_row_numel) + input2_data[in1_rows[i] * in1_row_numel + j] /= + in1_data[i * in1_row_numel + j]; + break; + case ScatterOps::DIVBY: + INLINE_FOR2(in1_rows.size(), in1_row_numel) + input2_data[in1_rows[i] * in1_row_numel + j] = + in1_data[i * in1_row_numel + j] / + input2_data[in1_rows[i] * in1_row_numel + j]; + break; + } } }; diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu index b2c0fe7bc3..48413403db 100644 --- a/paddle/operators/math/selected_rows_functor.cu +++ b/paddle/operators/math/selected_rows_functor.cu @@ -252,27 +252,26 @@ __global__ void MergeAddKernel(const T* input, const int64_t* input_rows, template struct MergeAdd { - void operator()(const platform::GPUDeviceContext& context, - const framework::SelectedRows& input, - framework::SelectedRows* out) { + framework::SelectedRows operator()(const platform::GPUDeviceContext& context, + const framework::SelectedRows& input) { + framework::SelectedRows out; auto input_rows = input.rows(); std::set row_set(input_rows.begin(), input_rows.end()); std::vector merge_rows(row_set.begin(), row_set.end()); auto input_width = input.value().dims()[1]; - // std::unique_ptr out{ - // new framework::SelectedRows()}; - out->set_rows(merge_rows); - out->set_height(input.height()); - out->mutable_value()->mutable_data( + + out.set_rows(merge_rows); + out.set_height(input.height()); + out.mutable_value()->mutable_data( framework::make_ddim( {static_cast(merge_rows.size()), input_width}), context.GetPlace()); math::SetConstant constant_functor; - constant_functor(context, out->mutable_value(), 0.0); + constant_functor(context, out.mutable_value(), 0.0); - auto* out_data = out->mutable_value()->data(); + auto* out_data = out.mutable_value()->data(); auto* input_data = input.value().data(); const int block_size = 256; @@ -283,11 +282,96 @@ struct MergeAdd { T, 256><<(context) .stream()>>>(input_data, input.rows().data(), out_data, - out->rows().data(), out->rows().size(), + out.rows().data(), out.rows().size(), input_width); + return out; } }; +template struct MergeAdd; +template struct MergeAdd; +template struct MergeAdd; +template struct MergeAdd; + +template +__global__ void UpdateToTensorKernel(const T* selected_rows, + const int64_t* rows, const ScatterOps& op, + T* tensor_out, int64_t row_numel) { + const int ty = blockIdx.y; + int tid = threadIdx.x; + + selected_rows += ty * row_numel; + tensor_out += rows[ty] * row_numel; + // 
FIXME(typhoonzero): use macro fix the below messy code. + switch (op) { + case ScatterOps::ASSIGN: + for (int index = tid; index < row_numel; index += block_size) { + tensor_out[index] = selected_rows[index]; + } + break; + case ScatterOps::ADD: + for (int index = tid; index < row_numel; index += block_size) { + tensor_out[index] += selected_rows[index]; + } + break; + case ScatterOps::SUB: + for (int index = tid; index < row_numel; index += block_size) { + tensor_out[index] -= selected_rows[index]; + } + break; + case ScatterOps::SUBBY: + for (int index = tid; index < row_numel; index += block_size) { + tensor_out[index] = selected_rows[index] - tensor_out[index]; + } + break; + case ScatterOps::MUL: + for (int index = tid; index < row_numel; index += block_size) { + tensor_out[index] *= selected_rows[index]; + } + break; + case ScatterOps::DIV: + for (int index = tid; index < row_numel; index += block_size) { + tensor_out[index] /= selected_rows[index]; + } + break; + case ScatterOps::DIVBY: + for (int index = tid; index < row_numel; index += block_size) { + tensor_out[index] = selected_rows[index] / tensor_out[index]; + } + break; + } +} + +template +struct UpdateToTensor { + framework::Tensor operator()(const platform::GPUDeviceContext& context, + const ScatterOps& op, + const framework::SelectedRows& input1, + framework::Tensor* input2) { + // NOTE: Use SelectedRowsAddToTensor for better performance + // no additional MergeAdd called. + auto merged_in1 = MergeAdd()(context, input1); + + auto in1_height = merged_in1.height(); + auto in2_dims = input2->dims(); + PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); + + auto& in1_value = merged_in1.value(); + auto& in1_rows = merged_in1.rows(); + + int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); + PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); + + auto* in1_data = in1_value.data(); + auto* input2_data = input2->data(); + + dim3 threads(PADDLE_CUDA_NUM_THREADS, 1); + dim3 grid(1, in1_rows.size()); + UpdateToTensorKernel< + T, PADDLE_CUDA_NUM_THREADS><<>>( + in1_data, in1_rows.data(), op, in2_data, in1_row_numel); + } +}; } // namespace scatter } // namespace math } // namespace operators diff --git a/paddle/operators/math/selected_rows_functor.h b/paddle/operators/math/selected_rows_functor.h index eecd5e5362..d4bef72980 100644 --- a/paddle/operators/math/selected_rows_functor.h +++ b/paddle/operators/math/selected_rows_functor.h @@ -16,6 +16,10 @@ limitations under the License. */ #include "paddle/framework/selected_rows.h" #include "paddle/platform/device_context.h" +#define INLINE_FOR2(sizei, sizej) \ + for (int64_t i = 0; i < sizei; i++) \ + for (int64_t j = 0; j < sizej; j++) + namespace paddle { namespace operators { namespace math { @@ -55,50 +59,76 @@ struct SelectedRowsAddToTensor { namespace scatter { // functors for manuplating SelectedRows data - template struct MergeAdd { // unary functor, merge by adding duplicated rows in // the input SelectedRows object. 
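// Example (illustrative values only): input rows {0, 1, 0} with 1-wide values
// {{1}, {2}, {3}} merge to rows {0, 1} with values {{4}, {2}}; the merged rows
// come out sorted because a std::set is used to deduplicate them.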
- void operator()(const DeviceContext& context, - const framework::SelectedRows& input, - framework::SelectedRows* out); + framework::SelectedRows operator()(const DeviceContext& context, + const framework::SelectedRows& input); }; template struct Add { - void operator()(const DeviceContext& context, - const framework::SelectedRows& input1, - const framework::SelectedRows& input2, - framework::SelectedRows* out) { - out->set_rows(input1.rows()); - out->set_height(input1.height()); - out->mutable_value()->mutable_data(input1.value().dims(), - context.GetPlace()); - auto e_out = framework::EigenVector::Flatten(*(out->mutable_value())); + framework::SelectedRows operator()(const DeviceContext& context, + const framework::SelectedRows& input1, + const framework::SelectedRows& input2) { + framework::SelectedRows out; + out.set_rows(input1.rows()); + out.set_height(input1.height()); + out.mutable_value()->mutable_data(input1.value().dims(), + context.GetPlace()); + auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); auto e_in1 = framework::EigenVector::Flatten(input1.value()); auto e_in2 = framework::EigenVector::Flatten(input2.value()); e_out.device(*context.eigen_device()) = e_in1 + e_in2; + return out; } }; template struct Mul { - void operator()(const DeviceContext& context, - const framework::SelectedRows& input1, - const framework::SelectedRows& input2, - framework::SelectedRows* out) { - out->set_rows(input1.rows()); - out->set_height(input1.height()); - out->mutable_value()->mutable_data(input1.value().dims(), - context.GetPlace()); - auto e_out = framework::EigenVector::Flatten(*(out->mutable_value())); + // multiply two SelectedRows + framework::SelectedRows operator()(const DeviceContext& context, + const framework::SelectedRows& input1, + const framework::SelectedRows& input2) { + framework::SelectedRows out; + out.set_rows(input1.rows()); + out.set_height(input1.height()); + out.mutable_value()->mutable_data(input1.value().dims(), + context.GetPlace()); + auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); auto e_in1 = framework::EigenVector::Flatten(input1.value()); auto e_in2 = framework::EigenVector::Flatten(input2.value()); e_out.device(*context.eigen_device()) = e_in1 * e_in2; + return out; + } + // multiply scalar to SelectedRows + framework::SelectedRows operator()(const DeviceContext& context, + const framework::SelectedRows& input1, + const T input2) { + framework::SelectedRows out; + out.set_rows(input1.rows()); + out.set_height(input1.height()); + out.mutable_value()->mutable_data(input1.value().dims(), + context.GetPlace()); + auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); + auto e_in1 = framework::EigenVector::Flatten(input1.value()); + e_out.device(*context.eigen_device()) = input2 * e_in1; + return out; } }; +enum class ScatterOps { ASSIGN, ADD, SUB, SUBBY, MUL, DIV, DIVBY }; + +// out = seleted_rows_in / tensor +template +struct UpdateToTensor { + framework::Tensor operator()(const DeviceContext& context, + const ScatterOps& op, + const framework::SelectedRows& input1, + framework::Tensor* input2); +}; + } // namespace scatter } // namespace math } // namespace operators diff --git a/python/paddle/v2/fluid/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/test_adam_op.py index 3758ca457e..7dbc2fa085 100644 --- a/python/paddle/v2/fluid/tests/test_adam_op.py +++ b/python/paddle/v2/fluid/tests/test_adam_op.py @@ -285,7 +285,6 @@ class TestSparseAdamOp(unittest.TestCase): j = 0 while j < self.row_numel: pos = 
row_id * self.row_numel + j - print(actual[pos] - np_array[pos]) / actual[pos] self.assertLess((actual[pos] - np_array[pos]) / actual[pos], 0.00001) j += 1 From 0fd4a04abdc6f411ebb77d7a389108e951223c7e Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Fri, 29 Dec 2017 13:10:53 +0800 Subject: [PATCH 28/40] Remove debug codes --- paddle/framework/tensor_impl.h | 13 ++----------- paddle/operators/fill_constant_op.cc | 1 - paddle/operators/shrink_rnn_memory_op.cc | 1 - paddle/operators/while_op.cc | 23 ----------------------- 4 files changed, 2 insertions(+), 36 deletions(-) diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 0161ed8c47..6c6f298edc 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -134,17 +134,8 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { #endif offset_ = 0; } - void* buf = reinterpret_cast( - reinterpret_cast(holder_->ptr()) + offset_); - if (type.hash_code() == typeid(float).hash_code() || - type.hash_code() == typeid(double).hash_code()) { - float* tmp = (float*)(buf); - for (int64_t i = 0; i < numel(); ++i) { - tmp[i] = NAN; - } - } - - return buf; + return reinterpret_cast(reinterpret_cast(holder_->ptr()) + + offset_); } inline void* Tensor::mutable_data(platform::Place place) { diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc index 196c380c73..dcd43a30c8 100644 --- a/paddle/operators/fill_constant_op.cc +++ b/paddle/operators/fill_constant_op.cc @@ -51,7 +51,6 @@ class FillConstantOp : public framework::OperatorBase { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(dev_place); - VLOG(10) << "FillConstant to " << &out; math::set_constant(dev_ctx, &out, value); } }; diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/operators/shrink_rnn_memory_op.cc index 9ef473e726..b37269b471 100644 --- a/paddle/operators/shrink_rnn_memory_op.cc +++ b/paddle/operators/shrink_rnn_memory_op.cc @@ -116,7 +116,6 @@ class ShrinkRNNMemoryGradOp : public ArrayOp { auto height = dout_tensor.dims()[0]; auto slice = dx_tensor.Slice(0, static_cast(height)); framework::CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx, &slice); - VLOG(10) << dx_tensor.dims()[0] << ", " << height; if (dx_tensor.dims()[0] > height) { auto rest_tensor = dx_tensor.Slice( static_cast(height), static_cast(dx_tensor.dims()[0])); diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc index 341c163aa1..728ef60794 100644 --- a/paddle/operators/while_op.cc +++ b/paddle/operators/while_op.cc @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include #include "paddle/framework/executor.h" #include "paddle/framework/lod_tensor_array.h" @@ -195,36 +194,14 @@ class WhileGradOp : public framework::OperatorBase { } } - auto check_var_no_nan = [](const framework::Scope &scope, - const std::string &var_name) { - auto *var = scope.FindVar(var_name); - if (var->IsType()) { - VLOG(10) << "Checking " << var_name; - PADDLE_ENFORCE(!framework::HasNAN(var->Get()), - "%s has NAN", var_name); - if (var->Get().type() == - typeid(float)) { // NOLINT - auto &tensor = var->Get(); - auto *buf = tensor.data(); - for (int64_t i = 0; i < tensor.numel(); ++i) { - PADDLE_ENFORCE(!std::isnan(buf[i])); - } - VLOG(10) << buf[0]; - } - } - }; - check_var_no_nan(cur_scope, inside_grad_name); auto new_inside_name = cur_scope.Rename(inside_grad_name); - check_var_no_nan(cur_scope, new_inside_name); auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); sum_op->Run(cur_scope, dev_place); - check_var_no_nan(scope, pg_names[param_id]); cur_scope.Rename(new_inside_name, inside_grad_name); } } - VLOG(1) << "Complete WhileOpGrad"; } }; From fcd84c15303cac9573432a6ce4516c2d643064e8 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Fri, 29 Dec 2017 13:14:31 +0800 Subject: [PATCH 29/40] Comment debug code --- python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py index 6569ccb9e6..c02c59284e 100644 --- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py +++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py @@ -294,7 +294,8 @@ class TestSimpleMulWithMemory(unittest.TestCase): assert isinstance(Out, Output) Out.out(o) - @many_times(10) + # many_times used locally for debug. Make sure the calculation is stable. + # @many_times(10) @prog_scope() def test_forward_backward(self): py_rnn = TestSimpleMulWithMemory.SimpleMulWithMemory() From 1039c1e3b7b391963fe2e4f1dba22d3358104a98 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Fri, 29 Dec 2017 13:51:41 +0800 Subject: [PATCH 30/40] scatter optimizers --- paddle/operators/adagrad_op.cu | 10 +++-- .../operators/math/selected_rows_functor.cc | 7 ++-- .../operators/math/selected_rows_functor.cu | 38 ++++++++++--------- paddle/operators/math/selected_rows_functor.h | 7 ++-- 4 files changed, 32 insertions(+), 30 deletions(-) diff --git a/paddle/operators/adagrad_op.cu b/paddle/operators/adagrad_op.cu index 86b3dd860d..fed2e29367 100644 --- a/paddle/operators/adagrad_op.cu +++ b/paddle/operators/adagrad_op.cu @@ -79,12 +79,12 @@ struct SparseAdagradFunctor { framework::Tensor* moment, framework::Tensor* param) { // 1. g_m.rows = set(g.rows) auto grad_width = grad.value().dims()[1]; - math::scatter::MergeAdd merge_func; + math::scatter::MergeAdd merge_func; auto grad_merge = merge_func(context, grad); auto* grad_merge_data = grad_merge.mutable_value()->template data(); - auto& merge_rows = grad_merge.rows; + auto& merge_rows = grad_merge.rows(); // 2. 
m += g_m * g_m - math::scatter::Mul sqare_func; + math::scatter::Mul sqare_func; auto grad_square = sqare_func(context, grad_merge, grad_merge); math::SelectedRowsAddToTensor functor; @@ -95,11 +95,13 @@ struct SparseAdagradFunctor { auto* param_data = param->data(); auto* moment_data = moment->data(); + const int block_size = 256; + dim3 threads(block_size, 1); dim3 grid2(1, merge_rows.size()); SparseAdagradFunctorKernel< T, 256><<(context) - .stream()>>>(grad_merge_data, grad_merge->rows().data(), + .stream()>>>(grad_merge_data, grad_merge.rows().data(), lr, param_data, moment_data, grad_width, epsilon); } diff --git a/paddle/operators/math/selected_rows_functor.cc b/paddle/operators/math/selected_rows_functor.cc index c9f3c10c61..8a1ebb58c2 100644 --- a/paddle/operators/math/selected_rows_functor.cc +++ b/paddle/operators/math/selected_rows_functor.cc @@ -233,10 +233,9 @@ template struct MergeAdd; template struct UpdateToTensor { - framework::Tensor operator()(const platform::CPUDeviceContext& context, - const ScatterOps& op, - const framework::SelectedRows& input1, - framework::Tensor* input2) { + void operator()(const platform::CPUDeviceContext& context, + const ScatterOps& op, const framework::SelectedRows& input1, + framework::Tensor* input2) { auto in1_height = input1.height(); auto in2_dims = input2->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu index 48413403db..0ee456f9bc 100644 --- a/paddle/operators/math/selected_rows_functor.cu +++ b/paddle/operators/math/selected_rows_functor.cu @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include + #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/selected_rows_functor.h" #include "paddle/platform/cuda_helper.h" @@ -251,8 +253,8 @@ __global__ void MergeAddKernel(const T* input, const int64_t* input_rows, } template -struct MergeAdd { - framework::SelectedRows operator()(const platform::GPUDeviceContext& context, +struct MergeAdd { + framework::SelectedRows operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input) { framework::SelectedRows out; auto input_rows = input.rows(); @@ -288,10 +290,10 @@ struct MergeAdd { } }; -template struct MergeAdd; -template struct MergeAdd; -template struct MergeAdd; -template struct MergeAdd; +template struct MergeAdd; +template struct MergeAdd; +template struct MergeAdd; +template struct MergeAdd; template __global__ void UpdateToTensorKernel(const T* selected_rows, @@ -343,14 +345,14 @@ __global__ void UpdateToTensorKernel(const T* selected_rows, } template -struct UpdateToTensor { - framework::Tensor operator()(const platform::GPUDeviceContext& context, - const ScatterOps& op, - const framework::SelectedRows& input1, - framework::Tensor* input2) { +struct UpdateToTensor { + void operator()(const platform::CUDADeviceContext& context, + const ScatterOps& op, const framework::SelectedRows& input1, + framework::Tensor* input2) { // NOTE: Use SelectedRowsAddToTensor for better performance // no additional MergeAdd called. 
- auto merged_in1 = MergeAdd()(context, input1); + MergeAdd merge_func; + auto merged_in1 = merge_func(context, input1); auto in1_height = merged_in1.height(); auto in2_dims = input2->dims(); @@ -362,14 +364,14 @@ struct UpdateToTensor { int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); - auto* in1_data = in1_value.data(); - auto* input2_data = input2->data(); + auto* in1_data = in1_value.template data(); + auto* in2_data = input2->data(); - dim3 threads(PADDLE_CUDA_NUM_THREADS, 1); + dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1); dim3 grid(1, in1_rows.size()); - UpdateToTensorKernel< - T, PADDLE_CUDA_NUM_THREADS><<>>( - in1_data, in1_rows.data(), op, in2_data, in1_row_numel); + UpdateToTensorKernel<<< + grid, threads, 0, context.stream()>>>(in1_data, in1_rows.data(), op, + in2_data, in1_row_numel); } }; } // namespace scatter diff --git a/paddle/operators/math/selected_rows_functor.h b/paddle/operators/math/selected_rows_functor.h index d4bef72980..09d4631905 100644 --- a/paddle/operators/math/selected_rows_functor.h +++ b/paddle/operators/math/selected_rows_functor.h @@ -123,10 +123,9 @@ enum class ScatterOps { ASSIGN, ADD, SUB, SUBBY, MUL, DIV, DIVBY }; // out = seleted_rows_in / tensor template struct UpdateToTensor { - framework::Tensor operator()(const DeviceContext& context, - const ScatterOps& op, - const framework::SelectedRows& input1, - framework::Tensor* input2); + void operator()(const DeviceContext& context, const ScatterOps& op, + const framework::SelectedRows& input1, + framework::Tensor* input2); }; } // namespace scatter From 903d5609c61046cfa37280af5506ca21e350b852 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Fri, 29 Dec 2017 14:11:37 +0800 Subject: [PATCH 31/40] follow comment1 --- paddle/operators/adam_op.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index 3c4148ccc0..9cc34bdded 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -124,19 +124,20 @@ struct SparseAdamFunctor { row_numel_(row_numel) {} inline HOSTDEVICE void operator()(size_t i) const { + T beta1_pow = *beta1_pow_; + T beta2_pow = *beta2_pow_; for (int64_t j = 0; j < row_numel_; ++j) { T g = grad_[i * row_numel_ + j]; T mom1 = moment1_[rows_[i] * row_numel_ + j]; T mom2 = moment2_[rows_[i] * row_numel_ + j]; T lr = *lr_; - T beta1_pow = *beta1_pow_; - T beta2_pow = *beta2_pow_; T p = param_[rows_[i] * row_numel_ + j]; lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); + moment1_out_[rows_[i] * row_numel_ + j] = mom1; moment2_out_[rows_[i] * row_numel_ + j] = mom2; param_out_[rows_[i] * row_numel_ + j] = p; From d25f382d0b8c095008e1f5694e7aaf6f7fa7c075 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Fri, 29 Dec 2017 14:52:40 +0800 Subject: [PATCH 32/40] Remove debug codes --- paddle/framework/executor.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index d465f88888..bf1f0471cc 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -67,8 +67,7 @@ static void CheckTensorNANOrInf(const std::string& name, return; } PADDLE_ENFORCE(!framework::HasInf(tensor), "Tensor %s has Inf", name); - PADDLE_ENFORCE(!framework::HasNAN(tensor), "Tensor %s has NAN, %p", name, - &tensor); + 
PADDLE_ENFORCE(!framework::HasNAN(tensor), "Tensor %s has NAN", name); } void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, From 4a11fdb4ef698bb757ad310b53592c0968893b95 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 29 Dec 2017 15:07:07 +0800 Subject: [PATCH 33/40] follow comments --- paddle/operators/cos_sim_op.cc | 10 +++---- paddle/operators/cos_sim_op.cu | 12 ++++----- paddle/operators/cos_sim_op.h | 48 +++++++++++++++++----------------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 77492e60f2..d4f3ca5e32 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -155,11 +155,11 @@ struct CosSimDyFunctor { const T* y_norm, const T* x, const T* y, const T* z, const T* dz, const size_t rows, const size_t cols, T* dy) const { - for (size_t offset = 0; offset < rows; ++offset) { - auto xy_norm_prod = x_norm[offset] * y_norm[0]; - auto dz_data = dz[offset]; - auto z_data = z[offset]; - auto* x_data = x + cols * offset; + for (size_t row_id = 0; row_id < rows; ++row_id) { + auto xy_norm_prod = x_norm[row_id] * y_norm[0]; + auto dz_data = dz[row_id]; + auto z_data = z[row_id]; + auto* x_data = x + cols * row_id; auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; auto y_norm_square = y_norm[0] * y_norm[0]; diff --git a/paddle/operators/cos_sim_op.cu b/paddle/operators/cos_sim_op.cu index 86dc04995a..891436c948 100644 --- a/paddle/operators/cos_sim_op.cu +++ b/paddle/operators/cos_sim_op.cu @@ -25,12 +25,12 @@ __global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x, const size_t rows, const size_t cols, T* dy) { int grid_size = blockDim.x * gridDim.x; T y_norm_data = y_norm[0]; - for (int offset = blockIdx.x * blockDim.x + threadIdx.x; offset < rows; - offset += grid_size) { - T xy_norm_prod = x_norm[offset] * y_norm_data; - T dz_data = dz[offset]; - T z_data = z[offset]; - const T* x_data = x + cols * offset; + for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows; + row_id += grid_size) { + T xy_norm_prod = x_norm[row_id] * y_norm_data; + T dz_data = dz[row_id]; + T z_data = z[row_id]; + const T* x_data = x + cols * row_id; T reciprocal_xy_norm_prod = 1 / xy_norm_prod; T y_norm_square = y_norm_data * y_norm_data; diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 7641ca15f1..160edb0b56 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -32,11 +32,11 @@ struct CosSimFunctor { z_(z), cols_(static_cast(cols)) {} - inline HOSTDEVICE void operator()(size_t offset) const { - auto* x = x_ + cols_ * offset; + inline HOSTDEVICE void operator()(size_t row_id) const { + auto* x = x_ + cols_ * row_id; T xx = 0, xy = 0, yy = 0; if (same_row) { - auto* y = y_ + cols_ * offset; + auto* y = y_ + cols_ * row_id; T tep_x, tep_y; for (size_t i = 0; i < cols_; ++i) { tep_x = x[i]; @@ -47,9 +47,9 @@ struct CosSimFunctor { } xx = sqrt(xx); yy = sqrt(yy); - y_norm_[offset] = yy; - x_norm_[offset] = xx; - z_[offset] = xy / (xx * yy); + y_norm_[row_id] = yy; + x_norm_[row_id] = xx; + z_[row_id] = xy / (xx * yy); } else { // This can be wrote in a better way. 
T tep_x, tep_y; for (size_t i = 0; i < cols_; ++i) { @@ -61,9 +61,9 @@ struct CosSimFunctor { } xx = sqrt(xx); yy = sqrt(yy); - if (offset == 0) y_norm_[0] = yy; - x_norm_[offset] = xx; - z_[offset] = xy / (xx * yy); + if (row_id == 0) y_norm_[0] = yy; + x_norm_[row_id] = xx; + z_[row_id] = xy / (xx * yy); } } @@ -125,15 +125,15 @@ struct CosSimGradFunctor { dx_(dx), cols_(static_cast(cols)) {} - inline HOSTDEVICE void operator()(size_t offset) const { - auto x_norm_square = x_norm_[offset] * x_norm_[offset]; - auto xy_norm_prod = x_norm_[offset] * y_norm_[offset]; - auto dz = dz_[offset]; - auto z = z_[offset]; + inline HOSTDEVICE void operator()(size_t row_id) const { + auto x_norm_square = x_norm_[row_id] * x_norm_[row_id]; + auto xy_norm_prod = x_norm_[row_id] * y_norm_[row_id]; + auto dz = dz_[row_id]; + auto z = z_[row_id]; - auto* dx = dx_ + cols_ * offset; - auto* x = x_ + cols_ * offset; - auto* y = y_ + cols_ * offset; + auto* dx = dx_ + cols_ * row_id; + auto* x = x_ + cols_ * row_id; + auto* y = y_ + cols_ * row_id; auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; auto reciprocal_x_norm_square = 1 / x_norm_square; @@ -166,14 +166,14 @@ struct CosSimDxFunctor { dx_(dx), cols_(static_cast(cols)) {} - inline HOSTDEVICE void operator()(size_t offset) const { - auto xy_norm_prod = x_norm_[offset] * y_norm_[0]; - auto dz = dz_[offset]; - auto z = z_[offset]; - auto* x = x_ + cols_ * offset; + inline HOSTDEVICE void operator()(size_t row_id) const { + auto xy_norm_prod = x_norm_[row_id] * y_norm_[0]; + auto dz = dz_[row_id]; + auto z = z_[row_id]; + auto* x = x_ + cols_ * row_id; auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; - auto x_norm_square = x_norm_[offset] * x_norm_[offset]; - auto* dx = dx_ + cols_ * offset; + auto x_norm_square = x_norm_[row_id] * x_norm_[row_id]; + auto* dx = dx_ + cols_ * row_id; auto reciprocal_x_norm_square = 1 / x_norm_square; for (size_t i = 0; i < cols_; ++i) { From 24cf2fcd90a8409da2e5e38118c73eb4af13121f Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 29 Dec 2017 15:16:49 +0800 Subject: [PATCH 34/40] move cos_sim_functor to math --- paddle/operators/CMakeLists.txt | 4 +- paddle/operators/cos_sim_op.cc | 22 --- paddle/operators/cos_sim_op.cu | 45 ------ paddle/operators/cos_sim_op.h | 153 +-------------------- paddle/operators/math/CMakeLists.txt | 2 + paddle/operators/math/cos_sim_functor.cc | 48 +++++++ paddle/operators/math/cos_sim_functor.cu | 64 +++++++++ paddle/operators/math/cos_sim_functor.h | 166 +++++++++++++++++++++++ 8 files changed, 290 insertions(+), 214 deletions(-) create mode 100644 paddle/operators/math/cos_sim_functor.cc create mode 100644 paddle/operators/math/cos_sim_functor.cu create mode 100644 paddle/operators/math/cos_sim_functor.h diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 5aaaf99332..c6da04b5b4 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -210,7 +210,8 @@ set(DEPS_OPS save_op load_op send_op - recv_op) + recv_op + cos_sim_op) if(WITH_DISTRIBUTE) add_subdirectory(detail) @@ -256,6 +257,7 @@ op_library(lstm_op DEPS sequence2batch lstm_compute) op_library(conv_transpose_op DEPS vol2col) op_library(gru_op DEPS sequence2batch gru_compute) op_library(recurrent_op SRCS recurrent_op.cc DEPS executor) +op_library(cos_sim_op DEPS cos_sim_functor) # FIXME(typhoonzero): save/load depends lodtensor serialization functions op_library(save_op DEPS lod_tensor) diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 
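The forward computation that `CosSimFunctor` performs per row (with the `same_row == false` branch broadcasting a single `y` row against every `x` row) can be sketched in plain Python as follows; this is only an illustration of the math under assumed list-of-lists inputs, not the operator itself.

```python
import math

def cos_sim_forward(x, y):
    """x: rows_x * cols, y: rows_x * cols or a single broadcast row.
    Returns (z, x_norm, y_norm) like the cosine-similarity forward pass."""
    rows_x = len(x)
    broadcast_y = (len(y) == 1)
    x_norm = [0.0] * rows_x
    y_norm = [0.0] * (1 if broadcast_y else rows_x)
    z = [0.0] * rows_x
    for i in range(rows_x):
        yr = y[0] if broadcast_y else y[i]
        xx = sum(v * v for v in x[i])
        yy = sum(v * v for v in yr)
        xy = sum(a * b for a, b in zip(x[i], yr))
        x_norm[i] = math.sqrt(xx)
        if not broadcast_y:
            y_norm[i] = math.sqrt(yy)
        elif i == 0:
            # mirrors `if (row_id == 0) y_norm_[0] = yy;` in the functor
            y_norm[0] = math.sqrt(yy)
        z[i] = xy / (x_norm[i] * (y_norm[0] if broadcast_y else y_norm[i]))
    return z, x_norm, y_norm

print(cos_sim_forward([[1.0, 0.0], [0.0, 2.0]], [[1.0, 1.0]]))
```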
d4f3ca5e32..9019a1edb3 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -149,28 +149,6 @@ class CosSimOpGrad : public framework::OperatorWithKernel { } }; -template -struct CosSimDyFunctor { - inline void operator()(const platform::CPUDeviceContext& ctx, const T* x_norm, - const T* y_norm, const T* x, const T* y, const T* z, - const T* dz, const size_t rows, const size_t cols, - T* dy) const { - for (size_t row_id = 0; row_id < rows; ++row_id) { - auto xy_norm_prod = x_norm[row_id] * y_norm[0]; - auto dz_data = dz[row_id]; - auto z_data = z[row_id]; - auto* x_data = x + cols * row_id; - auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; - - auto y_norm_square = y_norm[0] * y_norm[0]; - auto reciprocal_y_norm_square = 1 / y_norm_square; - for (size_t i = 0; i < cols; ++i) { - dy[i] += dz_data * (x_data[i] * reciprocal_xy_norm_prod - - z_data * y[i] * reciprocal_y_norm_square); - } - } - } -}; } // namespace operators } // namespace paddle diff --git a/paddle/operators/cos_sim_op.cu b/paddle/operators/cos_sim_op.cu index 891436c948..9e5d1b6e4f 100644 --- a/paddle/operators/cos_sim_op.cu +++ b/paddle/operators/cos_sim_op.cu @@ -14,51 +14,6 @@ limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/cos_sim_op.h" -#include "paddle/platform/cuda_helper.h" - -namespace paddle { -namespace operators { - -template -__global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x, - const T* y, const T* z, const T* dz, - const size_t rows, const size_t cols, T* dy) { - int grid_size = blockDim.x * gridDim.x; - T y_norm_data = y_norm[0]; - for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows; - row_id += grid_size) { - T xy_norm_prod = x_norm[row_id] * y_norm_data; - T dz_data = dz[row_id]; - T z_data = z[row_id]; - const T* x_data = x + cols * row_id; - T reciprocal_xy_norm_prod = 1 / xy_norm_prod; - - T y_norm_square = y_norm_data * y_norm_data; - T reciprocal_y_norm_square = 1 / y_norm_square; - for (size_t i = 0; i < cols; ++i) { - T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod - - z_data * y[i] * reciprocal_y_norm_square); - platform::CudaAtomicAdd(dy + i, dy_data); - } - } -} - -template -struct CosSimDyFunctor { - inline void operator()(const platform::CUDADeviceContext& ctx, - const T* x_norm, const T* y_norm, const T* x, - const T* y, const T* z, const T* dz, const size_t rows, - const size_t cols, T* dy) const { - const int block_size = 512; - dim3 threads(block_size, 1); - dim3 grid(1, (rows + block_size - 1) / block_size); - CosSimDyKernel<<>>( - x_norm, y_norm, x, y, z, dz, rows, cols, dy); - } -}; - -} // namespace operators -} // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 160edb0b56..eadcca55f9 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once #include "paddle/framework/op_registry.h" +#include "paddle/operators/math/cos_sim_functor.h" #include "paddle/operators/math/math_function.h" #include "paddle/platform/for_range.h" @@ -22,59 +23,6 @@ namespace operators { using Tensor = framework::Tensor; -template -struct CosSimFunctor { - CosSimFunctor(const T* x, const T* y, T* x_norm, T* y_norm, T* z, int cols) - : x_norm_(x_norm), - y_norm_(y_norm), - x_(x), - y_(y), - z_(z), - cols_(static_cast(cols)) {} - - inline HOSTDEVICE void operator()(size_t row_id) const { - auto* x = x_ + cols_ * row_id; - T xx = 0, xy = 0, yy = 0; - if (same_row) { - auto* y = y_ + cols_ * row_id; - T tep_x, tep_y; - for (size_t i = 0; i < cols_; ++i) { - tep_x = x[i]; - tep_y = y[i]; - xx += tep_x * tep_x; - yy += tep_y * tep_y; - xy += tep_x * tep_y; - } - xx = sqrt(xx); - yy = sqrt(yy); - y_norm_[row_id] = yy; - x_norm_[row_id] = xx; - z_[row_id] = xy / (xx * yy); - } else { // This can be wrote in a better way. - T tep_x, tep_y; - for (size_t i = 0; i < cols_; ++i) { - tep_x = x[i]; - tep_y = y_[i]; - xx += tep_x * tep_x; - yy += tep_y * tep_y; - xy += tep_x * tep_y; - } - xx = sqrt(xx); - yy = sqrt(yy); - if (row_id == 0) y_norm_[0] = yy; - x_norm_[row_id] = xx; - z_[row_id] = xy / (xx * yy); - } - } - - T* x_norm_; - T* y_norm_; - const T* x_; - const T* y_; - T* z_; - const size_t cols_; -}; - template class CosSimKernel : public framework::OpKernel { public: @@ -95,14 +43,14 @@ class CosSimKernel : public framework::OpKernel { int cols = framework::product(in_x->dims()) / rows_x; if (rows_x == rows_y) { - CosSimFunctor functor( + math::CosSimFunctor functor( in_x->data(), in_y->data(), out_x_norm->data(), out_y_norm->data(), out_z->data(), cols); platform::ForRange for_range( static_cast(context.device_context()), rows_x); for_range(functor); } else { - CosSimFunctor functor( + math::CosSimFunctor functor( in_x->data(), in_y->data(), out_x_norm->data(), out_y_norm->data(), out_z->data(), cols); platform::ForRange for_range( @@ -112,93 +60,6 @@ class CosSimKernel : public framework::OpKernel { } }; -template -struct CosSimGradFunctor { - CosSimGradFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dx, int cols) - : x_norm_(x_norm), - y_norm_(y_norm), - x_(x), - y_(y), - z_(z), - dz_(dz), - dx_(dx), - cols_(static_cast(cols)) {} - - inline HOSTDEVICE void operator()(size_t row_id) const { - auto x_norm_square = x_norm_[row_id] * x_norm_[row_id]; - auto xy_norm_prod = x_norm_[row_id] * y_norm_[row_id]; - auto dz = dz_[row_id]; - auto z = z_[row_id]; - - auto* dx = dx_ + cols_ * row_id; - auto* x = x_ + cols_ * row_id; - auto* y = y_ + cols_ * row_id; - - auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; - auto reciprocal_x_norm_square = 1 / x_norm_square; - for (size_t i = 0; i < cols_; ++i) { - dx[i] = dz * (y[i] * reciprocal_xy_norm_prod - - z * x[i] * reciprocal_x_norm_square); - } - } - - const T* x_norm_; - const T* y_norm_; - const T* x_; - const T* y_; - const T* z_; - const T* dz_; - T* dx_; - const size_t cols_; -}; - -template -struct CosSimDxFunctor { - CosSimDxFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dx, int cols) - : x_norm_(x_norm), - y_norm_(y_norm), - x_(x), - y_(y), - z_(z), - dz_(dz), - dx_(dx), - cols_(static_cast(cols)) {} - - inline HOSTDEVICE void operator()(size_t row_id) const { - auto xy_norm_prod = x_norm_[row_id] * y_norm_[0]; - auto dz = dz_[row_id]; - auto z = z_[row_id]; - auto* x = x_ + cols_ * row_id; - 
auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; - auto x_norm_square = x_norm_[row_id] * x_norm_[row_id]; - auto* dx = dx_ + cols_ * row_id; - auto reciprocal_x_norm_square = 1 / x_norm_square; - - for (size_t i = 0; i < cols_; ++i) { - dx[i] = dz * (y_[i] * reciprocal_xy_norm_prod - - z * x[i] * reciprocal_x_norm_square); - } - } - const T* x_norm_; - const T* y_norm_; - const T* x_; - const T* y_; - const T* z_; - const T* dz_; - T* dx_; - const size_t cols_; -}; - -template -struct CosSimDyFunctor { - inline void operator()(const DeviceContext& ctx, const T* x_norm, - const T* y_norm, const T* x, const T* y, const T* z, - const T* dz, const size_t rows, const size_t cols, - T* dy) const; -}; - template class CosSimGradKernel : public framework::OpKernel { public: @@ -220,7 +81,7 @@ class CosSimGradKernel : public framework::OpKernel { if (rows_x == rows_y) { if (out_grad_x) { - CosSimGradFunctor functor( + math::CosSimGradFunctor functor( in_x_norm->data(), in_y_norm->data(), in_x->data(), in_y->data(), in_z->data(), in_grad_z->data(), out_grad_x->mutable_data(context.GetPlace()), cols); @@ -230,7 +91,7 @@ class CosSimGradKernel : public framework::OpKernel { for_range(functor); } if (out_grad_y) { - CosSimGradFunctor functor( + math::CosSimGradFunctor functor( in_y_norm->data(), in_x_norm->data(), in_y->data(), in_x->data(), in_z->data(), in_grad_z->data(), out_grad_y->mutable_data(context.GetPlace()), cols); @@ -241,7 +102,7 @@ class CosSimGradKernel : public framework::OpKernel { } } else { if (out_grad_x) { - CosSimDxFunctor functor( + math::CosSimDxFunctor functor( in_x_norm->data(), in_y_norm->data(), in_x->data(), in_y->data(), in_z->data(), in_grad_z->data(), out_grad_x->mutable_data(context.GetPlace()), cols); @@ -256,7 +117,7 @@ class CosSimGradKernel : public framework::OpKernel { auto& dev_ctx = context.template device_context(); set_zero(dev_ctx, out_grad_y, static_cast(0)); - CosSimDyFunctor functor; + math::CosSimDyFunctor functor; functor(dev_ctx, in_x_norm->data(), in_y_norm->data(), in_x->data(), in_y->data(), in_z->data(), in_grad_z->data(), static_cast(rows_x), diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index bf47879f77..830ae53cbe 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -16,6 +16,7 @@ if(WITH_GPU) nv_library(maxouting SRCS maxouting.cc maxouting.cu DEPS device_context) nv_library(unpooling SRCS unpooling.cc unpooling.cu DEPS device_context) nv_library(gru_compute SRCS gru_compute.cc gru_compute.cu DEPS device_context activation_functions math_function) + nv_library(cos_sim_functor SRCS cos_sim_functor.cc cos_sim_functor.cu DEPS device_context) else() cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context framework_proto) cc_library(selected_rows_functor SRCS selected_rows_functor.cc DEPS selected_rows math_function) @@ -30,6 +31,7 @@ else() cc_library(maxouting SRCS maxouting.cc DEPS device_context) cc_library(unpooling SRCS unpooling.cc DEPS device_context) cc_library(gru_compute SRCS gru_compute.cc DEPS device_context activation_functions math_function) + cc_library(cos_sim_functor SRCS cos_sim_functor.cc DEPS device_context) endif() cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/cos_sim_functor.cc b/paddle/operators/math/cos_sim_functor.cc new file mode 100644 index 0000000000..f52a82b108 --- /dev/null +++ b/paddle/operators/math/cos_sim_functor.cc @@ -0,0 +1,48 @@ 
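The gradient with respect to `x` computed by `CosSimGradFunctor` and `CosSimDxFunctor` is the usual derivative of cosine similarity, dx_j = dz * (y_j / (|x||y|) - z * x_j / |x|^2), applied row by row. A small hedged Python sketch of that formula, included only to make the expression above easier to check:

```python
import math

def cos_sim_dx_row(x, y, z, dz):
    """Gradient of z = cos(x, y) w.r.t. x for one row:
    dx_j = dz * (y_j / (|x||y|) - z * x_j / |x|^2)."""
    x_norm = math.sqrt(sum(v * v for v in x))
    y_norm = math.sqrt(sum(v * v for v in y))
    return [dz * (yj / (x_norm * y_norm) - z * xj / (x_norm * x_norm))
            for xj, yj in zip(x, y)]

x, y = [1.0, 0.0], [1.0, 1.0]
z = sum(a * b for a, b in zip(x, y)) / (
    math.sqrt(sum(a * a for a in x)) * math.sqrt(sum(b * b for b in y)))
print(cos_sim_dx_row(x, y, z, dz=1.0))
```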
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/cos_sim_functor.h" + +namespace paddle { +namespace operators { +namespace math { + +template +struct CosSimDyFunctor { + void operator()(const platform::CPUDeviceContext& ctx, const T* x_norm, + const T* y_norm, const T* x, const T* y, const T* z, + const T* dz, const size_t rows, const size_t cols, + T* dy) const { + for (size_t row_id = 0; row_id < rows; ++row_id) { + auto xy_norm_prod = x_norm[row_id] * y_norm[0]; + auto dz_data = dz[row_id]; + auto z_data = z[row_id]; + auto* x_data = x + cols * row_id; + auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; + + auto y_norm_square = y_norm[0] * y_norm[0]; + auto reciprocal_y_norm_square = 1 / y_norm_square; + for (size_t i = 0; i < cols; ++i) { + dy[i] += dz_data * (x_data[i] * reciprocal_xy_norm_prod - + z_data * y[i] * reciprocal_y_norm_square); + } + } + } +}; + +template class CosSimDyFunctor; +template class CosSimDyFunctor; +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/cos_sim_functor.cu b/paddle/operators/math/cos_sim_functor.cu new file mode 100644 index 0000000000..fb19a8b38a --- /dev/null +++ b/paddle/operators/math/cos_sim_functor.cu @@ -0,0 +1,64 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/math/cos_sim_functor.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math { + +template +__global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x, + const T* y, const T* z, const T* dz, + const size_t rows, const size_t cols, T* dy) { + int grid_size = blockDim.x * gridDim.x; + T y_norm_data = y_norm[0]; + for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows; + row_id += grid_size) { + T xy_norm_prod = x_norm[row_id] * y_norm_data; + T dz_data = dz[row_id]; + T z_data = z[row_id]; + const T* x_data = x + cols * row_id; + T reciprocal_xy_norm_prod = 1 / xy_norm_prod; + + T y_norm_square = y_norm_data * y_norm_data; + T reciprocal_y_norm_square = 1 / y_norm_square; + for (size_t i = 0; i < cols; ++i) { + T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod - + z_data * y[i] * reciprocal_y_norm_square); + platform::CudaAtomicAdd(dy + i, dy_data); + } + } +} + +template +struct CosSimDyFunctor { + void operator()(const platform::CUDADeviceContext& ctx, const T* x_norm, + const T* y_norm, const T* x, const T* y, const T* z, + const T* dz, const size_t rows, const size_t cols, + T* dy) const { + const int block_size = 512; + dim3 threads(block_size, 1); + dim3 grid(1, (rows + block_size - 1) / block_size); + CosSimDyKernel<<>>( + x_norm, y_norm, x, y, z, dz, rows, cols, dy); + } +}; + +template class CosSimDyFunctor; +template class CosSimDyFunctor; +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/cos_sim_functor.h b/paddle/operators/math/cos_sim_functor.h new file mode 100644 index 0000000000..aae8ab5b7a --- /dev/null +++ b/paddle/operators/math/cos_sim_functor.h @@ -0,0 +1,166 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include +#include "paddle/platform/device_context.h" +#include "paddle/platform/hostdevice.h" + +namespace paddle { +namespace operators { +namespace math { + +template +struct CosSimFunctor { + CosSimFunctor(const T* x, const T* y, T* x_norm, T* y_norm, T* z, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + cols_(static_cast(cols)) {} + + inline HOSTDEVICE void operator()(size_t row_id) const { + auto* x = x_ + cols_ * row_id; + T xx = 0, xy = 0, yy = 0; + if (same_row) { + auto* y = y_ + cols_ * row_id; + T tep_x, tep_y; + for (size_t i = 0; i < cols_; ++i) { + tep_x = x[i]; + tep_y = y[i]; + xx += tep_x * tep_x; + yy += tep_y * tep_y; + xy += tep_x * tep_y; + } + xx = sqrt(xx); + yy = sqrt(yy); + y_norm_[row_id] = yy; + x_norm_[row_id] = xx; + z_[row_id] = xy / (xx * yy); + } else { // This can be wrote in a better way. 
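In the broadcast case the single `y` row is shared by every `x` row, so `dy` is a sum of per-row contributions; that is why the CUDA kernel above accumulates with `platform::CudaAtomicAdd`, since different threads may update the same `dy[i]` concurrently. A serial Python sketch of the same reduction, for illustration only:

```python
import math

def cos_sim_dy(x_rows, y, z, dz):
    """dy_j = sum_i dz_i * (x_ij / (|x_i||y|) - z_i * y_j / |y|^2).
    The per-row contributions are reduced into one shared dy row."""
    y_norm = math.sqrt(sum(v * v for v in y))
    dy = [0.0] * len(y)
    for xi, zi, dzi in zip(x_rows, z, dz):
        x_norm = math.sqrt(sum(v * v for v in xi))
        for j, yj in enumerate(y):
            dy[j] += dzi * (xi[j] / (x_norm * y_norm)
                            - zi * yj / (y_norm * y_norm))
    return dy

x_rows = [[1.0, 0.0], [0.0, 2.0]]
y = [1.0, 1.0]
z = [1.0 / math.sqrt(2.0), 1.0 / math.sqrt(2.0)]
print(cos_sim_dy(x_rows, y, z, dz=[1.0, 1.0]))
```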
+ T tep_x, tep_y; + for (size_t i = 0; i < cols_; ++i) { + tep_x = x[i]; + tep_y = y_[i]; + xx += tep_x * tep_x; + yy += tep_y * tep_y; + xy += tep_x * tep_y; + } + xx = sqrt(xx); + yy = sqrt(yy); + if (row_id == 0) y_norm_[0] = yy; + x_norm_[row_id] = xx; + z_[row_id] = xy / (xx * yy); + } + } + + T* x_norm_; + T* y_norm_; + const T* x_; + const T* y_; + T* z_; + const size_t cols_; +}; + +template +struct CosSimGradFunctor { + CosSimGradFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dx, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + dz_(dz), + dx_(dx), + cols_(static_cast(cols)) {} + + inline HOSTDEVICE void operator()(size_t row_id) const { + auto x_norm_square = x_norm_[row_id] * x_norm_[row_id]; + auto xy_norm_prod = x_norm_[row_id] * y_norm_[row_id]; + auto dz = dz_[row_id]; + auto z = z_[row_id]; + + auto* dx = dx_ + cols_ * row_id; + auto* x = x_ + cols_ * row_id; + auto* y = y_ + cols_ * row_id; + + auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; + auto reciprocal_x_norm_square = 1 / x_norm_square; + for (size_t i = 0; i < cols_; ++i) { + dx[i] = dz * (y[i] * reciprocal_xy_norm_prod - + z * x[i] * reciprocal_x_norm_square); + } + } + + const T* x_norm_; + const T* y_norm_; + const T* x_; + const T* y_; + const T* z_; + const T* dz_; + T* dx_; + const size_t cols_; +}; + +template +struct CosSimDxFunctor { + CosSimDxFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dx, int cols) + : x_norm_(x_norm), + y_norm_(y_norm), + x_(x), + y_(y), + z_(z), + dz_(dz), + dx_(dx), + cols_(static_cast(cols)) {} + + inline HOSTDEVICE void operator()(size_t row_id) const { + auto xy_norm_prod = x_norm_[row_id] * y_norm_[0]; + auto dz = dz_[row_id]; + auto z = z_[row_id]; + auto* x = x_ + cols_ * row_id; + auto reciprocal_xy_norm_prod = 1 / xy_norm_prod; + auto x_norm_square = x_norm_[row_id] * x_norm_[row_id]; + auto* dx = dx_ + cols_ * row_id; + auto reciprocal_x_norm_square = 1 / x_norm_square; + + for (size_t i = 0; i < cols_; ++i) { + dx[i] = dz * (y_[i] * reciprocal_xy_norm_prod - + z * x[i] * reciprocal_x_norm_square); + } + } + const T* x_norm_; + const T* y_norm_; + const T* x_; + const T* y_; + const T* z_; + const T* dz_; + T* dx_; + const size_t cols_; +}; + +template +struct CosSimDyFunctor { + void operator()(const DeviceContext& ctx, const T* x_norm, const T* y_norm, + const T* x, const T* y, const T* z, const T* dz, + const size_t rows, const size_t cols, T* dy) const; +}; + +} // namespace math +} // namespace operators +} // namespace paddle From a6ff5240f519380257f206fbc9c7f720fff4badc Mon Sep 17 00:00:00 2001 From: guosheng Date: Fri, 29 Dec 2017 20:41:07 +0800 Subject: [PATCH 35/40] Refine the activation type of GRUOp by following comments --- paddle/operators/gru_op.h | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/paddle/operators/gru_op.h b/paddle/operators/gru_op.h index d773521259..b1957fb9ce 100644 --- a/paddle/operators/gru_op.h +++ b/paddle/operators/gru_op.h @@ -90,6 +90,10 @@ class GRUKernel : public framework::OpKernel { } auto batch_starts = batch_gate->lod()[0]; size_t num_batch = batch_starts.size() - 1; + auto active_node = math::detail::GetActivationType( + context.Attr("activation")); + auto active_gate = math::detail::GetActivationType( + context.Attr("gate_activation")); for (size_t n = 0; n < num_batch; n++) { int bstart = static_cast(batch_starts[n]); int bend = static_cast(batch_starts[n + 
1]); @@ -102,11 +106,8 @@ class GRUKernel : public framework::OpKernel { gru_value.gate_value = gate_t.data(); gru_value.reset_output_value = reset_hidden_prev_t.data(); math::GRUUnitFunctor::compute( - dev_ctx, gru_value, frame_size, cur_batch_size, - math::detail::GetActivationType( - context.Attr("activation")), - math::detail::GetActivationType( - context.Attr("gate_activation"))); + dev_ctx, gru_value, frame_size, cur_batch_size, active_node, + active_gate); gru_value.prev_out_value = gru_value.output_value; } @@ -192,6 +193,10 @@ class GRUGradKernel : public framework::OpKernel { auto batch_starts = batch_hidden_grad.lod()[0]; size_t num_batch = batch_starts.size() - 1; + auto active_node = math::detail::GetActivationType( + context.Attr("activation")); + auto active_gate = math::detail::GetActivationType( + context.Attr("gate_activation")); for (int n = static_cast(num_batch) - 1; n >= 0; n--) { int bstart = static_cast(batch_starts[n]); int bend = static_cast(batch_starts[n + 1]); @@ -222,11 +227,8 @@ class GRUGradKernel : public framework::OpKernel { } math::GRUUnitGradFunctor::compute( - dev_ctx, gru_value, gru_grad, frame_size, cur_batch_size, - math::detail::GetActivationType( - context.Attr("activation")), - math::detail::GetActivationType( - context.Attr("gate_activation"))); + dev_ctx, gru_value, gru_grad, frame_size, cur_batch_size, active_node, + active_gate); } if (input_grad) { input_grad->mutable_data(context.GetPlace()); From 8543ad64635294c0dc52cd6701f076cd3d5981a2 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 1 Jan 2018 20:21:41 +0800 Subject: [PATCH 36/40] update backward doc --- paddle/framework/backward.md | 183 +++++++++++++++++++---------- python/paddle/v2/fluid/backward.py | 26 ++-- 2 files changed, 136 insertions(+), 73 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index ac60be5724..acc95e99c4 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -1,100 +1,161 @@ -# Operator/expression 's Backward +# Backward Building ## Motivation -In Neural Network, most models are solved by the backpropagation algorithm(known as **BP**) at present. Technically, BP calculates the gradient of the loss function, then propagates it back through the networks following the chain rule. Hence we need a module that chains the gradient operators/expressions together to construct the backward pass. Every forward network needs a backward network to construct the full computation graph. The operator/expression's backward pass will be generated with respect to the forward pass. +In Neural Network, most models are solved by the backpropagation algorithm(known as **BP**) at present. Technically, BP calculates the gradient of the loss function, then propagates it back through the networks following the chain rule. However, when configuring the model structure, users do not need to definate the backward part. So a mechanism is required by the framework which is able to complete the model's backward part automatically acoording to the given forward part. -## Implementation - -In this design doc, we exported only one API for generating the backward pass. - -```c++ -std::unique_ptr Backward(const OperatorBase& forwardOp, - const std::unordered_set& no_grad_vars); -``` +When implementing a certain `op`, the developer is also asked to implement its backward version, called `grad_op`. A `grad_op` takes gradients of its corresponding `op`'s outputs, and calculate gradients of the `op`'s inputs. 
During the building of a model's backward part, the framework creates each forward `op`'s `grad_op`, and then string them together in reverse order of forward part. In this way, gradients spread from the end to the beginning of the model, in other word, from the loss to parameters. -The implementation behind it can be divided into two parts, **Backward Operator Creating** and **Backward Operator Building**. +## Challenges -### Backward Operator Registry +The motivation of backward building is obvious. However, to implement it correctly is not so easy. In the **Fluid** design, a deep learning model is described by `Program`, `Block`, `Op` and `Variable`. The `Block` itself can be nested. It means that the `op`s and `variable`s are scattered across different blocks rather than all be gathered in a single graph. Our backward building algorithm shall visit blocks in recursive order and be able to insert `grad_op`s and new created `variable`s into right place. -A backward network is built up with several backward operators. Backward operators take forward operators' inputs, outputs, and output gradients and then calculate its input gradients. +## Usage -| | forward operator | backward operator -| ---------------------- | ---------------- |------------------------- | -| **Operator::inputs_** | Inputs | Inputs, Outputs, OutputGradients | -| **Operator::outputs_** | Outputs | InputGradients | +Although the whole algorithm is comprised of many functions, only one is exposed as API: - In most cases, there is a one-to-one relation between the forward and backward operators. These relations are recorded by a global hash map(`OpInfoMap`). To follow the philosophy of minimum core and to make operators pluggable, the registry mechanism is introduced. +```python +def append_backward(loss, parameter_list=None, no_grad_set=None): + """ + Append backward part to main_program -For example, we have `mul_op`, and we can register its information and corresponding backward operator by the following macro: + Args: + loss(Variable): The variable generated by cost function. + parameter_list(list): Parameters that need to be updated by optimizer. + If None, it means all parameters need to be updated. -```cpp -REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad); + no_grad_set(set): Variables that have no gradients in Block 0. + If None, the set will be generated inside the function and + contains all variables with `step_gradient=True` from all blocks. + + Return: + (list[Variable]): list of (parameters, gradients) pair. + """ ``` -`mul` is the operator's type. `MulOp` and `MulOpMaker` are the operator class and the operator maker class respectively. +By invoking this API, the framework appends backward part for the program where the `loss` is. It takes three arguments. `loss` means the final loss value. It must be a scalar and is usually the output of the loss layer. It is also where the gradient generated and backpropagation starts. `parameter_list` marks all parameters needs updating. If it's `None`, all parameter will be updated by optimizers. `no_grad_set` marks variables without gradient. if all outputs of some `grad_op` are in `no_grad_set`, the `grad_op` will not be run. -`mul_grad` is the type of backward operator, and `MulOpGrad` is its class name. +This API will be invoked automatically before optimizer building. +As a result, in most cases users do not need to invoke the API by themselves to append backward part. 
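As a usage illustration, the fragment below shows `append_backward` being called directly to obtain the (parameter, gradient) pairs. Only the `append_backward(loss)` call follows the docstring above; the layer helpers (`fluid.layers.data`, `fluid.layers.fc`, `fluid.layers.square_error_cost`, `fluid.layers.mean`) and their exact keyword names are assumptions about the v2 fluid API of this period, so treat this as a sketch rather than a verified script.

```python
# Hedged usage sketch; layer signatures are assumed, only append_backward's
# behaviour is taken from the docstring above.
import paddle.v2.fluid as fluid
from paddle.v2.fluid.backward import append_backward

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(x=cost)

# Appends grad ops and grad variables to the program holding avg_cost and
# returns [(parameter, gradient), ...] for an optimizer to consume.
params_grads = append_backward(avg_cost)
for param, grad in params_grads:
    print(param.name, '->', grad.name)
```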
-### Backward Opeartor Creating - -Given a certain forward operator, we can get its corresponding backward operator by calling: +## Implementation -```cpp -OperatorBase* bwd_op = BuildGradOp(const OperatorBase* fwd_op); +The implementation of backward building algorithm is in `backward.py` file. The whole algorithm can be divided to two independent parts: creating of `grad_op`s and creating of new variables. + +### Creating `grad_op`s + +The creating of `grad_op`s is implemented by: + +```python +def _append_backward_ops_(target, + block, + target_block, + no_grad_dict, + grad_to_var): + """ + Create all grad ops, and insert them into given block + + Args: + target(Variable): the target variable of forward pass + block(Block): the block where forward ops are + target_block(Block): the block which is going to hold new generated grad ops + no_grad_dict(dict): + key(int) block index + val(set) a set of varibale names. These varibales have no gradient + grad_to_var(dict)(output argument): + key(str): grad variable name + val(str): corresponding forward variable name + """ ``` -The function `BuildGradOp` will sequentially execute following processes: - -1. Get the `type_` of given forward operator, and then get the corresponding backward operator's type by looking up the `OpInfoMap`. - -2. Build two maps named `inputs` and `outputs` to temporarily store backward operator's inputs and outputs. Copy forward operator's `inputs_` and `outputs_` to map `inputs`, except these, are not necessary for gradient computing. +Given a `block`, the function will traverses all `op`s in this block in reverse order, gets corresponding `grad_op` from the C++ core via `core.get_grad_op_desc()`, then append it to `target_block`. -3. Add forward inputs' gradient variables into map `output`, adding forward outputs' gradient variables into map `input`. +However, some specific `op`(e.g. `while_op`, `if_else_op`) can hold its own sub-block. For these sub-blocks contains `op`s as well, the `grad_op` creating should be recursive. -4. Building backward operator with `inputs`, `outputs` and forward operator's attributes. +During the reverse traversal, we check each `op` whether it has an attribute named `sub_block`. If so, it means there is a sub-block and we need to deal with it first. After creating a new block whose father is the one in `op`'s attribute, we invoke `_append_backward_ops_()` recursively, assigning the new block to parameter `target_block` and the one in `op`'s attribute to `block`. The *pseudo-code* shows this process: -### Backward Network Building - -A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and appending them together one by one. There are some corner cases that need special processing. - -1. Op - - When the input forward network is an Op, return its gradient Operator immediately. If all of its outputs are in no gradient set, then return a special `NOP`. +``` +******* pseudo-code ******** +for op in reversed(block.ops): + if op has an attribute named 'sub_block': + Get the sub-block(`s_block`) from op's attribute. + Create a new block(`grad_s_block`), whose father is `s_block`. + Invoke _append_backward_ops_(), with `block=s_block` and `target_block=grad_s_block` + + Invoke `core.get_grad_op_desc()` to get op's grad_op. + Insert name correspondings between variables and their gradients of the grad_op to grad_to_var + Assign grad_s_block to grad_op as it's 'sub_block' attribute. 
+ Append grad_op to current target_block. +``` -2. NetOp +The first invoking of `_append_backward_ops_()` is initiated by `append_backward()`, in which parameters `block` and `target_block` are all assigned with root block(the block with index 0). - In our design, the network itself is also a kind of operator(**NetOp**). So the operators contained by a big network may be some small network. When the input forward network is a NetOp, it needs to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to the forward NetOp. +### Corner Cases of `grad_op` Creating -3. RnnOp +In the previous section, we show the regular process of `grad_op` creating. However, in some corner cases, regular algorithm is not enough to get the correct result and appending handling is required. These addtional processes run after the above-mentioned algorithm and do some special adjusts on its output `grad_op`s. - RnnOp is a nested stepnet operator. Backward module needs to recusively call `Backward` for every stepnet. +#### Shared Variables -4. Sharing Variables +If a variable is readed by more than one `op` in the forward pass, its gradient is likey to be written by more than one `grad_op`s in the following backward pass. To make the gradient result being the sum of all `grad_op`s' outputs instead of the last running one, we assign each output with a temporary variables, and then add a `sum_op` to add them up. - As illustrated in the figure 1 and figure 2, two operators share the same variable name **W@GRAD**, which will overwrite their shared input variable. +For the debug convinience, if the final gradient name is `w@GRAD`, it's corresponding temporary variables will be named as `w@GRAD@RENAME@0`, `w@GRAD@RENAME@1`... -
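The renaming scheme described above can be shown with a small self-contained sketch: every repeated write to `w@GRAD` is redirected to a temporary `w@GRAD@RENAME@k`, and a trailing sum produces the real gradient. This is only a toy model of what `_addup_repetitive_outputs_` does, not its actual code.

```python
def addup_repetitive_outputs(grad_ops):
    """Toy renaming pass. grad_ops is a list of (op_name, output_grad_name).
    Outputs written more than once are renamed to <name>@RENAME@k and a
    trailing 'sum' op is appended to add the temporaries up."""
    counts = {}
    for _, out in grad_ops:
        counts[out] = counts.get(out, 0) + 1

    renamed, pending_sum = [], {}
    for op, out in grad_ops:
        if counts[out] > 1:
            k = len(pending_sum.setdefault(out, []))
            tmp = '%s@RENAME@%d' % (out, k)
            pending_sum[out].append(tmp)
            renamed.append((op, tmp))
        else:
            renamed.append((op, out))
    for out, tmps in pending_sum.items():
        # sum(tmps) -> out, so the final gradient is the sum of all writers
        renamed.append(('sum', out))
    return renamed

ops = [('mul_grad', 'w@GRAD'), ('fc_grad', 'w@GRAD'), ('mul_grad', 'x@GRAD')]
for op, out in addup_repetitive_outputs(ops):
    print(op, '->', out)
```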
-​ Figure 1. Sharing variables in operators. +See function `_addup_repetitive_outputs_` in `backward.py` for implementation details. -

+#### No Gradient Variables -​ Sharing variable between operators or same input variable used in multiple operators can lead to duplicate gradient variables. As illustrated in figure 2, we need to rename the gradient names recursively and add a generic add operator to prevent overwriting. +In our framework, variables can be marked as *no_gradient*, it means that the gradient of this variable is unnecessary and can be considered as zero in model training. Obviously, when all the outputs of some `grad_op` is marked as *no_gradient*, the `grad_op` itself can be skipped in backward pass. -

-
+But these unnecessary gradients still need to be creating and initialized by something, otherwise following `grad_op`s who take these gradients as inputs take the risk of using uninitialized memory. In our code, we employ `fill_zeros_like_op` to initialize them as all zeros. -​ Figure 2. Replace sharing variable's gradient with `Add` operator. +This features are implemented in function `_remove_no_grad_branch_`. It checks new created `grad_op`'s one-by-one, removes whose outputs are all in `no_grad_set` or inserts `fill_zeros_like_op` when its necessary. We can get the `no_grad_set` from the `_append_backward_ops_` argument `no_grad_dict` or generate it on fly by scanning all variables' `no_gradient` attribute(True or False). -
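A toy sketch of the pruning rule just described: a `grad_op` whose outputs all fall in `no_grad_set` is dropped, and any remaining op that reads a no-grad gradient gets a `fill_zeros_like` producer in front of it, so later ops never read uninitialized memory. This only models the behavior of `_remove_no_grad_branch_` under assumed op-dict inputs; it is not the real implementation.

```python
def remove_no_grad_branch(grad_ops, no_grad_set):
    """grad_ops: list of dicts {'type', 'inputs', 'outputs'} in execution
    order. Drops ops whose outputs are all no-grad; for surviving ops, feeds
    any no-grad input from a fill_zeros_like op instead."""
    kept = []
    for op in grad_ops:
        if all(out in no_grad_set for out in op['outputs']):
            continue  # every output is unwanted, skip the whole grad op
        for inp in op['inputs']:
            if inp in no_grad_set:
                kept.append({'type': 'fill_zeros_like',
                             'inputs': [inp.replace('@GRAD', '')],
                             'outputs': [inp]})
        kept.append(op)
    return kept

ops = [
    {'type': 'relu_grad', 'inputs': ['y@GRAD'], 'outputs': ['x@GRAD']},
    {'type': 'mul_grad', 'inputs': ['x@GRAD'], 'outputs': ['w@GRAD', 'b@GRAD']},
]
for op in remove_no_grad_branch(ops, no_grad_set={'x@GRAD'}):
    print(op['type'], op['inputs'], '->', op['outputs'])
```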

+### Creating Backward Variables -​ Because the framework finds variables according to their names, we need to rename the output links. We add an integer suffix to represent its position in the clockwise direction. +Up to now, we have completed all creating and adjusting jobs of `grad_op`s. However, backward variables have not been created. Now they are only represented by `grad_op`'s input and output arguments. The backward variable creating job will be done by: -5. Part of the Gradient is Zero. +```python +def _append_backward_vars_(block, + start_op_idx, + grad_to_var, + grad_info_map): + """ + Create new variables required by backward pass. - In the whole graph, there is some case of that one operator's gradient is not needed, but its input's gradient is a dependency link of other operator, we need to fill a same shape gradient matrix in the position. In our implementation, we insert a special `fillZeroLike` operator. + Args: + block(Block): the block where new variables will be created + start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created + grad_to_var(dict): + key(str): grad variable name + val(str): corresponding forward variable name + In most cases, this dict is generated by _append_backward_ops_() + grad_info_map(dict)(output argument): + key(str): forward variable name + val(tuple): a tuple of (str, int), str is the corresponding grad name, int is the block index + """ +``` +Given a `block`, this function traverses all the `grad_op`s in it(The argument `start_op_idx` indicates where the grad_op sequence starts.) and creates all the uncreated outputs. The *pseudo-code* shows this process: -Follow these rules above, then collect the sub graph `OutputGradients`/`InputGradients` as the NetOp's and return it. +``` +for op in block.ops[start_op_idx : ]: + + if op has an attribute named 'sub_block': + Get the sub-block(`s_block`) from op's attribute. + Invoke _append_backward_vars_(), with `block=s_block` + + for var_name in op.all_output_names(): + if block.has_var_recursive(var_name) or var_name is the name of empty variable: + continue + create a new variable named 'var_name' in block + if grad_to_var.has_key(var_name): + set grad_info_map[grad_to_var[var_name]] as a tuple of (var_name. block) + + do op's var type inference + do op's shape inference +``` diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index b3c1bab298..f11c83f59c 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -176,6 +176,7 @@ def _append_backward_ops_(target, key(str): grad variable name val(str): corresponding forward variable name """ + # grad_op_descs holds created grad_op, and will be appended to target_block grad_op_descs = [] program = block.program for op in reversed(block.ops): @@ -188,6 +189,7 @@ def _append_backward_ops_(target, no_grad_dict, grad_to_var, callback) grad_sub_block_list.append(grad_sub_block.desc) + # Getting op's corresponding grad_op grad_op_desc, op_grad_to_var = core.get_grad_op_desc( op.desc, no_grad_dict[block.idx], grad_sub_block_list) grad_op_descs.extend(grad_op_desc) @@ -254,18 +256,18 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): def append_backward(loss, parameter_list=None, no_grad_set=None): """ - Create and add gradient Operators in BlockDesc to compute - gradients of `loss` for parameters in parameter_list - - :param loss: an variable generated by cost function. 
- :type loss: Variable - :param no_grad_dict: variable that should not create gradient - :type no_grad_dict: set - :param parameter_list: parameters that need to compute gradient and - update to optimize the lost. - :type: list - :return: list of (parameters, gradients) pair. - :rtype: list[Variable] + Append backward part to main_program + + Args: + loss(Variable): The variable generated by cost function. + parameter_list(list): Parameters that need to be updated by optimizer. + If None, it means all parameters need to be updated. + no_grad_set(set): Variables that have no gradients in Block 0. + If None, the set will be generated inside the function and + contains all variables with `step_gradient=True` from all blocks. + + Return: + (list[Variable]): list of (parameters, gradients) pair. """ assert isinstance(loss, framework.Variable) From a0e70cb1990a8143060e7b156de06391d962a850 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 1 Jan 2018 20:22:51 +0800 Subject: [PATCH 37/40] move backward doc postion --- {paddle/framework => doc}/backward.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {paddle/framework => doc}/backward.md (100%) diff --git a/paddle/framework/backward.md b/doc/backward.md similarity index 100% rename from paddle/framework/backward.md rename to doc/backward.md From deacfa9eb9c7e8cd55dd16a5b25424c7d9d04b9e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 2 Jan 2018 01:04:32 +0800 Subject: [PATCH 38/40] fix typo --- doc/{ => design}/backward.md | 29 ++++++++---------- .../design}/images/duplicate_op.graffle | Bin .../design}/images/duplicate_op.png | Bin .../design}/images/duplicate_op2.graffle | Bin .../design}/images/duplicate_op2.png | Bin 5 files changed, 12 insertions(+), 17 deletions(-) rename doc/{ => design}/backward.md (68%) rename {paddle/framework => doc/design}/images/duplicate_op.graffle (100%) rename {paddle/framework => doc/design}/images/duplicate_op.png (100%) rename {paddle/framework => doc/design}/images/duplicate_op2.graffle (100%) rename {paddle/framework => doc/design}/images/duplicate_op2.png (100%) diff --git a/doc/backward.md b/doc/design/backward.md similarity index 68% rename from doc/backward.md rename to doc/design/backward.md index acc95e99c4..85f45b5c74 100644 --- a/doc/backward.md +++ b/doc/design/backward.md @@ -2,13 +2,13 @@ ## Motivation -In Neural Network, most models are solved by the backpropagation algorithm(known as **BP**) at present. Technically, BP calculates the gradient of the loss function, then propagates it back through the networks following the chain rule. However, when configuring the model structure, users do not need to definate the backward part. So a mechanism is required by the framework which is able to complete the model's backward part automatically acoording to the given forward part. +In Neural Network, most models are solved by the backpropagation algorithm(known as **BP**) at present. Technically, BP calculates the gradient of the loss function, then propagates it back through the networks following the chain rule. However, when configuring the model structure, users do not need to define the backward part. So a mechanism is required by the framework which can complete the model's backward part automatically according to the given forward part. -When implementing a certain `op`, the developer is also asked to implement its backward version, called `grad_op`. A `grad_op` takes gradients of its corresponding `op`'s outputs, and calculate gradients of the `op`'s inputs. 
During the building of a model's backward part, the framework creates each forward `op`'s `grad_op`, and then string them together in reverse order of forward part. In this way, gradients spread from the end to the beginning of the model, in other word, from the loss to parameters. +When implementing a specific `op`, the developer is also asked to implement its backward version, called `grad_op`. A `grad_op` takes gradients of its corresponding `op`'s outputs, and calculate gradients of the `op`'s inputs. During the building of a model's backward part, the framework creates each forward `op`'s `grad_op`, and then string them together in reverse order of forwarding part. In this way, gradients spread from the end to the beginning of the model, in another word, from the loss to parameters. ## Challenges -The motivation of backward building is obvious. However, to implement it correctly is not so easy. In the **Fluid** design, a deep learning model is described by `Program`, `Block`, `Op` and `Variable`. The `Block` itself can be nested. It means that the `op`s and `variable`s are scattered across different blocks rather than all be gathered in a single graph. Our backward building algorithm shall visit blocks in recursive order and be able to insert `grad_op`s and new created `variable`s into right place. +The motivation of backward building is apparent. However, implementation it correctly is not so easy. In the **Fluid** design, a deep learning model is described by `Program`, `Block`, `Op` and `Variable`. The `Block` itself can be nested. It means that the `op`s and `variable`s are scattered across different blocks rather than all be gathered in a single graph. Our backward building algorithm shall visit blocks in recursive order and be able to insert `grad_op`s and new created `variable`s into the right place. ## Usage @@ -20,8 +20,8 @@ def append_backward(loss, parameter_list=None, no_grad_set=None): Append backward part to main_program Args: - loss(Variable): The variable generated by cost function. - parameter_list(list): Parameters that need to be updated by optimizer. + loss(Variable): The variable generated by the cost function. + parameter_list(list): Parameters that need to be updated by optimizers. If None, it means all parameters need to be updated. no_grad_set(set): Variables that have no gradients in Block 0. @@ -33,14 +33,14 @@ def append_backward(loss, parameter_list=None, no_grad_set=None): """ ``` -By invoking this API, the framework appends backward part for the program where the `loss` is. It takes three arguments. `loss` means the final loss value. It must be a scalar and is usually the output of the loss layer. It is also where the gradient generated and backpropagation starts. `parameter_list` marks all parameters needs updating. If it's `None`, all parameter will be updated by optimizers. `no_grad_set` marks variables without gradient. if all outputs of some `grad_op` are in `no_grad_set`, the `grad_op` will not be run. +By invoking this API, the framework appends backward part of the program where the `loss` is. It takes three arguments. `loss` means the final loss value. It must be a scalar and is usually the output of the loss layer. It is also where the gradient generated and backpropagation starts. `parameter_list` marks all parameters needs updating. If it's `None`, all parameter will be updated by optimizers. `no_grad_set` marks variables without gradient. if all outputs of some `grad_op` are in `no_grad_set`, the `grad_op` will not be run. 
This API will be invoked automatically before optimizer building. -As a result, in most cases users do not need to invoke the API by themselves to append backward part. +As a result, in most cases, users do not need to invoke the API by themselves to append backward part. ## Implementation -The implementation of backward building algorithm is in `backward.py` file. The whole algorithm can be divided to two independent parts: creating of `grad_op`s and creating of new variables. +The implementation of backward building algorithm is in `backward.py` file. The whole algorithm can be divided into two independent parts: creating of `grad_op`s and creating new variables. ### Creating `grad_op`s @@ -92,24 +92,19 @@ The first invoking of `_append_backward_ops_()` is initiated by `append_backward ### Corner Cases of `grad_op` Creating -In the previous section, we show the regular process of `grad_op` creating. However, in some corner cases, regular algorithm is not enough to get the correct result and appending handling is required. These addtional processes run after the above-mentioned algorithm and do some special adjusts on its output `grad_op`s. +In the previous section, we show the regular process of `grad_op` creating. However, in some corner cases, the conventional algorithm is not enough to get the correct result and appending handling is required. These additional processes run after the algorithm mentioned above and do some special adjusts on its output `grad_op`s. #### Shared Variables -If a variable is readed by more than one `op` in the forward pass, its gradient is likey to be written by more than one `grad_op`s in the following backward pass. To make the gradient result being the sum of all `grad_op`s' outputs instead of the last running one, we assign each output with a temporary variables, and then add a `sum_op` to add them up. +If a variable is read by more than one `op` in the forward pass, its gradient is likely to be written by more than one `grad_op`s in the next backward pass. To make the gradient result being the sum of all `grad_op`s' outputs instead of the last running one, we assign each output with a temporary variable and then add a `sum_op` to add them up. -For the debug convinience, if the final gradient name is `w@GRAD`, it's corresponding temporary variables will be named as `w@GRAD@RENAME@0`, `w@GRAD@RENAME@1`... - -
- - -
+For the debug convenience, if the final gradient name is `w@GRAD`, it's corresponding temporary variables will be named as `w@GRAD@RENAME@0`, `w@GRAD@RENAME@1`... See function `_addup_repetitive_outputs_` in `backward.py` for implementation details. #### No Gradient Variables -In our framework, variables can be marked as *no_gradient*, it means that the gradient of this variable is unnecessary and can be considered as zero in model training. Obviously, when all the outputs of some `grad_op` is marked as *no_gradient*, the `grad_op` itself can be skipped in backward pass. +In our framework, variables can be marked as *no_gradient*, it means that the gradient of this variable is unnecessary and can be considered as zero in model training. Apparently, when all the outputs of some `grad_op` are marked as *no_gradient*, the `grad_op` itself can be skipped in backward pass. But these unnecessary gradients still need to be creating and initialized by something, otherwise following `grad_op`s who take these gradients as inputs take the risk of using uninitialized memory. In our code, we employ `fill_zeros_like_op` to initialize them as all zeros. diff --git a/paddle/framework/images/duplicate_op.graffle b/doc/design/images/duplicate_op.graffle similarity index 100% rename from paddle/framework/images/duplicate_op.graffle rename to doc/design/images/duplicate_op.graffle diff --git a/paddle/framework/images/duplicate_op.png b/doc/design/images/duplicate_op.png similarity index 100% rename from paddle/framework/images/duplicate_op.png rename to doc/design/images/duplicate_op.png diff --git a/paddle/framework/images/duplicate_op2.graffle b/doc/design/images/duplicate_op2.graffle similarity index 100% rename from paddle/framework/images/duplicate_op2.graffle rename to doc/design/images/duplicate_op2.graffle diff --git a/paddle/framework/images/duplicate_op2.png b/doc/design/images/duplicate_op2.png similarity index 100% rename from paddle/framework/images/duplicate_op2.png rename to doc/design/images/duplicate_op2.png From 46a69e995f6e0e0ac450a25bfe2216ed4932bfb2 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 2 Jan 2018 01:08:27 +0800 Subject: [PATCH 39/40] fix typo --- doc/design/backward.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/design/backward.md b/doc/design/backward.md index 85f45b5c74..35f03692bb 100644 --- a/doc/design/backward.md +++ b/doc/design/backward.md @@ -40,7 +40,7 @@ As a result, in most cases, users do not need to invoke the API by themselves to ## Implementation -The implementation of backward building algorithm is in `backward.py` file. The whole algorithm can be divided into two independent parts: creating of `grad_op`s and creating new variables. +The implementation of backward building algorithm is in `backward.py` file. The whole algorithm can be divided into two independent parts: creating `grad_op`s and creating new variables. ### Creating `grad_op`s @@ -108,7 +108,7 @@ In our framework, variables can be marked as *no_gradient*, it means that the gr But these unnecessary gradients still need to be creating and initialized by something, otherwise following `grad_op`s who take these gradients as inputs take the risk of using uninitialized memory. In our code, we employ `fill_zeros_like_op` to initialize them as all zeros. -This features are implemented in function `_remove_no_grad_branch_`. It checks new created `grad_op`'s one-by-one, removes whose outputs are all in `no_grad_set` or inserts `fill_zeros_like_op` when its necessary. 
We can get the `no_grad_set` from the `_append_backward_ops_` argument `no_grad_dict` or generate it on fly by scanning all variables' `no_gradient` attribute(True or False). +This features are implemented in function `_remove_no_grad_branch_`. It checks new created `grad_op`s one-by-one, removes whose outputs are all in `no_grad_set` or inserts `fill_zeros_like_op` when its necessary. We can get the `no_grad_set` from the `_append_backward_ops_` argument `no_grad_dict` or generate it on the fly by scanning all variables' `no_gradient` attribute(True or False). ### Creating Backward Variables From 105ee86d14200253b77a06f9607bf6d19936c2f6 Mon Sep 17 00:00:00 2001 From: QI JUN Date: Tue, 2 Jan 2018 11:07:29 +0800 Subject: [PATCH 40/40] fix compile (#7125) --- paddle/operators/math/cos_sim_functor.cc | 4 ++-- paddle/operators/math/cos_sim_functor.cu | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/operators/math/cos_sim_functor.cc b/paddle/operators/math/cos_sim_functor.cc index f52a82b108..6af9f0fcd9 100644 --- a/paddle/operators/math/cos_sim_functor.cc +++ b/paddle/operators/math/cos_sim_functor.cc @@ -41,8 +41,8 @@ struct CosSimDyFunctor { } }; -template class CosSimDyFunctor; -template class CosSimDyFunctor; +template struct CosSimDyFunctor; +template struct CosSimDyFunctor; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/cos_sim_functor.cu b/paddle/operators/math/cos_sim_functor.cu index fb19a8b38a..6eb0a4ea4c 100644 --- a/paddle/operators/math/cos_sim_functor.cu +++ b/paddle/operators/math/cos_sim_functor.cu @@ -57,8 +57,8 @@ struct CosSimDyFunctor { } }; -template class CosSimDyFunctor; -template class CosSimDyFunctor; +template struct CosSimDyFunctor; +template struct CosSimDyFunctor; } // namespace math } // namespace operators } // namespace paddle