format some files

avx_docs
hedaoyuan 8 years ago
parent d04c206f30
commit abdcb8e128

@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef HL_MATRIX_TYPE_CUH_
#define HL_MATRIX_TYPE_CUH_

@ -25,6 +25,7 @@ template<class T>
class add_scale {
private:
const T p;
public:
INLINE add_scale(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a + p; }
@ -34,6 +35,7 @@ template<class T>
class sub_scale {
private:
const T p;
public:
INLINE sub_scale(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a - p; }
@ -43,6 +45,7 @@ template<class T>
class mul_scale {
private:
const T p;
public:
INLINE mul_scale(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a * p; }
@ -52,6 +55,7 @@ template<class T>
class div_scale {
private:
const T p;
public:
INLINE div_scale(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a / p; }
@ -109,6 +113,7 @@ template<class T>
class min {
private:
const T p;
public:
INLINE min(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a > p ? p : a; }
@ -118,6 +123,7 @@ template<class T>
class max {
private:
const T p;
public:
INLINE max(const T s) : p(s) {}
INLINE T operator()(const T a) const { return a < p ? p : a; }
@ -127,6 +133,7 @@ template<class T>
class pow_op {
private:
const T p;
public:
INLINE pow_op(const T s) : p(s) {}
INLINE T operator()(const T a) const { return std::pow(a, p); }
@ -136,6 +143,7 @@ template<class T>
class constant {
private:
const T p;
public:
INLINE constant(const T s) : p(s) {}
INLINE T operator()(int i) const { return p; }
@ -146,6 +154,7 @@ template<class T>
class cmp_eq {
private:
const T p;
public:
INLINE cmp_eq(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a == p; }
@ -155,6 +164,7 @@ template<class T>
class cmp_ne {
private:
const T p;
public:
INLINE cmp_ne(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a != p; }
@ -164,6 +174,7 @@ template<class T>
class cmp_le {
private:
const T p;
public:
INLINE cmp_le(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a <= p; }
@ -173,6 +184,7 @@ template<class T>
class cmp_lt {
private:
const T p;
public:
INLINE cmp_lt(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a < p; }
@ -182,6 +194,7 @@ template<class T>
class cmp_ge {
private:
const T p;
public:
INLINE cmp_ge(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a >= p; }
@ -191,6 +204,7 @@ template<class T>
class cmp_gt {
private:
const T p;
public:
INLINE cmp_gt(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a > p; }
@ -200,6 +214,7 @@ template<class T>
class and_op {
private:
const T p;
public:
INLINE and_op(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a && p; }
@ -209,6 +224,7 @@ template<class T>
class or_op {
private:
const T p;
public:
INLINE or_op(const T s) : p(s) {}
INLINE bool operator()(const T a) const { return a || p; }
@ -228,11 +244,10 @@ class add_scale {
private:
const T p1;
const T p2;
public:
INLINE add_scale(const T s1, const T s2) : p1(s1), p2(s2) {}
INLINE T operator()(const T a, const T b) const {
return p1 * a + p2 * b;
}
INLINE T operator()(const T a, const T b) const { return p1 * a + p2 * b; }
};
template <class T>
@ -317,4 +332,3 @@ public:
} // namespace hppl
#endif // HL_TENSOR_OPS_H_
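
For orientation, the classes touched above are scalar-parameterized functors: each captures a constant p at construction and applies it per element through operator(). A minimal sketch of how such a functor plugs into an element-wise loop, assuming a hypothetical helper applyUnary and omitting namespace qualification (not part of this commit):

template <class T, class Op>
void applyUnary(T* data, int size, Op op) {
  // apply the functor to each element of a contiguous buffer
  for (int i = 0; i < size; i++) {
    data[i] = op(data[i]);
  }
}
// e.g. applyUnary(buf, n, add_scale<float>(2.0f)) adds 2 to every element.
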

@ -23,21 +23,16 @@ template<typename Derived, class T>
class TensorApply {
public:
explicit INLINE TensorApply(const Derived& p)
: data_(p.data_), stride_(p.stride_),
height_(p.height_), width_(p.width_), useGpu_(p.useGpu_) {}
: data_(p.data_),
stride_(p.stride_),
height_(p.height_),
width_(p.width_),
useGpu_(p.useGpu_) {}
INLINE T apply(int i, int j) const {
return data_[i * stride_ + j];
}
INLINE T apply(int index) const {
return data_[index];
}
INLINE T& applyRef(int i, int j) {
return data_[i * stride_ + j];
}
INLINE T& applyRef(int index) {
return data_[index];
}
INLINE T apply(int i, int j) const { return data_[i * stride_ + j]; }
INLINE T apply(int index) const { return data_[index]; }
INLINE T& applyRef(int i, int j) { return data_[i * stride_ + j]; }
INLINE T& applyRef(int index) { return data_[index]; }
INLINE size_t getWidth() const { return width_; }
INLINE size_t getHeight() const { return height_; }
@ -53,22 +48,20 @@ public:
/**
* \brief The tensor evaluator classes.
*
* evaluator for rvalues
*/
template <typename Derived, class T>
class TensorApply<const Derived, T> {
public:
explicit INLINE TensorApply(const Derived& p)
: data_(p.data_), stride_(p.stride_),
height_(p.height_), width_(p.width_), useGpu_(p.useGpu_) {}
: data_(p.data_),
stride_(p.stride_),
height_(p.height_),
width_(p.width_),
useGpu_(p.useGpu_) {}
INLINE T apply(int i, int j) const {
return data_[i * stride_ + j];
}
INLINE T apply(int index) const {
return data_[index];
}
INLINE T apply(int i, int j) const { return data_[i * stride_ + j]; }
INLINE T apply(int index) const { return data_[index]; }
INLINE size_t getWidth() const { return width_; }
INLINE size_t getHeight() const { return height_; }
@ -88,12 +81,8 @@ public:
explicit TensorApply(const TensorExpression<Derived, T>& expr)
: expr_(expr.derived()) {}
INLINE T apply(int i, int j) const {
return expr_.apply(i, j);
}
INLINE T apply(int index) const {
return expr_.apply(index);
}
INLINE T apply(int i, int j) const { return expr_.apply(i, j); }
INLINE T apply(int index) const { return expr_.apply(index); }
INLINE size_t getWidth() const { return expr_.getWidth(); }
INLINE size_t getHeight() const { return expr_.getHeight(); }
@ -112,12 +101,8 @@ public:
explicit INLINE TensorApply(const TensorUnaryOp<OP, ArgType, T>& expr)
: op_(expr.op_), expr_(expr.expr_) {}
INLINE T apply(int i, int j) const {
return op_(expr_.apply(i, j));
}
INLINE T apply(int index) const {
return op_(expr_.apply(index));
}
INLINE T apply(int i, int j) const { return op_(expr_.apply(i, j)); }
INLINE T apply(int index) const { return op_(expr_.apply(index)); }
INLINE size_t getWidth() const { return expr_.getWidth(); }
INLINE size_t getHeight() const { return expr_.getHeight(); }
@ -192,8 +177,8 @@ public:
INLINE size_t getWidth() const { return expr1_.getWidth(); }
INLINE size_t getHeight() const { return expr1_.getHeight(); }
INLINE bool isContiguous() const {
return expr1_.isContiguous() &&
expr2_.isContiguous() && expr3_.isContiguous();
return expr1_.isContiguous() && expr2_.isContiguous() &&
expr3_.isContiguous();
}
INLINE bool useGpu() const { return expr1_.useGpu(); }
@ -211,12 +196,8 @@ public:
explicit INLINE TensorApply(const TensorConstant<OP, ArgType, T>& expr)
: op_(expr.op_), expr_(expr.expr_) {}
INLINE T apply(int i, int j) const {
return op_(i, j);
}
INLINE T apply(int index) const {
return op_(index);
}
INLINE T apply(int i, int j) const { return op_(i, j); }
INLINE T apply(int index) const { return op_(index); }
INLINE size_t getWidth() const { return expr_.getWidth(); }
INLINE size_t getHeight() const { return expr_.getHeight(); }
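
As a rough picture of what these TensorApply specializations do, here is a stand-alone sketch under simplifying assumptions (1-D contiguous data, no GPU path, hypothetical type names LeafApply/UnaryApply): each evaluator wraps either raw data or a functor plus a child evaluator, and apply(index) composes them, so an expression is evaluated element by element without temporaries.

#include <cstddef>
#include <vector>

// Leaf evaluator: reads directly from a buffer.
template <class T>
struct LeafApply {
  const T* data;
  T apply(std::size_t i) const { return data[i]; }
};

// Unary evaluator: applies an operator to the child's result.
template <class Op, class Child, class T>
struct UnaryApply {
  Op op;
  Child child;
  T apply(std::size_t i) const { return op(child.apply(i)); }
};

int main() {
  std::vector<float> a = {1.0f, 2.0f, 3.0f};
  LeafApply<float> leaf{a.data()};
  auto addTwo = [](float x) { return x + 2.0f; };
  UnaryApply<decltype(addTwo), LeafApply<float>, float> expr{addTwo, leaf};
  return expr.apply(1) == 4.0f ? 0 : 1;  // evaluates a[1] + 2
}
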

@ -55,8 +55,11 @@ private:
};
template <typename Assign, typename... AssignOp>
void AssignCpuEvaluate(int height, int width, bool isContiguous,
Assign&& assign, AssignOp&& ... args) {
void AssignCpuEvaluate(int height,
int width,
bool isContiguous,
Assign&& assign,
AssignOp&&... args) {
if (isContiguous) {
int size = height * width;
for (int index = 0; index < size; index++) {
@ -75,8 +78,9 @@ void AssignCpuEvaluate(int height, int width, bool isContiguous,
#ifdef __NVCC__
template <typename Assign, typename... AssignOp>
__global__
void AssignGpuEvaluate1(const int border, Assign assign, AssignOp ... args) {
__global__ void AssignGpuEvaluate1(const int border,
Assign assign,
AssignOp... args) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < border) {
assign.apply(idx);
@ -85,9 +89,10 @@ void AssignGpuEvaluate1(const int border, Assign assign, AssignOp ... args) {
}
template <typename Assign, typename... AssignOp>
__global__
void AssignGpuEvaluate2(const int height, const int width,
Assign assign, AssignOp ... args) {
__global__ void AssignGpuEvaluate2(const int height,
const int width,
Assign assign,
AssignOp... args) {
const int colIdx = blockIdx.x * blockDim.x + threadIdx.x;
const int rowIdx = blockIdx.y * blockDim.y + threadIdx.y;
for (int i = rowIdx; i < height; i += gridDim.y * blockDim.y) {
@ -130,8 +135,8 @@ void AssignEvaluate(Assign&& assign, AssignOp&& ... args) {
int size = height * width;
int blockSize = size <= 1024 ? size : 1024;
int gridSize = (size + 1024 - 1) / 1024;
AssignGpuEvaluate1
<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(size, assign, args...);
AssignGpuEvaluate1<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
size, assign, args...);
} else {
int blockSizeY = std::min(32, (int)height);
int blockSizeX = (32 / blockSizeY) * 32;
@ -139,8 +144,8 @@ void AssignEvaluate(Assign&& assign, AssignOp&& ... args) {
int gridSizeY = std::min(32, (int)(height + blockSizeY - 1) / blockSizeY);
dim3 threads(blockSizeX, blockSizeY);
dim3 grid(gridSizeX, gridSizeY);
AssignGpuEvaluate2
<<<grid, threads, 0, STREAM_DEFAULT>>>(height, width, assign, args...);
AssignGpuEvaluate2<<<grid, threads, 0, STREAM_DEFAULT>>>(
height, width, assign, args...);
}
CHECK_SYNC("AssignEvaluate failed");
@ -151,4 +156,3 @@ void AssignEvaluate(Assign&& assign, AssignOp&& ... args) {
}
} // namespace paddle
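
To make the launch arithmetic in the hunks above concrete, the following sketch reproduces the contiguous and 2-D configurations for a hypothetical 200 x 300 tensor (illustrative numbers only, not taken from this commit):

#include <algorithm>
#include <cstdio>

int main() {
  const int height = 200, width = 300;
  const int size = height * width;                    // 60000 elements
  // contiguous path: one thread per element, capped at 1024 threads per block
  const int blockSize = size <= 1024 ? size : 1024;   // 1024
  const int gridSize = (size + 1024 - 1) / 1024;      // 59 blocks
  // 2-D path: up to 32 rows per block; the x dimension widens when height is
  // small so the block keeps roughly 1024 threads
  const int blockSizeY = std::min(32, height);                                 // 32
  const int blockSizeX = (32 / blockSizeY) * 32;                               // 32
  const int gridSizeY = std::min(32, (height + blockSizeY - 1) / blockSizeY);  // 7
  std::printf("%d %d %d %d %d\n",
              blockSize, gridSize, blockSizeY, blockSizeX, gridSizeY);
  return 0;
}
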

@ -49,8 +49,9 @@ inline void TensorCpuApply(LeftType& lhs, const RightType& rhs) {
#ifdef __NVCC__
template <typename LeftType, typename RightType>
__global__
void TensorElementWiseOp(LeftType lhs, RightType rhs, const int border) {
__global__ void TensorElementWiseOp(LeftType lhs,
RightType rhs,
const int border) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < border) {
lhs.applyRef(idx) = rhs.apply(idx);
@ -86,8 +87,8 @@ inline void TensorGpuApply(LeftType& lhs, const RightType& rhs) {
int size = dimM * dimN;
int blockSize = size <= 1024 ? size : 1024;
int gridSize = (size + 1024 - 1) / 1024;
TensorElementWiseOp
<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(lhs_, rhs_, size);
TensorElementWiseOp<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
lhs_, rhs_, size);
} else {
int blockSizeY = std::min(32, dimM);
int blockSizeX = (32 / blockSizeY) * 32;
@ -95,16 +96,14 @@ inline void TensorGpuApply(LeftType& lhs, const RightType& rhs) {
int gridSizeY = std::min(32, (dimM + blockSizeY - 1) / blockSizeY);
dim3 threads(blockSizeX, blockSizeY);
dim3 grid(gridSizeX, gridSizeY);
TensorElementWiseOp
<<<grid, threads, 0, STREAM_DEFAULT>>>(lhs_, rhs_);
TensorElementWiseOp<<<grid, threads, 0, STREAM_DEFAULT>>>(lhs_, rhs_);
}
CHECK_SYNC("TensorGpuApply failed");
}
#else
template <class T, typename LeftType, typename RightType>
inline void TensorGpuApply(LeftType& lhs, RightType& rhs) {
}
inline void TensorGpuApply(LeftType& lhs, RightType& rhs) {}
#endif
} // namespace paddle
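
For reference, the contiguous branch of the kernel above amounts to the following per-index copy, shown here as a plain CPU loop (a sketch with a hypothetical name, not part of this commit):

template <typename LeftType, typename RightType>
void tensorElementWiseCpu(LeftType& lhs, const RightType& rhs, const int border) {
  // same effect as TensorElementWiseOp for the 1-D case, one index at a time
  for (int idx = 0; idx < border; ++idx) {
    lhs.applyRef(idx) = rhs.apply(idx);
  }
}
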

File diff suppressed because it is too large.


@ -355,4 +355,3 @@ void adamaxApply(BaseMatrix& value,
} // namespace paddle
#endif

@ -119,5 +119,4 @@ extern void adamaxApply(BaseMatrix& value,
real beta2,
int64_t step,
real alpha);
} // namespace paddle

@ -31,7 +31,8 @@ void SparseMomentumParameterOptimizer(const VectorPtr vecs[],
tau * alpha * gamma * learningRate);
vecs[PARAMETER_VALUE]->add(*vecs[PARAMETER_MOMENTUM_UT],
tau / beta + 1.0 / alpha,
*vecs[PARAMETER_MOMENTUM_VT], 1.0 / beta);
*vecs[PARAMETER_MOMENTUM_VT],
1.0 / beta);
}
void AdagradParameterOptimizer(const VectorPtr vecs[],
@ -46,10 +47,12 @@ void AdagradParameterOptimizer(const VectorPtr vecs[],
vecs[PARAMETER_LEARNING_RATE]->add(epsilon);
vecs[PARAMETER_LEARNING_RATE]->invSqrt(*vecs[PARAMETER_LEARNING_RATE]);
vecs[PARAMETER_VALUE]->sgdUpdate(
*vecs[PARAMETER_GRADIENT], *vecs[PARAMETER_MOMENTUM],
*vecs[PARAMETER_LEARNING_RATE], learningRate,
momentum, decayRate);
vecs[PARAMETER_VALUE]->sgdUpdate(*vecs[PARAMETER_GRADIENT],
*vecs[PARAMETER_MOMENTUM],
*vecs[PARAMETER_LEARNING_RATE],
learningRate,
momentum,
decayRate);
}
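
A small numeric illustration of the AdaGrad step above (values are assumptions, not from this commit, and assuming the learning-rate vector already holds the accumulated squared gradients set up by the lines not shown here): with an accumulated squared gradient of 0.04 for some weight and epsilon = 1e-6, add(epsilon) followed by invSqrt gives a per-element rate of 1 / sqrt(0.04 + 1e-6) ≈ 5.0, which then scales that weight's update inside sgdUpdate.
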
void AdaDeltaParameterOptimizer(const VectorPtr vecs[],
@ -59,24 +62,29 @@ void AdaDeltaParameterOptimizer(const VectorPtr vecs[],
real momentum,
real decayRate) {
// E(g_t^2) = \rou * E(g_{t-1}^2) + (1-\rou) * g^2
vecs[PARAMETER_GRADIENT_SQURESUM]->decayAddSquare(*vecs[PARAMETER_GRADIENT],
rou, 1.0f - rou);
vecs[PARAMETER_GRADIENT_SQURESUM]->decayAddSquare(
*vecs[PARAMETER_GRADIENT], rou, 1.0f - rou);
// learn_rate = sqrt( ( E(dx_{t-1}^2) + epsilon ) / ( E(g_t^2) + epsilon ) )
vecs[PARAMETER_LEARNING_RATE]->dotDiv(*vecs[PARAMETER_GRADIENT_SQURESUM1],
*vecs[PARAMETER_GRADIENT_SQURESUM],
epsilon, epsilon);
epsilon,
epsilon);
vecs[PARAMETER_LEARNING_RATE]->sqrt2();
// E(dx_t^2) = \rou * E(dx_{t-1}^2) + (1-\rou) * (-g*learn_rate)^2
vecs[PARAMETER_GRADIENT_SQURESUM1]->decayAddSquareMul(
*vecs[PARAMETER_GRADIENT], *vecs[PARAMETER_LEARNING_RATE], rou,
*vecs[PARAMETER_GRADIENT],
*vecs[PARAMETER_LEARNING_RATE],
rou,
1.0f - rou);
vecs[PARAMETER_VALUE]->sgdUpdate(
*vecs[PARAMETER_GRADIENT], *vecs[PARAMETER_MOMENTUM],
*vecs[PARAMETER_LEARNING_RATE], learningRate,
momentum, decayRate);
vecs[PARAMETER_VALUE]->sgdUpdate(*vecs[PARAMETER_GRADIENT],
*vecs[PARAMETER_MOMENTUM],
*vecs[PARAMETER_LEARNING_RATE],
learningRate,
momentum,
decayRate);
}
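
Worked numbers for the AdaDelta formulas above (purely illustrative): with rou = 0.95, epsilon = 1e-6, a gradient of 0.1 and both accumulators starting at zero, E(g^2) becomes 0.05 * 0.01 = 5e-4, the per-element learn_rate is sqrt((0 + 1e-6) / (5e-4 + 1e-6)) ≈ 0.045, and E(dx^2) becomes 0.05 * (0.1 * 0.045)^2 ≈ 1.0e-6.
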
void RMSPropParameterOptimizer(const VectorPtr vecs[],
@ -91,12 +99,11 @@ void RMSPropParameterOptimizer(const VectorPtr vecs[],
// For the first time update, make the sum be the current square
// so that the initial estimation of E(g_t^2) will not be too small.
vecs[PARAMETER_GRADIENT_SQURESUM]->decayAddSquare(
*vecs[PARAMETER_GRADIENT], accumulatedRou,
firstTime ? 1.0f : 1.0f - rou);
*vecs[PARAMETER_GRADIENT], accumulatedRou, firstTime ? 1.0f : 1.0f - rou);
// E(g_t) = \rou * E(g_{t-1}) + (1-\rou) * g
vecs[PARAMETER_GRADIENT_SQURESUM1]->add(*vecs[PARAMETER_GRADIENT],
accumulatedRou, 1.0f - rou);
vecs[PARAMETER_GRADIENT_SQURESUM1]->add(
*vecs[PARAMETER_GRADIENT], accumulatedRou, 1.0f - rou);
// learn_rate = 1/sqrt( ( E(g_t^2) - (E(g_t))^2 + epsilon )
// Basically, if the sign of the gradient changes more often,
@ -107,10 +114,12 @@ void RMSPropParameterOptimizer(const VectorPtr vecs[],
vecs[PARAMETER_LEARNING_RATE]->add(epsilon);
vecs[PARAMETER_LEARNING_RATE]->invSqrt(*vecs[PARAMETER_LEARNING_RATE]);
vecs[PARAMETER_VALUE]->sgdUpdate(
*vecs[PARAMETER_GRADIENT], *vecs[PARAMETER_MOMENTUM],
*vecs[PARAMETER_LEARNING_RATE], learningRate,
momentum, decayRate);
vecs[PARAMETER_VALUE]->sgdUpdate(*vecs[PARAMETER_GRADIENT],
*vecs[PARAMETER_MOMENTUM],
*vecs[PARAMETER_LEARNING_RATE],
learningRate,
momentum,
decayRate);
}
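
For RMSProp, the comment above spells out learn_rate = 1 / sqrt(E(g_t^2) - (E(g_t))^2 + epsilon). As a rough illustration (made-up values): with E(g^2) = 4e-4, E(g) = 0.01 and epsilon = 1e-6, that is 1 / sqrt(4e-4 - 1e-4 + 1e-6) ≈ 57.7, so a weight whose gradient keeps a steady sign and magnitude gets a larger effective step than one whose gradient oscillates.
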
void DecayedAdagradParameterOptimizer(const VectorPtr vecs[],
@ -125,8 +134,7 @@ void DecayedAdagradParameterOptimizer(const VectorPtr vecs[],
// For the first time update, make the sum be the current square
// so that the initial estimation of E(g_t^2) will not be too small.
vecs[PARAMETER_GRADIENT_SQURESUM]->decayAddSquare(
*vecs[PARAMETER_GRADIENT], accumulatedRou,
firstTime ? 1.0f : 1.0f - rou);
*vecs[PARAMETER_GRADIENT], accumulatedRou, firstTime ? 1.0f : 1.0f - rou);
// learn_rate = 1/sqrt( ( E(g_t^2) + epsilon )
// Basically, the bigger the magnitude of the gradient is,
@ -135,10 +143,12 @@ void DecayedAdagradParameterOptimizer(const VectorPtr vecs[],
vecs[PARAMETER_LEARNING_RATE]->add(*vecs[PARAMETER_GRADIENT_SQURESUM]);
vecs[PARAMETER_LEARNING_RATE]->invSqrt(*vecs[PARAMETER_LEARNING_RATE]);
vecs[PARAMETER_VALUE]->sgdUpdate(
*vecs[PARAMETER_GRADIENT], *vecs[PARAMETER_MOMENTUM],
*vecs[PARAMETER_LEARNING_RATE], learningRate,
momentum, decayRate);
vecs[PARAMETER_VALUE]->sgdUpdate(*vecs[PARAMETER_GRADIENT],
*vecs[PARAMETER_MOMENTUM],
*vecs[PARAMETER_LEARNING_RATE],
learningRate,
momentum,
decayRate);
}
void AdamParameterOptimizer(const VectorPtr vecs[],
@ -164,16 +174,13 @@ void AdamParameterOptimizer(const VectorPtr vecs[],
// \theta_t = \theta_{t-1} - \alpha * \sqrt(1-\beta_2^t) / (1-\beta_1^t) * tmp
g->sqrt2(*v);
g->dotDiv(*m, *g, 0., epsilon);
real alpha = learningRate *
std::sqrt((real)1 - beta2_power) / ((real)1 - beta1_power);
real alpha =
learningRate * std::sqrt((real)1 - beta2_power) / ((real)1 - beta1_power);
theta->add(*theta, 1.0, *g, -alpha);
}
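
A quick check of the bias-corrected step size above (illustrative values): with learningRate = 0.001, beta1 = 0.9, beta2 = 0.999 and step 1, beta1_power = 0.9 and beta2_power = 0.999, so alpha = 0.001 * sqrt(1 - 0.999) / (1 - 0.9) ≈ 3.16e-4.
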
void AdamaxParameterOptimizer(const VectorPtr vecs[],
real beta1,
real beta2,
int64_t step,
real alpha) {
void AdamaxParameterOptimizer(
const VectorPtr vecs[], real beta1, real beta2, int64_t step, real alpha) {
Vector* m = vecs[PARAMETER_MOMENTUM].get();
Vector* g = vecs[PARAMETER_GRADIENT].get();
Vector* u = vecs[PARAMETER_WEIGHTED_INFINITY_NORM].get();
@ -192,4 +199,3 @@ void AdamaxParameterOptimizer(const VectorPtr vecs[],
real learningRate = alpha / (1 - std::pow(beta1, step));
theta->add(*theta, 1.0, *g, -learningRate);
}
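
Similarly for Adamax (illustrative values): with alpha = 0.002, beta1 = 0.9 and step = 1, the effective learningRate is 0.002 / (1 - 0.9) = 0.02, and it decays toward alpha as 1 - beta1^step approaches 1 over later steps.
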
