test=develop

local_add_cudnn_lstm
JiabinYang 6 years ago
parent 014e50c284
commit af9a3301da

@ -121,7 +121,9 @@ class SelectedRows {
int64_t AutoGrownIndex(int64_t key, bool auto_grown);
void SyncIndex();
/*
* @brief Get complete Dims before
*/
DDim GetCompleteDims() const {
std::vector<int64_t> dims = vectorize(value_->dims());
dims[0] = height_;
@ -136,7 +138,7 @@ class SelectedRows {
std::unordered_map<int64_t, int64_t>
id_to_index_; // should not be used when ids has duplicate member
std::unique_ptr<Tensor> value_{nullptr};
int64_t height_;
int64_t height_; // height indicates the underlying tensor's height
std::unique_ptr<RWLock> rwlock_{nullptr};
};

@ -145,8 +145,9 @@ class HierarchicalSigmoidGradOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE(ctx->HasInput("PreOut"),
"Input(Preout) should not be null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("W")),
"Output(W@Grad should not be null.)");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")));
"Output(W@Grad should not be null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Output(X@Grad should not be null.");
if (ctx->HasOutput(framework::GradVarName("Bias"))) {
ctx->SetOutputDim(framework::GradVarName("Bias"),
ctx->GetInputDim("Bias"));

@ -191,10 +191,10 @@ class HierarchicalSigmoidGradOpKernel : public framework::OpKernel<T> {
framework::Vector<int64_t> real_rows = cal_rows(path);
auto* w_grad =
ctx.Output<framework::SelectedRows>(framework::GradVarName("W"));
w_grad->set_rows(real_rows);
// build ids -> rows index map
w_grad->SyncIndex();
w_grad->set_height(w->dims()[0]);
auto* w_grad_value = w_grad->mutable_value();
framework::DDim temp_dim(w->dims());
set(temp_dim, 0, real_rows.size());

File diff suppressed because it is too large Load Diff
Loading…
Cancel
Save