Remove debug codes

del_some_in_makelist
Yang Yu 8 years ago
parent dd0a4c3566
commit 0fd4a04abd

@@ -134,17 +134,8 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
 #endif
     offset_ = 0;
   }
-  void* buf = reinterpret_cast<void*>(
-      reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
-  if (type.hash_code() == typeid(float).hash_code() ||
-      type.hash_code() == typeid(double).hash_code()) {
-    float* tmp = (float*)(buf);
-    for (int64_t i = 0; i < numel(); ++i) {
-      tmp[i] = NAN;
-    }
-  }
-  return buf;
+  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
+                                 offset_);
 }
 inline void* Tensor::mutable_data(platform::Place place) {
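Note: the deleted block implemented NaN poisoning. Whenever an op requested a mutable float or double buffer, every element was pre-filled with NaN, so any element the op forgot to write surfaced as NaN downstream instead of silently reusing stale memory. A minimal standalone sketch of the same trick, assuming nothing from the Paddle codebase (AllocPoisoned is a hypothetical helper, for illustration only):

#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iostream>

// Hypothetical helper (not a Paddle API): allocate n floats, poisoned with NaN.
float* AllocPoisoned(int64_t n) {
  float* buf = static_cast<float*>(std::malloc(n * sizeof(float)));
  for (int64_t i = 0; i < n; ++i) {
    buf[i] = NAN;  // any slot never written afterwards stays NaN
  }
  return buf;
}

int main() {
  float* data = AllocPoisoned(4);
  data[0] = 1.f; data[1] = 2.f; data[3] = 4.f;  // bug: data[2] never written
  float sum = 0.f;
  for (int i = 0; i < 4; ++i) sum += data[i];
  // NaN propagates through the sum, making the uninitialized read visible.
  std::cout << "sum = " << sum << (std::isnan(sum) ? " (NaN detected)" : "") << "\n";
  std::free(data);
  return 0;
}

The cost is an extra O(n) write on every mutable_data call, which is why such instrumentation gets stripped before merging, as this commit does.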

@@ -51,7 +51,6 @@ class FillConstantOp : public framework::OperatorBase {
     platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(dev_place);
-    VLOG(10) << "FillConstant to " << &out;
     math::set_constant(dev_ctx, &out, value);
   }
 };

@@ -116,7 +116,6 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
     auto height = dout_tensor.dims()[0];
     auto slice = dx_tensor.Slice(0, static_cast<int>(height));
     framework::CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx, &slice);
-    VLOG(10) << dx_tensor.dims()[0] << ", " << height;
     if (dx_tensor.dims()[0] > height) {
       auto rest_tensor = dx_tensor.Slice(
           static_cast<int>(height), static_cast<int>(dx_tensor.dims()[0]));
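Note: the two hunks above each drop a VLOG(10) trace. Paddle logs through glog, where VLOG(n) only fires when verbose logging is enabled at level n or above, so these level-10 lines were silent in normal runs; they are debug-session leftovers rather than production logging. A minimal standalone sketch of the mechanism (assumes glog is available; the flag values are illustrative):

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  (void)argc;  // unused
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // send log output to stderr instead of files
  FLAGS_v = 1;               // same effect as passing --v=1 on the command line
  VLOG(1) << "coarse trace";         // emitted: level 1 <= FLAGS_v
  VLOG(10) << "fine-grained trace";  // suppressed: level 10 > FLAGS_v
  return 0;
}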

@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include <cmath>
 #include <vector>
 #include "paddle/framework/executor.h"
 #include "paddle/framework/lod_tensor_array.h"
@@ -195,36 +194,14 @@ class WhileGradOp : public framework::OperatorBase {
           }
         }
-        auto check_var_no_nan = [](const framework::Scope &scope,
-                                   const std::string &var_name) {
-          auto *var = scope.FindVar(var_name);
-          if (var->IsType<LoDTensor>()) {
-            VLOG(10) << "Checking " << var_name;
-            PADDLE_ENFORCE(!framework::HasNAN(var->Get<framework::LoDTensor>()),
-                           "%s has NAN", var_name);
-            if (var->Get<framework::LoDTensor>().type() ==
-                typeid(float)) {  // NOLINT
-              auto &tensor = var->Get<framework::LoDTensor>();
-              auto *buf = tensor.data<float>();
-              for (int64_t i = 0; i < tensor.numel(); ++i) {
-                PADDLE_ENFORCE(!std::isnan(buf[i]));
-              }
-              VLOG(10) << buf[0];
-            }
-          }
-        };
-        check_var_no_nan(cur_scope, inside_grad_name);
         auto new_inside_name = cur_scope.Rename(inside_grad_name);
-        check_var_no_nan(cur_scope, new_inside_name);
         auto sum_op = framework::OpRegistry::CreateOp(
             "sum", {{"X", {pg_names[param_id], new_inside_name}}},
             {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{});
         sum_op->Run(cur_scope, dev_place);
-        check_var_no_nan(scope, pg_names[param_id]);
         cur_scope.Rename(new_inside_name, inside_grad_name);
       }
     }
-    VLOG(1) << "Complete WhileOpGrad";
   }
 };
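Note: the deleted check_var_no_nan lambda complemented the NaN poisoning in the first hunk: before the rename, after it, and after the sum op accumulated gradients, it asserted that the tensor contained no NaN, via framework::HasNAN plus an explicit element-wise std::isnan scan for float tensors. A minimal standalone sketch of that fail-fast pattern (a std::vector<float> stands in for LoDTensor; CheckNoNaN is a hypothetical name):

#include <cmath>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for the removed helper: abort loudly on the first NaN,
// naming the variable so the offending op can be pinpointed.
void CheckNoNaN(const std::vector<float>& tensor, const std::string& name) {
  for (std::size_t i = 0; i < tensor.size(); ++i) {
    if (std::isnan(tensor[i])) {
      std::cerr << name << " has NaN at element " << i << "\n";
      std::abort();
    }
  }
}

int main() {
  std::vector<float> grad = {0.5f, 1.5f, NAN};
  CheckNoNaN(grad, "d_out");  // aborts with "d_out has NaN at element 2"
  return 0;
}

Running such a check after every gradient accumulation step is expensive, which again explains why the instrumentation is removed once the bug hunt is over.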
