Replace all errors thrown by LOG(FATAL) with PADDLE_THROW (#24759)

* remove REPLACE_ENFORCE_GLOG compile option & add ci rule prohibit LOG(FATAL) using, test=develop

* remove ci test case, test=develop

* replace all LOG(FATAL) & polish message, test=develop

* fix typo, test=develop

* polish error info detail, test=develop
revert-24981-add_device_attr_for_regulization
Chen Weihang 5 years ago committed by GitHub
parent a4f6003404
commit d1062d5278
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -80,7 +80,6 @@ option(WITH_PSLIB "Compile with pslib support" OFF)
option(WITH_BOX_PS "Compile with box_ps support" OFF)
option(WITH_XBYAK "Compile with xbyak support" ON)
option(WITH_CONTRIB "Compile the third-party contributation" OFF)
option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF)
option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE})
option(WITH_INFERENCE_API_TEST "Test fluid inference C++ high-level api interface" OFF)
option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VERSION})
@ -103,11 +102,6 @@ if(NOT CMAKE_BUILD_TYPE)
FORCE)
endif()
# Replace PADDLE_ENFORCE with glog/CHECK for better debug
if(REPLACE_ENFORCE_GLOG)
add_definitions("-DREPLACE_ENFORCE_GLOG")
endif()
# the type of sanitizer, options are: Address, Leak, Memory, Thread, Undefined. Default: OFF
if(SANITIZER_TYPE AND NOT "${SANITIZER_TYPE}" MATCHES "^(Address|Leak|Memory|Thread|Undefined)$")
message("Choose the correct type of sanitizer")

@ -32,6 +32,7 @@
#include <valarray>
#include <vector>
#include "paddle/fluid/framework/expect.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace framework {
@ -43,7 +44,10 @@ class ArchiveBase {
// Archive is not copyable. But to allow move capture by function objects,
// check it at runtime rather than at compile time.
ArchiveBase(const ArchiveBase&) { LOG(FATAL) << "Not supported"; }
ArchiveBase(const ArchiveBase&) {
PADDLE_THROW(platform::errors::Unavailable(
"ArchiveBase class does not support copy construction."));
}
ArchiveBase(ArchiveBase&& other)
: buffer_(other.buffer_),
@ -62,7 +66,8 @@ class ArchiveBase {
public:
ArchiveBase& operator=(const ArchiveBase&) {
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unavailable(
"ArchiveBase class does not support assignment construction."));
return *this;
}

@ -34,7 +34,7 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file(
const char* filename) {
paddle::framework::DataFeedDesc data_feed_desc;
int file_descriptor = open(filename, O_RDONLY);
PADDLE_ENFORCE_NE(file_descriptor, -1, platform::errors::Unavaliable(
PADDLE_ENFORCE_NE(file_descriptor, -1, platform::errors::Unavailable(
"Cannot open file %s.", filename));
google::protobuf::io::FileInputStream fileInput(file_descriptor);
google::protobuf::TextFormat::Parse(&fileInput, &data_feed_desc);
@ -45,7 +45,7 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file(
const std::vector<std::string> load_filelist_from_file(const char* filename) {
std::vector<std::string> filelist;
std::ifstream fin(filename);
PADDLE_ENFORCE_EQ(fin.good(), true, platform::errors::Unavaliable(
PADDLE_ENFORCE_EQ(fin.good(), true, platform::errors::Unavailable(
"Cannot open file %s.", filename));
std::string line;
while (getline(fin, line)) {

@ -187,16 +187,8 @@ void AllReduceOpHandle::SyncNCCLAllReduce() {
nccl_ctxs_->GetRunEnvNCCLCtx(run_order_, use_hierarchical_allreduce_);
auto &nccl_ctx = nccl_ctxs->at(dev_id);
auto stream = nccl_ctx.stream();
cudaError_t e_sync = cudaStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "cudaStreamSynchronize " << cudaGetErrorString(e_sync);
}
cudaError_t e_get = cudaGetLastError();
if (e_get != 0) {
LOG(FATAL) << "cudaGetLastError " << cudaGetErrorString(e_get)
<< " errno:" << e_get;
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError());
}
}
}

@ -34,9 +34,10 @@ class ExceptionHolder {
} catch (platform::EnforceNotMet& exp) {
Catch(exp);
} catch (std::exception& ex) {
LOG(FATAL) << "std::exception caught, " << ex.what();
PADDLE_THROW(platform::errors::Fatal(
"Unknown std::exception caught:\n%s.", ex.what()));
} catch (...) {
LOG(FATAL) << "Unknown exception caught";
PADDLE_THROW(platform::errors::Fatal("Unknown exception caught."));
}
}

@ -104,7 +104,8 @@ void DensePullThread::wait_all() {
}
if (_pull_dense_fail_times > 20) {
LOG(FATAL) << "pull dense failed times more than 20 times";
PADDLE_THROW(
platform::errors::Fatal("Pull dense failed more than 20 times."));
exit(-1);
}

@ -214,35 +214,35 @@ class AfsManager {
int fd_read[2];
int fd_write[2];
if (read) {
if (pipe(fd_read) != 0) {
LOG(FATAL) << "create read pipe failed";
return -1;
}
PADDLE_ENFORCE_EQ(
pipe(fd_read), 0,
platform::errors::External("Create read pipe failed in AfsManager."));
}
if (write) {
if (pipe(fd_write) != 0) {
LOG(FATAL) << "create write pipe failed";
return -1;
}
PADDLE_ENFORCE_EQ(pipe(fd_write), 0,
platform::errors::External(
"Create write pipe failed in AfsManager."));
}
pid = vfork();
if (pid < 0) {
LOG(FATAL) << "fork failed";
return -1;
}
PADDLE_ENFORCE_GE(
pid, 0,
platform::errors::External(
"Failed to create a child process via fork in AfsManager."));
if (pid == 0) {
if (read) {
if (-1 == dup2(fd_read[1], STDOUT_FILENO)) {
LOG(FATAL) << "dup2 failed";
}
PADDLE_ENFORCE_NE(
dup2(fd_read[1], STDOUT_FILENO), -1,
platform::errors::External(
"Failed to duplicate file descriptor via dup2 in AfsManager."));
close(fd_read[1]);
close(fd_read[0]);
}
if (write) {
if (-1 == dup2(fd_write[0], STDIN_FILENO)) {
LOG(FATAL) << "dup2 failed";
}
PADDLE_ENFORCE_NE(
dup2(fd_write[0], STDIN_FILENO), -1,
platform::errors::External(
"Failed to duplicate file descriptor via dup2 in AfsManager."));
close(fd_write[0]);
close(fd_write[1]);
}
@ -265,20 +265,20 @@ class AfsManager {
close(fd_read[1]);
fcntl(fd_read[0], F_SETFD, FD_CLOEXEC);
fp_read = fdopen(fd_read[0], "r");
if (0 == fp_read) {
LOG(FATAL) << "fdopen failed.";
return -1;
}
PADDLE_ENFORCE_NE(
fp_read, 0,
platform::errors::External(
"Failed to open file descriptor via fdopen in AfsManager."));
}
if (write) {
close(fd_write[0]);
fcntl(fd_write[1], F_SETFD, FD_CLOEXEC);
fp_write = fdopen(fd_write[1], "w");
if (0 == fp_write) {
LOG(FATAL) << "fdopen failed.";
return -1;
}
PADDLE_ENFORCE_NE(
fp_write, 0,
platform::errors::External(
"Failed to open file descriptor via fdopen in AfsManager."));
}
return 0;
}

@ -1085,7 +1085,8 @@ void FleetWrapper::ShrinkDenseTable(int table_id, Scope* scope,
push_status.wait();
auto status = push_status.get();
if (status != 0) {
LOG(FATAL) << "push shrink dense param failed, status[" << status << "]";
PADDLE_THROW(platform::errors::Fatal(
"push shrink dense param failed, status is [%d].", status));
sleep(sleep_seconds_before_fail_exit_);
exit(-1);
}

@ -13,8 +13,11 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/io/fs.h"
#include <memory>
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace framework {
@ -127,7 +130,8 @@ std::shared_ptr<FILE> localfs_open_write(std::string path,
int64_t localfs_file_size(const std::string& path) {
struct stat buf;
if (0 != stat(path.c_str(), &buf)) {
LOG(FATAL) << "file stat not zero";
PADDLE_THROW(platform::errors::External(
"Failed to get file status via stat function."));
return -1;
}
return (int64_t)buf.st_size;
@ -365,7 +369,9 @@ std::shared_ptr<FILE> fs_open_read(const std::string& path, int* err_no,
return hdfs_open_read(path, err_no, converter);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return {};
@ -381,7 +387,9 @@ std::shared_ptr<FILE> fs_open_write(const std::string& path, int* err_no,
return hdfs_open_write(path, err_no, converter);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return {};
@ -397,7 +405,9 @@ std::shared_ptr<FILE> fs_open(const std::string& path, const std::string& mode,
return fs_open_write(path, err_no, converter);
}
LOG(FATAL) << "Unknown mode: " << mode;
PADDLE_THROW(platform::errors::Unavailable(
"Unsupport file open mode: %s. Only supports 'r', 'rb', 'w' or 'wb'.",
mode));
return {};
}
@ -407,7 +417,8 @@ int64_t fs_file_size(const std::string& path) {
return localfs_file_size(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system."));
}
return 0;
@ -422,7 +433,9 @@ void fs_remove(const std::string& path) {
return hdfs_remove(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
}
@ -435,7 +448,9 @@ std::vector<std::string> fs_list(const std::string& path) {
return hdfs_list(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return {};
@ -450,7 +465,9 @@ std::string fs_tail(const std::string& path) {
return hdfs_tail(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return "";
@ -465,7 +482,9 @@ bool fs_exists(const std::string& path) {
return hdfs_exists(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return false;
@ -480,7 +499,9 @@ void fs_mkdir(const std::string& path) {
return hdfs_mkdir(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
}

@ -29,14 +29,16 @@ std::shared_ptr<FILE> shell_fopen(const std::string& path,
}
FILE* fp;
if (!(fp = fopen(path.c_str(), mode.c_str()))) {
LOG(FATAL) << "fopen fail, path[" << path << "], mode[" << mode << "]";
PADDLE_THROW(platform::errors::Unavailable(
"Failed to open file, path[%s], mode[%s].", path, mode));
}
return {fp, [path](FILE* fp) {
if (shell_verbose()) {
LOG(INFO) << "Closing file[" << path << "]";
}
if (0 != fclose(fp)) {
LOG(FATAL) << "fclose fail, path[" << path << "]";
PADDLE_THROW(platform::errors::Unavailable(
"Failed to close file, path[%s].", path));
}
}};
#endif
@ -58,7 +60,7 @@ static int close_open_fds_internal() {
int dir_fd = -1;
if ((dir_fd = open("/proc/self/fd", O_RDONLY)) < 0) {
LOG(FATAL) << "proc/self/fd open fail";
PADDLE_THROW(platform::errors::Unavailable("Failed to open proc/self/fd."));
return -1;
}
char buffer[sizeof(linux_dirent)];
@ -68,7 +70,8 @@ static int close_open_fds_internal() {
if ((bytes = syscall(SYS_getdents, dir_fd,
reinterpret_cast<linux_dirent*>(buffer),
sizeof(buffer))) < 0) {
LOG(FATAL) << "syscall fail";
PADDLE_THROW(platform::errors::Unavailable(
"System call failed via syscall function."));
return -1;
}

@ -281,7 +281,9 @@ void MultiDevSSAGraphBuilderBase::InsertScaleLossGradOp(
loss_scale = 0;
break;
default:
LOG(FATAL) << "Unknown gradient scale strategy.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unknown gradient scale strategy. Now only supports One, "
"CoeffNumDevice and Customized strategies."));
break;
}
@ -1054,7 +1056,9 @@ void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result,
}
break;
default:
LOG(FATAL) << "Unknown reduce strategy.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unknown reduce strategy. Now only supports Reduce and AllReduce "
"strategies."));
break;
}
}

@ -126,7 +126,8 @@ class Pass {
protected:
virtual void ApplyImpl(Graph *graph) const {
LOG(FATAL) << "Calling virtual Pass not implemented.";
PADDLE_THROW(platform::errors::Unimplemented(
"The virtual Pass called is not implemented."));
}
// Some Pass must be placed before this Pass, and some

@ -70,8 +70,8 @@ void PullDenseWorker::Wait(std::vector<::std::future<int32_t>>* status_vec) {
size_t MAX_FAIL_NUM = 20;
if (pull_dense_fail_times_ > MAX_FAIL_NUM) {
LOG(FATAL) << "Pull Dense Failed Times More Than " << MAX_FAIL_NUM
<< " Times";
PADDLE_THROW(platform::errors::Fatal(
"Pull dense failed more than %d times.", MAX_FAIL_NUM));
exit(-1);
}
status_vec->resize(0);

@ -38,10 +38,11 @@ struct ExceptionHandler {
void operator()() const {
auto ex = this->future_.get();
if (ex != nullptr) {
LOG(FATAL) << "The exception is thrown inside the thread pool. You "
"should use RunAndGetException to handle the exception.\n"
"The default exception handler is LOG(FATAL)."
<< ex->what();
PADDLE_THROW(platform::errors::Fatal(
"The exception is thrown inside the thread pool. You "
"should use RunAndGetException to handle the exception."
"The exception is:\n %s.",
ex->what()));
}
}
};
@ -78,9 +79,11 @@ class ThreadPool {
return std::unique_ptr<platform::EnforceNotMet>(
new platform::EnforceNotMet(ex));
} catch (const std::exception& e) {
LOG(FATAL) << "Unexpected exception is catched in thread pool. All "
"throwable exception in Fluid should be an EnforceNotMet."
<< e.what();
PADDLE_THROW(platform::errors::Fatal(
"Unexpected exception is catched in thread pool. All "
"throwable exception in Paddle should be an EnforceNotMet."
"The exception is:\n %s.",
e.what()));
}
return nullptr;
});

@ -579,11 +579,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
if (config.thread_local_stream_enabled() &&
process_level_allocator_enabled) {
LOG(FATAL) << " When binding threads and streams, the use of "
PADDLE_THROW(platform::errors::Fatal(
"When binding threads and streams, the use of "
"process-level allocators will result in undefined result "
"errors due to memory asynchronous operations."
"The thread and stream binding configuration of all "
"predictors should be the same in a single process.";
"predictors should be the same in a single process."));
}
}
@ -917,8 +918,9 @@ std::string AnalysisPredictor::GetSerializedProgram() const {
bool AnalysisPredictor::CheckOperatorCompatible() {
if (!inference_program_) {
LOG(FATAL) << "Inference program version check failed because the program "
"does not exist.";
PADDLE_THROW(platform::errors::PreconditionNotMet(
"Inference program version check failed because the program does not "
"exist."));
return false;
}
bool res = true;

@ -46,7 +46,8 @@ PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) {
pt.data.Reset(t->data<void>(), t->numel() * sizeof(int32_t));
pt.dtype = PaddleDType::INT32;
} else {
LOG(FATAL) << "unsupported type.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported tensor date type. Now only supports INT64, FP32, INT32."));
}
pt.shape = framework::vectorize<int>(t->dims());
return pt;

@ -47,7 +47,9 @@ platform::Place GetNativePlace(const TargetType& type, int id = 0) {
case TargetType::kCUDA:
return platform::CUDAPlace(id);
default:
LOG(FATAL) << "Error target type.";
PADDLE_THROW(
platform::errors::Unavailable("Unsupported target type. Now only "
"supports Host, x86, CUDA target."));
return platform::Place();
}
}
@ -70,7 +72,9 @@ PrecisionType GetLitePrecisionType(framework::proto::VarType::Type type) {
case framework::proto::VarType_Type_INT64:
return PrecisionType::kInt64;
default:
LOG(FATAL) << "Error precision type.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported precision type. Now only supports FP32, INT8, INT32 and "
"INT64."));
return PrecisionType::kUnk;
}
}
@ -87,7 +91,9 @@ framework::proto::VarType::Type GetNativePrecisionType(
case PrecisionType::kInt64:
return framework::proto::VarType_Type_INT64;
default:
LOG(FATAL) << "Error precision type.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported precision type. Now only supports FP32, INT8, INT32 and "
"INT64."));
return static_cast<framework::proto::VarType::Type>(-1);
}
}
@ -97,7 +103,8 @@ framework::DataLayout GetNativeLayoutType(const DataLayoutType& type) {
case DataLayoutType::kNCHW:
return framework::DataLayout::kNCHW;
default:
LOG(FATAL) << "Error layout type.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported layout type. Now only supports NCHW."));
return static_cast<framework::DataLayout>(-1);
}
}
@ -112,10 +119,12 @@ void MemoryCopyAsync(const platform::Place& dst_place, void* dst_data,
#ifdef PADDLE_WITH_CUDA
if (platform::is_cpu_place(dst_place) &&
platform::is_gpu_place(src_place)) {
LOG(FATAL) << "lite::MemoryCopy GPU->CPU is not yet implemented.";
PADDLE_THROW(platform::errors::Unimplemented(
"Lite::MemoryCopy GPU->CPU is not yet implemented."));
} else if (platform::is_gpu_place(dst_place) &&
platform::is_cpu_place(src_place)) {
LOG(FATAL) << "lite::MemoryCopy CPU->GPU is not yet implemented.";
PADDLE_THROW(platform::errors::Unimplemented(
"Lite::MemoryCopy CPU->GPU is not yet implemented."));
} else if (platform::is_gpu_place(dst_place) &&
platform::is_gpu_place(src_place)) {
auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
@ -124,7 +133,8 @@ void MemoryCopyAsync(const platform::Place& dst_place, void* dst_data,
static_cast<const platform::CUDADeviceContext&>(ctx).stream());
}
#else
LOG(FATAL) << "You must define PADDLE_WITH_CUDA for using CUDAPlace.";
PADDLE_THROW(platform::errors::PreconditionNotMet(
"You must define PADDLE_WITH_CUDA for using CUDAPlace."));
#endif
}
}

@ -78,8 +78,9 @@ bool TRTInt8Calibrator::setBatch(
for (const auto& it : data) {
auto dataptr = data_buffers_.find(it.first);
if (dataptr == data_buffers_.end()) {
LOG(FATAL) << "FATAL " << engine_name_ << " input name '" << it.first
<< "' does not match with the buffer names";
PADDLE_THROW(platform::errors::Fatal(
"%s input name '%s' does not match with the buffer names.",
engine_name_, it.first));
}
const auto& d = dataptr->second;
PADDLE_ENFORCE(
@ -109,8 +110,10 @@ bool TRTInt8Calibrator::getBatch(void** bindings, const char** names,
for (int i = 0; i < num_bindings; i++) {
auto it = data_buffers_.find(names[i]);
if (it == data_buffers_.end()) {
LOG(FATAL) << "Calibration engine asked for unknown tensor name '"
<< names[i] << "' at position " << i;
PADDLE_THROW(
platform::errors::Fatal("Calibration engine asked for unknown tensor "
"name '%s' at position %d.",
names[i], i));
}
bindings[i] = it->second.first;
}

@ -180,15 +180,14 @@ void *Alloc<platform::CUDAPlace>(const platform::CUDAPlace &place,
platform::CUDADeviceGuard(place.device);
size_t avail, total;
platform::GpuMemoryUsage(&avail, &total);
LOG(FATAL) << "Cannot allocate " << string::HumanReadableSize(size)
<< " in GPU " << place.device << ", available "
<< string::HumanReadableSize(avail) << ", total "
<< string::HumanReadableSize(total) << ", GpuMinChunkSize "
<< string::HumanReadableSize(buddy_allocator->GetMinChunkSize())
<< ", GpuMaxChunkSize "
<< string::HumanReadableSize(buddy_allocator->GetMaxChunkSize())
<< ", GPU memory used: "
<< string::HumanReadableSize(Used<platform::CUDAPlace>(place));
PADDLE_THROW(platform::errors::ResourceExhausted(
"Cannot allocate %s in GPU %d, avaliable %s, total %s, GpuMinChunkSize "
"%s, GpuMaxChunkSize %s, GPU memory used: %s.",
string::HumanReadableSize(size), place.device,
string::HumanReadableSize(avail), string::HumanReadableSize(total),
string::HumanReadableSize(buddy_allocator->GetMinChunkSize()),
string::HumanReadableSize(buddy_allocator->GetMaxChunkSize()),
string::HumanReadableSize(Used<platform::CUDAPlace>(place))));
} else {
if (FLAGS_init_allocated_mem) {
cudaMemset(ptr, 0xEF, size);

@ -27,7 +27,8 @@ ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
BOOST_GET_CONST(platform::CUDAPlace, place_).device)),
platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()));
} else {
LOG(FATAL) << "Thread local allocator only supports CUDAPlace now.";
PADDLE_THROW(platform::errors::Unavailable(
"Thread local allocator only supports CUDAPlace now."));
}
}

@ -47,7 +47,8 @@ void OpTester::Init(const OpTesterConfig &config) {
CreateInputVarDesc();
CreateOutputVarDesc();
} else {
LOG(FATAL) << "Op \"" << config_.op_type << "\" is not registered.";
PADDLE_THROW(platform::errors::NotFound("Operator '%s' is not registered.",
config_.op_type));
}
if (config_.device_id >= 0) {
@ -169,10 +170,10 @@ void OpTester::CreateInputVarDesc() {
std::vector<std::string> input_names = GetOpProtoInputNames();
for (auto &name : input_names) {
const OpInputConfig *input = config_.GetInput(name);
if (input == nullptr) {
LOG(FATAL) << "The input " << name << " of op " << config_.op_type
<< " is not correctlly provided.";
}
PADDLE_ENFORCE_NOT_NULL(
input, platform::errors::NotFound(
"The input %s of operator %s is not correctlly provided.",
name, config_.op_type));
std::string var_name = config_.op_type + "." + name;
framework::VarDesc *var = Var(var_name);
@ -207,9 +208,10 @@ void OpTester::CreateOpDesc() {
GetOpProtoAttrNames();
for (auto item : config_.attrs) {
const std::string &name = item.first;
if (attr_types.find(name) == attr_types.end()) {
LOG(FATAL) << "Operator " << type_ << " do not have attr " << name;
}
PADDLE_ENFORCE_NE(
attr_types.find(name), attr_types.end(),
platform::errors::NotFound("Operator %s does not have attribute %d.",
type_, name));
const std::string &value_str = item.second;
const framework::proto::AttrType &type = attr_types[name];
@ -231,7 +233,8 @@ void OpTester::CreateOpDesc() {
case framework::proto::AttrType::INTS:
case framework::proto::AttrType::FLOATS:
case framework::proto::AttrType::STRINGS:
LOG(FATAL) << "Not supported yet.";
PADDLE_THROW(
platform::errors::Unimplemented("Not supported STRINGS type yet."));
break;
case framework::proto::AttrType::LONG: {
int64_t value = StringTo<int64_t>(value_str);

@ -43,10 +43,7 @@ class CSyncCalcStreamOp : public framework::OperatorBase {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
auto dev_ctx = static_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(place));
cudaError_t e_sync = cudaStreamSynchronize(dev_ctx->stream());
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync cuda stream: " << cudaGetErrorString(e_sync);
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(dev_ctx->stream()));
#else
PADDLE_THROW("PaddlePaddle should compile with GPU.");
#endif

@ -45,10 +45,7 @@ class CSyncCommStreamOp : public framework::OperatorBase {
int ring_id = Attr<int>("ring_id");
auto stream =
platform::NCCLCommContext::Instance().Get(ring_id, place)->stream();
cudaError_t e_sync = cudaStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync nccl stream: " << cudaGetErrorString(e_sync);
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
#else
PADDLE_THROW("PaddlePaddle should compile with GPU.");
#endif

@ -192,11 +192,7 @@ class DataNormGradKernel<platform::CUDADeviceContext, T>
reinterpret_cast<const void *>(d_batch_square_sum),
reinterpret_cast<void *>(d_batch_square_sum), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
cudaError_t e_sync = cudaStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync nccl stream: "
<< cudaGetErrorString(e_sync);
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with GPU, and need_sync_stats connot be "

@ -471,7 +471,9 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> {
mAP += average_precisions;
++count;
} else {
LOG(FATAL) << "Unkown ap version: " << ap_type;
PADDLE_THROW(platform::errors::Unimplemented(
"Unkown ap version %s. Now only supports integral and l1point.",
ap_type));
}
}
if (count != 0) mAP /= count;

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save