windows stuff

revert-14324-fix_vlog
dzhwinter 7 years ago
parent c3e1fb5a3e
commit 372caf4000

@ -82,7 +82,6 @@ if(WIN32)
if (NOT MSVC)
message(FATAL "Windows build only support msvc. Which was binded by the nvcc compiler of NVIDIA.")
endif(NOT MSVC)
add_definitions(/DPADDLE_COMPILE)
endif(WIN32)
if(NOT WITH_GOLANG)

@ -67,6 +67,7 @@ bool NativePaddlePredictor::Init(
} else {
place_ = paddle::platform::CPUPlace();
}
VLOG(3) << "before scope";
if (parent_scope) {
scope_ = parent_scope;
sub_scope_ = &(parent_scope->NewScope());
@ -75,26 +76,30 @@ bool NativePaddlePredictor::Init(
paddle::framework::InitDevices(false);
scope_.reset(new paddle::framework::Scope());
}
VLOG(3) << "after scope"
executor_.reset(new paddle::framework::Executor(place_));
VLOG(3) << "executor";
// Initialize the inference program
if (!config_.model_dir.empty()) {
// Parameters are saved in separate files located in
// the specified `dirname`.
VLOG(3) << config_.model_dir;
inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(),
config_.model_dir);
VLOG(3) << "load model Finish";
} else if (!config_.prog_file.empty() && !config_.param_file.empty()) {
// All parameters are saved in a single file.
// The file names should be consistent with that used
// in Python API `fluid.io.save_inference_model`.
VLOG(3) << "load program";
inference_program_ = paddle::inference::Load(
executor_.get(), scope_.get(), config_.prog_file, config_.param_file);
VLOG(3) << "load program finish";
} else {
LOG(ERROR) << "fail to load inference model.";
return false;
}
VLOG(3) << "prepare";
ctx_ = executor_->Prepare(*inference_program_, 0);
executor_->CreateVariables(*inference_program_,
sub_scope_ ? sub_scope_ : scope_.get(), 0);
@ -289,10 +294,13 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
VLOG(3) << "create NativePaddlePredictor";
if (config.use_gpu) {
// 1. GPU memory
PADDLE_ENFORCE_GT(
config.fraction_of_gpu_memory, 0.f,
"fraction_of_gpu_memory in the config should be set to range (0., 1.]");
VLOG(3) << "before check";
// PADDLE_ENFORCE_GT(
// config.fraction_of_gpu_memory, 0.f,
// "fraction_of_gpu_memory in the config should be set to range (0., 1.]");
VLOG(3) << "failed on first";
PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
VLOG(3) << "after flags";
std::vector<std::string> flags;
if (config.fraction_of_gpu_memory >= 0.0f ||
config.fraction_of_gpu_memory <= 0.95f) {
@ -302,9 +310,10 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
flags.push_back(flag);
VLOG(3) << "set flag: " << flag;
framework::InitGflags(flags);
VLOG(3) << "flags setting";
}
}
VLOG(3) << "Init flags Done";
std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
return nullptr;

@ -17,6 +17,7 @@ endmacro()
if (WIN32)
if (WITH_STATIC_LIB)
safe_set_static_flag()
add_definitions(-DSTATIC_LIB)
set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} "/w")
set(CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE} "/w")
endif()

File diff suppressed because it is too large Load Diff

@ -25,7 +25,6 @@ limitations under the License. */
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/platform/macros.h"
namespace paddle {
@ -34,7 +33,7 @@ enum PaddleDType {
INT64,
};
class PADDLE_DLL PaddleBuf {
class PaddleBuf {
public:
PaddleBuf() = default;
PaddleBuf(PaddleBuf&& other);
@ -46,7 +45,7 @@ class PADDLE_DLL PaddleBuf {
PaddleBuf(void* data, size_t length)
: data_(data), length_(length), memory_owned_{false} {}
// Own memory.
explicit PaddleBuf(size_t length)
PaddleBuf(size_t length)
: data_(new char[length]), length_(length), memory_owned_(true) {}
// Resize to `length` bytes.
void Resize(size_t length);
@ -65,7 +64,7 @@ class PADDLE_DLL PaddleBuf {
bool memory_owned_{true};
};
struct PADDLE_DLL PaddleTensor {
struct PaddleTensor {
PaddleTensor() = default;
std::string name; // variable name.
std::vector<int> shape;
@ -88,7 +87,7 @@ enum class PaddleEngineKind {
* A simple Inference API for Paddle. Currently this API can be used by
* non-sequence scenarios.
*/
class PADDLE_DLL PaddlePredictor {
class PaddlePredictor {
public:
struct Config;
PaddlePredictor() = default;
@ -97,6 +96,7 @@ class PADDLE_DLL PaddlePredictor {
// Predict an record.
// The caller should be responsible for allocating and releasing the memory of
// `inputs`. `inputs` should be available until Run returns. Caller should be
// responsible for the output tensor's buffer, either allocated or passed from
// outside.
virtual bool Run(const std::vector<PaddleTensor>& inputs,
@ -111,12 +111,12 @@ class PADDLE_DLL PaddlePredictor {
virtual ~PaddlePredictor() = default;
// The common configs for all the predictors.
struct PADDLE_DLL Config {
struct Config {
std::string model_dir; // path to the model directory.
};
};
struct PADDLE_DLL NativeConfig : public PaddlePredictor::Config {
struct NativeConfig : public PaddlePredictor::Config {
// GPU related fields.
bool use_gpu{false};
int device{0};
@ -129,7 +129,7 @@ struct PADDLE_DLL NativeConfig : public PaddlePredictor::Config {
};
// Configurations for Anakin engine.
struct PADDLE_DLL AnakinConfig : public PaddlePredictor::Config {
struct AnakinConfig : public PaddlePredictor::Config {
enum TargetType { NVGPU = 0, X86 };
int device;
std::string model_file;
@ -137,7 +137,7 @@ struct PADDLE_DLL AnakinConfig : public PaddlePredictor::Config {
TargetType target_type;
};
struct PADDLE_DLL TensorRTConfig : public NativeConfig {
struct TensorRTConfig : public NativeConfig {
// Determine whether a subgraph will be executed by TRT.
int min_subgraph_size{1};
// While TensorRT allows an engine optimized for a given max batch size
@ -159,9 +159,8 @@ struct PADDLE_DLL TensorRTConfig : public NativeConfig {
//
// Similarly, each engine kind should map to a unique predictor implementation.
template <typename ConfigT, PaddleEngineKind engine = PaddleEngineKind::kNative>
PADDLE_DLL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
const ConfigT& config);
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
PADDLE_DLL int PaddleDtypeSize(PaddleDType dtype);
int PaddleDtypeSize(PaddleDType dtype);
} // namespace paddle

@ -308,6 +308,8 @@ inline void throw_on_error(T e) {
__PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__)
#define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \
__PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__)
#if !defined(_WIN32)
#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...) \
do { \
if (UNLIKELY(nullptr == (__VAL))) { \
@ -327,6 +329,20 @@ inline void throw_on_error(T e) {
paddle::string::Sprintf("" __VA_ARGS__)); \
} \
} while (0)
#else
#define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...) \
do { \
if (!((__VAL0)__CMP(__VAL1))) { \
PADDLE_THROW("Windows disable the enforce. Enforce failed."); \
} \
} while(0)
#define PADDLE_ENFORCE_NOT_NULL(__VAL1, ...) \
do { \
if (nullptr == (__VAL1)) { \
PADDLE_THROW("Windows disable the enforce. Enforce failed"); \
} \
} while(0)
#endif // !_WIN32
} // namespace platform
} // namespace paddle

@ -30,11 +30,14 @@ limitations under the License. */
#endif // __FLT_MAX__
#ifdef _WIN32
#ifdef PADDLE_COMPILE
#if defined(PADDLE_COMPILE)
// by default, msvc has predefined macro _LIB for static library
// only shared library need to export and import symbols
// static library export all symbols by default.
#define PADDLE_DLL __declspec(dllexport)
#else
#define PADDLE_DLL __declspec(dllimport)
#endif
#else
#define PADDLE_COMPILE
#define PADDLE_DLL
#endif

Loading…
Cancel
Save