Refine paddle.manual_seed (#26496)

* refine manual seed

* fix ci problem

* fix unittests

* fix unittest

* set is_init_py=false in manual_seed

* fix unittest

* fix bernoulli_op

* fix(unittest): change random_seed to manual_seed

* 🐞fix(unittest): fix manual_seed

* trigger ci

* fix test_sentiment

* fix test_imperative_save_load

* fix test_uniform_random_op

* fix test_uniform_random_op

* fix test_jit_save_load

* merge develop

* fix manual_seed

* fix manual_seed

* use global engine

* use shared_ptr

* fix double free

* fix bug

* fix bug

* fix bug

* fix test bug

* fix test bug

* fix test bug

* fix ci
Leo Chen 5 years ago committed by GitHub
parent 31f422ae5e
commit 844583c8fd

@ -12,67 +12,122 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/generator.h"
#include <glog/logging.h>
#include <deque>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "paddle/fluid/framework/generator.h"
namespace paddle {
namespace framework {
std::shared_ptr<Generator> Generator::gen_instance_ = NULL;
const std::shared_ptr<Generator>& DefaultCPUGenerator() {
static auto default_cpu_generator =
std::make_shared<Generator>(GetRandomSeed());
VLOG(4) << "initial seed: " << default_cpu_generator->GetCurrentSeed()
<< ", cpu engine: " << default_cpu_generator->GetCPUEngine().get();
return default_cpu_generator;
}
std::shared_ptr<std::mt19937_64> OpDefaultCPUEngine() {
static auto op_default_cpu_engine = std::make_shared<std::mt19937_64>();
return op_default_cpu_engine;
}
// NOTE(zhiqiu): there are 3 conditions:
// (1) op seed is not set and DefaultCPUGenerator is inited, use
// DefaultCPUGenerator
// (2) op seed is not set and DefaultCPUGenerator is not inited, use
// OpDefaultCPUEngine() and set a random seed
// (3) op seed is set, use OpDefaultCPUEngine() and set the seed
std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t seed) {
if (DefaultCPUGenerator()->GetIsInitPy() && seed == 0) {
VLOG(4) << "Use random engine from generator";
return DefaultCPUGenerator()->GetCPUEngine();
} else {
// NOTE(zhiqiu): creating an engine instance every time instead of using
// OpDefaultCPUEngine(), this is the legacy behavior of random operators.
// The benefit is that when running PE with a fixed seed in multiple threads,
// each thread has its own engine, and they don't affect each other.
//
// We still need to measure the determinism of Generator in PE.
auto engine = std::make_shared<std::mt19937_64>();
if (seed == 0) {
seed = GetRandomSeed();
VLOG(4) << "Use default random engine with random seed = " << seed;
} else {
VLOG(4) << "Use default random engine with fixed random seed = " << seed;
}
static std::mutex mu_;
{
std::lock_guard<std::mutex> lock(mu_);
engine->seed(seed);
}
return engine;
}
}
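The three-way selection above is the core of the change. Below is a minimal standalone sketch of that logic, with a toy generator type standing in for paddle::framework::Generator; all names here are illustrative, not Paddle's API.

```cpp
// Standalone sketch of the engine-selection logic in GetCPURandomEngine.
// ToyGenerator stands in for paddle::framework::Generator.
#include <iostream>
#include <memory>
#include <random>

struct ToyGenerator {
  bool is_init_py = false;  // true once Python has called manual_seed
  std::shared_ptr<std::mt19937_64> engine =
      std::make_shared<std::mt19937_64>();
};

std::shared_ptr<std::mt19937_64> GetEngine(ToyGenerator* global,
                                           uint64_t op_seed) {
  // (1) no op seed and the global generator was seeded from Python:
  //     hand out the shared global engine so all ops draw one stream.
  if (global->is_init_py && op_seed == 0) return global->engine;
  // (2)/(3) otherwise create a fresh engine per call (legacy behavior),
  //     picking a random seed when none was given.
  if (op_seed == 0) op_seed = std::random_device{}();
  return std::make_shared<std::mt19937_64>(op_seed);
}

int main() {
  ToyGenerator global;
  global.is_init_py = true;
  auto shared = GetEngine(&global, 0);  // case (1): shares the global engine
  auto fixed = GetEngine(&global, 42);  // case (3): fresh engine, fixed seed
  std::cout << (*shared)() << " " << (*fixed)() << "\n";
}
```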
GeneratorState* Generator::GetState() {
std::lock_guard<std::mutex> lock(this->mutex);
return this->state_.get();
GeneratorState Generator::GetState() {
std::lock_guard<std::mutex> lock(this->mu_);
state_.cpu_engine = *engine_;
return this->state_;
}
void Generator::SetState(GeneratorState* state_in) {
std::lock_guard<std::mutex> lock(this->mutex);
*this->state_ = *state_in;
void Generator::SetState(const GeneratorState& state) {
std::lock_guard<std::mutex> lock(this->mu_);
this->state_ = state;
this->engine_ = std::make_shared<std::mt19937_64>(state.cpu_engine);
}
uint64_t Generator::GetCurrentSeed() {
std::lock_guard<std::mutex> lock(this->mutex);
return this->state_->current_seed;
std::lock_guard<std::mutex> lock(this->mu_);
return this->state_.current_seed;
}
uint64_t Generator::Seed() {
std::lock_guard<std::mutex> lock(this->mutex);
std::lock_guard<std::mutex> lock(this->mu_);
uint64_t seed;
std::random_device de;
seed = ((((uint64_t)de()) << 32) + de()) & 0x1FFFFFFFFFFFFF;
this->state_->current_seed = seed;
this->state_.current_seed = seed;
std::seed_seq seq({seed});
this->state_->cpu_engine.seed(seq);
this->engine_->seed(seq);
return this->state_->current_seed;
return this->state_.current_seed;
}
void Generator::SetCurrentSeed(uint64_t seed) {
std::lock_guard<std::mutex> lock(this->mutex);
this->state_->current_seed = uint64_t(seed);
std::lock_guard<std::mutex> lock(this->mu_);
this->state_.current_seed = seed;
std::seed_seq seq({seed});
this->state_->cpu_engine.seed(seq);
this->engine_->seed(seq);
}
std::mt19937_64& Generator::GetCPUEngine() {
std::lock_guard<std::mutex> lock(this->mutex);
return this->state_->cpu_engine;
std::shared_ptr<std::mt19937_64> Generator::GetCPUEngine() {
std::lock_guard<std::mutex> lock(this->mu_);
return this->engine_;
}
void Generator::SetCPUEngine(std::mt19937_64 engine) {
std::lock_guard<std::mutex> lock(this->mutex);
this->state_->cpu_engine = std::mt19937_64(engine);
void Generator::SetCPUEngine(std::shared_ptr<std::mt19937_64> engine) {
std::lock_guard<std::mutex> lock(this->mu_);
this->engine_ = engine;
}
uint64_t Generator::Random64() {
std::lock_guard<std::mutex> lock(this->mutex);
return this->state_->cpu_engine();
std::lock_guard<std::mutex> lock(this->mu_);
auto engine = this->engine_;
return (*engine)();
}
void Generator::SetIsInitPy(bool is_init_py) {
this->is_init_py_ = is_init_py;
VLOG(4) << "SetIsInitPy:" << this->is_init_py_;
}
bool Generator::GetIsInitPy() const { return this->is_init_py_; }
} // namespace framework
} // namespace paddle
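The reworked GetState/SetState copy the engine into a value-type GeneratorState, so a saved state replays the same stream when restored. A minimal sketch of that snapshot/restore round trip, with toy types rather than Paddle's:

```cpp
// Toy sketch of the snapshot/restore semantics: GetState copies the
// engine into a value-type state, SetState rebuilds the engine from it,
// so a restored state replays the same random stream.
#include <cassert>
#include <cstdint>
#include <memory>
#include <mutex>
#include <random>

struct State {
  uint64_t current_seed = 0;
  std::mt19937_64 cpu_engine;  // engine stored by value in the snapshot
};

class Gen {
 public:
  State GetState() {
    std::lock_guard<std::mutex> lock(mu_);
    state_.cpu_engine = *engine_;  // capture the engine's current position
    return state_;
  }
  void SetState(const State& s) {
    std::lock_guard<std::mutex> lock(mu_);
    state_ = s;
    engine_ = std::make_shared<std::mt19937_64>(s.cpu_engine);
  }
  uint64_t Random64() {
    std::lock_guard<std::mutex> lock(mu_);
    return (*engine_)();
  }

 private:
  State state_;
  std::shared_ptr<std::mt19937_64> engine_ =
      std::make_shared<std::mt19937_64>();
  std::mutex mu_;
};

int main() {
  Gen g;
  State snap = g.GetState();
  uint64_t first = g.Random64();
  g.SetState(snap);               // rewind to the snapshot
  assert(first == g.Random64());  // the same draw replays
}
```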

@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <glog/logging.h>
#include <stdint.h>
#include <atomic>
#include <deque>
#include <iostream> // temp for debug
@ -27,6 +29,12 @@ limitations under the License. */
namespace paddle {
namespace framework {
static uint64_t GetRandomSeed() {
std::random_device rd;
// double has a 53-bit significand, so limit uint64 to 53 bits
return ((((uint64_t)rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
}
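The mask 0x1FFFFFFFFFFFFF is 2^53 - 1; as the comment notes, a double's significand holds 53 bits, so a seed capped this way survives a round trip through a double unchanged. A small standalone sketch of the property:

```cpp
// Sketch: a seed masked to 53 bits is exactly representable as a double,
// so it survives a round trip through APIs that store seeds in doubles.
#include <cstdint>
#include <iostream>
#include <random>

int main() {
  std::random_device rd;
  uint64_t seed =
      ((static_cast<uint64_t>(rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
  double d = static_cast<double>(seed);
  std::cout << seed << " == " << static_cast<uint64_t>(d) << "\n";
}
```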
struct GeneratorState {
int64_t device = -1;
uint64_t current_seed = 34342423252;
@ -35,62 +43,67 @@ struct GeneratorState {
struct Generator {
Generator() {
GeneratorState default_gen_state_cpu;
default_gen_state_cpu.device = -1;
default_gen_state_cpu.current_seed = 34342423252;
std::seed_seq seq({34342423252});
default_gen_state_cpu.cpu_engine = std::mt19937_64(seq);
this->state_ = std::make_shared<GeneratorState>(default_gen_state_cpu);
auto seed = GetRandomSeed();
std::seed_seq seq({seed});
auto engine = std::make_shared<std::mt19937_64>(seq);
this->state_.cpu_engine = *engine;
this->state_.device = -1;
this->state_.current_seed = seed;
this->engine_ = engine;
VLOG(4) << "initial seed: " << this->state_.current_seed
<< ", cpu engine: " << &this->state_.cpu_engine;
}
explicit Generator(uint64_t seed) {
std::seed_seq seq({seed});
auto engine = std::make_shared<std::mt19937_64>(seq);
this->state_.cpu_engine = *engine;
this->state_.device = -1;
this->state_.current_seed = seed;
this->engine_ = engine;
VLOG(4) << "initial seed: " << this->state_.current_seed
<< ", cpu engine: " << &this->state_.cpu_engine;
this->is_init_py_ = true; // TODO(zhiqiu): remove it in future
}
explicit Generator(GeneratorState state_in)
: state_{std::make_shared<GeneratorState>(state_in)} {}
Generator(const Generator& other)
: Generator(other, std::lock_guard<std::mutex>(other.mutex)) {}
Generator(const Generator& other) = delete;
// get random state
GeneratorState* GetState();
GeneratorState GetState();
// set random state
void SetState(GeneratorState* state_in);
void SetState(const GeneratorState&);
// get current seed
uint64_t GetCurrentSeed();
// random a seed and get
uint64_t Seed();
// set seed
void SetCurrentSeed(uint64_t seed);
// get cpu engine
std::mt19937_64& GetCPUEngine();
std::shared_ptr<std::mt19937_64> GetCPUEngine();
// set cpu engine
void SetCPUEngine(std::mt19937_64 engine);
void SetCPUEngine(std::shared_ptr<std::mt19937_64>);
uint64_t Random64();
bool is_init_py = false;
void SetIsInitPy(bool);
bool GetIsInitPy() const;
// CPU Generator singleton
static std::shared_ptr<Generator> GetInstance() {
if (NULL == gen_instance_) {
gen_instance_.reset(new paddle::framework::Generator());
}
return gen_instance_;
}
private:
GeneratorState state_;
std::shared_ptr<std::mt19937_64> engine_;
mutable std::mutex mu_;
// NOTE(zhiqiu): is_init_py_ is used to keep the generator compatible with
// the old per-op seeds, and it should be removed after all random-related
// operators and unittests are upgraded to use the generator.
bool is_init_py_ = false;
};
static std::shared_ptr<Generator> GetInstanceX() {
if (NULL == gen_instance_) {
gen_instance_.reset(new paddle::framework::Generator());
}
gen_instance_->is_init_py = true;
return gen_instance_;
}
// The DefaultCPUGenerator is used in manual_seed()
const std::shared_ptr<Generator>& DefaultCPUGenerator();
private:
static std::shared_ptr<Generator> gen_instance_;
std::shared_ptr<GeneratorState> state_;
mutable std::mutex mutex;
// If an op seed is set, or the global generator is not initialized,
// OpDefaultCPUEngine is used.
std::shared_ptr<std::mt19937_64> OpDefaultCPUEngine();
Generator(const Generator& other, const std::lock_guard<std::mutex>&)
: state_(std::make_shared<GeneratorState>(*(other.state_))) {}
};
std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t);
} // namespace framework
} // namespace paddle
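The old GetInstance() lazily reset a static member with no locking; the new DefaultCPUGenerator() relies on a function-local static, whose initialization is thread-safe since C++11. A toy sketch contrasting the two patterns (illustrative names only):

```cpp
// Toy sketch contrasting the two singleton styles. The function-local
// static is initialized exactly once, thread-safely, under C++11 rules.
#include <memory>

struct Gen {
  explicit Gen(unsigned seed) : seed_(seed) {}
  unsigned seed_;
};

// Old pattern (racy if first touched from two threads at once):
//   static std::shared_ptr<Gen> gen_instance_;
//   if (gen_instance_ == nullptr) gen_instance_.reset(new Gen(0));

const std::shared_ptr<Gen>& DefaultGen() {
  static auto g = std::make_shared<Gen>(12345);  // thread-safe init
  return g;
}

int main() { return DefaultGen()->seed_ == 12345 ? 0 : 1; }
```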

@ -64,11 +64,11 @@ class BernoulliOpKernel<platform::CPUDeviceContext, T>
int64_t size = x->numel();
std::uniform_real_distribution<T> dist(0.0, 1.0);
auto gen_ptr = framework::Generator::GetInstance();
std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine();
auto gen_ptr = framework::DefaultCPUGenerator();
auto engine = gen_ptr->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
out_data[i] = BernoulliFunctor(in_data[i], dist(gen_engine));
out_data[i] = BernoulliFunctor(in_data[i], dist(*engine));
}
}
}; // namespace operators
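For reference, the kernel's draw amounts to comparing a uniform sample in [0, 1) against the probability p. A standalone sketch with a locally seeded engine in place of Paddle's shared one, assuming BernoulliFunctor reduces to this threshold test:

```cpp
// Standalone sketch of a Bernoulli draw via a uniform threshold; the
// seeded local engine stands in for *GetCPURandomEngine(seed).
#include <iostream>
#include <random>

int main() {
  std::mt19937_64 engine(0);
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  const double p = 0.3;
  int ones = 0;
  const int n = 100000;
  for (int i = 0; i < n; ++i) ones += dist(engine) < p ? 1 : 0;
  std::cout << "empirical p: " << static_cast<double>(ones) / n << "\n";
}
```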

@ -14,20 +14,19 @@
#pragma once
#include <ThreadPool.h>
#include <gflags/gflags.h>
#include <functional>
#include <future> // NOLINT
#include <memory>
#include <string>
#include <thread> // NOLINT
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <thread> // NOLINT
#include <ThreadPool.h>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/rw_lock.h"
@ -89,26 +88,17 @@ class UniformInitializer : public Initializer {
min_ = std::stof(attrs[2]);
max_ = std::stof(attrs[3]);
if (seed_ == 0) {
seed_ = std::random_device()();
}
random_engine_.seed(seed_);
dist_ = std::uniform_real_distribution<float>(min_, max_);
random_engine_ = framework::GetCPURandomEngine(seed_);
}
float GetValue() override {
return framework::Generator::GetInstance()->is_init_py
? dist_(framework::Generator::GetInstance()->GetCPUEngine())
: dist_(random_engine_);
// return dist_(random_engine_);
}
float GetValue() override { return dist_(*random_engine_); }
private:
float min_;
float max_;
std::minstd_rand random_engine_;
std::shared_ptr<std::mt19937_64> random_engine_;
std::uniform_real_distribution<float> dist_;
};
@ -139,26 +129,18 @@ class GaussianInitializer : public Initializer {
mean_ = std::stof(attrs[2]);
std_ = std::stof(attrs[3]);
if (seed_ == 0) {
seed_ = std::random_device()();
}
random_engine_ = framework::GetCPURandomEngine(seed_);
random_engine_.seed(seed_);
dist_ = std::normal_distribution<float>(mean_, std_);
}
float GetValue() override {
return framework::Generator::GetInstance()->is_init_py
? dist_(framework::Generator::GetInstance()->GetCPUEngine())
: dist_(random_engine_);
// return dist_(random_engine_);
}
float GetValue() override { return dist_(*random_engine_); }
private:
float std_;
float mean_;
std::minstd_rand random_engine_;
std::shared_ptr<std::mt19937_64> random_engine_;
std::normal_distribution<float> dist_;
};
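Holding the engine through a shared_ptr means several initializers seeded with 0 can draw from one global stream, instead of each owning a private std::minstd_rand. A toy sketch of the sharing behavior:

```cpp
// Toy sketch of an initializer that borrows a shared engine instead of
// owning one; two initializers given the same engine interleave draws
// from a single stream.
#include <iostream>
#include <memory>
#include <random>
#include <utility>

struct UniformInit {
  UniformInit(std::shared_ptr<std::mt19937_64> e, float lo, float hi)
      : engine_(std::move(e)), dist_(lo, hi) {}
  float GetValue() { return dist_(*engine_); }
  std::shared_ptr<std::mt19937_64> engine_;
  std::uniform_real_distribution<float> dist_;
};

int main() {
  auto shared = std::make_shared<std::mt19937_64>(1);
  UniformInit a(shared, 0.0f, 1.0f);
  UniformInit b(shared, 0.0f, 1.0f);
  std::cout << a.GetValue() << " " << b.GetValue() << "\n";
}
```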

@ -55,30 +55,22 @@ class CPUDropoutKernel : public framework::OpKernel<T> {
std::memset(mask_data, 0, size * sizeof(*mask_data)); // NOLINT
return;
}
bool init_generator_py = framework::Generator::GetInstance()->is_init_py;
// std::minstd_rand engine;
// NOTE: a fixed seed should only be used in unittests or for debugging;
// training is guaranteed to use a random seed.
std::random_device rnd;
std::minstd_rand engine;
int seed_data;
int seed_data = 0;
if (seed) {
seed_data = *(seed->data<int>());
} else {
seed_data =
context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : 0;
}
engine.seed(seed_data);
auto engine = framework::GetCPURandomEngine(seed_data);
std::uniform_real_distribution<float> dist(0, 1);
for (size_t i = 0; i < size; ++i) {
float cur_random =
init_generator_py
? dist(framework::Generator::GetInstance()->GetCPUEngine())
: dist(engine);
if (cur_random < dropout_prob) {
if (dist(*engine) < dropout_prob) {
mask_data[i] = 0;
y_data[i] = 0;
} else {

@ -39,26 +39,14 @@ class CPUGaussianRandomKernel : public framework::OpKernel<T> {
tensor->Resize(shape);
int64_t size = tensor->numel();
T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
if (framework::Generator::GetInstance()->is_init_py) {
std::mt19937_64& gen_engine =
framework::Generator::GetInstance()->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(gen_engine);
}
} else {
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(*engine);
}
}
};
}; // namespace operators
template <typename T>
class CPUGaussianRandomBatchSizeLikeKernel : public framework::OpKernel<T> {

@ -13,11 +13,14 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/sampler.h"
#include <glog/logging.h>
#include <iostream>
#include <queue>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/generator.h"
namespace paddle {
@ -28,22 +31,17 @@ Sampler::~Sampler() {}
UniformSampler::UniformSampler(int64_t range, unsigned int seed)
: Sampler(range, seed), inv_range_(1.0 / (range + 1)) {
random_engine_ = std::make_shared<std::mt19937_64>(seed_);
random_engine_ = framework::GetCPURandomEngine(seed_);
dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
}
int64_t UniformSampler::Sample() const {
return framework::Generator::GetInstance()->is_init_py
? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine())
: (*dist_)(*random_engine_);
// return (*dist_)(*random_engine_);
}
int64_t UniformSampler::Sample() const { return (*dist_)(*random_engine_); }
float UniformSampler::Probability(int64_t value) const { return inv_range_; }
LogUniformSampler::LogUniformSampler(int64_t range, unsigned int seed)
: Sampler(range, seed), log_range_(log(range + 1)) {
random_engine_ = std::make_shared<std::mt19937_64>(seed_);
random_engine_ = framework::GetCPURandomEngine(seed_);
dist_ = std::make_shared<std::uniform_real_distribution<>>(0, 1);
}
@ -52,10 +50,7 @@ int64_t LogUniformSampler::Sample() const {
// inverse_transform_sampling method
// More details:
// https://wanghaoshuang.github.io/2017/11/Log-uniform-distribution-sampler/
auto cur_random =
framework::Generator::GetInstance()->is_init_py
? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine())
: (*dist_)(*random_engine_);
auto cur_random = (*dist_)(*random_engine_);
const int64_t value = static_cast<int64_t>(exp(cur_random * log_range_)) - 1;
// Mathematically, value should be <= range_, but might not be due to some
// floating point roundoff, so we mod by range_.
@ -74,7 +69,7 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities,
const int *alias, const float *alias_probabilities,
unsigned int seed)
: Sampler(range, seed) {
random_engine_ = std::make_shared<std::mt19937>(seed_);
random_engine_ = framework::GetCPURandomEngine(seed_);
real_dist_ = std::make_shared<std::uniform_real_distribution<>>(0, 1);
int_dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
@ -84,14 +79,8 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities,
}
int64_t CustomSampler::Sample() const {
auto index =
framework::Generator::GetInstance()->is_init_py
? (*int_dist_)(framework::Generator::GetInstance()->GetCPUEngine())
: (*int_dist_)(*random_engine_);
auto p =
framework::Generator::GetInstance()->is_init_py
? (*real_dist_)(framework::Generator::GetInstance()->GetCPUEngine())
: (*real_dist_)(*random_engine_);
auto index = (*int_dist_)(*random_engine_);
auto p = (*real_dist_)(*random_engine_);
if (p > alias_probs_[index]) {
int alias = alias_[index];
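The draw above is the standard alias-method step: pick a bucket uniformly, then a uniform coin chooses between the bucket's own value and its alias. A standalone sketch with hand-built tables for the distribution {0.5, 0.25, 0.25}; the tables are illustrative, not produced by Paddle's constructor:

```cpp
// Standalone sketch of one alias-method draw. Bucket i returns itself
// with probability alias_probs[i], otherwise its alias.
#include <iostream>
#include <random>
#include <vector>

int main() {
  std::vector<double> alias_probs = {1.0, 0.75, 0.75};
  std::vector<int> alias = {0, 0, 0};  // buckets 1 and 2 spill into 0

  std::mt19937_64 engine(7);
  std::uniform_int_distribution<int> pick(0, 2);
  std::uniform_real_distribution<double> coin(0.0, 1.0);

  std::vector<int> counts(3, 0);
  const int n = 400000;
  for (int i = 0; i < n; ++i) {
    int idx = pick(engine);
    double p = coin(engine);
    counts[p > alias_probs[idx] ? alias[idx] : idx]++;
  }
  for (int c : counts) std::cout << static_cast<double>(c) / n << " ";
  std::cout << "\n";  // approximately 0.5 0.25 0.25
}
```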

@ -26,8 +26,8 @@ namespace math {
// TODO(wanghaoshuang): Support for GPU
/**
 * Sample integers from [0, range).
 */
class Sampler {
public:
explicit Sampler(int64_t range, unsigned int seed = 0UL) : range_(range) {
@ -117,7 +117,7 @@ class CustomSampler : public Sampler {
const int* alias_;
const float* probs_;
const int exceptional_val = -1;
std::shared_ptr<std::mt19937> random_engine_;
std::shared_ptr<std::mt19937_64> random_engine_;
std::shared_ptr<std::uniform_real_distribution<>> real_dist_;
std::shared_ptr<std::uniform_int_distribution<>> int_dist_;
};

@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/operators/fill_constant_op.h"
#include "paddle/fluid/operators/mean_op.h"
@ -35,23 +36,11 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
T* data = tensor->mutable_data<T>(context.GetPlace());
int64_t size = tensor->numel();
std::normal_distribution<T> dist(mean, std);
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
if (framework::Generator::GetInstance()->is_init_py) {
std::mt19937_64& gen_engine =
framework::Generator::GetInstance()->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(gen_engine);
}
} else {
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(*engine);
}
tensor->set_layout(DataLayout::kMKLDNN);

@ -46,22 +46,11 @@ class CPURandintKernel : public framework::OpKernel<T> {
std::uniform_int_distribution<T> dist(ctx.Attr<int>("low"),
ctx.Attr<int>("high") - 1);
unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
if (framework::Generator::GetInstance()->is_init_py) {
std::mt19937_64& gen_engine =
framework::Generator::GetInstance()->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) data[i] = dist(gen_engine);
} else {
unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(*engine);
}
}
};

@ -19,6 +19,7 @@ limitations under the License. */
#include <ctime>
#include <string>
#include <vector>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/tensor_util.h"
@ -29,20 +30,12 @@ namespace operators {
template <typename T>
static inline void random_permate(T* data_ptr, int num, unsigned int seed) {
auto engine = framework::GetCPURandomEngine(seed);
for (int i = 0; i < num; ++i) {
data_ptr[i] = static_cast<T>(i);
}
if (framework::Generator::GetInstance()->is_init_py) {
std::shuffle(data_ptr, data_ptr + num,
framework::Generator::GetInstance()->GetCPUEngine());
} else {
if (seed == 0) {
seed = std::random_device()();
}
std::srand(seed);
std::random_shuffle(data_ptr, data_ptr + num);
}
std::shuffle(data_ptr, data_ptr + num, *engine);
}
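std::random_shuffle (deprecated in C++14, removed in C++17) draws from an unspecified source; std::shuffle takes the engine explicitly, so a fixed seed reproduces the permutation. A minimal sketch:

```cpp
// Sketch: std::shuffle with an explicitly seeded engine yields a
// reproducible permutation; the engine stands in for *GetCPURandomEngine.
#include <algorithm>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>

int main() {
  std::vector<int> data(10);
  std::iota(data.begin(), data.end(), 0);  // 0, 1, ..., 9
  std::mt19937_64 engine(42);
  std::shuffle(data.begin(), data.end(), engine);
  for (int v : data) std::cout << v << " ";
  std::cout << "\n";
}
```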
template <typename DeviceContext, typename T>

@ -51,20 +51,15 @@ class SamplingIdKernel : public framework::OpKernel<T> {
framework::TensorToVector(*input, context.device_context(), &ins_vector);
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
std::uniform_real_distribution<T> dist(
static_cast<T>(context.Attr<float>("min")),
static_cast<T>(context.Attr<float>("max")));
auto engine = framework::GetCPURandomEngine(seed);
std::vector<int64_t> ids(batch_size);
for (int i = 0; i < batch_size; ++i) {
T r = framework::Generator::GetInstance()->is_init_py
? dist(framework::Generator::GetInstance()->GetCPUEngine())
: dist(engine);
T r = dist(*engine);
int idx = width - 1;
for (int j = 0; j < width; ++j) {
if ((r -= ins_vector[i * width + j]) < 0) {
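The loop above is inverse-CDF sampling over a row of probabilities: draw r uniform in [0, 1), then subtract weights until r goes negative. A standalone sketch of the same walk:

```cpp
// Standalone sketch of the inverse-CDF walk: subtract weights from a
// uniform draw; the first index that drives it negative is the sample.
#include <iostream>
#include <random>
#include <vector>

int SampleId(const std::vector<double>& probs, std::mt19937_64& engine) {
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  double r = dist(engine);
  int idx = static_cast<int>(probs.size()) - 1;  // fallback: last index
  for (int j = 0; j < static_cast<int>(probs.size()); ++j) {
    if ((r -= probs[j]) < 0) {
      idx = j;
      break;
    }
  }
  return idx;
}

int main() {
  std::vector<double> probs = {0.1, 0.6, 0.3};  // one normalized row
  std::mt19937_64 engine(3);
  for (int i = 0; i < 5; ++i) std::cout << SampleId(probs, engine) << " ";
  std::cout << "\n";
}
```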

@ -14,6 +14,7 @@ limitations under the License. */
#include <limits>
#include <random>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
@ -167,22 +168,10 @@ class CPUTruncatedGaussianRandomKernel : public framework::OpKernel<T> {
TruncatedNormal<T> truncated_normal(mean, std);
int64_t size = tensor->numel();
if (framework::Generator::GetInstance()->is_init_py) {
std::mt19937_64& gen_engine =
framework::Generator::GetInstance()->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
data[i] = truncated_normal(dist(gen_engine));
}
} else {
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = truncated_normal(dist(engine));
}
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = truncated_normal(dist(*engine));
}
}
};

@ -12,7 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/uniform_random_op.h"
#include <string>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
@ -62,34 +64,12 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
std::uniform_real_distribution<T> dist(
static_cast<T>(ctx.Attr<float>("min")),
static_cast<T>(ctx.Attr<float>("max")));
auto gen_ptr = framework::Generator::GetInstance();
if (gen_ptr->is_init_py) {
std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine();
// auto gen_engine = gen_ptr_->GetCPUEngine();
// std::uniform_real_distribution<T> dist(
// static_cast<T>(ctx.Attr<float>("min")),
// static_cast<T>(ctx.Attr<float>("max")));
unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(gen_engine);
}
} else {
unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
// std::uniform_real_distribution<T> dist(
// static_cast<T>(ctx.Attr<float>("min")),
// static_cast<T>(ctx.Attr<float>("max")));
// int64_t size = tensor->numel();
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(*engine);
}
// std::mt19937_64 &engine = gen_ptr->GetCPUEngine();
// auto engine = gen_ptr_->GetCPUEngine();
unsigned int diag_num =
static_cast<unsigned int>(ctx.Attr<int>("diag_num"));
@ -139,12 +119,12 @@ class UniformRandomOp : public framework::OperatorWithKernel {
if (ctx->HasInputs("ShapeTensorList")) {
// top priority shape
auto inputs_name = ctx->Inputs("ShapeTensorList");
PADDLE_ENFORCE_GT(
inputs_name.size(), 0,
platform::errors::InvalidArgument(
"Input(ShapeTensorList)'size of Op(uniform_random) can't be zero."
"Please check the Attr(shape)'s size of"
"Op(fluid.layers.uniform_random).)"));
PADDLE_ENFORCE_GT(inputs_name.size(), 0,
platform::errors::InvalidArgument(
"Input(ShapeTensorList)'size of "
"Op(uniform_random) can't be zero."
"Please check the Attr(shape)'s size of"
"Op(fluid.layers.uniform_random).)"));
auto out_dims = std::vector<int>(inputs_name.size(), -1);
ctx->SetOutputDim("Out", framework::make_ddim(out_dims));

@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/random.h>
#include <thrust/transform.h>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
@ -88,15 +89,12 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
}
T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
if (framework::Generator::GetInstance()->is_init_py) {
seed = static_cast<unsigned int>(
framework::Generator::GetInstance()->GetCurrentSeed());
} else {
if (seed == 0) {
std::random_device rd;
seed = rd();
}
if (seed == 0) {
std::random_device rd;
seed = rd();
}
T min = static_cast<T>(context.Attr<float>("min"));
T max = static_cast<T>(context.Attr<float>("max"));
unsigned int diag_num =

@ -29,23 +29,36 @@ namespace py = pybind11;
namespace paddle {
namespace pybind {
void BindGenerator(py::module* m) {
py::class_<framework::GeneratorState>(*m, "GeneratorState", "");
py::class_<std::mt19937_64>(*m, "mt19937_64", "");
void BindGenerator(py::module* m_ptr) {
auto& m = *m_ptr;
py::class_<framework::GeneratorState,
std::shared_ptr<framework::GeneratorState>>(m, "GeneratorState")
.def("current_seed",
[](std::shared_ptr<framework::GeneratorState>& self) {
return self->current_seed;
});
py::class_<std::mt19937_64>(m, "mt19937_64", "");
py::class_<framework::Generator, std::shared_ptr<framework::Generator>>(
*m, "Generator")
.def(py::init([]() { return framework::Generator::GetInstanceX(); }),
py::return_value_policy::reference)
.def("get_state", &framework::Generator::GetState,
py::return_value_policy::move)
m, "Generator")
.def("__init__",
[](framework::Generator& self) {
new (&self) framework::Generator();
})
.def("get_state", &framework::Generator::GetState)
.def("set_state", &framework::Generator::SetState)
.def("manual_seed", &framework::Generator::SetCurrentSeed)
.def("manual_seed",
[](std::shared_ptr<framework::Generator>& self, uint64_t seed) {
self->SetCurrentSeed(seed);
return self;
})
.def("seed", &framework::Generator::Seed)
.def("initial_seed", &framework::Generator::GetCurrentSeed)
.def("random", &framework::Generator::Random64)
.def("get_cpu_engine", &framework::Generator::GetCPUEngine,
py::return_value_policy::move)
.def("set_cpu_engine", &framework::Generator::SetCPUEngine);
// .def("get_cpu_engine", &framework::Generator::GetCPUEngine)
// .def("set_cpu_engine", &framework::Generator::SetCPUEngine)
.def_property("_is_init_py", &framework::Generator::GetIsInitPy,
&framework::Generator::SetIsInitPy);
m.def("default_cpu_generator", &framework::DefaultCPUGenerator);
} // end Generator
} // end namespace pybind
} // end namespace paddle
} // namespace paddle
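Two details of the binding are worth noting: the class is exposed with a shared_ptr holder, and manual_seed is bound through a lambda that returns self so calls chain from Python. A minimal pybind11 sketch of the same pattern (toy module and type, not Paddle's actual binding):

```cpp
// Minimal pybind11 sketch of the binding pattern: shared_ptr holder plus
// a manual_seed lambda that returns self for chaining.
#include <pybind11/pybind11.h>
#include <cstdint>
#include <memory>

namespace py = pybind11;

struct Gen {
  uint64_t seed_ = 0;
  void SetCurrentSeed(uint64_t s) { seed_ = s; }
  uint64_t GetCurrentSeed() const { return seed_; }
};

PYBIND11_MODULE(toygen, m) {
  py::class_<Gen, std::shared_ptr<Gen>>(m, "Generator")
      .def(py::init<>())
      .def("manual_seed",
           [](std::shared_ptr<Gen>& self, uint64_t seed) {
             self->SetCurrentSeed(seed);
             return self;  // returning self lets Python chain calls
           })
      .def("initial_seed", &Gen::GetCurrentSeed);
}
```

From Python, this shape permits chained calls such as gen.manual_seed(1).initial_seed().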

@ -42,9 +42,11 @@ class TestSentimentMethods(unittest.TestCase):
def test_data_set(self):
data_set = st.load_sentiment_data()
last_label = -1
for each in st.test():
self.assertNotEqual(each[1], last_label)
last_label = each[1]
self.assertEqual(len(data_set), st.NUM_TOTAL_INSTANCES)
self.assertEqual(len(list(st.train())), st.NUM_TRAINING_INSTANCES)
self.assertEqual(

@ -92,9 +92,11 @@ class TestWeightDecay(unittest.TestCase):
return param_sum
def check_weight_decay(self, place, model):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
startup_prog.random_seed = 1
with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
@ -113,9 +115,11 @@ class TestWeightDecay(unittest.TestCase):
return param_sum
def check_weight_decay2(self, place, model):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
startup_prog.random_seed = 1
with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)

@ -17,44 +17,28 @@ from . import core
__all__ = ['Generator']
default_rng_seed_val = 34342423252
class Generator(object):
class Generator(core.Generator):
"""Generator class"""
def __init__(self, device="CPU"):
"""init"""
self.device = device
seed_in = default_rng_seed_val
if self.device == "CPU":
self.generator = core.Generator()
# self.generator.manual_seed(seed_in)
else:
raise ValueError(
"generator class with device %s does not exist, currently only support generator with device 'CPU' "
% device)
def get_state(self):
return self.generator.get_state()
def set_state(self, state):
self.generator.set_state(state)
def __init__(self, place=None):
"""
Create a generator object which manages the random number generation. (Experimental Feature)
def manual_seed(self, seed):
self.generator.manual_seed(seed)
Parameters:
place(CPUPlace|CUDAPinnedPlace|CUDAPlace, optional): The place to allocate Tensor. Can be
CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place.
def seed(self):
return self.generator.seed()
Returns:
Generator: A generator object.
def initial_seed(self):
return self.generator.initial_seed()
def random(self):
return self.generator.random()
def get_cpu_engine(self):
return self.generator.get_cpu_engine()
def set_cpu_engine(self, cpu_engine):
self.generator.set_cpu_engine(cpu_engine)
"""
self.place = place
if not place:
place = core.CPUPlace()
if isinstance(place, core.CPUPlace):
super(Generator, self).__init__()
else:
raise ValueError(
"Generator class with %s does is not supported yet, currently only support generator with CPUPlace "
% place)

@ -15,6 +15,7 @@
import math
import numpy as np
import unittest
import paddle
from paddle.jit import to_static
import paddle.fluid as fluid
from paddle.fluid import ParamAttr
@ -560,8 +561,8 @@ def train_bmn(args, place, to_static):
loss_data = []
with fluid.dygraph.guard(place):
fluid.default_main_program().random_seed = SEED
fluid.default_startup_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
global local_random
local_random = np.random.RandomState(SEED)

@ -21,6 +21,7 @@ import unittest
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable
from paddle.fluid.dygraph import Embedding, Linear, GRUUnit
@ -448,8 +449,8 @@ def do_train(args, to_static):
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
reader = get_random_input_data(args.batch_size, args.vocab_size,
args.num_labels)

@ -14,6 +14,7 @@
import time
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
@ -447,8 +448,8 @@ def train_mobilenet(args, to_static):
with fluid.dygraph.guard(args.place):
np.random.seed(SEED)
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
if args.model == "MobileNetV1":
net = MobileNetV1(class_dim=args.class_dim, scale=1.0)

@ -19,7 +19,7 @@ import time
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.base import to_variable
@ -218,8 +218,8 @@ def train(place):
batch_num = 200
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,

@ -16,6 +16,7 @@ import gym
import math
import itertools
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph.nn as nn
from paddle.fluid.dygraph import to_variable, Layer
@ -64,8 +65,8 @@ def train(args, place, to_static):
env.seed(SEED)
with fluid.dygraph.guard(place):
fluid.default_main_program().random_seed = SEED
fluid.default_startup_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
local_random = np.random.RandomState(SEED)
policy = Policy()

@ -215,8 +215,8 @@ def train(to_static):
"""
with fluid.dygraph.guard(place):
np.random.seed(SEED)
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_reader = paddle.batch(
reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
