Revert/barrier for sync (#25417)
* add retry for prefetch
* Revert "Fix/sync barrier (#25016)"
This reverts commit be6a315fbd.
* reopen dist UT, test=develop
* remove fl UT, test=develop
parent dd9d146ec3
commit 4b3778a3ee
@@ -1,166 +0,0 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/distributed/barrier_monitor.h"

#include <gflags/gflags.h>

#include <functional>
#include <future>  // NOLINT
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include <thread>  // NOLINT

#include <ThreadPool.h>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {
namespace distributed {

bool BarrierMonitor::IncreaseBarrier(const int worker_id,
                                     const std::string &barrier) {
  release_ = false;

  if (barrier == BATCH_BARRIER_MESSAGE) {
    VLOG(4) << "BarrierMonitor send queue recv trainer: " << worker_id;
    send_barrier_queue->Push(worker_id);
  } else if (barrier == FETCH_BARRIER_MESSAGE) {
    VLOG(4) << "BarrierMonitor recv queue recv trainer: " << worker_id;
    recv_barrier_queue->Push(worker_id);
  } else {
    PADDLE_THROW(platform::errors::Unavailable(
        "unknown Message status %s, only "
        "BATCH_BARRIER_MESSAGE/FETCH_BARRIER_MESSAGE",
        barrier));
  }
  return Wait();
}

void BarrierMonitor::DecreaseWorker() {
  std::unique_lock<std::mutex> lck(mutex_);
  workers_--;
  VLOG(1) << "decrement worker num to " << workers_;
}

void BarrierMonitor::Reset(int workers, BarrierType type) {
  std::unique_lock<std::mutex> lk(server_mutex_);

  workers_ = workers;
  barrier_type = type;

  send_barrier_queue->Clear();
  recv_barrier_queue->Clear();
  VLOG(2) << "reset monitor workers: " << workers_ << " type: " << barrier_type;
}

// Runs on the background monitor thread: once a full barrier has been
// collected the workers are released; on timeout the round is invalidated.
void BarrierMonitor::Monitor() {
  while (!IsReady() && running_) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1000));
    VLOG(3) << "sync at first time, wait all trainer ready";
  }

  while (running_) {
    int timer = 0;

    if (IsReady()) {
      Swap(true);
    } else {
      VLOG(4) << "running timer: " << timer << " barrier: " << barrier_type
              << " sendQ:" << send_barrier_queue->Size()
              << " recvQ: " << recv_barrier_queue->Size();

      timer++;
      if (max_wait_ms == -1 || timer < max_wait_ms) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
      } else {
        VLOG(1) << "time out of " << max_wait_ms
                << ", need barrier: " << barrier_type << " retry";
        Swap(false);
      }
    }
  }
}

bool BarrierMonitor::IsReady() {
  if (barrier_type == BarrierType::kSendBarrier) {
    return static_cast<int>(send_barrier_queue->Size()) == workers_;
  } else {
    return static_cast<int>(recv_barrier_queue->Size()) == workers_;
  }
}

// Flip between the send and recv barriers and release all blocked workers;
// is_valid tells them whether the barrier round completed normally.
void BarrierMonitor::Swap(bool is_valid) {
  std::unique_lock<std::mutex> lck(mutex_);

  valid_ = is_valid;
  release_ = true;

  if (barrier_type == BarrierType::kSendBarrier) {
    barrier_type = BarrierType::kRecvBarrier;
    send_barrier_queue->Clear();
    VLOG(4) << "barrier monitor server clean up queue and barrier";
    ServerWeakup();
    VLOG(4) << "barrier monitor server wake up sync to do";
    WaitServerWeakup();
    VLOG(4) << "barrier monitor server wake up sync done";
  } else {
    barrier_type = BarrierType::kSendBarrier;
    recv_barrier_queue->Clear();
    VLOG(4) << "barrier monitor server switch to send barrier";
  }

  worker_cv_.notify_all();
}

void BarrierMonitor::Stop() {
  valid_ = true;
  release_ = true;
  running_ = false;

  barrier_type = BarrierType::kRecvBarrier;
  send_barrier_queue->Clear();
  recv_barrier_queue->Clear();

  worker_cv_.notify_all();
  server_cv_.notify_all();

  if (monitor_thread_) monitor_thread_->join();
  monitor_thread_ = nullptr;
}

bool BarrierMonitor::Wait() {
  std::unique_lock<std::mutex> lk(mutex_);
  worker_cv_.wait(lk, [this] { return (release_); });
  return valid_;
}

void BarrierMonitor::WaitServerWeakup() {
  std::unique_lock<std::mutex> lk(server_mutex_);
  server_cv_.wait(lk);
}

void BarrierMonitor::ServerWeakup() { server_cv_.notify_all(); }

std::once_flag BarrierMonitor::init_flag_;
std::unique_ptr<BarrierMonitor> BarrierMonitor::monitor_(nullptr);

}  // namespace distributed
}  // namespace operators
}  // namespace paddle
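For context on the implementation removed above: each trainer reports either a batch (send) or fetch (recv) barrier, and IncreaseBarrier() blocks until the monitor thread releases the whole group, returning whether the round completed before the timeout. Below is a minimal sketch of how a server-side RPC handler could drive that API; the handler names are assumptions for illustration, not part of the removed files.

// Sketch only: assumes the removed barrier_monitor.h is available.
namespace paddle {
namespace operators {
namespace distributed {

bool HandleBatchBarrier(int worker_id) {
  // Blocks until every registered trainer has pushed its send barrier
  // (or the monitor times out); the return value reports validity.
  return BarrierMonitor::GetInstance()->IncreaseBarrier(worker_id,
                                                        BATCH_BARRIER_MESSAGE);
}

bool HandleFetchBarrier(int worker_id) {
  return BarrierMonitor::GetInstance()->IncreaseBarrier(worker_id,
                                                        FETCH_BARRIER_MESSAGE);
}

}  // namespace distributed
}  // namespace operators
}  // namespace paddle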
@@ -1,186 +0,0 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <gflags/gflags.h>

#include <chrono>  // NOLINT
#include <condition_variable>  // NOLINT
#include <deque>
#include <functional>
#include <future>  // NOLINT
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include <thread>  // NOLINT

#include <ThreadPool.h>

#include "paddle/fluid/operators/distributed/rpc_server.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {
namespace distributed {

enum BarrierType { kSendBarrier, kRecvBarrier };

constexpr int64_t kMaxWaitMS = 120000;

template <typename T>
class BlockingQueueForBarrier {
 public:
  explicit BlockingQueueForBarrier(size_t capacity) : capacity_(capacity) {
    PADDLE_ENFORCE_GT(capacity_, 0,
                      platform::errors::InvalidArgument(
                          "The capacity must be greater than 0."));
  }

  bool Push(const T &elem) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      worker_cv_.wait(lock, [&] { return queue_.size() < capacity_; });
      queue_.push_back(elem);
    }
    worker_cv_.notify_one();
    return true;
  }

  bool Push(T &&elem) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      worker_cv_.wait(lock, [&] { return queue_.size() < capacity_; });
      queue_.emplace_back(std::move(elem));
    }
    worker_cv_.notify_one();
    return true;
  }

  T Pop() {
    std::unique_lock<std::mutex> lock(mutex_);
    worker_cv_.wait(lock, [=] { return !queue_.empty(); });
    T rc(std::move(queue_.front()));
    queue_.pop_front();
    worker_cv_.notify_one();
    return rc;
  }

  size_t Cap() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return capacity_;
  }

  size_t Size() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return queue_.size();
  }

  void Clear() {
    std::lock_guard<std::mutex> lock(mutex_);
    std::deque<T>().swap(queue_);
  }

 private:
  const size_t capacity_;
  std::deque<T> queue_;

  mutable std::mutex mutex_;
  std::condition_variable worker_cv_;
};

class BarrierMonitor {
 public:
  explicit BarrierMonitor(int workers)
      : BarrierMonitor(workers, BarrierType::kRecvBarrier, kMaxWaitMS) {}

  explicit BarrierMonitor(int workers, BarrierType type, int64_t max_wait_times)
      : workers_(workers), barrier_type(type), max_wait_ms(max_wait_times) {
    PADDLE_ENFORCE_GT(workers, 0, platform::errors::InvalidArgument(
                                      "trainers must have one or more"));

    send_barrier_queue =
        std::make_shared<BlockingQueueForBarrier<int>>(workers);
    recv_barrier_queue =
        std::make_shared<BlockingQueueForBarrier<int>>(workers);

    running_ = true;
    monitor_thread_.reset(
        new std::thread(std::bind(&BarrierMonitor::Monitor, this)));
  }

  static BarrierMonitor *Init(int workers) {
    InitImpl(workers);
    return GetInstance();
  }

  static BarrierMonitor *GetInstance() { return monitor_.get(); }

  bool IncreaseBarrier(const int worker_id, const std::string &barrier);

  void DecreaseWorker();

  int GetWorkerNum() { return workers_; }

  void Monitor();

  void Swap(bool is_valid);

  void Stop();

  bool IsReady();

  bool Wait();

  void WaitServerWeakup();

  void ServerWeakup();

  void WorkerWeakup();

  void Reset(int workers, BarrierType type);

 private:
  // Init is called by GetInstance.
  static void InitImpl(int workers) {
    monitor_.reset(new BarrierMonitor(workers));
  }

  static std::once_flag init_flag_;
  static std::unique_ptr<BarrierMonitor> monitor_;

  int workers_;
  bool running_ = false;
  bool valid_ = false;
  bool release_ = false;

  std::condition_variable worker_cv_;
  std::condition_variable server_cv_;

  std::mutex server_mutex_;
  std::mutex mutex_;

  BarrierType barrier_type;
  int64_t max_wait_ms;
  std::unique_ptr<std::thread> monitor_thread_{nullptr};
  std::shared_ptr<BlockingQueueForBarrier<int>> send_barrier_queue;
  std::shared_ptr<BlockingQueueForBarrier<int>> recv_barrier_queue;
};

}  // namespace distributed
}  // namespace operators
}  // namespace paddle
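The Swap() path in the removed implementation performs a two-way handshake with the parameter-update thread: ServerWeakup() wakes whatever is blocked in WaitServerWeakup(), and the monitor then waits on the same condition variable until that thread calls ServerWeakup() back. A minimal sketch of a server update loop written against this API follows; the loop structure and the apply_update callback are assumptions for illustration, not part of the removed files.

// Sketch only: assumes the removed barrier_monitor.h is available.
#include <functional>
#include "paddle/fluid/operators/distributed/barrier_monitor.h"

void SyncUpdateLoop(paddle::operators::distributed::BarrierMonitor *monitor,
                    const std::function<void()> &apply_update,
                    const bool *running) {
  while (*running) {
    // Woken by Swap() once every trainer has reached the send barrier.
    monitor->WaitServerWeakup();
    // Apply the aggregated gradients (stand-in for the listen_and_serv
    // optimize step; name is an assumption).
    apply_update();
    // Hand control back so Swap() can switch to the recv barrier and
    // release the trainers.
    monitor->ServerWeakup();
  }
}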
@@ -1,219 +0,0 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test fl listen and serv op."""

from __future__ import print_function

import paddle
import paddle.fluid as fluid
from paddle.fluid import Program
import os
import signal
import subprocess
import time
import unittest
from multiprocessing import Process
from op_test import OpTest
import numpy
import urllib
import sys
from dist_test_utils import *

cache_path = os.path.expanduser('~/.cache/paddle/dataset')


def run_trainer(use_cuda, sync_mode, ip, port, trainers, trainer_id):
    '''
    Run a trainer process.

    Args:
        use_cuda (bool): whether to use cuda.
        sync_mode (unused): specify sync mode.
        ip (string): the ip address.
        port (string): the port for listening.
        trainers (int): the count of trainers.
        trainer_id (int): the id of this trainer.

    Returns:
        None
    '''
    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    # loss function
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    avg_cost = fluid.layers.mean(cost)
    # optimizer
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_cost)
    with open("{}/trainer_recv_program.dms".format(cache_path), "rb") as f:
        trainer_recv_program_desc_str = f.read()
    with open("{}/trainer_main_program.dms".format(cache_path), "rb") as f:
        trainer_main_program_desc_str = f.read()
    with open("{}/trainer_send_program.dms".format(cache_path), "rb") as f:
        trainer_send_program_desc_str = f.read()
    recv_program = Program.parse_from_string(trainer_recv_program_desc_str)
    main_program = Program.parse_from_string(trainer_main_program_desc_str)
    send_program = Program.parse_from_string(trainer_send_program_desc_str)

    trainer_startup_program = fluid.default_startup_program()
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    exe.run(trainer_startup_program)
    for i in range(5):
        exe.run(recv_program)
        exe.run(fluid.default_main_program(),
                feed={
                    "x": numpy.array([1, 2]).astype('float32').reshape(2, 1),
                    "y": numpy.array([2, 3]).astype('float32').reshape(2, 1)
                })
        exe.run(send_program)


def run_pserver(use_cuda, sync_mode, ip, port, trainers, trainer_id):
    '''
    Run a parameter server process.

    Args:
        use_cuda (bool): whether to use cuda.
        sync_mode (unused): specify sync mode.
        ip (string): the ip address.
        port (string): the port for listening.
        trainers (int): the count of trainers.
        trainer_id (int): the id of this trainer.

    Returns:
        None
    '''
    remove_ps_flag(os.getpid())
    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    # loss function
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    avg_cost = fluid.layers.mean(cost)
    # optimizer
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_cost)
    with open("{}/pserver_startup_program.dms".format(cache_path), "rb") as f:
        pserver_startup_program_desc_str = f.read()
    with open("{}/pserver_main_program.dms".format(cache_path), "rb") as f:
        pserver_main_program_desc_str = f.read()

    startup_program = Program.parse_from_string(
        pserver_startup_program_desc_str)
    main_program = Program.parse_from_string(pserver_main_program_desc_str)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_program)
    exe.run(main_program)


class TestFlListenAndServOp(unittest.TestCase):
    """Test the fl listen_and_serv op."""

    def setUp(self):
        """Set up the test."""
        self.ps_timeout = 5
        self.ip = "127.0.0.1"
        self.port = "6000"
        self.trainers = 2
        self.trainer_id = 0

    def _start_pserver(self, use_cuda, sync_mode, pserver_func):
        """Start the pserver process."""
        p = Process(
            target=pserver_func,
            args=(use_cuda, sync_mode, self.ip, self.port, self.trainers,
                  self.trainer_id))
        p.daemon = True
        p.start()
        return p

    def _start_trainer0(self, use_cuda, sync_mode, pserver_func):
        """Start trainer 0."""
        p = Process(
            target=pserver_func,
            args=(use_cuda, sync_mode, self.ip, self.port, self.trainers, 0))
        p.daemon = True
        p.start()
        return p

    def _start_trainer1(self, use_cuda, sync_mode, pserver_func):
        """Start trainer 1."""
        p = Process(
            target=pserver_func,
            args=(use_cuda, sync_mode, self.ip, self.port, self.trainers, 1))
        p.daemon = True
        p.start()
        return p

    def _wait_ps_ready(self, pid):
        """Wait until the pserver is ready."""
        start_left_time = self.ps_timeout
        sleep_time = 0.5
        while True:
            assert start_left_time >= 0, "wait ps ready failed"
            time.sleep(sleep_time)
            try:
                os.stat("/tmp/paddle.%d.port" % pid)
                return
            except os.error:
                start_left_time -= sleep_time

    def test_rpc_interfaces(self):
        """TODO(Yancey1989): need to make sure the rpc interface correctly."""
        # TODO(Yancey1989): need to make sure the rpc interface correctly.
        pass

    def test_handle_signal_in_serv_op(self):
        """Run pserver on CPU in sync mode."""
        # run pserver on CPU in sync mode
        if sys.platform == 'win32' or sys.platform == 'sys.platform':
            pass
        else:
            print(sys.platform)
            file_list = [
                'pserver_startup_program.dms', 'pserver_main_program.dms',
                'trainer_recv_program.dms', 'trainer_main_program.dms',
                'trainer_send_program.dms'
            ]
            if not os.path.exists(cache_path):
                os.makedirs(cache_path)
            prefix = 'wget --no-check-certificate https://paddlefl.bj.bcebos.com/test_fl_listen_and_serv/'
            for f in file_list:
                if not os.path.exists('{}/{}'.format(cache_path, f)):
                    cmd = "wget --no-check-certificate https://paddlefl.bj.bcebos.com/test_fl_listen_and_serv/{} -P {}/".format(
                        f, cache_path)
                    os.system(cmd)
            p1 = self._start_pserver(False, True, run_pserver)
            self._wait_ps_ready(p1.pid)
            time.sleep(5)
            t1 = self._start_trainer0(False, True, run_trainer)
            time.sleep(2)
            t2 = self._start_trainer1(False, True, run_trainer)
            # raise SIGTERM to pserver
            time.sleep(2)
            cmd_del = "rm trainer*dms* pserver*dms*"
            os.system(cmd_del)
            os.kill(p1.pid, signal.SIGINT)
            p1.join()
            os.kill(t1.pid, signal.SIGINT)
            t1.join()
            os.kill(t2.pid, signal.SIGINT)
            t2.join()


if __name__ == '__main__':
    unittest.main()