!6450 Change PS directory.

Merge pull request !6450 from ZPaC/master-change-ps-dir
pull/6450/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit c8e9d46391
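
This PR moves the parameter-server (PS) sources from frontend/parallel/ps/ to a new top-level ps/ directory and drops the enclosing parallel namespace level. Every hunk below applies the same pattern; a minimal sketch of that pattern at a representative call site (illustrative only, not an exhaustive list of affected files):

// Before: PS headers lived under frontend/parallel/ and inside the parallel namespace.
#include "frontend/parallel/ps/util.h"
if (mindspore::parallel::ps::Util::IsRoleOfWorker()) { /* worker-only setup */ }

// After: headers move to ps/ and the parallel namespace level is removed.
#include "ps/util.h"
if (mindspore::ps::Util::IsRoleOfWorker()) { /* worker-only setup */ }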

@ -161,7 +161,7 @@ set(SUB_COMP
frontend/operator
pipeline/jit
pipeline/pynative
common debug pybind_api utils vm profiler
common debug pybind_api utils vm profiler ps
)
foreach (_comp ${SUB_COMP})

@ -15,7 +15,7 @@
*/
#include "backend/kernel_compiler/cpu/ps/embedding_look_up_proxy_kernel.h"
#include <vector>
#include "frontend/parallel/ps/worker.h"
#include "ps/worker.h"
namespace mindspore {
namespace kernel {
@ -30,7 +30,7 @@ void EmbeddingLookUpProxyKernel::InitKernel(const CNodePtr &kernel_node) {
input_dims_ *= dim;
}
if (mindspore::parallel::ps::Util::IsRoleOfWorker()) {
if (mindspore::ps::Util::IsRoleOfWorker()) {
key_ = AnfAlgo::GetNodeAttr<size_t>(kernel_node, kAttrPsKey);
}
std::vector<size_t> keys{key_, key_, key_};
@ -41,9 +41,9 @@ void EmbeddingLookUpProxyKernel::InitKernel(const CNodePtr &kernel_node) {
MS_LOG(INFO) << "Init embedding lookup proxy kernel, input shape:" << input_shape
<< ", indices_shape:" << indices_shape << ", output_shape:" << output_shape;
std::vector<int> lens{SizeToInt(input_shape.size()), SizeToInt(indices_shape.size()), SizeToInt(output_shape.size())};
if (mindspore::parallel::ps::Util::IsRoleOfWorker()) {
parallel::ps::worker.AddEmbeddingTable(key_, input_shape[axis]);
parallel::ps::worker.InitPSEmbeddingTable(keys, values, lens);
if (mindspore::ps::Util::IsRoleOfWorker()) {
mindspore::ps::worker.AddEmbeddingTable(key_, input_shape[axis]);
mindspore::ps::worker.InitPSEmbeddingTable(keys, values, lens);
}
}
@ -70,8 +70,8 @@ bool EmbeddingLookUpProxyKernel::Launch(const std::vector<kernel::AddressPtr> &i
MS_LOG(EXCEPTION) << "Lookup id memcpy failed.";
return false;
}
parallel::ps::worker.DoPSEmbeddingLookup({key_}, lookup_ids, lengths, &lookup_result,
parallel::ps::kEmbeddingLookupCmd);
mindspore::ps::worker.DoPSEmbeddingLookup({key_}, lookup_ids, lengths, &lookup_result,
mindspore::ps::kEmbeddingLookupCmd);
auto ret2 = memcpy_s(output_addr, outputs[0]->size, lookup_result.data(), output_size);
if (ret2 != EOK) {

@ -19,12 +19,12 @@
#include <memory>
#include <functional>
#include "backend/kernel_compiler/common_utils.h"
#include "frontend/parallel/ps/util.h"
#include "ps/util.h"
namespace mindspore {
namespace kernel {
namespace ps {
using mindspore::parallel::ps::Util;
using mindspore::ps::Util;
constexpr int kAxis = 0;
void EmbeddingLookUpPSKernel::InitKernel(
const std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>> &shapes) {

@ -19,12 +19,12 @@
#include <vector>
#include <memory>
#include "backend/kernel_compiler/kernel.h"
#include "frontend/parallel/ps/util.h"
#include "ps/util.h"
namespace mindspore {
namespace kernel {
namespace ps {
using mindspore::parallel::ps::Util;
using mindspore::ps::Util;
class PServerKernel {
public:
PServerKernel(size_t rank_id, size_t pserver_num, size_t worker_num)

@ -19,8 +19,8 @@
#include <vector>
#include <string>
#include "frontend/parallel/ps/worker.h"
#include "frontend/parallel/ps/util.h"
#include "ps/worker.h"
#include "ps/util.h"
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
@ -36,10 +36,10 @@ class PullKernel : public CPUKernel {
if (inputs.size() != 2) {
MS_LOG(EXCEPTION) << "Inputs size is " << inputs.size() << ", but PullKernel needs 2.";
}
bool init_in_server = parallel::ps::worker.GetParamInitInServer(param_name_);
bool init_in_server = mindspore::ps::worker.GetParamInitInServer(param_name_);
// If init_in_server, forward kernel should run in server too.
if (!init_in_server) {
parallel::ps::Worker<T>::GetInstance().Pull(key_, inputs[1]->addr, inputs[1]->size);
mindspore::ps::worker.Pull(key_, inputs[1]->addr, inputs[1]->size);
}
return true;
}
@ -62,7 +62,7 @@ class PullKernel : public CPUKernel {
MS_EXCEPTION_IF_NULL(param_node);
param_name_ = param_node->fullname_with_scope();
if (mindspore::parallel::ps::Util::IsRoleOfWorker()) {
if (mindspore::ps::Util::IsRoleOfWorker()) {
key_ = AnfAlgo::GetNodeAttr<size_t>(kernel_node, kAttrPsKey);
}
InitSizeLists();

@ -19,8 +19,8 @@
#include <vector>
#include <algorithm>
#include "frontend/parallel/ps/worker.h"
#include "frontend/parallel/ps/util.h"
#include "ps/worker.h"
#include "ps/util.h"
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
@ -45,7 +45,7 @@ class PushKernel : public CPUKernel {
addrs.push_back(reinterpret_cast<uintptr_t>(input->addr));
sizes.push_back(SizeToInt(input->size) / sizeof(T));
}
parallel::ps::Worker<T>::GetInstance().Push(keys, addrs, sizes);
mindspore::ps::worker.Push(keys, addrs, sizes);
auto ret = memcpy_s(outputs[0]->addr, outputs[0]->size, &key_, sizeof(size_t));
if (ret != EOK) {
MS_LOG(EXCEPTION) << "Lookup id memcpy failed.";
@ -62,7 +62,7 @@ class PushKernel : public CPUKernel {
MS_LOG(INFO) << "Only init shape indices are " << only_shape_indices;
for (size_t i = 0; i < optim_input_shapes.size(); i++) {
auto shape = optim_input_shapes[i];
parallel::ps::worker.SetOptimInputShapes(key_, shape);
mindspore::ps::worker.SetOptimInputShapes(key_, shape);
if (std::count(only_shape_indices.begin(), only_shape_indices.end(), i) == 0) {
size_t size = sizeof(T);
for (size_t j = 0; j < shape.size(); j++) {

@ -17,7 +17,7 @@
#include <memory>
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_device_address.h"
#include "frontend/parallel/ps/util.h"
#include "ps/util.h"
namespace mindspore {
namespace kernel {

@ -17,7 +17,7 @@
#include <memory>
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_device_address.h"
#include "frontend/parallel/ps/util.h"
#include "ps/util.h"
namespace mindspore {
namespace kernel {

@ -27,7 +27,7 @@
#include "backend/optimizer/common/pass_manager.h"
#include "backend/optimizer/pass/replace_node_by_proxy.h"
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
#include "frontend/parallel/ps/util.h"
#include "ps/util.h"
#endif
namespace mindspore {
@ -69,7 +69,7 @@ GraphId CPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList
SetKernelInfo(graph.get());
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
AssignParamKey(graph);
if (parallel::ps::Util::IsRoleOfWorker()) {
if (ps::Util::IsRoleOfWorker()) {
Optimize(graph);
}
#endif

@ -36,9 +36,9 @@
#include "ir/anf.h"
#include "ir/func_graph_cloner.h"
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
#include "frontend/parallel/ps/worker.h"
#include "frontend/parallel/ps/common.h"
#include "frontend/parallel/ps/util.h"
#include "ps/worker.h"
#include "ps/common.h"
#include "ps/util.h"
#endif
namespace mindspore {
@ -1380,7 +1380,7 @@ void SessionBasic::RunGraphAsync(const GraphId &graph_id, const std::vector<tens
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
void SessionBasic::AssignParamKey(const KernelGraphPtr &kernel_graph) {
if (!parallel::ps::Util::IsRoleOfWorker()) {
if (!ps::Util::IsRoleOfWorker()) {
MS_LOG(INFO) << "Not parameter server mode.";
return;
}
@ -1393,7 +1393,7 @@ void SessionBasic::AssignParamKey(const KernelGraphPtr &kernel_graph) {
if (AnfAlgo::GetCNodeName(node) == kEmbeddingLookupOpName) {
size_t embedding_table_idx = 0;
auto embedding_table = AnfAlgo::GetInputNode(node->cast<CNodePtr>(), embedding_table_idx);
size_t key = parallel::ps::worker.SetParamKey(embedding_table->fullname_with_scope());
size_t key = ps::worker.SetParamKey(embedding_table->fullname_with_scope());
AnfAlgo::SetNodeAttr(kAttrPsKey, MakeValue(key), node);
} else if (AnfAlgo::GetCNodeName(node) == kPushOpName) {
auto pull_node = FindPullNode(node, node_list);
@ -1404,12 +1404,12 @@ void SessionBasic::AssignParamKey(const KernelGraphPtr &kernel_graph) {
// Second input of Pull node is the trainable parameter.
size_t parameter_index = 1;
auto parameter_node = AnfAlgo::GetInputNode(pull_node->cast<CNodePtr>(), parameter_index);
size_t key = parallel::ps::worker.SetParamKey(parameter_node->fullname_with_scope());
size_t key = ps::worker.SetParamKey(parameter_node->fullname_with_scope());
AnfAlgo::SetNodeAttr(kAttrPsKey, MakeValue(key), node);
AnfAlgo::SetNodeAttr(kAttrPsKey, MakeValue(key), pull_node);
std::string optimizer_name = AnfAlgo::GetNodeAttr<std::string>(node, kAttrOptimizerType);
parallel::ps::worker.SetKeyOptimId(key, optimizer_name);
ps::worker.SetKeyOptimId(key, optimizer_name);
}
}
}
@ -1417,7 +1417,7 @@ void SessionBasic::AssignParamKey(const KernelGraphPtr &kernel_graph) {
void SessionBasic::InitPSParamAndOptim(const KernelGraphPtr &kernel_graph,
const std::vector<tensor::TensorPtr> &inputs_const) {
if (!parallel::ps::Util::IsRoleOfWorker()) {
if (!ps::Util::IsRoleOfWorker()) {
return;
}
std::vector<tensor::TensorPtr> inputs(inputs_const);
@ -1440,7 +1440,7 @@ void SessionBasic::InitPSParamAndOptim(const KernelGraphPtr &kernel_graph,
MS_EXCEPTION_IF_NULL(input_node);
if (input_node->isa<Parameter>() && AnfAlgo::OutputAddrExist(input_node, 0)) {
auto pk_node = input_node->cast<ParameterPtr>();
parallel::ps::worker.InitPSParamAndOptim(pk_node->fullname_with_scope(), tensor);
ps::worker.InitPSParamAndOptim(pk_node->fullname_with_scope(), tensor);
}
}
}

@ -1,12 +1,5 @@
file(GLOB_RECURSE _PARALLEL_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc")
if (NOT (ENABLE_CPU AND (ENABLE_D OR ENABLE_GPU)))
list(REMOVE_ITEM _PARALLEL_SRC_FILES "ps/optimizer_info_builder.cc")
list(REMOVE_ITEM _PARALLEL_SRC_FILES "ps/optimizer_info.cc")
list(REMOVE_ITEM _PARALLEL_SRC_FILES "ps/scheduler.cc")
list(REMOVE_ITEM _PARALLEL_SRC_FILES "ps/util.cc")
endif()
if (ENABLE_DUMP_PROTO)
list(REMOVE_ITEM _PARALLEL_SRC_FILES "parallel/strategy_checkpoint/parallel_strategy_checkpoint.cc")
endif ()

@ -42,9 +42,9 @@
#include "parse/python_adapter.h"
#include "frontend/optimizer/py_pass_manager.h"
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
#include "frontend/parallel/ps/parameter_server.h"
#include "frontend/parallel/ps/scheduler.h"
#include "frontend/parallel/ps/worker.h"
#include "ps/parameter_server.h"
#include "ps/scheduler.h"
#include "ps/worker.h"
#endif
namespace mindspore {
@ -419,19 +419,19 @@ bool ExecuteAction(const ResourcePtr &res) {
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
bool StartPSWorkerAction(const ResourcePtr &res) {
parallel::ps::worker.Run();
ps::worker.Run();
return true;
}
bool StartPSServerAction(const ResourcePtr &res) {
FuncGraphPtr func_graph = res->func_graph();
auto &ps = parallel::ps::ParameterServer<float>::GetInstance();
auto &ps = ps::ParameterServer<float>::GetInstance();
ps.Run(func_graph);
return true;
}
bool StartPSSchedulerAction(const ResourcePtr &res) {
parallel::ps::Scheduler::GetInstance().Run();
ps::Scheduler::GetInstance().Run();
return true;
}
#endif
@ -585,7 +585,7 @@ std::vector<ActionItem> VmPipeline() {
actions.emplace_back(std::make_pair("validate", ValidateAction));
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
if (parallel::ps::Util::IsRoleOfWorker()) {
if (ps::Util::IsRoleOfWorker()) {
actions.emplace_back(std::make_pair("worker", StartPSWorkerAction));
}
#endif

@ -34,9 +34,9 @@
#include "runtime/device/gpu/distribution/collective_fake_init.h"
#endif
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
#include "frontend/parallel/ps/util.h"
#include "ps/util.h"
#endif
#include "frontend/parallel/ps/ps_context.h"
#include "ps/ps_context.h"
#include "pybind_api/gil_scoped_long_running.h"
@ -53,7 +53,7 @@ using OpInfoLoaderPy = mindspore::kernel::OpInfoLoaderPy;
using ParallelContext = mindspore::parallel::ParallelContext;
using CostModelContext = mindspore::parallel::CostModelContext;
using mindspore::MsCtxParam;
using PSContext = mindspore::parallel::ps::PSContext;
using PSContext = mindspore::ps::PSContext;
// Interface with python
PYBIND11_MODULE(_c_expression, m) {

@ -47,9 +47,9 @@
#include "pybind_api/pybind_patch.h"
#include "utils/shape_utils.h"
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
#include "frontend/parallel/ps/common.h"
#include "frontend/parallel/ps/util.h"
#include "frontend/parallel/ps/worker.h"
#include "ps/common.h"
#include "ps/util.h"
#include "ps/worker.h"
#endif
#if (ENABLE_GE || ENABLE_D)
@ -413,14 +413,14 @@ std::vector<ActionItem> GetPipline(const ResourcePtr &resource, const std::strin
std::string backend = MsContext::GetInstance()->backend_policy();
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
if (mindspore::parallel::ps::Util::IsParamServerMode()) {
mindspore::parallel::ps::Util::SetInternalEnvVar();
if (mindspore::ps::Util::IsParamServerMode()) {
mindspore::ps::Util::SetInternalEnvVar();
}
if (parallel::ps::Util::IsRoleOfPServer()) {
if (ps::Util::IsRoleOfPServer()) {
resource->results()[kBackend] = compile::CreateBackend();
return PServerPipeline();
}
if (parallel::ps::Util::IsRoleOfScheduler()) {
if (ps::Util::IsRoleOfScheduler()) {
return PSchedulerPipeline();
}
#endif
@ -1002,9 +1002,9 @@ void ClearResAtexit() {
pynative::ClearPyNativeSession();
session::ClearPythonParasMap();
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
if (parallel::ps::Util::IsParamServerMode()) {
if (parallel::ps::Util::IsRoleOfWorker()) {
parallel::ps::worker.Finalize();
if (ps::Util::IsParamServerMode()) {
if (ps::Util::IsRoleOfWorker()) {
ps::worker.Finalize();
}
}
#endif

@ -0,0 +1,11 @@
file(GLOB_RECURSE _PS_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc")
if (NOT (ENABLE_CPU AND (ENABLE_D OR ENABLE_GPU)))
list(REMOVE_ITEM _PS_SRC_FILES "optimizer_info_builder.cc")
list(REMOVE_ITEM _PS_SRC_FILES "optimizer_info.cc")
list(REMOVE_ITEM _PS_SRC_FILES "scheduler.cc")
list(REMOVE_ITEM _PS_SRC_FILES "util.cc")
endif()
set_property(SOURCE ${_PS_SRC_FILES} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_PS)
add_library(_mindspore_ps_obj OBJECT ${_PS_SRC_FILES})

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_COMMON_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_COMMON_H_
#ifndef MINDSPORE_CCSRC_PS_COMMON_H_
#define MINDSPORE_CCSRC_PS_COMMON_H_
#include <iostream>
#include <vector>
@ -25,7 +25,6 @@
#include "ps/ps.h"
namespace mindspore {
namespace parallel {
namespace ps {
constexpr char kEnvCommType[] = "MS_COMM_TYPE";
constexpr char kEnvInterface[] = "MS_INTERFACE";
@ -131,6 +130,5 @@ const std::map<std::string, OptimOriginIdx> kOptimToPSSendIdx = {{kApplyMomentum
} \
}
} // namespace ps
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_COMMON_H_
#endif // MINDSPORE_CCSRC_PS_COMMON_H_

@ -14,15 +14,14 @@
* limitations under the License.
*/
#include "frontend/parallel/ps/optimizer_info.h"
#include "ps/optimizer_info.h"
#include <map>
#include <memory>
#include <string>
#include <functional>
#include "frontend/parallel/ps/util.h"
#include "ps/util.h"
namespace mindspore {
namespace parallel {
namespace ps {
void OptimizerInfo::AddWorkspace(const AddressPtr &workspace) { workspaces_.push_back(workspace); }
@ -367,5 +366,4 @@ size_t SparseFtrlOptimInfo::indices_index() {
return ps_indices_index;
}
} // namespace ps
} // namespace parallel
} // namespace mindspore

@ -14,16 +14,15 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_OPTIMIZER_INFO_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_OPTIMIZER_INFO_H_
#ifndef MINDSPORE_CCSRC_PS_OPTIMIZER_INFO_H_
#define MINDSPORE_CCSRC_PS_OPTIMIZER_INFO_H_
#include <vector>
#include <string>
#include "backend/kernel_compiler/kernel.h"
#include "frontend/parallel/ps/common.h"
#include "ps/common.h"
namespace mindspore {
namespace parallel {
namespace ps {
using mindspore::kernel::AddressPtr;
class OptimizerInfo {
@ -126,6 +125,5 @@ class SparseFtrlOptimInfo : public SparseOptimInfo {
size_t indices_index() override;
};
} // namespace ps
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_OPTIMIZER_INFO_H_
#endif // MINDSPORE_CCSRC_PS_OPTIMIZER_INFO_H_

@ -14,14 +14,13 @@
* limitations under the License.
*/
#include "frontend/parallel/ps/optimizer_info_builder.h"
#include "ps/optimizer_info_builder.h"
#include <vector>
#include <memory>
#include <functional>
#include "backend/kernel_compiler/cpu/ps/sparse_apply_ftrl_ps_kernel.h"
namespace mindspore {
namespace parallel {
namespace ps {
using mindspore::kernel::ps::SparseApplyFtrlPSKernel;
OptimizerInfo *OptimizerInfoBuilder::Build(const std::shared_ptr<PServerKernel> &pserver_kernel,
@ -101,6 +100,7 @@ AddressPtr OptimizerInfoBuilder::GenInputAddrPtr(const std::string &optim_type,
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret << ")";
delete[] buffer;
buffer = nullptr;
return nullptr;
}
return addr_ptr;
@ -123,6 +123,7 @@ OptimizerInfo *MomentumOptimInfoBuilder::BuildInputs(const WeightPtr &weight, co
if (ret != 0) {
MS_LOG(EXCEPTION) << "memset_s error, errorno(" << ret << ")";
delete[] reinterpret_cast<float *>(accumulate->addr);
accumulate->addr = nullptr;
return nullptr;
}
@ -149,6 +150,7 @@ OptimizerInfo *SparseAdamOptimInfoBuilder::BuildInputs(const WeightPtr &weight,
if (ret != 0) {
MS_LOG(EXCEPTION) << "memset_s error, errorno(" << ret << ")";
delete[] reinterpret_cast<float *>(m->addr);
m->addr = nullptr;
return nullptr;
}
@ -161,7 +163,9 @@ OptimizerInfo *SparseAdamOptimInfoBuilder::BuildInputs(const WeightPtr &weight,
if (ret != 0) {
MS_LOG(EXCEPTION) << "memset_s error, errorno(" << ret << ")";
delete[] reinterpret_cast<float *>(v->addr);
v->addr = nullptr;
delete[] reinterpret_cast<float *>(m->addr);
m->addr = nullptr;
return nullptr;
}
@ -205,6 +209,7 @@ OptimizerInfo *SparseFtrlOptimInfoBuilder::BuildInputs(const WeightPtr &weight,
if (ret != 0) {
MS_LOG(EXCEPTION) << "memset_s error, errorno(" << ret << ")";
delete[] reinterpret_cast<float *>(linear->addr);
linear->addr = nullptr;
return nullptr;
}
linear->size = weight->size() * sizeof(float);
@ -214,5 +219,4 @@ OptimizerInfo *SparseFtrlOptimInfoBuilder::BuildInputs(const WeightPtr &weight,
return new SparseFtrlOptimInfo(weight_addr, accum, linear, grad, indices);
}
} // namespace ps
} // namespace parallel
} // namespace mindspore
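
Besides the path and namespace changes, the optimizer_info_builder.cc hunks above also null out freed pointers after delete[] on the error paths. A minimal sketch of the idiom as it appears in this diff (buffer length and source names are hypothetical placeholders):

float *buffer = new float[total_size];
int ret = memcpy_s(buffer, dst_size, src_data, src_size);
if (ret != 0) {
  MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret << ")";
  delete[] buffer;
  buffer = nullptr;  // prevent later use of the dangling pointer
  return nullptr;
}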

@ -14,18 +14,17 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_OPTIMIZER_INFO_BUILDER_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_OPTIMIZER_INFO_BUILDER_H_
#ifndef MINDSPORE_CCSRC_PS_OPTIMIZER_INFO_BUILDER_H_
#define MINDSPORE_CCSRC_PS_OPTIMIZER_INFO_BUILDER_H_
#include <vector>
#include <memory>
#include <string>
#include "backend/kernel_compiler/kernel.h"
#include "backend/kernel_compiler/cpu/ps/pserver_kernel.h"
#include "frontend/parallel/ps/optimizer_info.h"
#include "ps/optimizer_info.h"
namespace mindspore {
namespace parallel {
namespace ps {
using mindspore::kernel::KernelMod;
using mindspore::kernel::ps::PServerKernel;
@ -55,6 +54,7 @@ class OptimizerInfoBuilder {
class MomentumOptimInfoBuilder : public OptimizerInfoBuilder {
public:
explicit MomentumOptimInfoBuilder(size_t worker_num) : OptimizerInfoBuilder(worker_num) {}
~MomentumOptimInfoBuilder() = default;
OptimizerInfo *BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values, const Lengths &lens,
const InputsShapePtr &inputs_shape, size_t worker_num,
const std::shared_ptr<PServerKernel> &pserver_kernel) override;
@ -63,6 +63,7 @@ class MomentumOptimInfoBuilder : public OptimizerInfoBuilder {
class SparseAdamOptimInfoBuilder : public OptimizerInfoBuilder {
public:
explicit SparseAdamOptimInfoBuilder(size_t worker_num) : OptimizerInfoBuilder(worker_num) {}
~SparseAdamOptimInfoBuilder() = default;
OptimizerInfo *BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values, const Lengths &lens,
const InputsShapePtr &inputs_shape, size_t worker_num,
const std::shared_ptr<PServerKernel> &pserver_kernel) override;
@ -71,11 +72,11 @@ class SparseAdamOptimInfoBuilder : public OptimizerInfoBuilder {
class SparseFtrlOptimInfoBuilder : public OptimizerInfoBuilder {
public:
explicit SparseFtrlOptimInfoBuilder(size_t worker_num) : OptimizerInfoBuilder(worker_num) {}
~SparseFtrlOptimInfoBuilder() = default;
OptimizerInfo *BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values, const Lengths &lens,
const InputsShapePtr &inputs_shape, size_t worker_num,
const std::shared_ptr<PServerKernel> &pserver_kernel) override;
};
} // namespace ps
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_OPTIMIZER_INFO_BUILDER_H_
#endif // MINDSPORE_CCSRC_PS_OPTIMIZER_INFO_BUILDER_H_

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_PARAMETER_SERVER_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_PARAMETER_SERVER_H_
#ifndef MINDSPORE_CCSRC_PS_PARAMETER_SERVER_H_
#define MINDSPORE_CCSRC_PS_PARAMETER_SERVER_H_
#include <unistd.h>
#include <unordered_map>
@ -36,11 +36,11 @@
#include "backend/session/session_basic.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/session/session_factory.h"
#include "frontend/parallel/ps/common.h"
#include "frontend/parallel/ps/optimizer_info.h"
#include "frontend/parallel/ps/optimizer_info_builder.h"
#include "frontend/parallel/ps/util.h"
#include "frontend/parallel/ps/ps_context.h"
#include "ps/common.h"
#include "ps/optimizer_info.h"
#include "ps/optimizer_info_builder.h"
#include "ps/util.h"
#include "ps/ps_context.h"
#include "runtime/device/cpu/kernel_select_cpu.h"
#include "utils/ms_context.h"
#include "backend/kernel_compiler/kernel.h"
@ -53,7 +53,6 @@
#include "backend/kernel_compiler/cpu/ps/embedding_look_up_ps_kernel.h"
namespace mindspore {
namespace parallel {
namespace ps {
using mindspore::kernel::ps::PServerKernel;
using AnfAlgo = session::AnfRuntimeAlgorithm;
@ -780,6 +779,5 @@ void ParameterServer<T>::Run(const FuncGraphPtr &func_graph) {
MS_LOG(INFO) << "PServer finalized successfully.";
}
} // namespace ps
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_PARAMETER_SERVER_H_
#endif // MINDSPORE_CCSRC_PS_PARAMETER_SERVER_H_

@ -14,12 +14,11 @@
* limitations under the License.
*/
#include "frontend/parallel/ps/ps_context.h"
#include "ps/ps_context.h"
#include "utils/log_adapter.h"
#include "utils/ms_utils.h"
namespace mindspore {
namespace parallel {
namespace ps {
std::shared_ptr<PSContext> PSContext::instance() {
static std::shared_ptr<PSContext> ps_instance = nullptr;
@ -82,5 +81,4 @@ void PSContext::SetPSRankId(int rank_id) { rank_id_ = rank_id; }
int PSContext::ps_rank_id() const { return rank_id_; }
} // namespace ps
} // namespace parallel
} // namespace mindspore

@ -14,14 +14,13 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_CONTEXT_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_CONTEXT_H_
#ifndef MINDSPORE_CCSRC_PS_CONTEXT_H_
#define MINDSPORE_CCSRC_PS_CONTEXT_H_
#include <string>
#include <memory>
namespace mindspore {
namespace parallel {
namespace ps {
constexpr char kEnvRole[] = "MS_ROLE";
constexpr char kEnvRoleOfPServer[] = "MS_PSERVER";
@ -55,7 +54,6 @@ class PSContext {
int rank_id_;
};
} // namespace ps
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_CONTEXT_H_
#endif // MINDSPORE_CCSRC_PS_CONTEXT_H_

@ -14,11 +14,10 @@
* limitations under the License.
*/
#include "frontend/parallel/ps/scheduler.h"
#include "ps/scheduler.h"
#include "ps/ps.h"
namespace mindspore {
namespace parallel {
namespace ps {
void Scheduler::Run() {
::ps::Start(0);
@ -26,5 +25,4 @@ void Scheduler::Run() {
exit(1);
}
} // namespace ps
} // namespace parallel
} // namespace mindspore

@ -14,10 +14,9 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_SCHEDULER_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_SCHEDULER_H_
#ifndef MINDSPORE_CCSRC_PS_SCHEDULER_H_
#define MINDSPORE_CCSRC_PS_SCHEDULER_H_
namespace mindspore {
namespace parallel {
namespace ps {
class Scheduler {
public:
@ -35,6 +34,5 @@ class Scheduler {
Scheduler &operator=(const Scheduler &) = delete;
};
} // namespace ps
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_SCHEDULER_H_
#endif // MINDSPORE_CCSRC_PS_SCHEDULER_H_

Some files were not shown because too many files have changed in this diff.
