[Sharding] add new features (#28568)
* add lars to fleet meta optimizer
* add lamb to proto
* add lamb to fleet meta optimizer
* fixed syntax bug
* fixed syntax bug
* fixed syntax error in lamb, add config setter of lamb in distributed_strategy
* trigger unittest to rerun
* add new unittest func for lamb
* revise unittest for lars and lamb
* revise dgc meta unittest
* revise lars document in distributed_strategy
* revise lars lamb document in distributed_strategy.py
* revise lars lamb document in distributed_strategy.py
* add weight decay exclude logic to lars
* restore optimizer.py
* restore optimizer.py as develop except lars
* add epsilon and exclude fn to distributed_strategy
* add lars epsilon
* revise unittest for fleet lars and lamb
* revise lars lamb unittest for CI coverage
* revise lars argument api
* revise lars argument api
* revise lars argument api
* revise api doc of lars
* fix op role
* add sharding save and add_sync_comm_for_test function
* add comm_analyse to utils
* revise sharding_utils
* add sharding saving unittest
* revise sharding utils for unittest
parent 8c75b2554a
commit 5a9f6889c1
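
Most of the commits squashed into this PR touch the LARS/LAMB meta optimizers and their DistributedStrategy switches, while the diff shown below only contains the new sharding-save test files. For orientation, the sketch below shows how those switches are typically toggled through fleet.DistributedStrategy; the config keys used here (lars_coeff, lars_weight_decay, epsilon, exclude_from_weight_decay) are inferred from the commit titles above and are an assumption, not taken from this diff.

    # Sketch only: LARS/LAMB switches on fleet.DistributedStrategy.
    # The config keys below are inferred from the commit titles and may
    # differ from the exact API added by this PR.
    import paddle
    import paddle.distributed.fleet as fleet

    paddle.enable_static()
    fleet.init(is_collective=True)

    strategy = fleet.DistributedStrategy()
    strategy.lars = True
    strategy.lars_configs = {
        "lars_coeff": 0.001,
        "lars_weight_decay": 0.0005,
        "epsilon": 0,
        # parameters whose names contain these substrings skip weight decay
        "exclude_from_weight_decay": ["batch_norm", ".b_0"],
    }
    # LAMB is enabled the same way (strategy.lamb = True plus
    # strategy.lamb_configs) and wraps an Adam optimizer instead of Momentum.

    optimizer = paddle.fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
    optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)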
@@ -0,0 +1,90 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid as fluid
from test_dist_base import TestDistRunnerBase, runtime_main
from dist_mnist import cnn_model
# from paddle.fluid.incubate.fleet.collective import fleet
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
from paddle.distributed.fleet.meta_optimizers.sharding.utils import sharding_save_persistables

import os
import six
import sys
import pickle

# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1


def runtime_main():
    import paddle.distributed.fleet as fleet

    # model definition
    train_prog = paddle.fluid.Program()
    startup_prog = paddle.fluid.Program()
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)
    with fluid.program_guard(train_prog, startup_prog):
        with fluid.unique_name.guard():
            input_x = paddle.fluid.layers.data(
                name="x", shape=[32], dtype='float32')
            input_y = paddle.fluid.layers.data(
                name="y", shape=[1], dtype='int64')

            fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh')
            fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh')
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax')
            cost = paddle.fluid.layers.cross_entropy(
                input=prediction, label=input_y)
            avg_cost = paddle.fluid.layers.mean(x=cost)

            strategy = paddle.distributed.fleet.DistributedStrategy()
            strategy.sharding = True
            strategy.sharding_configs = {"fuse_broadcast_MB": 0.2}

            optimizer = paddle.fluid.optimizer.Momentum(
                learning_rate=0.01, momentum=0.9)
            optimizer = fleet.distributed_optimizer(
                optimizer, strategy=strategy)
            optimizer.minimize(avg_cost)

    # execution
    device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
    place = fluid.CUDAPlace(device_id)
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    dirname = "./ut_sharding_save_model"
    sharding_save_persistables(
        exe, dirname, main_program=train_prog, filename=None)

    out_losses = []
    if six.PY2:
        print(pickle.dumps(out_losses))
    else:
        sys.stdout.buffer.write(pickle.dumps(out_losses))


if __name__ == "__main__":
    # NOTE(liangjianzhong): dist unittest should be implemented using runtime_main
    # in test_dist_base.py, but the runtime_main in test_dist_base.py uses the
    # fleet and DistributedStrategy from paddle.fluid.incubate.fleet.collective,
    # which is not supported by sharding (paddle.distributed.fleet).
    # This should be updated in the future.
    # runtime_main(TestDistMnist2x2)
    runtime_main()
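
The trainer script above ends right after sharding_save_persistables writes the persistables to ./ut_sharding_save_model; the unittest below only checks the resulting file names. As an illustrative follow-up that is not part of this PR, the saved directory could be reloaded into the same program with the standard fluid.io.load_persistables API, for example:

    # Sketch only (not part of this diff): reload what the sharding save wrote.
    # Assumes `train_prog` is the program built in runtime_main() above.
    import paddle.fluid as fluid

    def reload_saved_vars(train_prog, dirname="./ut_sharding_save_model"):
        exe = fluid.Executor(fluid.CUDAPlace(0))
        # filename=None above means one file per variable, which is how
        # load_persistables reads the directory by default.
        fluid.io.load_persistables(exe, dirname, main_program=train_prog)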
@@ -0,0 +1,79 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import shutil
import os
import unittest
from test_dist_base import TestDistBase
import paddle

paddle.enable_static()


class TestDistMnistFleetSave(TestDistBase):
    def _setup_config(self):
        self._sync_mode = True
        self._use_reduce = False
        self._use_reader_alloc = False
        self._nccl2_mode = True
        self._gpu_fleet_api = True
        self._sharding_save = True
        self._enforce_place = "GPU"

    def _rm_temp_files(self, dirname):
        shutil.rmtree(dirname)

    def _test_saved_files(self, dirname):
        sharding_save_files = sorted(os.listdir(dirname))

        check_files = [
            'fc_0.b_0', 'fc_0.b_0_velocity_0', 'fc_0.w_0',
            'fc_0.w_0_velocity_0', 'fc_1.b_0', 'fc_1.b_0_velocity_0',
            'fc_1.w_0', 'fc_1.w_0_velocity_0', 'fc_2.b_0',
            'fc_2.b_0_velocity_0', 'fc_2.w_0', 'fc_2.w_0_velocity_0',
            'learning_rate_0'
        ]

        if sharding_save_files != check_files:
            self._rm_temp_files(dirname)
            raise ValueError("Test Failed.")
        self._rm_temp_files(dirname)

        return True

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=True,
                         need_envs={},
                         log_name=""):
        required_envs = self._get_required_envs(check_error_log, need_envs)

        tr0_losses, tr1_losses = self._run_cluster_nccl2(
            model_file,
            required_envs,
            False,
            check_error_log,
            log_name=log_name)

        dirname = './ut_sharding_save_model'
        self._test_saved_files(dirname)

    def test_dist_train(self):
        import paddle.fluid as fluid
        if fluid.core.is_compiled_with_cuda():
            self.check_with_place("dist_sharding_save.py", delta=1e-5)


if __name__ == "__main__":
    unittest.main()