remove two context params

pull/1038/head
jinyaohui 5 years ago
parent 298a784878
commit 391a060f21

@@ -58,7 +58,6 @@ options:
--epoch_size epoch size: N, default is 1
--device_num number of used devices: N, default is 1
--device_id device id: N, default is 0
--enable_task_sink enable task sink: "true" | "false", default is "true"
--enable_loop_sink enable loop sink: "true" | "false", default is "true"
--enable_mem_reuse enable memory reuse: "true" | "false", default is "true"
--enable_save_ckpt enable save checkpoint: "true" | "false", default is "true"

@@ -50,7 +50,6 @@ do
--epoch_size=$EPOCH_SIZE \
--device_id=$DEVICE_ID \
--device_num=$RANK_SIZE \
--enable_task_sink="true" \
--enable_loop_sink="true" \
--enable_mem_reuse="true" \
--enable_save_ckpt="true" \

@@ -59,7 +59,6 @@ def run_pretrain():
parser.add_argument("--epoch_size", type=int, default="1", help="Epoch size, default is 1.")
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
parser.add_argument("--enable_task_sink", type=str, default="true", help="Enable task sink, default is true.")
parser.add_argument("--enable_loop_sink", type=str, default="true", help="Enable loop sink, default is true.")
parser.add_argument("--enable_mem_reuse", type=str, default="true", help="Enable mem reuse, default is true.")
parser.add_argument("--enable_save_ckpt", type=str, default="true", help="Enable save checkpoint, default is true.")
@@ -76,8 +75,7 @@ def run_pretrain():
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
context.set_context(enable_task_sink=(args_opt.enable_task_sink == "true"),
enable_loop_sink=(args_opt.enable_loop_sink == "true"),
context.set_context(enable_loop_sink=(args_opt.enable_loop_sink == "true"),
enable_mem_reuse=(args_opt.enable_mem_reuse == "true"))
context.set_context(reserve_class_name_in_scope=False)
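For readability, a minimal sketch of how the run_pretrain.py flag handling reads once this hunk is applied; the argparse setup and import are assumed from the other hunks above (only the options relevant here are shown), the flag names and the string-to-bool convention are taken verbatim from the diff, and indentation is reconstructed since the viewer strips it:

import argparse
import mindspore.context as context

parser = argparse.ArgumentParser()
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
parser.add_argument("--enable_loop_sink", type=str, default="true", help="Enable loop sink, default is true.")
parser.add_argument("--enable_mem_reuse", type=str, default="true", help="Enable mem reuse, default is true.")
args_opt = parser.parse_args()

# enable_task_sink is no longer passed here; loop sink and memory reuse remain configurable.
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
context.set_context(enable_loop_sink=(args_opt.enable_loop_sink == "true"),
                    enable_mem_reuse=(args_opt.enable_mem_reuse == "true"))
context.set_context(reserve_class_name_in_scope=False)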

@@ -29,7 +29,6 @@ python run_pretrain.py \
--distribute="false" \
--epoch_size=$EPOCH_SIZE \
--device_id=$DEVICE_ID \
--enable_task_sink="true" \
--enable_loop_sink="true" \
--enable_mem_reuse="true" \
--enable_save_ckpt="true" \

@@ -70,7 +70,6 @@ if __name__ == '__main__':
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
context.set_context(device_id=args_opt.device_id)
context.set_context(enable_task_sink=True)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)

@@ -34,7 +34,6 @@ args_opt = parser.parse_args()
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
context.set_context(enable_task_sink=True)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)

@@ -54,7 +54,6 @@ rank_size = int(os.getenv('RANK_SIZE'))
run_distribute = rank_size > 1
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
context.set_context(enable_task_sink=True)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)

@@ -46,7 +46,6 @@ args_opt = parser.parse_args()
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False, device_id=device_id)
context.set_context(enable_task_sink=True)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)

@@ -49,7 +49,6 @@ args_opt = parser.parse_args()
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False, device_id=device_id)
context.set_context(enable_task_sink=True)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)

@@ -39,7 +39,7 @@ args_opt = parser.parse_args()
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
context.set_context(enable_task_sink=True, device_id=device_id)
context.set_context(device_id=device_id)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)

@@ -42,7 +42,7 @@ args_opt = parser.parse_args()
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
context.set_context(enable_task_sink=True, device_id=device_id)
context.set_context(device_id=device_id)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)

@@ -71,7 +71,7 @@ if __name__ == '__main__':
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
context.set_context(enable_task_sink=True, enable_loop_sink=True, enable_mem_reuse=True)
context.set_context(enable_loop_sink=True, enable_mem_reuse=True)
config = ConfigSSD()
prefix = "ssd_eval.mindrecord"

@@ -93,7 +93,7 @@ def main():
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
context.set_context(enable_task_sink=True, enable_loop_sink=True, enable_mem_reuse=True)
context.set_context(enable_loop_sink=True, enable_mem_reuse=True)
if args_opt.distribute:
device_num = args_opt.device_num

@@ -64,7 +64,6 @@ if __name__ == '__main__':
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
context.set_context(device_id=args_opt.device_id)
context.set_context(enable_task_sink=True)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)

@@ -82,7 +82,7 @@ if __name__ == '__main__':
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
context.set_context(enable_task_sink=True, enable_loop_sink=True, enable_mem_reuse=True)
context.set_context(enable_loop_sink=True, enable_mem_reuse=True)
# It will generate mindrecord file in args_opt.mindrecord_dir,
# and the file name is yolo.mindrecord0, 1, ... file_num.

@@ -85,7 +85,7 @@ def main():
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
context.set_context(enable_task_sink=True, enable_loop_sink=True, enable_mem_reuse=True)
context.set_context(enable_loop_sink=True, enable_mem_reuse=True)
if args_opt.distribute:
device_num = args_opt.device_num
context.reset_auto_parallel_context()

@@ -115,12 +115,8 @@ PYBIND11_MODULE(_c_expression, m) {
.def("set_device_id", &mindspore::MsContext::set_device_id, "Set device id.")
.def("open_tsd", &mindspore::MsContext::OpenTsd, "Open tdt dataset client.")
.def("close_tsd", &mindspore::MsContext::CloseTsd, "Close tdt dataset client.")
.def("set_task_sink_flag", &mindspore::MsContext::set_enable_task_sink, "Set enable task sink.")
.def("get_task_sink_flag", &mindspore::MsContext::enable_task_sink, "Get whether to enable task sink.")
.def("get_save_graphs_flag", &mindspore::MsContext::save_graphs_flag, "Get whether to save graphs.")
.def("set_save_graphs_flag", &mindspore::MsContext::set_save_graphs_flag, "Set whether to save graphs.")
.def("get_ir_fusion_flag", &mindspore::MsContext::ir_fusion_flag, "Get whether to enable ir fusion.")
.def("set_ir_fusion_flag", &mindspore::MsContext::set_ir_fusion_flag, "Set whether to enable ir fusion.")
.def("get_auto_mixed_precision_flag", &mindspore::MsContext::auto_mixed_precision_flag,
"Get whether to enable auto mixed precision.")
.def("set_auto_mixed_precision_flag", &mindspore::MsContext::set_auto_mixed_precision_flag,

@@ -62,7 +62,6 @@ class MsContext {
bool enable_pynative_infer() const { return enable_pynative_infer_; }
void set_enable_pynative_infer(bool enable_pynative_infer) { enable_pynative_infer_ = enable_pynative_infer; }
void set_enable_task_sink(bool enable_task_sink) { enable_task_sink_ = enable_task_sink; }
bool enable_task_sink() const { return enable_task_sink_; }
void set_precompile_only(bool precompile_only) { precompile_only_ = precompile_only; }
@@ -90,7 +89,6 @@ class MsContext {
bool enable_hccl() const { return enable_hccl_; }
bool PynativeInitGe();
void set_ir_fusion_flag(bool ir_fusion_flag) { ir_fusion_flag_ = ir_fusion_flag; }
bool ir_fusion_flag() const { return ir_fusion_flag_; }
void set_loop_sink_flag(bool loop_sink_flag) { enable_loop_sink_ = loop_sink_flag; }

@@ -142,15 +142,6 @@ class _Context:
raise ValueError("Context handle is none in context!!!")
return value
# For Ascend task sink mode execution
@property
def enable_task_sink(self):
return self._context_handle.get_task_sink_flag()
@enable_task_sink.setter
def enable_task_sink(self, task_sink):
self._context_handle.set_task_sink_flag(task_sink)
@property
def mode(self):
return self._context_handle.get_execution_mode()
@@ -224,14 +215,6 @@ class _Context:
if not success:
raise RuntimeError("Device id set failed!!!")
@property
def enable_ir_fusion(self):
return self._context_handle.get_ir_fusion_flag()
@enable_ir_fusion.setter
def enable_ir_fusion(self, enable_ir_fusion):
self._context_handle.set_ir_fusion_flag(enable_ir_fusion)
@property
def enable_loop_sink(self):
return self._context_handle.get_loop_sink_flag()
@@ -485,11 +468,9 @@ def reset_auto_parallel_context():
_reset_auto_parallel_context()
@args_type_check(mode=int, precompile_only=bool, device_target=str,
device_id=int, enable_ir_fusion=bool, save_graphs=bool,
enable_task_sink=bool, save_graphs_path=str, enable_loop_sink=bool,
enable_mem_reuse=bool, save_ms_model=bool, save_ms_model_path=str,
enable_auto_mixed_precision=bool, enable_dump=bool, save_dump_path=str,
@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
save_graphs_path=str, enable_loop_sink=bool, enable_mem_reuse=bool, save_ms_model=bool,
save_ms_model_path=str, enable_auto_mixed_precision=bool, enable_dump=bool, save_dump_path=str,
enable_reduce_precision=bool, graph_memory_max_size=str,
variable_memory_max_size=str, enable_profiling=bool, profiling_options=str)
def set_context(**kwargs):
@@ -517,10 +498,8 @@ def set_context(**kwargs):
device_target (str): The target device to run, support "Ascend", "GPU", "CPU". Default: "Ascend".
device_id (int): Id of target device, the value must be in [0, device_num_per_host-1],
while device_num_per_host should no more than 4096. Default: 0.
enable_ir_fusion (bool): Whether to enable ir fusion. Default: True.
save_graphs (bool): Whether to save graphs. Default: False.
enable_loop_sink (bool): Whether to enable loop sink. Default: True.
enable_task_sink (bool): Whether to enable task sink. Default: True.
enable_mem_reuse (bool): Whether to enable memory reuse. Default: True.
save_ms_model (bool): Whether to save lite model converted by graph. Default: False.
save_ms_model_path (str): Path to save converted lite model. Default: "."
@@ -559,7 +538,6 @@ def set_context(**kwargs):
>>> context.set_context(device_target="Ascend")
>>> context.set_context(device_id=0)
>>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
>>> context.set_context(enable_task_sink=True)
>>> context.set_context(enable_mem_reuse=True)
>>> context.set_context(enable_reduce_precision=True)
>>> context.set_context(save_ms_model=True, save_ms_model_path=".")
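After this change, enable_task_sink and enable_ir_fusion are no longer accepted keywords of context.set_context (they are dropped from the args_type_check list in the hunk above). A minimal migration sketch for caller scripts, assuming only the remaining keywords documented in this hunk; the old call is shown commented out for contrast:

import mindspore.context as context

# Before this commit, callers could pass the two removed keywords explicitly:
# context.set_context(enable_task_sink=True, enable_ir_fusion=True,
#                     enable_loop_sink=True, enable_mem_reuse=True)

# After this commit, simply drop the removed keywords; the remaining flags are unchanged.
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=0)
context.set_context(enable_loop_sink=True, enable_mem_reuse=True)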

@@ -33,9 +33,7 @@ def setup_module():
global rank_id
np.random.seed(0)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_task_sink=True,
device_id=device_id)
context.set_context(enable_ir_fusion=True)
context.set_context(device_id=device_id)
context.set_context(enable_loop_sink=False)
distributedTool.init()
device_num = distributedTool.get_group_size()
@@ -86,15 +84,15 @@ class DataGenerator():
return data
def input_data(self, shape):
data = (self.generate_data(shape)*2).astype(np.float32)
stra = [1]*len(shape)
data = (self.generate_data(shape) * 2).astype(np.float32)
stra = [1] * len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
return Tensor(data), Tensor(datas[rank_id])
def label_data(self, shape, classes):
data = (self.generate_data(shape)*(classes-1)).astype(np.int32)
stra = [1]*len(shape)
data = (self.generate_data(shape) * (classes - 1)).astype(np.int32)
stra = [1] * len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
return Tensor(data), Tensor(datas[rank_id])

@@ -37,7 +37,7 @@ device_id = int(os.getenv('DEVICE_ID'))
rank_id = 0
embed = 128
classes = 32
batch_size = 32*2
batch_size = 32 * 2
MatmulParamShape = (classes, embed)
@@ -46,9 +46,7 @@ def setup_module():
global rank_id
np.random.seed(0)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_task_sink=True,
device_id=device_id)
context.set_context(enable_ir_fusion=True)
context.set_context(device_id=device_id)
context.set_context(enable_loop_sink=False)
distributedTool.init()
rank_id = distributedTool.get_rank()
@@ -77,20 +75,20 @@ class DataGenerator():
def generate_data(self, shape):
size = np.cumprod(shape)[-1]
num_range = min(size, 1000)
data = (np.arange(0, size) % num_range)/num_range
data = (np.arange(0, size) % num_range) / num_range
data = np.reshape(data, shape)
return data
def input_data(self, shape):
data = (self.generate_data(shape)*0.1).astype(np.float32)
stra = [1]*len(shape)
data = (self.generate_data(shape) * 0.1).astype(np.float32)
stra = [1] * len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
return Tensor(data), Tensor(datas[rank_id])
def label_data(self, shape, embed):
data = (self.generate_data(shape)*(embed-1)).astype(np.int32)
stra = [1]*len(shape)
data = (self.generate_data(shape) * (embed - 1)).astype(np.int32)
stra = [1] * len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
return Tensor(data), Tensor(datas[rank_id])
@@ -141,7 +139,7 @@ class SoftmaxCrossEntropyExpand(Cell):
def __init__(self, sparse=False, stra_list=[]):
super(SoftmaxCrossEntropyExpand, self).__init__()
if len(stra_list) < 11:
stra_list = [None]*11
stra_list = [None] * 11
self.exp = P.Exp()
self.reduce_sum = P.ReduceSum(keep_dims=True).set_strategy(strategy=stra_list[1])
self.onehot = P.OneHot().set_strategy(strategy=stra_list[2])

@@ -31,8 +31,7 @@ from mindspore.train.callback import Callback
from mindspore.parallel import set_algo_parameters
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_task_sink=True, device_id=int(os.getenv('DEVICE_ID')))
context.set_context(enable_ir_fusion=True)
context.set_context(device_id=int(os.getenv('DEVICE_ID')))
context.set_context(enable_loop_sink=False)
init()
context.set_auto_parallel_context(mirror_mean=True, parallel_mode=ParallelMode.AUTO_PARALLEL)
@@ -316,14 +315,14 @@ class DataGenerator():
def input_data(self, shape):
data = (self.generate_data(shape)).astype(np.float32)
stra = [1]*len(shape)
stra = [1] * len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
return Tensor(data), Tensor(datas[rank_id])
def label_data(self, shape):
data = (self.generate_data(shape)*1000/np.prod(shape)).astype(np.int32)
stra = [1]*len(shape)
data = (self.generate_data(shape) * 1000 / np.prod(shape)).astype(np.int32)
stra = [1] * len(shape)
stra[0] = device_num
datas = self.get_parallel_blocks(data, stra)
return Tensor(data), Tensor(datas[rank_id])
@@ -378,8 +377,8 @@ def test_train_feed(num_classes=8192):
set_algo_parameters(elementwise_op_strategy_follow=True)
parallel_callback = ModelCallback()
dataGen = DataGenerator()
input_full, input_part = dataGen.input_data((32*2, 3, 224, 224))
label_full, label_part = dataGen.label_data((32*2,))
input_full, input_part = dataGen.input_data((32 * 2, 3, 224, 224))
label_full, label_part = dataGen.label_data((32 * 2,))
dataset = Dataset(input_part, label_part)
net = resnet50(num_classes)
loss = SoftmaxCrossEntropyExpand(sparse=True)
@@ -398,8 +397,8 @@ def test_train_feed2(num_classes=1001):
set_algo_parameters(elementwise_op_strategy_follow=True)
parallel_callback = ModelCallback()
dataGen = DataGenerator()
input_full, input_part = dataGen.input_data((32*2, 3, 224, 224))
label_full, label_part = dataGen.label_data((32*2,))
input_full, input_part = dataGen.input_data((32 * 2, 3, 224, 224))
label_full, label_part = dataGen.label_data((32 * 2,))
dataset = Dataset(input_part, label_part)
net = resnet50(num_classes)
loss = SoftmaxCrossEntropyExpand(sparse=True)

@@ -14,17 +14,14 @@
# ============================================================================
""" test_multigraph_sink """
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.common import dtype as mstype
from mindspore.common import ms_function
from mindspore.ops import operations as P
def setup_module(module):
context.set_context(mode = context.PYNATIVE_MODE, device_target = "Ascend")
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
c1 = Tensor([2], mstype.int32)
@@ -208,4 +205,3 @@ def test_while_in_while_in_while():
output = while_in_while_in_while(c1, c2, c3)
expect = Tensor([2534], mstype.int32)
assert output == expect

@@ -31,7 +31,6 @@ def t1_while(x, y, z):
def test_net():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_task_sink=True)
c1 = Tensor([2], mstype.int32)
c2 = Tensor([14], mstype.int32)
c3 = Tensor([1], mstype.int32)

@@ -21,7 +21,7 @@ from mindspore.common.initializer import initializer
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim import Momentum
context.set_context(device_target="Ascend", enable_task_sink=True)
context.set_context(device_target="Ascend")
input_channel = 2048
output_channel = 512

Some files were not shown because too many files have changed in this diff.
