modify code doc

pull/9656/head
changzherui 4 years ago
parent ea32b6b28c
commit 91583f8c4d

@@ -430,7 +430,7 @@ class Validator:
@staticmethod
def check_file_name_by_regular(target, reg=None, flag=re.ASCII, prim_name=None):
if reg is None:
reg = r"^[0-9a-zA-Z\_\.\/\\]*$"
reg = r"^[0-9a-zA-Z\_\-\.\/\\]*$"
if re.match(reg, target, flag) is None:
prim_name = f'in `{prim_name}`' if prim_name else ""
raise ValueError("'{}' {} is illegal, it should be match regular'{}' by flags'{}'".format(

@@ -84,17 +84,16 @@ class SequentialCell(Cell):
``Ascend`` ``GPU``
Examples:
>>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid')
>>> bn = nn.BatchNorm2d(2)
>>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid', weight_init="ones")
>>> relu = nn.ReLU()
>>> seq = nn.SequentialCell([conv, bn, relu])
>>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
>>> seq = nn.SequentialCell([conv, relu])
>>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32)
>>> output = seq(x)
>>> print(output)
[[[[0.02531557 0. ]
[0.04933941 0.04880078]]
[[0. 0. ]
[0. 0. ]]]]
[[[[27. 27.]
[27. 27.]]
[[27. 27.]
[27. 27.]]]]
"""
def __init__(self, *args):
super(SequentialCell, self).__init__()

@@ -42,8 +42,10 @@ class FixedLossScaleManager(LossScaleManager):
drop_overflow_update (bool): whether to execute optimizer if there is an overflow. Default: True.
Examples:
>>> net = Net()
>>> loss_scale_manager = FixedLossScaleManager()
>>> model = Model(net, loss_scale_manager=loss_scale_manager)
>>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
"""
def __init__(self, loss_scale=128.0, drop_overflow_update=True):
if loss_scale < 1:
@@ -85,8 +87,10 @@ class DynamicLossScaleManager(LossScaleManager):
scale_window (int): Maximum continuous normal steps when there is no overflow. Default: 2000.
Examples:
>>> net = Net()
>>> loss_scale_manager = DynamicLossScaleManager()
>>> model = Model(net, loss_scale_manager=loss_scale_manager)
>>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
"""
def __init__(self,
init_loss_scale=2 ** 24,

Loading…
Cancel
Save