modify Note

pull/9729/head
lilei 4 years ago
parent 38b76acdbd
commit 7ed181ec7c

@ -87,10 +87,10 @@ class WithLossCell(Cell):
>>> net_with_criterion = nn.WithLossCell(net, loss_fn)
>>>
>>> batch_size = 2
>>> data = Tensor(np.ones([batch_size, 3, 64, 64]).astype(np.float32) * 0.01)
>>> label = Tensor(np.ones([batch_size, 1, 1, 1]).astype(np.int32))
>>> data = Tensor(np.ones([batch_size, 1, 32, 32]).astype(np.float32) * 0.01)
>>> label = Tensor(np.ones([batch_size, 10]).astype(np.float32))
>>>
>>> net_with_criterion(data, label)
>>> output_data = net_with_criterion(data, label)
"""
def __init__(self, backbone, loss_fn):
@ -205,20 +205,20 @@ class TrainOneStepCell(Cell):
>>> train_net = nn.TrainOneStepCell(loss_net, optim)
>>>
>>> #2) Using user-defined WithLossCell
>>> class MyWithLossCell(nn.cell):
>>> def __init__(self, backbone, loss_fn):
>>> super(MyWithLossCell, self).__init__(auto_prefix=False)
>>> self._backbone = backbone
>>> self._loss_fn = loss_fn
>>>
>>> def construct(self, x, y, label):
>>> out = self._backbone(x, y)
>>> return self._loss_fn(out, label)
>>>
>>> @property
>>> def backbone_network(self):
>>> return self._backbone
>>>
>>> class MyWithLossCell(Cell):
... def __init__(self, backbone, loss_fn):
... super(MyWithLossCell, self).__init__(auto_prefix=False)
... self._backbone = backbone
... self._loss_fn = loss_fn
...
... def construct(self, x, y, label):
... out = self._backbone(x, y)
... return self._loss_fn(out, label)
...
... @property
... def backbone_network(self):
... return self._backbone
...
>>> loss_net = MyWithLossCell(net, loss_fn)
>>> train_net = nn.TrainOneStepCell(loss_net, optim)
"""
@ -408,7 +408,7 @@ class ParameterUpdate(Cell):
>>> update = nn.ParameterUpdate(param)
>>> update.phase = "update_param"
>>> weight = Tensor(np.arange(12).reshape((4, 3)), mindspore.float32)
>>> update(weight)
>>> network_update = update(weight)
"""
def __init__(self, param):

@ -85,16 +85,16 @@ class DynamicLossScaleUpdateCell(Cell):
>>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
>>>
>>> class Net(nn.Cell):
>>> def __init__(self, in_features, out_features):
>>> super(Net, self).__init__()
>>> self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
>>> name='weight')
>>> self.matmul = P.MatMul()
>>>
>>> def construct(self, x):
>>> output = self.matmul(x, self.weight)
>>> return output
>>>
... def __init__(self, in_features, out_features):
... super(Net, self).__init__()
... self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
... name='weight')
... self.matmul = P.MatMul()
...
... def construct(self, x):
... output = self.matmul(x, self.weight)
... return output
...
>>> in_features, out_features = 16, 10
>>> net = Net(in_features, out_features)
>>> loss = nn.MSELoss()
@ -168,16 +168,16 @@ class FixedLossScaleUpdateCell(Cell):
>>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
>>>
>>> class Net(nn.Cell):
>>> def __init__(self, in_features, out_features):
>>> super(Net, self).__init__()
>>> self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
>>> name='weight')
>>> self.matmul = P.MatMul()
>>>
>>> def construct(self, x):
>>> output = self.matmul(x, self.weight)
>>> return output
>>>
... def __init__(self, in_features, out_features):
... super(Net, self).__init__()
... self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
... name='weight')
... self.matmul = P.MatMul()
...
... def construct(self, x):
... output = self.matmul(x, self.weight)
... return output
...
>>> in_features, out_features = 16, 10
>>> net = Net(in_features, out_features)
>>> loss = nn.MSELoss()
@ -237,16 +237,16 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
>>> from mindspore.common import dtype as mstype
>>>
>>> class Net(nn.Cell):
>>> def __init__(self, in_features, out_features):
>>> super(Net, self).__init__()
>>> self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
>>> name='weight')
>>> self.matmul = P.MatMul()
>>>
>>> def construct(self, x):
>>> output = self.matmul(x, self.weight)
>>> return output
>>>
... def __init__(self, in_features, out_features):
... super(Net, self).__init__()
... self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
... name='weight')
... self.matmul = P.MatMul()
...
... def construct(self, x):
... output = self.matmul(x, self.weight)
... return output
...
>>> size, in_features, out_features = 16, 16, 10
>>> #1) when the type of scale_sense is Cell:
>>> net = Net(in_features, out_features)

Loading…
Cancel
Save