pull/9279/head
bai-yangfan 4 years ago
parent a09f1e30b6
commit 7bf9f4819f

@@ -55,13 +55,13 @@ class Cell(Cell_):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> class MyCell(Cell):
-        >>>     def __init__(self):
-        >>>         super(MyCell, self).__init__()
-        >>>         self.relu = P.ReLU()
-        >>>
-        >>>     def construct(self, x):
-        >>>         return self.relu(x)
+        >>> class MyCell(nn.Cell):
+        ...     def __init__(self):
+        ...         super(MyCell, self).__init__()
+        ...         self.relu = P.ReLU()
+        ...
+        ...     def construct(self, x):
+        ...         return self.relu(x)
     """
     IGNORE_LIST = ['_scope', '_cell_init_args', '_auto_prefix', '_cells', '_params', '_construct_inputs_names',
                    '_construct_inputs_num', '_create_time', '_mindspore_flags', '_parallel_inputs_run',
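
For reference, the rewritten example can be exercised as below. This is a minimal sketch, not part of the patch, assuming the usual doctest preamble (`import numpy as np`, `import mindspore.nn as nn`, `import mindspore.ops.operations as P`, `from mindspore import Tensor`) and an illustrative input shape:

    >>> # hypothetical usage: calling the cell runs its construct()
    >>> net = MyCell()
    >>> output = net(Tensor(np.ones([2, 3], np.float32)))
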
@@ -776,8 +776,9 @@ class Cell(Cell_):
         Examples:
             >>> net = Net()
+            >>> parameters = []
             >>> for item in net.get_parameters():
-            >>>     print(item)
+            ...     parameters.append(item)
         """
         for _, param in self.parameters_and_names(expand=expand):
             yield param
@@ -805,8 +806,8 @@ class Cell(Cell_):
             >>> n = Net()
             >>> names = []
             >>> for m in n.parameters_and_names():
-            >>>     if m[0]:
-            >>>         names.append(m[0])
+            ...     if m[0]:
+            ...         names.append(m[0])
         """
         cells = []
         if expand:
@@ -842,8 +843,8 @@ class Cell(Cell_):
             >>> n = Net()
             >>> names = []
             >>> for m in n.cells_and_names():
-            >>>     if m[0]:
-            >>>         names.append(m[0])
+            ...     if m[0]:
+            ...         names.append(m[0])
         """
         t_cells = cells if cells else set()
         if self in t_cells:
@@ -1016,7 +1017,7 @@ class Cell(Cell_):
         fn must be defined as the following code. `cell_name` is the name of the registered cell.
         `grad_input` is the gradient passed to the cell. `grad_output` is the gradient computed and passed to
         the next cell or primitive, which may be modified and returned.
-        >>> hook_fn(cell_name, grad_input, grad_output) -> Tensor or None
+        hook_fn(cell_name, grad_input, grad_output) -> Tensor or None.

         Args:
             fn (function): Specifies the hook function with grad as input.
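
As a reading aid, a hook conforming to the signature documented above might look as follows. This is a hedged sketch with a hypothetical hook body (backward hooks apply in PyNative mode), not code from the patch:

    >>> # hypothetical hook: observe the gradient flowing through the cell
    >>> def my_hook(cell_name, grad_input, grad_output):
    ...     print('backprop through', cell_name)
    >>> relu = nn.ReLU()
    >>> relu.register_backward_hook(my_hook)
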
@@ -1051,13 +1052,13 @@ class GraphKernel(Cell):
        enable_graph_kernel in context is set to True.

    Examples:
-        >>> class Relu(GraphKernel):
-        >>>     def __init__(self):
-        >>>         super(Relu, self).__init__()
-        >>>         self.max = P.Maximum()
-        >>>
-        >>>     def construct(self, x):
-        >>>         return self.max(P.Fill()(P.DType()(x), P.Shape()(x), 0.0), x)
+        >>> class Relu(nn.GraphKernel):
+        ...     def __init__(self):
+        ...         super(Relu, self).__init__()
+        ...         self.max = P.Maximum()
+        ...
+        ...     def construct(self, x):
+        ...         return self.max(P.Fill()(P.DType()(x), P.Shape()(x), 0.0), x)
    """

    def __init__(self, auto_prefix=True, pips=None):
@@ -88,25 +88,29 @@ class Model:

    Examples:
        >>> class Net(nn.Cell):
-        >>>     def __init__(self):
-        >>>         super(Net, self).__init__()
-        >>>         self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal')
-        >>>         self.bn = nn.BatchNorm2d(64)
-        >>>         self.relu = nn.ReLU()
-        >>>         self.flatten = nn.Flatten()
-        >>>         self.fc = nn.Dense(64*224*224, 12)  # padding=0
-        >>>
-        >>>     def construct(self, x):
-        >>>         x = self.conv(x)
-        >>>         x = self.bn(x)
-        >>>         x = self.relu(x)
-        >>>         x = self.flatten(x)
-        >>>         out = self.fc(x)
-        >>>         return out
+        ...     def __init__(self, num_class=10, num_channel=1):
+        ...         super(Net, self).__init__()
+        ...         self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
+        ...         self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
+        ...         self.fc1 = nn.Dense(16*5*5, 120, weight_init='ones')
+        ...         self.fc2 = nn.Dense(120, 84, weight_init='ones')
+        ...         self.fc3 = nn.Dense(84, num_class, weight_init='ones')
+        ...         self.relu = nn.ReLU()
+        ...         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
+        ...         self.flatten = nn.Flatten()
+        ...
+        ...     def construct(self, x):
+        ...         x = self.max_pool2d(self.relu(self.conv1(x)))
+        ...         x = self.max_pool2d(self.relu(self.conv2(x)))
+        ...         x = self.flatten(x)
+        ...         x = self.relu(self.fc1(x))
+        ...         x = self.relu(self.fc2(x))
+        ...         x = self.fc3(x)
+        ...         return x
        >>>
        >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
-        >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
+        >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
        >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
        >>> # For details about how to build the dataset, please refer to the tutorial document on the official website.
        >>> dataset = create_custom_dataset()
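
The docstring example presumably goes on to launch training; a minimal, hedged continuation (the epoch count is illustrative, not from this hunk) would be:

    >>> model.train(2, dataset)  # illustrative epoch count
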
@@ -545,9 +549,10 @@ class Model:
                If dataset_sink_mode is False, set sink_size as invalid. Default: -1.

        Examples:
+            >>> from mindspore.train.loss_scale_manager import FixedLossScaleManager
            >>> dataset = create_custom_dataset()
            >>> net = Net()
-            >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
+            >>> loss = nn.SoftmaxCrossEntropyWithLogits()
            >>> loss_scale_manager = FixedLossScaleManager()
            >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
            >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
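
Given the `dataset_sink_mode` and `sink_size` parameters documented above, the train call itself might look as below; a hedged sketch with illustrative values, not part of this hunk:

    >>> model.train(2, dataset, dataset_sink_mode=True, sink_size=1024)  # illustrative values
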
@@ -667,9 +672,9 @@ class Model:
        Examples:
            >>> dataset = create_custom_dataset()
            >>> net = Net()
-            >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
+            >>> loss = nn.SoftmaxCrossEntropyWithLogits()
            >>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'})
-            >>> model.eval(dataset)
+            >>> acc = model.eval(dataset, dataset_sink_mode=False)
        """
        dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
        _device_number_check(self._parallel_mode, self._device_number)
@@ -713,9 +718,9 @@ class Model:
            Tensor, array(s) of predictions.

        Examples:
-            >>> input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]), mindspore.float32)
+            >>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), mindspore.float32)
            >>> model = Model(Net())
-            >>> model.predict(input_data)
+            >>> result = model.predict(input_data)
        """
        self._predict_network.set_train(False)
        check_input_data(*predict_data, data_class=Tensor)
