From a3120501a3c43da8ec2a0a17008c3b5ee7fcf452 Mon Sep 17 00:00:00 2001
From: lihongkang <lihongkang1@huawei.com>
Date: Sat, 10 Oct 2020 17:48:26 +0800
Subject: [PATCH] fix bugs

---
 mindspore/nn/layer/normalization.py  | 5 +++--
 mindspore/ops/_op_impl/tbe/neg.py    | 1 +
 mindspore/ops/operations/math_ops.py | 2 ++
 mindspore/ops/operations/nn_ops.py   | 6 +++---
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py
index f340c959ec..1dd454b270 100644
--- a/mindspore/nn/layer/normalization.py
+++ b/mindspore/nn/layer/normalization.py
@@ -490,7 +490,7 @@ class LayerNorm(Cell):
     Args:
         normalized_shape (Union(tuple[int], list[int]): The normalization is performed over axis
             `begin_norm_axis ... R - 1`.
-        begin_norm_axis (int): It first normalization dimension: normalization will be performed along dimensions
+        begin_norm_axis (int): The first normalization dimension: normalization will be performed along dimensions
             `begin_norm_axis: rank(inputs)`, the value should be in [-1, rank(input)). Default: -1.
         begin_params_axis (int): The first parameter(beta, gamma)dimension: scale and centering parameters
             will have dimensions `begin_params_axis: rank(inputs)` and will be broadcast with
@@ -514,7 +514,8 @@
         >>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
         >>> shape1 = x.shape[1:]
         >>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
-        >>> m(x)
+        >>> m(x).shape
+        (20, 5, 10, 10)
     """
 
     def __init__(self,
diff --git a/mindspore/ops/_op_impl/tbe/neg.py b/mindspore/ops/_op_impl/tbe/neg.py
index ee281a3bb2..5b44ab4699 100644
--- a/mindspore/ops/_op_impl/tbe/neg.py
+++ b/mindspore/ops/_op_impl/tbe/neg.py
@@ -29,6 +29,7 @@ neg_op_info = TBERegOp("Neg") \
     .dtype_format(DataType.I32_None, DataType.I32_None) \
     .dtype_format(DataType.F16_None, DataType.F16_None) \
     .dtype_format(DataType.F32_None, DataType.F32_None) \
+    .dtype_format(DataType.I8_None, DataType.I8_None) \
     .get_op_info()
 
 
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 7ec6a39cad..3b26079fdb 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -449,6 +449,8 @@ class ReduceAny(_Reduce):
         >>> input_x = Tensor(np.array([[True, False], [True, True]]))
         >>> op = P.ReduceAny(keep_dims=True)
         >>> output = op(input_x, 1)
+        [[True],
+        [True]]
     """
 
     def __infer__(self, input_x, axis):
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 934f72d209..24857d08bd 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -5549,10 +5549,10 @@ class DynamicRNN(PrimitiveWithInfer):
           The data type must be float16 or float32.
         - **b** (Tensor) - Bias. Tensor of shape (`4 x hidden_size`).
           The data type must be float16 or float32.
-        - **seq_length (Tensor) - The length of each batch. Tensor of shape (`batch_size`).
+        - **seq_length** (Tensor) - The length of each batch. Tensor of shape (`batch_size`).
           Only `None` is currently supported.
-        - **init_h (Tensor) - Hidden state of initial time. Tensor of shape (1, `batch_size`, `hidden_size`).
-        - **init_c (Tensor) - Cell state of initial time. Tensor of shape (1, `batch_size`, `hidden_size`).
+        - **init_h** (Tensor) - Hidden state of initial time. Tensor of shape (1, `batch_size`, `hidden_size`).
+        - **init_c** (Tensor) - Cell state of initial time. Tensor of shape (1, `batch_size`, `hidden_size`).
 
     Outputs:
         - **y** (Tensor) - A Tensor of shape (`num_step`, `batch_size`, `hidden_size`).
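
Reviewer note (not part of the patch): below is a minimal smoke test for the behaviours these fixes document. It is a sketch only, assuming a working MindSpore install with a backend that supports these ops; the int8 `Neg` case exercises the newly registered TBE kernel, so it is meaningful only on Ascend.

    # Hand-run smoke test for the doc/kernel changes above (a sketch, not part
    # of the patch). Backend support is assumed; int8 Neg needs Ascend/TBE.
    import numpy as np
    import mindspore
    from mindspore import Tensor, nn
    from mindspore.ops import operations as P

    # LayerNorm: the corrected docstring example now shows the output shape.
    x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
    m = nn.LayerNorm(x.shape[1:], begin_norm_axis=1, begin_params_axis=1)
    assert m(x).shape == (20, 5, 10, 10)

    # ReduceAny: the docstring now shows the expected output for axis=1.
    input_x = Tensor(np.array([[True, False], [True, True]]))
    print(P.ReduceAny(keep_dims=True)(input_x, 1))  # expected: [[True], [True]]

    # Neg on int8 input, the dtype newly registered for the TBE kernel.
    print(P.Neg()(Tensor(np.array([1, -2, 3], np.int8))))  # expected: [-1  2 -3]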